Skip to content

Commit

Permalink
Use #connect_timeout=(Time::Span)
Browse files Browse the repository at this point in the history
  • Loading branch information
yanecc committed Apr 29, 2024
1 parent ab6610e commit 41ea575
Show file tree
Hide file tree
Showing 6 changed files with 253 additions and 253 deletions.
86 changes: 43 additions & 43 deletions src/mockgpt.cr
Original file line number Diff line number Diff line change
@@ -1,43 +1,43 @@
require "grip"
require "uri"
require "json"
require "colorize"
require "option_parser"
require "./mockgpt/*"
require "./mockgpt/struct/*"

# Global runtime configuration. Defaults may be overridden first by a
# mocker.json config file and then by command-line flags.
module Mocker
  class_property ip : String = "localhost"        # bind address for the HTTP server
  class_property port : Int32 = 3000              # listen port
  class_property model : String = "codellama:13b" # Ollama model used upstream
  class_property gpt : String = "gpt-4"           # model name reported to clients
end

# Config file lookup: mocker.json next to the executable wins over the one
# in the user's home directory.
homePath = "#{Path.home}/mocker.json"
exePath = "#{File.dirname Process.executable_path.not_nil!}/mocker.json"
confPath = File.exists?(exePath) ? exePath : homePath
if File.file?(confPath)
  begin
    mocker = JSON.parse(File.read(confPath))
    Mocker.ip = mocker["ip"].as_s if mocker["ip"]?
    Mocker.port = mocker["port"].as_i if mocker["port"]?
    Mocker.model = mocker["model"].as_s if mocker["model"]?
  rescue ex : JSON::ParseException | TypeCastError
    # A malformed or mistyped config file should not kill startup with a
    # bare stack trace; warn and continue with the defaults.
    STDERR.puts "WARNING: ignoring invalid config #{confPath}: #{ex.message}"
  end
end

# Command-line flags take precedence over the config file.
OptionParser.parse do |parser|
  parser.banner = "Usage: mockgpt [arguments]"
  parser.on("-b HOST", "--binding HOST", "Bind to the specified IP") { |_host| Mocker.ip = _host }
  parser.on("-p PORT", "--port PORT", "Run on the specified port") do |_port|
    # Reject a non-numeric port with a clear message instead of letting
    # String#to_i raise an uncaught ArgumentError.
    Mocker.port = _port.to_i { abort "ERROR: PORT must be an integer, got #{_port.inspect}" }
  end
  parser.on("-m MODEL", "--mocker MODEL", "Employ the specified model") { |_model| Mocker.model = _model }
  parser.on("-h", "--help", "Show this help") do
    puts parser
    exit
  end
  parser.invalid_option do |flag|
    STDERR.puts "ERROR: #{flag} is not a valid option."
    STDERR.puts parser
    exit(1)
  end
end

mockgpt = Application.new(Mocker.ip, Mocker.port)
mockgpt.run
require "grip"
require "uri"
require "json"
require "colorize"
require "option_parser"
require "./mockgpt/*"
require "./mockgpt/struct/*"

# Runtime settings shared across the application; each value can be
# overridden by mocker.json and then by command-line flags.
module Mocker
  class_property ip : String = "localhost"
  class_property port : Int32 = 3000
  class_property model : String = "codellama:13b"
  class_property gpt : String = "gpt-4"
end

# Choose the config file: the copy beside the binary takes priority over
# the one in the user's home directory.
home_conf = "#{Path.home}/mocker.json"
exe_conf = "#{File.dirname Process.executable_path.not_nil!}/mocker.json"
conf_file = File.exists?(exe_conf) ? exe_conf : home_conf

if File.file?(conf_file)
  settings = JSON.parse(File.read(conf_file))
  # Only keys present in the file override the defaults.
  if (ip = settings["ip"]?)
    Mocker.ip = ip.as_s
  end
  if (port = settings["port"]?)
    Mocker.port = port.as_i
  end
  if (model = settings["model"]?)
    Mocker.model = model.as_s
  end
end

# Flags parsed last, so they win over file-based settings.
OptionParser.parse do |parser|
  parser.banner = "Usage: mockgpt [arguments]"
  parser.on("-b HOST", "--binding HOST", "Bind to the specified IP") { |host| Mocker.ip = host }
  parser.on("-p PORT", "--port PORT", "Run on the specified port") { |port| Mocker.port = port.to_i }
  parser.on("-m MODEL", "--mocker MODEL", "Employ the specified model") { |model| Mocker.model = model }
  parser.on("-h", "--help", "Show this help") do
    puts parser
    exit
  end
  parser.invalid_option do |flag|
    STDERR.puts "ERROR: #{flag} is not a valid option."
    STDERR.puts parser
    exit(1)
  end
end

server = Application.new(Mocker.ip, Mocker.port)
server.run
214 changes: 107 additions & 107 deletions src/mockgpt/controllers.cr
Original file line number Diff line number Diff line change
@@ -1,107 +1,107 @@
# Controller that mocks the OpenAI chat-completions API by proxying
# requests to a local Ollama server and reshaping its responses.
class MockGPT < Grip::Controllers::Http
  # Answers CORS preflight (OPTIONS) requests with a bare 200 OK.
  def connect(context : Context)
    context
      .put_status(HTTP::Status::OK)
      .halt
  end

  # POST /v1/chat/completions handler.
  # Rewrites the requested model to the configured Ollama model, forwards the
  # request to Ollama's /api/chat, and translates the reply back into the
  # OpenAI response shape (streaming SSE chunks or a single JSON body).
  def ollama(context : Context)
    params = context.fetch_json_params
    # Force the configured backend model regardless of what the client asked for.
    params["model"] = Mocker.model
    # NOTE(review): raises KeyError when OLLAMA_HOST is unset — confirm callers set it.
    presetUri = URI.parse "http://#{ENV["OLLAMA_HOST"]}"
    # Fill in defaults for any URI parts missing from OLLAMA_HOST.
    url = URI.new scheme: presetUri.scheme || "http", host: presetUri.host || "localhost", port: presetUri.port || 11434
    agent = HTTP::Client.new url
    # FIX: HTTP::Client#connect_timeout= expects a Time::Span; assigning the
    # bare Int `5` used the deprecated/removed overload.
    agent.connect_timeout = 5.seconds
    print " POST ".colorize.bright.on_blue, "| #{url}/api/chat | "
    currentTime = Time.local.to_unix
    if params["stream"]?
      # Streaming mode: relay Ollama's line-delimited JSON as OpenAI-style SSE.
      print "stream | "
      begin
        agent.post(path: "/api/chat", body: params.to_json) do |response|
          # context.send_resp response.body_io.try &.gets_to_end

          # First SSE chunk carries the assistant role with empty content,
          # matching the OpenAI streaming contract.
          leadChunk = {
            "choices" => [
              {
                "delta" => {
                  "role"    => "assistant",
                  "content" => "",
                },
                "index"         => 0,
                "finish_reason" => nil,
              },
            ],
            "created" => currentTime,
            "id"      => "chatcmpl",
            "object"  => "chat.completion.chunk",
            "model"   => Mocker.gpt,
          }
          context
            .send_resp("data: #{leadChunk.to_json}\n\n")
            .response.flush

          # response.body_io.each_line.skip(1).each do |line|
          # One Ollama JSON object per line; each becomes one SSE data chunk.
          response.body_io.each_line do |line|
            chunk = JSON.parse line

            transformedChunk = {
              "choices" => [
                {
                  "delta" => {
                    "content" => chunk["message"]["content"].as_s,
                  },
                  "index" => 0,
                  # Ollama marks the final chunk with "done": true.
                  "finish_reason" => chunk["done"].as_bool ? "stop" : nil,
                },
              ],
              "created" => currentTime,
              "id"      => "chatcmpl",
              "object"  => "chat.completion.chunk",
              "model"   => Mocker.gpt,
            }
            context
              .send_resp("data: #{transformedChunk.to_json}\n\n")
              .response.flush
          end
          puts " Done ".colorize.bright.on_green
          # OpenAI streaming terminator.
          context.send_resp "data: [DONE]"
        end
      rescue
        puts " Fail ".colorize.bright.on_red
        context
          .put_status(HTTP::Status::BAD_REQUEST)
          .halt
      end
    else
      # Non-streaming mode: one upstream call, one translated JSON response.
      print "normal | "
      begin
        plainRequest = PlainRequest.from_json(params.to_json)
        plainResponse = agent.post(path: "/api/chat", body: plainRequest.to_json)
        respJson = JSON.parse plainResponse.body
        gptResponse = PlainResponse.new(
          created: currentTime,
          choices: [
            Choice.new(
              message: Message.new(
                role: respJson["message"]["role"].as_s,
                content: respJson["message"]["content"].as_s,
              ),
            ),
          ],
          # Map Ollama's token counters onto the OpenAI usage fields.
          usage: Usage.new(
            prompt_tokens: respJson["prompt_eval_count"].as_i,
            completion_tokens: respJson["eval_count"].as_i,
            total_tokens: respJson["prompt_eval_count"].as_i + respJson["eval_count"].as_i,
          ),
        )
        puts " Done ".colorize.bright.on_green
        context.send_resp gptResponse.to_json
      rescue
        puts " Fail ".colorize.bright.on_red
        context
          .put_status(HTTP::Status::BAD_REQUEST)
          .halt
      end
    end
  end
end
# Controller that mocks the OpenAI chat-completions API by proxying
# requests to a local Ollama server and reshaping its responses.
class MockGPT < Grip::Controllers::Http
  # Answers CORS preflight (OPTIONS) requests with a bare 200 OK.
  def connect(context : Context)
    context
      .put_status(HTTP::Status::OK)
      .halt
  end

  # POST /v1/chat/completions handler.
  # Rewrites the requested model to the configured Ollama model, forwards the
  # request to Ollama's /api/chat, and translates the reply back into the
  # OpenAI response shape (streaming SSE chunks or a single JSON body).
  def ollama(context : Context)
    params = context.fetch_json_params
    # Force the configured backend model regardless of what the client asked for.
    params["model"] = Mocker.model
    # NOTE(review): raises KeyError when OLLAMA_HOST is unset — confirm callers set it.
    presetUri = URI.parse "http://#{ENV["OLLAMA_HOST"]}"
    # Fill in defaults for any URI parts missing from OLLAMA_HOST.
    url = URI.new scheme: presetUri.scheme || "http", host: presetUri.host || "localhost", port: presetUri.port || 11434
    agent = HTTP::Client.new url
    # Connection timeout as a Time::Span (the Int overload is deprecated).
    agent.connect_timeout = 5.seconds
    print " POST ".colorize.bright.on_blue, "| #{url}/api/chat | "
    currentTime = Time.local.to_unix
    if params["stream"]?
      # Streaming mode: relay Ollama's line-delimited JSON as OpenAI-style SSE.
      print "stream | "
      begin
        agent.post(path: "/api/chat", body: params.to_json) do |response|
          # context.send_resp response.body_io.try &.gets_to_end

          # First SSE chunk carries the assistant role with empty content,
          # matching the OpenAI streaming contract.
          leadChunk = {
            "choices" => [
              {
                "delta" => {
                  "role" => "assistant",
                  "content" => "",
                },
                "index" => 0,
                "finish_reason" => nil,
              },
            ],
            "created" => currentTime,
            "id" => "chatcmpl",
            "object" => "chat.completion.chunk",
            "model" => Mocker.gpt,
          }
          context
            .send_resp("data: #{leadChunk.to_json}\n\n")
            .response.flush

          # response.body_io.each_line.skip(1).each do |line|
          # One Ollama JSON object per line; each becomes one SSE data chunk.
          response.body_io.each_line do |line|
            chunk = JSON.parse line

            transformedChunk = {
              "choices" => [
                {
                  "delta" => {
                    "content" => chunk["message"]["content"].as_s,
                  },
                  "index" => 0,
                  # Ollama marks the final chunk with "done": true.
                  "finish_reason" => chunk["done"].as_bool ? "stop" : nil,
                },
              ],
              "created" => currentTime,
              "id" => "chatcmpl",
              "object" => "chat.completion.chunk",
              "model" => Mocker.gpt,
            }
            context
              .send_resp("data: #{transformedChunk.to_json}\n\n")
              .response.flush
          end
          puts " Done ".colorize.bright.on_green
          # OpenAI streaming terminator.
          context.send_resp "data: [DONE]"
        end
      rescue
        puts " Fail ".colorize.bright.on_red
        context
          .put_status(HTTP::Status::BAD_REQUEST)
          .halt
      end
    else
      # Non-streaming mode: one upstream call, one translated JSON response.
      print "normal | "
      begin
        plainRequest = PlainRequest.from_json(params.to_json)
        plainResponse = agent.post(path: "/api/chat", body: plainRequest.to_json)
        respJson = JSON.parse plainResponse.body
        gptResponse = PlainResponse.new(
          created: currentTime,
          choices: [
            Choice.new(
              message: Message.new(
                role: respJson["message"]["role"].as_s,
                content: respJson["message"]["content"].as_s,
              ),
            ),
          ],
          # Map Ollama's token counters onto the OpenAI usage fields.
          usage: Usage.new(
            prompt_tokens: respJson["prompt_eval_count"].as_i,
            completion_tokens: respJson["eval_count"].as_i,
            total_tokens: respJson["prompt_eval_count"].as_i + respJson["eval_count"].as_i,
          ),
        )
        puts " Done ".colorize.bright.on_green
        context.send_resp gptResponse.to_json
      rescue
        puts " Fail ".colorize.bright.on_red
        context
          .put_status(HTTP::Status::BAD_REQUEST)
          .halt
      end
    end
  end
end
42 changes: 21 additions & 21 deletions src/mockgpt/cors.cr
Original file line number Diff line number Diff line change
@@ -1,21 +1,21 @@
# HTTP middleware applying permissive CORS and SSE-friendly response
# headers, and restricting the API to POST/OPTIONS requests.
class CrossOriginResourceSharing
  include HTTP::Handler

  def call(context : HTTP::Server::Context)
    headers = context.response.headers
    headers.add "Server", "Grip/v2"
    headers.add "Access-Control-Allow-Origin", "*"
    headers.add "Access-Control-Allow-Headers", "*"
    headers.add "Access-Control-Allow-Credentials", "true"
    headers.add "Content-Type", "text/event-stream; charset=utf-8"
    headers.add "Cache-Control", "no-cache"
    headers.add "X-Accel-Buffering", "no"

    if context.request.method.in?(["POST", "OPTIONS"])
      # Accepted verb: continue down the handler chain.
      call_next(context)
    else
      # Anything else gets 405 with the allowed method advertised.
      headers.add "Access-Control-Allow-Methods", "POST"
      context.put_status(HTTP::Status::METHOD_NOT_ALLOWED)
    end
  end
end
# HTTP middleware applying permissive CORS and SSE-friendly response
# headers, and restricting the API to POST/OPTIONS requests.
class CrossOriginResourceSharing
  include HTTP::Handler

  def call(context : HTTP::Server::Context)
    context.response.headers.add "Server", "Grip/v2"
    context.response.headers.add "Access-Control-Allow-Origin", "*"
    context.response.headers.add "Access-Control-Allow-Headers", "*"
    context.response.headers.add "Access-Control-Allow-Credentials", "true"
    # Event-stream content type plus no-buffering hints so SSE chunks are
    # delivered immediately through proxies (X-Accel-Buffering targets nginx).
    context.response.headers.add "Content-Type", "text/event-stream; charset=utf-8"
    context.response.headers.add "Cache-Control", "no-cache"
    context.response.headers.add "X-Accel-Buffering", "no"

    # Only POST (the API call) and OPTIONS (preflight) are allowed through.
    unless context.request.method.in? ["POST", "OPTIONS"]
      context.response.headers.add "Access-Control-Allow-Methods", "POST"

      return context.put_status(HTTP::Status::METHOD_NOT_ALLOWED)
    end

    call_next(context)
  end
end
32 changes: 16 additions & 16 deletions src/mockgpt/routes.cr
Original file line number Diff line number Diff line change
@@ -1,16 +1,16 @@
# Grip application wiring: registers the CORS middleware and the two
# chat-completion routes, and exposes the server binding settings.
class Application < Grip::Application
  getter host : String
  getter port : Int32
  getter reuse_port : Bool = true
  getter fallthrough : Bool = true
  getter directory_listing : Bool = false

  def initialize(@host : String, @port : Int32)
    super(environment: "production", serve_static: false)

    # CORS/SSE header handler runs early in the middleware chain.
    router.insert(1, CrossOriginResourceSharing.new)

    post "/v1/chat/completions", MockGPT, as: :ollama
    options "/v1/chat/completions", MockGPT, as: :connect
  end
end
# Grip application wiring: registers the CORS middleware and the two
# chat-completion routes, and exposes the server binding settings.
class Application < Grip::Application
  def initialize(@host : String, @port : Int32)
    super(environment: "production", serve_static: false)

    # CORS/SSE header handler runs early in the middleware chain.
    router.insert(1, CrossOriginResourceSharing.new)

    post "/v1/chat/completions", MockGPT, as: :ollama
    options "/v1/chat/completions", MockGPT, as: :connect
  end

  getter host : String
  getter port : Int32
  getter reuse_port : Bool = true
  getter fallthrough : Bool = true
  getter directory_listing : Bool = false
end
Loading

0 comments on commit 41ea575

Please sign in to comment.