Commit 86edc89: Fix param name

hupe1980 committed Jul 20, 2023
1 parent 70bd6f9

Showing 12 changed files with 34 additions and 31 deletions.
2 changes: 1 addition & 1 deletion examples/llm_bash/main.go
@@ -13,7 +13,7 @@ import (

  func main() {
    openai, err := llm.NewOpenAI(os.Getenv("OPENAI_API_KEY"), func(o *llm.OpenAIOptions) {
-     o.Temperatur = 0.01
+     o.Temperature = 0.01
    })
    if err != nil {
      log.Fatal(err)
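For reference, the corrected example configures the model through the renamed option field. A minimal, runnable sketch of that usage (the import path for the llm package is an assumption inferred from the repository layout; only the option function itself appears in the diff above):

package main

import (
    "log"
    "os"

    "github.com/hupe1980/golc/model/llm" // assumed import path
)

func main() {
    // The option field is now spelled Temperature.
    openai, err := llm.NewOpenAI(os.Getenv("OPENAI_API_KEY"), func(o *llm.OpenAIOptions) {
        o.Temperature = 0.01
    })
    if err != nil {
        log.Fatal(err)
    }
    _ = openai // the original example continues beyond this snippet
}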
2 changes: 1 addition & 1 deletion model/chatmodel/azure_openai.go
@@ -22,7 +22,7 @@ func NewAzureOpenAI(apiKey, baseURL string, optFns ...func(o *AzureOpenAIOptions
    Verbose: golc.Verbose,
  },
  ModelName: openai.GPT3Dot5Turbo,
- Temperatur: 1,
+ Temperature: 1,
  TopP: 1,
  PresencePenalty: 0,
  FrequencyPenalty: 0,
11 changes: 6 additions & 5 deletions model/chatmodel/openai.go
@@ -27,7 +27,7 @@ type OpenAIOptions struct {
  // Model name to use.
  ModelName string
  // Sampling temperature to use.
- Temperatur float32
+ Temperature float32
  // The maximum number of tokens to generate in the completion.
  // -1 returns as many tokens as possible given the prompt and
  //the models maximal context size.
@@ -87,7 +87,7 @@ func NewOpenAIFromClient(client OpenAIClient, optFns ...func(o *OpenAIOptions))
    Verbose: golc.Verbose,
  },
  ModelName: openai.GPT3Dot5Turbo,
- Temperatur: 1,
+ Temperature: 1,
  TopP: 1,
  PresencePenalty: 0,
  FrequencyPenalty: 0,
@@ -152,9 +152,10 @@ func (cm *OpenAI) Generate(ctx context.Context, messages schema.ChatMessages, op
  }

  res, err := cm.client.CreateChatCompletion(ctx, openai.ChatCompletionRequest{
-   Model: cm.opts.ModelName,
-   Messages: openAIMessages,
-   Functions: functions,
+   Model: cm.opts.ModelName,
+   Temperature: cm.opts.Temperature,
+   Messages: openAIMessages,
+   Functions: functions,
  })
  if err != nil {
    return nil, err
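Beyond the field rename, this hunk also fixes an omission: the chat completion request previously carried no temperature at all, so a configured value never reached the OpenAI API. A minimal sketch of setting a non-default temperature on the chat model (the chatmodel.NewOpenAI constructor and the import path are assumptions; only NewOpenAIFromClient and NewAzureOpenAI appear in this diff):

package main

import (
    "log"
    "os"

    "github.com/hupe1980/golc/model/chatmodel" // assumed import path
)

func main() {
    // Assumed constructor, analogous to NewAzureOpenAI above.
    cm, err := chatmodel.NewOpenAI(os.Getenv("OPENAI_API_KEY"), func(o *chatmodel.OpenAIOptions) {
        o.Temperature = 0.2 // now forwarded to the ChatCompletionRequest by Generate
    })
    if err != nil {
        log.Fatal(err)
    }
    _ = cm
}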
6 changes: 4 additions & 2 deletions model/chatmodel/openai_test.go
@@ -25,7 +25,8 @@ func TestOpenAI_Generate(t *testing.T) {

  // Define the expected arguments and response for the mock client
  expectedRequest := openai.ChatCompletionRequest{
-   Model: openAI.opts.ModelName,
+   Model: openAI.opts.ModelName,
+   Temperature: 1,
    Messages: []openai.ChatCompletionMessage{
      {Role: "user", Content: "Hello"},
      {Role: "assistant", Content: "Hi there"},
@@ -63,7 +64,8 @@ func TestOpenAI_Generate(t *testing.T) {

  // Define the expected arguments and error for the mock client
  expectedRequest := openai.ChatCompletionRequest{
-   Model: openAI.opts.ModelName,
+   Model: openAI.opts.ModelName,
+   Temperature: 1,
    Messages: []openai.ChatCompletionMessage{
      {Role: "user", Content: "Hello"},
      {Role: "assistant", Content: "Hi there"},
6 changes: 3 additions & 3 deletions model/chatmodel/palm.go
@@ -26,7 +26,7 @@ type PalmOptions struct {
  ModelName string `map:"model_name,omitempty"`

  // Temperature is the sampling temperature to use during text generation.
- Temperatur float32 `map:"temperatur,omitempty"`
+ Temperature float32 `map:"temperature,omitempty"`

  // TopP is the total probability mass of tokens to consider at each step.
  TopP float32 `map:"top_p,omitempty"`
@@ -51,7 +51,7 @@ func NewPalm(client PalmClient, optFns ...func(o *PalmOptions)) (*Palm, error) {
    Verbose: golc.Verbose,
  },
  ModelName: "models/chat-bison-001",
- Temperatur: 0.7,
+ Temperature: 0.7,
  CandidateCount: 1,
}

@@ -87,7 +87,7 @@ func (l *Palm) Generate(ctx context.Context, prompt string, optFns ...func(o *sc
  res, err := l.client.GenerateMessage(ctx, &generativelanguagepb.GenerateMessageRequest{
    Prompt: &generativelanguagepb.MessagePrompt{},
    Model: l.opts.ModelName,
-   Temperature: &l.opts.Temperatur,
+   Temperature: &l.opts.Temperature,
    TopP: &l.opts.TopP,
    TopK: &l.opts.TopK,
    CandidateCount: &l.opts.CandidateCount,
4 changes: 2 additions & 2 deletions model/chatmodel/palm_test.go
@@ -73,15 +73,15 @@ func TestPalm(t *testing.T) {
  t.Run("InvocationParams", func(t *testing.T) {
    // Create a Palm instance
    llm, err := NewPalm(&mockPalmClient{}, func(o *PalmOptions) {
-     o.Temperatur = 0.7
+     o.Temperature = 0.7
    })
    assert.NoError(t, err)

    // Call the InvocationParams method
    params := llm.InvocationParams()

    // Assert the result
-   assert.Equal(t, float32(0.7), params["temperatur"])
+   assert.Equal(t, float32(0.7), params["temperature"])
  })
}
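Because the map struct tag changes along with the field name, InvocationParams now exposes the value under the "temperature" key rather than "temperatur", so callers reading the old key come up empty. A hypothetical extra test case illustrating the point, assuming the same package and mockPalmClient used by the test above:

package chatmodel // assumed package name, matching the test above

import (
    "testing"

    "github.com/stretchr/testify/assert"
)

func TestPalm_InvocationParamsKeyRename(t *testing.T) {
    // Hypothetical check: the old "temperatur" key must no longer be present.
    llm, err := NewPalm(&mockPalmClient{}, func(o *PalmOptions) {
        o.Temperature = 0.7
    })
    assert.NoError(t, err)

    params := llm.InvocationParams()
    _, hasOldKey := params["temperatur"]
    assert.False(t, hasOldKey, "old key should be gone after the rename")
    assert.Equal(t, float32(0.7), params["temperature"])
}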
8 changes: 4 additions & 4 deletions model/llm/cohere.go
@@ -35,8 +35,8 @@ type CohereOptions struct {
  // MaxTokens denotes the number of tokens to predict per generation.
  MaxTokens uint `map:"max_tokens"`

- // Temperatur is a non-negative float that tunes the degree of randomness in generation.
- Temperatur float64 `map:"temperature"`
+ // Temperature is a non-negative float that tunes the degree of randomness in generation.
+ Temperature float64 `map:"temperature"`

  // K specifies the number of top most likely tokens to consider for generation at each step.
  K int `map:"k"`
@@ -91,7 +91,7 @@ func NewCohereFromClient(client CohereClient, optFns ...func(o *CohereOptions))
  Model: "medium",
  NumGenerations: 1,
  MaxTokens: 256,
- Temperatur: 0.75,
+ Temperature: 0.75,
  K: 0,
  P: 1,
  FrequencyPenalty: 0,
@@ -133,7 +133,7 @@ func (l *Cohere) Generate(ctx context.Context, prompt string, optFns ...func(o *
  Model: l.opts.Model,
  NumGenerations: l.opts.NumGenerations,
  MaxTokens: l.opts.MaxTokens,
- Temperature: l.opts.Temperatur,
+ Temperature: l.opts.Temperature,
  K: l.opts.K,
  P: l.opts.P,
  PresencePenalty: l.opts.PresencePenalty,
6 changes: 3 additions & 3 deletions model/llm/openai.go
@@ -36,7 +36,7 @@ type OpenAIOptions struct {
  // ModelName is the name of the OpenAI language model to use.
  ModelName string `map:"model_name,omitempty"`
  // Temperature is the sampling temperature to use during text generation.
- Temperatur float32 `map:"temperatur,omitempty"`
+ Temperature float32 `map:"temperature,omitempty"`
  // MaxTokens is the maximum number of tokens to generate in the completion.
  MaxTokens int `map:"max_tokens,omitempty"`
  // TopP is the total probability mass of tokens to consider at each step.
@@ -98,7 +98,7 @@ func NewOpenAIFromClient(client OpenAIClient, optFns ...func(o *OpenAIOptions))
    Verbose: golc.Verbose,
  },
  ModelName: openai.GPT3TextDavinci002,
- Temperatur: 0.7,
+ Temperature: 0.7,
  MaxTokens: 256,
  TopP: 1,
  PresencePenalty: 0,
@@ -140,7 +140,7 @@ func (l *OpenAI) Generate(ctx context.Context, prompt string, optFns ...func(o *
  completionRequest := openai.CompletionRequest{
    Prompt: prompt,
    Model: l.opts.ModelName,
-   Temperature: l.opts.Temperatur,
+   Temperature: l.opts.Temperature,
    MaxTokens: l.opts.MaxTokens,
    TopP: l.opts.TopP,
    PresencePenalty: l.opts.PresencePenalty,
6 changes: 3 additions & 3 deletions model/llm/palm.go
@@ -26,7 +26,7 @@ type PalmOptions struct {
  ModelName string `map:"model_name,omitempty"`

  // Temperature is the sampling temperature to use during text generation.
- Temperatur float32 `map:"temperatur,omitempty"`
+ Temperature float32 `map:"temperature,omitempty"`

  // TopP is the total probability mass of tokens to consider at each step.
  TopP float32 `map:"top_p,omitempty"`
@@ -54,7 +54,7 @@ func NewPalm(client PalmClient, optFns ...func(o *PalmOptions)) (*Palm, error) {
    Verbose: golc.Verbose,
  },
  ModelName: "models/text-bison-001",
- Temperatur: 0.7,
+ Temperature: 0.7,
  CandidateCount: 1,
}

@@ -92,7 +92,7 @@ func (l *Palm) Generate(ctx context.Context, prompt string, optFns ...func(o *sc
    Text: prompt,
  },
  Model: l.opts.ModelName,
- Temperature: &l.opts.Temperatur,
+ Temperature: &l.opts.Temperature,
  TopP: &l.opts.TopP,
  TopK: &l.opts.TopK,
  MaxOutputTokens: &l.opts.MaxOutputTokens,
4 changes: 2 additions & 2 deletions model/llm/palm_test.go
@@ -72,7 +72,7 @@ func TestPalm(t *testing.T) {
  t.Run("InvocationParams", func(t *testing.T) {
    // Create a Palm instance
    llm, err := NewPalm(&mockPalmClient{}, func(o *PalmOptions) {
-     o.Temperatur = 0.7
+     o.Temperature = 0.7
      o.MaxOutputTokens = 4711
    })
    assert.NoError(t, err)
@@ -81,7 +81,7 @@ func TestPalm(t *testing.T) {
    params := llm.InvocationParams()

    // Assert the result
-   assert.Equal(t, float32(0.7), params["temperatur"])
+   assert.Equal(t, float32(0.7), params["temperature"])
    assert.Equal(t, int32(4711), params["max_output_tokens"])
  })
}
6 changes: 3 additions & 3 deletions model/llm/vertexai.go
@@ -30,7 +30,7 @@ type VertexAIOptions struct {
  schema.Tokenizer `map:"-"`

  // Temperature is the sampling temperature to use during text generation.
- Temperatur float32 `map:"temperatur"`
+ Temperature float32 `map:"temperature"`

  // MaxOutputTokens determines the maximum amount of text output from one prompt.
  MaxOutputTokens int `map:"max_output_tokens"`
@@ -56,7 +56,7 @@ func NewVertexAI(client VertexAIClient, endpoint string, optFns ...func(o *Verte
  CallbackOptions: &schema.CallbackOptions{
    Verbose: golc.Verbose,
  },
- Temperatur: 0.0,
+ Temperature: 0.0,
  MaxOutputTokens: 128,
  TopP: 0.95,
  TopK: 40,
@@ -101,7 +101,7 @@ func (l *VertexAI) Generate(ctx context.Context, prompt string, optFns ...func(o
  }

  parameters, err := structpb.NewValue(map[string]any{
-   "temperature": l.opts.Temperatur,
+   "temperature": l.opts.Temperature,
    "max_output_tokens": l.opts.MaxOutputTokens,
    "top_p": l.opts.TopP,
    "top_k": l.opts.TopK,
4 changes: 2 additions & 2 deletions model/llm/vertexai_test.go
@@ -76,7 +76,7 @@ func TestVertexAI_Generate(t *testing.T) {
  t.Run("InvocationParams", func(t *testing.T) {
    // Create a VertexAI instance
    llm, err := NewVertexAI(&mockVertexAIClient{}, "dummy", func(o *VertexAIOptions) {
-     o.Temperatur = 0.7
+     o.Temperature = 0.7
      o.MaxOutputTokens = 4711
    })
    assert.NoError(t, err)
@@ -85,7 +85,7 @@ func TestVertexAI_Generate(t *testing.T) {
    params := llm.InvocationParams()

    // Assert the result
-   assert.Equal(t, float32(0.7), params["temperatur"])
+   assert.Equal(t, float32(0.7), params["temperature"])
    assert.Equal(t, 4711, params["max_output_tokens"])
  })
}
