package main

import (
"bufio"
"context"
"errors"
"fmt"
"io"
"math"
"net/http"
"os"
"strings"
"time"
tea "github.com/charmbracelet/bubbletea"
"github.com/charmbracelet/lipgloss"
"github.com/mattn/go-isatty"
openai "github.com/sashabaranov/go-openai"
)
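
// state tracks where the program is in its lifecycle: starting up, config
// loaded, completion in flight, or errored.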
type state int

const (
    startState state = iota
    configLoadedState
    completionState
    errorState
)

// Mods is the Bubble Tea model that manages reading stdin and querying the
// OpenAI API.
type Mods struct {
    Config   Config
    Output   string
    Input    string
    Styles   styles
    Error    *modsError
    state    state
    retries  int
    renderer *lipgloss.Renderer
    anim     tea.Model
    width    int
    height   int
}
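
// newMods returns a Mods model in the start state, with styles built from
// the given renderer.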
func newMods(r *lipgloss.Renderer) *Mods {
    return &Mods{
        Styles:   makeStyles(r),
        state:    startState,
        renderer: r,
    }
}

// completionInput is a tea.Msg that wraps the content read from stdin.
type completionInput struct{ content string }

// completionOutput is a tea.Msg that wraps the content returned from the
// OpenAI API.
type completionOutput struct{ content string }

// modsError is a wrapper around an error that adds additional context.
type modsError struct {
    err    error
    reason string
}

func (m modsError) Error() string {
    return m.err.Error()
}

// Init implements tea.Model.
func (m *Mods) Init() tea.Cmd {
    return m.loadConfigCmd
}

// Update implements tea.Model.
func (m *Mods) Update(msg tea.Msg) (tea.Model, tea.Cmd) {
    switch msg := msg.(type) {
    case Config:
        m.Config = msg
        m.state = configLoadedState
        if m.Config.ShowHelp || m.Config.Version || m.Config.Settings {
            return m, tea.Quit
        }
        m.anim = newAnim(m.Config.Fanciness, m.Config.StatusText, m.renderer, m.Styles)
        return m, tea.Batch(readStdinCmd, m.anim.Init())
    case completionInput:
        if msg.content == "" && m.Config.Prefix == "" {
            return m, tea.Quit
        }
        if msg.content != "" {
            m.Input = msg.content
        }
        m.state = completionState
        return m, m.startCompletionCmd(msg.content)
    case completionOutput:
        m.Output = msg.content
        return m, tea.Quit
    case modsError:
        m.Error = &msg
        m.state = errorState
        return m, tea.Quit
    case tea.WindowSizeMsg:
        m.width, m.height = msg.Width, msg.Height
    case tea.KeyMsg:
        switch msg.String() {
        case "q", "ctrl+c":
            return m, tea.Quit
        }
    }
    if m.state == configLoadedState || m.state == completionState {
        var cmd tea.Cmd
        m.anim, cmd = m.anim.Update(msg)
        return m, cmd
    }
    return m, nil
}

// View implements tea.Model.
func (m *Mods) View() string {
    //nolint:exhaustive
    switch m.state {
    case errorState:
        return m.ErrorView()
    case completionState:
        if !m.Config.Quiet {
            return m.anim.View()
        }
    }
    return ""
}

// ErrorView renders the currently set modsError.
func (m Mods) ErrorView() string {
    const maxWidth = 120
    const horizontalPadding = 2
    w := m.width - (horizontalPadding * 2)
    if w > maxWidth {
        w = maxWidth
    }
    s := m.renderer.NewStyle().Width(w).Padding(0, horizontalPadding)
    return fmt.Sprintf(
        "\n%s\n\n%s\n\n",
        s.Render(m.Styles.ErrorHeader.String(), m.Error.reason),
        s.Render(m.Styles.ErrorDetails.Render(m.Error.Error())),
    )
}

// FormattedOutput returns the response from OpenAI with the user-configured
// prefix and stdin settings applied.
func (m *Mods) FormattedOutput() string {
    prefixFormat := "> %s\n\n---\n\n%s"
    stdinFormat := "```\n%s```\n\n---\n\n%s"
    out := m.Output
    if m.Config.IncludePrompt != 0 && m.Input != "" {
        if m.Config.IncludePrompt < 0 {
            out = fmt.Sprintf(stdinFormat, m.Input, out)
        } else {
            // Include only the first IncludePrompt lines of stdin.
            scanner := bufio.NewScanner(strings.NewReader(m.Input))
            i := 0
            in := ""
            for scanner.Scan() {
                if i == m.Config.IncludePrompt {
                    break
                }
                in += (scanner.Text() + "\n")
                i++
            }
            out = fmt.Sprintf(stdinFormat, in, out)
        }
    }
    if m.Config.IncludePromptArgs || m.Config.IncludePrompt != 0 {
        prefix := m.Config.Prefix
        if m.Config.Format {
            prefix = fmt.Sprintf("%s %s", prefix, m.Config.FormatText)
        }
        out = fmt.Sprintf(prefixFormat, prefix, out)
    }
    return out
}
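
// retry sleeps with exponential backoff (doubling from a 100ms base) and
// re-submits the prompt as a fresh completionInput; once MaxRetries is
// reached, the given error is returned instead.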
func (m *Mods) retry(content string, err modsError) tea.Msg {
    m.retries++
    if m.retries >= m.Config.MaxRetries {
        return err
    }
    wait := time.Millisecond * 100 * time.Duration(math.Pow(2, float64(m.retries))) //nolint:gomnd
    time.Sleep(wait)
    return completionInput{content}
}
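
// loadConfigCmd loads the settings file and command-line flags, producing
// either a Config or a modsError for Update to handle.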
func (m *Mods) loadConfigCmd() tea.Msg {
    cfg, err := newConfig()
    if err != nil {
        var fpe flagParseError
        switch {
        case errors.As(err, &fpe):
            me := modsError{}
            me.reason = fmt.Sprintf("Missing flag: %s", m.Styles.InlineCode.Render(fpe.Flag()))
            me.err = fmt.Errorf("Check out %s %s", m.Styles.InlineCode.Render("mods -h"), m.Styles.Comment.Render("for help."))
            return me
        default:
            return modsError{err, "There was an error loading your config file."}
        }
    }
    return cfg
}
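
// startCompletionCmd returns a tea.Cmd that resolves the model, endpoint,
// and API key, assembles the prompt, and performs the chat completion
// request.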
func (m *Mods) startCompletionCmd(content string) tea.Cmd {
    return func() tea.Msg {
        var ok bool
        var mod Model
        var api API
        var key string
        var ccfg openai.ClientConfig
        cfg := m.Config
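
        // Look up the requested model in the settings file; if it isn't
        // there, fall back to the API endpoint and limits given via flags.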
        mod, ok = cfg.Models[cfg.Model]
        if !ok {
            if cfg.API == "" {
                return modsError{
                    reason: "Model " + m.Styles.InlineCode.Render(cfg.Model) + " is not in the settings file.",
                    err:    fmt.Errorf("Please specify an API endpoint with %s or configure the model in the settings: %s", m.Styles.InlineCode.Render("--api"), m.Styles.InlineCode.Render("mods -s")),
                }
            }
            mod.Name = cfg.Model
            mod.API = cfg.API
            mod.MaxChars = cfg.MaxInputChars
        }
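
        // Find the API endpoint configuration that serves the chosen model.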
        for _, a := range cfg.APIs {
            if mod.API == a.Name {
                api = a
                break
            }
        }
        if api.Name == "" {
            eps := make([]string, 0)
            for _, a := range cfg.APIs {
                eps = append(eps, m.Styles.InlineCode.Render(a.Name))
            }
            return modsError{
                reason: fmt.Sprintf("The API endpoint %s is not configured.", m.Styles.InlineCode.Render(cfg.API)),
                err:    fmt.Errorf("Your configured API endpoints are: %s", strings.Join(eps, ", ")),
            }
        }
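
        // Resolve the API key: an endpoint-specific environment variable
        // wins; provider defaults are checked below.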
        if api.APIKeyEnv != "" {
            key = os.Getenv(api.APIKeyEnv)
        }
        switch mod.API {
        case "openai":
            if key == "" {
                key = os.Getenv("OPENAI_API_KEY")
            }
            if key == "" {
                return modsError{
                    reason: m.Styles.InlineCode.Render("OPENAI_API_KEY") + " environment variable is required.",
                    err:    fmt.Errorf("You can grab one at %s", m.Styles.Link.Render("https://platform.openai.com/account/api-keys.")),
                }
            }
            ccfg = openai.DefaultConfig(key)
            if api.BaseURL != "" {
                ccfg.BaseURL = api.BaseURL
            }
        case "azure", "azure-ad":
            if key == "" {
                key = os.Getenv("AZURE_OPENAI_KEY")
            }
            if key == "" {
                return modsError{
                    reason: m.Styles.InlineCode.Render("AZURE_OPENAI_KEY") + " environment variable is required.",
                    err:    fmt.Errorf("You can apply for one at %s", m.Styles.Link.Render("https://aka.ms/oai/access")),
                }
            }
            ccfg = openai.DefaultAzureConfig(key, api.BaseURL)
            if mod.API == "azure-ad" {
                ccfg.APIType = openai.APITypeAzureAD
            }
        default:
            ccfg = openai.DefaultConfig(key)
            if api.BaseURL != "" {
                ccfg.BaseURL = api.BaseURL
            }
        }
        client := openai.NewClientWithConfig(ccfg)
        ctx, cancel := context.WithCancel(context.Background())
        defer cancel()
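
        // Assemble the final prompt: the prefix (and the format hint, when
        // formatting is requested) goes ahead of the piped input.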
        prefix := cfg.Prefix
        if cfg.Format {
            prefix = fmt.Sprintf("%s %s", prefix, cfg.FormatText)
        }
        if prefix != "" {
            content = strings.TrimSpace(prefix + "\n\n" + content)
        }
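
        // Unless the user disabled the limit, clamp the prompt to the
        // model's maximum input size.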
        if !cfg.NoLimit {
            if len(content) > mod.MaxChars {
                content = content[:mod.MaxChars]
            }
        }
        resp, err := client.CreateChatCompletion(
            ctx,
            openai.ChatCompletionRequest{
                Model:       mod.Name,
                Temperature: noOmitFloat(cfg.Temperature),
                TopP:        noOmitFloat(cfg.TopP),
                MaxTokens:   cfg.MaxTokens,
                Messages: []openai.ChatCompletionMessage{
                    {
                        Role:    openai.ChatMessageRoleUser,
                        Content: content,
                    },
                },
            },
        )
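
        // Map API errors onto friendly messages, retrying only when another
        // attempt could plausibly succeed.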
        ae := &openai.APIError{}
        if errors.As(err, &ae) {
            switch ae.HTTPStatusCode {
            case http.StatusNotFound:
                if mod.Fallback != "" {
                    m.Config.Model = mod.Fallback
                    return m.retry(content, modsError{err: err, reason: fmt.Sprintf("Model '%s' was not found; trying fallback '%s'.", mod.Name, mod.Fallback)})
                }
                return modsError{err: err, reason: fmt.Sprintf("Missing model '%s' for API '%s'", cfg.Model, cfg.API)}
            case http.StatusBadRequest:
                if ae.Code == "context_length_exceeded" {
                    pe := modsError{err: err, reason: "Maximum prompt size exceeded."}
                    if cfg.NoLimit {
                        return pe
                    }
                    // Shave a few characters off the prompt and try again.
                    return m.retry(content[:len(content)-10], pe)
                }
                // bad request (do not retry)
                return modsError{err: err, reason: "OpenAI API request error."}
            case http.StatusUnauthorized:
                // invalid auth or key (do not retry)
                return modsError{err: err, reason: "Invalid OpenAI API key."}
            case http.StatusTooManyRequests:
                // rate limiting or engine overload (wait and retry)
                return m.retry(content, modsError{err: err, reason: "You’ve hit your OpenAI API rate limit."})
            case http.StatusInternalServerError:
                if mod.API == "openai" {
                    return m.retry(content, modsError{err: err, reason: "OpenAI API server error."})
                }
                return modsError{err: err, reason: fmt.Sprintf("Error loading model '%s' for API '%s'", mod.Name, mod.API)}
            default:
                return m.retry(content, modsError{err: err, reason: "Unknown API error."})
            }
        }
        if err != nil {
            return modsError{err: err, reason: fmt.Sprintf("There was a problem with the %s API request.", mod.API)}
        }
        // Guard against an empty choice list before indexing into it.
        if len(resp.Choices) == 0 {
            return modsError{err: errors.New("no completion choices returned"), reason: "The API returned an empty response."}
        }
        return completionOutput{resp.Choices[0].Message.Content}
    }
}
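
// readStdinCmd reads everything piped on stdin; when stdin is a TTY there is
// nothing to consume and an empty completionInput is returned.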
func readStdinCmd() tea.Msg {
    if !isatty.IsTerminal(os.Stdin.Fd()) {
        reader := bufio.NewReader(os.Stdin)
        stdinBytes, err := io.ReadAll(reader)
        if err != nil {
            return modsError{err, "Unable to read stdin."}
        }
        return completionInput{string(stdinBytes)}
    }
    return completionInput{""}
}

// noOmitFloat converts a 0.0 value to a float usable by the OpenAI client
// library, which currently uses Float32 fields in the request struct with the
// omitempty tag. This means we need to use math.SmallestNonzeroFloat32 instead
// of 0.0 so it doesn't get stripped from the request and replaced server side
// with the default values.
// Issue: https://github.com/sashabaranov/go-openai/issues/9
func noOmitFloat(f float32) float32 {
    if f == 0.0 {
        return math.SmallestNonzeroFloat32
    }
    return f
}