package app

import (
	"errors"
	"io"

	"lazykimi/internal/config"
	"lazykimi/internal/ui/chatarea"
	"lazykimi/internal/ui/keymaps"
	"lazykimi/internal/ui/statusbar"
	"lazykimi/pkg/api"

	"github.com/charmbracelet/bubbles/v2/key"
	"github.com/charmbracelet/bubbles/v2/textinput"
	tea "github.com/charmbracelet/bubbletea/v2"
	"github.com/charmbracelet/lipgloss/v2"
	"github.com/sashabaranov/go-openai"
)

// Interface guard: ensure *Model implements tea.Model.
var _ tea.Model = (*Model)(nil)

// Model is the top-level Bubble Tea model that wires together the API
// client, key bindings, and UI components.
type Model struct {
	api *api.Client

	keys keymaps.KeyMap

	chatarea  chatarea.Model
	statusbar statusbar.Model
	ti        textinput.Model

	markdownMode  bool
	currentStream *openai.ChatCompletionStream

	ready bool
}
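
// NewApp builds the application model from an API key. A typical entry
// point might wire it up roughly like this (a sketch: the real main
// package and the KIMI_API_KEY variable name are assumptions):
//
//	m, err := app.NewApp(os.Getenv("KIMI_API_KEY"))
//	if err != nil {
//		log.Fatal(err)
//	}
//	if _, err := tea.NewProgram(m).Run(); err != nil {
//		log.Fatal(err)
//	}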
func NewApp(apiKey string) (*Model, error) {
	// Load config.
	cfg, err := config.LoadConfig()
	if err != nil {
		return nil, err
	}

	apiClient := api.NewClient(apiKey, cfg.Model)

	// Initialize with placeholder dimensions; the first tea.WindowSizeMsg
	// resizes everything to the real terminal size.
	chatarea := chatarea.New(20, 80)
	statusbar := statusbar.New(20)

	// Initialize the text input.
	ti := textinput.New()
	ti.VirtualCursor = false
	ti.Placeholder = "Send a message..."
	ti.Prompt = "> "
	ti.Focus()

	// Seed the chat with a welcome message.
	chatarea.AddAssistantMessage("Welcome! I'm ready to chat with you.")

	return &Model{
		api:       apiClient,
		chatarea:  chatarea,
		statusbar: statusbar,
		ti:        ti,
		keys:      keymaps.Default,
	}, nil
}

// Init implements tea.Model. No initial command is needed: Bubble Tea
// delivers a tea.WindowSizeMsg on startup, which sizes the components and
// marks the model ready.
func (m *Model) Init() tea.Cmd {
	return nil
}
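
// Update implements tea.Model. It reacts to window resizes, to streaming
// events from the API (a new stream, then one message per chunk), and to
// key presses, before forwarding the message to the focused text input
// and the child components.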
func (m *Model) Update(msg tea.Msg) (tea.Model, tea.Cmd) {
	var cmds []tea.Cmd
	var cmd tea.Cmd

	switch msg := msg.(type) {
	case tea.WindowSizeMsg:
		// Reserve three rows for the text input and status bar.
		m.chatarea.Width = msg.Width
		m.chatarea.Height = msg.Height - 3
		m.ti.SetWidth(msg.Width - 4)
		m.ti.Reset()
		m.statusbar.SetWidth(msg.Width)
		m.ready = true

	case *openai.ChatCompletionStream:
		// A new stream was opened; start pulling chunks from it.
		m.currentStream = msg
		cmds = append(cmds, m.receiveFromStream)

	case openai.ChatCompletionStreamResponse:
		// Append the streamed delta and queue the next read. Guard the
		// index: a chunk may arrive with no choices.
		if len(msg.Choices) > 0 {
			m.chatarea.AppendCurrentResponse(msg.Choices[0].Delta.Content)
		}
		cmds = append(cmds, m.statusbar.Spinner.Tick)
		cmds = append(cmds, m.receiveFromStream)

	case tea.KeyMsg:
		switch {
		case key.Matches(msg, m.keys.Quit):
			return m, tea.Quit

		case key.Matches(msg, m.keys.ToggleMarkdown):
			m.markdownMode = !m.markdownMode
			m.statusbar.SetMarkdownMode(m.markdownMode)
			m.chatarea.SetMarkdownMode(m.markdownMode)
			return m, nil

		case key.Matches(msg, m.keys.Clear):
			m.chatarea.Clear()
			m.statusbar.ClearFlashMessage()
			cmds = append(cmds, statusbar.FlashSuccess("Chat cleared"))

		case key.Matches(msg, m.keys.Submit):
			input := m.ti.Value()
			if input == "" {
				cmds = append(cmds, statusbar.FlashError("empty message"))
				break
			}
			m.ti.Reset()
			m.ti.Blur()
			m.chatarea.AddUserMessage(input)
			m.statusbar.ClearFlashMessage()
			m.statusbar.SetGenerating(true)
			cmds = append(cmds,
				m.statusbar.Spinner.Tick,
				func() tea.Msg {
					if len(m.chatarea.Messages) == 0 {
						return errors.New("no messages to send")
					}
					stream, err := m.api.SendChatCompletion(m.chatarea.Messages)
					if err != nil {
						return err
					}
					return stream
				},
			)
		}

	case error:
		return m, m.handleError(msg)
	}

	if m.ti.Focused() {
		m.ti, cmd = m.ti.Update(msg)
		cmds = append(cmds, cmd)
	}

	m.chatarea, cmd = m.chatarea.Update(msg)
	cmds = append(cmds, cmd)

	m.statusbar, cmd = m.statusbar.Update(msg)
	cmds = append(cmds, cmd)

	return m, tea.Batch(cmds...)
}

// View implements tea.Model.
func (m *Model) View() string {
	if !m.ready {
		return ""
	}
	return lipgloss.JoinVertical(lipgloss.Center, m.chatarea.View(), m.ti.View(), m.statusbar.View())
}
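
// receiveFromStream is a tea.Cmd that blocks until the next chunk of the
// active stream arrives. Update re-queues it after handling each chunk,
// so the two form a receive loop that ends when the stream reports io.EOF.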
func (m *Model) receiveFromStream() tea.Msg {
	if m.currentStream == nil {
		return errors.New("no active stream")
	}

	resp, err := m.api.GetNextResponse(m.currentStream)
	if errors.Is(err, io.EOF) {
		// Normal end of stream: reset the loading state, commit the
		// response, and hand focus back to the text input.
		m.statusbar.SetGenerating(false)
		m.chatarea.CommitCurrentResponse()
		m.api.CloseStream(m.currentStream)
		m.currentStream = nil
		m.statusbar.ClearFlashMessage()
		m.ti.Focus()
		return nil
	}
	if err != nil {
		return err
	}

	return resp
}

// handleError closes any active stream and flashes the error message in
// the status bar.
func (m *Model) handleError(err error) tea.Cmd {
	if m.currentStream != nil {
		m.api.CloseStream(m.currentStream)
		m.currentStream = nil
	}
	return statusbar.FlashError(err.Error())
}