Initialize

NEO
2025-04-15 19:58:39 +08:00
commit 126b128db2
11 changed files with 1079 additions and 0 deletions

internal/ui/app/app.go

@@ -0,0 +1,196 @@
package app
import (
"errors"
"io"
"lazykimi/internal/config"
"lazykimi/internal/ui/chatarea"
"lazykimi/internal/ui/keymaps"
"lazykimi/internal/ui/statusbar"
"lazykimi/pkg/api"
"github.com/charmbracelet/bubbles/v2/key"
"github.com/charmbracelet/bubbles/v2/textinput"
tea "github.com/charmbracelet/bubbletea/v2"
"github.com/charmbracelet/lipgloss/v2"
"github.com/sashabaranov/go-openai"
)
var _ tea.Model = (*Model)(nil)
type Model struct {
api *api.Client
keys keymaps.KeyMap
chatarea chatarea.Model
statusbar statusbar.Model
ti textinput.Model
markdownMode bool
currentStream *openai.ChatCompletionStream
ready bool
}
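// NewApp builds the root model: API client, chat area, status bar, and text input.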
func NewApp(apiKey string) (*Model, error) {
// Load config
cfg, err := config.LoadConfig()
if err != nil {
return nil, err
}
apiClient := api.NewClient(apiKey, cfg.Model)
// Initialize with default width/height - will be resized by WindowSizeMsg
chatarea := chatarea.New(20, 80)
statusbar := statusbar.New(20)
// Initialize the text input
ti := textinput.New()
ti.Placeholder = "Send a message..."
ti.Prompt = "> "
ti.Focus()
// Seed the chat with a welcome message
chatarea.AddAssistantMessage("Welcome! I'm ready to chat with you.")
return &Model{
api: apiClient,
chatarea: chatarea,
statusbar: statusbar,
ti: ti,
keys: keymaps.Default,
}, nil
}
// Init implements tea.Model.
func (m *Model) Init() tea.Cmd {
return nil
}
// Update implements tea.Model.
func (m *Model) Update(msg tea.Msg) (tea.Model, tea.Cmd) {
var cmds []tea.Cmd
var cmd tea.Cmd
switch msg := msg.(type) {
case tea.WindowSizeMsg:
m.chatarea.Width = msg.Width
m.chatarea.Height = msg.Height - 3
m.ti.SetWidth(msg.Width - 4)
m.ti.Reset()
m.statusbar.SetWidth(msg.Width)
m.ready = true
case *openai.ChatCompletionStream:
m.currentStream = msg
cmds = append(cmds, m.receiveFromStream)
case openai.ChatCompletionStreamResponse:
content := msg.Choices[0].Delta.Content
m.chatarea.AppendCurrentResponse(content)
cmds = append(cmds, m.statusbar.Spinner.Tick)
cmds = append(cmds, m.receiveFromStream)
case tea.KeyMsg:
switch {
case key.Matches(msg, m.keys.Quit):
return m, tea.Quit
case key.Matches(msg, m.keys.ToggleMarkdown):
m.markdownMode = !m.markdownMode
m.statusbar.SetMarkdownMode(m.markdownMode)
m.chatarea.SetMarkdownMode(m.markdownMode)
return m, nil
case key.Matches(msg, m.keys.Clear):
m.chatarea.Clear()
m.statusbar.ClearFlashMessage()
cmds = append(cmds, statusbar.FlashSuccess("Chat cleared"))
case key.Matches(msg, m.keys.Submit):
input := m.ti.Value()
if input == "" {
cmds = append(cmds, statusbar.FlashError("empty message"))
break
}
m.ti.Reset()
m.ti.Blur()
m.chatarea.AddUserMessage(input)
m.statusbar.ClearFlashMessage()
m.statusbar.SetGenerating(true)
cmds = append(cmds,
m.statusbar.Spinner.Tick,
func() tea.Msg {
if len(m.chatarea.Messages) == 0 {
return errors.New("no messages to send")
}
stream, err := m.api.SendChatCompletion(m.chatarea.Messages)
if err != nil {
return err
}
return stream
},
)
}
case error:
return m, m.handleError(msg)
}
if m.ti.Focused() {
m.ti, cmd = m.ti.Update(msg)
cmds = append(cmds, cmd)
}
m.chatarea, cmd = m.chatarea.Update(msg)
cmds = append(cmds, cmd)
m.statusbar, cmd = m.statusbar.Update(msg)
cmds = append(cmds, cmd)
return m, tea.Batch(cmds...)
}
// View implements tea.Model.
func (m *Model) View() string {
if !m.ready {
return ""
}
return lipgloss.JoinVertical(lipgloss.Center, m.chatarea.View(), m.ti.View(), m.statusbar.View())
}
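// receiveFromStream pulls the next chunk from the active completion stream and returns it as a message.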
func (a *Model) receiveFromStream() tea.Msg {
if a.currentStream == nil {
return errors.New("no active stream")
}
resp, err := a.api.GetNextResponse(a.currentStream)
if errors.Is(err, io.EOF) {
// Reset loading state
a.statusbar.SetGenerating(false)
// Normal end of stream
a.chatarea.CommitCurrentResponse()
a.api.CloseStream(a.currentStream)
a.currentStream = nil
a.statusbar.ClearFlashMessage()
a.ti.Focus()
return nil
}
if err != nil {
return err
}
return resp
}
// handleError closes any active stream and flashes the error in the status bar.
func (a *Model) handleError(err error) tea.Cmd {
if a.currentStream != nil {
a.api.CloseStream(a.currentStream)
a.currentStream = nil
}
return statusbar.FlashError(err.Error())
}
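app.go leans on an api.Client wrapper (NewClient, SendChatCompletion, GetNextResponse, CloseStream) that is part of this commit but not shown in this excerpt. A minimal sketch of what it likely looks like, assuming a go-openai client pointed at a Kimi/Moonshot-compatible endpoint; the base URL and field names are assumptions:

// Sketch only: the real pkg/api implementation is in this commit but not shown here.
package api

import (
	"context"

	"github.com/sashabaranov/go-openai"
)

type Client struct {
	client *openai.Client
	model  string
}

// NewClient builds an OpenAI-compatible client. The Moonshot base URL is an assumption.
func NewClient(apiKey, model string) *Client {
	cfg := openai.DefaultConfig(apiKey)
	cfg.BaseURL = "https://api.moonshot.cn/v1" // assumed endpoint
	return &Client{client: openai.NewClientWithConfig(cfg), model: model}
}

// SendChatCompletion opens a streaming completion for the given history.
func (c *Client) SendChatCompletion(msgs []openai.ChatCompletionMessage) (*openai.ChatCompletionStream, error) {
	return c.client.CreateChatCompletionStream(context.Background(), openai.ChatCompletionRequest{
		Model:    c.model,
		Messages: msgs,
		Stream:   true,
	})
}

// GetNextResponse reads the next delta; it returns io.EOF when the stream ends.
func (c *Client) GetNextResponse(stream *openai.ChatCompletionStream) (openai.ChatCompletionStreamResponse, error) {
	return stream.Recv()
}

// CloseStream releases the underlying connection.
func (c *Client) CloseStream(stream *openai.ChatCompletionStream) {
	stream.Close()
}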


@@ -0,0 +1,201 @@
package chatarea
import (
"fmt"
"lazykimi/pkg/theme"
"strings"
"github.com/charmbracelet/bubbles/v2/viewport"
tea "github.com/charmbracelet/bubbletea/v2"
"github.com/charmbracelet/glamour"
"github.com/charmbracelet/lipgloss/v2"
"github.com/sashabaranov/go-openai"
"github.com/muesli/reflow/wordwrap"
)
type Style struct {
UserMessage lipgloss.Style
AssistantMessage lipgloss.Style
SystemMessage lipgloss.Style
Divider lipgloss.Style
}
type Model struct {
Width, Height int
systemPrompt string
Messages []openai.ChatCompletionMessage
streamingResponse string
markdownMode bool
vp viewport.Model
glmr *glamour.TermRenderer
style Style
ready bool
}
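// SetMarkdownMode toggles markdown rendering for assistant and system messages.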
func (m *Model) SetMarkdownMode(markdown bool) {
m.markdownMode = markdown
}
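// New creates a chat area with a bordered, mouse-scrollable viewport and per-role message styles.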
func New(width, height int) Model {
vp := viewport.New()
vp.Style = lipgloss.NewStyle().
Border(lipgloss.RoundedBorder()).
BorderForeground(theme.Gray).
Padding(0, 0)
vp.MouseWheelEnabled = true
baseMessage := lipgloss.NewStyle().Padding(0, 2).Bold(true)
return Model{
vp: vp,
style: Style{
UserMessage: baseMessage.Foreground(theme.Yellow),
AssistantMessage: baseMessage.Foreground(theme.Blue),
SystemMessage: baseMessage.Foreground(theme.Green),
Divider: lipgloss.NewStyle().Foreground(theme.Gray),
},
}
}
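// Update resizes the viewport on window size changes and forwards messages to it for scrolling.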
func (m Model) Update(msg tea.Msg) (Model, tea.Cmd) {
var cmd tea.Cmd
switch msg := msg.(type) {
case tea.WindowSizeMsg:
m.Width = msg.Width
m.Height = msg.Height - 3
m.vp.SetWidth(m.Width)
m.vp.SetHeight(m.Height)
// Initialize the markdown renderer at the new width; leaving this
// commented out leaves m.glmr nil, so formatMessage would panic
// when markdown mode is toggled on.
gr, err := glamour.NewTermRenderer(
glamour.WithAutoStyle(),
glamour.WithEmoji(),
glamour.WithWordWrap(m.Width-6),
)
if err != nil {
panic(err)
}
m.glmr = gr
m.ready = true
}
m.vp, cmd = m.vp.Update(msg)
return m, cmd
}
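// View renders the message history plus any in-flight streaming response.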
func (m Model) View() string {
if !m.ready {
return ""
}
var messages []string
divider := m.style.Divider.Render(strings.Repeat("─", m.Width-6))
// Render history messages
for _, msg := range m.Messages {
messages = append(messages, m.formatMessage(msg))
messages = append(messages, divider)
}
// Render current response if loading
if m.streamingResponse != "" {
messages = append(messages, m.formatMessage(openai.ChatCompletionMessage{
Role: openai.ChatMessageRoleAssistant,
Content: m.streamingResponse,
}))
}
// Set content with proper width
m.vp.SetContent(strings.Join(messages, "\n"))
// Keep the viewport pinned to the newest content as responses stream in
if !m.vp.AtBottom() {
m.vp.GotoBottom()
}
return m.vp.View()
}
// AddUserMessage adds a user message to the chat
func (c *Model) AddUserMessage(content string) {
c.Messages = append(c.Messages, openai.ChatCompletionMessage{
Role: openai.ChatMessageRoleUser,
Content: content,
})
}
// AddAssistantMessage adds an assistant message to the chat
func (c *Model) AddAssistantMessage(content string) {
c.Messages = append(c.Messages, openai.ChatCompletionMessage{
Role: openai.ChatMessageRoleAssistant,
Content: content,
})
}
// AppendCurrentResponse updates the current streaming response
func (c *Model) AppendCurrentResponse(content string) {
c.streamingResponse += content
}
// CommitCurrentResponse adds the current response as a message and clears it
func (c *Model) CommitCurrentResponse() {
if c.streamingResponse != "" {
c.AddAssistantMessage(c.streamingResponse)
c.streamingResponse = ""
}
}
// Clear clears all messages except the system prompt
func (c *Model) Clear() {
c.Messages = []openai.ChatCompletionMessage{}
c.streamingResponse = ""
// Re-add system message if set
if c.systemPrompt != "" {
c.Messages = append(c.Messages, openai.ChatCompletionMessage{
Role: openai.ChatMessageRoleSystem,
Content: c.systemPrompt,
})
}
}
// SetSystemPrompt sets a new system prompt and updates the first message if needed
func (c *Model) SetSystemPrompt(prompt string) {
c.systemPrompt = prompt
c.Clear() // This will add the new system prompt
}
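// formatMessage renders a single message with a role indicator, optionally through the markdown renderer.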
func (m *Model) formatMessage(msg openai.ChatCompletionMessage) string {
var (
indicator string
style lipgloss.Style
useMd bool
)
switch msg.Role {
case openai.ChatMessageRoleUser:
indicator = "You"
style = m.style.UserMessage
case openai.ChatMessageRoleAssistant:
indicator = "AI"
style = m.style.AssistantMessage
useMd = m.markdownMode
case openai.ChatMessageRoleSystem:
indicator = "System"
style = m.style.SystemMessage
useMd = m.markdownMode
}
content := strings.TrimSpace(msg.Content)
content = wordwrap.String(content, m.Width-3)
if useMd {
renderedContent, _ := m.glmr.Render(content)
return fmt.Sprintf("%s\n%s", indicator, renderedContent)
}
return fmt.Sprintf("%s\n%s", indicator, style.Render(content))
}
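Both the chat area and the status bar below pull their colors from pkg/theme, which is part of this commit but not included in this excerpt. A minimal sketch with placeholder hex values; the actual palette is an assumption:

package theme

import "github.com/charmbracelet/lipgloss/v2"

// Placeholder palette; the real values live in pkg/theme and are not shown in this diff.
var (
	Yellow = lipgloss.Color("#E5C07B")
	Blue   = lipgloss.Color("#61AFEF")
	Green  = lipgloss.Color("#98C379")
	Red    = lipgloss.Color("#E06C75")
	Gray   = lipgloss.Color("#5C6370")
)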


@@ -0,0 +1,74 @@
package keymaps
import "github.com/charmbracelet/bubbles/v2/key"
// KeyMap defines all keyboard shortcuts
type KeyMap struct {
Quit key.Binding
Help key.Binding
ToggleMarkdown key.Binding
Submit key.Binding
Clear key.Binding
ScrollUp key.Binding
ScrollDown key.Binding
PageUp key.Binding
PageDown key.Binding
}
// ShortHelp returns a short help message
func (k KeyMap) ShortHelp() []key.Binding {
return []key.Binding{k.Help, k.Quit}
}
// FullHelp returns complete help information
func (k KeyMap) FullHelp() [][]key.Binding {
return [][]key.Binding{
{k.Help},
{k.Quit},
{k.ToggleMarkdown},
{k.Submit},
{k.Clear},
{k.ScrollUp, k.ScrollDown},
{k.PageUp, k.PageDown},
}
}
// Default is the default set of key bindings
var Default = KeyMap{
Quit: key.NewBinding(
key.WithKeys("ctrl+c"),
key.WithHelp("ctrl+c", "quit"),
),
Help: key.NewBinding(
key.WithKeys("ctrl+h"),
key.WithHelp("ctrl+h", "help"),
),
ToggleMarkdown: key.NewBinding(
key.WithKeys("tab"),
key.WithHelp("tab", "toggle view"),
),
Submit: key.NewBinding(
key.WithKeys("enter"),
key.WithHelp("enter", "send"),
),
Clear: key.NewBinding(
key.WithKeys("ctrl+l"),
key.WithHelp("ctrl+l", "clear"),
),
ScrollUp: key.NewBinding(
key.WithKeys("up", "k"),
key.WithHelp("↑/k", "scroll up"),
),
ScrollDown: key.NewBinding(
key.WithKeys("down", "j"),
key.WithHelp("↓/j", "scroll down"),
),
PageUp: key.NewBinding(
key.WithKeys("pgup"),
key.WithHelp("PgUp", "page up"),
),
PageDown: key.NewBinding(
key.WithKeys("pgdown"),
key.WithHelp("PgDn", "page down"),
),
}
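ShortHelp and FullHelp satisfy the help.KeyMap interface from bubbles, so the bindings can be rendered with the stock help component. A hypothetical usage sketch; nothing in this diff wires it up yet:

package keymaps_test

import (
	"fmt"

	"github.com/charmbracelet/bubbles/v2/help"

	"lazykimi/internal/ui/keymaps"
)

// ExampleDefault renders the single-line help for the default bindings.
func ExampleDefault() {
	h := help.New()
	h.ShowAll = false // single-line mode uses ShortHelp
	fmt.Println(h.View(keymaps.Default))
}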


@@ -0,0 +1,255 @@
package statusbar
import (
"fmt"
"lazykimi/pkg/theme"
"strings"
"time"
"github.com/charmbracelet/bubbles/v2/spinner"
tea "github.com/charmbracelet/bubbletea/v2"
"github.com/charmbracelet/lipgloss/v2"
)
type FlashType string
const (
FlashTypeError FlashType = "error"
FlashTypeSuccess FlashType = "success"
)
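// FlashSuccess returns a command that flashes a success message in the status bar.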
func FlashSuccess(msg string) tea.Cmd {
return func() tea.Msg {
return EventFlashSuccess(msg)
}
}
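// FlashError returns a command that flashes an error message in the status bar.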
func FlashError(msg string) tea.Cmd {
return func() tea.Msg {
return EventFlashError(msg)
}
}
type (
EventFlashSuccess string
EventFlashError string
)
// Model represents the application status bar
type Model struct {
// Layout
width int
styles Style
// Left sections
messagesCount int
modelName string
markdownMode bool
// Right sections
Spinner spinner.Model
isGenerating bool
startTime time.Time
flashType FlashType
flashMessage string
}
// Style represents the styling for the status bar
type Style struct {
base lipgloss.Style
divider lipgloss.Style
message lipgloss.Style
model lipgloss.Style
mode lipgloss.Style
flash map[FlashType]lipgloss.Style
bar lipgloss.Style
}
// New creates a new status bar component
func New(width int) Model {
s := spinner.New()
s.Spinner = spinner.Moon
// Initialize base styles
baseStyle := lipgloss.NewStyle().Bold(true).Padding(0, 1)
return Model{
width: width,
Spinner: s,
styles: Style{
base: baseStyle,
divider: lipgloss.NewStyle().
Foreground(lipgloss.Color("#666666")).
Padding(0, 1),
message: baseStyle.Foreground(theme.Yellow),
model: baseStyle.Foreground(theme.Blue),
mode: baseStyle.Foreground(theme.Green),
flash: map[FlashType]lipgloss.Style{
FlashTypeError: baseStyle.Foreground(theme.Red),
FlashTypeSuccess: baseStyle.Foreground(theme.Green),
},
bar: lipgloss.NewStyle().
BorderStyle(lipgloss.NormalBorder()).
BorderForeground(theme.Gray).
BorderTop(true).
Width(width),
},
}
}
// SetWidth updates the width of the status bar
func (s *Model) SetWidth(width int) {
s.width = width
}
// SetGenerating sets the generating state and records when generation started
func (s *Model) SetGenerating(loading bool) {
s.isGenerating = loading
if loading {
s.startTime = time.Now()
}
}
func (s *Model) ClearFlashMessage() {
s.flashType = ""
s.flashMessage = ""
}
// setFlashMessage sets the flash message type and text
func (s *Model) setFlashMessage(t FlashType, msg string) {
s.flashType = t
s.flashMessage = msg
}
// SetMessageCount sets the message count
func (s *Model) SetMessageCount(count int) {
s.messagesCount = count
}
// SetModelName sets the model name
func (s *Model) SetModelName(name string) {
s.modelName = name
}
// SetMarkdownMode sets the markdown mode
func (s *Model) SetMarkdownMode(enable bool) {
s.markdownMode = enable
}
// Init returns the initial spinner tick command
func (s *Model) Init() tea.Cmd {
return s.Spinner.Tick
}
// Update handles window sizing, spinner ticks, and flash events
func (s Model) Update(msg tea.Msg) (Model, tea.Cmd) {
var cmd tea.Cmd
var cmds []tea.Cmd
switch msg := msg.(type) {
case tea.WindowSizeMsg:
s.SetWidth(msg.Width)
case spinner.TickMsg:
s.Spinner, cmd = s.Spinner.Update(msg)
cmds = append(cmds, cmd)
case EventFlashError:
s.setFlashMessage(FlashTypeError, string(msg))
case EventFlashSuccess:
s.setFlashMessage(FlashTypeSuccess, string(msg))
}
return s, tea.Batch(cmds...)
}
// View renders the status bar
func (s *Model) View() string {
// Render left and right sections
leftBar := s.joinSections(s.getLeftSections())
rightBar := s.joinSections(s.getRightSections())
// Calculate and handle available space
availableWidth := s.width
leftWidth := lipgloss.Width(leftBar)
rightWidth := lipgloss.Width(rightBar)
spacerWidth := availableWidth - leftWidth - rightWidth
var statusBar string
// Handle different space scenarios
switch {
case spacerWidth >= 0:
// Enough space for everything
spacer := strings.Repeat(" ", spacerWidth)
statusBar = fmt.Sprintf("%s%s%s", leftBar, spacer, rightBar)
case availableWidth >= leftWidth:
// Only show left bar
statusBar = leftBar
default:
// Not enough space - truncate left bar
statusBar = lipgloss.NewStyle().
MaxWidth(availableWidth).
Render(leftBar)
}
// Apply the final bar styling
return s.styles.bar.Width(s.width).Render(statusBar)
}
// getLeftSections returns the rendered left sections of the status bar
func (s *Model) getLeftSections() []string {
mode := "PLAIN"
if s.markdownMode {
mode = "MARKDOWN"
}
return []string{
s.styles.message.Render(fmt.Sprintf("💬 %d", s.messagesCount)),
s.styles.model.Render(fmt.Sprintf("🤖 %s", s.modelName)),
s.styles.mode.Render(mode),
}
}
// getRightSections returns the rendered right sections of the status bar
func (s *Model) getRightSections() []string {
var sections []string
// Generation status
if s.isGenerating {
duration := time.Since(s.startTime).Round(time.Millisecond)
sections = append(sections,
s.styles.flash[FlashTypeSuccess].Render(
fmt.Sprintf("⚡ %s %.1fs", s.Spinner.View(), duration.Seconds()),
),
)
}
// Flash message
if s.flashMessage != "" {
var icon string
switch s.flashType {
case FlashTypeError:
icon = "×"
case FlashTypeSuccess:
icon = "✓"
}
sections = append(sections,
s.styles.flash[s.flashType].Render(
fmt.Sprintf("%s %s", icon, s.flashMessage),
),
)
}
return sections
}
// joinSections joins sections with a dot separator
func (s *Model) joinSections(sections []string) string {
if len(sections) == 0 {
return ""
}
separator := s.styles.divider.SetString("┃").String()
result := sections[0]
for _, section := range sections[1:] {
result = lipgloss.JoinHorizontal(lipgloss.Center, result, separator, section)
}
return result
}
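For context, a sketch of the entry point that could drive this model with Bubble Tea; the environment variable name and error handling are assumptions, not taken from this diff:

package main

import (
	"fmt"
	"os"

	tea "github.com/charmbracelet/bubbletea/v2"

	"lazykimi/internal/ui/app"
)

func main() {
	apiKey := os.Getenv("MOONSHOT_API_KEY") // assumed variable name
	if apiKey == "" {
		fmt.Fprintln(os.Stderr, "missing API key")
		os.Exit(1)
	}

	m, err := app.NewApp(apiKey)
	if err != nil {
		fmt.Fprintln(os.Stderr, err)
		os.Exit(1)
	}

	if _, err := tea.NewProgram(m).Run(); err != nil {
		fmt.Fprintln(os.Stderr, err)
		os.Exit(1)
	}
}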