init: initial commit

Blizzard committed 680ecc320f on 2026-04-07 17:35:09 +08:00
129 changed files with 10562 additions and 0 deletions
@@ -0,0 +1,239 @@
package chat
import (
"context"
"fmt"
"log/slog"
"strings"
"engimind/internal/config"
"engimind/internal/project"
"engimind/internal/vector"
"github.com/wailsapp/wails/v3/pkg/application"
)
// ChatService handles chat interactions and chapter generation (A+C→B).
type ChatService struct {
llm *LLMClient
rag *vector.RAGService
configSvc *config.ConfigService
projectSvc *project.ProjectService
}
// NewChatService creates a chat service.
func NewChatService(
configSvc *config.ConfigService,
projectSvc *project.ProjectService,
rag *vector.RAGService,
) *ChatService {
return &ChatService{
llm: NewLLMClient(),
rag: rag,
configSvc: configSvc,
projectSvc: projectSvc,
}
}
// SendMessage handles a user chat message with RAG context.
func (s *ChatService) SendMessage(content string, selectedFileIDs []string, modelID string) (string, error) {
	providers, err := s.configSvc.GetAllProviders()
	if err != nil {
		return "", fmt.Errorf("load providers: %w", err)
	}
var provider *struct{ URL, Key, Model, ProviderType string }
for _, p := range providers {
if p.ID == modelID && p.Enabled {
provider = &struct{ URL, Key, Model, ProviderType string }{p.BaseURL, p.APIKey, p.ModelID, p.Provider}
break
}
}
if provider == nil {
return "", fmt.Errorf("model %s not found or disabled", modelID)
}
// RAG: search context
projectID := s.projectSvc.GetCurrentProjectID()
var contextText string
if projectID != "" && len(selectedFileIDs) > 0 {
embCfg := s.getEmbeddingConfig()
chunks, err := s.rag.SearchContext(context.Background(), projectID, content, 5, embCfg)
if err != nil {
slog.Warn("RAG search failed, proceeding without context", "err", err)
} else {
var parts []string
for _, c := range chunks {
parts = append(parts, c.Text)
}
contextText = strings.Join(parts, "\n\n---\n\n")
}
}
messages := []Message{
{Role: "system", Content: "你是一位专业的工程技术助手。基于提供的工程素材回答问题,引用来源时使用 [N] 标注。"},
}
if contextText != "" {
messages = append(messages, Message{
Role: "user",
Content: fmt.Sprintf("参考以下工程素材:\n\n%s\n\n---\n\n用户提问:%s", contextText, content),
})
} else {
messages = append(messages, Message{Role: "user", Content: content})
}
resp, err := s.llm.Complete(provider.URL, provider.Key, provider.Model, messages)
if err != nil {
return "", err
}
if len(resp.Choices) == 0 {
return "", fmt.Errorf("no response from model")
}
return resp.Choices[0].Message.Content, nil
}
// StreamMessage handles a user chat message and streams the response via Wails events.
func (s *ChatService) StreamMessage(content string, selectedFileIDs []string, modelID string, messageID string) (string, error) {
	providers, err := s.configSvc.GetAllProviders()
	if err != nil {
		return "", fmt.Errorf("load providers: %w", err)
	}
var provider *struct{ URL, Key, Model, ProviderType string }
for _, p := range providers {
if p.ID == modelID && p.Enabled {
provider = &struct{ URL, Key, Model, ProviderType string }{p.BaseURL, p.APIKey, p.ModelID, p.Provider}
break
}
}
if provider == nil {
return "", fmt.Errorf("model %s not found or disabled", modelID)
}
projectID := s.projectSvc.GetCurrentProjectID()
var contextText string
if projectID != "" && len(selectedFileIDs) > 0 {
embCfg := s.getEmbeddingConfig()
		chunks, err := s.rag.SearchContext(context.Background(), projectID, content, 5, embCfg)
		if err != nil {
			slog.Warn("RAG search failed, proceeding without context", "err", err)
		} else {
			var parts []string
			for _, c := range chunks {
				parts = append(parts, c.Text)
			}
			contextText = strings.Join(parts, "\n\n---\n\n")
		}
}
messages := []Message{
{Role: "system", Content: "你是一位专业的工程技术助手。基于提供的工程素材回答问题,引用来源时使用 [N] 标注。"},
}
if contextText != "" {
messages = append(messages, Message{
Role: "user",
Content: fmt.Sprintf("参考以下工程素材:\n\n%s\n\n---\n\n用户提问:%s", contextText, content),
})
} else {
messages = append(messages, Message{Role: "user", Content: content})
}
	var fullText string
	// Each event carries the full accumulated text so far, so the frontend can
	// render the latest snapshot without reassembling deltas itself.
	err = s.llm.StreamComplete(provider.URL, provider.Key, provider.Model, messages, func(chunk string) {
		fullText += chunk
		application.Get().Event.Emit("chat_stream_"+messageID, fullText)
	})
	return fullText, err
}
// GenerateChapter implements the A+C→B logic for a single chapter.
func (s *ChatService) GenerateChapter(chapterTitle string, selectedFileIDs []string, modelID string) (string, error) {
	providers, err := s.configSvc.GetAllProviders()
	if err != nil {
		return "", fmt.Errorf("load providers: %w", err)
	}
var provider *struct{ URL, Key, Model string }
for _, p := range providers {
if p.ID == modelID && p.Enabled {
provider = &struct{ URL, Key, Model string }{p.BaseURL, p.APIKey, p.ModelID}
break
}
}
if provider == nil {
return "", fmt.Errorf("model %s not found or disabled", modelID)
}
// RAG: search context for chapter topic
projectID := s.projectSvc.GetCurrentProjectID()
embCfg := s.getEmbeddingConfig()
chunks, err := s.rag.SearchContext(context.Background(), projectID, chapterTitle, 8, embCfg)
if err != nil {
slog.Warn("RAG search failed for chapter generation", "err", err)
}
var contextParts []string
for _, c := range chunks {
contextParts = append(contextParts, c.Text)
}
contextText := strings.Join(contextParts, "\n\n---\n\n")
prompt := fmt.Sprintf(
"你是工程报告撰写专家。请根据以下工程素材,按照章节要求撰写报告内容。\n\n"+
"## 章节要求\n%s\n\n"+
"## 参考素材\n%s\n\n"+
"## 输出要求\n"+
"1. 使用 Markdown 格式\n"+
"2. 引用素材时使用 [N] 标注\n"+
"3. 内容专业、结构清晰\n"+
"4. 包含具体数据和分析结论",
chapterTitle, contextText,
)
messages := []Message{
{Role: "system", Content: "你是一位资深工程报告撰写专家,擅长根据工程素材生成结构化的技术报告章节。"},
{Role: "user", Content: prompt},
}
resp, err := s.llm.Complete(provider.URL, provider.Key, provider.Model, messages)
if err != nil {
return "", err
}
if len(resp.Choices) == 0 {
return "", fmt.Errorf("no response from model")
}
return resp.Choices[0].Message.Content, nil
}
// StreamTemplateDirectory uses the active LLM to extract a structured chapter outline from delivery standard text, streaming to frontend.
func (s *ChatService) StreamTemplateDirectory(content string, modelID string, messageID string) (string, error) {
	providers, err := s.configSvc.GetAllProviders()
	if err != nil {
		return "", fmt.Errorf("load providers: %w", err)
	}
var provider *struct{ URL, Key, Model, ProviderType string }
for _, p := range providers {
if p.ID == modelID && p.Enabled {
provider = &struct{ URL, Key, Model, ProviderType string }{p.BaseURL, p.APIKey, p.ModelID, p.Provider}
break
}
}
if provider == nil {
return "", fmt.Errorf("model %s not found or disabled", modelID)
}
prompt := fmt.Sprintf(
"你是一个工程标准的目录解析助手。请从下面提供的交付标准文本中提取工程的主干章节,并以 JSON 数组的格式返回。\n\n"+
"### 要求:\n"+
"1. 只返回 JSON 数组,不包含其他废话或者回答前缀。\n"+
"2. 输出格式必须严格符合:[{ \"id\": \"chapter-1\", \"title\": \"1. 原材料进场检验\", \"content\": \"...如果标准里有简要描述可附上\" }]\n\n"+
"### 交付标准内容:\n%s",
content,
)
messages := []Message{
{Role: "system", Content: "你是一个专业的结构化数据抽取工具。你只输出合法的 JSON,不要使用 Markdown 代码块包裹,也不要给出任何其他解释。"},
{Role: "user", Content: prompt},
}
	var fullText string
	err = s.llm.StreamComplete(provider.URL, provider.Key, provider.Model, messages, func(chunk string) {
		fullText += chunk
		application.Get().Event.Emit("chat_stream_"+messageID, fullText)
	})
	return fullText, err
}
// getEmbeddingConfig returns the embedding configuration used for RAG searches.
// For now it is hardcoded to bge-m3 served by a local Ollama instance.
func (s *ChatService) getEmbeddingConfig() vector.EmbeddingConfig {
return vector.EmbeddingConfig{
BaseURL: "http://localhost:11434",
Model: "bge-m3",
APIKey: "",
Provider: "Ollama",
}
}
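For orientation, here is a minimal wiring sketch of how ChatService might be constructed and used. The constructors for the config, project, and vector services are assumptions (their real signatures live elsewhere in this commit), so treat it as illustrative rather than canonical.

package main

import (
	"fmt"
	"log"

	"engimind/internal/chat"
	"engimind/internal/config"
	"engimind/internal/project"
	"engimind/internal/vector"
)

func main() {
	// Assumed constructors, shown for illustration only.
	cfgSvc := config.NewConfigService()
	projSvc := project.NewProjectService()
	rag := vector.NewRAGService()

	svc := chat.NewChatService(cfgSvc, projSvc, rag)

	// "provider-1" must be the ID of an enabled provider in the config.
	reply, err := svc.SendMessage("What does the delivery standard require?", nil, "provider-1")
	if err != nil {
		log.Fatal(err)
	}
	fmt.Println(reply)
}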
@@ -0,0 +1,133 @@
package chat
import (
"bufio"
"bytes"
"encoding/json"
"fmt"
"io"
"net/http"
"strings"
"time"
)
// LLMClient provides a unified interface for OpenAI-compatible LLM APIs.
type LLMClient struct {
client *http.Client
}
// NewLLMClient creates a new LLM client.
func NewLLMClient() *LLMClient {
return &LLMClient{
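		// Note: http.Client's Timeout bounds the entire exchange, including
		// reading a streamed body; long generations may need a larger value.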
client: &http.Client{Timeout: 120 * time.Second},
}
}
// Message represents a chat message in OpenAI format.
type Message struct {
Role string `json:"role"`
Content string `json:"content"`
}
// ChatRequest is the OpenAI chat completion request.
type ChatRequest struct {
Model string `json:"model"`
Messages []Message `json:"messages"`
Stream bool `json:"stream"`
Temperature float64 `json:"temperature,omitempty"`
MaxTokens int `json:"max_tokens,omitempty"`
}
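// A serialized request body looks like this (illustrative):
//
//	{"model":"some-model","messages":[{"role":"user","content":"hi"}],"stream":false}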
// ChatResponse is a non-streaming response.
type ChatResponse struct {
Choices []struct {
Message Message `json:"message"`
} `json:"choices"`
Usage struct {
PromptTokens int `json:"prompt_tokens"`
CompletionTokens int `json:"completion_tokens"`
} `json:"usage"`
}
// streamChunk is a single delta frame from an SSE streaming response.
type streamChunk struct {
Choices []struct {
Delta struct {
Content string `json:"content"`
} `json:"delta"`
FinishReason *string `json:"finish_reason"`
} `json:"choices"`
}
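// A typical frame on the wire looks like this (illustrative):
//
//	data: {"choices":[{"delta":{"content":"Hello"},"finish_reason":null}]}
//
// Only the JSON payload after the "data: " prefix is decoded into streamChunk.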
// Complete sends a non-streaming chat request.
func (c *LLMClient) Complete(baseURL, apiKey, model string, messages []Message) (*ChatResponse, error) {
reqBody := ChatRequest{
Model: model,
Messages: messages,
Stream: false,
}
	body, err := json.Marshal(reqBody)
	if err != nil {
		return nil, fmt.Errorf("marshal llm request: %w", err)
	}
	req, err := http.NewRequest("POST", baseURL+"/v1/chat/completions", bytes.NewReader(body))
	if err != nil {
		return nil, fmt.Errorf("build llm request: %w", err)
	}
req.Header.Set("Content-Type", "application/json")
if apiKey != "" {
req.Header.Set("Authorization", "Bearer "+apiKey)
}
resp, err := c.client.Do(req)
if err != nil {
return nil, fmt.Errorf("llm request: %w", err)
}
defer resp.Body.Close()
	data, err := io.ReadAll(resp.Body)
	if err != nil {
		return nil, fmt.Errorf("read llm response: %w", err)
	}
	if resp.StatusCode != http.StatusOK {
		return nil, fmt.Errorf("llm request failed: %s: %s", resp.Status, data)
	}
var result ChatResponse
if err := json.Unmarshal(data, &result); err != nil {
return nil, fmt.Errorf("parse llm response: %w", err)
}
return &result, nil
}
// StreamComplete sends a streaming chat request, calling onChunk for each token.
func (c *LLMClient) StreamComplete(baseURL, apiKey, model string, messages []Message, onChunk func(string)) error {
reqBody := ChatRequest{
Model: model,
Messages: messages,
Stream: true,
}
	body, err := json.Marshal(reqBody)
	if err != nil {
		return fmt.Errorf("marshal llm request: %w", err)
	}
	req, err := http.NewRequest("POST", baseURL+"/v1/chat/completions", bytes.NewReader(body))
	if err != nil {
		return fmt.Errorf("build llm stream request: %w", err)
	}
req.Header.Set("Content-Type", "application/json")
req.Header.Set("Accept", "text/event-stream")
if apiKey != "" {
req.Header.Set("Authorization", "Bearer "+apiKey)
}
resp, err := c.client.Do(req)
if err != nil {
return fmt.Errorf("llm stream request: %w", err)
}
defer resp.Body.Close()
	if resp.StatusCode != http.StatusOK {
		data, _ := io.ReadAll(resp.Body)
		return fmt.Errorf("llm stream request failed: %s: %s", resp.Status, data)
	}
	scanner := bufio.NewScanner(resp.Body)
	// Raise the line limit above bufio's 64KB default; SSE data lines can be long.
	scanner.Buffer(make([]byte, 0, 64*1024), 1024*1024)
for scanner.Scan() {
line := scanner.Text()
if !strings.HasPrefix(line, "data: ") {
continue
}
data := strings.TrimPrefix(line, "data: ")
if data == "[DONE]" {
break
}
var chunk streamChunk
if err := json.Unmarshal([]byte(data), &chunk); err != nil {
			continue // skip malformed or partial frames
}
if len(chunk.Choices) > 0 && chunk.Choices[0].Delta.Content != "" {
onChunk(chunk.Choices[0].Delta.Content)
}
}
return scanner.Err()
}
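As a quick usage sketch for the client itself: it can target any OpenAI-compatible endpoint, since requests go to baseURL+"/v1/chat/completions". The URL below matches the local Ollama default already used by getEmbeddingConfig; the model name is an assumption, so substitute whatever is installed locally.

package main

import (
	"fmt"
	"log"

	"engimind/internal/chat"
)

func main() {
	client := chat.NewLLMClient()
	messages := []chat.Message{
		{Role: "system", Content: "You are a helpful assistant."},
		{Role: "user", Content: "Say hello in one sentence."},
	}

	// Ollama exposes an OpenAI-compatible API under /v1 on its default port;
	// the model name here is a placeholder.
	err := client.StreamComplete("http://localhost:11434", "", "qwen2.5", messages, func(chunk string) {
		fmt.Print(chunk) // print each delta as it arrives
	})
	if err != nil {
		log.Fatal(err)
	}
	fmt.Println()
}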