init: initial commit

This commit is contained in:
Blizzard
2026-04-07 17:35:09 +08:00
commit 680ecc320f
129 changed files with 10562 additions and 0 deletions
+239
View File
@@ -0,0 +1,239 @@
package chat
import (
"context"
"fmt"
"log/slog"
"strings"
"engimind/internal/config"
"engimind/internal/project"
"engimind/internal/vector"
"github.com/wailsapp/wails/v3/pkg/application"
)
// ChatService handles chat interactions and chapter generation (A+C→B).
type ChatService struct {
	llm        *LLMClient              // client used for both blocking and streaming LLM completions
	rag        *vector.RAGService      // vector search used to retrieve context chunks for prompts
	configSvc  *config.ConfigService   // source of configured model providers (URL/key/model)
	projectSvc *project.ProjectService // supplies the currently open project's ID for scoped RAG search
}
// NewChatService creates a chat service wired to the given configuration,
// project, and RAG services, with a freshly constructed LLM client.
func NewChatService(
	configSvc *config.ConfigService,
	projectSvc *project.ProjectService,
	rag *vector.RAGService,
) *ChatService {
	svc := &ChatService{
		configSvc:  configSvc,
		projectSvc: projectSvc,
		rag:        rag,
	}
	svc.llm = NewLLMClient()
	return svc
}
// SendMessage handles a user chat message with RAG context.
//
// It resolves the enabled provider matching modelID, optionally enriches the
// prompt with vector-search results scoped to the current project, and returns
// the model's first completion choice. Returns an error when providers cannot
// be loaded, the model is unknown/disabled, or the LLM call fails.
func (s *ChatService) SendMessage(content string, selectedFileIDs []string, modelID string) (string, error) {
	// Fix: the provider-load error was previously discarded, so a config
	// failure surfaced as a misleading "model not found" error.
	providers, err := s.configSvc.GetAllProviders()
	if err != nil {
		return "", fmt.Errorf("loading providers: %w", err)
	}
	var provider *struct{ URL, Key, Model, ProviderType string }
	for _, p := range providers {
		if p.ID == modelID && p.Enabled {
			provider = &struct{ URL, Key, Model, ProviderType string }{p.BaseURL, p.APIKey, p.ModelID, p.Provider}
			break
		}
	}
	if provider == nil {
		return "", fmt.Errorf("model %s not found or disabled", modelID)
	}

	// RAG: search context only when a project is open and files were selected.
	projectID := s.projectSvc.GetCurrentProjectID()
	var contextText string
	if projectID != "" && len(selectedFileIDs) > 0 {
		embCfg := s.getEmbeddingConfig()
		chunks, err := s.rag.SearchContext(context.Background(), projectID, content, 5, embCfg)
		if err != nil {
			// Best-effort: a failed search degrades to a plain chat, not an error.
			slog.Warn("RAG search failed, proceeding without context", "err", err)
		} else {
			parts := make([]string, 0, len(chunks))
			for _, c := range chunks {
				parts = append(parts, c.Text)
			}
			contextText = strings.Join(parts, "\n\n---\n\n")
		}
	}

	messages := []Message{
		{Role: "system", Content: "你是一位专业的工程技术助手。基于提供的工程素材回答问题,引用来源时使用 [N] 标注。"},
	}
	if contextText != "" {
		messages = append(messages, Message{
			Role:    "user",
			Content: fmt.Sprintf("参考以下工程素材:\n\n%s\n\n---\n\n用户提问:%s", contextText, content),
		})
	} else {
		messages = append(messages, Message{Role: "user", Content: content})
	}

	resp, err := s.llm.Complete(provider.URL, provider.Key, provider.Model, messages)
	if err != nil {
		return "", err
	}
	if len(resp.Choices) == 0 {
		return "", fmt.Errorf("no response from model")
	}
	return resp.Choices[0].Message.Content, nil
}
// StreamMessage handles a user chat message and streams the response via Wails
// events on the "chat_stream_<messageID>" channel. Each emitted event carries
// the full accumulated text so far; the final text is also returned.
//
// Returns an error when providers cannot be loaded, the model is
// unknown/disabled, or the streaming call fails.
func (s *ChatService) StreamMessage(content string, selectedFileIDs []string, modelID string, messageID string) (string, error) {
	// Fix: the provider-load error was previously discarded, so a config
	// failure surfaced as a misleading "model not found" error.
	providers, err := s.configSvc.GetAllProviders()
	if err != nil {
		return "", fmt.Errorf("loading providers: %w", err)
	}
	var provider *struct{ URL, Key, Model, ProviderType string }
	for _, p := range providers {
		if p.ID == modelID && p.Enabled {
			provider = &struct{ URL, Key, Model, ProviderType string }{p.BaseURL, p.APIKey, p.ModelID, p.Provider}
			break
		}
	}
	if provider == nil {
		return "", fmt.Errorf("model %s not found or disabled", modelID)
	}

	// RAG: search context only when a project is open and files were selected.
	projectID := s.projectSvc.GetCurrentProjectID()
	var contextText string
	if projectID != "" && len(selectedFileIDs) > 0 {
		embCfg := s.getEmbeddingConfig()
		chunks, err := s.rag.SearchContext(context.Background(), projectID, content, 5, embCfg)
		if err != nil {
			// Fix: previously the search error was silently swallowed; log it
			// for consistency with SendMessage, then proceed without context.
			slog.Warn("RAG search failed, proceeding without context", "err", err)
		} else {
			parts := make([]string, 0, len(chunks))
			for _, c := range chunks {
				parts = append(parts, c.Text)
			}
			contextText = strings.Join(parts, "\n\n---\n\n")
		}
	}

	messages := []Message{
		{Role: "system", Content: "你是一位专业的工程技术助手。基于提供的工程素材回答问题,引用来源时使用 [N] 标注。"},
	}
	if contextText != "" {
		messages = append(messages, Message{
			Role:    "user",
			Content: fmt.Sprintf("参考以下工程素材:\n\n%s\n\n---\n\n用户提问:%s", contextText, content),
		})
	} else {
		messages = append(messages, Message{Role: "user", Content: content})
	}

	var fullText string
	err = s.llm.StreamComplete(provider.URL, provider.Key, provider.Model, messages, func(chunk string) {
		fullText += chunk
		// Emit the accumulated text so the frontend can re-render incrementally.
		application.Get().Event.Emit("chat_stream_"+messageID, fullText)
	})
	return fullText, err
}
// GenerateChapter implements the A+C→B logic for a single chapter: given a
// chapter title (the requirement, A) and project material retrieved via RAG
// (the context, C), it asks the model to write the chapter body (B) in
// Markdown with [N] source citations.
//
// Returns an error when providers cannot be loaded, the model is
// unknown/disabled, or the LLM call fails; a failed RAG search only degrades
// to generation without reference material.
func (s *ChatService) GenerateChapter(chapterTitle string, selectedFileIDs []string, modelID string) (string, error) {
	// Fix: the provider-load error was previously discarded, so a config
	// failure surfaced as a misleading "model not found" error.
	providers, err := s.configSvc.GetAllProviders()
	if err != nil {
		return "", fmt.Errorf("loading providers: %w", err)
	}
	var provider *struct{ URL, Key, Model string }
	for _, p := range providers {
		if p.ID == modelID && p.Enabled {
			provider = &struct{ URL, Key, Model string }{p.BaseURL, p.APIKey, p.ModelID}
			break
		}
	}
	if provider == nil {
		return "", fmt.Errorf("model %s not found or disabled", modelID)
	}

	// RAG: search context for the chapter topic. Guard on an open project for
	// consistency with SendMessage/StreamMessage (a search with an empty
	// project ID cannot return project material anyway).
	var contextParts []string
	if projectID := s.projectSvc.GetCurrentProjectID(); projectID != "" {
		embCfg := s.getEmbeddingConfig()
		chunks, err := s.rag.SearchContext(context.Background(), projectID, chapterTitle, 8, embCfg)
		if err != nil {
			slog.Warn("RAG search failed for chapter generation", "err", err)
		}
		for _, c := range chunks {
			contextParts = append(contextParts, c.Text)
		}
	}
	contextText := strings.Join(contextParts, "\n\n---\n\n")

	prompt := fmt.Sprintf(
		"你是工程报告撰写专家。请根据以下工程素材,按照章节要求撰写报告内容。\n\n"+
			"## 章节要求\n%s\n\n"+
			"## 参考素材\n%s\n\n"+
			"## 输出要求\n"+
			"1. 使用 Markdown 格式\n"+
			"2. 引用素材时使用 [N] 标注\n"+
			"3. 内容专业、结构清晰\n"+
			"4. 包含具体数据和分析结论",
		chapterTitle, contextText,
	)
	messages := []Message{
		{Role: "system", Content: "你是一位资深工程报告撰写专家,擅长根据工程素材生成结构化的技术报告章节。"},
		{Role: "user", Content: prompt},
	}

	resp, err := s.llm.Complete(provider.URL, provider.Key, provider.Model, messages)
	if err != nil {
		return "", err
	}
	if len(resp.Choices) == 0 {
		return "", fmt.Errorf("no response from model")
	}
	return resp.Choices[0].Message.Content, nil
}
// StreamTemplateDirectory uses the selected LLM to extract a structured
// chapter outline (a JSON array of {id, title, content} objects) from delivery
// standard text, streaming the accumulated output to the frontend via the
// "chat_stream_<messageID>" Wails event. The final full text is also returned.
//
// Returns an error when providers cannot be loaded, the model is
// unknown/disabled, or the streaming call fails.
func (s *ChatService) StreamTemplateDirectory(content string, modelID string, messageID string) (string, error) {
	// Fix: the provider-load error was previously discarded, so a config
	// failure surfaced as a misleading "model not found" error.
	providers, err := s.configSvc.GetAllProviders()
	if err != nil {
		return "", fmt.Errorf("loading providers: %w", err)
	}
	var provider *struct{ URL, Key, Model, ProviderType string }
	for _, p := range providers {
		if p.ID == modelID && p.Enabled {
			provider = &struct{ URL, Key, Model, ProviderType string }{p.BaseURL, p.APIKey, p.ModelID, p.Provider}
			break
		}
	}
	if provider == nil {
		return "", fmt.Errorf("model %s not found or disabled", modelID)
	}

	prompt := fmt.Sprintf(
		"你是一个工程标准的目录解析助手。请从下面提供的交付标准文本中提取工程的主干章节,并以 JSON 数组的格式返回。\n\n"+
			"### 要求:\n"+
			"1. 只返回 JSON 数组,不包含其他废话或者回答前缀。\n"+
			"2. 输出格式必须严格符合:[{ \"id\": \"chapter-1\", \"title\": \"1. 原材料进场检验\", \"content\": \"...如果标准里有简要描述可附上\" }]\n\n"+
			"### 交付标准内容:\n%s",
		content,
	)
	messages := []Message{
		{Role: "system", Content: "你是一个专业的结构化数据抽取工具。你只输出合法的 JSON,不要使用 Markdown 代码块包裹,也不要给出任何其他解释。"},
		{Role: "user", Content: prompt},
	}

	var fullText string
	err = s.llm.StreamComplete(provider.URL, provider.Key, provider.Model, messages, func(chunk string) {
		fullText += chunk
		// Emit the accumulated text so the frontend can re-render incrementally.
		application.Get().Event.Emit("chat_stream_"+messageID, fullText)
	})
	return fullText, err
}
// getEmbeddingConfig returns the default embedding backend settings: the
// bge-m3 model served by a local Ollama instance. A local server needs no API
// key, so APIKey is left at its zero value.
func (s *ChatService) getEmbeddingConfig() vector.EmbeddingConfig {
	cfg := vector.EmbeddingConfig{
		Provider: "Ollama",
		BaseURL:  "http://localhost:11434",
		Model:    "bge-m3",
	}
	return cfg
}