init: initial commit

This commit is contained in:
Blizzard
2026-04-07 17:35:09 +08:00
commit 680ecc320f
129 changed files with 10562 additions and 0 deletions
+120
View File
@@ -0,0 +1,120 @@
package vector
import (
"bytes"
"encoding/json"
"fmt"
"io"
"net/http"
"time"
)
// EmbeddingService calls Ollama or OpenAI-compatible APIs for embeddings.
// The zero value is not usable; construct with NewEmbeddingService.
type EmbeddingService struct {
	client *http.Client // shared HTTP client with a request timeout; reused across calls
}
// NewEmbeddingService creates an embedding service backed by an HTTP
// client with a 60-second per-request timeout.
func NewEmbeddingService() *EmbeddingService {
	httpClient := &http.Client{Timeout: 60 * time.Second}
	return &EmbeddingService{client: httpClient}
}
// ollamaEmbedReq is the request body for the Ollama embeddings API
// (POST /api/embeddings).
type ollamaEmbedReq struct {
	Model  string `json:"model"`  // embedding model name
	Prompt string `json:"prompt"` // text to embed
}

// ollamaEmbedResp is the response body returned by the Ollama embeddings API.
type ollamaEmbedResp struct {
	Embedding []float32 `json:"embedding"` // embedding vector; empty on failure
}
// openAIEmbedReq is the request body for OpenAI-compatible embedding APIs
// (POST /v1/embeddings).
type openAIEmbedReq struct {
	Model string `json:"model"` // embedding model name
	Input string `json:"input"` // text to embed
}

// openAIEmbedResp is the response body returned by OpenAI-compatible
// embedding APIs; only the first data element is consumed.
type openAIEmbedResp struct {
	Data []struct {
		Embedding []float32 `json:"embedding"` // embedding vector
	} `json:"data"`
}
// GetEmbedding generates an embedding vector for the given text.
// provider selects the backend: "ollama" uses the Ollama native API;
// anything else falls back to the OpenAI-compatible API.
func (s *EmbeddingService) GetEmbedding(text, baseURL, model, apiKey, provider string) ([]float32, error) {
	switch provider {
	// Bug fix: the switch previously matched only "Ollama", so the
	// documented lowercase "ollama" value silently fell through to the
	// OpenAI path. Accept both spellings for backward compatibility.
	case "ollama", "Ollama":
		return s.ollamaEmbed(text, baseURL, model)
	default:
		return s.openAIEmbed(text, baseURL, model, apiKey)
	}
}
// ollamaEmbed requests an embedding from the Ollama native API at
// baseURL+"/api/embeddings" and returns the vector. It returns an error
// on transport failure, non-200 status, unparsable JSON, or an empty
// embedding in the response.
func (s *EmbeddingService) ollamaEmbed(text, baseURL, model string) ([]float32, error) {
	body, err := json.Marshal(ollamaEmbedReq{Model: model, Prompt: text})
	if err != nil {
		return nil, fmt.Errorf("marshal ollama request: %w", err)
	}
	resp, err := s.client.Post(baseURL+"/api/embeddings", "application/json", bytes.NewReader(body))
	if err != nil {
		return nil, fmt.Errorf("ollama embed request: %w", err)
	}
	defer resp.Body.Close()
	data, err := io.ReadAll(resp.Body)
	if err != nil {
		return nil, fmt.Errorf("read ollama response: %w", err)
	}
	// Surface HTTP errors directly; otherwise an error page would show up
	// as a confusing JSON parse failure or "empty embedding".
	if resp.StatusCode != http.StatusOK {
		return nil, fmt.Errorf("ollama embed: status %d: %s", resp.StatusCode, data)
	}
	var result ollamaEmbedResp
	if err := json.Unmarshal(data, &result); err != nil {
		return nil, fmt.Errorf("parse ollama response: %w", err)
	}
	if len(result.Embedding) == 0 {
		return nil, fmt.Errorf("empty embedding returned")
	}
	return result.Embedding, nil
}
// openAIEmbed requests an embedding from an OpenAI-compatible API at
// baseURL+"/v1/embeddings", sending a Bearer token when apiKey is
// non-empty. It returns an error on transport failure, non-200 status,
// unparsable JSON, or a response without an embedding.
func (s *EmbeddingService) openAIEmbed(text, baseURL, model, apiKey string) ([]float32, error) {
	body, err := json.Marshal(openAIEmbedReq{Model: model, Input: text})
	if err != nil {
		return nil, fmt.Errorf("marshal openai request: %w", err)
	}
	req, err := http.NewRequest("POST", baseURL+"/v1/embeddings", bytes.NewReader(body))
	if err != nil {
		return nil, fmt.Errorf("build openai request: %w", err)
	}
	req.Header.Set("Content-Type", "application/json")
	if apiKey != "" {
		req.Header.Set("Authorization", "Bearer "+apiKey)
	}
	resp, err := s.client.Do(req)
	if err != nil {
		return nil, fmt.Errorf("openai embed request: %w", err)
	}
	defer resp.Body.Close()
	data, err := io.ReadAll(resp.Body)
	if err != nil {
		return nil, fmt.Errorf("read openai response: %w", err)
	}
	// Surface HTTP errors directly; otherwise an error page would show up
	// as a confusing JSON parse failure or "empty embedding".
	if resp.StatusCode != http.StatusOK {
		return nil, fmt.Errorf("openai embed: status %d: %s", resp.StatusCode, data)
	}
	var result openAIEmbedResp
	if err := json.Unmarshal(data, &result); err != nil {
		return nil, fmt.Errorf("parse openai response: %w", err)
	}
	if len(result.Data) == 0 || len(result.Data[0].Embedding) == 0 {
		return nil, fmt.Errorf("empty embedding returned")
	}
	return result.Data[0].Embedding, nil
}
// ChunkText splits text into overlapping chunks for vectorization.
// chunkSize is the target number of runes per chunk and overlap the
// number of runes shared between consecutive chunks. Counting is done
// in runes so multi-byte UTF-8 text is never split mid-character.
//
// Degenerate parameters are handled defensively: a non-positive
// chunkSize returns the whole text as a single chunk, and an overlap
// that is >= chunkSize is clamped so the loop always advances
// (the original code looped forever in both cases).
func ChunkText(text string, chunkSize, overlap int) []string {
	runes := []rune(text)
	if chunkSize <= 0 || len(runes) <= chunkSize {
		return []string{text}
	}
	// Ensure forward progress: a step of 0 or less would never terminate.
	step := chunkSize - overlap
	if step < 1 {
		step = 1
	}
	var chunks []string
	for start := 0; start < len(runes); start += step {
		end := start + chunkSize
		if end >= len(runes) {
			// Final chunk reaches the end of the text; stop here instead
			// of emitting a redundant tail already covered by this chunk.
			chunks = append(chunks, string(runes[start:]))
			break
		}
		chunks = append(chunks, string(runes[start:end]))
	}
	return chunks
}