// ai_proxy/server/service/app/ai_proxy.go
package app
import (
"bufio"
"bytes"
"context"
"encoding/json"
"fmt"
"io"
"net/http"
"strings"
"time"
"git.echol.cn/loser/ai_proxy/server/global"
"git.echol.cn/loser/ai_proxy/server/model/app"
"git.echol.cn/loser/ai_proxy/server/model/app/request"
"git.echol.cn/loser/ai_proxy/server/model/app/response"
"github.com/gin-gonic/gin"
"go.uber.org/zap"
)
// AiProxyService proxies OpenAI-compatible chat-completion requests to the
// configured upstream providers, applying per-model presets on the way in
// and post-processing model output on the way out. Stateless; safe to share.
type AiProxyService struct{}
// ProcessChatCompletion handles a non-streaming chat completion request.
//
// It resolves the provider/preset configuration for req.Model, injects the
// preset into the request (messages and parameters), forwards the request to
// the upstream provider, and applies the preset's output post-processing to
// every returned choice.
//
// Returns an error when the model name is empty or unknown, the provider is
// disabled, or the upstream call fails.
func (s *AiProxyService) ProcessChatCompletion(ctx context.Context, req *request.ChatCompletionRequest) (*response.ChatCompletionResponse, error) {
	// 1. Resolve configuration for the requested model.
	if req.Model == "" {
		return nil, fmt.Errorf("model 参数不能为空")
	}
	preset, provider, err := s.getConfigByModel(req.Model)
	if err != nil {
		return nil, err
	}

	// 2. Inject the preset into the outgoing request, if one is configured.
	// Build the injector once and reuse it for response post-processing.
	var injector *PresetInjector
	if preset != nil {
		injector = NewPresetInjector(preset)
		req.Messages = injector.InjectMessages(req.Messages)
		injector.ApplyPresetParameters(req)
	}

	// 3. Forward the request upstream.
	resp, err := s.forwardRequest(ctx, provider, req)
	if err != nil {
		return nil, err
	}

	// 4. Post-process the response. Apply the preset's output processing to
	// every choice, not just the first — the upstream may return n > 1.
	if injector != nil {
		for i := range resp.Choices {
			resp.Choices[i].Message.Content = injector.ProcessResponse(resp.Choices[i].Message.Content)
		}
	}
	return resp, nil
}
// ProcessChatCompletionStream handles a streaming chat completion request:
// it resolves the model's provider/preset configuration, injects the preset
// into the request, switches the response into SSE mode, and relays the
// upstream stream to the client.
func (s *AiProxyService) ProcessChatCompletionStream(c *gin.Context, req *request.ChatCompletionRequest) {
	// Reject requests without a model name up front.
	if req.Model == "" {
		c.JSON(http.StatusBadRequest, gin.H{"error": "model 参数不能为空"})
		return
	}

	// Resolve the configuration for the requested model.
	preset, provider, cfgErr := s.getConfigByModel(req.Model)
	if cfgErr != nil {
		c.JSON(http.StatusBadRequest, gin.H{"error": cfgErr.Error()})
		return
	}

	// Apply the preset (message injection + parameter overrides) when one is
	// configured; inj stays nil otherwise and is ignored downstream.
	var inj *PresetInjector
	if preset != nil {
		inj = NewPresetInjector(preset)
		req.Messages = inj.InjectMessages(req.Messages)
		inj.ApplyPresetParameters(req)
	}

	// Server-Sent Events headers; X-Accel-Buffering disables proxy buffering.
	c.Header("Content-Type", "text/event-stream")
	c.Header("Cache-Control", "no-cache")
	c.Header("Connection", "keep-alive")
	c.Header("X-Accel-Buffering", "no")

	// Relay the upstream stream. Headers have already been written at this
	// point, so a failure can only be logged, not reported to the client.
	if streamErr := s.forwardStreamRequest(c, provider, req, inj); streamErr != nil {
		global.GVA_LOG.Error("流式请求失败", zap.Error(streamErr))
	}
}
// getConfigByModel looks up the enabled model configuration for the given
// model name, preloading its provider and (optional) preset.
//
// Returns the preset (may be nil when the model has none), the provider, or
// an error when no enabled model matches or its provider is disabled.
func (s *AiProxyService) getConfigByModel(modelName string) (*app.AiPreset, *app.AiProvider, error) {
	// Find the enabled model record together with its related rows.
	var model app.AiModel
	err := global.GVA_DB.Preload("Provider").Preload("Preset").
		Where("name = ? AND enabled = ?", modelName, true).
		First(&model).Error
	if err != nil {
		// Keep the client-facing message generic, but log the underlying DB
		// error instead of silently discarding it (it may be a real DB
		// failure, not just a missing record).
		global.GVA_LOG.Error("查询模型配置失败", zap.String("model", modelName), zap.Error(err))
		return nil, nil, fmt.Errorf("未找到模型配置: %s", modelName)
	}
	// A model whose provider has been disabled must not be served.
	if !model.Provider.Enabled {
		return nil, nil, fmt.Errorf("提供商已禁用")
	}
	return model.Preset, &model.Provider, nil
}
// forwardRequest forwards a non-streaming chat completion request to the
// upstream provider and decodes its JSON response.
//
// The provider's BaseURL is joined with the OpenAI-compatible
// /v1/chat/completions path and authenticated with its API key. Returns the
// decoded response, or an error for marshalling, transport, non-200 status,
// or decoding failures.
func (s *AiProxyService) forwardRequest(ctx context.Context, provider *app.AiProvider, req *request.ChatCompletionRequest) (*response.ChatCompletionResponse, error) {
	// Fall back to the provider's default model when none was requested.
	if req.Model == "" && provider.Model != "" {
		req.Model = provider.Model
	}

	// Build the upstream request.
	reqBody, err := json.Marshal(req)
	if err != nil {
		return nil, fmt.Errorf("序列化请求失败: %w", err)
	}
	url := strings.TrimRight(provider.BaseURL, "/") + "/v1/chat/completions"
	httpReq, err := http.NewRequestWithContext(ctx, http.MethodPost, url, bytes.NewReader(reqBody))
	if err != nil {
		return nil, fmt.Errorf("创建请求失败: %w", err)
	}
	httpReq.Header.Set("Content-Type", "application/json")
	httpReq.Header.Set("Authorization", "Bearer "+provider.APIKey)

	// Send it. provider.Timeout is in seconds and bounds the whole exchange,
	// which is appropriate for a non-streaming call.
	client := &http.Client{Timeout: time.Duration(provider.Timeout) * time.Second}
	httpResp, err := client.Do(httpReq)
	if err != nil {
		return nil, fmt.Errorf("请求失败: %w", err)
	}
	defer httpResp.Body.Close()

	if httpResp.StatusCode != http.StatusOK {
		// Cap the error-body read so a misbehaving upstream cannot make us
		// buffer an unbounded response into memory.
		body, _ := io.ReadAll(io.LimitReader(httpResp.Body, 32<<10))
		return nil, fmt.Errorf("上游返回错误: %d - %s", httpResp.StatusCode, string(body))
	}

	// Decode the success response straight from the stream.
	var resp response.ChatCompletionResponse
	if err := json.NewDecoder(httpResp.Body).Decode(&resp); err != nil {
		return nil, fmt.Errorf("解析响应失败: %w", err)
	}
	return &resp, nil
}
// forwardStreamRequest forwards a streaming chat completion request upstream
// and relays the SSE stream to the client line by line, applying the
// injector's output post-processing to each chunk (injector may be nil).
// Returns an error for transport/relay failures; nil on a clean end of
// stream ("[DONE]" sentinel or EOF).
func (s *AiProxyService) forwardStreamRequest(c *gin.Context, provider *app.AiProvider, req *request.ChatCompletionRequest, injector *PresetInjector) error {
	// Fall back to the provider's default model when none was requested.
	if req.Model == "" && provider.Model != "" {
		req.Model = provider.Model
	}

	reqBody, err := json.Marshal(req)
	if err != nil {
		return fmt.Errorf("序列化请求失败: %w", err)
	}
	url := strings.TrimRight(provider.BaseURL, "/") + "/v1/chat/completions"
	httpReq, err := http.NewRequestWithContext(c.Request.Context(), http.MethodPost, url, bytes.NewReader(reqBody))
	if err != nil {
		return err
	}
	httpReq.Header.Set("Content-Type", "application/json")
	httpReq.Header.Set("Authorization", "Bearer "+provider.APIKey)

	// BUG FIX: do NOT set http.Client.Timeout here. Client.Timeout covers
	// reading the entire response body, so it would abort any stream lasting
	// longer than provider.Timeout seconds. The stream's lifetime is bounded
	// by the request context instead: when the client disconnects, the
	// context is cancelled and the upstream request is torn down.
	client := &http.Client{}
	httpResp, err := client.Do(httpReq)
	if err != nil {
		return err
	}
	defer httpResp.Body.Close()

	if httpResp.StatusCode != http.StatusOK {
		// Cap the error-body read to avoid buffering an unbounded response.
		body, _ := io.ReadAll(io.LimitReader(httpResp.Body, 32<<10))
		return fmt.Errorf("上游返回错误: %d - %s", httpResp.StatusCode, string(body))
	}

	flusher, ok := c.Writer.(http.Flusher)
	if !ok {
		return fmt.Errorf("不支持流式响应")
	}

	// Relay the stream line by line.
	reader := bufio.NewReader(httpResp.Body)
	for {
		line, readErr := reader.ReadBytes('\n')
		// Process any data read BEFORE handling the read error: a stream
		// whose final event lacks a trailing newline arrives together with
		// io.EOF and would otherwise be dropped.
		if len(bytes.TrimSpace(line)) > 0 {
			done, relayErr := s.relayStreamLine(c, flusher, injector, line)
			if relayErr != nil {
				return relayErr
			}
			if done {
				return nil
			}
		}
		if readErr != nil {
			if readErr == io.EOF {
				return nil
			}
			return readErr
		}
	}
}

// relayStreamLine forwards one SSE line to the client, post-processing the
// chunk's delta content through injector when set. Returns done=true when
// the upstream "[DONE]" sentinel was seen and the stream should end.
func (s *AiProxyService) relayStreamLine(c *gin.Context, flusher http.Flusher, injector *PresetInjector, line []byte) (done bool, err error) {
	// Ignore anything that is not a data field (SSE comments, other fields),
	// matching the original pass-through behavior.
	if !bytes.HasPrefix(line, []byte("data: ")) {
		return false, nil
	}
	data := bytes.TrimSpace(bytes.TrimPrefix(line, []byte("data: ")))

	// End-of-stream sentinel: forward it verbatim and stop.
	if string(data) == "[DONE]" {
		if _, werr := c.Writer.Write([]byte("data: [DONE]\n\n")); werr != nil {
			return true, werr
		}
		flusher.Flush()
		return true, nil
	}

	var chunk response.ChatCompletionStreamResponse
	if uerr := json.Unmarshal(data, &chunk); uerr != nil {
		// Unparseable chunk: skip it silently, as before.
		return false, nil
	}
	// Apply the preset's output regex processing to the delta content.
	if injector != nil && len(chunk.Choices) > 0 && chunk.Choices[0].Delta.Content != "" {
		chunk.Choices[0].Delta.Content = injector.ProcessResponse(chunk.Choices[0].Delta.Content)
	}

	// Re-serialize and send; surface write errors instead of ignoring them,
	// so a broken client connection stops the relay loop.
	processed, merr := json.Marshal(chunk)
	if merr != nil {
		return false, merr
	}
	if _, werr := c.Writer.Write([]byte("data: ")); werr != nil {
		return false, werr
	}
	if _, werr := c.Writer.Write(processed); werr != nil {
		return false, werr
	}
	if _, werr := c.Writer.Write([]byte("\n\n")); werr != nil {
		return false, werr
	}
	flusher.Flush()
	return false, nil
}
// GetAvailableModels returns the list of enabled models visible to the given
// API key, in the OpenAI-compatible "list models" response shape.
func (s *AiProxyService) GetAvailableModels(apiKey *app.AiApiKey) (*response.ModelListResponse, error) {
	// All enabled models, optionally restricted to the key's allow-list.
	db := global.GVA_DB.Where("enabled = ?", true)
	if len(apiKey.AllowedModels) > 0 {
		db = db.Where("name IN ?", apiKey.AllowedModels)
	}

	var models []app.AiModel
	if err := db.Find(&models).Error; err != nil {
		return nil, fmt.Errorf("查询模型列表失败: %w", err)
	}

	out := &response.ModelListResponse{
		Object: "list",
		Data:   make([]response.ModelInfo, 0, len(models)),
	}

	// The same model name may be configured under several providers; emit
	// each name only once, keeping first occurrence.
	seen := make(map[string]bool)
	for _, m := range models {
		if seen[m.Name] {
			continue
		}
		seen[m.Name] = true
		out.Data = append(out.Data, response.ModelInfo{
			ID:      m.Name,
			Object:  "model",
			Created: m.CreatedAt.Unix(),
			OwnedBy: "system",
		})
	}
	return out, nil
}