Files
st-react/server/service/app/conversation.go
2026-03-03 04:28:33 +08:00

1677 lines
49 KiB
Go
Raw Blame History

This file contains ambiguous Unicode characters

This file contains Unicode characters that might be confused with other characters. If you think that this is intentional, you can safely ignore this warning. Use the Escape button to reveal them.

package app
import (
"bufio"
"bytes"
"encoding/json"
"errors"
"fmt"
"io"
"net/http"
"strings"
"time"
"git.echol.cn/loser/st/server/global"
"git.echol.cn/loser/st/server/model/app"
"git.echol.cn/loser/st/server/model/app/request"
"git.echol.cn/loser/st/server/model/app/response"
"gorm.io/datatypes"
"gorm.io/gorm"
)
// ConversationService bundles conversation and message CRUD together with the
// AI-call orchestration (config/preset resolution, prompt building, regex
// post-processing) for the app module.
type ConversationService struct{}
// CreateConversation creates a new conversation for the given user against a
// character card, resolves the AI provider/model (request override > active
// default config > hard-coded fallback), and seeds the conversation with the
// character's first message when one is defined.
//
// Returns the created conversation, or an error when the character card does
// not exist / is not accessible, or a DB operation fails.
func (s *ConversationService) CreateConversation(userID uint, req *request.CreateConversationRequest) (*response.ConversationResponse, error) {
	// Verify the character card exists and is owned by the user or public.
	var character app.AICharacter
	err := global.GVA_DB.Where("id = ? AND (user_id = ? OR is_public = ?)", req.CharacterID, userID, true).
		First(&character).Error
	if err != nil {
		if errors.Is(err, gorm.ErrRecordNotFound) {
			return nil, errors.New("角色卡不存在或无权访问")
		}
		return nil, err
	}

	// Default the title when the client did not supply one.
	title := req.Title
	if title == "" {
		title = "与 " + character.Name + " 的对话"
	}

	// Load the default active AI config (is_default first, newest first).
	var aiConfig app.AIConfig
	err = global.GVA_DB.Where("is_active = ?", true).
		Order("is_default DESC, created_at DESC").
		First(&aiConfig).Error

	// Resolve provider/model: explicit request values win, then the active
	// config's values, then hard-coded fallbacks.
	aiProvider := req.AIProvider
	model := req.Model
	if err == nil {
		if aiProvider == "" {
			aiProvider = aiConfig.Provider
		}
		if model == "" {
			model = aiConfig.DefaultModel
		}
		global.GVA_LOG.Info(fmt.Sprintf("创建对话使用 AI 配置: %s (Provider: %s, Model: %s)", aiConfig.Name, aiProvider, model))
	} else {
		if aiProvider == "" {
			aiProvider = "openai"
		}
		if model == "" {
			model = "gpt-4"
		}
		global.GVA_LOG.Warn("未找到默认 AI 配置,使用硬编码默认值")
	}

	// Create the conversation row.
	conversation := app.Conversation{
		UserID:           userID,
		CharacterID:      req.CharacterID,
		Title:            title,
		PresetID:         req.PresetID,
		WorldbookID:      req.WorldbookID,
		WorldbookEnabled: req.WorldbookEnabled,
		AIProvider:       aiProvider,
		Model:            model,
		Settings:         datatypes.JSON("{}"),
	}
	err = global.GVA_DB.Create(&conversation).Error
	if err != nil {
		return nil, err
	}

	// Seed the conversation with the character's greeting, if any. The raw
	// first message is stored untouched: regex scripts are deliberately NOT
	// applied here — the frontend renders <Status_block>/<maintext> itself.
	// (The previously-dead user lookup that fed the removed regex step has
	// been dropped; it cost a DB query per creation for nothing.)
	if character.FirstMes != "" {
		processedFirstMes := character.FirstMes
		global.GVA_LOG.Info(fmt.Sprintf("[开场白] 保留原始内容,长度=%d", len(processedFirstMes)))
		firstMessage := app.Message{
			ConversationID: conversation.ID,
			Role:           "assistant",
			Content:        processedFirstMes,
			// Rough token estimate: ~4 bytes per token.
			TokenCount: len(processedFirstMes) / 4,
		}
		err = global.GVA_DB.Create(&firstMessage).Error
		if err != nil {
			global.GVA_LOG.Warn(fmt.Sprintf("创建开场白消息失败: %v", err))
		} else {
			// Reflect the greeting in the conversation counters (both on the
			// in-memory struct, so the response sees it, and in the DB).
			conversation.MessageCount = 1
			conversation.TokenCount = firstMessage.TokenCount
			global.GVA_DB.Model(&conversation).Updates(map[string]interface{}{
				"message_count": 1,
				"token_count":   firstMessage.TokenCount,
			})
		}
	}

	resp := response.ToConversationResponse(&conversation)
	return &resp, nil
}
// GetConversationList returns one page of the user's conversations, newest
// activity first, together with lightweight character info for each item.
func (s *ConversationService) GetConversationList(userID uint, req *request.GetConversationListRequest) (*response.ConversationListResponse, error) {
	query := global.GVA_DB.Model(&app.Conversation{}).Where("user_id = ?", userID)

	var total int64
	if err := query.Count(&total).Error; err != nil {
		return nil, err
	}

	var convs []app.Conversation
	if err := query.Order("updated_at DESC").
		Offset((req.Page - 1) * req.PageSize).
		Limit(req.PageSize).
		Find(&convs).Error; err != nil {
		return nil, err
	}

	// Collect the character IDs referenced by this page.
	ids := make([]uint, 0, len(convs))
	for _, c := range convs {
		ids = append(ids, c.CharacterID)
	}

	// Batch-load only the character columns the list view needs, then index
	// them by ID for the join below.
	byID := make(map[uint]*app.AICharacter, len(ids))
	if len(ids) > 0 {
		var chars []app.AICharacter
		if err := global.GVA_DB.Select("id, name, avatar, description, created_at, updated_at").
			Where("id IN ?", ids).
			Find(&chars).Error; err != nil {
			return nil, err
		}
		for i := range chars {
			byID[chars[i].ID] = &chars[i]
		}
	}

	// Build the lightweight response items.
	items := make([]response.ConversationListItemResponse, len(convs))
	for i := range convs {
		items[i] = response.ToConversationListItemResponse(&convs[i], byID[convs[i].CharacterID])
	}

	return &response.ConversationListResponse{
		List:     items,
		Total:    total,
		Page:     req.Page,
		PageSize: req.PageSize,
	}, nil
}
// GetConversationByID loads a single conversation, enforcing ownership.
func (s *ConversationService) GetConversationByID(userID, conversationID uint) (*response.ConversationResponse, error) {
	var conv app.Conversation
	if err := global.GVA_DB.Where("id = ? AND user_id = ?", conversationID, userID).
		First(&conv).Error; err != nil {
		if errors.Is(err, gorm.ErrRecordNotFound) {
			return nil, errors.New("对话不存在或无权访问")
		}
		return nil, err
	}
	result := response.ToConversationResponse(&conv)
	return &result, nil
}
// UpdateConversationSettings applies a partial update (settings JSON blob,
// worldbook binding, worldbook toggle) to a conversation owned by the user.
// Fields left nil in the request are untouched; an all-nil request is a no-op.
func (s *ConversationService) UpdateConversationSettings(userID, conversationID uint, req *request.UpdateConversationSettingsRequest) error {
	var conv app.Conversation
	if err := global.GVA_DB.Where("id = ? AND user_id = ?", conversationID, userID).First(&conv).Error; err != nil {
		if errors.Is(err, gorm.ErrRecordNotFound) {
			return errors.New("对话不存在或无权访问")
		}
		return err
	}

	changes := map[string]interface{}{}
	if req.Settings != nil {
		raw, marshalErr := json.Marshal(req.Settings)
		if marshalErr != nil {
			return marshalErr
		}
		changes["settings"] = datatypes.JSON(raw)
	}
	if req.WorldbookID != nil {
		changes["worldbook_id"] = req.WorldbookID
	}
	if req.WorldbookEnabled != nil {
		changes["worldbook_enabled"] = *req.WorldbookEnabled
	}

	if len(changes) == 0 {
		return nil
	}
	return global.GVA_DB.Model(&conv).Updates(changes).Error
}
// DeleteConversation removes a conversation and all of its messages in one
// transaction.
//
// The conversation is deleted first: the "id AND user_id" predicate doubles as
// the ownership check, so messages belonging to a conversation the caller does
// not own are never touched (the original order deleted messages before
// checking ownership, relying on rollback to undo them).
func (s *ConversationService) DeleteConversation(userID, conversationID uint) error {
	return global.GVA_DB.Transaction(func(tx *gorm.DB) error {
		// Ownership-checking delete; zero rows affected means not found or
		// not owned, and the transaction rolls back.
		result := tx.Where("id = ? AND user_id = ?", conversationID, userID).Delete(&app.Conversation{})
		if result.Error != nil {
			return result.Error
		}
		if result.RowsAffected == 0 {
			return errors.New("对话不存在或无权删除")
		}
		// Cascade-delete the conversation's messages.
		return tx.Where("conversation_id = ?", conversationID).Delete(&app.Message{}).Error
	})
}
// GetMessageList returns one page of a conversation's messages ordered oldest
// first, after verifying the conversation belongs to the user.
func (s *ConversationService) GetMessageList(userID, conversationID uint, req *request.GetMessageListRequest) (*response.MessageListResponse, error) {
	// Ownership check.
	var conv app.Conversation
	if err := global.GVA_DB.Where("id = ? AND user_id = ?", conversationID, userID).
		First(&conv).Error; err != nil {
		if errors.Is(err, gorm.ErrRecordNotFound) {
			return nil, errors.New("对话不存在或无权访问")
		}
		return nil, err
	}

	query := global.GVA_DB.Model(&app.Message{}).Where("conversation_id = ?", conversationID)

	var total int64
	if err := query.Count(&total).Error; err != nil {
		return nil, err
	}

	var msgs []app.Message
	if err := query.Order("created_at ASC").
		Offset((req.Page - 1) * req.PageSize).
		Limit(req.PageSize).
		Find(&msgs).Error; err != nil {
		return nil, err
	}

	items := make([]response.MessageResponse, len(msgs))
	for i := range msgs {
		items[i] = response.ToMessageResponse(&msgs[i])
	}

	return &response.MessageListResponse{
		List:     items,
		Total:    total,
		Page:     req.Page,
		PageSize: req.PageSize,
	}, nil
}
// SendMessage stores the user's message (after input-stage regex scripts),
// calls the configured AI provider synchronously with the recent history,
// stores the assistant reply, bumps the conversation counters, extracts
// {{setvar::key::value}} directives into the conversation's variables, and
// substitutes {{getvar::...}} before returning the display content.
func (s *ConversationService) SendMessage(userID, conversationID uint, req *request.SendMessageRequest) (*response.MessageResponse, error) {
	// Verify the conversation belongs to the caller.
	var conversation app.Conversation
	err := global.GVA_DB.Where("id = ? AND user_id = ?", conversationID, userID).
		First(&conversation).Error
	if err != nil {
		if errors.Is(err, gorm.ErrRecordNotFound) {
			return nil, errors.New("对话不存在或无权访问")
		}
		return nil, err
	}
	// Load the character card driving this conversation.
	var character app.AICharacter
	err = global.GVA_DB.Where("id = ?", conversation.CharacterID).First(&character).Error
	if err != nil {
		return nil, errors.New("角色卡不存在")
	}
	// Load the user (best effort) so regex scripts can substitute the user
	// name; on failure an empty name is passed through.
	var user app.AppUser
	err = global.GVA_DB.Where("id = ?", userID).First(&user).Error
	if err != nil {
		global.GVA_LOG.Warn(fmt.Sprintf("获取用户信息失败: %v", err))
	}
	userName := user.Username
	if userName == "" {
		userName = user.NickName
	}
	// Apply input-stage regex scripts (placement 0) to the raw user input.
	processedContent := req.Content
	var regexService RegexScriptService
	global.GVA_LOG.Info(fmt.Sprintf("查询输入阶段正则脚本: userID=%d, placement=0, charID=%d", userID, conversation.CharacterID))
	inputScripts, err := regexService.GetScriptsForPlacement(userID, 0, &conversation.CharacterID, nil)
	if err != nil {
		global.GVA_LOG.Error(fmt.Sprintf("查询输入阶段正则脚本失败: %v", err))
	} else {
		global.GVA_LOG.Info(fmt.Sprintf("找到 %d 个输入阶段正则脚本", len(inputScripts)))
		if len(inputScripts) > 0 {
			processedContent = regexService.ExecuteScripts(inputScripts, processedContent, userName, character.Name)
			global.GVA_LOG.Info(fmt.Sprintf("应用了 %d 个输入阶段正则脚本,原文: %s, 处理后: %s", len(inputScripts), req.Content, processedContent))
		}
	}
	// Persist the (possibly transformed) user message.
	userMessage := app.Message{
		ConversationID: conversationID,
		Role:           "user",
		Content:        processedContent,
		TokenCount:     len(processedContent) / 4, // rough estimate: ~4 bytes per token
	}
	err = global.GVA_DB.Create(&userMessage).Error
	if err != nil {
		return nil, err
	}
	// Fetch the 10 most recent messages (including the one just saved) as
	// model context.
	var messages []app.Message
	err = global.GVA_DB.Where("conversation_id = ?", conversationID).
		Order("created_at DESC").
		Limit(10).
		Find(&messages).Error
	if err != nil {
		return nil, err
	}
	// Reverse into chronological (oldest-first) order.
	for i, j := 0, len(messages)-1; i < j; i, j = i+1, j-1 {
		messages[i], messages[j] = messages[j], messages[i]
	}
	// Call the configured AI provider for a reply (includes output-stage
	// regex post-processing inside callAIService).
	aiResponse, err := s.callAIService(conversation, character, messages)
	if err != nil {
		return nil, err
	}
	// Persist the assistant reply.
	assistantMessage := app.Message{
		ConversationID: conversationID,
		Role:           "assistant",
		Content:        aiResponse,
		TokenCount:     len(aiResponse) / 4, // rough estimate: ~4 bytes per token
	}
	err = global.GVA_DB.Create(&assistantMessage).Error
	if err != nil {
		return nil, err
	}
	// Bump conversation counters (user + assistant = 2 messages).
	err = global.GVA_DB.Model(&conversation).Updates(map[string]interface{}{
		"message_count": gorm.Expr("message_count + ?", 2),
		"token_count":   gorm.Expr("token_count + ?", userMessage.TokenCount+assistantMessage.TokenCount),
	}).Error
	if err != nil {
		return nil, err
	}
	// Extract {{setvar::key::value}} directives from the reply and merge them
	// into the conversation's stored variables (best effort: marshal/unmarshal
	// errors are ignored).
	newVars, cleanedContent := regexService.ExtractSetVars(assistantMessage.Content)
	if len(newVars) > 0 {
		var existingVars map[string]string
		if len(conversation.Variables) > 0 {
			json.Unmarshal(conversation.Variables, &existingVars)
		}
		if existingVars == nil {
			existingVars = make(map[string]string)
		}
		for k, v := range newVars {
			existingVars[k] = v
		}
		varsJSON, _ := json.Marshal(existingVars)
		global.GVA_DB.Model(&conversation).Update("variables", datatypes.JSON(varsJSON))
		global.GVA_LOG.Info(fmt.Sprintf("提取并保存了 %d 个变量: %v", len(newVars), newVars))
	}
	// Substitute {{getvar::...}} with stored variable values for display.
	// NOTE(review): this re-reads conversation.Variables from the struct; it
	// assumes the save above is reflected there (or that pre-existing vars
	// suffice) — verify newly-set vars are visible in the same reply.
	var currentVars map[string]string
	if len(conversation.Variables) > 0 {
		json.Unmarshal(conversation.Variables, &currentVars)
	}
	displayContent := cleanedContent // reply with {{setvar::}} directives stripped
	if currentVars != nil {
		displayContent = regexService.SubstituteGetVars(displayContent, currentVars)
		global.GVA_LOG.Info(fmt.Sprintf("替换了 {{getvar::}} 变量"))
	}
	// By this point any <Status_block> has already been rewritten by the
	// placement-1 regex scripts inside callAIService, so the content can be
	// returned to the frontend as-is.
	resp := response.ToMessageResponse(&assistantMessage)
	resp.Content = displayContent // display-processed content; DB keeps the raw reply
	return &resp, nil
}
// callAIService resolves the AI config (conversation-pinned config or the
// active default), loads an optional preset, builds the system prompt
// (character card + preset prompt + triggered worldbook entries), dispatches
// to the provider-specific client, then applies output-stage regex scripts
// (placement 1) to the reply before returning it.
func (s *ConversationService) callAIService(conversation app.Conversation, character app.AICharacter, messages []app.Message) (string, error) {
	var aiConfig app.AIConfig
	var err error
	// 1. Prefer the AI config the user pinned in the conversation settings.
	var configID uint
	if len(conversation.Settings) > 0 {
		var settings map[string]interface{}
		if err := json.Unmarshal(conversation.Settings, &settings); err == nil {
			if id, ok := settings["aiConfigId"].(float64); ok { // JSON numbers decode as float64
				configID = uint(id)
			}
		}
	}
	if configID > 0 {
		global.GVA_LOG.Info(fmt.Sprintf("使用用户指定的 AI 配置 ID: %d", configID))
		err = global.GVA_DB.Where("id = ? AND is_active = ?", configID, true).First(&aiConfig).Error
		if err != nil {
			global.GVA_LOG.Error(fmt.Sprintf("未找到指定的 AI 配置 ID: %d, 错误: %v", configID, err))
		}
	}
	// Fall back to the default active config when nothing was pinned or the
	// pinned config could not be loaded.
	if err != nil || configID == 0 {
		global.GVA_LOG.Info("尝试使用默认 AI 配置")
		err = global.GVA_DB.Where("is_active = ?", true).
			Order("is_default DESC, created_at DESC").
			First(&aiConfig).Error
		if err != nil {
			global.GVA_LOG.Error(fmt.Sprintf("未找到默认 AI 配置, 错误: %v", err))
		}
	}
	if err != nil {
		return "", errors.New("未找到可用的 AI 配置,请在管理后台添加并激活 AI 配置")
	}
	global.GVA_LOG.Info(fmt.Sprintf("使用 AI 配置: %s (Provider: %s, Model: %s)", aiConfig.Name, aiConfig.Provider, aiConfig.DefaultModel))
	// 2. Load the preset referenced by the conversation settings, if any.
	var preset *app.AIPreset
	var presetID uint
	if len(conversation.Settings) > 0 {
		var settings map[string]interface{}
		if err := json.Unmarshal(conversation.Settings, &settings); err == nil {
			if id, ok := settings["presetId"].(float64); ok {
				presetID = uint(id)
			}
		}
	}
	if presetID > 0 {
		var loadedPreset app.AIPreset
		if err := global.GVA_DB.First(&loadedPreset, presetID).Error; err == nil {
			preset = &loadedPreset
			global.GVA_LOG.Info(fmt.Sprintf("使用预设: %s (Temperature: %.2f, TopP: %.2f)", preset.Name, preset.Temperature, preset.TopP))
			// Track preset usage (fire and forget).
			global.GVA_DB.Model(&preset).Update("use_count", gorm.Expr("use_count + ?", 1))
		} else {
			global.GVA_LOG.Warn(fmt.Sprintf("未找到预设 ID: %d, 使用默认参数", presetID))
		}
	}
	// Build the system prompt; a preset system prompt is appended after the
	// character card's prompt.
	systemPrompt := s.buildSystemPrompt(character)
	if preset != nil && preset.SystemPrompt != "" {
		systemPrompt = systemPrompt + "\n\n" + preset.SystemPrompt
		global.GVA_LOG.Info("已追加预设的系统提示词")
	}
	// Worldbook integration: scan the recent messages and inject any
	// triggered entries into the system prompt.
	if conversation.WorldbookEnabled && conversation.WorldbookID != nil {
		global.GVA_LOG.Info(fmt.Sprintf("世界书已启用ID: %d", *conversation.WorldbookID))
		var messageContents []string
		for _, msg := range messages {
			messageContents = append(messageContents, msg.Content)
		}
		engine := &WorldbookEngine{}
		triggered, err := engine.ScanAndTrigger(*conversation.WorldbookID, messageContents)
		if err != nil {
			global.GVA_LOG.Warn(fmt.Sprintf("世界书触发失败: %v", err))
		} else if len(triggered) > 0 {
			global.GVA_LOG.Info(fmt.Sprintf("触发了 %d 个世界书条目", len(triggered)))
			systemPrompt = engine.BuildPromptWithWorldbook(systemPrompt, triggered)
		} else {
			global.GVA_LOG.Info("没有触发任何世界书条目")
		}
	}
	// Assemble the provider-facing message list.
	apiMessages := s.buildAPIMessages(messages, systemPrompt)
	// Debug: dump the full outbound payload.
	global.GVA_LOG.Info("========== 发送给AI的完整内容 ==========")
	global.GVA_LOG.Info(fmt.Sprintf("系统提示词: %s", systemPrompt))
	global.GVA_LOG.Info("消息列表:")
	for i, msg := range apiMessages {
		global.GVA_LOG.Info(fmt.Sprintf(" [%d] Role: %s, Content: %s", i, msg["role"], msg["content"]))
	}
	global.GVA_LOG.Info("==========================================")
	// Model precedence: config default > conversation's stored model
	// (backward compatibility) > "gpt-4" as a last resort.
	model := aiConfig.DefaultModel
	if model == "" {
		model = conversation.Model
	}
	if model == "" {
		model = "gpt-4"
	}
	global.GVA_LOG.Info(fmt.Sprintf("使用模型: %s (来源: AI配置 %s)", model, aiConfig.Name))
	// Dispatch to the provider-specific client.
	var aiResponse string
	switch aiConfig.Provider {
	case "openai", "custom":
		aiResponse, err = s.callOpenAIAPI(&aiConfig, model, apiMessages, preset)
	case "anthropic":
		aiResponse, err = s.callAnthropicAPI(&aiConfig, model, apiMessages, systemPrompt, preset)
	default:
		return "", fmt.Errorf("不支持的 AI 提供商: %s", aiConfig.Provider)
	}
	if err != nil {
		global.GVA_LOG.Error(fmt.Sprintf("========== AI返回错误 ==========\n%v\n==========================================", err))
		return "", err
	}
	global.GVA_LOG.Info(fmt.Sprintf("========== AI返回的完整内容 ==========\n%s\n==========================================", aiResponse))
	// Apply output-stage regex scripts (placement 1), e.g. replacing
	// <Status_block> with an HTML template carrying the YAML data.
	var regexService RegexScriptService
	global.GVA_LOG.Info(fmt.Sprintf("查询输出阶段正则脚本: userID=%d, placement=1, charID=%d", conversation.UserID, conversation.CharacterID))
	outputScripts, err := regexService.GetScriptsForPlacement(conversation.UserID, 1, &conversation.CharacterID, nil)
	if err != nil {
		global.GVA_LOG.Error(fmt.Sprintf("查询输出阶段正则脚本失败: %v", err))
	} else {
		global.GVA_LOG.Info(fmt.Sprintf("找到 %d 个输出阶段正则脚本", len(outputScripts)))
		if len(outputScripts) > 0 {
			// Load the user (best effort) for name substitution in scripts.
			var user app.AppUser
			err = global.GVA_DB.Where("id = ?", conversation.UserID).First(&user).Error
			userName := ""
			if err == nil {
				userName = user.Username
				if userName == "" {
					userName = user.NickName
				}
			}
			originalResponse := aiResponse
			aiResponse = regexService.ExecuteScripts(outputScripts, aiResponse, userName, character.Name)
			global.GVA_LOG.Info(fmt.Sprintf("应用了 %d 个输出阶段正则脚本,原始长度: %d, 处理后长度: %d", len(outputScripts), len(originalResponse), len(aiResponse)))
		}
	}
	return aiResponse, nil
}
// buildSystemPrompt assembles the role-play system prompt from the character
// card's fields plus every enabled entry of its embedded character book, then
// runs macro substitution ({{char}}, {{time}}, ...) over the result.
func (s *ConversationService) buildSystemPrompt(character app.AICharacter) string {
	var b strings.Builder
	b.WriteString(fmt.Sprintf("你是 %s。", character.Name))

	// Append a formatted section only when the card provides a value.
	section := func(format, value string) {
		if value != "" {
			b.WriteString(fmt.Sprintf(format, value))
		}
	}
	section("\n\n描述%s", character.Description)
	section("\n\n性格%s", character.Personality)
	section("\n\n场景%s", character.Scenario)
	section("\n\n开场白%s", character.FirstMes)
	section("\n\n对话示例\n%s", character.MesExample)
	section("\n\n系统提示%s", character.SystemPrompt)

	// Embedded character book: append the content of every enabled entry.
	// Malformed JSON is silently skipped (best effort).
	if len(character.CharacterBook) > 0 {
		var book map[string]interface{}
		if err := json.Unmarshal(character.CharacterBook, &book); err == nil {
			if entries, ok := book["entries"].([]interface{}); ok && len(entries) > 0 {
				b.WriteString("\n\n世界设定")
				for _, raw := range entries {
					entry, ok := raw.(map[string]interface{})
					if !ok {
						continue
					}
					// Entries are enabled unless explicitly flagged false.
					if flag, ok := entry["enabled"].(bool); ok && !flag {
						continue
					}
					if content, ok := entry["content"].(string); ok && content != "" {
						b.WriteString(fmt.Sprintf("\n- %s", content))
					}
				}
			}
		}
	}

	b.WriteString("\n\n请根据以上设定进行角色扮演保持角色的性格和说话方式。")

	// Apply MVU macro-variable substitution before returning.
	return s.applyMacroVariables(b.String(), character)
}
// applyMacroVariables substitutes the supported {{...}} macros (character
// name, placeholder user name, current time/date/weekday, idle duration)
// into text. Part of the MVU feature.
func (s *ConversationService) applyMacroVariables(text string, character app.AICharacter) string {
	now := time.Now()

	// Macro table; pairs of placeholder and replacement.
	macros := [][2]string{
		{"{{char}}", character.Name},
		{"{{user}}", "用户"}, // NOTE(review): static placeholder; the real user name is not wired in here
		{"{{time}}", now.Format("15:04")},
		{"{{date}}", now.Format("2006-01-02")},
		{"{{datetime}}", now.Format("2006-01-02 15:04:05")},
		{"{{weekday}}", s.getWeekdayInChinese(now.Weekday())},
		{"{{idle_duration}}", "0分钟"},
	}

	out := text
	for _, m := range macros {
		out = strings.ReplaceAll(out, m[0], m[1])
	}
	return out
}
// getWeekdayInChinese maps a time.Weekday to its Chinese name; unknown values
// yield the empty string (matching the previous map-miss behavior).
func (s *ConversationService) getWeekdayInChinese(weekday time.Weekday) string {
	switch weekday {
	case time.Sunday:
		return "星期日"
	case time.Monday:
		return "星期一"
	case time.Tuesday:
		return "星期二"
	case time.Wednesday:
		return "星期三"
	case time.Thursday:
		return "星期四"
	case time.Friday:
		return "星期五"
	case time.Saturday:
		return "星期六"
	default:
		return ""
	}
}
// SendMessageStream is the streaming variant of SendMessage: it persists the
// user message (after input-stage regex scripts), resolves config / preset /
// worldbook, streams the AI reply chunk by chunk into streamChan, then applies
// output-stage regex scripts and persists the final reply. Both channels are
// closed on return; doneChan receives true only after a successful run.
func (s *ConversationService) SendMessageStream(userID, conversationID uint, req *request.SendMessageRequest, streamChan chan string, doneChan chan bool) error {
	defer close(streamChan)
	defer close(doneChan)
	// Verify the conversation belongs to the caller.
	var conversation app.Conversation
	err := global.GVA_DB.Where("id = ? AND user_id = ?", conversationID, userID).
		First(&conversation).Error
	if err != nil {
		if errors.Is(err, gorm.ErrRecordNotFound) {
			return errors.New("对话不存在或无权访问")
		}
		return err
	}
	// Load the character card driving this conversation.
	var character app.AICharacter
	err = global.GVA_DB.Where("id = ?", conversation.CharacterID).First(&character).Error
	if err != nil {
		return errors.New("角色卡不存在")
	}
	// Load the user (best effort) for regex-script name substitution.
	var user app.AppUser
	err = global.GVA_DB.Where("id = ?", userID).First(&user).Error
	if err != nil {
		global.GVA_LOG.Warn(fmt.Sprintf("[流式传输] 获取用户信息失败: %v", err))
	}
	userName := user.Username
	if userName == "" {
		userName = user.NickName
	}
	// Apply input-stage regex scripts (placement 0) to the raw user input.
	processedContent := req.Content
	var regexService RegexScriptService
	global.GVA_LOG.Info(fmt.Sprintf("[流式传输] 查询输入阶段正则脚本: userID=%d, placement=0, charID=%d", userID, conversation.CharacterID))
	inputScripts, err := regexService.GetScriptsForPlacement(userID, 0, &conversation.CharacterID, nil)
	if err != nil {
		global.GVA_LOG.Error(fmt.Sprintf("[流式传输] 查询输入阶段正则脚本失败: %v", err))
	} else {
		global.GVA_LOG.Info(fmt.Sprintf("[流式传输] 找到 %d 个输入阶段正则脚本", len(inputScripts)))
		if len(inputScripts) > 0 {
			processedContent = regexService.ExecuteScripts(inputScripts, processedContent, userName, character.Name)
			global.GVA_LOG.Info(fmt.Sprintf("[流式传输] 应用了 %d 个输入阶段正则脚本", len(inputScripts)))
		}
	}
	// Persist the (possibly transformed) user message.
	userMessage := app.Message{
		ConversationID: conversationID,
		Role:           "user",
		Content:        processedContent,
		TokenCount:     len(processedContent) / 4, // rough estimate: ~4 bytes per token
	}
	err = global.GVA_DB.Create(&userMessage).Error
	if err != nil {
		return err
	}
	// Fetch the 10 most recent messages as model context.
	var messages []app.Message
	err = global.GVA_DB.Where("conversation_id = ?", conversationID).
		Order("created_at DESC").
		Limit(10).
		Find(&messages).Error
	if err != nil {
		return err
	}
	// Reverse into chronological (oldest-first) order.
	for i, j := 0, len(messages)-1; i < j; i, j = i+1, j-1 {
		messages[i], messages[j] = messages[j], messages[i]
	}
	// Resolve the AI config: conversation-pinned config first, else the
	// default active one.
	var aiConfig app.AIConfig
	var configID uint
	if len(conversation.Settings) > 0 {
		var settings map[string]interface{}
		if err := json.Unmarshal(conversation.Settings, &settings); err == nil {
			if id, ok := settings["aiConfigId"].(float64); ok { // JSON numbers decode as float64
				configID = uint(id)
			}
		}
	}
	if configID > 0 {
		err = global.GVA_DB.Where("id = ? AND is_active = ?", configID, true).First(&aiConfig).Error
	}
	if err != nil || configID == 0 {
		err = global.GVA_DB.Where("is_active = ?", true).
			Order("is_default DESC, created_at DESC").
			First(&aiConfig).Error
	}
	if err != nil {
		return errors.New("未找到可用的 AI 配置")
	}
	// Load the preset pinned in the conversation settings, if any.
	var streamPreset *app.AIPreset
	var streamPresetID uint
	if len(conversation.Settings) > 0 {
		var settings map[string]interface{}
		if err := json.Unmarshal(conversation.Settings, &settings); err == nil {
			if id, ok := settings["presetId"].(float64); ok {
				streamPresetID = uint(id)
			}
		}
	}
	if streamPresetID > 0 {
		var loadedPreset app.AIPreset
		if err := global.GVA_DB.First(&loadedPreset, streamPresetID).Error; err == nil {
			streamPreset = &loadedPreset
			global.GVA_LOG.Info(fmt.Sprintf("[流式传输] 使用预设: %s (Temperature: %.2f)", streamPreset.Name, streamPreset.Temperature))
			// Track preset usage (fire and forget).
			global.GVA_DB.Model(streamPreset).Update("use_count", gorm.Expr("use_count + ?", 1))
		}
	}
	// Build the system prompt; append the preset's prompt after the card's.
	systemPrompt := s.buildSystemPrompt(character)
	if streamPreset != nil && streamPreset.SystemPrompt != "" {
		systemPrompt = systemPrompt + "\n\n" + streamPreset.SystemPrompt
	}
	// Worldbook integration: scan recent messages, inject triggered entries.
	if conversation.WorldbookEnabled && conversation.WorldbookID != nil {
		global.GVA_LOG.Info(fmt.Sprintf("[流式传输] 世界书已启用ID: %d", *conversation.WorldbookID))
		var messageContents []string
		for _, msg := range messages {
			messageContents = append(messageContents, msg.Content)
		}
		engine := &WorldbookEngine{}
		triggeredEntries, wbErr := engine.ScanAndTrigger(*conversation.WorldbookID, messageContents)
		if wbErr != nil {
			global.GVA_LOG.Warn(fmt.Sprintf("[流式传输] 世界书触发失败: %v", wbErr))
		} else if len(triggeredEntries) > 0 {
			global.GVA_LOG.Info(fmt.Sprintf("[流式传输] 触发了 %d 个世界书条目", len(triggeredEntries)))
			systemPrompt = engine.BuildPromptWithWorldbook(systemPrompt, triggeredEntries)
		} else {
			global.GVA_LOG.Info("[流式传输] 没有触发任何世界书条目")
		}
	}
	apiMessages := s.buildAPIMessages(messages, systemPrompt)
	// Debug: dump the full outbound payload.
	global.GVA_LOG.Info("========== [流式传输] 发送给AI的完整内容 ==========")
	global.GVA_LOG.Info(fmt.Sprintf("系统提示词: %s", systemPrompt))
	global.GVA_LOG.Info("消息列表:")
	for i, msg := range apiMessages {
		global.GVA_LOG.Info(fmt.Sprintf(" [%d] Role: %s, Content: %s", i, msg["role"], msg["content"]))
	}
	global.GVA_LOG.Info("==========================================")
	// Model precedence: config default > conversation's stored model > "gpt-4".
	model := aiConfig.DefaultModel
	if model == "" {
		model = conversation.Model
	}
	if model == "" {
		model = "gpt-4"
	}
	global.GVA_LOG.Info(fmt.Sprintf("[流式传输] 使用模型: %s (Provider: %s)", model, aiConfig.Provider))
	// Call the provider's streaming endpoint; chunks flow through streamChan
	// while the concatenated result comes back as fullContent.
	var fullContent string
	switch aiConfig.Provider {
	case "openai", "custom":
		fullContent, err = s.callOpenAIAPIStream(&aiConfig, model, apiMessages, streamPreset, streamChan)
	case "anthropic":
		fullContent, err = s.callAnthropicAPIStream(&aiConfig, model, apiMessages, systemPrompt, streamPreset, streamChan)
	default:
		return fmt.Errorf("不支持的 AI 提供商: %s", aiConfig.Provider)
	}
	if err != nil {
		global.GVA_LOG.Error(fmt.Sprintf("========== [流式传输] AI返回错误 ==========\n%v\n==========================================", err))
		return err
	}
	global.GVA_LOG.Info(fmt.Sprintf("========== [流式传输] AI返回的完整内容 ==========\n%s\n==========================================", fullContent))
	// Apply output-stage regex scripts (placement 1) to the full reply.
	global.GVA_LOG.Info(fmt.Sprintf("[流式传输] 查询输出阶段正则脚本: userID=%d, placement=1, charID=%d", userID, conversation.CharacterID))
	outputScripts, err := regexService.GetScriptsForPlacement(userID, 1, &conversation.CharacterID, nil)
	if err != nil {
		global.GVA_LOG.Error(fmt.Sprintf("[流式传输] 查询输出阶段正则脚本失败: %v", err))
	} else {
		global.GVA_LOG.Info(fmt.Sprintf("[流式传输] 找到 %d 个输出阶段正则脚本", len(outputScripts)))
		if len(outputScripts) > 0 {
			fullContent = regexService.ExecuteScripts(outputScripts, fullContent, userName, character.Name)
			global.GVA_LOG.Info(fmt.Sprintf("[流式传输] 应用了 %d 个输出阶段正则脚本", len(outputScripts)))
		}
	}
	// Persist the post-processed assistant reply.
	assistantMessage := app.Message{
		ConversationID: conversationID,
		Role:           "assistant",
		Content:        fullContent,
		TokenCount:     len(fullContent) / 4, // rough estimate: ~4 bytes per token
	}
	err = global.GVA_DB.Create(&assistantMessage).Error
	if err != nil {
		return err
	}
	// Bump conversation counters (user + assistant = 2 messages).
	err = global.GVA_DB.Model(&conversation).Updates(map[string]interface{}{
		"message_count": gorm.Expr("message_count + ?", 2),
		"token_count":   gorm.Expr("token_count + ?", userMessage.TokenCount+assistantMessage.TokenCount),
	}).Error
	doneChan <- true
	return err
}
// callOpenAIAPIStream calls an OpenAI-compatible /chat/completions endpoint
// with stream=true, forwards each content delta to streamChan as it arrives,
// and returns the concatenated full response text.
func (s *ConversationService) callOpenAIAPIStream(config *app.AIConfig, model string, messages []map[string]string, preset *app.AIPreset, streamChan chan string) (string, error) {
	client := &http.Client{Timeout: 120 * time.Second}

	// Model fallbacks: explicit arg > config default > hard-coded.
	if model == "" {
		model = config.DefaultModel
	}
	if model == "" {
		model = "gpt-4"
	}

	// Sampling parameters: defaults, optionally overridden by the preset.
	temperature := 0.7
	maxTokens := 2000
	var topP *float64
	var frequencyPenalty *float64
	var presencePenalty *float64
	var stopSequences []string
	if preset != nil {
		temperature = preset.Temperature
		// Guard against presets with an unset (zero) token limit: sending
		// max_tokens=0 is rejected by the API, so keep the default instead.
		if preset.MaxTokens > 0 {
			maxTokens = preset.MaxTokens
		}
		if preset.TopP > 0 {
			topP = &preset.TopP
		}
		if preset.FrequencyPenalty != 0 {
			frequencyPenalty = &preset.FrequencyPenalty
		}
		if preset.PresencePenalty != 0 {
			presencePenalty = &preset.PresencePenalty
		}
		if len(preset.StopSequences) > 0 {
			// Best effort: malformed stop-sequence JSON just means no stops.
			json.Unmarshal(preset.StopSequences, &stopSequences)
		}
	}

	// Build the request body with streaming enabled; optional sampling knobs
	// are included only when explicitly configured.
	requestBody := map[string]interface{}{
		"model":       model,
		"messages":    messages,
		"temperature": temperature,
		"max_tokens":  maxTokens,
		"stream":      true,
	}
	if topP != nil {
		requestBody["top_p"] = *topP
	}
	if frequencyPenalty != nil {
		requestBody["frequency_penalty"] = *frequencyPenalty
	}
	if presencePenalty != nil {
		requestBody["presence_penalty"] = *presencePenalty
	}
	if len(stopSequences) > 0 {
		requestBody["stop"] = stopSequences
	}
	bodyBytes, err := json.Marshal(requestBody)
	if err != nil {
		return "", fmt.Errorf("序列化请求失败: %v", err)
	}

	endpoint := config.BaseURL + "/chat/completions"
	req, err := http.NewRequest("POST", endpoint, bytes.NewBuffer(bodyBytes))
	if err != nil {
		return "", fmt.Errorf("创建请求失败: %v", err)
	}
	req.Header.Set("Content-Type", "application/json")
	req.Header.Set("Authorization", "Bearer "+config.APIKey)
	resp, err := client.Do(req)
	if err != nil {
		return "", fmt.Errorf("请求失败: %v", err)
	}
	defer resp.Body.Close()
	if resp.StatusCode != http.StatusOK {
		body, _ := io.ReadAll(resp.Body)
		return "", fmt.Errorf("API 返回错误 %d: %s", resp.StatusCode, string(body))
	}

	// Consume the SSE stream line by line; each "data: {...}" payload carries
	// a delta whose content is forwarded to the caller and accumulated.
	var fullContent strings.Builder
	reader := bufio.NewReader(resp.Body)
	for {
		line, err := reader.ReadString('\n')
		if err != nil {
			if err == io.EOF {
				break
			}
			return "", fmt.Errorf("读取流失败: %v", err)
		}
		line = strings.TrimSpace(line)
		if line == "" || line == "data: [DONE]" {
			continue
		}
		if strings.HasPrefix(line, "data: ") {
			data := strings.TrimPrefix(line, "data: ")
			var streamResp struct {
				Choices []struct {
					Delta struct {
						Content string `json:"content"`
					} `json:"delta"`
				} `json:"choices"`
			}
			if err := json.Unmarshal([]byte(data), &streamResp); err != nil {
				continue // ignore malformed / unrelated SSE payloads
			}
			if len(streamResp.Choices) > 0 {
				content := streamResp.Choices[0].Delta.Content
				if content != "" {
					fullContent.WriteString(content)
					streamChan <- content
				}
			}
		}
	}
	return fullContent.String(), nil
}
// callAnthropicAPIStream calls the Anthropic /messages endpoint with
// stream=true, forwards each text delta to streamChan as it arrives, and
// returns the concatenated full response text.
func (s *ConversationService) callAnthropicAPIStream(config *app.AIConfig, model string, messages []map[string]string, systemPrompt string, preset *app.AIPreset, streamChan chan string) (string, error) {
	client := &http.Client{Timeout: 120 * time.Second}

	// Model fallbacks: explicit arg > config default > hard-coded.
	if model == "" {
		model = config.DefaultModel
	}
	if model == "" {
		model = "claude-3-sonnet-20240229"
	}

	// Anthropic takes the system prompt as a top-level field rather than a
	// message with role "system" — strip any system-role entries.
	apiMessages := make([]map[string]string, 0)
	for _, msg := range messages {
		if msg["role"] != "system" {
			apiMessages = append(apiMessages, msg)
		}
	}

	// Sampling parameters: defaults, optionally overridden by the preset.
	maxTokens := 2000
	var temperature *float64
	var topP *float64
	var stopSequences []string
	if preset != nil {
		// Guard against presets with an unset (zero) token limit: Anthropic
		// rejects max_tokens < 1, so keep the default instead.
		if preset.MaxTokens > 0 {
			maxTokens = preset.MaxTokens
		}
		if preset.Temperature > 0 {
			temperature = &preset.Temperature
		}
		if preset.TopP > 0 {
			topP = &preset.TopP
		}
		if len(preset.StopSequences) > 0 {
			// Best effort: malformed stop-sequence JSON just means no stops.
			json.Unmarshal(preset.StopSequences, &stopSequences)
		}
	}

	requestBody := map[string]interface{}{
		"model":      model,
		"messages":   apiMessages,
		"system":     systemPrompt,
		"max_tokens": maxTokens,
		"stream":     true,
	}
	if temperature != nil {
		requestBody["temperature"] = *temperature
	}
	if topP != nil {
		requestBody["top_p"] = *topP
	}
	if len(stopSequences) > 0 {
		requestBody["stop_sequences"] = stopSequences
	}
	bodyBytes, err := json.Marshal(requestBody)
	if err != nil {
		return "", fmt.Errorf("序列化请求失败: %v", err)
	}

	endpoint := config.BaseURL + "/messages"
	req, err := http.NewRequest("POST", endpoint, bytes.NewBuffer(bodyBytes))
	if err != nil {
		return "", fmt.Errorf("创建请求失败: %v", err)
	}
	req.Header.Set("Content-Type", "application/json")
	req.Header.Set("x-api-key", config.APIKey)
	req.Header.Set("anthropic-version", "2023-06-01")
	resp, err := client.Do(req)
	if err != nil {
		return "", fmt.Errorf("请求失败: %v", err)
	}
	defer resp.Body.Close()
	if resp.StatusCode != http.StatusOK {
		body, _ := io.ReadAll(resp.Body)
		return "", fmt.Errorf("API 返回错误 %d: %s", resp.StatusCode, string(body))
	}

	// Consume the SSE stream: "content_block_delta" events carry text chunks
	// which are forwarded to the caller and accumulated.
	var fullContent strings.Builder
	reader := bufio.NewReader(resp.Body)
	for {
		line, err := reader.ReadString('\n')
		if err != nil {
			if err == io.EOF {
				break
			}
			return "", fmt.Errorf("读取流失败: %v", err)
		}
		line = strings.TrimSpace(line)
		if line == "" {
			continue
		}
		if strings.HasPrefix(line, "data: ") {
			data := strings.TrimPrefix(line, "data: ")
			var streamResp struct {
				Type  string `json:"type"`
				Delta struct {
					Type string `json:"type"`
					Text string `json:"text"`
				} `json:"delta"`
			}
			if err := json.Unmarshal([]byte(data), &streamResp); err != nil {
				continue // ignore malformed / unrelated SSE payloads
			}
			if streamResp.Type == "content_block_delta" && streamResp.Delta.Text != "" {
				fullContent.WriteString(streamResp.Delta.Text)
				streamChan <- streamResp.Delta.Text
			}
		}
	}
	return fullContent.String(), nil
}
// RegenerateMessage deletes the most recent assistant reply in the
// conversation (decrementing counters, clamped at zero), re-runs the AI on
// the remaining history, and stores/returns the new reply. Non-streaming
// variant.
func (s *ConversationService) RegenerateMessage(userID, conversationID uint) (*response.MessageResponse, error) {
	// Verify the conversation belongs to the caller.
	var conversation app.Conversation
	err := global.GVA_DB.Where("id = ? AND user_id = ?", conversationID, userID).First(&conversation).Error
	if err != nil {
		if errors.Is(err, gorm.ErrRecordNotFound) {
			return nil, errors.New("对话不存在或无权访问")
		}
		return nil, err
	}
	// Load the character card.
	var character app.AICharacter
	err = global.GVA_DB.Where("id = ?", conversation.CharacterID).First(&character).Error
	if err != nil {
		return nil, errors.New("角色卡不存在")
	}
	// Remove the latest assistant message, if one exists, and decrement the
	// counters (SQL GREATEST keeps them from going negative).
	var lastAssistantMsg app.Message
	if err = global.GVA_DB.Where("conversation_id = ? AND role = ?", conversationID, "assistant").
		Order("created_at DESC").First(&lastAssistantMsg).Error; err == nil {
		global.GVA_DB.Delete(&lastAssistantMsg)
		global.GVA_DB.Model(&conversation).Updates(map[string]interface{}{
			"message_count": gorm.Expr("GREATEST(message_count - 1, 0)"),
			"token_count":   gorm.Expr("GREATEST(token_count - ?, 0)", lastAssistantMsg.TokenCount),
		})
	}
	// Re-read the 10 most recent remaining messages as model context.
	var messages []app.Message
	err = global.GVA_DB.Where("conversation_id = ?", conversationID).
		Order("created_at DESC").Limit(10).Find(&messages).Error
	if err != nil {
		return nil, err
	}
	if len(messages) == 0 {
		return nil, errors.New("没有可用的消息历史")
	}
	// Reverse into chronological (oldest-first) order.
	for i, j := 0, len(messages)-1; i < j; i, j = i+1, j-1 {
		messages[i], messages[j] = messages[j], messages[i]
	}
	// Generate the replacement reply.
	aiResponse, err := s.callAIService(conversation, character, messages)
	if err != nil {
		return nil, err
	}
	// Persist the regenerated reply.
	assistantMessage := app.Message{
		ConversationID: conversationID,
		Role:           "assistant",
		Content:        aiResponse,
		TokenCount:     len(aiResponse) / 4, // rough estimate: ~4 bytes per token
	}
	if err = global.GVA_DB.Create(&assistantMessage).Error; err != nil {
		return nil, err
	}
	// Bump the counters for the single regenerated message.
	global.GVA_DB.Model(&conversation).Updates(map[string]interface{}{
		"message_count": gorm.Expr("message_count + ?", 1),
		"token_count":   gorm.Expr("token_count + ?", assistantMessage.TokenCount),
	})
	resp := response.ToMessageResponse(&assistantMessage)
	return &resp, nil
}
// RegenerateMessageStream 流式重新生成最后一条 AI 回复。
// 删除上一条 assistant 消息后,基于剩余历史重新调用 AI 服务,增量内容通过
// streamChan 推送;生成完成后落库并向 doneChan 发送信号。
// 两个通道均在函数返回时由本函数关闭(调用方不应再关闭)。
// 修复:原实现对 conversation.Settings 做了两次 JSON 反序列化
// (分别提取 aiConfigId 和 presetId现合并为一次解析。
func (s *ConversationService) RegenerateMessageStream(userID, conversationID uint, streamChan chan string, doneChan chan bool) error {
	defer close(streamChan)
	defer close(doneChan)
	// 校验对话归属
	var conversation app.Conversation
	err := global.GVA_DB.Where("id = ? AND user_id = ?", conversationID, userID).First(&conversation).Error
	if err != nil {
		if errors.Is(err, gorm.ErrRecordNotFound) {
			return errors.New("对话不存在或无权访问")
		}
		return err
	}
	var character app.AICharacter
	if err = global.GVA_DB.Where("id = ?", conversation.CharacterID).First(&character).Error; err != nil {
		return errors.New("角色卡不存在")
	}
	// 删除最后一条 AI 回复;删除/计数更新为尽力而为GREATEST 保证计数非负
	var lastAssistantMsg app.Message
	if err = global.GVA_DB.Where("conversation_id = ? AND role = ?", conversationID, "assistant").
		Order("created_at DESC").First(&lastAssistantMsg).Error; err == nil {
		global.GVA_DB.Delete(&lastAssistantMsg)
		global.GVA_DB.Model(&conversation).Updates(map[string]interface{}{
			"message_count": gorm.Expr("GREATEST(message_count - 1, 0)"),
			"token_count":   gorm.Expr("GREATEST(token_count - ?, 0)", lastAssistantMsg.TokenCount),
		})
	}
	// 获取删除后的消息历史(倒序取最近 10 条,再翻转为时间正序)
	var messages []app.Message
	err = global.GVA_DB.Where("conversation_id = ?", conversationID).
		Order("created_at DESC").Limit(10).Find(&messages).Error
	if err != nil {
		return err
	}
	if len(messages) == 0 {
		return errors.New("没有可用的消息历史")
	}
	for i, j := 0, len(messages)-1; i < j; i, j = i+1, j-1 {
		messages[i], messages[j] = messages[j], messages[i]
	}
	// 一次性解析对话设置,同时取出 AI 配置 ID 与预设 ID
	// JSON 数字解码为 float64故用类型断言转换
	var configID, presetID uint
	if len(conversation.Settings) > 0 {
		var settings map[string]interface{}
		if err := json.Unmarshal(conversation.Settings, &settings); err == nil {
			if id, ok := settings["aiConfigId"].(float64); ok {
				configID = uint(id)
			}
			if id, ok := settings["presetId"].(float64); ok {
				presetID = uint(id)
			}
		}
	}
	// 选择 AI 配置:优先对话指定的配置,查询失败或未指定时回退到默认激活配置
	var aiConfig app.AIConfig
	if configID > 0 {
		err = global.GVA_DB.Where("id = ? AND is_active = ?", configID, true).First(&aiConfig).Error
	}
	if err != nil || configID == 0 {
		err = global.GVA_DB.Where("is_active = ?", true).
			Order("is_default DESC, created_at DESC").
			First(&aiConfig).Error
	}
	if err != nil {
		return errors.New("未找到可用的 AI 配置")
	}
	// 加载预设;找不到时静默忽略,退化为无预设
	var preset *app.AIPreset
	if presetID > 0 {
		var loadedPreset app.AIPreset
		if err := global.GVA_DB.First(&loadedPreset, presetID).Error; err == nil {
			preset = &loadedPreset
		}
	}
	// 组装系统提示词:角色卡提示词 + 预设附加提示词
	systemPrompt := s.buildSystemPrompt(character)
	if preset != nil && preset.SystemPrompt != "" {
		systemPrompt = systemPrompt + "\n\n" + preset.SystemPrompt
	}
	apiMessages := s.buildAPIMessages(messages, systemPrompt)
	// 模型兜底:配置默认模型 > 对话模型 > gpt-4
	model := aiConfig.DefaultModel
	if model == "" {
		model = conversation.Model
	}
	if model == "" {
		model = "gpt-4"
	}
	// 按提供商分发到对应的流式实现
	var fullContent string
	switch aiConfig.Provider {
	case "openai", "custom":
		fullContent, err = s.callOpenAIAPIStream(&aiConfig, model, apiMessages, preset, streamChan)
	case "anthropic":
		fullContent, err = s.callAnthropicAPIStream(&aiConfig, model, apiMessages, systemPrompt, preset, streamChan)
	default:
		return fmt.Errorf("不支持的 AI 提供商: %s", aiConfig.Provider)
	}
	if err != nil {
		return err
	}
	// 落库完整回复并更新对话统计
	assistantMessage := app.Message{
		ConversationID: conversationID,
		Role:           "assistant",
		Content:        fullContent,
		TokenCount:     len(fullContent) / 4, // 粗略估算:约 4 字节折算 1 个 token
	}
	if err = global.GVA_DB.Create(&assistantMessage).Error; err != nil {
		return err
	}
	global.GVA_DB.Model(&conversation).Updates(map[string]interface{}{
		"message_count": gorm.Expr("message_count + ?", 1),
		"token_count":   gorm.Expr("token_count + ?", assistantMessage.TokenCount),
	})
	doneChan <- true
	return nil
}
// buildAPIMessages 组装发送给 AI 接口的消息列表OpenAI 格式):
// 首条为 system 提示词,其后按原顺序附加历史消息,历史中已有的 system 消息被跳过。
func (s *ConversationService) buildAPIMessages(messages []app.Message, systemPrompt string) []map[string]string {
	result := make([]map[string]string, 0, len(messages)+1)
	result = append(result, map[string]string{
		"role":    "system",
		"content": systemPrompt,
	})
	for _, m := range messages {
		if m.Role == "system" {
			// 系统提示词已在首条给出,跳过历史里的 system 消息
			continue
		}
		result = append(result, map[string]string{
			"role":    m.Role,
			"content": m.Content,
		})
	}
	return result
}
// callOpenAIAPI 调用 OpenAI 兼容的 /chat/completions 接口(非流式)。
// config 提供 BaseURL / APIKey / 默认模型preset 为 nil 时使用内置默认采样参数
// Temperature=0.7MaxTokens=2000。返回第一条 choice 的文本内容。
// 改进:错误统一用 %w 包装以保留错误链;停止序列解析失败时记录告警而不再静默忽略。
func (s *ConversationService) callOpenAIAPI(config *app.AIConfig, model string, messages []map[string]string, preset *app.AIPreset) (string, error) {
	client := &http.Client{Timeout: 120 * time.Second}
	// 模型兜底:入参 > 配置默认模型 > gpt-4
	if model == "" {
		model = config.DefaultModel
	}
	if model == "" {
		model = "gpt-4"
	}
	// 默认采样参数;有预设时被预设覆盖
	temperature := 0.7
	maxTokens := 2000
	var topP *float64
	var frequencyPenalty *float64
	var presencePenalty *float64
	var stopSequences []string
	if preset != nil {
		temperature = preset.Temperature
		maxTokens = preset.MaxTokens
		if preset.TopP > 0 {
			topP = &preset.TopP
		}
		if preset.FrequencyPenalty != 0 {
			frequencyPenalty = &preset.FrequencyPenalty
		}
		if preset.PresencePenalty != 0 {
			presencePenalty = &preset.PresencePenalty
		}
		// 解析停止序列;解析失败按无停止序列处理(不中断请求),但记录告警
		if len(preset.StopSequences) > 0 {
			if err := json.Unmarshal(preset.StopSequences, &stopSequences); err != nil {
				global.GVA_LOG.Warn(fmt.Sprintf("解析停止序列失败: %v", err))
			}
		}
		global.GVA_LOG.Info(fmt.Sprintf("应用预设参数: Temperature=%.2f, MaxTokens=%d, TopP=%.2f", temperature, maxTokens, preset.TopP))
	}
	// 构建请求体;可选参数仅在显式设置时携带,避免覆盖服务端默认值
	requestBody := map[string]interface{}{
		"model":       model,
		"messages":    messages,
		"temperature": temperature,
		"max_tokens":  maxTokens,
	}
	if topP != nil {
		requestBody["top_p"] = *topP
	}
	if frequencyPenalty != nil {
		requestBody["frequency_penalty"] = *frequencyPenalty
	}
	if presencePenalty != nil {
		requestBody["presence_penalty"] = *presencePenalty
	}
	if len(stopSequences) > 0 {
		requestBody["stop"] = stopSequences
	}
	bodyBytes, err := json.Marshal(requestBody)
	if err != nil {
		return "", fmt.Errorf("序列化请求失败: %w", err)
	}
	// 创建并发送请求
	endpoint := config.BaseURL + "/chat/completions"
	req, err := http.NewRequest("POST", endpoint, bytes.NewBuffer(bodyBytes))
	if err != nil {
		return "", fmt.Errorf("创建请求失败: %w", err)
	}
	req.Header.Set("Content-Type", "application/json")
	req.Header.Set("Authorization", "Bearer "+config.APIKey)
	resp, err := client.Do(req)
	if err != nil {
		return "", fmt.Errorf("请求失败: %w", err)
	}
	defer resp.Body.Close()
	body, err := io.ReadAll(resp.Body)
	if err != nil {
		return "", fmt.Errorf("读取响应失败: %w", err)
	}
	if resp.StatusCode != http.StatusOK {
		return "", fmt.Errorf("API 返回错误 %d: %s", resp.StatusCode, string(body))
	}
	// 解析响应,只取需要的字段
	var result struct {
		Choices []struct {
			Message struct {
				Content string `json:"content"`
			} `json:"message"`
		} `json:"choices"`
		Error *struct {
			Message string `json:"message"`
		} `json:"error"`
	}
	if err = json.Unmarshal(body, &result); err != nil {
		return "", fmt.Errorf("解析响应失败: %w", err)
	}
	if result.Error != nil {
		return "", fmt.Errorf("API 错误: %s", result.Error.Message)
	}
	if len(result.Choices) == 0 {
		return "", errors.New("API 未返回任何回复")
	}
	return result.Choices[0].Message.Content, nil
}
// callAnthropicAPI 调用 Anthropic /messages 接口(非流式),返回首个内容块文本。
// Anthropic 不接受 system role 消息,系统提示词通过独立的 system 字段传递。
func (s *ConversationService) callAnthropicAPI(config *app.AIConfig, model string, messages []map[string]string, systemPrompt string, preset *app.AIPreset) (string, error) {
	httpClient := &http.Client{Timeout: 120 * time.Second}
	// 模型兜底:入参 > 配置默认模型 > claude-3-sonnet-20240229
	if model == "" {
		model = config.DefaultModel
	}
	if model == "" {
		model = "claude-3-sonnet-20240229"
	}
	// 过滤掉 system role 消息system 提示词走独立字段)
	filtered := make([]map[string]string, 0)
	for _, m := range messages {
		if m["role"] == "system" {
			continue
		}
		filtered = append(filtered, m)
	}
	// 采样参数:默认仅 max_tokens有预设时覆盖并补充可选项
	maxTokens := 2000
	var temperature *float64
	var topP *float64
	var stopSequences []string
	if preset != nil {
		maxTokens = preset.MaxTokens
		if preset.Temperature > 0 {
			temperature = &preset.Temperature
		}
		if preset.TopP > 0 {
			topP = &preset.TopP
		}
		// 解析停止序列(解析失败则退化为无停止序列)
		if len(preset.StopSequences) > 0 {
			json.Unmarshal(preset.StopSequences, &stopSequences)
		}
		global.GVA_LOG.Info(fmt.Sprintf("应用预设参数: Temperature=%.2f, MaxTokens=%d, TopP=%.2f", preset.Temperature, maxTokens, preset.TopP))
	}
	// 组装请求体;可选参数仅在显式设置时携带
	payload := map[string]interface{}{
		"model":      model,
		"messages":   filtered,
		"system":     systemPrompt,
		"max_tokens": maxTokens,
	}
	if temperature != nil {
		payload["temperature"] = *temperature
	}
	if topP != nil {
		payload["top_p"] = *topP
	}
	if len(stopSequences) > 0 {
		payload["stop_sequences"] = stopSequences
	}
	raw, err := json.Marshal(payload)
	if err != nil {
		return "", fmt.Errorf("序列化请求失败: %v", err)
	}
	// 创建并发送请求Anthropic 通过 x-api-key 与版本头鉴权
	req, err := http.NewRequest("POST", config.BaseURL+"/messages", bytes.NewBuffer(raw))
	if err != nil {
		return "", fmt.Errorf("创建请求失败: %v", err)
	}
	req.Header.Set("Content-Type", "application/json")
	req.Header.Set("x-api-key", config.APIKey)
	req.Header.Set("anthropic-version", "2023-06-01")
	resp, err := httpClient.Do(req)
	if err != nil {
		return "", fmt.Errorf("请求失败: %v", err)
	}
	defer resp.Body.Close()
	body, err := io.ReadAll(resp.Body)
	if err != nil {
		return "", fmt.Errorf("读取响应失败: %v", err)
	}
	if resp.StatusCode != http.StatusOK {
		return "", fmt.Errorf("API 返回错误 %d: %s", resp.StatusCode, string(body))
	}
	// 只解析需要的字段
	var parsed struct {
		Content []struct {
			Text string `json:"text"`
		} `json:"content"`
		Error *struct {
			Message string `json:"message"`
		} `json:"error"`
	}
	if err = json.Unmarshal(body, &parsed); err != nil {
		return "", fmt.Errorf("解析响应失败: %v", err)
	}
	if parsed.Error != nil {
		return "", fmt.Errorf("API 错误: %s", parsed.Error.Message)
	}
	if len(parsed.Content) == 0 {
		return "", errors.New("API 未返回任何回复")
	}
	return parsed.Content[0].Text, nil
}