@@ -657,8 +657,31 @@ func (s *ConversationService) SendMessageStream(userID, conversationID uint, req
return errors . New ( "未找到可用的 AI 配置" )
}
// 构建系统提示词和消息列表
// 加载预设
var streamPreset * app . AIPreset
var streamPresetID uint
if len ( conversation . Settings ) > 0 {
var settings map [ string ] interface { }
if err := json . Unmarshal ( conversation . Settings , & settings ) ; err == nil {
if id , ok := settings [ "presetId" ] . ( float64 ) ; ok {
streamPresetID = uint ( id )
}
}
}
if streamPresetID > 0 {
var loadedPreset app . AIPreset
if err := global . GVA_DB . First ( & loadedPreset , streamPresetID ) . Error ; err == nil {
streamPreset = & loadedPreset
global . GVA_LOG . Info ( fmt . Sprintf ( "[流式传输] 使用预设: %s (Temperature: %.2f)" , streamPreset . Name , streamPreset . Temperature ) )
global . GVA_DB . Model ( streamPreset ) . Update ( "use_count" , gorm . Expr ( "use_count + ?" , 1 ) )
}
}
// 构建系统提示词(应用预设)
systemPrompt := s . buildSystemPrompt ( character )
if streamPreset != nil && streamPreset . SystemPrompt != "" {
systemPrompt = systemPrompt + "\n\n" + streamPreset . SystemPrompt
}
apiMessages := s . buildAPIMessages ( messages , systemPrompt )
// 打印发送给AI的完整内容( 流式传输)
@@ -685,9 +708,9 @@ func (s *ConversationService) SendMessageStream(userID, conversationID uint, req
var fullContent string
switch aiConfig . Provider {
case "openai" , "custom" :
fullContent , err = s . callOpenAIAPIStream ( & aiConfig , model , apiMessages , streamChan )
fullContent , err = s . callOpenAIAPIStream ( & aiConfig , model , apiMessages , streamPreset , streamChan )
case "anthropic" :
fullContent , err = s . callAnthropicAPIStream ( & aiConfig , model , apiMessages , systemPrompt , streamChan )
fullContent , err = s . callAnthropicAPIStream ( & aiConfig , model , apiMessages , systemPrompt , streamPreset , streamChan )
default :
return fmt . Errorf ( "不支持的 AI 提供商: %s" , aiConfig . Provider )
}
@@ -724,7 +747,7 @@ func (s *ConversationService) SendMessageStream(userID, conversationID uint, req
}
// callOpenAIAPIStream 调用 OpenAI API 流式传输
func ( s * ConversationService ) callOpenAIAPIStream ( config * app . AIConfig , model string , messages [ ] map [ string ] string , streamChan chan string ) ( string , error ) {
func ( s * ConversationService ) callOpenAIAPIStream ( config * app . AIConfig , model string , messages [ ] map [ string ] string , preset * app . AIPreset , streamChan chan string ) ( string , error ) {
client := & http . Client { Timeout : 120 * time . Second }
if model == "" {
@@ -734,15 +757,53 @@ func (s *ConversationService) callOpenAIAPIStream(config *app.AIConfig, model st
model = "gpt-4"
}
// 应用预设参数
temperature := 0.7
maxTokens := 2000
var topP * float64
var frequencyPenalty * float64
var presencePenalty * float64
var stopSequences [ ] string
if preset != nil {
temperature = preset . Temperature
maxTokens = preset . MaxTokens
if preset . TopP > 0 {
topP = & preset . TopP
}
if preset . FrequencyPenalty != 0 {
frequencyPenalty = & preset . FrequencyPenalty
}
if preset . PresencePenalty != 0 {
presencePenalty = & preset . PresencePenalty
}
if len ( preset . StopSequences ) > 0 {
json . Unmarshal ( preset . StopSequences , & stopSequences )
}
}
// 构建请求体,启用流式传输
requestBody := map [ string ] interface { } {
"model" : model ,
"messages" : messages ,
"temperature" : 0.7 ,
"max_tokens" : 2000 ,
"temperature" : temperature ,
"max_tokens" : maxTokens ,
"stream" : true ,
}
if topP != nil {
requestBody [ "top_p" ] = * topP
}
if frequencyPenalty != nil {
requestBody [ "frequency_penalty" ] = * frequencyPenalty
}
if presencePenalty != nil {
requestBody [ "presence_penalty" ] = * presencePenalty
}
if len ( stopSequences ) > 0 {
requestBody [ "stop" ] = stopSequences
}
bodyBytes , err := json . Marshal ( requestBody )
if err != nil {
return "" , fmt . Errorf ( "序列化请求失败: %v" , err )
@@ -815,7 +876,7 @@ func (s *ConversationService) callOpenAIAPIStream(config *app.AIConfig, model st
}
// callAnthropicAPIStream 调用 Anthropic API 流式传输
func ( s * ConversationService ) callAnthropicAPIStream ( config * app . AIConfig , model string , messages [ ] map [ string ] string , systemPrompt string , streamChan chan string ) ( string , error ) {
func ( s * ConversationService ) callAnthropicAPIStream ( config * app . AIConfig , model string , messages [ ] map [ string ] string , systemPrompt string , preset * app . AIPreset , streamChan chan string ) ( string , error ) {
client := & http . Client { Timeout : 120 * time . Second }
if model == "" {
@@ -833,14 +894,43 @@ func (s *ConversationService) callAnthropicAPIStream(config *app.AIConfig, model
}
}
// 应用预设参数
maxTokens := 2000
var temperature * float64
var topP * float64
var stopSequences [ ] string
if preset != nil {
maxTokens = preset . MaxTokens
if preset . Temperature > 0 {
temperature = & preset . Temperature
}
if preset . TopP > 0 {
topP = & preset . TopP
}
if len ( preset . StopSequences ) > 0 {
json . Unmarshal ( preset . StopSequences , & stopSequences )
}
}
requestBody := map [ string ] interface { } {
"model" : model ,
"messages" : apiMessages ,
"system" : systemPrompt ,
"max_tokens" : 2000 ,
"max_tokens" : maxTokens ,
"stream" : true ,
}
if temperature != nil {
requestBody [ "temperature" ] = * temperature
}
if topP != nil {
requestBody [ "top_p" ] = * topP
}
if len ( stopSequences ) > 0 {
requestBody [ "stop_sequences" ] = stopSequences
}
bodyBytes , err := json . Marshal ( requestBody )
if err != nil {
return "" , fmt . Errorf ( "序列化请求失败: %v" , err )
@@ -910,6 +1000,205 @@ func (s *ConversationService) callAnthropicAPIStream(config *app.AIConfig, model
return fullContent . String ( ) , nil
}
// RegenerateMessage 重新生成最后一条 AI 回复(非流式)
// RegenerateMessage discards the newest assistant reply in a conversation and
// produces a replacement synchronously (non-streaming). It returns the stored
// replacement message, or an error if the conversation is not owned by userID,
// the character card is missing, or no message history remains.
func (s *ConversationService) RegenerateMessage(userID, conversationID uint) (*response.MessageResponse, error) {
	// Ownership check: the conversation must belong to the requesting user.
	var conv app.Conversation
	if err := global.GVA_DB.Where("id = ? AND user_id = ?", conversationID, userID).First(&conv).Error; err != nil {
		if errors.Is(err, gorm.ErrRecordNotFound) {
			return nil, errors.New("对话不存在或无权访问")
		}
		return nil, err
	}

	var card app.AICharacter
	if err := global.GVA_DB.Where("id = ?", conv.CharacterID).First(&card).Error; err != nil {
		return nil, errors.New("角色卡不存在")
	}

	// Best-effort removal of the most recent assistant reply, rolling the
	// conversation counters back and clamping them at zero.
	var prevReply app.Message
	findErr := global.GVA_DB.Where("conversation_id = ? AND role = ?", conversationID, "assistant").
		Order("created_at DESC").First(&prevReply).Error
	if findErr == nil {
		global.GVA_DB.Delete(&prevReply)
		global.GVA_DB.Model(&conv).Updates(map[string]interface{}{
			"message_count": gorm.Expr("GREATEST(message_count - 1, 0)"),
			"token_count":   gorm.Expr("GREATEST(token_count - ?, 0)", prevReply.TokenCount),
		})
	}

	// Pull the 10 newest remaining messages; the query returns them
	// newest-first, so flip into chronological order for the AI call.
	var history []app.Message
	if err := global.GVA_DB.Where("conversation_id = ?", conversationID).
		Order("created_at DESC").Limit(10).Find(&history).Error; err != nil {
		return nil, err
	}
	if len(history) == 0 {
		return nil, errors.New("没有可用的消息历史")
	}
	for lo, hi := 0, len(history)-1; lo < hi; lo, hi = lo+1, hi-1 {
		history[lo], history[hi] = history[hi], history[lo]
	}

	reply, err := s.callAIService(conv, card, history)
	if err != nil {
		return nil, err
	}

	// Persist the regenerated reply; TokenCount is a rough bytes/4 estimate.
	msg := app.Message{
		ConversationID: conversationID,
		Role:           "assistant",
		Content:        reply,
		TokenCount:     len(reply) / 4,
	}
	if err := global.GVA_DB.Create(&msg).Error; err != nil {
		return nil, err
	}
	global.GVA_DB.Model(&conv).Updates(map[string]interface{}{
		"message_count": gorm.Expr("message_count + ?", 1),
		"token_count":   gorm.Expr("token_count + ?", msg.TokenCount),
	})

	resp := response.ToMessageResponse(&msg)
	return &resp, nil
}
// RegenerateMessageStream 流式重新生成最后一条 AI 回复
// RegenerateMessageStream deletes the newest assistant reply and regenerates
// it with streaming output. Content chunks are sent on streamChan as they
// arrive; doneChan receives true once the replacement reply has been
// persisted. Both channels are closed when the function returns, regardless
// of outcome, so callers must not close them.
func (s *ConversationService) RegenerateMessageStream(userID, conversationID uint, streamChan chan string, doneChan chan bool) error {
	defer close(streamChan)
	defer close(doneChan)
	// Ownership check: the conversation must belong to the requesting user.
	var conversation app.Conversation
	err := global.GVA_DB.Where("id = ? AND user_id = ?", conversationID, userID).First(&conversation).Error
	if err != nil {
		if errors.Is(err, gorm.ErrRecordNotFound) {
			return errors.New("对话不存在或无权访问")
		}
		return err
	}
	var character app.AICharacter
	err = global.GVA_DB.Where("id = ?", conversation.CharacterID).First(&character).Error
	if err != nil {
		return errors.New("角色卡不存在")
	}
	// Delete the most recent assistant reply and roll the conversation
	// counters back, clamped at zero. Errors from the delete/update are
	// deliberately ignored (best-effort cleanup).
	var lastAssistantMsg app.Message
	if err = global.GVA_DB.Where("conversation_id = ? AND role = ?", conversationID, "assistant").
		Order("created_at DESC").First(&lastAssistantMsg).Error; err == nil {
		global.GVA_DB.Delete(&lastAssistantMsg)
		global.GVA_DB.Model(&conversation).Updates(map[string]interface{}{
			"message_count": gorm.Expr("GREATEST(message_count - 1, 0)"),
			"token_count":   gorm.Expr("GREATEST(token_count - ?, 0)", lastAssistantMsg.TokenCount),
		})
	}
	// Fetch the remaining history after the deletion (10 newest messages).
	var messages []app.Message
	err = global.GVA_DB.Where("conversation_id = ?", conversationID).
		Order("created_at DESC").Limit(10).Find(&messages).Error
	if err != nil {
		return err
	}
	if len(messages) == 0 {
		return errors.New("没有可用的消息历史")
	}
	// The query returned newest-first; reverse into chronological order.
	for i, j := 0, len(messages)-1; i < j; i, j = i+1, j-1 {
		messages[i], messages[j] = messages[j], messages[i]
	}
	// Resolve the AI config: prefer the aiConfigId stored in the
	// conversation's Settings JSON; fall back to the default active config
	// when it is absent or cannot be loaded.
	var aiConfig app.AIConfig
	var configID uint
	if len(conversation.Settings) > 0 {
		var settings map[string]interface{}
		if err := json.Unmarshal(conversation.Settings, &settings); err == nil {
			// JSON numbers decode as float64; narrow to uint here.
			if id, ok := settings["aiConfigId"].(float64); ok {
				configID = uint(id)
			}
		}
	}
	if configID > 0 {
		err = global.GVA_DB.Where("id = ? AND is_active = ?", configID, true).First(&aiConfig).Error
	}
	if err != nil || configID == 0 {
		err = global.GVA_DB.Where("is_active = ?", true).
			Order("is_default DESC, created_at DESC").
			First(&aiConfig).Error
	}
	if err != nil {
		return errors.New("未找到可用的 AI 配置")
	}
	// Load the generation preset referenced by the conversation settings, if
	// any; a missing or unloadable preset is silently ignored.
	var preset *app.AIPreset
	var presetID uint
	if len(conversation.Settings) > 0 {
		var settings map[string]interface{}
		if err := json.Unmarshal(conversation.Settings, &settings); err == nil {
			if id, ok := settings["presetId"].(float64); ok {
				presetID = uint(id)
			}
		}
	}
	if presetID > 0 {
		var loadedPreset app.AIPreset
		if err := global.GVA_DB.First(&loadedPreset, presetID).Error; err == nil {
			preset = &loadedPreset
		}
	}
	// Build the system prompt from the character card; the preset's own
	// system prompt, when present, is appended after a blank line.
	systemPrompt := s.buildSystemPrompt(character)
	if preset != nil && preset.SystemPrompt != "" {
		systemPrompt = systemPrompt + "\n\n" + preset.SystemPrompt
	}
	apiMessages := s.buildAPIMessages(messages, systemPrompt)
	// Model priority: config default, then the conversation's model, then
	// the hard-coded "gpt-4" fallback.
	model := aiConfig.DefaultModel
	if model == "" {
		model = conversation.Model
	}
	if model == "" {
		model = "gpt-4"
	}
	// Dispatch to the provider-specific streaming call; chunks go straight
	// to streamChan and the full reply is returned for persistence.
	var fullContent string
	switch aiConfig.Provider {
	case "openai", "custom":
		fullContent, err = s.callOpenAIAPIStream(&aiConfig, model, apiMessages, preset, streamChan)
	case "anthropic":
		fullContent, err = s.callAnthropicAPIStream(&aiConfig, model, apiMessages, systemPrompt, preset, streamChan)
	default:
		return fmt.Errorf("不支持的 AI 提供商: %s", aiConfig.Provider)
	}
	if err != nil {
		return err
	}
	// Persist the regenerated reply; TokenCount is a rough bytes/4 estimate.
	assistantMessage := app.Message{
		ConversationID: conversationID,
		Role:           "assistant",
		Content:        fullContent,
		TokenCount:     len(fullContent) / 4,
	}
	if err = global.GVA_DB.Create(&assistantMessage).Error; err != nil {
		return err
	}
	global.GVA_DB.Model(&conversation).Updates(map[string]interface{}{
		"message_count": gorm.Expr("message_count + ?", 1),
		"token_count":   gorm.Expr("token_count + ?", assistantMessage.TokenCount),
	})
	// Signal completion before the deferred closes release both channels.
	doneChan <- true
	return nil
}
func ( s * ConversationService ) buildAPIMessages ( messages [ ] app . Message , systemPrompt string ) [ ] map [ string ] string {
apiMessages := make ( [ ] map [ string ] string , 0 , len ( messages ) + 1 )