🎨 Polish the frontend conversation page && improve AI streaming

Signed-off-by: Echo <1711788888@qq.com>
2026-02-27 22:50:26 +08:00
parent f4e166c5ee
commit 689e8af3df
7 changed files with 721 additions and 320 deletions

View File

@@ -202,6 +202,83 @@ func (a *ConversationApi) GetMessageList(c *gin.Context) {
commonResponse.OkWithData(resp, c)
}
// RegenerateMessage
// @Tags AppConversation
// @Summary Regenerate the last AI reply
// @Produce application/json
// @Param id path int true "Conversation ID"
// @Param stream query bool false "whether to stream the response"
// @Success 200 {object} commonResponse.Response{data=response.MessageResponse} "regenerated successfully"
// @Router /app/conversation/:id/regenerate [post]
// @Security ApiKeyAuth
func (a *ConversationApi) RegenerateMessage(c *gin.Context) {
userID := common.GetAppUserID(c)
conversationID, err := strconv.ParseUint(c.Param("id"), 10, 32)
if err != nil {
commonResponse.FailWithMessage("无效的对话ID", c)
return
}
if c.Query("stream") == "true" {
a.regenerateMessageStream(c, userID, uint(conversationID))
return
}
resp, err := service.ServiceGroupApp.AppServiceGroup.ConversationService.RegenerateMessage(userID, uint(conversationID))
if err != nil {
global.GVA_LOG.Error("重新生成消息失败", zap.Error(err))
commonResponse.FailWithMessage(err.Error(), c)
return
}
commonResponse.OkWithData(resp, c)
}
func (a *ConversationApi) regenerateMessageStream(c *gin.Context, userID, conversationID uint) {
// Check for SSE support before setting headers or starting the worker,
// so an unsupported writer does not leak a blocked producer goroutine.
flusher, ok := c.Writer.(http.Flusher)
if !ok {
commonResponse.FailWithMessage("streaming is not supported", c)
return
}
c.Header("Content-Type", "text/event-stream")
c.Header("Cache-Control", "no-cache")
c.Header("Connection", "keep-alive")
c.Header("X-Accel-Buffering", "no")
streamChan := make(chan string, 100)
errorChan := make(chan error, 1)
doneChan := make(chan bool, 1)
go func() {
if err := service.ServiceGroupApp.AppServiceGroup.ConversationService.RegenerateMessageStream(
userID, conversationID, streamChan, doneChan,
); err != nil {
errorChan <- err
}
}()
for {
select {
case chunk, more := <-streamChan:
if !more {
// The service closed the stream; a nil channel never fires, so stop
// selecting on it and wait for the done/error signal instead of
// spinning on zero values from the closed channel.
streamChan = nil
continue
}
// Note: assumes chunks contain no raw newlines; multi-line data would
// need a "data: " prefix per line under the SSE spec.
fmt.Fprintf(c.Writer, "event: message\ndata: %s\n\n", chunk)
flusher.Flush()
case err := <-errorChan:
fmt.Fprintf(c.Writer, "event: error\ndata: %s\n\n", err.Error())
flusher.Flush()
return
case success, more := <-doneChan:
if more && success {
fmt.Fprint(c.Writer, "event: done\ndata: \n\n")
flusher.Flush()
return
}
// doneChan closed without a success signal: the worker is about to
// deliver an error, so wait for it.
err := <-errorChan
fmt.Fprintf(c.Writer, "event: error\ndata: %s\n\n", err.Error())
flusher.Flush()
return
case <-c.Request.Context().Done():
// Client disconnected. The worker may keep producing into the buffered
// channel until it fills; stopping it promptly would require passing
// the request context into the service.
return
}
}
}
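For orientation, the stream carries three SSE event types: message (a content chunk), error, and done. A minimal Go client sketch for this endpoint follows; the base URL, conversation ID, and x-token header are placeholder assumptions, not part of this commit:

	package main

	import (
		"bufio"
		"fmt"
		"net/http"
		"strings"
	)

	func main() {
		// Hypothetical host and conversation ID; adjust to your deployment.
		req, _ := http.NewRequest("POST",
			"http://localhost:8888/app/conversation/42/regenerate?stream=true", nil)
		req.Header.Set("x-token", "<jwt>") // placeholder auth header

		resp, err := http.DefaultClient.Do(req)
		if err != nil {
			panic(err)
		}
		defer resp.Body.Close()

		var event string
		scanner := bufio.NewScanner(resp.Body)
		for scanner.Scan() {
			line := scanner.Text()
			switch {
			case strings.HasPrefix(line, "event: "):
				event = strings.TrimPrefix(line, "event: ")
			case strings.HasPrefix(line, "data: "):
				data := strings.TrimPrefix(line, "data: ")
				switch event {
				case "message":
					fmt.Print(data) // append the chunk to the reply being built
				case "error":
					fmt.Println("\nstream error:", data)
					return
				case "done":
					fmt.Println("\nstream finished")
					return
				}
			}
		}
	}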
// SendMessage
// @Tags AppConversation
// @Summary Send a message

View File

@@ -1,9 +0,0 @@
-- Fix the ai_world_info table structure
-- If the table still has the old name column, drop it and recreate the table
-- Drop the old tables (if they exist)
DROP TABLE IF EXISTS ai_character_world_info CASCADE;
DROP TABLE IF EXISTS ai_world_info CASCADE;
-- The tables will be recreated by GORM AutoMigrate
-- Just restart the server

View File

@@ -21,5 +21,6 @@ func (r *ConversationRouter) InitConversationRouter(Router *gin.RouterGroup) {
conversationRouter.DELETE(":id", conversationApi.DeleteConversation) // 删除对话
conversationRouter.GET(":id/messages", conversationApi.GetMessageList) // 获取消息列表
conversationRouter.POST(":id/message", conversationApi.SendMessage) // 发送消息
conversationRouter.POST(":id/regenerate", conversationApi.RegenerateMessage) // 重新生成消息
}
}

View File

@@ -657,8 +657,31 @@ func (s *ConversationService) SendMessageStream(userID, conversationID uint, req
return errors.New("未找到可用的 AI 配置")
}
// Build the system prompt and message list
// Load the preset
var streamPreset *app.AIPreset
var streamPresetID uint
if len(conversation.Settings) > 0 {
var settings map[string]interface{}
if err := json.Unmarshal(conversation.Settings, &settings); err == nil {
if id, ok := settings["presetId"].(float64); ok {
streamPresetID = uint(id)
}
}
}
if streamPresetID > 0 {
var loadedPreset app.AIPreset
if err := global.GVA_DB.First(&loadedPreset, streamPresetID).Error; err == nil {
streamPreset = &loadedPreset
global.GVA_LOG.Info(fmt.Sprintf("[流式传输] 使用预设: %s (Temperature: %.2f)", streamPreset.Name, streamPreset.Temperature))
global.GVA_DB.Model(streamPreset).Update("use_count", gorm.Expr("use_count + ?", 1))
}
}
// Build the system prompt (with the preset appended)
systemPrompt := s.buildSystemPrompt(character)
if streamPreset != nil && streamPreset.SystemPrompt != "" {
systemPrompt = systemPrompt + "\n\n" + streamPreset.SystemPrompt
}
apiMessages := s.buildAPIMessages(messages, systemPrompt)
// Log the full payload sent to the AI (streaming)
@@ -685,9 +708,9 @@ func (s *ConversationService) SendMessageStream(userID, conversationID uint, req
var fullContent string
switch aiConfig.Provider {
case "openai", "custom":
fullContent, err = s.callOpenAIAPIStream(&aiConfig, model, apiMessages, streamChan)
fullContent, err = s.callOpenAIAPIStream(&aiConfig, model, apiMessages, streamPreset, streamChan)
case "anthropic":
fullContent, err = s.callAnthropicAPIStream(&aiConfig, model, apiMessages, systemPrompt, streamChan)
fullContent, err = s.callAnthropicAPIStream(&aiConfig, model, apiMessages, systemPrompt, streamPreset, streamChan)
default:
return fmt.Errorf("不支持的 AI 提供商: %s", aiConfig.Provider)
}
@@ -724,7 +747,7 @@ func (s *ConversationService) SendMessageStream(userID, conversationID uint, req
}
// callOpenAIAPIStream calls the OpenAI API with streaming enabled
func (s *ConversationService) callOpenAIAPIStream(config *app.AIConfig, model string, messages []map[string]string, streamChan chan string) (string, error) {
func (s *ConversationService) callOpenAIAPIStream(config *app.AIConfig, model string, messages []map[string]string, preset *app.AIPreset, streamChan chan string) (string, error) {
client := &http.Client{Timeout: 120 * time.Second}
if model == "" {
@@ -734,15 +757,53 @@ func (s *ConversationService) callOpenAIAPIStream(config *app.AIConfig, model st
model = "gpt-4"
}
// Apply preset parameters
temperature := 0.7
maxTokens := 2000
var topP *float64
var frequencyPenalty *float64
var presencePenalty *float64
var stopSequences []string
if preset != nil {
temperature = preset.Temperature
maxTokens = preset.MaxTokens
if preset.TopP > 0 {
topP = &preset.TopP
}
if preset.FrequencyPenalty != 0 {
frequencyPenalty = &preset.FrequencyPenalty
}
if preset.PresencePenalty != 0 {
presencePenalty = &preset.PresencePenalty
}
if len(preset.StopSequences) > 0 {
// best effort: invalid JSON simply leaves stopSequences empty
_ = json.Unmarshal(preset.StopSequences, &stopSequences)
}
}
// Build the request body with streaming enabled
requestBody := map[string]interface{}{
"model": model,
"messages": messages,
"temperature": 0.7,
"max_tokens": 2000,
"temperature": temperature,
"max_tokens": maxTokens,
"stream": true,
}
if topP != nil {
requestBody["top_p"] = *topP
}
if frequencyPenalty != nil {
requestBody["frequency_penalty"] = *frequencyPenalty
}
if presencePenalty != nil {
requestBody["presence_penalty"] = *presencePenalty
}
if len(stopSequences) > 0 {
requestBody["stop"] = stopSequences
}
bodyBytes, err := json.Marshal(requestBody)
if err != nil {
return "", fmt.Errorf("序列化请求失败: %v", err)
@@ -815,7 +876,7 @@ func (s *ConversationService) callOpenAIAPIStream(config *app.AIConfig, model st
}
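For reference, once a preset with every optional field set is applied, the request body assembled above would look roughly like this; the values are illustrative, not from the commit:

	// Illustrative only: the OpenAI streaming request body with a full preset.
	requestBody := map[string]interface{}{
		"model":             "gpt-4",
		"messages":          messages,
		"temperature":       0.9,  // preset.Temperature
		"max_tokens":        4096, // preset.MaxTokens
		"stream":            true,
		"top_p":             0.95,                // sent only when preset.TopP > 0
		"frequency_penalty": 0.3,                 // sent only when non-zero
		"presence_penalty":  0.2,                 // sent only when non-zero
		"stop":              []string{"\nUser:"}, // decoded from preset.StopSequences
	}

Omitting the optional keys entirely, rather than sending zero values, lets the provider fall back to its own defaults, which is why each field above is guarded.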
// callAnthropicAPIStream calls the Anthropic API with streaming enabled
func (s *ConversationService) callAnthropicAPIStream(config *app.AIConfig, model string, messages []map[string]string, systemPrompt string, streamChan chan string) (string, error) {
func (s *ConversationService) callAnthropicAPIStream(config *app.AIConfig, model string, messages []map[string]string, systemPrompt string, preset *app.AIPreset, streamChan chan string) (string, error) {
client := &http.Client{Timeout: 120 * time.Second}
if model == "" {
@@ -833,14 +894,43 @@ func (s *ConversationService) callAnthropicAPIStream(config *app.AIConfig, model
}
}
// Apply preset parameters
maxTokens := 2000
var temperature *float64
var topP *float64
var stopSequences []string
if preset != nil {
maxTokens = preset.MaxTokens
if preset.Temperature > 0 {
temperature = &preset.Temperature
}
if preset.TopP > 0 {
topP = &preset.TopP
}
if len(preset.StopSequences) > 0 {
// best effort: invalid JSON simply leaves stopSequences empty
_ = json.Unmarshal(preset.StopSequences, &stopSequences)
}
}
requestBody := map[string]interface{}{
"model": model,
"messages": apiMessages,
"system": systemPrompt,
"max_tokens": 2000,
"max_tokens": maxTokens,
"stream": true,
}
if temperature != nil {
requestBody["temperature"] = *temperature
}
if topP != nil {
requestBody["top_p"] = *topP
}
if len(stopSequences) > 0 {
requestBody["stop_sequences"] = stopSequences
}
bodyBytes, err := json.Marshal(requestBody)
if err != nil {
return "", fmt.Errorf("序列化请求失败: %v", err)
@@ -910,6 +1000,205 @@ func (s *ConversationService) callAnthropicAPIStream(config *app.AIConfig, model
return fullContent.String(), nil
}
// RegenerateMessage regenerates the last AI reply (non-streaming)
func (s *ConversationService) RegenerateMessage(userID, conversationID uint) (*response.MessageResponse, error) {
var conversation app.Conversation
err := global.GVA_DB.Where("id = ? AND user_id = ?", conversationID, userID).First(&conversation).Error
if err != nil {
if errors.Is(err, gorm.ErrRecordNotFound) {
return nil, errors.New("对话不存在或无权访问")
}
return nil, err
}
var character app.AICharacter
err = global.GVA_DB.Where("id = ?", conversation.CharacterID).First(&character).Error
if err != nil {
return nil, errors.New("角色卡不存在")
}
// Delete the most recent AI reply
var lastAssistantMsg app.Message
if err = global.GVA_DB.Where("conversation_id = ? AND role = ?", conversationID, "assistant").
Order("created_at DESC").First(&lastAssistantMsg).Error; err == nil {
global.GVA_DB.Delete(&lastAssistantMsg)
global.GVA_DB.Model(&conversation).Updates(map[string]interface{}{
"message_count": gorm.Expr("GREATEST(message_count - 1, 0)"),
"token_count": gorm.Expr("GREATEST(token_count - ?, 0)", lastAssistantMsg.TokenCount),
})
}
// Fetch the message history that remains after the deletion
var messages []app.Message
err = global.GVA_DB.Where("conversation_id = ?", conversationID).
Order("created_at DESC").Limit(10).Find(&messages).Error
if err != nil {
return nil, err
}
if len(messages) == 0 {
return nil, errors.New("没有可用的消息历史")
}
// The query returned newest-first; reverse into chronological order
for i, j := 0, len(messages)-1; i < j; i, j = i+1, j-1 {
messages[i], messages[j] = messages[j], messages[i]
}
aiResponse, err := s.callAIService(conversation, character, messages)
if err != nil {
return nil, err
}
assistantMessage := app.Message{
ConversationID: conversationID,
Role: "assistant",
Content: aiResponse,
TokenCount: len(aiResponse) / 4, // rough estimate: ~4 bytes per token
}
if err = global.GVA_DB.Create(&assistantMessage).Error; err != nil {
return nil, err
}
global.GVA_DB.Model(&conversation).Updates(map[string]interface{}{
"message_count": gorm.Expr("message_count + ?", 1),
"token_count": gorm.Expr("token_count + ?", assistantMessage.TokenCount),
})
resp := response.ToMessageResponse(&assistantMessage)
return &resp, nil
}
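A note on that TokenCount estimate: len() counts bytes, and the ÷4 rule of thumb (~4 bytes per token) is tuned for ASCII text. For Chinese replies, where each rune is 3 bytes and often a whole token or more by itself, a rune-aware estimate would be closer. A hypothetical refinement, not part of this commit:

	// estimateTokens weights CJK runes like four ASCII characters (≈1 token each).
	// The 0x2E80 cutoff (start of the CJK blocks) is a crude assumption.
	func estimateTokens(s string) int {
		weight := 0
		for _, r := range s {
			if r >= 0x2E80 {
				weight += 4 // ≈1 token per CJK rune
			} else {
				weight++ // ≈1 token per 4 ASCII characters
			}
		}
		return weight / 4
	}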
// RegenerateMessageStream regenerates the last AI reply and streams the result
func (s *ConversationService) RegenerateMessageStream(userID, conversationID uint, streamChan chan string, doneChan chan bool) error {
defer close(streamChan)
defer close(doneChan)
var conversation app.Conversation
err := global.GVA_DB.Where("id = ? AND user_id = ?", conversationID, userID).First(&conversation).Error
if err != nil {
if errors.Is(err, gorm.ErrRecordNotFound) {
return errors.New("对话不存在或无权访问")
}
return err
}
var character app.AICharacter
err = global.GVA_DB.Where("id = ?", conversation.CharacterID).First(&character).Error
if err != nil {
return errors.New("角色卡不存在")
}
// Delete the most recent AI reply
var lastAssistantMsg app.Message
if err = global.GVA_DB.Where("conversation_id = ? AND role = ?", conversationID, "assistant").
Order("created_at DESC").First(&lastAssistantMsg).Error; err == nil {
global.GVA_DB.Delete(&lastAssistantMsg)
global.GVA_DB.Model(&conversation).Updates(map[string]interface{}{
"message_count": gorm.Expr("GREATEST(message_count - 1, 0)"),
"token_count": gorm.Expr("GREATEST(token_count - ?, 0)", lastAssistantMsg.TokenCount),
})
}
// Fetch the message history that remains after the deletion
var messages []app.Message
err = global.GVA_DB.Where("conversation_id = ?", conversationID).
Order("created_at DESC").Limit(10).Find(&messages).Error
if err != nil {
return err
}
if len(messages) == 0 {
return errors.New("没有可用的消息历史")
}
// The query returned newest-first; reverse into chronological order
for i, j := 0, len(messages)-1; i < j; i, j = i+1, j-1 {
messages[i], messages[j] = messages[j], messages[i]
}
// Load the AI config
var aiConfig app.AIConfig
var configID uint
if len(conversation.Settings) > 0 {
var settings map[string]interface{}
if err := json.Unmarshal(conversation.Settings, &settings); err == nil {
if id, ok := settings["aiConfigId"].(float64); ok {
configID = uint(id)
}
}
}
if configID > 0 {
err = global.GVA_DB.Where("id = ? AND is_active = ?", configID, true).First(&aiConfig).Error
}
if err != nil || configID == 0 {
err = global.GVA_DB.Where("is_active = ?", true).
Order("is_default DESC, created_at DESC").
First(&aiConfig).Error
}
if err != nil {
return errors.New("未找到可用的 AI 配置")
}
// Load the preset
var preset *app.AIPreset
var presetID uint
if len(conversation.Settings) > 0 {
var settings map[string]interface{}
if err := json.Unmarshal(conversation.Settings, &settings); err == nil {
if id, ok := settings["presetId"].(float64); ok {
presetID = uint(id)
}
}
}
if presetID > 0 {
var loadedPreset app.AIPreset
if err := global.GVA_DB.First(&loadedPreset, presetID).Error; err == nil {
preset = &loadedPreset
}
}
systemPrompt := s.buildSystemPrompt(character)
if preset != nil && preset.SystemPrompt != "" {
systemPrompt = systemPrompt + "\n\n" + preset.SystemPrompt
}
apiMessages := s.buildAPIMessages(messages, systemPrompt)
model := aiConfig.DefaultModel
if model == "" {
model = conversation.Model
}
if model == "" {
model = "gpt-4"
}
var fullContent string
switch aiConfig.Provider {
case "openai", "custom":
fullContent, err = s.callOpenAIAPIStream(&aiConfig, model, apiMessages, preset, streamChan)
case "anthropic":
fullContent, err = s.callAnthropicAPIStream(&aiConfig, model, apiMessages, systemPrompt, preset, streamChan)
default:
return fmt.Errorf("不支持的 AI 提供商: %s", aiConfig.Provider)
}
if err != nil {
return err
}
assistantMessage := app.Message{
ConversationID: conversationID,
Role: "assistant",
Content: fullContent,
TokenCount: len(fullContent) / 4, // rough estimate: ~4 bytes per token
}
if err = global.GVA_DB.Create(&assistantMessage).Error; err != nil {
return err
}
global.GVA_DB.Model(&conversation).Updates(map[string]interface{}{
"message_count": gorm.Expr("message_count + ?", 1),
"token_count": gorm.Expr("token_count + ?", assistantMessage.TokenCount),
})
doneChan <- true
return nil
}
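The channel contract here is worth spelling out: chunks arrive on streamChan, a true on doneChan signals success, both channels are closed by the deferred calls when the service returns, and any error is delivered by whichever goroutine invoked the service. A minimal non-HTTP consumer illustrating that contract (the harness itself is hypothetical; it assumes the strings package is imported):

	// consume drives RegenerateMessageStream outside the SSE handler and collects
	// the full reply. range exits when the service closes streamChan; the error
	// channel then reports success or failure exactly once.
	func consume(svc *ConversationService, userID, convID uint) (string, error) {
		streamChan := make(chan string, 100)
		doneChan := make(chan bool, 1)
		errChan := make(chan error, 1)

		go func() {
			errChan <- svc.RegenerateMessageStream(userID, convID, streamChan, doneChan)
		}()

		var reply strings.Builder
		for chunk := range streamChan {
			reply.WriteString(chunk)
		}
		if err := <-errChan; err != nil {
			return "", err
		}
		return reply.String(), nil
	}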
func (s *ConversationService) buildAPIMessages(messages []app.Message, systemPrompt string) []map[string]string {
apiMessages := make([]map[string]string, 0, len(messages)+1)