diff --git a/backend/app/utils/llm_client.py b/backend/app/utils/llm_client.py
index 6db1548..6c1a81f 100644
--- a/backend/app/utils/llm_client.py
+++ b/backend/app/utils/llm_client.py
@@ -4,6 +4,7 @@ LLM客户端封装
 """
 import json
+import re
 from typing import Optional, Dict, Any, List
 from openai import OpenAI
 
 
@@ -61,7 +62,10 @@ class LLMClient:
             kwargs["response_format"] = response_format
 
         response = self.client.chat.completions.create(**kwargs)
-        return response.choices[0].message.content
+        content = response.choices[0].message.content
+        # Some models (e.g. MiniMax M2.5) embed reasoning inside <think>...</think>
+        # tags in the content; strip the tags and everything between them.
+        content = re.sub(r'<think>[\s\S]*?</think>', '', content).strip()
+        return content
 
     def chat_json(
         self,
@@ -86,6 +90,14 @@ class LLMClient:
             temperature=temperature,
             max_tokens=max_tokens,
             response_format={"type": "json_object"}
         )
-
-        return json.loads(response)
+        # Strip markdown code-fence markers (```json ... ```) that some models
+        # wrap around JSON output despite the json_object response format.
+        cleaned_response = response.strip()
+        cleaned_response = re.sub(r'^```(?:json)?\s*\n?', '', cleaned_response, flags=re.IGNORECASE)
+        cleaned_response = re.sub(r'\n?```\s*$', '', cleaned_response)
+        cleaned_response = cleaned_response.strip()
+
+        try:
+            return json.loads(cleaned_response)
+        except json.JSONDecodeError as e:
+            # Chain the original decode error so callers can see where parsing failed.
+            raise ValueError(f"LLM返回的JSON格式无效: {cleaned_response}") from e