diff --git a/api/app.py b/api/app.py
index f0cc7b4..24b1289 100644
--- a/api/app.py
+++ b/api/app.py
@@ -723,14 +723,17 @@ def venice_chat():
             json={
                 'model': venice_model,
                 'messages': messages,
-                'max_tokens': 512,
+                'max_tokens': 4096,
                 'temperature': 0.7
             },
-            timeout=30
+            timeout=60
         )
         resp.raise_for_status()
         result = resp.json()
-        reply = result['choices'][0]['message']['content']
+        reply = result['choices'][0]['message'].get('content', '') or ''
+        # Reasoning models may put output in reasoning_content
+        if not reply.strip():
+            reply = result['choices'][0]['message'].get('reasoning_content', '') or ''
         return jsonify({'reply': reply})
     except req.exceptions.RequestException as e:
         return jsonify({'error': f'Venice API error: {str(e)}'}), 502
diff --git a/api/sitrep_generator.py b/api/sitrep_generator.py
index 1849ff6..cad756c 100644
--- a/api/sitrep_generator.py
+++ b/api/sitrep_generator.py
@@ -320,14 +320,17 @@ def generate_with_venice(user_prompt):
                 {'role': 'system', 'content': SITREP_SYSTEM_PROMPT},
                 {'role': 'user', 'content': user_prompt}
             ],
-            'max_tokens': 2048,
+            'max_tokens': 8192,
             'temperature': 0.7
         },
-        timeout=60
+        timeout=120
     )
     resp.raise_for_status()
     result = resp.json()
-    content = result['choices'][0]['message']['content']
+    content = result['choices'][0]['message'].get('content', '') or ''
+    # Reasoning models may put output in reasoning_content
+    if not content.strip():
+        content = result['choices'][0]['message'].get('reasoning_content', '') or ''
     log.info(f'Venice AI response received ({len(content)} chars)')
     return content, venice_model
 except Exception as e: