feat: update Venice AI model to glm-4.7-flash-heretic, increase max_tokens and timeouts for reasoning model, inline SITREP source links

jae 2026-04-15 07:49:49 +00:00
parent 3d6a6cfdc9
commit 00e3d07743
2 changed files with 12 additions and 6 deletions

@@ -723,14 +723,17 @@ def venice_chat():
             json={
                 'model': venice_model,
                 'messages': messages,
-                'max_tokens': 512,
+                'max_tokens': 4096,
                 'temperature': 0.7
             },
-            timeout=30
+            timeout=60
         )
         resp.raise_for_status()
         result = resp.json()
-        reply = result['choices'][0]['message']['content']
+        reply = result['choices'][0]['message'].get('content', '') or ''
+        # Reasoning models may put output in reasoning_content
+        if not reply.strip():
+            reply = result['choices'][0]['message'].get('reasoning_content', '') or ''
         return jsonify({'reply': reply})
     except req.exceptions.RequestException as e:
         return jsonify({'error': f'Venice API error: {str(e)}'}), 502

@@ -320,14 +320,17 @@ def generate_with_venice(user_prompt):
                 {'role': 'system', 'content': SITREP_SYSTEM_PROMPT},
                 {'role': 'user', 'content': user_prompt}
             ],
-            'max_tokens': 2048,
+            'max_tokens': 8192,
             'temperature': 0.7
         },
-        timeout=60
+        timeout=120
     )
     resp.raise_for_status()
     result = resp.json()
-    content = result['choices'][0]['message']['content']
+    content = result['choices'][0]['message'].get('content', '') or ''
+    # Reasoning models may put output in reasoning_content
+    if not content.strip():
+        content = result['choices'][0]['message'].get('reasoning_content', '') or ''
     log.info(f'Venice AI response received ({len(content)} chars)')
     return content, venice_model
 except Exception as e:
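
Both hunks add the same fallback for reasoning models, whose final answer can arrive in the reasoning_content field while content is empty. As a design note, the duplicated extraction logic could be factored into a shared helper. Below is a minimal sketch assuming the OpenAI-compatible response shape used above; extract_reply is a hypothetical name and is not part of this commit.

# Hypothetical helper (not in this commit): shared extraction logic for
# both call sites, assuming an OpenAI-compatible chat-completions response.
def extract_reply(result):
    """Return the assistant text, falling back to reasoning_content when a
    reasoning model leaves the regular content field empty."""
    message = result['choices'][0]['message']
    reply = message.get('content', '') or ''
    if not reply.strip():
        # Reasoning models may emit their final output here instead
        reply = message.get('reasoning_content', '') or ''
    return reply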