feat: add web-search, image-gen, voice-tts tools to web-ui
Some checks are pending
CI / build-check-test (push) Waiting to run

- web-search: DuckDuckGo search with inline result cards
- image-gen: Venice AI image generation with inline preview + download
- voice-tts: Venice AI TTS with inline audio player
- All use correct ToolRenderer class pattern matching jae-web-ui API
This commit is contained in:
JAE 2026-03-25 18:32:10 +00:00
parent ff883ac79f
commit 29574c7c86
3 changed files with 304 additions and 0 deletions

View file

@@ -0,0 +1,108 @@
import type { AgentTool } from "@jaeswift/jae-agent-core";
import { Type } from "@sinclair/typebox";
import type { ToolResultMessage } from "@jaeswift/jae-ai";
import { html } from "lit";
import { Image } from "lucide";
import { registerToolRenderer, renderHeader } from "./renderer-registry.js";
import { getAppStorage } from "../storage/app-storage.js";
import type { ToolRenderer, ToolRenderResult } from "./types.js";
// TypeBox schema for the generate_image tool's arguments. The per-field
// descriptions are surfaced to the model so it can fill parameters correctly;
// defaults named in the descriptions are applied in execute(), not here.
const imageGenSchema = Type.Object({
  prompt: Type.String({ description: "Image generation prompt describing what to create" }),
  model: Type.Optional(Type.String({ description: "Venice image model (default: fluently-xl)" })),
  width: Type.Optional(Type.Number({ description: "Width in pixels (default: 1024)" })),
  height: Type.Optional(Type.Number({ description: "Height in pixels (default: 1024)" })),
  steps: Type.Optional(Type.Number({ description: "Inference steps (default: 20)" })),
});
// Result payload attached to the tool result so ImageGenRenderer can draw the
// image inline without re-fetching anything.
export interface ImageGenDetails {
  dataUrl?: string; // base64 PNG data URL; absent when generation failed
  model: string; // Venice model actually used (after defaulting)
  prompt: string; // prompt that produced the image
  width: number;
  height: number;
  error?: string; // set on failure: missing key, HTTP error, or empty response
}

// Tool-call arguments as seen by the renderer (mirrors imageGenSchema).
interface ImageGenParams {
  prompt: string;
  model?: string;
  width?: number;
  height?: number;
  steps?: number;
}
/**
 * Agent tool that generates an image via the Venice AI image API and returns a
 * base64 data URL in `details` for inline rendering (see ImageGenRenderer).
 *
 * All failure modes — missing API key, HTTP error, network failure/abort, or an
 * empty response — are reported as normal tool results with `details.error`
 * set, never thrown, so the agent loop can relay the message to the model.
 * (Previously a network-level fetch rejection escaped uncaught; web_search
 * already handled this, so this now matches that behavior.)
 */
export const imageGenTool: AgentTool<typeof imageGenSchema, ImageGenDetails> = {
  name: "generate_image",
  label: "Generate Image",
  description: "Generate an image using Venice AI image models. Displays inline in chat.",
  parameters: imageGenSchema,
  async execute(toolCallId, params, signal) {
    const { prompt, model = "fluently-xl", width = 1024, height = 1024, steps = 20 } = params;
    const apiKey = await getAppStorage().providerKeys.get("venice");
    if (!apiKey) {
      return {
        content: [{ type: "text", text: "Error: Venice API key not set. Add it in Settings > Providers & Models > Venice." }],
        details: { model, prompt, width, height, error: "No API key" },
      };
    }
    try {
      const res = await fetch("https://api.venice.ai/api/v1/image/generate", {
        method: "POST",
        headers: { "Content-Type": "application/json", Authorization: `Bearer ${apiKey}` },
        body: JSON.stringify({ model, prompt, width, height, steps, return_binary: false, safe_mode: false }),
        // NOTE(review): a caller-provided signal suppresses the 60s timeout
        // fallback entirely; if supported targets allow, consider
        // AbortSignal.any([signal, AbortSignal.timeout(60000)]).
        signal: signal ?? AbortSignal.timeout(60000),
      });
      if (!res.ok) {
        const err = await res.text();
        return {
          content: [{ type: "text", text: `Image generation failed (${res.status}): ${err}` }],
          details: { model, prompt, width, height, error: err },
        };
      }
      // Venice responds with { images: [<base64>, ...] }; assert only the
      // shape we actually read and verify the element is a string at runtime.
      const data = (await res.json()) as { images?: unknown[] };
      const first = data?.images?.[0];
      const b64 = typeof first === "string" ? first : undefined;
      if (!b64) {
        return {
          content: [{ type: "text", text: "No image returned from Venice API." }],
          details: { model, prompt, width, height, error: "No image data" },
        };
      }
      const dataUrl = `data:image/png;base64,${b64}`;
      return {
        content: [{ type: "text", text: `Image generated successfully. Model: ${model}, Size: ${width}x${height}` }],
        details: { dataUrl, model, prompt, width, height },
      };
    } catch (e: unknown) {
      // Network failure or abort: report instead of letting the rejection escape.
      const msg = e instanceof Error ? e.message : String(e);
      return {
        content: [{ type: "text", text: `Image generation failed: ${msg}` }],
        details: { model, prompt, width, height, error: msg },
      };
    }
  },
};
// Renders generate_image tool calls inline in chat: a progress header while
// the call is running, then the finished image with model/size metadata, a
// download link, and the originating prompt.
class ImageGenRenderer implements ToolRenderer<ImageGenParams, ImageGenDetails> {
  render(params: ImageGenParams | undefined, result: ToolResultMessage<ImageGenDetails> | undefined): ToolRenderResult {
    // No result yet => call still in flight; result.isError drives the header icon state.
    const state = result ? (result.isError ? "error" : "complete") : "inprogress";
    if (result?.details?.dataUrl) {
      const d = result.details;
      return {
        content: html`
          <div class="flex flex-col gap-3">
            ${renderHeader(state, Image, "Image Generated")}
            <img src=${d.dataUrl} alt=${d.prompt}
              class="rounded-lg max-w-full border border-border shadow-sm"
              style="max-height:512px;object-fit:contain;"
            />
            <div class="flex items-center justify-between text-xs text-muted-foreground">
              <span>${d.model} · ${d.width}×${d.height}</span>
              <a href=${d.dataUrl} download="generated.png" class="text-primary hover:underline"> Download</a>
            </div>
            <div class="text-xs text-foreground italic">${d.prompt}</div>
          </div>`,
        isCustom: false,
      };
    }
    // In-progress (or failed-without-dataUrl) view: header only, with a truncated prompt.
    return { content: renderHeader(state, Image, `Generating image: ${params?.prompt?.slice(0, 50) ?? "..."}`), isCustom: false };
  }
}
// Register the inline renderer under the tool's name so chat messages for
// "generate_image" calls use it instead of the default rendering.
registerToolRenderer("generate_image", new ImageGenRenderer());

// Factory kept for API symmetry with the other tool modules; the tool is a
// stateless singleton, so the same instance is returned every time.
export function createImageGenTool(): AgentTool<typeof imageGenSchema, ImageGenDetails> {
  return imageGenTool;
}

View file

@@ -0,0 +1,90 @@
import type { AgentTool } from "@jaeswift/jae-agent-core";
import { Type } from "@sinclair/typebox";
import type { ToolResultMessage } from "@jaeswift/jae-ai";
import { html } from "lit";
import { Volume2 } from "lucide";
import { registerToolRenderer, renderHeader } from "./renderer-registry.js";
import { getAppStorage } from "../storage/app-storage.js";
import type { ToolRenderer, ToolRenderResult } from "./types.js";
// TypeBox schema for the text_to_speech tool's arguments. Defaults named in
// the descriptions are applied in execute(), not enforced by the schema.
const ttsSchema = Type.Object({
  text: Type.String({ description: "Text to convert to speech" }),
  model: Type.Optional(Type.String({ description: "Venice TTS model (default: tts-kokoro)" })),
  voice: Type.Optional(Type.String({ description: "Voice ID (default: af_heart)" })),
});
// Result payload attached to the tool result so TTSRenderer can mount an
// inline <audio> player.
export interface TTSDetails {
  audioUrl?: string; // object URL for the generated audio blob; absent on failure
  model: string; // TTS model actually used (after defaulting)
  voice: string; // voice ID actually used (after defaulting)
  text: string; // input text that was spoken
  error?: string; // set on failure: missing key or HTTP error
}

// Tool-call arguments as seen by the renderer (mirrors ttsSchema).
interface TTSParams {
  text: string;
  model?: string;
  voice?: string;
}
/**
 * Agent tool that synthesizes speech via the Venice AI TTS endpoint. The audio
 * blob is exposed as an object URL in `details.audioUrl` for the inline
 * <audio> player rendered by TTSRenderer.
 *
 * All failures — missing API key, HTTP error, network failure/abort — are
 * returned as normal tool results with `details.error` set rather than thrown,
 * matching web_search. (Previously a network-level fetch rejection escaped
 * uncaught.)
 */
export const ttsTool: AgentTool<typeof ttsSchema, TTSDetails> = {
  name: "text_to_speech",
  label: "Text to Speech",
  description: "Convert text to speech using Venice AI TTS. Audio plays inline in chat.",
  parameters: ttsSchema,
  async execute(toolCallId, params, signal) {
    const { text, model = "tts-kokoro", voice = "af_heart" } = params;
    const apiKey = await getAppStorage().providerKeys.get("venice");
    if (!apiKey) {
      return {
        content: [{ type: "text", text: "Error: Venice API key not set." }],
        details: { model, voice, text, error: "No API key" },
      };
    }
    try {
      const res = await fetch("https://api.venice.ai/api/v1/audio/speech", {
        method: "POST",
        headers: { "Content-Type": "application/json", Authorization: `Bearer ${apiKey}` },
        body: JSON.stringify({ model, input: text, voice }),
        // NOTE(review): a caller-provided signal suppresses the 30s timeout
        // fallback entirely; consider AbortSignal.any if targets allow.
        signal: signal ?? AbortSignal.timeout(30000),
      });
      if (!res.ok) {
        const err = await res.text();
        return {
          content: [{ type: "text", text: `TTS failed (${res.status}): ${err}` }],
          details: { model, voice, text, error: err },
        };
      }
      const blob = await res.blob();
      // NOTE(review): this object URL is never revoked — it must outlive this
      // call because the renderer's <audio> element references it for the rest
      // of the session. Revoke on message disposal if/when a hook exists.
      const audioUrl = URL.createObjectURL(blob);
      return {
        content: [{ type: "text", text: `Speech generated. Model: ${model}, Voice: ${voice}` }],
        details: { audioUrl, model, voice, text },
      };
    } catch (e: unknown) {
      // Network failure or abort: report instead of letting the rejection escape.
      const msg = e instanceof Error ? e.message : String(e);
      return {
        content: [{ type: "text", text: `TTS failed: ${msg}` }],
        details: { model, voice, text, error: msg },
      };
    }
  },
};
// Renders text_to_speech tool calls inline in chat: a progress header while
// running, then an <audio> player with model/voice metadata and a truncated
// echo of the spoken text.
class TTSRenderer implements ToolRenderer<TTSParams, TTSDetails> {
  render(params: TTSParams | undefined, result: ToolResultMessage<TTSDetails> | undefined): ToolRenderResult {
    // No result yet => call still in flight; result.isError drives the header icon state.
    const state = result ? (result.isError ? "error" : "complete") : "inprogress";
    if (result?.details?.audioUrl) {
      const d = result.details;
      return {
        content: html`
          <div class="flex flex-col gap-2">
            ${renderHeader(state, Volume2, "Speech Generated")}
            <audio controls src=${d.audioUrl} class="w-full"></audio>
            <div class="text-xs text-muted-foreground">${d.model} · ${d.voice}</div>
            <div class="text-xs text-foreground italic">${d.text.length > 100 ? d.text.slice(0, 100) + "..." : d.text}</div>
          </div>`,
        isCustom: false,
      };
    }
    // In-progress (or failed-without-audioUrl) view: header only, with truncated input text.
    return { content: renderHeader(state, Volume2, `Speaking: ${params?.text?.slice(0, 50) ?? "..."}`), isCustom: false };
  }
}
// Register the inline renderer under the tool's name so chat messages for
// "text_to_speech" calls use it instead of the default rendering.
registerToolRenderer("text_to_speech", new TTSRenderer());

// Factory kept for API symmetry with the other tool modules; the tool is a
// stateless singleton, so the same instance is returned every time.
export function createTTSTool(): AgentTool<typeof ttsSchema, TTSDetails> {
  return ttsTool;
}

View file

@@ -0,0 +1,106 @@
import type { AgentTool } from "@jaeswift/jae-agent-core";
import { Type } from "@sinclair/typebox";
import type { ToolResultMessage } from "@jaeswift/jae-ai";
import { html } from "lit";
import { Globe } from "lucide";
import { registerToolRenderer, renderHeader } from "./renderer-registry.js";
import type { ToolRenderer, ToolRenderResult } from "./types.js";
// TypeBox schema for the web_search tool's arguments; the default limit is
// applied in execute(), not enforced by the schema.
const webSearchSchema = Type.Object({
  query: Type.String({ description: "Search query" }),
  limit: Type.Optional(Type.Number({ description: "Max results (default: 5)" })),
});
// One flattened search hit as shown in the renderer's result card.
export interface WebSearchResult {
  title: string;
  url: string;
  snippet: string;
}

// Result payload attached to the tool result so WebSearchRenderer can draw
// the result cards; `results` is empty when the search failed or found nothing.
export interface WebSearchDetails {
  results: WebSearchResult[];
  query: string;
  error?: string; // set when the fetch or the API call failed
}

// Tool-call arguments as seen by the renderer (mirrors webSearchSchema).
interface WebSearchParams {
  query: string;
  limit?: number;
}
/**
 * Query the DuckDuckGo Instant Answer API and flatten its response — abstract
 * answer, related topics (including nested `{ Topics: [...] }` groups), and
 * direct results — into a simple list capped at `limit`.
 *
 * @param query  raw (unencoded) search query
 * @param limit  maximum number of results to return
 * @param signal optional abort signal threaded through from the agent loop
 *               (backward-compatible addition; omitted means no cancellation)
 * @throws Error when the HTTP response status is not OK (caught by webSearchTool)
 */
async function fetchDuckDuckGo(query: string, limit: number, signal?: AbortSignal): Promise<WebSearchResult[]> {
  const encoded = encodeURIComponent(query);
  const res = await fetch(`https://api.duckduckgo.com/?q=${encoded}&format=json&no_redirect=1&no_html=1&skip_disambig=1`, { signal });
  if (!res.ok) throw new Error(`Search returned ${res.status}`);
  // The Instant Answer payload is loosely shaped; we only read a few fields,
  // each guarded by truthiness checks below.
  const data = await res.json() as any;
  const results: WebSearchResult[] = [];
  // Primary "abstract" answer, when present.
  if (data.AbstractText && data.AbstractURL) {
    results.push({ title: data.Heading || query, url: data.AbstractURL, snippet: data.AbstractText });
  }
  // Related topics may be flat entries or nested category groups.
  for (const topic of (data.RelatedTopics || [])) {
    if (results.length >= limit) break;
    if (topic.FirstURL && topic.Text) {
      results.push({ title: topic.Text.split(" - ")[0], url: topic.FirstURL, snippet: topic.Text });
    } else if (topic.Topics) {
      for (const sub of topic.Topics) {
        if (results.length >= limit) break;
        if (sub.FirstURL && sub.Text) results.push({ title: sub.Text.split(" - ")[0], url: sub.FirstURL, snippet: sub.Text });
      }
    }
  }
  // Direct results (rarely populated by this endpoint).
  for (const r of (data.Results || [])) {
    if (results.length >= limit) break;
    if (r.FirstURL && r.Text) results.push({ title: r.Title || r.Text, url: r.FirstURL, snippet: r.Text });
  }
  return results.slice(0, limit);
}

/**
 * Agent tool that searches the web via DuckDuckGo and returns numbered
 * title/url/snippet entries as text, plus structured `details` for the
 * inline result cards (see WebSearchRenderer). Failures are reported as
 * normal tool results with `details.error` set, never thrown.
 */
export const webSearchTool: AgentTool<typeof webSearchSchema, WebSearchDetails> = {
  name: "web_search",
  label: "Web Search",
  description: "Search the web for current information using DuckDuckGo.",
  parameters: webSearchSchema,
  async execute(toolCallId, params, signal) {
    const { query, limit = 5 } = params;
    try {
      // Fix: forward the abort signal (previously ignored), so cancelling the
      // agent run actually aborts the in-flight search request.
      const results = await fetchDuckDuckGo(query, limit, signal);
      const lines = results.map((r, i) => `[${i + 1}] ${r.title}\n${r.url}\n${r.snippet}`);
      const text = results.length === 0 ? `No results for: ${query}` : lines.join("\n\n");
      return { content: [{ type: "text", text }], details: { results, query } };
    } catch (err: unknown) {
      const msg = err instanceof Error ? err.message : String(err);
      return { content: [{ type: "text", text: `Search failed: ${msg}` }], details: { results: [], query, error: msg } };
    }
  },
};
// Renders web_search tool calls inline in chat: a progress header while the
// search is running, then one bordered card per result with a clickable title,
// the URL, and the snippet.
class WebSearchRenderer implements ToolRenderer<WebSearchParams, WebSearchDetails> {
  render(params: WebSearchParams | undefined, result: ToolResultMessage<WebSearchDetails> | undefined): ToolRenderResult {
    // No result yet => call still in flight; result.isError drives the header icon state.
    const state = result ? (result.isError ? "error" : "complete") : "inprogress";
    if (result?.details?.results?.length) {
      const details = result.details;
      return {
        content: html`
          <div class="flex flex-col gap-3">
            ${renderHeader(state, Globe, `Web Search: ${details.query}`)}
            <div class="flex flex-col gap-2">
              ${details.results.map((r) => html`
                <div class="flex flex-col gap-0.5 p-2 rounded border border-border bg-background">
                  <a href=${r.url} target="_blank" rel="noopener" class="text-sm font-medium text-primary hover:underline">${r.title}</a>
                  <span class="text-xs text-muted-foreground truncate">${r.url}</span>
                  <span class="text-xs text-foreground mt-1">${r.snippet}</span>
                </div>
              `)}
            </div>
          </div>`,
        isCustom: false,
      };
    }
    // In-progress, failed, or zero-result view: header only.
    return { content: renderHeader(state, Globe, `Searching: ${params?.query ?? "..."}`), isCustom: false };
  }
}
// Register the inline renderer under the tool's name so chat messages for
// "web_search" calls use it instead of the default rendering.
registerToolRenderer("web_search", new WebSearchRenderer());

// Factory kept for API symmetry with the other tool modules; the tool is a
// stateless singleton, so the same instance is returned every time.
export function createWebSearchTool(): AgentTool<typeof webSearchSchema, WebSearchDetails> {
  return webSearchTool;
}