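// Tests for the openai-completions streaming path: request payload shaping
// (tool_choice, strict mode, provider-specific reasoning options) and stream
// edge cases, exercised against a mocked `openai` client.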
import { Type } from "@sinclair/typebox";
import { beforeEach, describe, expect, it, vi } from "vitest";
import { getModel } from "../src/models.js";
import { streamSimple } from "../src/stream.js";
import type { Tool } from "../src/types.js";

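// Shared state for the fake OpenAI client. `vi.hoisted` makes it visible inside
// the hoisted `vi.mock` factory below; tests script the stream by setting
// `chunks`, and `lastParams` captures the most recent request payload.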
const mockState = vi.hoisted(() => ({
  lastParams: undefined as unknown,
  chunks: undefined as
    | Array<null | {
        id?: string;
        choices?: Array<{ delta: Record<string, unknown>; finish_reason: string | null; usage?: unknown }>;
        usage?: {
          prompt_tokens: number;
          completion_tokens: number;
          prompt_tokens_details: { cached_tokens: number };
          completion_tokens_details: { reasoning_tokens: number };
        };
      }>
    | undefined,
}));

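// Stub the `openai` package: the fake client's chat.completions.create records
// the request params and replays `mockState.chunks`, falling back to a single
// "stop" chunk with minimal usage numbers.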
vi.mock("openai", () => {
|
|
class FakeOpenAI {
|
|
chat = {
|
|
completions: {
|
|
create: async (params: unknown) => {
|
|
mockState.lastParams = params;
|
|
return {
|
|
async *[Symbol.asyncIterator]() {
|
|
const chunks = mockState.chunks ?? [
|
|
{
|
|
choices: [{ delta: {}, finish_reason: "stop" }],
|
|
usage: {
|
|
prompt_tokens: 1,
|
|
completion_tokens: 1,
|
|
prompt_tokens_details: { cached_tokens: 0 },
|
|
completion_tokens_details: { reasoning_tokens: 0 },
|
|
},
|
|
},
|
|
];
|
|
for (const chunk of chunks) {
|
|
yield chunk;
|
|
}
|
|
},
|
|
};
|
|
},
|
|
},
|
|
};
|
|
}
|
|
|
|
return { default: FakeOpenAI };
|
|
});
|
|
|
|
describe("openai-completions tool_choice", () => {
|
|
beforeEach(() => {
|
|
mockState.lastParams = undefined;
|
|
mockState.chunks = undefined;
|
|
});
|
|
|
|
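  // Each test reads the outgoing request from the optional `onPayload` hook,
  // falling back to the params captured by the mocked client.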
it("forwards toolChoice from simple options to payload", async () => {
|
|
const { compat: _compat, ...baseModel } = getModel("openai", "gpt-4o-mini")!;
|
|
const model = { ...baseModel, api: "openai-completions" } as const;
|
|
const tools: Tool[] = [
|
|
{
|
|
name: "ping",
|
|
description: "Ping tool",
|
|
parameters: Type.Object({
|
|
ok: Type.Boolean(),
|
|
}),
|
|
},
|
|
];
|
|
let payload: unknown;
|
|
|
|
await streamSimple(
|
|
model,
|
|
{
|
|
messages: [
|
|
{
|
|
role: "user",
|
|
content: "Call ping with ok=true",
|
|
timestamp: Date.now(),
|
|
},
|
|
],
|
|
tools,
|
|
},
|
|
{
|
|
apiKey: "test",
|
|
toolChoice: "required",
|
|
onPayload: (params: unknown) => {
|
|
payload = params;
|
|
},
|
|
} as unknown as Parameters<typeof streamSimple>[2],
|
|
).result();
|
|
|
|
const params = (payload ?? mockState.lastParams) as { tool_choice?: string; tools?: unknown[] };
|
|
expect(params.tool_choice).toBe("required");
|
|
expect(Array.isArray(params.tools)).toBe(true);
|
|
expect(params.tools?.length ?? 0).toBeGreaterThan(0);
|
|
});
|
|
|
|
it("omits strict when compat disables strict mode", async () => {
|
|
const { compat: _compat, ...baseModel } = getModel("openai", "gpt-4o-mini")!;
|
|
const model = {
|
|
...baseModel,
|
|
api: "openai-completions",
|
|
compat: { supportsStrictMode: false },
|
|
} as const;
|
|
const tools: Tool[] = [
|
|
{
|
|
name: "ping",
|
|
description: "Ping tool",
|
|
parameters: Type.Object({
|
|
ok: Type.Boolean(),
|
|
}),
|
|
},
|
|
];
|
|
let payload: unknown;
|
|
|
|
await streamSimple(
|
|
model,
|
|
{
|
|
messages: [
|
|
{
|
|
role: "user",
|
|
content: "Call ping with ok=true",
|
|
timestamp: Date.now(),
|
|
},
|
|
],
|
|
tools,
|
|
},
|
|
{
|
|
apiKey: "test",
|
|
onPayload: (params: unknown) => {
|
|
payload = params;
|
|
},
|
|
} as unknown as Parameters<typeof streamSimple>[2],
|
|
).result();
|
|
|
|
const params = (payload ?? mockState.lastParams) as { tools?: Array<{ function?: Record<string, unknown> }> };
|
|
const tool = params.tools?.[0]?.function;
|
|
expect(tool).toBeTruthy();
|
|
expect(tool?.strict).toBeUndefined();
|
|
expect("strict" in (tool ?? {})).toBe(false);
|
|
});
|
|
|
|
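  // Groq's qwen3 models use a non-standard reasoning_effort scale, so the
  // compat layer is expected to collapse a named level like "medium" to "default".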
it("maps groq qwen3 reasoning levels to default reasoning_effort", async () => {
|
|
const model = getModel("groq", "qwen/qwen3-32b")!;
|
|
let payload: unknown;
|
|
|
|
await streamSimple(
|
|
model,
|
|
{
|
|
messages: [
|
|
{
|
|
role: "user",
|
|
content: "Hi",
|
|
timestamp: Date.now(),
|
|
},
|
|
],
|
|
},
|
|
{
|
|
apiKey: "test",
|
|
reasoning: "medium",
|
|
onPayload: (params: unknown) => {
|
|
payload = params;
|
|
},
|
|
},
|
|
).result();
|
|
|
|
const params = (payload ?? mockState.lastParams) as { reasoning_effort?: string };
|
|
expect(params.reasoning_effort).toBe("default");
|
|
});
|
|
|
|
it("keeps normal reasoning_effort for groq models without compat mapping", async () => {
|
|
const model = getModel("groq", "openai/gpt-oss-20b")!;
|
|
let payload: unknown;
|
|
|
|
await streamSimple(
|
|
model,
|
|
{
|
|
messages: [
|
|
{
|
|
role: "user",
|
|
content: "Hi",
|
|
timestamp: Date.now(),
|
|
},
|
|
],
|
|
},
|
|
{
|
|
apiKey: "test",
|
|
reasoning: "medium",
|
|
onPayload: (params: unknown) => {
|
|
payload = params;
|
|
},
|
|
},
|
|
).result();
|
|
|
|
const params = (payload ?? mockState.lastParams) as { reasoning_effort?: string };
|
|
expect(params.reasoning_effort).toBe("medium");
|
|
});
|
|
|
|
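  // Scripted stream: a partial content chunk, then a finish_reason outside the
  // standard OpenAI set, which should surface as a stopReason of "error".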
it("maps non-standard provider finish_reason values to stopReason error", async () => {
|
|
mockState.chunks = [
|
|
{
|
|
choices: [{ delta: { content: "partial" }, finish_reason: null }],
|
|
},
|
|
{
|
|
choices: [{ delta: {}, finish_reason: "network_error" }],
|
|
usage: {
|
|
prompt_tokens: 1,
|
|
completion_tokens: 1,
|
|
prompt_tokens_details: { cached_tokens: 0 },
|
|
completion_tokens_details: { reasoning_tokens: 0 },
|
|
},
|
|
},
|
|
];
|
|
|
|
const model = getModel("zai", "glm-5")!;
|
|
const response = await streamSimple(
|
|
model,
|
|
{
|
|
messages: [
|
|
{
|
|
role: "user",
|
|
content: "Hi",
|
|
timestamp: Date.now(),
|
|
},
|
|
],
|
|
},
|
|
{ apiKey: "test" },
|
|
).result();
|
|
|
|
expect(response.stopReason).toBe("error");
|
|
expect(response.errorMessage).toBe("Provider finish_reason: network_error");
|
|
});
|
|
|
|
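  // Some OpenAI-compatible providers emit `null` chunks mid-stream; the handler
  // should skip them without failing or losing content.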
it("ignores null stream chunks from openai-compatible providers", async () => {
|
|
mockState.chunks = [
|
|
null,
|
|
{
|
|
id: "chatcmpl-test",
|
|
choices: [{ delta: { content: "OK" }, finish_reason: null }],
|
|
},
|
|
{
|
|
id: "chatcmpl-test",
|
|
choices: [{ delta: {}, finish_reason: "stop" }],
|
|
usage: {
|
|
prompt_tokens: 3,
|
|
completion_tokens: 1,
|
|
prompt_tokens_details: { cached_tokens: 0 },
|
|
completion_tokens_details: { reasoning_tokens: 0 },
|
|
},
|
|
},
|
|
];
|
|
|
|
const { compat: _compat, ...baseModel } = getModel("openai", "gpt-4o-mini")!;
|
|
const model = { ...baseModel, api: "openai-completions" } as const;
|
|
const response = await streamSimple(
|
|
model,
|
|
{
|
|
messages: [
|
|
{
|
|
role: "user",
|
|
content: "Reply with exactly OK",
|
|
timestamp: Date.now(),
|
|
},
|
|
],
|
|
},
|
|
{ apiKey: "test" },
|
|
).result();
|
|
|
|
expect(response.stopReason).toBe("stop");
|
|
expect(response.errorMessage).toBeUndefined();
|
|
expect(response.responseId).toBe("chatcmpl-test");
|
|
expect(response.usage.totalTokens).toBe(4);
|
|
expect(response.content).toEqual([{ type: "text", text: "OK" }]);
|
|
});
|
|
|
|
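  // OpenRouter takes a nested `reasoning: { effort }` object rather than the
  // flat `reasoning_effort` field used by the stock Completions API.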
it("uses OpenRouter reasoning object instead of reasoning_effort", async () => {
|
|
const model = getModel("openrouter", "deepseek/deepseek-r1")!;
|
|
let payload: unknown;
|
|
|
|
await streamSimple(
|
|
model,
|
|
{
|
|
messages: [
|
|
{
|
|
role: "user",
|
|
content: "Hi",
|
|
timestamp: Date.now(),
|
|
},
|
|
],
|
|
},
|
|
{
|
|
apiKey: "test",
|
|
reasoning: "high",
|
|
onPayload: (params: unknown) => {
|
|
payload = params;
|
|
},
|
|
},
|
|
).result();
|
|
|
|
const params = (payload ?? mockState.lastParams) as {
|
|
reasoning?: { effort?: string };
|
|
reasoning_effort?: string;
|
|
};
|
|
expect(params.reasoning).toEqual({ effort: "high" });
|
|
expect(params.reasoning_effort).toBeUndefined();
|
|
});
|
|
});
|