Skip to content
Open
Show file tree
Hide file tree
Changes from all commits
Commits
File filter

Filter by extension

Filter by extension

Conversations
Failed to load comments.
Loading
Jump to
Jump to file
Failed to load files.
Loading
Diff view
Diff view
6 changes: 6 additions & 0 deletions CHANGELOG.md
Original file line number Diff line number Diff line change
@@ -1,5 +1,11 @@
# Changelog

## Unreleased

### Features

- **ai:** add `openai-compatible` adapter + docs/tests for generic OpenAI-compatible gateways (Groq, Cerebras, Vercel AI Gateway) and pin Vitest configs per workspace.

## [15.0.2](///compare/15.0.1...15.0.2) (2025-11-23)

### Features
Expand Down
24 changes: 24 additions & 0 deletions README.md
Original file line number Diff line number Diff line change
Expand Up @@ -284,6 +284,29 @@ console.log(result.responseText, result.confidenceScore);
- [**API Reference**](https://github.com/ax-llm/ax/blob/main/docs/API.md) -
Complete documentation

## OpenAI-Compatible Providers

Many platforms expose an OpenAI-compatible API (Groq, Cerebras, Fireworks, Vercel AI Gateway, custom proxies, etc.). Configure them with the new `openai-compatible` provider:

```typescript
const llm = ai({
name: "openai-compatible",
apiKey: process.env.AI_COMPAT_API_KEY!,
endpoint: process.env.AI_COMPAT_API_URL!, // e.g. https://api.groq.com/openai/v1
headers: { "x-gateway-name": "prod-cluster" }, // optional vendor headers
config: {
model: process.env.AI_COMPAT_MODEL ?? "groq/llama3-70b-8192",
stream: false,
},
});
```

- **Groq:** set `endpoint` to `https://api.groq.com/openai/v1` and avoid unsupported params such as `logit_bias`, `logprobs`, `messages[].name`, or `n` values other than `1`.
- **Cerebras:** use `https://api.cerebras.ai/v1` and omit `frequency_penalty`, `presence_penalty`, `logit_bias`, and `service_tier`. Pass vendor-specific flags via `extra_body` (see their docs).
- **Vercel AI Gateway / custom proxies:** point `endpoint` at the gateway URL (e.g., `https://ai-gateway.vercel.sh/v1` for Vercel, or a Cloudflare AI Gateway endpoint such as `https://gateway.ai.cloudflare.com/.../openai`) and add any routing headers required by your setup.

Set `AI_COMPAT_API_KEY` (or reuse `AI_GATEWAY_API_KEY`) plus `AI_COMPAT_API_URL` before running examples like `npm run tsx src/examples/openai-compatible.ts`.

## Examples

Run any example:
Expand All @@ -299,6 +322,7 @@ OPENAI_APIKEY=your-key npm run tsx ./src/examples/[example-name].ts
- [agent.ts](src/examples/agent.ts) - Multi-agent collaboration
- [streaming1.ts](src/examples/streaming1.ts) - Real-time streaming responses
- [multi-modal.ts](src/examples/multi-modal.ts) - Image + text processing
- [openai-compatible.ts](src/examples/openai-compatible.ts) - Connect to Groq, Cerebras, Vercel AI Gateway, or custom OpenAI-compatible endpoints

### Production Patterns

Expand Down
16 changes: 16 additions & 0 deletions src/ax/ai/integration.test.ts
Original file line number Diff line number Diff line change
Expand Up @@ -42,4 +42,20 @@ describe('AI Factory Integration', () => {
expect((llm as any).ai.apiURL).toBe('https://api.openai.com/v1');
});
});

// Smoke-tests the `openai-compatible` branch of the ai() factory: endpoint,
// default display name, and static headers must flow through to the wrapped
// AxAIOpenAICompatible instance.
describe('OpenAI-compatible provider', () => {
  it('configures endpoint and headers', async () => {
    const llm = ai({
      name: 'openai-compatible',
      apiKey: 'compat-key',
      endpoint: 'https://api.compat.test/v1',
      headers: { 'x-compat-provider': 'demo' },
    });

    expect((llm as any).ai.apiURL).toBe('https://api.compat.test/v1');
    expect((llm as any).ai.getName()).toBe('OpenAI-Compatible');
    const headers = await (llm as any).ai.headers();
    expect(headers['x-compat-provider']).toBe('demo');
  });
});
});
61 changes: 61 additions & 0 deletions src/ax/ai/openai-compatible/api.test.ts
Original file line number Diff line number Diff line change
@@ -0,0 +1,61 @@
import { describe, expect, it } from 'vitest';

import { AxAIOpenAICompatible } from './api.js';

describe('AxAIOpenAICompatible', () => {
  // Constructor must fail fast on a blank endpoint instead of issuing
  // requests against an empty base URL.
  it('throws when endpoint is missing', () => {
    expect(
      () =>
        new AxAIOpenAICompatible({
          name: 'openai-compatible',
          apiKey: 'test',
          // @ts-expect-error testing runtime validation
          endpoint: '',
        })
    ).toThrow('OpenAI-compatible endpoint not set');
  });

  // Endpoint, static headers, and display name must all flow through to the
  // underlying base implementation; headers must include the bearer token.
  it('sets custom endpoint, headers, and provider name', async () => {
    const llm = new AxAIOpenAICompatible({
      name: 'openai-compatible',
      apiKey: 'test',
      endpoint: 'https://api.example.com/v1',
      headers: { 'x-provider': 'custom' },
      providerName: 'Example Gateway',
    });

    expect((llm as any).apiURL).toBe('https://api.example.com/v1');
    expect(llm.getName()).toBe('Example Gateway');

    const headers = await (llm as any).headers();
    expect(headers).toMatchObject({
      Authorization: 'Bearer test',
      'x-provider': 'custom',
    });
  });

  // A caller-supplied `supportFor` must fully replace the built-in
  // capability defaults (which advertise functions/streaming as true).
  it('allows overriding support metadata', () => {
    const llm = new AxAIOpenAICompatible({
      name: 'openai-compatible',
      apiKey: 'test',
      endpoint: 'https://api.example.com/v1',
      supportFor: () => ({
        functions: false,
        streaming: false,
        media: {
          images: { supported: false, formats: [] },
          audio: { supported: false, formats: [] },
          files: { supported: false, formats: [], uploadMethod: 'none' },
          urls: { supported: false, webSearch: false, contextFetching: false },
        },
        caching: { supported: false, types: [] },
        thinking: false,
        multiTurn: false,
      }),
    });

    const features = llm.getFeatures('any-model');
    expect(features.functions).toBe(false);
    expect(features.streaming).toBe(false);
  });
});
116 changes: 116 additions & 0 deletions src/ax/ai/openai-compatible/api.ts
Original file line number Diff line number Diff line change
@@ -0,0 +1,116 @@
import type { AxAIFeatures } from '../base.js';
import type { AxModelInfo } from '../types.js';
import {
AxAIOpenAIBase,
type AxAIOpenAIBaseArgs,
axAIOpenAIDefaultConfig,
} from '../openai/api.js';
import type {
AxAIOpenAIChatRequest,
AxAIOpenAIConfig,
} from '../openai/chat_types.js';

/** Config shape for the generic adapter: the stock OpenAI config, keyed by plain string model names. */
export type AxAIOpenAICompatibleConfig = AxAIOpenAIConfig<string, string>;

/**
 * Constructor args for the `openai-compatible` provider.
 *
 * Reuses the OpenAI base args but fixes `name` to `'openai-compatible'`,
 * makes `endpoint` mandatory (there is no sensible default base URL for a
 * generic gateway), and relaxes `config`/`modelInfo`/`supportFor` into
 * optional overrides.
 */
export type AxAIOpenAICompatibleArgs<TModelKey = string> = {
  name: 'openai-compatible';
} & Omit<
  AxAIOpenAIBaseArgs<
    string,
    string,
    TModelKey,
    AxAIOpenAIChatRequest<string>
  >,
  'name' | 'config' | 'modelInfo' | 'supportFor'
> & {
  /** Base URL of the OpenAI-compatible API, e.g. `https://api.groq.com/openai/v1`. */
  endpoint: string;
  /** Partial config merged over the stock OpenAI defaults. */
  config?: Partial<AxAIOpenAICompatibleConfig>;
  /** Model metadata; defaults to an empty list when omitted. */
  modelInfo?: AxModelInfo[];
  /** Optional static headers merged with Authorization */
  headers?: Record<string, string>;
  /** Display name used for metrics/logging */
  providerName?: string;
  /** Optional override for capability metadata */
  supportFor?: AxAIFeatures | ((model: string) => AxAIFeatures);
};

// Capability metadata used when the caller does not supply `supportFor`.
// NOTE(review): these are optimistic defaults — they assume the remote
// gateway matches mainline OpenAI (function calling, streaming, vision,
// audio, file upload). Many compatible providers support only a subset;
// callers targeting such gateways should pass their own `supportFor`.
// TODO confirm per provider.
const defaultFeatures: AxAIFeatures = {
  functions: true,
  streaming: true,
  hasThinkingBudget: false,
  hasShowThoughts: false,
  media: {
    images: {
      supported: true,
      formats: ['image/jpeg', 'image/png', 'image/webp', 'image/gif'],
      detailLevels: ['auto', 'high', 'low'],
    },
    audio: {
      supported: true,
      formats: ['wav', 'mp3', 'ogg'],
    },
    files: {
      supported: true,
      formats: ['text/plain', 'application/pdf', 'image/jpeg', 'image/png'],
      uploadMethod: 'upload',
    },
    urls: {
      supported: false,
      webSearch: false,
      contextFetching: false,
    },
  },
  caching: {
    supported: false,
    types: [],
  },
  thinking: false,
  multiTurn: true,
};

export class AxAIOpenAICompatible<
TModelKey = string,
> extends AxAIOpenAIBase<string, string, TModelKey> {
constructor({
apiKey,
endpoint,
config,
options,
models,
modelInfo,
headers,
providerName,
supportFor,
chatReqUpdater,
}: Readonly<AxAIOpenAICompatibleArgs<TModelKey>>) {
if (!apiKey || apiKey === '') {
throw new Error('OpenAI-compatible API key not set');
}

if (!endpoint || endpoint === '') {
throw new Error('OpenAI-compatible endpoint not set');
}

const mergedConfig: AxAIOpenAICompatibleConfig = {
...axAIOpenAIDefaultConfig(),
...config,
};

super({
apiKey,
apiURL: endpoint,
config: mergedConfig,
options,
models,
modelInfo: modelInfo ?? [],
supportFor: supportFor ?? defaultFeatures,
chatReqUpdater,
});

super.setName(providerName ?? 'OpenAI-Compatible');
super.setHeaders(async () => ({
Authorization: `Bearer ${apiKey}`,
...(headers ?? {}),
}));
}
}
8 changes: 8 additions & 0 deletions src/ax/ai/wrap.ts
Original file line number Diff line number Diff line change
Expand Up @@ -30,6 +30,10 @@ import { AxAIMistral, type AxAIMistralArgs } from './mistral/api.js';
import type { AxAIMistralModel } from './mistral/types.js';
import { AxAIOllama, type AxAIOllamaArgs } from './ollama/api.js';
import { AxAIOpenAI, type AxAIOpenAIArgs } from './openai/api.js';
import {
AxAIOpenAICompatible,
type AxAIOpenAICompatibleArgs,
} from './openai-compatible/api.js';
import type {
AxAIOpenAIEmbedModel,
AxAIOpenAIModel,
Expand Down Expand Up @@ -60,6 +64,7 @@ import type { AxAIGrokModel } from './x-grok/types.js';

export type AxAIArgs<TModelKey> =
| AxAIOpenAIArgs<'openai', AxAIOpenAIModel, AxAIOpenAIEmbedModel, TModelKey>
| AxAIOpenAICompatibleArgs<TModelKey>
| AxAIOpenAIResponsesArgs<
'openai-responses',
AxAIOpenAIResponsesModel,
Expand Down Expand Up @@ -165,6 +170,9 @@ export class AxAI<TModelKey = string>
case 'openai':
this.ai = new AxAIOpenAI<TModelKey>(options);
break;
case 'openai-compatible':
this.ai = new AxAIOpenAICompatible<TModelKey>(options);
break;
case 'openai-responses':
this.ai = new AxAIOpenAIResponses<TModelKey>(options);
break;
Expand Down
8 changes: 8 additions & 0 deletions src/ax/index.ts
Original file line number Diff line number Diff line change
Expand Up @@ -275,6 +275,11 @@ import {
type AxAIOpenAIResponsesWebSearchCallSearchingEvent,
type AxAIOpenAIResponsesWebSearchToolCall,
} from './ai/openai/responses_types.js';
import {
AxAIOpenAICompatible,
type AxAIOpenAICompatibleArgs,
type AxAIOpenAICompatibleConfig,
} from './ai/openai-compatible/api.js';
import {
AxAIOpenRouter,
type AxAIOpenRouterArgs,
Expand Down Expand Up @@ -722,6 +727,7 @@ export { AxAIMistralModel };
export { AxAIOllama };
export { AxAIOpenAI };
export { AxAIOpenAIBase };
export { AxAIOpenAICompatible };
export { AxAIOpenAIEmbedModel };
export { AxAIOpenAIModel };
export { AxAIOpenAIResponses };
Expand Down Expand Up @@ -980,6 +986,8 @@ export type { AxAIOpenAIBaseArgs };
export type { AxAIOpenAIChatRequest };
export type { AxAIOpenAIChatResponse };
export type { AxAIOpenAIChatResponseDelta };
export type { AxAIOpenAICompatibleArgs };
export type { AxAIOpenAICompatibleConfig };
export type { AxAIOpenAIConfig };
export type { AxAIOpenAIEmbedRequest };
export type { AxAIOpenAIEmbedResponse };
Expand Down
8 changes: 8 additions & 0 deletions src/ax/vitest.config.ts
Original file line number Diff line number Diff line change
@@ -0,0 +1,8 @@
import { defineConfig } from 'vitest/config';

// Per-workspace Vitest config: scope test discovery to this package directory
// so runs do not escape into sibling workspaces, and use a plain Node
// environment (no DOM).
// NOTE(review): `__dirname` assumes the config is evaluated as CJS (Vitest
// bundles config files); confirm this still holds if the package moves to
// pure ESM configs.
export default defineConfig({
  test: {
    root: __dirname,
    environment: 'node',
  },
});
63 changes: 63 additions & 0 deletions src/examples/openai-compatible.ts
Original file line number Diff line number Diff line change
@@ -0,0 +1,63 @@
import { ai } from '@ax-llm/ax';

// Resolve credentials/endpoint/model from env, preferring the AI_COMPAT_*
// names and falling back to the AI_GATEWAY_* / OPENAI_COMPAT_* aliases.
const apiKey =
  process.env.AI_COMPAT_API_KEY ?? process.env.AI_GATEWAY_API_KEY ?? '';
const endpoint =
  process.env.AI_COMPAT_API_URL ?? process.env.AI_GATEWAY_API_URL ?? '';
const model =
  process.env.AI_COMPAT_MODEL ?? process.env.OPENAI_COMPAT_MODEL ?? 'gpt-4o-mini';

// Both a key and an endpoint are mandatory; bail out with a usage hint
// instead of sending a request that can only fail.
if (!apiKey || !endpoint) {
  console.error(
    'Set AI_COMPAT_API_KEY and AI_COMPAT_API_URL (or AI_GATEWAY_* aliases) before running this example.'
  );
  process.exit(1);
}

// Optional single vendor header, supplied via env as "Name=value". Split on
// the FIRST '=' only so header values that themselves contain '=' (e.g.
// base64 tokens) are preserved; a plain split('=') with a length check would
// silently drop them. An empty header name (leading '=') is rejected.
const rawProviderHeader = process.env.AI_COMPAT_PROVIDER_HEADER;
const eqIndex = rawProviderHeader?.indexOf('=') ?? -1;
const providerHeader =
  rawProviderHeader && eqIndex > 0
    ? ([
        rawProviderHeader.slice(0, eqIndex),
        rawProviderHeader.slice(eqIndex + 1),
      ] as const)
    : undefined;

// Build the client against the configured OpenAI-compatible gateway; the
// example always requests non-streaming completions.
const llm = ai({
  name: 'openai-compatible',
  apiKey,
  endpoint,
  headers: providerHeader
    ? { [providerHeader[0]]: providerHeader[1] }
    : undefined,
  config: { model, stream: false },
});

// Sends one non-streaming chat request through the adapter and prints the
// first result's finish reason and content.
async function main() {
  console.log(`Calling ${endpoint} (${model}) via openai-compatible adapter...`);
  const response = await llm.chat(
    {
      model,
      chatPrompt: [
        {
          role: 'system',
          content:
            'You are an eager assistant that briefly summarizes the users request.',
        },
        {
          role: 'user',
          content:
            'Explain why OpenAI-compatible gateways are useful when building Ax apps.',
        },
      ],
    },
    { stream: false }
  );

  // Non-streaming responses carry a `results` array; anything else is a
  // stream handle. With `stream: false` requested above the stream branch
  // should be unreachable, but guard anyway.
  if ('results' in response) {
    console.log(
      `[${response.results[0]?.finishReason}] ${response.results[0]?.content}`
    );
  } else {
    console.log('Received stream; consume reader() to process chunks.');
  }
}

// Surface any failure with a non-zero exit code so shells/CI notice.
main().catch((err) => {
  console.error(err);
  process.exit(1);
});
8 changes: 8 additions & 0 deletions src/tools/vitest.config.ts
Original file line number Diff line number Diff line change
@@ -0,0 +1,8 @@
import { defineConfig } from 'vitest/config';

// Per-workspace Vitest config: scope test discovery to this package directory
// so runs do not escape into sibling workspaces, and use a plain Node
// environment (no DOM).
// NOTE(review): `__dirname` assumes the config is evaluated as CJS (Vitest
// bundles config files); confirm this still holds if the package moves to
// pure ESM configs.
export default defineConfig({
  test: {
    root: __dirname,
    environment: 'node',
  },
});
Loading