Skip to content
Merged
Show file tree
Hide file tree
Changes from all commits
Commits
File filter

Filter by extension

Filter by extension


Conversations
Failed to load comments.
Loading
Jump to
Jump to file
Failed to load files.
Loading
Diff view
Diff view
1 change: 1 addition & 0 deletions package.json
Original file line number Diff line number Diff line change
Expand Up @@ -52,6 +52,7 @@
},
"dependencies": {
"@ai-sdk/anthropic": "^2.0.45",
"@ai-sdk/google": "^2.0.39",
"@anthropic-ai/sdk": "^0.68.0",
"@axe-core/puppeteer": "^4.10.2",
"@genkit-ai/compat-oai": "1.23.0",
Expand Down
15 changes: 15 additions & 0 deletions pnpm-lock.yaml

Some generated files are not rendered by default. Learn more about how customized files appear on GitHub.

24 changes: 21 additions & 3 deletions runner/codegen/ai-sdk-runner.ts
Original file line number Diff line number Diff line change
Expand Up @@ -17,6 +17,7 @@ import {
SystemModelMessage,
TextPart,
} from 'ai';
import {google, GoogleGenerativeAIProviderOptions} from '@ai-sdk/google';
import {anthropic, AnthropicProviderOptions} from '@ai-sdk/anthropic';
import z from 'zod';
import {callWithTimeout} from '../utils/timeout.js';
Expand All @@ -27,6 +28,10 @@ const SUPPORTED_MODELS = [
'claude-opus-4.1-with-thinking',
'claude-sonnet-4.5-no-thinking',
'claude-sonnet-4.5-with-thinking',
'gemini-2.5-flash-lite',
'gemini-2.5-flash',
'gemini-2.5-pro',
'gemini-3-pro-preview',
] as const;

// Increased to a very high value as we rely on an actual timeout
Expand Down Expand Up @@ -131,7 +136,8 @@ export class AiSDKRunner implements LlmRunner {
private async _getAiSdkModelOptions(
request: LocalLlmGenerateTextRequestOptions,
): Promise<{model: LanguageModel; providerOptions: {}}> {
switch (request.model) {
const modelName = request.model as (typeof SUPPORTED_MODELS)[number];
switch (modelName) {
case 'claude-opus-4.1-no-thinking':
case 'claude-opus-4.1-with-thinking': {
const thinkingEnabled = request.model.endsWith('with-thinking');
Expand All @@ -149,11 +155,23 @@ export class AiSDKRunner implements LlmRunner {
return {
model: anthropic('claude-sonnet-4-5'),
providerOptions: {
sendReasoning: true,
thinking: {type: 'enabled'},
sendReasoning: thinkingEnabled,
thinking: {type: thinkingEnabled ? 'enabled' : 'disabled'},
} satisfies AnthropicProviderOptions,
};
}
case 'gemini-2.5-flash-lite':
case 'gemini-2.5-flash':
case 'gemini-2.5-pro':
case 'gemini-3-pro-preview':
return {
model: google(modelName),
providerOptions: {
thinkingConfig: {
includeThoughts: request.thinkingConfig?.includeThoughts,
},
} satisfies GoogleGenerativeAIProviderOptions,
};
default:
throw new Error(`Unexpected model in AI SDK runner: ${request.model}.`);
}
Expand Down
4 changes: 3 additions & 1 deletion runner/codegen/genkit/providers/gemini.ts
Original file line number Diff line number Diff line change
Expand Up @@ -82,7 +82,9 @@ export class GeminiModelProvider extends GenkitModelProvider {
validateGeneratedFiles(files: LlmResponseFile[]): boolean {
  // Validation is currently a no-op: every set of generated files is
  // accepted as valid.
  //
  // The previous check rejected responses where a file's code ended in a
  // truncated `class=` (a known Gemini truncation artifact), so that such
  // runs would not skew the results:
  //   return files.length === 0 || !files.some(file => file.code.trim().endsWith('class='));
  // TODO: Consider re-enabling this.
  return true;
}

private async countGeminiTokens(
Expand Down
Loading