diff --git a/go/ai/example_test.go b/go/ai/example_test.go new file mode 100644 index 0000000000..afcdbbc6bb --- /dev/null +++ b/go/ai/example_test.go @@ -0,0 +1,160 @@ +// Copyright 2025 Google LLC +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. +// +// SPDX-License-Identifier: Apache-2.0 + +// Package ai_test provides examples for ai package helper functions. +// +// The ai package contains helper types and functions used with genkit. +// Most generation and definition functions are in the genkit package; +// see that package for the primary API documentation. +package ai_test + +import ( + "fmt" + + "github.com/firebase/genkit/go/ai" +) + +// This example demonstrates creating different types of message parts. +func ExampleNewTextPart() { + // Create a text part + part := ai.NewTextPart("Hello, world!") + fmt.Println(part.Text) + // Output: Hello, world! +} + +// This example demonstrates creating a message with text content. +func ExampleNewUserTextMessage() { + // Create a user message with text + msg := ai.NewUserTextMessage("What is the capital of France?") + fmt.Println("Role:", msg.Role) + fmt.Println("Text:", msg.Content[0].Text) + // Output: + // Role: user + // Text: What is the capital of France? +} + +// This example demonstrates creating system and model messages. +func ExampleNewSystemTextMessage() { + // Create a system message + sysMsg := ai.NewSystemTextMessage("You are a helpful assistant.") + fmt.Println("System role:", sysMsg.Role) + + // Create a model response message + modelMsg := ai.NewModelTextMessage("I'm here to help!") + fmt.Println("Model role:", modelMsg.Role) + // Output: + // System role: system + // Model role: model +} + +// This example demonstrates creating a data part for raw string content. +func ExampleNewDataPart() { + // Create a data part with raw string content + part := ai.NewDataPart(`{"name": "Alice", "age": 30}`) + fmt.Println("Is data part:", part.IsData()) + fmt.Println("Content:", part.Text) + // Output: + // Is data part: true + // Content: {"name": "Alice", "age": 30} +} + +// This example demonstrates accessing text from a Part. +func ExamplePart_Text() { + // Create a part with text + part := ai.NewTextPart("Sample text content") + + // Access the text field directly + fmt.Println(part.Text) + // Output: Sample text content +} + +// This example demonstrates the Document type used in RAG applications. +func ExampleDocument() { + // Create a document with text content + doc := &ai.Document{ + Content: []*ai.Part{ + ai.NewTextPart("This is the document content."), + }, + Metadata: map[string]any{ + "source": "knowledge-base", + "page": 42, + }, + } + + fmt.Println("Content:", doc.Content[0].Text) + fmt.Println("Source:", doc.Metadata["source"]) + // Output: + // Content: This is the document content. + // Source: knowledge-base +} + +// This example demonstrates creating an Embedding for vector search. 
+func ExampleEmbedding() { + // Create an embedding (typically returned by an embedder) + embedding := &ai.Embedding{ + Embedding: []float32{0.1, 0.2, 0.3, 0.4, 0.5}, + Metadata: map[string]any{ + "source": "document-1", + }, + } + + fmt.Printf("Embedding dimensions: %d\n", len(embedding.Embedding)) + fmt.Printf("First value: %.1f\n", embedding.Embedding[0]) + // Output: + // Embedding dimensions: 5 + // First value: 0.1 +} + +// This example demonstrates creating a media part for images or other media. +func ExampleNewMediaPart() { + // Create a media part with base64-encoded image data + // In practice, you would encode actual image bytes + imageData := "iVBORw0KGgoAAAANSUhEUgAAAAEAAAABCAYAAAAfFcSJ..." + part := ai.NewMediaPart("image/png", imageData) + + fmt.Println("Is media:", part.IsMedia()) + fmt.Println("Content type:", part.ContentType) + // Output: + // Is media: true + // Content type: image/png +} + +// This example demonstrates creating a model reference with configuration. +func ExampleNewModelRef() { + // Create a reference to a model with custom configuration + // The config type depends on the model provider + modelRef := ai.NewModelRef("googleai/gemini-2.5-flash", map[string]any{ + "temperature": 0.7, + }) + + fmt.Println("Model name:", modelRef.Name()) + // Output: Model name: googleai/gemini-2.5-flash +} + +// This example demonstrates building a multi-turn conversation. +func ExampleNewUserMessage() { + // Build a conversation with multiple parts + userMsg := ai.NewUserMessage( + ai.NewTextPart("What's in this image?"), + ai.NewMediaPart("image/jpeg", "base64data..."), + ) + + fmt.Println("Role:", userMsg.Role) + fmt.Println("Parts:", len(userMsg.Content)) + // Output: + // Role: user + // Parts: 2 +} diff --git a/go/core/doc.go b/go/core/doc.go new file mode 100644 index 0000000000..e4528df7f6 --- /dev/null +++ b/go/core/doc.go @@ -0,0 +1,230 @@ +// Copyright 2025 Google LLC +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. +// +// SPDX-License-Identifier: Apache-2.0 + +/* +Package core implements Genkit's foundational action system and runtime machinery. + +This package is primarily intended for plugin developers and Genkit internals. +Application developers should use the genkit package instead, which provides +a higher-level, more convenient API. + +# Actions + +Actions are the fundamental building blocks of Genkit. Every operation - flows, +model calls, tool invocations, retrieval - is implemented as an action. 
Actions +provide: + + - Type-safe input/output with JSON schema validation + - Automatic tracing and observability + - Consistent error handling + - Registration in the action registry + +Define a non-streaming action: + + action := core.DefineAction(registry, "myAction", + func(ctx context.Context, input string) (string, error) { + return "processed: " + input, nil + }, + ) + + result, err := action.Run(context.Background(), "hello") + +Define a streaming action that sends chunks during execution: + + streamingAction := core.DefineStreamingAction(registry, "countdown", + func(ctx context.Context, start int, cb core.StreamCallback[string]) (string, error) { + for i := start; i > 0; i-- { + if cb != nil { + if err := cb(ctx, fmt.Sprintf("T-%d", i)); err != nil { + return "", err + } + } + time.Sleep(time.Second) + } + return "Liftoff!", nil + }, + ) + +# Flows + +Flows are user-defined actions that orchestrate AI operations. They are the +primary way application developers define business logic in Genkit: + + flow := core.DefineFlow(registry, "myFlow", + func(ctx context.Context, input string) (string, error) { + // Use Run to create traced sub-steps + result, err := core.Run(ctx, "step1", func() (string, error) { + return process(input), nil + }) + if err != nil { + return "", err + } + return result, nil + }, + ) + +Streaming flows can send intermediate results to callers: + + streamingFlow := core.DefineStreamingFlow(registry, "generateReport", + func(ctx context.Context, input Input, cb core.StreamCallback[Progress]) (Report, error) { + for i := 0; i < 100; i += 10 { + if cb != nil { + cb(ctx, Progress{Percent: i}) + } + // ... work ... + } + return Report{...}, nil + }, + ) + +# Traced Steps with Run + +Use [Run] within flows to create traced sub-operations. Each Run call creates +a span in the trace that's visible in the Genkit Developer UI: + + result, err := core.Run(ctx, "fetchData", func() (Data, error) { + return fetchFromAPI() + }) + + processed, err := core.Run(ctx, "processData", func() (Result, error) { + return process(result) + }) + +# Middleware + +Actions support middleware for cross-cutting concerns like logging, metrics, +or authentication: + + loggingMiddleware := func(next core.StreamingFunc[string, string, struct{}]) core.StreamingFunc[string, string, struct{}] { + return func(ctx context.Context, input string, cb core.StreamCallback[struct{}]) (string, error) { + log.Printf("Input: %s", input) + output, err := next(ctx, input, cb) + log.Printf("Output: %s, Error: %v", output, err) + return output, err + } + } + +Chain multiple middleware together: + + combined := core.ChainMiddleware(loggingMiddleware, metricsMiddleware) + wrappedFn := combined(originalFunc) + +# Schema Management + +Register JSON schemas for use in prompts and validation: + + // Define a schema from a map + core.DefineSchema(registry, "Person", map[string]any{ + "type": "object", + "properties": map[string]any{ + "name": map[string]any{"type": "string"}, + "age": map[string]any{"type": "integer"}, + }, + "required": []any{"name"}, + }) + + // Define a schema from a Go type (recommended) + core.DefineSchemaFor[Person](registry) + +Schemas can be referenced in .prompt files by name. + +# Plugin Development + +Plugins extend Genkit's functionality by providing models, tools, retrievers, +and other capabilities. 
Implement the [api.Plugin] interface: + + type MyPlugin struct { + APIKey string + } + + func (p *MyPlugin) Name() string { + return "myplugin" + } + + func (p *MyPlugin) Init(ctx context.Context) []api.Action { + // Initialize the plugin and return actions to register + model := ai.DefineModel(...) + tool := ai.DefineTool(...) + return []api.Action{model, tool} + } + +For plugins that resolve actions dynamically (e.g., listing available models +from an API), implement [api.DynamicPlugin]: + + type DynamicModelPlugin struct{} + + func (p *DynamicModelPlugin) ListActions(ctx context.Context) []api.ActionDesc { + // Return descriptors of available actions + return []api.ActionDesc{ + {Key: "/model/myplugin/model-a", Name: "model-a"}, + {Key: "/model/myplugin/model-b", Name: "model-b"}, + } + } + + func (p *DynamicModelPlugin) ResolveAction(atype api.ActionType, name string) api.Action { + // Create and return the action on demand + return createModel(name) + } + +# Background Actions + +For long-running operations, use background actions that return immediately +with an operation ID that can be polled for completion: + + bgAction := core.DefineBackgroundAction(registry, "longTask", + func(ctx context.Context, input Input) (Output, error) { + // Start the operation + return startLongOperation(input) + }, + func(ctx context.Context, op *core.Operation[Output]) (*core.Operation[Output], error) { + // Check operation status + return checkOperationStatus(op) + }, + ) + +# Error Handling + +Return user-facing errors with appropriate status codes: + + if err := validate(input); err != nil { + return nil, core.NewPublicError(core.INVALID_ARGUMENT, "Invalid input", map[string]any{ + "field": "email", + "error": err.Error(), + }) + } + +For internal errors that should be logged but not exposed to users: + + return nil, core.NewError(core.INTERNAL, "database connection failed: %v", err) + +# Context + +Access action context for metadata and configuration: + + ctx := core.FromContext(ctx) + if ctx != nil { + // Access action-specific context values + } + +Set action context for nested operations: + + ctx = core.WithActionContext(ctx, core.ActionContext{ + "requestId": requestID, + }) + +For more information, see https://genkit.dev/docs/plugins +*/ +package core diff --git a/go/core/example_test.go b/go/core/example_test.go new file mode 100644 index 0000000000..c6212c3e9d --- /dev/null +++ b/go/core/example_test.go @@ -0,0 +1,197 @@ +// Copyright 2025 Google LLC +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. +// +// SPDX-License-Identifier: Apache-2.0 + +package core_test + +import ( + "context" + "fmt" + "strings" + + "github.com/firebase/genkit/go/core" + "github.com/firebase/genkit/go/internal/registry" +) + +// This example demonstrates defining a simple flow. 
+func ExampleDefineFlow() { + r := registry.New() + + // Define a flow that processes input + flow := core.DefineFlow(r, "uppercase", + func(ctx context.Context, input string) (string, error) { + return strings.ToUpper(input), nil + }, + ) + + // Run the flow + result, err := flow.Run(context.Background(), "hello") + if err != nil { + fmt.Println("Error:", err) + return + } + fmt.Println(result) + // Output: HELLO +} + +// This example demonstrates defining a streaming flow. +func ExampleDefineStreamingFlow() { + r := registry.New() + + // Define a streaming flow that counts down + flow := core.DefineStreamingFlow(r, "countdown", + func(ctx context.Context, start int, cb core.StreamCallback[int]) (string, error) { + for i := start; i > 0; i-- { + if cb != nil { + if err := cb(ctx, i); err != nil { + return "", err + } + } + } + return "Done!", nil + }, + ) + + // Use Stream() iterator to receive chunks + iter := flow.Stream(context.Background(), 3) + iter(func(val *core.StreamingFlowValue[string, int], err error) bool { + if err != nil { + fmt.Println("Error:", err) + return false + } + if val.Done { + fmt.Println("Result:", val.Output) + } else { + fmt.Println("Count:", val.Stream) + } + return true + }) + // Output: + // Count: 3 + // Count: 2 + // Count: 1 + // Result: Done! +} + +// This example demonstrates using Run to create traced sub-steps. +func ExampleRun() { + r := registry.New() + + // Define a flow that uses Run for traced steps + flow := core.DefineFlow(r, "pipeline", + func(ctx context.Context, input string) (string, error) { + // Each Run creates a traced step visible in the Dev UI + upper, err := core.Run(ctx, "toUpper", func() (string, error) { + return strings.ToUpper(input), nil + }) + if err != nil { + return "", err + } + + result, err := core.Run(ctx, "addPrefix", func() (string, error) { + return "RESULT: " + upper, nil + }) + return result, err + }, + ) + + result, err := flow.Run(context.Background(), "hello") + if err != nil { + fmt.Println("Error:", err) + return + } + fmt.Println(result) + // Output: RESULT: HELLO +} + +// This example demonstrates defining a schema from a Go type. +func ExampleDefineSchemaFor() { + r := registry.New() + + // Define a struct type + type Person struct { + Name string `json:"name"` + Age int `json:"age"` + } + + // Register the schema + core.DefineSchemaFor[Person](r) + + // The schema is now registered and can be referenced in .prompt files + fmt.Println("Schema registered") + // Output: Schema registered +} + +// This example demonstrates defining a schema from a map. +func ExampleDefineSchema() { + r := registry.New() + + // Define a JSON schema as a map + core.DefineSchema(r, "Address", map[string]any{ + "type": "object", + "properties": map[string]any{ + "street": map[string]any{"type": "string"}, + "city": map[string]any{"type": "string"}, + "zip": map[string]any{"type": "string"}, + }, + "required": []any{"street", "city"}, + }) + + fmt.Println("Schema registered: Address") + // Output: Schema registered: Address +} + +// This example demonstrates using ChainMiddleware to combine middleware. 
+func ExampleChainMiddleware() { + // Define a middleware that wraps function calls + logMiddleware := func(next core.StreamingFunc[string, string, struct{}]) core.StreamingFunc[string, string, struct{}] { + return func(ctx context.Context, input string, cb core.StreamCallback[struct{}]) (string, error) { + fmt.Println("Before:", input) + result, err := next(ctx, input, cb) + fmt.Println("After:", result) + return result, err + } + } + + // The original function + originalFn := func(ctx context.Context, input string, cb core.StreamCallback[struct{}]) (string, error) { + return strings.ToUpper(input), nil + } + + // Chain and apply middleware + wrapped := core.ChainMiddleware(logMiddleware)(originalFn) + + result, _ := wrapped(context.Background(), "hello", nil) + fmt.Println("Final:", result) + // Output: + // Before: hello + // After: HELLO + // Final: HELLO +} + +// This example demonstrates creating user-facing errors. +func ExampleNewPublicError() { + // Create a user-facing error with details + err := core.NewPublicError(core.INVALID_ARGUMENT, "Invalid email format", map[string]any{ + "field": "email", + "value": "not-an-email", + }) + + fmt.Println("Status:", err.Status) + fmt.Println("Message:", err.Message) + // Output: + // Status: INVALID_ARGUMENT + // Message: Invalid email format +} diff --git a/go/core/logger/doc.go b/go/core/logger/doc.go new file mode 100644 index 0000000000..b3e421abc6 --- /dev/null +++ b/go/core/logger/doc.go @@ -0,0 +1,110 @@ +// Copyright 2025 Google LLC +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. +// +// SPDX-License-Identifier: Apache-2.0 + +/* +Package logger provides context-scoped structured logging for Genkit. + +This package wraps the standard library's [log/slog] package to provide +context-aware logging throughout Genkit operations. Logs are automatically +associated with the current action or flow context. + +# Usage + +Retrieve the logger from context within action or flow handlers: + + func myFlow(ctx context.Context, input string) (string, error) { + log := logger.FromContext(ctx) + + log.Info("Processing input", "size", len(input)) + log.Debug("Input details", "value", input) + + result, err := process(input) + if err != nil { + log.Error("Processing failed", "error", err) + return "", err + } + + log.Info("Processing complete", "resultSize", len(result)) + return result, nil + } + +# Log Levels + +Control the global log level to filter output: + + // Show debug logs (verbose) + logger.SetLevel(slog.LevelDebug) + + // Show info and above (default) + logger.SetLevel(slog.LevelInfo) + + // Show only warnings and errors + logger.SetLevel(slog.LevelWarn) + + // Show only errors + logger.SetLevel(slog.LevelError) + + // Get the current log level + level := logger.GetLevel() + +# Context Integration + +The logger is automatically available in action and flow contexts. It +inherits from the context passed to [genkit.Init] and flows through +all nested operations. 
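+
+For example, a helper called from a flow handler picks up the same
+context-scoped logger simply by receiving the handler's ctx (a minimal
+sketch; saveResult is an illustrative helper, not part of the API):
+
+	func saveResult(ctx context.Context, data string) error {
+		log := logger.FromContext(ctx) // same logger as the enclosing flow
+		log.Debug("Saving result", "bytes", len(data))
+		// ... persist data ...
+		return nil
+	}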
+ +For custom operations outside of actions/flows, attach a logger to context: + + log := slog.Default() + ctx = logger.WithContext(ctx, log) + +# slog Compatibility + +The logger returned by [FromContext] is a standard [*slog.Logger] and +supports all slog methods: + + log := logger.FromContext(ctx) + + // Structured logging with attributes + log.Info("User action", + "userId", userID, + "action", "login", + "duration", elapsed, + ) + + // Grouped attributes + log.Info("Request completed", + slog.Group("request", + "method", r.Method, + "path", r.URL.Path, + ), + slog.Group("response", + "status", status, + "bytes", written, + ), + ) + + // With pre-set attributes + requestLog := log.With("requestId", requestID) + requestLog.Info("Starting") + // ... later ... + requestLog.Info("Finished") + +This package is primarily used by Genkit internals but is useful for +plugin developers who need consistent logging that integrates with +Genkit's observability features. +*/ +package logger diff --git a/go/core/tracing/doc.go b/go/core/tracing/doc.go new file mode 100644 index 0000000000..aae609c517 --- /dev/null +++ b/go/core/tracing/doc.go @@ -0,0 +1,109 @@ +// Copyright 2025 Google LLC +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. +// +// SPDX-License-Identifier: Apache-2.0 + +/* +Package tracing provides execution trace support for Genkit operations. + +This package implements OpenTelemetry-based tracing for Genkit actions and flows. +Traces capture the execution path, inputs, outputs, and timing of operations, +enabling observability and debugging through the Genkit Developer UI and +external telemetry systems. + +# Automatic Tracing + +Actions and flows defined with Genkit are automatically traced. Each action +execution creates a span with input/output data, timing, and any errors. +Use [core.Run] within flows to create traced sub-steps: + + // In a real scenario, 'r' would be the registry from your Genkit instance. + var r api.Registry + flow := core.DefineFlow(r, "myFlow", + func(ctx context.Context, input string) (string, error) { + // This creates a traced step named "processData" + result, err := core.Run(ctx, "processData", func() (string, error) { + return process(input), nil + }) + return result, err + }, + ) + +# Tracer Access + +Access the OpenTelemetry tracer provider for custom instrumentation: + + provider := tracing.TracerProvider() + + // Get a tracer for custom spans + tracer := tracing.Tracer() + +# Telemetry Export + +Configure trace export to send telemetry to external systems. For immediate +export (suitable for local storage): + + tracing.WriteTelemetryImmediate(client) + +For batched export (more efficient for network calls): + + shutdown := tracing.WriteTelemetryBatch(client) + defer shutdown(ctx) + +# Dev UI Integration + +When the GENKIT_ENV environment variable is set to "dev", traces are +automatically sent to the Genkit Developer UI's telemetry server. 
The Dev UI +provides: + + - Visual trace exploration with timing breakdown + - Input/output inspection for each action + - Error highlighting and stack traces + - Performance analysis across flow executions + +Set GENKIT_TELEMETRY_SERVER to configure a custom telemetry endpoint. + +# Span Metadata + +Create spans with rich metadata for better observability: + + metadata := &tracing.SpanMetadata{ + Name: "processDocument", + Type: "action", + Subtype: "retriever", + } + + output, err := tracing.RunInNewSpan(ctx, metadata, input, + func(ctx context.Context, in Input) (Output, error) { + // Operation runs within the traced span + return process(in), nil + }, + ) + +# Trace Information + +Extract trace context for correlation with external systems: + + info := tracing.GetTraceInfo(ctx) + if info != nil { + log.Printf("TraceID: %s, SpanID: %s", info.TraceID, info.SpanID) + } + +This package is primarily intended for Genkit internals and advanced plugin +development. Most application developers will interact with tracing through +the automatic instrumentation provided by the genkit package. + +For more information on observability, see https://genkit.dev/docs/observability +*/ +package tracing diff --git a/go/genkit/doc.go b/go/genkit/doc.go new file mode 100644 index 0000000000..f72b7c1e7b --- /dev/null +++ b/go/genkit/doc.go @@ -0,0 +1,408 @@ +// Copyright 2025 Google LLC +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. +// +// SPDX-License-Identifier: Apache-2.0 + +/* +Package genkit provides a framework for building AI-powered applications in Go. + +Genkit is an open-source framework that helps you build, deploy, and monitor +production-ready AI features. It provides a unified interface for working with +large language models (LLMs), managing prompts, defining workflows, and integrating +with various AI service providers. + +For comprehensive documentation, tutorials, and examples, visit https://genkit.dev + +# Getting Started + +Initialize Genkit with a plugin to connect to an AI provider: + + ctx := context.Background() + g := genkit.Init(ctx, + genkit.WithPlugins(&googlegenai.GoogleAI{}), + ) + +Generate text with a simple prompt: + + text, err := genkit.GenerateText(ctx, g, + ai.WithModelName("googleai/gemini-2.5-flash"), + ai.WithPrompt("Tell me a joke"), + ) + if err != nil { + log.Fatal(err) + } + fmt.Println(text) + +# Models + +Models represent AI language models that generate content. Use plugins to access +models from providers like Google AI, Vertex AI, Anthropic, or Ollama. 
Models are +referenced by name and can include provider-specific configuration: + + resp, err := genkit.Generate(ctx, g, + ai.WithModelName("googleai/gemini-2.5-flash"), + ai.WithPrompt("Explain quantum computing in simple terms"), + ) + +You can set a default model during initialization: + + g := genkit.Init(ctx, + genkit.WithPlugins(&googlegenai.GoogleAI{}), + genkit.WithDefaultModel("googleai/gemini-2.5-flash"), + ) + +# Flows + +Flows are reusable, observable functions that orchestrate AI operations. They +provide automatic tracing, can be exposed as HTTP endpoints, and support both +streaming and non-streaming execution. + +Define a simple flow: + + jokesFlow := genkit.DefineFlow(g, "jokesFlow", + func(ctx context.Context, topic string) (string, error) { + return genkit.GenerateText(ctx, g, + ai.WithPrompt("Share a joke about %s.", topic), + ) + }, + ) + + joke, err := jokesFlow.Run(ctx, "programming") + +Define a streaming flow that sends chunks as they're generated: + + streamingFlow := genkit.DefineStreamingFlow(g, "streamingJokes", + func(ctx context.Context, topic string, sendChunk ai.ModelStreamCallback) (string, error) { + resp, err := genkit.Generate(ctx, g, + ai.WithPrompt("Share a joke about %s.", topic), + ai.WithStreaming(sendChunk), + ) + if err != nil { + return "", err + } + return resp.Text(), nil + }, + ) + +Use [Run] within flows to create traced sub-steps for observability: + + genkit.DefineFlow(g, "pipeline", + func(ctx context.Context, input string) (string, error) { + result, err := genkit.Run(ctx, "processStep", func() (string, error) { + return process(input), nil + }) + return result, err + }, + ) + +# Prompts + +Prompts can be defined programmatically or loaded from .prompt files (Dotprompt format). +They encapsulate model configuration, input schemas, and template logic for reuse. 
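+
+A .prompt file pairs YAML frontmatter (model, schemas, config) with a
+Handlebars template body. As a rough sketch of what a joke.prompt file might
+look like (illustrative values; see the Dotprompt documentation for the full
+syntax):
+
+	---
+	model: googleai/gemini-2.5-flash
+	input:
+	  schema:
+	    topic: string
+	---
+	Share a joke about {{topic}}.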
+ +Define a prompt in code: + + jokePrompt := genkit.DefinePrompt(g, "joke", + ai.WithModelName("googleai/gemini-2.5-flash"), + ai.WithInputType(JokeRequest{Topic: "default topic"}), + ai.WithPrompt("Share a joke about {{topic}}."), + ) + + stream := jokePrompt.ExecuteStream(ctx, ai.WithInput(map[string]any{"topic": "cats"})) + for result, err := range stream { + if err != nil { + return err + } + if result.Done { + fmt.Println(result.Response.Text()) + } + } + +For type-safe prompts with structured input and output, use [DefineDataPrompt]: + + type RecipeRequest struct { + Cuisine string `json:"cuisine"` + Dish string `json:"dish"` + ServingSize int `json:"servingSize"` + } + + type Recipe struct { + Title string `json:"title"` + Ingredients []string `json:"ingredients"` + Instructions []string `json:"instructions"` + } + + recipePrompt := genkit.DefineDataPrompt[RecipeRequest, *Recipe](g, "recipe", + ai.WithSystem("You are an experienced chef."), + ai.WithPrompt("Create a {{cuisine}} {{dish}} recipe for {{servingSize}} people."), + ) + + for result, err := range recipePrompt.ExecuteStream(ctx, RecipeRequest{ + Cuisine: "Italian", Dish: "pasta", ServingSize: 4, + }) { + // result.Chunk is *Recipe, result.Output is final *Recipe + } + +Load prompts from .prompt files by specifying a prompt directory: + + g := genkit.Init(ctx, + genkit.WithPlugins(&googlegenai.GoogleAI{}), + genkit.WithPromptDir("./prompts"), + ) + + // Look up a loaded prompt + jokePrompt := genkit.LookupPrompt(g, "joke") + + // Or with type parameters for structured I/O + recipePrompt := genkit.LookupDataPrompt[RecipeRequest, *Recipe](g, "recipe") + +When using .prompt files with custom output schemas, register the schema first: + + genkit.DefineSchemaFor[Recipe](g) + +# Tools + +Tools extend model capabilities by allowing them to call functions during generation. +Define tools that the model can invoke to perform actions or retrieve information: + + weatherTool := genkit.DefineTool(g, "getWeather", + "Gets the current weather for a city", + func(ctx *ai.ToolContext, city string) (string, error) { + // Fetch weather data... + return "Sunny, 72°F", nil + }, + ) + + resp, err := genkit.Generate(ctx, g, + ai.WithPrompt("What's the weather in Paris?"), + ai.WithTools(weatherTool), + ) + +# Structured Output + +Generate structured data that conforms to Go types using [GenerateData] or +[GenerateDataStream]. Use jsonschema struct tags to provide descriptions and +constraints that help the model understand the expected output: + + type Joke struct { + Joke string `json:"joke" jsonschema:"description=The joke text"` + Category string `json:"category" jsonschema:"description=The joke category"` + } + + joke, resp, err := genkit.GenerateData[*Joke](ctx, g, + ai.WithPrompt("Tell me a programming joke"), + ) + +For streaming structured output: + + stream := genkit.GenerateDataStream[*Recipe](ctx, g, + ai.WithPrompt("Create a pasta recipe"), + ) + for result, err := range stream { + if err != nil { + return nil, err + } + if result.Done { + return result.Output, nil + } + // result.Chunk contains partial Recipe as it streams + fmt.Printf("Got %d ingredients so far\n", len(result.Chunk.Ingredients)) + } + +# Streaming + +Genkit supports streaming at multiple levels. 
Use [GenerateStream] for streaming +model responses: + + stream := genkit.GenerateStream(ctx, g, + ai.WithPrompt("Write a short story"), + ) + for result, err := range stream { + if err != nil { + log.Fatal(err) + } + if result.Done { + fmt.Println("\n--- Complete ---") + } else { + fmt.Print(result.Chunk.Text()) + } + } + +Use [DefineStreamingFlow] for flows that stream custom data types: + + genkit.DefineStreamingFlow(g, "countdown", + func(ctx context.Context, count int, sendChunk func(context.Context, int) error) (string, error) { + for i := count; i > 0; i-- { + if err := sendChunk(ctx, i); err != nil { + return "", err + } + time.Sleep(time.Second) + } + return "Liftoff!", nil + }, + ) + +# Development Mode and Dev UI + +Set GENKIT_ENV=dev to enable development features including the Reflection API +server that powers the Genkit Developer UI: + + $ export GENKIT_ENV=dev + $ go run main.go + +Then run the Dev UI to inspect flows, test prompts, and view traces: + + $ npx genkit start -- go run main.go + +The Dev UI provides: + - Interactive flow testing with input/output inspection + - Prompt playground for iterating on prompts + - Trace viewer for debugging and performance analysis + - Action browser for exploring registered actions + +# HTTP Server Integration + +Expose flows as HTTP endpoints for production deployment using [Handler]: + + mux := http.NewServeMux() + for _, flow := range genkit.ListFlows(g) { + mux.HandleFunc("POST /"+flow.Name(), genkit.Handler(flow)) + } + log.Fatal(server.Start(ctx, "127.0.0.1:8080", mux)) + +Handlers support streaming responses via Server-Sent Events when the client +sends Accept: text/event-stream. For durable streaming that survives reconnects, +use [WithStreamManager]: + + mux.HandleFunc("POST /countdown", genkit.Handler(countdown, + genkit.WithStreamManager(streaming.NewInMemoryStreamManager( + streaming.WithTTL(10*time.Minute), + )), + )) + +# Plugins + +Genkit's functionality is extended through plugins that provide models, tools, +retrievers, and other capabilities. Common plugins include: + + - googlegenai: Google AI (Gemini models) + - vertexai: Google Cloud Vertex AI + - ollama: Local Ollama models + +Initialize plugins during [Init]: + + g := genkit.Init(ctx, + genkit.WithPlugins( + &googlegenai.GoogleAI{}, + &vertexai.VertexAI{ProjectID: "my-project"}, + ), + ) + +# Messages and Parts + +Build conversation messages using helper functions from the [ai] package. These +are used with [ai.WithMessages] or when building custom conversation flows: + + // Create messages for a conversation + messages := []*ai.Message{ + ai.NewSystemTextMessage("You are a helpful assistant."), + ai.NewUserTextMessage("Hello!"), + ai.NewModelTextMessage("Hi there! 
How can I help?"), + } + + resp, err := genkit.Generate(ctx, g, + ai.WithMessages(messages...), + ai.WithPrompt("What can you do?"), + ) + +For multi-modal content, combine text and media parts: + + userMsg := ai.NewUserMessage( + ai.NewTextPart("What's in this image?"), + ai.NewMediaPart("image/png", base64ImageData), + ) + +Available message constructors in the [ai] package: + + - [ai.NewUserTextMessage], [ai.NewUserMessage]: User messages + - [ai.NewModelTextMessage], [ai.NewModelMessage]: Model responses + - [ai.NewSystemTextMessage], [ai.NewSystemMessage]: System instructions + +Available part constructors in the [ai] package: + + - [ai.NewTextPart]: Text content + - [ai.NewMediaPart]: Images, audio, video (base64-encoded) + - [ai.NewDataPart]: Raw data strings + - [ai.NewToolRequestPart], [ai.NewToolResponsePart]: Tool interactions + +# Generation Options + +Generation functions ([Generate], [GenerateText], [GenerateData], [GenerateStream]) +accept options from the [ai] package to control behavior. The most common options: + +Model and Configuration: + + - [ai.WithModel]: Specify the model (accepts [ai.ModelRef] or plugin model refs) + - [ai.WithModelName]: Specify model by name string (e.g., "googleai/gemini-2.5-flash") + - [ai.WithConfig]: Set generation parameters (temperature, max tokens, etc.) + +Prompting: + + - [ai.WithPrompt]: Set the user prompt (supports format strings) + - [ai.WithSystem]: Set system instructions + - [ai.WithMessages]: Provide conversation history + +Tools and Output: + + - [ai.WithTools]: Enable tools the model can call + - [ai.WithOutputType]: Request structured output matching a Go type + - [ai.WithOutputFormat]: Specify output format (json, text, etc.) + +Streaming: + + - [ai.WithStreaming]: Enable streaming with a callback function + +Example combining multiple options: + + resp, err := genkit.Generate(ctx, g, + ai.WithModelName("googleai/gemini-2.5-flash"), + ai.WithSystem("You are a helpful coding assistant."), + ai.WithMessages(conversationHistory...), + ai.WithPrompt("Explain this code: %s", code), + ai.WithTools(searchTool, calculatorTool), + // Config is provider-specific (e.g., genai.GenerateContentConfig for Google AI) + ) + +# Unregistered Components + +For advanced use cases, the [ai] package provides New* functions to create +components without registering them in Genkit. This is useful for plugins +or when you need to pass components directly: + + - [ai.NewTool]: Create an unregistered tool + - [ai.NewModel]: Create an unregistered model + - [ai.NewRetriever]: Create an unregistered retriever + - [ai.NewEmbedder]: Create an unregistered embedder + +Use the corresponding Define* functions in this package to create and register +components for use with Genkit's action system, tracing, and Dev UI. + +# Additional Resources + + - Documentation: https://genkit.dev + - Go Getting Started: https://genkit.dev/go/docs/get-started-go + - Samples: https://github.com/firebase/genkit/tree/main/go/samples + - GitHub: https://github.com/firebase/genkit +*/ +package genkit diff --git a/go/genkit/example_test.go b/go/genkit/example_test.go new file mode 100644 index 0000000000..917e8dc49c --- /dev/null +++ b/go/genkit/example_test.go @@ -0,0 +1,322 @@ +// Copyright 2025 Google LLC +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. 
+// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. +// +// SPDX-License-Identifier: Apache-2.0 + +package genkit_test + +import ( + "context" + "fmt" + "log" + "net/http" + "strings" + + "github.com/firebase/genkit/go/ai" + "github.com/firebase/genkit/go/core" + "github.com/firebase/genkit/go/genkit" +) + +// This example shows basic initialization and flow definition. +func Example() { + ctx := context.Background() + + // Initialize Genkit (without plugins for this example) + g := genkit.Init(ctx) + + // Define a simple flow + greetFlow := genkit.DefineFlow(g, "greet", + func(ctx context.Context, name string) (string, error) { + return fmt.Sprintf("Hello, %s!", name), nil + }, + ) + + // Run the flow + greeting, err := greetFlow.Run(ctx, "World") + if err != nil { + log.Fatal(err) + } + fmt.Println(greeting) + // Output: Hello, World! +} + +// This example demonstrates defining a simple non-streaming flow. +func ExampleDefineFlow() { + ctx := context.Background() + g := genkit.Init(ctx) + + // Define a flow that processes input + uppercaseFlow := genkit.DefineFlow(g, "uppercase", + func(ctx context.Context, input string) (string, error) { + return strings.ToUpper(input), nil + }, + ) + + // Run the flow + result, err := uppercaseFlow.Run(ctx, "hello") + if err != nil { + log.Fatal(err) + } + fmt.Println(result) + // Output: HELLO +} + +// This example demonstrates defining a streaming flow that sends +// chunks to the caller as they are produced. +func ExampleDefineStreamingFlow() { + ctx := context.Background() + g := genkit.Init(ctx) + + // Define a streaming flow that counts down + countdownFlow := genkit.DefineStreamingFlow(g, "countdown", + func(ctx context.Context, start int, sendChunk func(context.Context, int) error) (string, error) { + for i := start; i > 0; i-- { + if err := sendChunk(ctx, i); err != nil { + return "", err + } + } + return "Liftoff!", nil + }, + ) + + // Stream results using the iterator + iter := countdownFlow.Stream(ctx, 3) + iter(func(val *core.StreamingFlowValue[string, int], err error) bool { + if err != nil { + log.Fatal(err) + } + if val.Done { + fmt.Println("Final:", val.Output) + } else { + fmt.Println("Count:", val.Stream) + } + return true + }) + // Output: + // Count: 3 + // Count: 2 + // Count: 1 + // Final: Liftoff! +} + +// This example demonstrates using Run to create traced sub-steps +// within a flow for better observability. 
+func ExampleRun() { + ctx := context.Background() + g := genkit.Init(ctx) + + // Define a flow with traced sub-steps + pipelineFlow := genkit.DefineFlow(g, "pipeline", + func(ctx context.Context, input string) (string, error) { + // Each Run call creates a traced step visible in the Dev UI + upper, err := genkit.Run(ctx, "uppercase", func() (string, error) { + return strings.ToUpper(input), nil + }) + if err != nil { + return "", err + } + + result, err := genkit.Run(ctx, "addPrefix", func() (string, error) { + return "Processed: " + upper, nil + }) + return result, err + }, + ) + + result, err := pipelineFlow.Run(ctx, "hello") + if err != nil { + log.Fatal(err) + } + fmt.Println(result) + // Output: Processed: HELLO +} + +// This example demonstrates defining a tool that models can call +// during generation. +func ExampleDefineTool() { + ctx := context.Background() + g := genkit.Init(ctx) + + // Define a tool that adds two numbers + _ = genkit.DefineTool(g, "add", + "Adds two numbers together", + func(ctx *ai.ToolContext, input struct { + A float64 `json:"a" jsonschema:"description=First number"` + B float64 `json:"b" jsonschema:"description=Second number"` + }) (float64, error) { + return input.A + input.B, nil + }, + ) + + // The tool is now registered and can be used with ai.WithTools() + // when calling genkit.Generate() + fmt.Println("Tool registered: add") + // Output: Tool registered: add +} + +// This example demonstrates defining a reusable prompt with a template. +func ExampleDefinePrompt() { + ctx := context.Background() + g := genkit.Init(ctx) + + // Define a prompt with Handlebars template syntax + prompt := genkit.DefinePrompt(g, "greeting", + ai.WithPrompt("Say hello to {{name}} in a {{style}} way."), + ) + + // Render the prompt (without executing - useful for inspection) + rendered, err := prompt.Render(ctx, map[string]any{ + "name": "Alice", + "style": "friendly", + }) + if err != nil { + log.Fatal(err) + } + // The rendered prompt contains the messages that would be sent + fmt.Println(rendered.Messages[0].Content[0].Text) + // Output: Say hello to Alice in a friendly way. +} + +// This example demonstrates registering a Go type as a named schema. +func ExampleDefineSchemaFor() { + ctx := context.Background() + g := genkit.Init(ctx) + + // Define a struct type + type Person struct { + Name string `json:"name" jsonschema:"description=The person's name"` + Age int `json:"age" jsonschema:"description=The person's age"` + } + + // Register the schema - this makes it available for .prompt files + // that reference it by name (e.g., "output: { schema: Person }") + genkit.DefineSchemaFor[Person](g) + + fmt.Println("Schema registered: Person") + // Output: Schema registered: Person +} + +// This example demonstrates creating an HTTP server that exposes +// all registered flows as endpoints. 
+func ExampleListFlows_httpServer() { + ctx := context.Background() + g := genkit.Init(ctx) + + // Define some flows + genkit.DefineFlow(g, "echo", func(ctx context.Context, s string) (string, error) { + return s, nil + }) + + genkit.DefineFlow(g, "reverse", func(ctx context.Context, s string) (string, error) { + runes := []rune(s) + for i, j := 0, len(runes)-1; i < j; i, j = i+1, j-1 { + runes[i], runes[j] = runes[j], runes[i] + } + return string(runes), nil + }) + + // Create HTTP handlers for all flows + mux := http.NewServeMux() + for _, flow := range genkit.ListFlows(g) { + mux.HandleFunc("POST /"+flow.Name(), genkit.Handler(flow)) + } + + // The mux now has: + // - POST /echo + // - POST /reverse + fmt.Printf("Registered %d flow handlers\n", len(genkit.ListFlows(g))) + // Output: Registered 2 flow handlers +} + +// This example demonstrates using Handler to expose a single flow +// as an HTTP endpoint. +func ExampleHandler() { + ctx := context.Background() + g := genkit.Init(ctx) + + // Define a flow + greetFlow := genkit.DefineFlow(g, "greet", + func(ctx context.Context, name string) (string, error) { + return fmt.Sprintf("Hello, %s!", name), nil + }, + ) + + // Create an HTTP handler for the flow + mux := http.NewServeMux() + mux.HandleFunc("POST /greet", genkit.Handler(greetFlow)) + + // The handler accepts JSON: {"data": "World"} + // and returns JSON: {"result": "Hello, World!"} + fmt.Println("Handler registered at POST /greet") + // Output: Handler registered at POST /greet +} + +// This example demonstrates using type-safe data prompts with +// strongly-typed input and output. +func ExampleDefineDataPrompt() { + ctx := context.Background() + g := genkit.Init(ctx) + + // Define input and output types + type JokeRequest struct { + Topic string `json:"topic"` + } + + type Joke struct { + Setup string `json:"setup"` + Punchline string `json:"punchline"` + } + + // Define a type-safe prompt + // Note: In production, you'd also set ai.WithModel(...) + _ = genkit.DefineDataPrompt[JokeRequest, *Joke](g, "joke", + ai.WithPrompt("Tell a joke about {{topic}}. Return JSON with setup and punchline."), + ) + + // The prompt can now be executed with: + // for result, err := range jokePrompt.ExecuteStream(ctx, JokeRequest{Topic: "cats"}) { + // if result.Done { + // fmt.Println(result.Output.Setup) + // fmt.Println(result.Output.Punchline) + // } + // } + + fmt.Println("DataPrompt registered: joke") + // Output: DataPrompt registered: joke +} + +// This example demonstrates looking up a prompt that was loaded +// from a .prompt file. +func ExampleLookupPrompt() { + ctx := context.Background() + + // In production, you would initialize with a prompt directory: + // g := genkit.Init(ctx, genkit.WithPromptDir("./prompts")) + + g := genkit.Init(ctx) + + // Define a prompt programmatically (simulating a loaded prompt) + genkit.DefinePrompt(g, "greeting", + ai.WithPrompt("Hello {{name}}!"), + ) + + // Look up the prompt by name + prompt := genkit.LookupPrompt(g, "greeting") + if prompt == nil { + log.Fatal("Prompt not found") + } + + fmt.Println("Found prompt:", prompt.Name()) + // Output: Found prompt: greeting +} diff --git a/go/genkit/genkit.go b/go/genkit/genkit.go index 8123debf37..83429ca0d1 100644 --- a/go/genkit/genkit.go +++ b/go/genkit/genkit.go @@ -427,12 +427,14 @@ func ListTools(g *Genkit) []ai.Tool { // DefineModel defines a custom model implementation, registers it as a [core.Action] // of type Model, and returns an [ai.Model] interface. 
// -// The `provider` and `name` arguments form the unique identifier for the model -// (e.g., "myProvider/myModel"). The `info` argument provides metadata about the -// model's capabilities ([ai.ModelInfo]). The `fn` argument ([ai.ModelFunc]) -// implements the actual generation logic, handling input requests ([ai.ModelRequest]) -// and producing responses ([ai.ModelResponse]), potentially streaming chunks -// ([ai.ModelResponseChunk]) via the callback. +// The `name` argument is the unique identifier for the model (e.g., "myProvider/myModel"). +// The `opts` argument provides metadata about the model's capabilities ([ai.ModelOptions]). +// The `fn` argument ([ai.ModelFunc]) implements the actual generation logic, handling +// input requests ([ai.ModelRequest]) and producing responses ([ai.ModelResponse]), +// potentially streaming chunks ([ai.ModelResponseChunk]) via the callback. +// +// For models that don't need to be registered (e.g., for plugin development or testing), +// use [ai.NewModel] instead. // // Example: // @@ -510,7 +512,7 @@ func LookupBackgroundModel(g *Genkit, name string) ai.BackgroundModel { } // DefineTool defines a tool that can be used by models during generation, -// registers it as a [core.Action] of type Tool, and returns an [ai.ToolDef]. +// registers it as a [core.Action] of type Tool, and returns an [ai.Tool]. // Tools allow models to interact with external systems or perform specific computations. // // The `name` is the identifier the model uses to request the tool. The `description` @@ -520,7 +522,13 @@ func LookupBackgroundModel(g *Genkit, name string) ai.BackgroundModel { // `inputSchema` and `outputSchema` in the tool's definition, which guide the model // on how to provide input and interpret output. // -// Use [ai.WithInputSchema] to provide a custom JSON schema instead of inferring from the type parameter. +// For tools that don't need to be registered (e.g., dynamically created tools), +// use [ai.NewTool] instead. +// +// # Options +// +// - [ai.WithInputSchema]: Provide a custom JSON schema instead of inferring from the type parameter +// - [ai.WithInputSchemaName]: Reference a pre-registered schema by name // // Example: // @@ -563,38 +571,6 @@ func DefineTool[In, Out any](g *Genkit, name, description string, fn ai.ToolFunc // input of type `any`, and returning an output of type `Out`. // // Deprecated: Use [DefineTool] with [ai.WithInputSchema] instead. -// -// Example: -// -// // Define a custom input schema -// inputSchema := map[string]any{ -// "type": "object", -// "properties": map[string]any{ -// "city": map[string]any{"type": "string"}, -// "unit": map[string]any{ -// "type": "string", -// "enum": []any{"C", "F"}, -// }, -// }, -// "required": []string{"city"}, -// } -// -// // Define the tool with the schema -// weatherTool := genkit.DefineTool(g, "getWeather", -// "Fetches the weather for a given city with unit preference", -// func(ctx *ai.ToolContext, input any) (string, error) { -// // Parse and validate input -// data := input.(map[string]any) -// city := data["city"].(string) -// unit := "C" // default -// if u, ok := data["unit"].(string); ok { -// unit = u -// } -// // Implementation... 
-// return fmt.Sprintf("Weather in %s: 25°%s", city, unit), nil -// }, -// ai.WithToolInputSchema(inputSchema), -// ) func DefineToolWithInputSchema[Out any](g *Genkit, name, description string, inputSchema map[string]any, fn ai.ToolFunc[any, Out]) ai.Tool { return ai.DefineTool(g.reg, name, description, fn, ai.WithInputSchema(inputSchema)) } @@ -610,7 +586,13 @@ func DefineToolWithInputSchema[Out any](g *Genkit, name, description string, inp // returning an [ai.MultipartToolResponse] which contains both the output and optional // content parts. // -// Use [ai.WithInputSchema] to provide a custom JSON schema instead of inferring from the type parameter. +// For multipart tools that don't need to be registered (e.g., dynamically created tools), +// use [ai.NewMultipartTool] instead. +// +// # Options +// +// - [ai.WithInputSchema]: Provide a custom JSON schema instead of inferring from the type parameter +// - [ai.WithInputSchemaName]: Reference a pre-registered schema by name // // Example: // @@ -661,18 +643,55 @@ func LookupTool(g *Genkit, name string) ai.Tool { } // DefinePrompt defines a prompt programmatically, registers it as a [core.Action] -// of type Prompt, and returns an executable [ai.prompt]. +// of type Prompt, and returns an executable [ai.Prompt]. // // This provides an alternative to defining prompts in `.prompt` files, offering // more flexibility through Go code. Prompts encapsulate configuration (model, parameters), // message templates (system, user, history), input/output schemas, and associated tools. // // Prompts can be executed in two main ways: -// 1. Render + Generate: Call [Prompt.Render] to get [ai.GenerateActionOptions], +// 1. Render + Generate: Call [ai.Prompt.Render] to get [ai.GenerateActionOptions], // modify them if needed, and pass them to [GenerateWithRequest]. -// 2. Execute: Call [Prompt.Execute] directly, passing input and execution options. -// -// Options ([ai.PromptOption]) are used to configure the prompt during definition. +// 2. Execute: Call [ai.Prompt.Execute] directly, passing input and execution options. +// +// For prompts that don't need to be registered (e.g., for single-use or testing), +// use [ai.NewPrompt] instead. +// +// # Options +// +// Model and Configuration: +// - [ai.WithModel]: Specify the model (accepts [ai.Model] or [ai.ModelRef]) +// - [ai.WithModelName]: Specify model by name string +// - [ai.WithConfig]: Set generation parameters (temperature, max tokens, etc.) +// +// Prompt Content: +// - [ai.WithPrompt]: Set the user prompt template (supports {{variable}} syntax) +// - [ai.WithPromptFn]: Set a function that generates the user prompt dynamically +// - [ai.WithSystem]: Set system instructions template +// - [ai.WithSystemFn]: Set a function that generates system instructions dynamically +// - [ai.WithMessages]: Provide static conversation history +// - [ai.WithMessagesFn]: Provide a function that generates conversation history +// +// Input Schema: +// - [ai.WithInputType]: Set input schema from a Go type (provides default values) +// - [ai.WithInputSchema]: Provide a custom JSON schema for input +// - [ai.WithInputSchemaName]: Reference a pre-registered schema by name +// +// Output Schema: +// - [ai.WithOutputType]: Set output schema from a Go type +// - [ai.WithOutputSchema]: Provide a custom JSON schema for output +// - [ai.WithOutputSchemaName]: Reference a pre-registered schema by name +// - [ai.WithOutputFormat]: Specify output format (json, text, etc.) 
+// +// Tools and Resources: +// - [ai.WithTools]: Enable tools the model can call +// - [ai.WithToolChoice]: Control whether tool calls are required, optional, or disabled +// - [ai.WithMaxTurns]: Set maximum tool call iterations +// - [ai.WithResources]: Attach resources available during generation +// +// Metadata: +// - [ai.WithDescription]: Set a description for the prompt +// - [ai.WithMetadata]: Set arbitrary metadata // // Example: // @@ -687,12 +706,12 @@ func LookupTool(g *Genkit, name string) ai.Tool { // // Define the prompt // capitalPrompt := genkit.DefinePrompt(g, "findCapital", // ai.WithDescription("Finds the capital of a country."), -// ai.WithModelName("googleai/gemini-2.5-flash"), // Specify the model +// ai.WithModelName("googleai/gemini-2.5-flash"), // ai.WithSystem("You are a helpful geography assistant."), // ai.WithPrompt("What is the capital of {{country}}?"), // ai.WithInputType(GeoInput{Country: "USA"}), // ai.WithOutputType(GeoOutput{}), -// ai.WithConfig(&ai.GenerationCommonConfig{Temperature: 0.5}), +// // Config is provider-specific, e.g., genai.GenerateContentConfig for Google AI // ) // // // Option 1: Render + Generate (using default input "USA") @@ -777,6 +796,14 @@ func DefineSchemaFor[T any](g *Genkit) { // It automatically infers input schema from the In type parameter and configures // output schema and JSON format from the Out type parameter (unless Out is string). // +// This is a convenience wrapper around [DefinePrompt] that provides compile-time +// type safety for both input and output. For prompts that don't need to be registered, +// use [ai.NewDataPrompt] instead. +// +// DefineDataPrompt accepts the same options as [DefinePrompt]. See [DefinePrompt] for +// the full list of available options. Note that input and output schemas are automatically +// inferred from the type parameters. +// // Example: // // type GeoInput struct { @@ -826,8 +853,7 @@ func LookupDataPrompt[In, Out any](g *Genkit, name string) *ai.DataPrompt[In, Ou // // handle error // } // -// // Optional: Modify actionOpts here if needed -// // actionOpts.Config = &ai.GenerationCommonConfig{ Temperature: 0.8 } +// // Optional: Modify actionOpts here if needed (config is provider-specific) // // resp, err := genkit.GenerateWithRequest(ctx, g, actionOpts, nil, nil) // No middleware or streaming // if err != nil { @@ -842,12 +868,50 @@ func GenerateWithRequest(ctx context.Context, g *Genkit, actionOpts *ai.Generate // provided via [ai.GenerateOption] arguments. It's a convenient way to make // generation calls without pre-defining a prompt object. // +// # Options +// +// Model and Configuration: +// - [ai.WithModel]: Specify the model (accepts [ai.Model] or [ai.ModelRef]) +// - [ai.WithModelName]: Specify model by name string (e.g., "googleai/gemini-2.5-flash") +// - [ai.WithConfig]: Set generation parameters (temperature, max tokens, etc.) 
+// +// Prompting: +// - [ai.WithPrompt]: Set the user prompt (supports format strings) +// - [ai.WithPromptFn]: Set a function that generates the user prompt dynamically +// - [ai.WithSystem]: Set system instructions +// - [ai.WithSystemFn]: Set a function that generates system instructions dynamically +// - [ai.WithMessages]: Provide conversation history +// - [ai.WithMessagesFn]: Provide a function that generates conversation history +// +// Tools and Resources: +// - [ai.WithTools]: Enable tools the model can call +// - [ai.WithToolChoice]: Control whether tool calls are required, optional, or disabled +// - [ai.WithMaxTurns]: Set maximum tool call iterations +// - [ai.WithReturnToolRequests]: Return tool requests instead of executing them +// - [ai.WithResources]: Attach resources available during generation +// +// Output: +// - [ai.WithOutputType]: Request structured output matching a Go type +// - [ai.WithOutputSchema]: Provide a custom JSON schema for output +// - [ai.WithOutputSchemaName]: Reference a pre-registered schema by name +// - [ai.WithOutputFormat]: Specify output format (json, text, etc.) +// - [ai.WithOutputEnums]: Constrain output to specific enum values +// +// Context and Streaming: +// - [ai.WithDocs]: Provide context documents +// - [ai.WithTextDocs]: Provide context as text strings +// - [ai.WithStreaming]: Enable streaming with a callback function +// - [ai.WithMiddleware]: Apply middleware to the model request/response +// +// Tool Continuation: +// - [ai.WithToolResponses]: Resume generation with tool response parts +// - [ai.WithToolRestarts]: Resume generation by restarting tool requests +// // Example: // // resp, err := genkit.Generate(ctx, g, // ai.WithModelName("googleai/gemini-2.5-flash"), // ai.WithPrompt("Write a short poem about clouds."), -// ai.WithConfig(&genai.GenerateContentConfig{MaxOutputTokens: 50}), // ) // if err != nil { // log.Fatalf("Generate failed: %v", err) @@ -869,6 +933,9 @@ func Generate(ctx context.Context, g *Genkit, opts ...ai.GenerateOption) (*ai.Mo // // Otherwise the Chunk field of the passed [ai.ModelStreamValue] holds a streamed chunk. // +// GenerateStream accepts the same options as [Generate]. See [Generate] for the full +// list of available options. +// // Example: // // for result, err := range genkit.GenerateStream(ctx, g, @@ -888,11 +955,15 @@ func GenerateStream(ctx context.Context, g *Genkit, opts ...ai.GenerateOption) i } // GenerateOperation performs a model generation request using a flexible set of options -// provided via [ai.GenerateOption] arguments. It's a convenient way to make -// generation calls without pre-defining a prompt object. +// provided via [ai.GenerateOption] arguments. It's designed for long-running generation +// tasks that may not complete immediately. // // Unlike [Generate], this function returns a [ai.ModelOperation] which can be used to -// check the status of the operation and get the result. +// check the status of the operation and get the result. Use [CheckModelOperation] to +// poll for completion. +// +// GenerateOperation accepts the same options as [Generate]. See [Generate] for the full +// list of available options. // // Example: // @@ -928,7 +999,9 @@ func CheckModelOperation(ctx context.Context, g *Genkit, op *ai.ModelOperation) // GenerateText performs a model generation request similar to [Generate], but // directly returns the generated text content as a string. It's a convenience // wrapper for cases where only the textual output is needed. 
-// It accepts the same [ai.GenerateOption] arguments as [Generate].
+//
+// GenerateText accepts the same options as [Generate]. See [Generate] for the full
+// list of available options.
 //
 // Example:
 //
@@ -944,16 +1017,13 @@ func GenerateText(ctx context.Context, g *Genkit, opts ...ai.GenerateOption) (st
 }
 
 // GenerateData performs a model generation request, expecting structured output
-// (typically JSON) that conforms to the schema of the provided `value` argument.
-// It attempts to unmarshal the model's response directly into the `value`.
-// The `value` argument must be a pointer to a struct or map.
+// (typically JSON) that conforms to the schema inferred from the Out type parameter.
+// It automatically sets output type and JSON format, unmarshals the response, and
+// returns the typed result.
 //
-// Use [ai.WithOutputType] or [ai.WithOutputFormat](ai.OutputFormatJSON) in the
-// options to instruct the model to generate JSON. [ai.WithOutputType] is preferred
-// as it infers the JSON schema from the `value` type and passes it to the model.
-//
-// It returns the full [ai.ModelResponse] along with any error. The generated data
-// populates the `value` pointed to.
+// GenerateData accepts the same options as [Generate]. See [Generate] for the full
+// list of available options. Note that output options like [ai.WithOutputType] are
+// automatically applied based on the Out type parameter.
 //
 // Example:
 //
@@ -987,6 +1057,10 @@ func GenerateData[Out any](ctx context.Context, g *Genkit, opts ...ai.GenerateOp
 //
 // Otherwise the Chunk field of the passed [ai.StreamValue] holds a streamed chunk.
 //
+// GenerateDataStream accepts the same options as [Generate]. See [Generate] for the full
+// list of available options. Note that output options are automatically applied based on
+// the Out type parameter.
+//
 // Example:
 //
 //	type Story struct {
@@ -994,7 +1068,7 @@ func GenerateData[Out any](ctx context.Context, g *Genkit, opts ...ai.GenerateOp
 //		Content string `json:"content"`
 //	}
 //
-//	for result, err := range genkit.GenerateDataStream[Story, *ai.ModelResponseChunk](ctx, g,
+//	for result, err := range genkit.GenerateDataStream[Story](ctx, g,
 //		ai.WithPrompt("Write a short story about a brave knight."),
 //	) {
 //		if err != nil {
@@ -1015,10 +1089,18 @@ func GenerateDataStream[Out any](ctx context.Context, g *Genkit, opts ...ai.Gene
 // relevant documents from registered retrievers without directly calling the
 // retriever instance.
 //
+// # Options
+//
+//   - [ai.WithRetriever]: Specify the retriever (accepts [ai.Retriever] or [ai.RetrieverRef])
+//   - [ai.WithRetrieverName]: Specify retriever by name string
+//   - [ai.WithConfig]: Set retriever-specific configuration
+//   - [ai.WithTextDocs]: Provide query text as documents
+//   - [ai.WithDocs]: Provide query as [ai.Document] instances
+//
 // Example:
 //
 //	resp, err := genkit.Retrieve(ctx, g,
-//		ai.WithRetriever(ai.NewRetrieverRef("myRetriever", nil)),
+//		ai.WithRetrieverName("myRetriever"),
 //		ai.WithTextDocs("What is the capital of France?"),
 //	)
 //	if err != nil {
@@ -1036,10 +1118,18 @@ func Retrieve(ctx context.Context, g *Genkit, opts ...ai.RetrieverOption) (*ai.R
 // provided via [ai.EmbedderOption] arguments. It's a convenient way to generate
 // embeddings from registered embedders without directly calling the embedder instance.
 //
+// # Options
+//
+//   - [ai.WithEmbedder]: Specify the embedder (accepts [ai.Embedder] or [ai.EmbedderRef])
+//   - [ai.WithEmbedderName]: Specify embedder by name string
+//   - [ai.WithConfig]: Set embedder-specific configuration
+//   - [ai.WithTextDocs]: Provide text to embed
+//   - [ai.WithDocs]: Provide [ai.Document] instances to embed
+//
 // Example:
 //
 //	resp, err := genkit.Embed(ctx, g,
-//		ai.WithEmbedder(ai.NewEmbedderRef("myEmbedder", nil)),
+//		ai.WithEmbedderName("myEmbedder"),
 //		ai.WithTextDocs("Hello, world!"),
 //	)
 //	if err != nil {
@@ -1058,9 +1148,12 @@ func Embed(ctx context.Context, g *Genkit, opts ...ai.EmbedderOption) (*ai.Embed
 // Retrievers are used to find documents relevant to a given query, often by
 // performing similarity searches in a vector database.
 //
-// The `provider` and `name` form the unique identifier. The `ret` function
+// The `name` is the unique identifier for the retriever. The `fn` function
 // contains the logic to process an [ai.RetrieverRequest] (containing the query)
 // and return an [ai.RetrieverResponse] (containing the relevant documents).
+//
+// For retrievers that don't need to be registered (e.g., for plugin development),
+// use [ai.NewRetriever] instead.
 func DefineRetriever(g *Genkit, name string, opts *ai.RetrieverOptions, fn ai.RetrieverFunc) ai.Retriever {
 	return ai.DefineRetriever(g.reg, name, opts, fn)
 }
@@ -1076,9 +1169,12 @@ func LookupRetriever(g *Genkit, name string) ai.Retriever {
 // [core.Action] of type Embedder, and returns an [ai.Embedder].
 // Embedders convert text documents or queries into numerical vector representations (embeddings).
 //
-// The `provider` and `name` are specified in the `opts` parameter which forms the unique identifier.
-// The `embed` function contains the logic to process an [ai.EmbedRequest] (containing documents or a query)
+// The `name` is the unique identifier for the embedder.
+// The `fn` function contains the logic to process an [ai.EmbedRequest] (containing documents or a query)
 // and return an [ai.EmbedResponse] (containing the corresponding embeddings).
+//
+// For embedders that don't need to be registered (e.g., for plugin development),
+// use [ai.NewEmbedder] instead.
 func DefineEmbedder(g *Genkit, name string, opts *ai.EmbedderOptions, fn ai.EmbedderFunc) ai.Embedder {
 	return ai.DefineEmbedder(g.reg, name, opts, fn)
 }
@@ -1144,6 +1240,14 @@ func LookupEvaluator(g *Genkit, name string) ai.Evaluator {
 // evaluations using registered evaluators without directly calling the
 // evaluator instance.
 //
+// # Options
+//
+//   - [ai.WithEvaluator]: Specify the evaluator (accepts [ai.Evaluator] or [ai.EvaluatorRef])
+//   - [ai.WithEvaluatorName]: Specify evaluator by name string
+//   - [ai.WithDataset]: Provide the dataset of examples to evaluate
+//   - [ai.WithID]: Set a unique identifier for this evaluation run
+//   - [ai.WithConfig]: Set evaluator-specific configuration
+//
 // Example:
 //
 //	dataset := []*ai.Example{
@@ -1154,8 +1258,8 @@ func LookupEvaluator(g *Genkit, name string) ai.Evaluator {
 //	}
 //
 //	resp, err := genkit.Evaluate(ctx, g,
-//		ai.WithEvaluator(ai.NewEvaluatorRef("myEvaluator", nil)),
-//		ai.WithDataset(dataset),
+//		ai.WithEvaluatorName("myEvaluator"),
+//		ai.WithDataset(dataset...),
 //	)
 //	if err != nil {
 //		log.Fatalf("Evaluate failed: %v", err)
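
A minimal usage sketch for the Generate option lists documented in the hunks above. It assumes an already-initialized *genkit.Genkit with a model plugin registered; the model name, system text, and the summarize helper are illustrative only, and resp.Text() is the usual accessor on *ai.ModelResponse.

package example

import (
	"context"

	"github.com/firebase/genkit/go/ai"
	"github.com/firebase/genkit/go/genkit"
)

// summarize combines WithModelName, WithSystem, and WithPrompt (which supports
// format strings per the docs above) in a single Generate call. It assumes g
// was initialized elsewhere with a model plugin registered.
func summarize(ctx context.Context, g *genkit.Genkit, article string) (string, error) {
	resp, err := genkit.Generate(ctx, g,
		ai.WithModelName("googleai/gemini-2.5-flash"), // example model name
		ai.WithSystem("You are a concise technical editor."),
		ai.WithPrompt("Summarize the following article in two sentences:\n\n%s", article),
	)
	if err != nil {
		return "", err
	}
	return resp.Text(), nil
}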
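
The GenerateData and GenerateDataStream hunks describe schema inference from the Out type parameter. The sketch below assumes the generic GenerateData returns the decoded value together with the full *ai.ModelResponse; the exact return shape should be verified against the installed genkit version, and the Recipe type, model name, and helper are illustrative.

package example

import (
	"context"

	"github.com/firebase/genkit/go/ai"
	"github.com/firebase/genkit/go/genkit"
)

// Recipe is an illustrative output type; per the docs above, its JSON schema is
// inferred from the type parameter and sent to the model automatically.
type Recipe struct {
	Name        string   `json:"name"`
	Ingredients []string `json:"ingredients"`
}

// suggestRecipe assumes GenerateData[Out] returns (*Out, *ai.ModelResponse, error).
func suggestRecipe(ctx context.Context, g *genkit.Genkit, dish string) (*Recipe, error) {
	recipe, _, err := genkit.GenerateData[Recipe](ctx, g,
		ai.WithModelName("googleai/gemini-2.5-flash"), // example model name
		ai.WithPrompt("Suggest a simple recipe for %s.", dish),
	)
	if err != nil {
		return nil, err
	}
	return recipe, nil
}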
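
The DefineRetriever hunk documents the name and fn parameters but its example sits outside this diff. Below is a toy in-memory retriever sketch: the RetrieverFunc signature and the RetrieverRequest/RetrieverResponse field names are assumptions drawn from the doc text above and should be checked against the ai package in use; the "memoryRetriever" name is illustrative.

package example

import (
	"context"

	"github.com/firebase/genkit/go/ai"
	"github.com/firebase/genkit/go/genkit"
)

// defineMemoryRetriever registers a toy retriever that returns every document in
// a fixed corpus. A real implementation would rank the corpus against the query
// carried by the request. Request/response shapes are assumed, not confirmed by
// this diff.
func defineMemoryRetriever(g *genkit.Genkit, corpus []string) ai.Retriever {
	return genkit.DefineRetriever(g, "memoryRetriever", nil, // nil options for brevity
		func(ctx context.Context, req *ai.RetrieverRequest) (*ai.RetrieverResponse, error) {
			resp := &ai.RetrieverResponse{}
			for _, text := range corpus {
				// Document construction mirrors the ai package examples:
				// a Content slice of parts plus optional metadata.
				resp.Documents = append(resp.Documents, &ai.Document{
					Content: []*ai.Part{ai.NewTextPart(text)},
				})
			}
			return resp, nil
		})
}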