Skip to content
Open
Show file tree
Hide file tree
Changes from all commits
Commits
File filter

Filter by extension

Filter by extension

Conversations
Failed to load comments.
Loading
Jump to
Jump to file
Failed to load files.
Loading
Diff view
Diff view
12 changes: 12 additions & 0 deletions llm_openrouter.py
Original file line number Diff line number Diff line change
Expand Up @@ -39,6 +39,11 @@ class ReasoningEffortEnum(str, Enum):
high = "high"


class WebSearchEngineEnum(str, Enum):
    """Valid values for the ``web_search_engine`` option — the engine passed
    to the OpenRouter "web" plugin (see ``build_kwargs``, which emits
    ``{"id": "web", "engine": <value>}``)."""
    exa = "exa"
    native = "native"


class _mixin:
class Options(Chat.Options):
online: Optional[bool] = Field(
Expand All @@ -61,6 +66,10 @@ class Options(Chat.Options):
description="Set to true to enable reasoning with default parameters",
default=None,
)
web_search_engine: Optional[WebSearchEngineEnum] = Field(
description='Web search engine: "exa" or "native"',
default=None,
)

@field_validator("provider")
def validate_provider(cls, provider):
Expand All @@ -81,9 +90,12 @@ def build_kwargs(self, prompt, stream):
kwargs.pop("reasoning_effort", None)
kwargs.pop("reasoning_max_tokens", None)
kwargs.pop("reasoning_enabled", None)
kwargs.pop("web_search_engine", None)
extra_body = {}
if prompt.options.online:
extra_body["plugins"] = [{"id": "web"}]
if prompt.options.web_search_engine:
extra_body["plugins"] = [{"id": "web", "engine": prompt.options.web_search_engine}]
if prompt.options.provider:
extra_body["provider"] = prompt.options.provider
reasoning = {}
Expand Down
33 changes: 33 additions & 0 deletions tests/test_llm_openrouter.py
Original file line number Diff line number Diff line change
Expand Up @@ -3,6 +3,7 @@
from click.testing import CliRunner
from inline_snapshot import snapshot
from llm.cli import cli
from unittest.mock import MagicMock

TINY_PNG = (
b"\x89PNG\r\n\x1a\n\x00\x00\x00\rIHDR\x00\x00\x00\xa6\x00\x00\x01\x1a"
Expand Down Expand Up @@ -143,3 +144,35 @@ def llm_version() -> str:
"model": "openai/gpt-4.1-mini",
}
)


def test_web_search_engine_native():
    """Setting web_search_engine="native" emits the web plugin with that engine."""
    model = llm.get_model("openrouter/openai/gpt-5")
    fake_prompt = MagicMock()
    fake_prompt.options = model.Options(web_search_engine="native")
    built = model.build_kwargs(fake_prompt, stream=False)
    assert built["extra_body"]["plugins"] == [{"id": "web", "engine": "native"}]


def test_web_search_engine_exa():
    """Setting web_search_engine="exa" emits the web plugin with that engine."""
    model = llm.get_model("openrouter/openai/gpt-5")
    fake_prompt = MagicMock()
    fake_prompt.options = model.Options(web_search_engine="exa")
    built = model.build_kwargs(fake_prompt, stream=False)
    assert built["extra_body"]["plugins"] == [{"id": "web", "engine": "exa"}]


def test_online():
    """online=True alone emits the web plugin with no engine key."""
    model = llm.get_model("openrouter/openai/gpt-5")
    fake_prompt = MagicMock()
    fake_prompt.options = model.Options(online=True)
    built = model.build_kwargs(fake_prompt, stream=False)
    assert built["extra_body"]["plugins"] == [{"id": "web"}]


def test_online_and_web_search_engine_together():
    """When both online and web_search_engine are set, the engine-specific
    plugin entry wins (it replaces the plain {"id": "web"} entry)."""
    model = llm.get_model("openrouter/openai/gpt-5")
    fake_prompt = MagicMock()
    fake_prompt.options = model.Options(online=True, web_search_engine="native")
    built = model.build_kwargs(fake_prompt, stream=False)
    assert built["extra_body"]["plugins"] == [{"id": "web", "engine": "native"}]