2 changes: 2 additions & 0 deletions .github/workflows/tests.yml
@@ -24,6 +24,8 @@ jobs:
           activate-environment: true # Activate for simple `uv sync` below
       - run: uv sync
       - uses: j178/prek-action@v1
+        with:
+          prek-version: 0.2.14 # Downpin for https://github.com/j178/prek/issues/1104
       - uses: pre-commit-ci/[email protected]
         if: always()
   lint:
5 changes: 3 additions & 2 deletions .pre-commit-config.yaml
@@ -82,8 +82,9 @@ repos:
     rev: v1.18.1
     hooks:
       - id: jupytext
-        args: [--to, md, --pipe, black]
-        additional_dependencies: [black<25.11.0] # Downpin for https://github.com/mwouts/jupytext/issues/1467
+        # SEE: https://github.com/mwouts/jupytext/issues/1467
+        args: [--to, md, --pipe-fmt, ipynb, --pipe, "black {}"]
+        additional_dependencies: [black]
         files: ^docs/.*\.ipynb$
   - repo: https://github.com/jsh9/markdown-toc-creator
     rev: 0.1.3
4 changes: 2 additions & 2 deletions pyproject.toml
@@ -29,7 +29,7 @@ classifiers = [
 dependencies = [
     "anyio",
     "fhaviary[llm]>=0.27", # For partial tool concurrency
-    "fhlmi>=0.40.1", # Pin for new llm router calling syntax
+    "fhlmi>=0.41.0", # Pin for LiteLLMModel.get_router
     "html2text", # TODO: evaluate moving to an opt-in dependency
     "httpx",
     "httpx-aiohttp",
@@ -64,7 +64,7 @@ dev = [
     "litellm>=1.71", # Lower pin for aiohttp transport adoption
     "mypy>=1.8", # Pin for mutable-override
     "paper-qa[docling,image,ldp,memory,pypdf-media,pymupdf,typing,zotero,local,qdrant,office]",
-    "prek",
+    "prek<0.2.15", # Downpin for https://github.com/j178/prek/issues/1104
     "pydantic~=2.11", # Pin for start of model_fields deprecation
     "pylint-pydantic",
     "pytest-asyncio",
2 changes: 1 addition & 1 deletion src/paperqa/settings.py
@@ -1111,7 +1111,7 @@ def make_aviary_tool_selector(self, agent_type: str | type) -> ToolSelector | No
         ):
             return ToolSelector(
                 model_name=self.agent.agent_llm,
-                acompletion=self.get_agent_llm().router().acompletion,
+                acompletion=self.get_agent_llm().get_router().acompletion,
                 **(self.agent.agent_config or {}),
             )
         return None
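The settings.py change above is just a call-site update for the accessor rename tracked by the new `fhlmi>=0.41.0` pin: the LiteLLM router is now fetched via `LiteLLMModel.get_router()` instead of `router()`. A minimal sketch of the new usage follows; the import path and constructor arguments are assumptions drawn from how paper-qa uses the `lmi` package, not from this diff.

```python
# Sketch only, not the library's documented example.
# Assumes fhlmi installs as the `lmi` package and that LiteLLMModel takes a `name` kwarg.
from lmi import LiteLLMModel

llm = LiteLLMModel(name="gpt-4o-mini")
router = llm.get_router()  # fhlmi>=0.41.0 accessor; older releases exposed llm.router()
acompletion = router.acompletion  # async completion callable handed to ToolSelector
```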
12 changes: 6 additions & 6 deletions src/paperqa/types.py
@@ -394,14 +394,14 @@ def add_tokens(self, result: LLMResult | Message) -> None:
                 prompt_count=result.info["usage"][0],
                 completion_count=result.info["usage"][1],
             )
+
+        prompt_count = result.prompt_count or 0
+        completion_count = result.completion_count or 0
         if result.model not in self.token_counts:
-            self.token_counts[result.model] = [
-                result.prompt_count,
-                result.completion_count,
-            ]
+            self.token_counts[result.model] = [prompt_count, completion_count]
         else:
-            self.token_counts[result.model][0] += result.prompt_count
-            self.token_counts[result.model][1] += result.completion_count
+            self.token_counts[result.model][0] += prompt_count
+            self.token_counts[result.model][1] += completion_count

         self.cost += result.cost

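The types.py change coerces possibly-missing token counts to 0 before accumulating them, presumably because `LLMResult.prompt_count` / `completion_count` can be `None` (the new `is not None` assertions in the tests below point the same way); the old code would either store `None` in `token_counts` or raise a `TypeError` on `int + None`. A toy sketch of the guarded accumulation, using a hypothetical stand-in for `LLMResult`:

```python
# Illustrative sketch only: FakeResult stands in for LLMResult, and the
# module-level dict stands in for the session's token_counts attribute.
from dataclasses import dataclass


@dataclass
class FakeResult:
    model: str
    prompt_count: int | None = None
    completion_count: int | None = None


token_counts: dict[str, list[int]] = {}


def add_tokens(result: FakeResult) -> None:
    prompt_count = result.prompt_count or 0  # None -> 0
    completion_count = result.completion_count or 0
    if result.model not in token_counts:
        token_counts[result.model] = [prompt_count, completion_count]
    else:
        token_counts[result.model][0] += prompt_count
        token_counts[result.model][1] += completion_count


add_tokens(FakeResult("babbage-002", prompt_count=12, completion_count=None))
add_tokens(FakeResult("babbage-002"))  # all-None result no longer raises TypeError
assert token_counts == {"babbage-002": [12, 0]}
```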
8 changes: 7 additions & 1 deletion tests/test_paperqa.py
@@ -447,7 +447,9 @@ def accum(x) -> None:
     assert isinstance(first_id, UUID)
     assert completion.text
     assert completion.seconds_to_first_token > 0
+    assert completion.prompt_count is not None
     assert completion.prompt_count > 0
+    assert completion.completion_count is not None
     assert completion.completion_count > 0
     assert completion.model == "babbage-002"
     assert str(completion) == "".join(outputs)
@@ -461,15 +463,17 @@ def accum(x) -> None:
     assert completion.text
     assert completion.seconds_to_first_token == 0
     assert completion.seconds_to_last_token > 0
+    assert completion.prompt_count is not None
     assert completion.prompt_count > 0
+    assert completion.completion_count is not None
     assert completion.completion_count > 0
     try:
         assert completion.model == "babbage-002"
         assert completion.cost > 0
     except AssertionError:
         # Account for https://github.com/BerriAI/litellm/issues/10572
         assert any(
-            "Could not find cost for model".lower() in r.message.lower()
+            "Failed to calculate cost".lower() in r.message.lower()
             for r in caplog.records
         )

@@ -492,7 +496,9 @@ def accum(x) -> None:
         callbacks=[accum],
     )
     assert completion.seconds_to_first_token > 0
+    assert completion.prompt_count is not None
     assert completion.prompt_count > 0
+    assert completion.completion_count is not None
     assert completion.completion_count > 0
     assert str(completion) == "".join(outputs)
     assert isinstance(completion.text, str)
48 changes: 24 additions & 24 deletions uv.lock

Some generated files are not rendered by default.