Commit cf87220

build: Bump version to 0.2.16
1 parent 19c90d9 commit cf87220

File tree

4 files changed: +282 -10 lines

llama_stack/ui/package.json

Lines changed: 1 addition & 1 deletion
@@ -20,7 +20,7 @@
     "@radix-ui/react-tooltip": "^1.2.6",
     "class-variance-authority": "^0.7.1",
     "clsx": "^2.1.1",
-    "llama-stack-client": "^0.2.15",
+    "llama-stack-client": "^0.2.16",
     "lucide-react": "^0.510.0",
     "next": "15.3.3",
     "next-auth": "^4.24.11",

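(For reference: npm treats the caret range ^0.2.16 on a 0.x package as >=0.2.16 <0.3.0, so the UI will pick up llama-stack-client patch releases within the 0.2 series but not 0.3.)
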
pyproject.toml

Lines changed: 3 additions & 3 deletions
@@ -4,7 +4,7 @@ build-backend = "setuptools.build_meta"
 
 [project]
 name = "llama_stack"
-version = "0.2.15"
+version = "0.2.16"
 authors = [{ name = "Meta Llama", email = "[email protected]" }]
 description = "Llama Stack"
 readme = "README.md"
@@ -28,7 +28,7 @@ dependencies = [
     "huggingface-hub>=0.34.0,<1.0",
     "jinja2>=3.1.6",
     "jsonschema",
-    "llama-stack-client>=0.2.15",
+    "llama-stack-client>=0.2.16",
     "llama-api-client>=0.1.2",
     "openai>=1.66",
     "prompt-toolkit",
@@ -53,7 +53,7 @@ dependencies = [
 ui = [
     "streamlit",
     "pandas",
-    "llama-stack-client>=0.2.15",
+    "llama-stack-client>=0.2.16",
     "streamlit-option-menu",
 ]

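A quick cross-check that the Python package version and the UI's client pin were bumped together can catch a missed file on the next release. A minimal sketch, not part of this commit: it assumes it runs from the repository root on Python 3.11+ (for the stdlib tomllib) and that the UI pin lives under "dependencies" in package.json, as the diff above suggests.

import json
import tomllib  # stdlib TOML parser, Python 3.11+

# Version declared for the llama_stack package itself.
with open("pyproject.toml", "rb") as f:
    project_version = tomllib.load(f)["project"]["version"]

# Caret-pinned llama-stack-client version used by the UI.
with open("llama_stack/ui/package.json") as f:
    ui_pin = json.load(f)["dependencies"]["llama-stack-client"]

# Strip the caret before comparing ("^0.2.16" -> "0.2.16").
assert ui_pin.lstrip("^") == project_version, (ui_pin, project_version)
print(f"pyproject {project_version} and UI pin {ui_pin} are in sync")
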
requirements.txt

Lines changed: 272 additions & 0 deletions
@@ -0,0 +1,272 @@
+# This file was autogenerated by uv via the following command:
+# uv export --frozen --no-hashes --no-emit-project --no-default-groups --output-file=requirements.txt
+aiohappyeyeballs==2.5.0
+    # via aiohttp
+aiohttp==3.12.13
+    # via llama-stack
+aiosignal==1.3.2
+    # via aiohttp
+aiosqlite==0.21.0
+    # via llama-stack
+annotated-types==0.7.0
+    # via pydantic
+anyio==4.8.0
+    # via
+    #   httpx
+    #   llama-api-client
+    #   llama-stack-client
+    #   openai
+    #   starlette
+asyncpg==0.30.0
+    # via llama-stack
+attrs==25.1.0
+    # via
+    #   aiohttp
+    #   jsonschema
+    #   referencing
+certifi==2025.1.31
+    # via
+    #   httpcore
+    #   httpx
+    #   requests
+cffi==1.17.1 ; platform_python_implementation != 'PyPy'
+    # via cryptography
+charset-normalizer==3.4.1
+    # via requests
+click==8.1.8
+    # via
+    #   llama-stack-client
+    #   uvicorn
+colorama==0.4.6 ; sys_platform == 'win32'
+    # via
+    #   click
+    #   tqdm
+cryptography==45.0.5
+    # via python-jose
+deprecated==1.2.18
+    # via
+    #   opentelemetry-api
+    #   opentelemetry-exporter-otlp-proto-http
+    #   opentelemetry-semantic-conventions
+distro==1.9.0
+    # via
+    #   llama-api-client
+    #   llama-stack-client
+    #   openai
+ecdsa==0.19.1
+    # via python-jose
+fastapi==0.115.8
+    # via llama-stack
+filelock==3.17.0
+    # via huggingface-hub
+fire==0.7.0
+    # via
+    #   llama-stack
+    #   llama-stack-client
+frozenlist==1.5.0
+    # via
+    #   aiohttp
+    #   aiosignal
+fsspec==2024.12.0
+    # via huggingface-hub
+googleapis-common-protos==1.67.0
+    # via opentelemetry-exporter-otlp-proto-http
+h11==0.16.0
+    # via
+    #   httpcore
+    #   llama-stack
+    #   uvicorn
+hf-xet==1.1.5 ; platform_machine == 'aarch64' or platform_machine == 'amd64' or platform_machine == 'arm64' or platform_machine == 'x86_64'
+    # via huggingface-hub
+httpcore==1.0.9
+    # via httpx
+httpx==0.28.1
+    # via
+    #   llama-api-client
+    #   llama-stack
+    #   llama-stack-client
+    #   openai
+huggingface-hub==0.34.1
+    # via llama-stack
+idna==3.10
+    # via
+    #   anyio
+    #   httpx
+    #   requests
+    #   yarl
+importlib-metadata==8.5.0
+    # via opentelemetry-api
+jinja2==3.1.6
+    # via llama-stack
+jiter==0.8.2
+    # via openai
+jsonschema==4.23.0
+    # via llama-stack
+jsonschema-specifications==2024.10.1
+    # via jsonschema
+llama-api-client==0.1.2
+    # via llama-stack
+llama-stack-client==0.2.16
+    # via llama-stack
+markdown-it-py==3.0.0
+    # via rich
+markupsafe==3.0.2
+    # via jinja2
+mdurl==0.1.2
+    # via markdown-it-py
+multidict==6.1.0
+    # via
+    #   aiohttp
+    #   yarl
+numpy==2.2.3
+    # via pandas
+openai==1.71.0
+    # via llama-stack
+opentelemetry-api==1.30.0
+    # via
+    #   opentelemetry-exporter-otlp-proto-http
+    #   opentelemetry-sdk
+    #   opentelemetry-semantic-conventions
+opentelemetry-exporter-otlp-proto-common==1.30.0
+    # via opentelemetry-exporter-otlp-proto-http
+opentelemetry-exporter-otlp-proto-http==1.30.0
+    # via llama-stack
+opentelemetry-proto==1.30.0
+    # via
+    #   opentelemetry-exporter-otlp-proto-common
+    #   opentelemetry-exporter-otlp-proto-http
+opentelemetry-sdk==1.30.0
+    # via
+    #   llama-stack
+    #   opentelemetry-exporter-otlp-proto-http
+opentelemetry-semantic-conventions==0.51b0
+    # via opentelemetry-sdk
+packaging==24.2
+    # via huggingface-hub
+pandas==2.2.3
+    # via llama-stack-client
+pillow==11.1.0
+    # via llama-stack
+prompt-toolkit==3.0.50
+    # via
+    #   llama-stack
+    #   llama-stack-client
+propcache==0.3.0
+    # via
+    #   aiohttp
+    #   yarl
+protobuf==5.29.5
+    # via
+    #   googleapis-common-protos
+    #   opentelemetry-proto
+pyaml==25.1.0
+    # via llama-stack-client
+pyasn1==0.4.8
+    # via
+    #   python-jose
+    #   rsa
+pycparser==2.22 ; platform_python_implementation != 'PyPy'
+    # via cffi
+pydantic==2.11.7
+    # via
+    #   fastapi
+    #   llama-api-client
+    #   llama-stack
+    #   llama-stack-client
+    #   openai
+pydantic-core==2.33.2
+    # via pydantic
+pygments==2.19.1
+    # via rich
+python-dateutil==2.9.0.post0
+    # via pandas
+python-dotenv==1.0.1
+    # via llama-stack
+python-jose==3.4.0
+    # via llama-stack
+python-multipart==0.0.20
+    # via llama-stack
+pytz==2025.1
+    # via pandas
+pyyaml==6.0.2
+    # via
+    #   huggingface-hub
+    #   pyaml
+referencing==0.36.2
+    # via
+    #   jsonschema
+    #   jsonschema-specifications
+regex==2024.11.6
+    # via tiktoken
+requests==2.32.4
+    # via
+    #   huggingface-hub
+    #   llama-stack-client
+    #   opentelemetry-exporter-otlp-proto-http
+    #   tiktoken
+rich==13.9.4
+    # via
+    #   llama-stack
+    #   llama-stack-client
+rpds-py==0.22.3
+    # via
+    #   jsonschema
+    #   referencing
+rsa==4.9
+    # via python-jose
+six==1.17.0
+    # via
+    #   ecdsa
+    #   python-dateutil
+sniffio==1.3.1
+    # via
+    #   anyio
+    #   llama-api-client
+    #   llama-stack-client
+    #   openai
+starlette==0.45.3
+    # via
+    #   fastapi
+    #   llama-stack
+termcolor==2.5.0
+    # via
+    #   fire
+    #   llama-stack
+    #   llama-stack-client
+tiktoken==0.9.0
+    # via llama-stack
+tqdm==4.67.1
+    # via
+    #   huggingface-hub
+    #   llama-stack-client
+    #   openai
+typing-extensions==4.12.2
+    # via
+    #   aiosqlite
+    #   anyio
+    #   fastapi
+    #   huggingface-hub
+    #   llama-api-client
+    #   llama-stack-client
+    #   openai
+    #   opentelemetry-sdk
+    #   pydantic
+    #   pydantic-core
+    #   referencing
+    #   typing-inspection
+typing-inspection==0.4.1
+    # via pydantic
+tzdata==2025.1
+    # via pandas
+urllib3==2.5.0
+    # via requests
+uvicorn==0.34.0
+    # via llama-stack
+wcwidth==0.2.13
+    # via prompt-toolkit
+wrapt==1.17.2
+    # via deprecated
+yarl==1.18.3
+    # via aiohttp
+zipp==3.21.0
+    # via importlib-metadata

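Per its header comment, requirements.txt is generated rather than hand-edited; after the pins in pyproject.toml change, it is presumably re-exported with the command recorded at the top of the file:

uv export --frozen --no-hashes --no-emit-project --no-default-groups --output-file=requirements.txt

Since the diff records 272 additions and 0 deletions, the exported file is newly added in this commit.
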
uv.lock

Lines changed: 6 additions & 6 deletions
Some generated files are not rendered by default.

0 commit comments