Skip to content

Commit da86ca4

Browse files
Add benchmarking script (#923)
1 parent 48651f6 commit da86ca4

File tree

4 files changed

+253
-0
lines changed

4 files changed

+253
-0
lines changed

requirements.txt

Lines changed: 7 additions & 0 deletions
Original file line numberDiff line numberDiff line change
@@ -21,3 +21,10 @@ pytest==8.2.1
2121
pytest-httpbin==2.0.0
2222
pytest-trio==0.8.0
2323
werkzeug<2.1 # See: https://github.com/psf/httpbin/issues/35
# Benchmarking and profiling
uvicorn==0.30.1
aiohttp==3.9.5
urllib3==2.2.1
matplotlib==3.7.5
pyinstrument==4.6.2

scripts/benchmark

Lines changed: 17 additions & 0 deletions
Original file line numberDiff line numberDiff line change
@@ -0,0 +1,17 @@
1+
#!/bin/sh -e

# Usage: scripts/benchmark async|sync
#
# Starts the benchmark server in the background, runs the benchmark client
# against it, then cleans up the server and propagates the client's exit
# status.

export PREFIX=""
if [ -d 'venv' ] ; then
    export PREFIX="venv/bin/"
fi

set -x

${PREFIX}python tests/benchmark/server.py &
SERVER_PID=$!
EXIT_CODE=0
${PREFIX}python tests/benchmark/client.py "$@" || EXIT_CODE=$?
# `kill` fails if the server already died; without `|| true`, `sh -e` would
# abort here and the script would exit with kill's status instead of the
# client's result.
kill $SERVER_PID || true
exit $EXIT_CODE

tests/benchmark/client.py

Lines changed: 190 additions & 0 deletions
Original file line numberDiff line numberDiff line change
@@ -0,0 +1,190 @@
1+
import asyncio
2+
import os
3+
import sys
4+
import time
5+
from concurrent.futures import ThreadPoolExecutor
6+
from contextlib import contextmanager
7+
from typing import Any, Callable, Coroutine, Iterator, List
8+
9+
import aiohttp
10+
import matplotlib.pyplot as plt # type: ignore[import-untyped]
11+
import pyinstrument
12+
import urllib3
13+
from matplotlib.axes import Axes # type: ignore[import-untyped]
14+
15+
import httpcore
16+
17+
# Benchmark parameters shared by the async and sync runs.
PORT = 1234
URL = f"http://localhost:{PORT}/req"
REPEATS = 10  # measurement rounds per client library
REQUESTS = 500  # requests issued per round
CONCURRENCY = 20  # max in-flight requests while measuring
POOL_LIMIT = 100  # connection-pool size for every client under test
PROFILE = False  # set True to open a pyinstrument report in the browser
# NOTE(review): presumably steers httpcore off the anyio backend so both
# clients run on plain asyncio — confirm against httpcore's docs.
os.environ["HTTPCORE_PREFER_ANYIO"] = "0"
25+
26+
27+
def duration(start: float) -> int:
    """Milliseconds elapsed since *start* (a ``time.monotonic()`` timestamp)."""
    elapsed = time.monotonic() - start
    return int(elapsed * 1000)
29+
30+
31+
@contextmanager
def profile():
    """Profile the enclosed code with pyinstrument when PROFILE is set.

    A no-op pass-through when PROFILE is false; otherwise opens the
    captured profile in the browser on exit.
    """
    if PROFILE:
        with pyinstrument.Profiler() as profiler:
            yield
        profiler.open_in_browser()
    else:
        yield
39+
40+
41+
async def run_async_requests(axis: Axes) -> None:
    """Benchmark httpcore's AsyncConnectionPool against aiohttp.

    Runs REPEATS rounds of REQUESTS GET requests against the local
    benchmark server with at most CONCURRENCY in flight, then plots each
    request's latency (ms) on *axis* — one line per client library, with
    the total wall-clock time in the legend label.
    """

    async def gather_limited_concurrency(
        coros: Iterator[Coroutine[Any, Any, Any]], concurrency: int = CONCURRENCY
    ) -> None:
        # Await all coroutines, with a semaphore capping how many run at once.
        sem = asyncio.Semaphore(concurrency)

        async def coro_with_sem(coro: Coroutine[Any, Any, Any]) -> None:
            async with sem:
                await coro

        await asyncio.gather(*(coro_with_sem(c) for c in coros))

    async def httpcore_get(
        pool: httpcore.AsyncConnectionPool, timings: List[int]
    ) -> None:
        # One timed GET via httpcore; appends the elapsed ms to `timings`.
        start = time.monotonic()
        res = await pool.request("GET", URL)
        assert len(await res.aread()) == 2000
        assert res.status == 200, f"status_code={res.status}"
        timings.append(duration(start))

    async def aiohttp_get(session: aiohttp.ClientSession, timings: List[int]) -> None:
        # One timed GET via aiohttp; appends the elapsed ms to `timings`.
        start = time.monotonic()
        async with session.request("GET", URL) as res:
            assert len(await res.read()) == 2000
            assert res.status == 200, f"status={res.status}"
        timings.append(duration(start))

    async with httpcore.AsyncConnectionPool(max_connections=POOL_LIMIT) as pool:
        # warmup: populate the pool before measuring (timings discarded),
        # at double concurrency so every connection gets opened.
        await gather_limited_concurrency(
            (httpcore_get(pool, []) for _ in range(REQUESTS)), CONCURRENCY * 2
        )

        timings: List[int] = []
        start = time.monotonic()
        with profile():
            for _ in range(REPEATS):
                await gather_limited_concurrency(
                    (httpcore_get(pool, timings) for _ in range(REQUESTS))
                )
        axis.plot(
            [*range(len(timings))], timings, label=f"httpcore (tot={duration(start)}ms)"
        )

    connector = aiohttp.TCPConnector(limit=POOL_LIMIT)
    async with aiohttp.ClientSession(connector=connector) as session:
        # warmup (same scheme as above, for aiohttp)
        await gather_limited_concurrency(
            (aiohttp_get(session, []) for _ in range(REQUESTS)), CONCURRENCY * 2
        )

        timings = []
        start = time.monotonic()
        for _ in range(REPEATS):
            await gather_limited_concurrency(
                (aiohttp_get(session, timings) for _ in range(REQUESTS))
            )
        axis.plot(
            [*range(len(timings))], timings, label=f"aiohttp (tot={duration(start)}ms)"
        )
102+
103+
104+
def run_sync_requests(axis: Axes) -> None:
    """Benchmark httpcore's sync ConnectionPool against urllib3.

    Runs REPEATS rounds of REQUESTS GET requests against the local
    benchmark server from a thread pool of CONCURRENCY workers, then
    plots each request's latency (ms) on *axis* — one line per client
    library, with the total wall-clock time in the legend label.
    """

    def run_in_executor(
        fns: Iterator[Callable[[], None]], executor: ThreadPoolExecutor
    ) -> None:
        # Submit everything, then block until all requests have completed.
        futures = [executor.submit(fn) for fn in fns]
        for future in futures:
            future.result()

    def httpcore_get(pool: httpcore.ConnectionPool, timings: List[int]) -> None:
        # One timed GET via httpcore; appends the elapsed ms to `timings`.
        start = time.monotonic()
        res = pool.request("GET", URL)
        assert len(res.read()) == 2000
        assert res.status == 200, f"status_code={res.status}"
        timings.append(duration(start))

    def urllib3_get(pool: urllib3.HTTPConnectionPool, timings: List[int]) -> None:
        # One timed GET via urllib3; appends the elapsed ms to `timings`.
        start = time.monotonic()
        res = pool.request("GET", "/req")
        assert len(res.data) == 2000
        assert res.status == 200, f"status={res.status}"
        timings.append(duration(start))

    with httpcore.ConnectionPool(max_connections=POOL_LIMIT) as pool:
        # warmup: populate the pool before measuring (timings discarded),
        # at double concurrency so every connection gets opened.
        # Local renamed from `exec` to `executor`: `exec` shadows the builtin.
        with ThreadPoolExecutor(max_workers=CONCURRENCY * 2) as executor:
            run_in_executor(
                (lambda: httpcore_get(pool, []) for _ in range(REQUESTS)),
                executor,
            )

        timings: List[int] = []
        executor = ThreadPoolExecutor(max_workers=CONCURRENCY)
        start = time.monotonic()
        with profile():
            for _ in range(REPEATS):
                run_in_executor(
                    (lambda: httpcore_get(pool, timings) for _ in range(REQUESTS)),
                    executor,
                )
        executor.shutdown(wait=True)
        axis.plot(
            [*range(len(timings))], timings, label=f"httpcore (tot={duration(start)}ms)"
        )

    with urllib3.HTTPConnectionPool(
        "localhost", PORT, maxsize=POOL_LIMIT
    ) as urllib3_pool:
        # warmup (same scheme as above, for urllib3)
        with ThreadPoolExecutor(max_workers=CONCURRENCY * 2) as executor:
            run_in_executor(
                (lambda: urllib3_get(urllib3_pool, []) for _ in range(REQUESTS)),
                executor,
            )

        timings = []
        executor = ThreadPoolExecutor(max_workers=CONCURRENCY)
        start = time.monotonic()
        for _ in range(REPEATS):
            run_in_executor(
                (lambda: urllib3_get(urllib3_pool, timings) for _ in range(REQUESTS)),
                executor,
            )
        executor.shutdown(wait=True)
        axis.plot(
            [*range(len(timings))], timings, label=f"urllib3 (tot={duration(start)}ms)"
        )
169+
170+
171+
def main() -> None:
    """Entry point: run the async or sync benchmark and plot the timings.

    Usage: ``python client.py <async|sync>``. Exits with a usage message
    on bad arguments; shows a matplotlib window with per-request latency.
    """
    mode = sys.argv[1] if len(sys.argv) == 2 else None
    if mode not in ("async", "sync"):
        # Explicit check instead of `assert`: asserts are stripped under -O,
        # which would let an invalid invocation fall through.
        sys.exit("Usage: python client.py <async|sync>")

    fig, ax = plt.subplots()

    if mode == "async":
        asyncio.run(run_async_requests(ax))
    else:
        run_sync_requests(ax)

    plt.legend(loc="upper left")
    ax.set_xlabel("# request")
    ax.set_ylabel("[ms]")
    plt.show()
    print("DONE", flush=True)


if __name__ == "__main__":
    main()

tests/benchmark/server.py

Lines changed: 39 additions & 0 deletions
Original file line numberDiff line numberDiff line change
@@ -0,0 +1,39 @@
1+
import asyncio
2+
3+
import uvicorn
4+
5+
PORT = 1234  # must match the client's PORT
RESP = b"a" * 2000  # fixed 2000-byte payload the client asserts on
SLEEP = 0.01  # simulated per-request server-side latency (seconds)


async def app(scope, receive, send):
    """Minimal ASGI app: serve GET /req with a fixed 2000-byte body.

    Sleeps SLEEP seconds per request to simulate real server work, and
    rejects anything other than a bodyless HTTP request to /req.
    """
    assert scope["type"] == "http"
    assert scope["path"] == "/req"
    # The benchmark client never streams a request body.
    request_event = await receive()
    assert not request_event.get("more_body", False)

    await asyncio.sleep(SLEEP)
    start_message = {
        "type": "http.response.start",
        "status": 200,
        "headers": [[b"content-type", b"text/plain"]],
    }
    body_message = {
        "type": "http.response.body",
        "body": RESP,
    }
    await send(start_message)
    await send(body_message)
29+
30+
31+
if __name__ == "__main__":
    # Serve the benchmark ASGI app; quiet logging so server output doesn't
    # interleave with the client's benchmark output.
    uvicorn.run(
        app,
        port=PORT,
        log_level="error",
        # Keep warmed up connections alive during the test to have consistent results across test runs.
        # This avoids timing differences with connections getting closed and reopened in the background.
        timeout_keep_alive=100,
    )

0 commit comments

Comments
 (0)