Skip to content

Commit 2954aeb

Browse files
Add benchmarking script
1 parent e987df2 commit 2954aeb

File tree

4 files changed

+261
-6
lines changed

4 files changed

+261
-6
lines changed

requirements.txt

Lines changed: 13 additions & 6 deletions
Original file line numberDiff line numberDiff line change
@@ -3,21 +3,28 @@
33
# Docs
44
mkdocs==1.6.0
55
mkdocs-autorefs==1.0.1
6-
mkdocs-material==9.5.20
6+
mkdocs-material==9.5.25
77
mkdocs-material-extensions==1.3.1
8-
mkdocstrings[python-legacy]==0.25.0
8+
mkdocstrings[python-legacy]==0.25.1
99
jinja2==3.1.4
1010

1111
# Packaging
1212
build==1.2.1
13-
twine==5.0.0
13+
twine==5.1.0
1414

1515
# Tests & Linting
16-
coverage[toml]==7.5.0
17-
ruff==0.4.2
16+
coverage[toml]==7.5.3
17+
ruff==0.4.7
1818
mypy==1.10.0
1919
trio-typing==0.10.0
20-
pytest==8.2.0
20+
pytest==8.2.1
2121
pytest-httpbin==2.0.0
2222
pytest-trio==0.8.0
2323
werkzeug<2.1 # See: https://github.com/psf/httpbin/issues/35
24+
25+
# Benchmarking and profiling
26+
uvicorn==0.30.1
27+
aiohttp==3.9.5
28+
requests==2.32.3
29+
matplotlib==3.7.5
30+
pyinstrument==4.6.2

scripts/benchmark

Lines changed: 17 additions & 0 deletions
Original file line numberDiff line numberDiff line change
@@ -0,0 +1,17 @@
1+
#!/bin/sh -e

# Usage: scripts/benchmark async|sync
# Starts the benchmark server in the background, runs the benchmark client
# against it, then shuts the server down, preserving the client's exit status.

# Prefer the project virtualenv's interpreter when one exists.
export PREFIX=""
if [ -d 'venv' ] ; then
export PREFIX="venv/bin/"
fi

set -x

# Launch the server in the background and remember its pid for cleanup.
${PREFIX}python tests/benchmark/server.py &
SERVER_PID=$!
# Capture the client's exit code manually so `sh -e` does not abort
# before we get a chance to kill the background server.
EXIT_CODE=0
${PREFIX}python tests/benchmark/client.py "$@" || EXIT_CODE=$?
kill $SERVER_PID
exit $EXIT_CODE

tests/benchmark/client.py

Lines changed: 192 additions & 0 deletions
Original file line numberDiff line numberDiff line change
@@ -0,0 +1,192 @@
1+
import asyncio
2+
import os
3+
import sys
4+
import time
5+
from concurrent.futures import ThreadPoolExecutor
6+
from contextlib import contextmanager
7+
from typing import Any, Callable, Coroutine, Iterator, List
8+
9+
import aiohttp
10+
import matplotlib.pyplot as plt # type: ignore[import-untyped]
11+
import pyinstrument
12+
import requests # type: ignore[import-untyped]
13+
from matplotlib.axes import Axes # type: ignore[import-untyped]
14+
from requests.adapters import HTTPAdapter # type: ignore[import-untyped]
15+
16+
import httpcore
17+
18+
# Benchmark configuration.
PORT = 1234  # must match the port tests/benchmark/server.py listens on
URL = f"http://localhost:{PORT}/req"
REPEATS = 10  # measured passes; total measured requests = REPEATS * REQUESTS
REQUESTS = 500  # requests issued per pass
CONCURRENCY = 20  # cap on in-flight requests during the measured passes
POOL_LIMIT = 100  # max connections kept by each client's connection pool
PROFILE = False  # set True to open a pyinstrument report for the httpcore run
# NOTE(review): presumably steers httpcore to its plain-asyncio backend
# instead of anyio for the async benchmark — TODO confirm.
os.environ["HTTPCORE_PREFER_ANYIO"] = "0"
26+
27+
28+
def duration(start: float) -> int:
    """Return whole milliseconds elapsed since *start* (a time.monotonic() stamp)."""
    elapsed_seconds = time.monotonic() - start
    return int(elapsed_seconds * 1000)
30+
31+
32+
@contextmanager
def profile() -> Iterator[None]:
    """Profile the wrapped statements with pyinstrument when PROFILE is set.

    No-op when PROFILE is False; otherwise runs the body under a
    pyinstrument Profiler and opens the report in a browser afterwards.
    """
    if not PROFILE:
        yield
        return
    with pyinstrument.Profiler() as profiler:
        yield
    profiler.open_in_browser()
40+
41+
42+
async def run_async_requests(axis: Axes) -> None:
    """Benchmark httpcore's async pool against aiohttp and plot per-request timings.

    Each library gets a warmup round (timings discarded), then
    REPEATS * REQUESTS measured GETs with at most CONCURRENCY requests in
    flight. Per-request durations (ms) are plotted on *axis*, with the total
    wall time embedded in the legend label.
    """

    async def gather_limited_concurrency(
        coros: Iterator[Coroutine[Any, Any, Any]], concurrency: int = CONCURRENCY
    ) -> None:
        # Await every coroutine, never running more than `concurrency` at once.
        sem = asyncio.Semaphore(concurrency)

        async def coro_with_sem(coro: Coroutine[Any, Any, Any]) -> None:
            async with sem:
                await coro

        await asyncio.gather(*(coro_with_sem(c) for c in coros))

    async def httpcore_get(
        pool: httpcore.AsyncConnectionPool, timings: List[int]
    ) -> None:
        # One timed GET via httpcore; appends the elapsed ms to `timings`.
        start = time.monotonic()
        res = await pool.request("GET", URL)
        assert len(await res.aread()) == 2000
        assert res.status == 200, f"status_code={res.status}"
        timings.append(duration(start))

    async def aiohttp_get(session: aiohttp.ClientSession, timings: List[int]) -> None:
        # One timed GET via aiohttp; appends the elapsed ms to `timings`.
        start = time.monotonic()
        async with session.request("GET", URL) as res:
            assert len(await res.read()) == 2000
            assert res.status == 200, f"status={res.status}"
        timings.append(duration(start))

    async with httpcore.AsyncConnectionPool(max_connections=POOL_LIMIT) as pool:
        # warmup (double concurrency so connections get established up front)
        await gather_limited_concurrency(
            (httpcore_get(pool, []) for _ in range(REQUESTS)), CONCURRENCY * 2
        )

        timings: List[int] = []
        start = time.monotonic()
        # Only the httpcore run is profiled (see `profile`).
        with profile():
            for _ in range(REPEATS):
                await gather_limited_concurrency(
                    (httpcore_get(pool, timings) for _ in range(REQUESTS))
                )
        axis.plot(
            [*range(len(timings))], timings, label=f"httpcore (tot={duration(start)}ms)"
        )

    connector = aiohttp.TCPConnector(limit=POOL_LIMIT)
    async with aiohttp.ClientSession(connector=connector) as session:
        # warmup
        await gather_limited_concurrency(
            (aiohttp_get(session, []) for _ in range(REQUESTS)), CONCURRENCY * 2
        )

        timings = []
        start = time.monotonic()
        for _ in range(REPEATS):
            await gather_limited_concurrency(
                (aiohttp_get(session, timings) for _ in range(REQUESTS))
            )
        axis.plot(
            [*range(len(timings))], timings, label=f"aiohttp (tot={duration(start)}ms)"
        )
103+
104+
105+
def run_sync_requests(axis: Axes) -> None:
    """Benchmark httpcore's sync pool against requests and plot per-request timings.

    Mirrors run_async_requests: each library gets a warmup round (timings
    discarded), then REPEATS * REQUESTS measured GETs driven by a thread pool
    of CONCURRENCY workers. Per-request durations (ms) are plotted on *axis*,
    with the total wall time embedded in the legend label.
    """

    def run_in_executor(
        fns: Iterator[Callable[[], None]], executor: ThreadPoolExecutor
    ) -> None:
        # Submit every callable, then propagate any exception it raised.
        futures = [executor.submit(fn) for fn in fns]
        for future in futures:
            future.result()

    def httpcore_get(pool: httpcore.ConnectionPool, timings: List[int]) -> None:
        # One timed GET via httpcore; appends the elapsed ms to `timings`.
        start = time.monotonic()
        res = pool.request("GET", URL)
        assert len(res.read()) == 2000
        assert res.status == 200, f"status_code={res.status}"
        timings.append(duration(start))

    def requests_get(session: requests.Session, timings: List[int]) -> None:
        # One timed GET via requests; appends the elapsed ms to `timings`.
        start = time.monotonic()
        res = session.get(URL)
        assert len(res.text) == 2000
        assert res.status_code == 200, f"status={res.status_code}"
        timings.append(duration(start))

    with httpcore.ConnectionPool(max_connections=POOL_LIMIT) as pool:
        # warmup (pool variable renamed from `exec`, which shadowed the builtin)
        with ThreadPoolExecutor(max_workers=CONCURRENCY * 2) as executor:
            run_in_executor(
                (lambda: httpcore_get(pool, []) for _ in range(REQUESTS)),
                executor,
            )

        timings: List[int] = []
        executor = ThreadPoolExecutor(max_workers=CONCURRENCY)
        start = time.monotonic()
        # Only the httpcore run is profiled (see `profile`).
        with profile():
            for _ in range(REPEATS):
                run_in_executor(
                    (lambda: httpcore_get(pool, timings) for _ in range(REQUESTS)),
                    executor,
                )
        executor.shutdown(wait=True)
        axis.plot(
            [*range(len(timings))], timings, label=f"httpcore (tot={duration(start)}ms)"
        )

    with requests.Session() as session:
        # Size the requests adapter pool like the httpcore pool for a fair comparison.
        session.mount(
            "http://", HTTPAdapter(pool_connections=POOL_LIMIT, pool_maxsize=POOL_LIMIT)
        )
        # warmup
        with ThreadPoolExecutor(max_workers=CONCURRENCY * 2) as executor:
            run_in_executor(
                (lambda: requests_get(session, []) for _ in range(REQUESTS)),
                executor,
            )

        timings = []
        executor = ThreadPoolExecutor(max_workers=CONCURRENCY)
        start = time.monotonic()
        for _ in range(REPEATS):
            run_in_executor(
                (lambda: requests_get(session, timings) for _ in range(REQUESTS)),
                executor,
            )
        executor.shutdown(wait=True)
        axis.plot(
            [*range(len(timings))], timings, label=f"requests (tot={duration(start)}ms)"
        )
171+
172+
173+
def main() -> None:
    """Parse the benchmark mode from argv, run it, and show the timing plot."""
    mode = sys.argv[1] if len(sys.argv) == 2 else None
    assert mode in ("async", "sync"), "Usage: python client.py <async|sync>"

    fig, ax = plt.subplots()

    # The assert above guarantees mode is exactly "async" or "sync".
    if mode == "sync":
        run_sync_requests(ax)
    else:
        asyncio.run(run_async_requests(ax))

    plt.legend(loc="upper left")
    ax.set_xlabel("# request")
    ax.set_ylabel("[ms]")
    plt.show()
    print("DONE", flush=True)
189+
190+
191+
# Entry point: `python tests/benchmark/client.py async|sync`.
if __name__ == "__main__":
    main()

tests/benchmark/server.py

Lines changed: 39 additions & 0 deletions
Original file line numberDiff line numberDiff line change
@@ -0,0 +1,39 @@
1+
import asyncio
2+
3+
import uvicorn
4+
5+
PORT = 1234  # port the benchmark clients connect to
RESP = b"a" * 2000  # fixed response payload
SLEEP = 0.01  # simulated per-request server-side latency, in seconds


async def app(scope, receive, send):
    """Minimal ASGI app: answer GET /req with a fixed 2000-byte plain-text body.

    Sleeps SLEEP seconds before responding to emulate server-side work, which
    keeps many connections busy concurrently during the benchmark.
    """
    assert scope["type"] == "http"
    assert scope["path"] == "/req"
    # The benchmark clients never stream a request body.
    request_event = await receive()
    assert not request_event.get("more_body", False)

    await asyncio.sleep(SLEEP)
    response_start = {
        "type": "http.response.start",
        "status": 200,
        "headers": [[b"content-type", b"text/plain"]],
    }
    response_body = {
        "type": "http.response.body",
        "body": RESP,
    }
    await send(response_start)
    await send(response_body)
29+
30+
31+
# Run the benchmark server directly: `python tests/benchmark/server.py`.
if __name__ == "__main__":
    uvicorn.run(
        app,
        port=PORT,
        log_level="error",
        # Keep warmed up connections alive during the test to have consistent results across test runs.
        # This avoids timing differences with connections getting closed and reopened in the background.
        timeout_keep_alive=100,
    )

0 commit comments

Comments
 (0)