Compare commits
51 Commits
6c84a144c0
...
main
| Author | SHA1 | Date | |
|---|---|---|---|
|
|
0ae7b13407 | ||
|
|
a1996b1c9e | ||
|
|
051c0ac719 | ||
|
|
b4cf4fc8ae | ||
|
|
251d99795b | ||
|
|
cca76d4974 | ||
|
|
a64b09de8e | ||
|
|
de5f586bc7 | ||
|
|
3e2c431f49 | ||
|
|
9a56dc778e | ||
|
|
3593481b30 | ||
|
|
c1c92ddc39 | ||
|
|
a741c0a017 | ||
|
|
53fdc4527f | ||
|
|
94e91d9e27 | ||
|
|
e9ed041996 | ||
|
|
c3a4b07d3a | ||
|
|
3b5ebbaa2e | ||
|
|
c46a347def | ||
|
|
802170087a | ||
|
|
4cbd157896 | ||
|
|
56db4d26da | ||
|
|
64f3fedb9f | ||
|
|
8a909cd79d | ||
|
|
c33cdc9216 | ||
|
|
41a900037d | ||
|
|
8c99544e34 | ||
|
|
fa3621806d | ||
|
|
76dac61eb6 | ||
|
|
918d03cc58 | ||
|
|
c191942712 | ||
|
|
ef0d8f347b | ||
|
|
a1c238d4a1 | ||
|
|
5f52c83aca | ||
|
|
ed9bad9024 | ||
|
|
9ed328ceac | ||
|
|
44d61727ab | ||
|
|
29b4a36863 | ||
|
|
288bd95f62 | ||
|
|
b3966c9a9f | ||
|
|
d4e3638143 | ||
|
|
b8f7217e43 | ||
|
|
e7de479c88 | ||
|
|
28c9830f56 | ||
|
|
7dc3926f48 | ||
|
|
ba60d087c0 | ||
|
|
aac69f6a3e | ||
|
|
6d9a21ac02 | ||
|
|
e78fc8dc3c | ||
|
|
3638c607da | ||
| e6c82ad3c0 |
9
.containerignore
Normal file
9
.containerignore
Normal file
@@ -0,0 +1,9 @@
|
|||||||
|
.venv/
|
||||||
|
.git/
|
||||||
|
tests/
|
||||||
|
docs/
|
||||||
|
*.prof
|
||||||
|
*.egg-info/
|
||||||
|
__pycache__/
|
||||||
|
.gitea/
|
||||||
|
.pytest_cache/
|
||||||
48
.gitea/workflows/ci.yaml
Normal file
48
.gitea/workflows/ci.yaml
Normal file
@@ -0,0 +1,48 @@
|
|||||||
|
name: ci
|
||||||
|
|
||||||
|
on:
|
||||||
|
push:
|
||||||
|
branches: [main]
|
||||||
|
|
||||||
|
jobs:
|
||||||
|
test:
|
||||||
|
runs-on: linux
|
||||||
|
container: python:3.13-alpine
|
||||||
|
steps:
|
||||||
|
- run: apk add --no-cache git
|
||||||
|
- run: |
|
||||||
|
git clone --depth 1 \
|
||||||
|
-c "http.extraHeader=Authorization: token ${{ github.token }}" \
|
||||||
|
"${{ github.server_url }}/${{ github.repository }}.git" .
|
||||||
|
- run: pip install --no-cache-dir -r requirements.txt ruff pytest
|
||||||
|
- run: ruff check src/ tests/
|
||||||
|
- run: PYTHONPATH=src pytest tests/ -v
|
||||||
|
|
||||||
|
secrets:
|
||||||
|
runs-on: linux
|
||||||
|
container: ghcr.io/gitleaks/gitleaks:latest
|
||||||
|
steps:
|
||||||
|
- run: |
|
||||||
|
git clone \
|
||||||
|
-c "http.extraHeader=Authorization: token ${{ github.token }}" \
|
||||||
|
"${{ github.server_url }}/${{ github.repository }}.git" .
|
||||||
|
- run: gitleaks detect --source . -v
|
||||||
|
|
||||||
|
build:
|
||||||
|
needs: [test, secrets]
|
||||||
|
runs-on: linux
|
||||||
|
container: quay.io/podman/stable
|
||||||
|
env:
|
||||||
|
CONTAINER_HOST: unix:///var/run/docker.sock
|
||||||
|
steps:
|
||||||
|
- run: dnf install -y git
|
||||||
|
- run: |
|
||||||
|
git clone --depth 1 \
|
||||||
|
-c "http.extraHeader=Authorization: token ${{ github.token }}" \
|
||||||
|
"${{ github.server_url }}/${{ github.repository }}.git" .
|
||||||
|
- run: echo "$HARBOR_PASS" | podman --remote login -u "$HARBOR_USER" --password-stdin harbor.mymx.me
|
||||||
|
env:
|
||||||
|
HARBOR_USER: ${{ secrets.HARBOR_USER }}
|
||||||
|
HARBOR_PASS: ${{ secrets.HARBOR_PASS }}
|
||||||
|
- run: podman --remote build -t harbor.mymx.me/s5p/s5p:latest -f Containerfile .
|
||||||
|
- run: podman --remote push harbor.mymx.me/s5p/s5p:latest
|
||||||
5
.gitleaks.toml
Normal file
5
.gitleaks.toml
Normal file
@@ -0,0 +1,5 @@
|
|||||||
|
[allowlist]
|
||||||
|
paths = [
|
||||||
|
'''tests/''',
|
||||||
|
'''docs/''',
|
||||||
|
]
|
||||||
@@ -1,13 +1,17 @@
|
|||||||
FROM python:3.13-alpine
|
FROM python:3.13-alpine
|
||||||
|
|
||||||
RUN pip install --no-cache-dir pyyaml>=6.0
|
|
||||||
|
|
||||||
WORKDIR /app
|
WORKDIR /app
|
||||||
|
|
||||||
|
COPY requirements.txt .
|
||||||
|
RUN pip install --no-cache-dir --upgrade pip && \
|
||||||
|
pip install --no-cache-dir -r requirements.txt
|
||||||
|
|
||||||
ENV PYTHONUNBUFFERED=1 \
|
ENV PYTHONUNBUFFERED=1 \
|
||||||
PYTHONDONTWRITEBYTECODE=1 \
|
PYTHONDONTWRITEBYTECODE=1 \
|
||||||
PYTHONPATH=/app/src
|
PYTHONPATH=/app/src
|
||||||
|
|
||||||
|
COPY src/ /app/src/
|
||||||
|
|
||||||
EXPOSE 1080
|
EXPOSE 1080
|
||||||
STOPSIGNAL SIGTERM
|
STOPSIGNAL SIGTERM
|
||||||
|
|
||||||
|
|||||||
9
Makefile
9
Makefile
@@ -1,10 +1,17 @@
|
|||||||
APP_NAME := s5p
|
APP_NAME := s5p
|
||||||
|
|
||||||
.PHONY: install test lint clean build up down logs
|
.PHONY: install install-service test lint clean build up down logs
|
||||||
|
|
||||||
install:
|
install:
|
||||||
pip install -e .
|
pip install -e .
|
||||||
|
|
||||||
|
install-service:
|
||||||
|
sudo mkdir -p /etc/s5p
|
||||||
|
sudo cp config/s5p.service /etc/systemd/system/s5p.service
|
||||||
|
sudo systemctl daemon-reload
|
||||||
|
@echo "Unit installed. Configure /etc/s5p/s5p.yaml, then:"
|
||||||
|
@echo " sudo systemctl enable --now s5p"
|
||||||
|
|
||||||
test:
|
test:
|
||||||
pytest tests/ -v
|
pytest tests/ -v
|
||||||
|
|
||||||
|
|||||||
25
PROJECT.md
25
PROJECT.md
@@ -19,15 +19,16 @@ Client -------> s5p -------> Hop 1 -------> Hop 2 -------> Target
|
|||||||
SOCKS5 proto1 proto2 protoN
|
SOCKS5 proto1 proto2 protoN
|
||||||
```
|
```
|
||||||
|
|
||||||
- **server.py** -- asyncio SOCKS5 server, bidirectional relay, signal handling
|
- **server.py** -- asyncio SOCKS5 server, bidirectional relay, signal handling, multi-pool orchestration
|
||||||
- **proto.py** -- protocol handshakes (SOCKS5, SOCKS4/4a, HTTP CONNECT), chain builder
|
- **proto.py** -- protocol handshakes (SOCKS5, SOCKS4/4a, HTTP CONNECT), chain builder
|
||||||
- **config.py** -- YAML config loading, proxy URL parsing, API response parsing, pool config
|
- **config.py** -- YAML config loading, proxy URL parsing, API response parsing, pool/listener config
|
||||||
- **pool.py** -- managed proxy pool (multi-source, health-tested, persistent)
|
- **pool.py** -- named proxy pool (multi-source, health-tested, persistent, MITM filtering)
|
||||||
- **http.py** -- minimal async HTTP/1.1 client (GET/POST JSON, no external deps)
|
- **http.py** -- minimal async HTTP/1.1 client (GET/POST JSON, no external deps)
|
||||||
- **connpool.py** -- pre-warmed TCP connection pool to first chain hop
|
- **connpool.py** -- pre-warmed TCP connection pool to first chain hop
|
||||||
- **api.py** -- built-in HTTP control API (runtime metrics, pool state, config reload)
|
- **api.py** -- built-in HTTP control API (runtime metrics, multi-pool state, config reload)
|
||||||
|
- **tor.py** -- Tor control port integration (NEWNYM signaling, periodic circuit rotation)
|
||||||
- **cli.py** -- argparse CLI, logging setup, cProfile support
|
- **cli.py** -- argparse CLI, logging setup, cProfile support
|
||||||
- **metrics.py** -- connection counters and human-readable summary (lock-free, asyncio-only)
|
- **metrics.py** -- connection counters, per-listener latency, rate tracking (lock-free, asyncio-only)
|
||||||
|
|
||||||
## Deployment
|
## Deployment
|
||||||
|
|
||||||
@@ -36,9 +37,13 @@ Client -------> s5p -------> Hop 1 -------> Hop 2 -------> Target
|
|||||||
| Local venv | `pip install -e .` then `s5p -c config/s5p.yaml` |
|
| Local venv | `pip install -e .` then `s5p -c config/s5p.yaml` |
|
||||||
| Container | `make build && make up` (Alpine, ~59MB) |
|
| Container | `make build && make up` (Alpine, ~59MB) |
|
||||||
|
|
||||||
Container mounts `./src` and `./config/s5p.yaml` read-only, plus
|
Production images bake source into the image via `COPY src/ /app/src/`.
|
||||||
|
Config and data are mounted at runtime: `./config/s5p.yaml` (ro) and
|
||||||
`~/.cache/s5p` as `/data` for pool state and profile output.
|
`~/.cache/s5p` as `/data` for pool state and profile output.
|
||||||
No application code is baked into the image.
|
The compose.yaml volume mount overrides source for local dev.
|
||||||
|
|
||||||
|
CI pushes `harbor.mymx.me/s5p/s5p:latest` on every push to `main`
|
||||||
|
(lint + tests must pass first).
|
||||||
|
|
||||||
## Dependencies
|
## Dependencies
|
||||||
|
|
||||||
@@ -68,3 +73,9 @@ All other functionality uses Python stdlib (`asyncio`, `socket`, `struct`).
|
|||||||
- **Async HTTP** -- native asyncio HTTP client replaces blocking urllib, parallel fetches
|
- **Async HTTP** -- native asyncio HTTP client replaces blocking urllib, parallel fetches
|
||||||
- **First-hop pool** -- pre-warmed TCP connections to chain[0], stale-evicted, auto-refilled
|
- **First-hop pool** -- pre-warmed TCP connections to chain[0], stale-evicted, auto-refilled
|
||||||
- **Control API** -- built-in asyncio HTTP server, no Flask/external deps, disabled by default
|
- **Control API** -- built-in asyncio HTTP server, no Flask/external deps, disabled by default
|
||||||
|
- **Tor integration** -- control port NEWNYM signaling, periodic circuit rotation
|
||||||
|
- **Multi-Tor** -- round-robin traffic across multiple Tor nodes (`tor_nodes`)
|
||||||
|
- **Multi-listener** -- per-port chain depth and pool assignment
|
||||||
|
- **Named pools** -- independent proxy pools with per-listener binding (`proxy_pools:`)
|
||||||
|
- **MITM filtering** -- `mitm: true/false` source filter, `?mitm=0/1` API query param
|
||||||
|
- **Per-listener latency** -- independent latency tracking per listener in `/status`
|
||||||
|
|||||||
77
README.md
77
README.md
@@ -11,13 +11,16 @@ through configurable chains of SOCKS4, SOCKS5, and HTTP CONNECT proxies.
|
|||||||
- Per-hop authentication (username/password)
|
- Per-hop authentication (username/password)
|
||||||
- DNS leak prevention (domain names forwarded to proxies, never resolved locally)
|
- DNS leak prevention (domain names forwarded to proxies, never resolved locally)
|
||||||
- Tor integration (SOCKS5 hop + control port NEWNYM for circuit rotation)
|
- Tor integration (SOCKS5 hop + control port NEWNYM for circuit rotation)
|
||||||
- Managed proxy pool: multiple sources (API + file), health-tested, weighted selection
|
- Multi-Tor round-robin (`tor_nodes` distributes traffic across Tor instances)
|
||||||
|
- Multi-listener: different ports with different chain depths and pool assignments
|
||||||
|
- Named proxy pools: independent sources, health testing, and state per pool
|
||||||
|
- MITM source filter (`mitm: true/false` adds `?mitm=0/1` to API requests)
|
||||||
- Per-proxy failure backoff (60s cooldown), stale proxy expiry, chain pre-flight
|
- Per-proxy failure backoff (60s cooldown), stale proxy expiry, chain pre-flight
|
||||||
- Fast warm start (seconds on restart vs minutes on cold start)
|
- Fast warm start (seconds on restart vs minutes on cold start)
|
||||||
- Connection retry with proxy rotation (configurable attempts)
|
- Connection retry with proxy rotation (configurable attempts)
|
||||||
- Dead proxy reporting to upstream API (optional `report_url`)
|
- Dead proxy reporting to upstream API (optional `report_url`)
|
||||||
- SIGHUP hot reload (timeout, retries, log_level, pool config)
|
- SIGHUP hot reload (timeout, retries, log_level, pool config)
|
||||||
- Connection metrics with pool stats (logged periodically and on shutdown)
|
- Connection metrics with per-listener latency and pool stats
|
||||||
- Concurrent connection limit with backpressure (`max_connections`)
|
- Concurrent connection limit with backpressure (`max_connections`)
|
||||||
- Async HTTP client for proxy source fetching (parallel, no threads)
|
- Async HTTP client for proxy source fetching (parallel, no threads)
|
||||||
- First-hop TCP connection pool (pre-warmed, stale-evicted)
|
- First-hop TCP connection pool (pre-warmed, stale-evicted)
|
||||||
@@ -54,8 +57,13 @@ make logs # podman-compose logs -f
|
|||||||
make down # podman-compose down
|
make down # podman-compose down
|
||||||
```
|
```
|
||||||
|
|
||||||
Source, config, and data are bind-mounted, not baked into the image.
|
Production images bake source into the image. Config and data are mounted
|
||||||
Pool state and profile output persist in `~/.cache/s5p/` (`/data` inside container).
|
at runtime. Pool state and profile output persist in `~/.cache/s5p/`
|
||||||
|
(`/data` inside container). The compose.yaml volume mount overrides
|
||||||
|
source for local dev.
|
||||||
|
|
||||||
|
CI (Gitea Actions) runs lint + tests on push to `main`, then builds and
|
||||||
|
pushes `harbor.mymx.me/s5p/s5p:latest`.
|
||||||
|
|
||||||
## Configuration
|
## Configuration
|
||||||
|
|
||||||
@@ -66,29 +74,54 @@ cp config/example.yaml config/s5p.yaml
|
|||||||
```
|
```
|
||||||
|
|
||||||
```yaml
|
```yaml
|
||||||
listen: 127.0.0.1:1080
|
|
||||||
timeout: 10
|
timeout: 10
|
||||||
retries: 3
|
retries: 3
|
||||||
max_connections: 256 # concurrent connection limit
|
max_connections: 256 # concurrent connection limit
|
||||||
pool_size: 8 # pre-warmed connections to first hop
|
pool_size: 8 # pre-warmed connections to first hop
|
||||||
api_listen: 127.0.0.1:1081 # control API (disabled by default)
|
api_listen: 127.0.0.1:1081 # control API (disabled by default)
|
||||||
|
|
||||||
chain:
|
# Named proxy pools (each with independent sources and health testing)
|
||||||
- socks5://127.0.0.1:9050 # Tor
|
proxy_pools:
|
||||||
|
clean:
|
||||||
|
sources:
|
||||||
|
- url: http://10.200.1.250:8081/proxies/all
|
||||||
|
mitm: false # filter: ?mitm=0
|
||||||
|
refresh: 300
|
||||||
|
test_interval: 120
|
||||||
|
max_fails: 3
|
||||||
|
mitm:
|
||||||
|
sources:
|
||||||
|
- url: http://10.200.1.250:8081/proxies/all
|
||||||
|
mitm: true # filter: ?mitm=1
|
||||||
|
refresh: 300
|
||||||
|
test_interval: 120
|
||||||
|
max_fails: 3
|
||||||
|
|
||||||
|
# Multi-listener: each port gets a chain depth and pool assignment
|
||||||
|
# Use "pool" for listener default, "pool:name" for explicit pool per hop,
|
||||||
|
# or [pool:a, pool:b] for random choice from candidates per connection
|
||||||
|
listeners:
|
||||||
|
- listen: 0.0.0.0:1080
|
||||||
|
pool: clean
|
||||||
|
chain:
|
||||||
|
- socks5://127.0.0.1:9050
|
||||||
|
- pool # Tor + 2 clean proxies
|
||||||
|
- pool
|
||||||
|
- listen: 0.0.0.0:1081
|
||||||
|
chain:
|
||||||
|
- socks5://127.0.0.1:9050
|
||||||
|
- [pool:clean, pool:mitm] # random choice per connection
|
||||||
|
- [pool:clean, pool:mitm] # independent random choice
|
||||||
|
- listen: 0.0.0.0:1082
|
||||||
|
chain:
|
||||||
|
- socks5://127.0.0.1:9050 # Tor only
|
||||||
|
|
||||||
|
# Singular proxy_pool: still works (becomes pool "default")
|
||||||
|
|
||||||
tor:
|
tor:
|
||||||
control_port: 9051 # Tor control port (NEWNYM)
|
control_port: 9051 # Tor control port (NEWNYM)
|
||||||
password: "" # or cookie_file for auth
|
password: "" # or cookie_file for auth
|
||||||
newnym_interval: 0 # periodic circuit rotation (0 = manual)
|
newnym_interval: 0 # periodic circuit rotation (0 = manual)
|
||||||
|
|
||||||
proxy_pool:
|
|
||||||
sources:
|
|
||||||
- url: http://10.200.1.250:8081/proxies
|
|
||||||
proto: socks5
|
|
||||||
- file: /etc/s5p/proxies.txt # one proxy URL per line
|
|
||||||
refresh: 300 # re-fetch interval (seconds)
|
|
||||||
test_interval: 120 # health test cycle (seconds)
|
|
||||||
max_fails: 3 # evict after N consecutive failures
|
|
||||||
```
|
```
|
||||||
|
|
||||||
`config/s5p.yaml` is gitignored; `config/example.yaml` is the tracked template.
|
`config/s5p.yaml` is gitignored; `config/example.yaml` is the tracked template.
|
||||||
@@ -117,11 +150,13 @@ Options:
|
|||||||
## How Chaining Works
|
## How Chaining Works
|
||||||
|
|
||||||
```
|
```
|
||||||
Client -> s5p -> [static chain] -> [weighted alive proxy from pool] -> Destination
|
:1080 Client -> s5p -> Tor -> [clean] -> [clean] -> Dest (2 clean hops)
|
||||||
|
:1081 Client -> s5p -> Tor -> [clean|mitm] -> [clean|mitm] -> Dest (random)
|
||||||
|
:1082 Client -> s5p -> Tor -> Dest (Tor only)
|
||||||
```
|
```
|
||||||
|
|
||||||
s5p connects to Hop1 via TCP, negotiates the hop protocol (SOCKS5/4/HTTP),
|
s5p connects to Hop1 via TCP, negotiates the hop protocol (SOCKS5/4/HTTP),
|
||||||
then over that tunnel negotiates with Hop2, and so on. If a proxy pool is
|
then over that tunnel negotiates with Hop2, and so on. Each listener draws
|
||||||
configured, an alive proxy is appended per-connection, weighted toward those
|
from its assigned named pool -- alive proxies are appended per-connection
|
||||||
with the most recent successful health test. Each hop only sees its immediate
|
(one per `pool` entry), weighted toward those with the most recent successful
|
||||||
neighbors.
|
health test. Each hop only sees its immediate neighbors.
|
||||||
|
|||||||
24
ROADMAP.md
24
ROADMAP.md
@@ -1,6 +1,6 @@
|
|||||||
# s5p -- Roadmap
|
# s5p -- Roadmap
|
||||||
|
|
||||||
## v0.1.0 (current)
|
## v0.1.0
|
||||||
|
|
||||||
- [x] SOCKS5 server (CONNECT command)
|
- [x] SOCKS5 server (CONNECT command)
|
||||||
- [x] Proxy chaining (SOCKS5, SOCKS4/4a, HTTP CONNECT)
|
- [x] Proxy chaining (SOCKS5, SOCKS4/4a, HTTP CONNECT)
|
||||||
@@ -23,12 +23,26 @@
|
|||||||
## v0.2.0
|
## v0.2.0
|
||||||
|
|
||||||
- [x] Built-in control API (runtime metrics, pool state, config reload)
|
- [x] Built-in control API (runtime metrics, pool state, config reload)
|
||||||
- [ ] SOCKS5 server authentication (username/password)
|
|
||||||
- [x] Tor control port integration (circuit renewal via NEWNYM)
|
- [x] Tor control port integration (circuit renewal via NEWNYM)
|
||||||
- [ ] Metrics (connections/sec, bytes relayed, hop latency)
|
- [x] Metrics (connections/sec, bytes relayed, hop latency)
|
||||||
|
- [x] Multi-listener with per-port chain depth
|
||||||
|
- [x] Per-listener latency tracking
|
||||||
|
- [x] Dynamic health test concurrency (auto-scales to ~10% of pool)
|
||||||
|
- [x] Multi-Tor round-robin (`tor_nodes`)
|
||||||
|
- [x] Named proxy pools with per-listener assignment (`proxy_pools:`)
|
||||||
|
- [x] MITM source filter (`mitm: true/false` on pool sources)
|
||||||
|
|
||||||
## v0.3.0
|
## v0.3.0 (current)
|
||||||
|
|
||||||
|
- [x] SOCKS5 server authentication (username/password)
|
||||||
|
- [x] Systemd service unit
|
||||||
|
- [x] CLI test coverage
|
||||||
|
- [x] Protocol test coverage (SOCKS5/4/HTTP handshakes)
|
||||||
|
- [x] API documentation (full response schemas)
|
||||||
|
- [x] Prometheus metrics endpoint (`/metrics` OpenMetrics format)
|
||||||
|
- [x] Listener-level retry override
|
||||||
|
- [x] Pool-level proxy protocol filter (`allowed_protos`)
|
||||||
|
- [x] Connection pooling documentation
|
||||||
- [ ] UDP ASSOCIATE support (SOCKS5 UDP relay)
|
- [ ] UDP ASSOCIATE support (SOCKS5 UDP relay)
|
||||||
- [ ] BIND support
|
- [ ] BIND support
|
||||||
- [ ] Chain randomization (random order, random subset)
|
- [ ] Chain randomization (random order, random subset)
|
||||||
@@ -36,6 +50,4 @@
|
|||||||
## v1.0.0
|
## v1.0.0
|
||||||
|
|
||||||
- [ ] Stable API and config format
|
- [ ] Stable API and config format
|
||||||
- [ ] Comprehensive test suite with mock proxies
|
|
||||||
- [ ] Systemd service unit
|
|
||||||
- [ ] Performance benchmarks
|
- [ ] Performance benchmarks
|
||||||
|
|||||||
39
TASKS.md
39
TASKS.md
@@ -45,7 +45,42 @@
|
|||||||
- [x] Built-in control API (`api.py`, `--api`, `api_listen`)
|
- [x] Built-in control API (`api.py`, `--api`, `api_listen`)
|
||||||
|
|
||||||
- [x] Tor control port integration (NEWNYM signaling, periodic rotation)
|
- [x] Tor control port integration (NEWNYM signaling, periodic rotation)
|
||||||
|
- [x] Replace HTTP health check with TLS handshake (round-robin targets, no httpbin dependency)
|
||||||
|
|
||||||
|
- [x] Multi-listener with configurable proxy chaining (per-port chain depth)
|
||||||
|
- [x] Connection rate and chain latency metrics (rate/s, p50/p95/p99)
|
||||||
|
- [x] Per-listener latency tracking
|
||||||
|
- [x] Dynamic health test concurrency
|
||||||
|
- [x] Multi-Tor round-robin via `tor_nodes` config
|
||||||
|
- [x] Named proxy pools with per-listener assignment (`proxy_pools:`, `pool:`)
|
||||||
|
- [x] `mitm` source filter (`?mitm=0` / `?mitm=1` API query param)
|
||||||
|
- [x] Per-pool state files (`pool-{name}.json`)
|
||||||
|
- [x] Per-pool log prefixes (`pool[name]: ...`)
|
||||||
|
- [x] API: merged `/pool` with per-pool breakdown, `/status` pools summary
|
||||||
|
- [x] Backward compat: singular `proxy_pool:` registers as `"default"`
|
||||||
|
|
||||||
|
- [x] Integration tests with mock SOCKS5 proxy (end-to-end)
|
||||||
|
- [x] Per-destination bypass rules (CIDR, suffix, exact match)
|
||||||
|
- [x] Weighted multi-candidate pool selection
|
||||||
|
- [x] Onion chain-only routing (.onion skips pool hops)
|
||||||
|
- [x] Graceful shutdown timeout (fixes cProfile data dump)
|
||||||
|
|
||||||
|
- [x] Gitea CI workflow (lint + test + Harbor image push)
|
||||||
|
|
||||||
|
## v0.3.0 Stabilization
|
||||||
|
- [x] Version bump to 0.3.0
|
||||||
|
- [x] Systemd service unit (`config/s5p.service`, `make install-service`)
|
||||||
|
- [x] CLI argument parsing tests (`tests/test_cli.py`)
|
||||||
|
- [x] Protocol handshake tests (`tests/test_proto.py` -- SOCKS5/4/HTTP)
|
||||||
|
- [x] API reference documentation (`docs/USAGE.md`)
|
||||||
|
- [x] Prometheus `/metrics` endpoint (OpenMetrics format)
|
||||||
|
|
||||||
|
## Quick Wins
|
||||||
|
- [x] Listener-level retry override (`retries` per listener)
|
||||||
|
- [x] Pool-level proxy protocol filter (`allowed_protos` on proxy pool)
|
||||||
|
- [x] Document connection pooling (`pool_size`/`pool_max_idle` in CHEATSHEET.md)
|
||||||
|
|
||||||
## Next
|
## Next
|
||||||
- [ ] Integration tests with mock proxy server
|
- [ ] UDP ASSOCIATE support
|
||||||
- [ ] SOCKS5 server-side authentication
|
- [ ] BIND support
|
||||||
|
- [ ] Chain randomization
|
||||||
|
|||||||
14
TODO.md
14
TODO.md
@@ -4,21 +4,29 @@
|
|||||||
|
|
||||||
- SOCKS5 BIND and UDP ASSOCIATE commands
|
- SOCKS5 BIND and UDP ASSOCIATE commands
|
||||||
- Chain randomization modes (round-robin, sticky-per-destination)
|
- Chain randomization modes (round-robin, sticky-per-destination)
|
||||||
- Per-destination chain rules (bypass chain for local addresses)
|
|
||||||
- Systemd socket activation
|
- Systemd socket activation
|
||||||
|
- Per-pool health test chain override (different base chain per pool)
|
||||||
|
- ~~Pool-level proxy protocol filter (only socks5 from pool X, only http from pool Y)~~ (done)
|
||||||
|
- ~~Listener-level retry override (different retry count per listener)~~ (done)
|
||||||
|
|
||||||
## Performance
|
## Performance
|
||||||
|
|
||||||
- Benchmark relay throughput vs direct connection
|
- Benchmark relay throughput vs direct connection
|
||||||
- Tune buffer sizes for different workloads
|
- Tune buffer sizes for different workloads
|
||||||
- Connection pooling for frequently-used chains
|
- ~~Connection pooling for frequently-used chains~~ (done: `pool_size`/`pool_max_idle`)
|
||||||
|
|
||||||
## Security
|
## Security
|
||||||
|
|
||||||
- Optional SOCKS5 server authentication
|
- ~~Optional SOCKS5 server authentication~~ (done: fa36218)
|
||||||
- Rate limiting per source IP
|
- Rate limiting per source IP
|
||||||
- Access control lists
|
- Access control lists
|
||||||
|
|
||||||
|
## Observability
|
||||||
|
|
||||||
|
- ~~Prometheus metrics endpoint (`/metrics` in OpenMetrics format)~~ (done)
|
||||||
|
- Per-pool health test success rate tracking
|
||||||
|
- Per-pool latency breakdown in `/status`
|
||||||
|
|
||||||
## Docs
|
## Docs
|
||||||
|
|
||||||
- Man page
|
- Man page
|
||||||
|
|||||||
@@ -11,7 +11,7 @@ services:
|
|||||||
- ./src:/app/src:ro,Z
|
- ./src:/app/src:ro,Z
|
||||||
- ./config/s5p.yaml:/app/config/s5p.yaml:ro,Z
|
- ./config/s5p.yaml:/app/config/s5p.yaml:ro,Z
|
||||||
- ~/.cache/s5p:/data:Z
|
- ~/.cache/s5p:/data:Z
|
||||||
# command: ["-c", "/app/config/s5p.yaml", "--cprofile", "/data/s5p.prof"]
|
command: ["-c", "/app/config/s5p.yaml", "--cprofile", "/data/s5p.prof"]
|
||||||
network_mode: host
|
network_mode: host
|
||||||
logging:
|
logging:
|
||||||
driver: k8s-file
|
driver: k8s-file
|
||||||
|
|||||||
@@ -20,20 +20,47 @@ chain:
|
|||||||
# - socks4://proxy:1080 # post-Tor SOCKS4/4a proxy
|
# - socks4://proxy:1080 # post-Tor SOCKS4/4a proxy
|
||||||
# - http://user:pass@proxy:8080 # post-Tor HTTP CONNECT proxy
|
# - http://user:pass@proxy:8080 # post-Tor HTTP CONNECT proxy
|
||||||
|
|
||||||
# Managed proxy pool -- fetches from multiple sources, health-tests,
|
# Named proxy pools -- each pool has its own sources, health tests,
|
||||||
# and rotates alive proxies per-connection after the static chain.
|
# and state file. Listeners reference pools by name via the "pool:" key.
|
||||||
|
#
|
||||||
|
# proxy_pools:
|
||||||
|
# clean: # MITM-free proxies
|
||||||
|
# sources:
|
||||||
|
# - url: http://10.200.1.250:8081/proxies/all
|
||||||
|
# mitm: false # filter: mitm=0 query param
|
||||||
|
# allowed_protos: [socks5] # only accept socks5 from sources
|
||||||
|
# state_file: /data/pool-clean.json
|
||||||
|
# refresh: 300
|
||||||
|
# test_interval: 120
|
||||||
|
# test_timeout: 12
|
||||||
|
# max_fails: 5
|
||||||
|
# mitm: # MITM-capable proxies
|
||||||
|
# sources:
|
||||||
|
# - url: http://10.200.1.250:8081/proxies/all
|
||||||
|
# mitm: true # filter: mitm=1 query param
|
||||||
|
# state_file: /data/pool-mitm.json
|
||||||
|
# refresh: 300
|
||||||
|
# test_interval: 120
|
||||||
|
# test_timeout: 12
|
||||||
|
# max_fails: 5
|
||||||
|
|
||||||
|
# Single proxy pool (legacy, still supported -- becomes pool "default"):
|
||||||
# proxy_pool:
|
# proxy_pool:
|
||||||
# sources:
|
# sources:
|
||||||
# - url: http://10.200.1.250:8081/proxies
|
# - url: http://10.200.1.250:8081/proxies
|
||||||
# proto: socks5 # optional: filter by protocol
|
# proto: socks5 # optional: filter by protocol
|
||||||
# country: US # optional: filter by country
|
# country: US # optional: filter by country
|
||||||
# limit: 1000 # optional: max proxies to fetch
|
# limit: 1000 # optional: max proxies to fetch
|
||||||
|
# mitm: false # optional: filter by MITM status (true/false)
|
||||||
# - file: /etc/s5p/proxies.txt # text file, one proxy URL per line
|
# - file: /etc/s5p/proxies.txt # text file, one proxy URL per line
|
||||||
# refresh: 300 # re-fetch sources interval (seconds)
|
# refresh: 300 # re-fetch sources interval (seconds)
|
||||||
# test_interval: 120 # health test cycle interval (seconds)
|
# test_interval: 120 # health test cycle interval (seconds)
|
||||||
# test_url: http://httpbin.org/ip # URL for health checks
|
# test_targets: # TLS handshake targets (round-robin)
|
||||||
|
# - www.google.com
|
||||||
|
# - www.cloudflare.com
|
||||||
|
# - www.amazon.com
|
||||||
# test_timeout: 15 # per-test timeout (seconds)
|
# test_timeout: 15 # per-test timeout (seconds)
|
||||||
# test_concurrency: 5 # parallel health tests
|
# test_concurrency: 25 # max parallel tests (auto-scales to ~10% of pool)
|
||||||
# max_fails: 3 # consecutive fails before eviction
|
# max_fails: 3 # consecutive fails before eviction
|
||||||
# state_file: "" # empty = ~/.cache/s5p/pool.json
|
# state_file: "" # empty = ~/.cache/s5p/pool.json
|
||||||
# report_url: "" # POST dead proxies here (optional)
|
# report_url: "" # POST dead proxies here (optional)
|
||||||
@@ -47,6 +74,71 @@ chain:
|
|||||||
# cookie_file: "" # CookieAuthentication file path
|
# cookie_file: "" # CookieAuthentication file path
|
||||||
# newnym_interval: 0 # periodic NEWNYM (seconds, 0 = manual only)
|
# newnym_interval: 0 # periodic NEWNYM (seconds, 0 = manual only)
|
||||||
|
|
||||||
|
# Multi-Tor round-robin -- distribute traffic across multiple Tor nodes.
|
||||||
|
# When present, the first hop in each listener's chain is REPLACED at
|
||||||
|
# connection time by round-robin selection from this list. The first hop
|
||||||
|
# specified in each listener's chain acts as a fallback only; tor_nodes
|
||||||
|
# takes precedence for both client traffic and pool health tests.
|
||||||
|
# Connection pools are pre-warmed for every node listed here.
|
||||||
|
# tor_nodes:
|
||||||
|
# - socks5://10.200.1.1:9050
|
||||||
|
# - socks5://10.200.1.254:9050
|
||||||
|
# - socks5://10.200.1.250:9050
|
||||||
|
# - socks5://10.200.1.13:9050
|
||||||
|
|
||||||
|
# Multi-listener mode -- each listener gets its own address, chain,
|
||||||
|
# and optional pool assignment. The "pool" keyword in a chain appends
|
||||||
|
# a random alive proxy from the named pool (or "default" if unnamed).
|
||||||
|
# Multiple "pool" entries = multiple pool hops (deeper chaining).
|
||||||
|
#
|
||||||
|
# Per-hop pool references: use "pool:name" to draw from a specific pool
|
||||||
|
# at that hop position. Bare "pool" uses the listener's "pool:" default.
|
||||||
|
# This lets a single listener mix pools in one chain.
|
||||||
|
#
|
||||||
|
# Multi-candidate hops: use a YAML list to randomly pick from a set of
|
||||||
|
# pools at each hop. On each connection, one pool is chosen per hop.
|
||||||
|
#
|
||||||
|
# listeners:
|
||||||
|
# - listen: 0.0.0.0:1080
|
||||||
|
# pool: clean # default for bare "pool"
|
||||||
|
# auth: # SOCKS5 username/password (RFC 1929)
|
||||||
|
# alice: s3cret # username: password
|
||||||
|
# bob: hunter2
|
||||||
|
# bypass: # skip chain for these destinations
|
||||||
|
# - 127.0.0.0/8 # loopback
|
||||||
|
# - 10.0.0.0/8 # RFC 1918
|
||||||
|
# - 192.168.0.0/16 # RFC 1918
|
||||||
|
# - 172.16.0.0/12 # RFC 1918
|
||||||
|
# - fc00::/7 # IPv6 ULA
|
||||||
|
# - localhost # exact hostname
|
||||||
|
# - .local # domain suffix
|
||||||
|
# chain:
|
||||||
|
# - socks5://127.0.0.1:9050 # first hop (overridden by tor_nodes)
|
||||||
|
# - [pool:clean, pool:mitm] # random choice per connection
|
||||||
|
# - [pool:clean, pool:mitm] # independent random choice
|
||||||
|
#
|
||||||
|
# - listen: 0.0.0.0:1081
|
||||||
|
# pool: clean
|
||||||
|
# retries: 5 # override global retries for this listener
|
||||||
|
# chain:
|
||||||
|
# - socks5://127.0.0.1:9050
|
||||||
|
# - pool # bare: uses default "clean"
|
||||||
|
# - pool
|
||||||
|
#
|
||||||
|
# - listen: 0.0.0.0:1082
|
||||||
|
# chain:
|
||||||
|
# - socks5://127.0.0.1:9050 # Tor only (no pool hops)
|
||||||
|
#
|
||||||
|
# - listen: 0.0.0.0:1083
|
||||||
|
# pool: clean
|
||||||
|
# chain:
|
||||||
|
# - socks5://127.0.0.1:9050
|
||||||
|
# - pool # bare "pool" = clean
|
||||||
|
# - pool:mitm # explicit = mitm
|
||||||
|
#
|
||||||
|
# When using "listeners:", the top-level "listen" and "chain" keys are ignored.
|
||||||
|
# If "listeners:" is absent, the old format is used (single listener).
|
||||||
|
|
||||||
# Legacy proxy source (still supported, auto-converts to proxy_pool):
|
# Legacy proxy source (still supported, auto-converts to proxy_pool):
|
||||||
# proxy_source:
|
# proxy_source:
|
||||||
# url: http://10.200.1.250:8081/proxies
|
# url: http://10.200.1.250:8081/proxies
|
||||||
|
|||||||
15
config/s5p.service
Normal file
15
config/s5p.service
Normal file
@@ -0,0 +1,15 @@
|
|||||||
|
[Unit]
|
||||||
|
Description=s5p SOCKS5 proxy
|
||||||
|
After=network-online.target
|
||||||
|
Wants=network-online.target
|
||||||
|
|
||||||
|
[Service]
|
||||||
|
Type=simple
|
||||||
|
ExecStart=/usr/local/bin/s5p -c /etc/s5p/s5p.yaml
|
||||||
|
Restart=on-failure
|
||||||
|
RestartSec=5
|
||||||
|
KillSignal=SIGTERM
|
||||||
|
TimeoutStopSec=10
|
||||||
|
|
||||||
|
[Install]
|
||||||
|
WantedBy=multi-user.target
|
||||||
@@ -21,6 +21,18 @@ s5p --tracemalloc # memory profile (top 10)
|
|||||||
s5p --tracemalloc 20 # memory profile (top 20)
|
s5p --tracemalloc 20 # memory profile (top 20)
|
||||||
```
|
```
|
||||||
|
|
||||||
|
## Systemd
|
||||||
|
|
||||||
|
```
|
||||||
|
make install-service # install unit + reload
|
||||||
|
sudo systemctl enable --now s5p # enable + start
|
||||||
|
sudo systemctl status s5p # check status
|
||||||
|
sudo systemctl restart s5p # restart
|
||||||
|
sudo systemctl stop s5p # stop
|
||||||
|
journalctl -u s5p -f # follow logs
|
||||||
|
journalctl -u s5p --since "5 min ago" # recent logs
|
||||||
|
```
|
||||||
|
|
||||||
## Container
|
## Container
|
||||||
|
|
||||||
```
|
```
|
||||||
@@ -30,7 +42,18 @@ make logs # podman-compose logs -f
|
|||||||
make down # podman-compose down
|
make down # podman-compose down
|
||||||
```
|
```
|
||||||
|
|
||||||
Volumes: `./src` (ro), `./config/s5p.yaml` (ro), `~/.cache/s5p` → `/data` (pool state + profiles)
|
Volumes: `./config/s5p.yaml` (ro), `~/.cache/s5p` → `/data` (pool state + profiles)
|
||||||
|
Dev override: compose.yaml mounts `./src` (ro) over the baked-in source.
|
||||||
|
|
||||||
|
## CI
|
||||||
|
|
||||||
|
Gitea Actions runs on push to `main`:
|
||||||
|
|
||||||
|
1. `ruff check` + `pytest` (test)
|
||||||
|
2. `gitleaks detect` (secrets scan)
|
||||||
|
3. Build + push `harbor.mymx.me/s5p/s5p:latest`
|
||||||
|
|
||||||
|
Secrets: `HARBOR_USER` / `HARBOR_PASS` (configured in Gitea repo settings).
|
||||||
|
|
||||||
## Config
|
## Config
|
||||||
|
|
||||||
@@ -38,6 +61,118 @@ Volumes: `./src` (ro), `./config/s5p.yaml` (ro), `~/.cache/s5p` → `/data` (poo
|
|||||||
cp config/example.yaml config/s5p.yaml # create live config (gitignored)
|
cp config/example.yaml config/s5p.yaml # create live config (gitignored)
|
||||||
```
|
```
|
||||||
|
|
||||||
|
## Multi-Listener (config)
|
||||||
|
|
||||||
|
```yaml
|
||||||
|
listeners:
|
||||||
|
- listen: 0.0.0.0:1080
|
||||||
|
pool: clean # default for bare "pool"
|
||||||
|
chain:
|
||||||
|
- socks5://127.0.0.1:9050
|
||||||
|
- pool # Tor + 2 clean hops
|
||||||
|
- pool
|
||||||
|
- listen: 0.0.0.0:1081
|
||||||
|
pool: clean
|
||||||
|
chain:
|
||||||
|
- socks5://127.0.0.1:9050
|
||||||
|
- pool:clean # per-hop: explicit clean
|
||||||
|
- pool:mitm # per-hop: explicit mitm
|
||||||
|
- listen: 0.0.0.0:1082
|
||||||
|
chain:
|
||||||
|
- socks5://127.0.0.1:9050 # Tor only
|
||||||
|
- listen: 0.0.0.0:1083
|
||||||
|
chain:
|
||||||
|
- socks5://127.0.0.1:9050
|
||||||
|
- [pool:clean, pool:mitm] # random choice per connection
|
||||||
|
- [pool:clean, pool:mitm] # independent random choice
|
||||||
|
```
|
||||||
|
|
||||||
|
Per-hop pool: `pool` = listener default, `pool:name` = explicit pool,
|
||||||
|
`[pool:a, pool:b]` = random choice from candidates.
|
||||||
|
|
||||||
|
## Bypass Rules (config)
|
||||||
|
|
||||||
|
```yaml
|
||||||
|
listeners:
|
||||||
|
- listen: 0.0.0.0:1080
|
||||||
|
bypass:
|
||||||
|
- 127.0.0.0/8 # CIDR
|
||||||
|
- 10.0.0.0/8 # CIDR
|
||||||
|
- 192.168.0.0/16 # CIDR
|
||||||
|
- localhost # exact hostname
|
||||||
|
- .local # domain suffix
|
||||||
|
chain:
|
||||||
|
- socks5://127.0.0.1:9050
|
||||||
|
- pool
|
||||||
|
```
|
||||||
|
|
||||||
|
| Pattern | Type | Matches |
|
||||||
|
|---------|------|---------|
|
||||||
|
| `10.0.0.0/8` | CIDR | IPs in network |
|
||||||
|
| `127.0.0.1` | Exact IP | That IP only |
|
||||||
|
| `localhost` | Exact host | String equal |
|
||||||
|
| `.local` | Suffix | `*.local` and `local` |
|
||||||
|
|
||||||
|
## Listener Authentication (config)
|
||||||
|
|
||||||
|
```yaml
|
||||||
|
listeners:
|
||||||
|
- listen: 0.0.0.0:1080
|
||||||
|
auth:
|
||||||
|
alice: s3cret
|
||||||
|
bob: hunter2
|
||||||
|
chain:
|
||||||
|
- socks5://127.0.0.1:9050
|
||||||
|
- pool
|
||||||
|
```
|
||||||
|
|
||||||
|
```bash
|
||||||
|
curl -x socks5h://alice:s3cret@127.0.0.1:1080 https://example.com
|
||||||
|
```
|
||||||
|
|
||||||
|
No `auth:` key = no authentication required (default).
|
||||||
|
|
||||||
|
## Listener Retry Override (config)
|
||||||
|
|
||||||
|
```yaml
|
||||||
|
listeners:
|
||||||
|
- listen: 0.0.0.0:1080
|
||||||
|
retries: 5 # override global retries
|
||||||
|
chain:
|
||||||
|
- socks5://127.0.0.1:9050
|
||||||
|
- pool
|
||||||
|
- listen: 0.0.0.0:1082
|
||||||
|
chain:
|
||||||
|
- socks5://127.0.0.1:9050 # 0 = use global default
|
||||||
|
```
|
||||||
|
|
||||||
|
Per-listener `retries` overrides the global `retries` setting. Set to 0 (or
|
||||||
|
omit) to inherit the global value.
|
||||||
|
|
||||||
|
## Pool Protocol Filter (config)
|
||||||
|
|
||||||
|
```yaml
|
||||||
|
proxy_pools:
|
||||||
|
socks_only:
|
||||||
|
allowed_protos: [socks5] # reject http proxies
|
||||||
|
sources:
|
||||||
|
- url: http://api:8081/proxies/all
|
||||||
|
```
|
||||||
|
|
||||||
|
When set, proxies not matching `allowed_protos` are silently dropped during
|
||||||
|
merge. Useful when a source returns mixed protocols but the pool should
|
||||||
|
only serve a specific type.
|
||||||
|
|
||||||
|
## Multi-Tor Round-Robin (config)
|
||||||
|
|
||||||
|
```yaml
|
||||||
|
tor_nodes: # overrides first hop in all listeners
|
||||||
|
- socks5://10.200.1.1:9050
|
||||||
|
- socks5://10.200.1.254:9050
|
||||||
|
- socks5://10.200.1.250:9050
|
||||||
|
- socks5://10.200.1.13:9050
|
||||||
|
```
|
||||||
|
|
||||||
## Performance Tuning (config)
|
## Performance Tuning (config)
|
||||||
|
|
||||||
```yaml
|
```yaml
|
||||||
@@ -46,21 +181,56 @@ pool_size: 8 # pre-warmed TCP conns to first hop (0 = off)
|
|||||||
pool_max_idle: 30 # evict idle pooled conns (seconds)
|
pool_max_idle: 30 # evict idle pooled conns (seconds)
|
||||||
```
|
```
|
||||||
|
|
||||||
## Proxy Pool (config)
|
## Connection Pool (config)
|
||||||
|
|
||||||
```yaml
|
```yaml
|
||||||
proxy_pool:
|
pool_size: 8 # pre-warmed TCP connections per first hop (0 = off)
|
||||||
sources:
|
pool_max_idle: 30 # evict idle connections after N seconds
|
||||||
- url: http://10.200.1.250:8081/proxies
|
|
||||||
proto: socks5
|
|
||||||
limit: 1000
|
|
||||||
- file: /etc/s5p/proxies.txt
|
|
||||||
refresh: 300 # re-fetch interval
|
|
||||||
test_interval: 120 # health test cycle
|
|
||||||
max_fails: 3 # evict after N fails
|
|
||||||
report_url: "" # POST dead proxies (optional)
|
|
||||||
```
|
```
|
||||||
|
|
||||||
|
Pre-warms TCP connections to the first hop in the chain. Only the raw TCP
|
||||||
|
connection is pooled -- SOCKS/HTTP negotiation consumes it. One pool is
|
||||||
|
created per unique first hop (shared across listeners). Requires at least
|
||||||
|
one hop in `chain`.
|
||||||
|
|
||||||
|
| Setting | Default | Notes |
|
||||||
|
|---------|---------|-------|
|
||||||
|
| `pool_size` | 0 (off) | Connections per first hop |
|
||||||
|
| `pool_max_idle` | 30 | Idle eviction in seconds |
|
||||||
|
|
||||||
|
## Named Proxy Pools (config)
|
||||||
|
|
||||||
|
```yaml
|
||||||
|
proxy_pools:
|
||||||
|
clean:
|
||||||
|
sources:
|
||||||
|
- url: http://10.200.1.250:8081/proxies/all
|
||||||
|
mitm: false # adds ?mitm=0
|
||||||
|
state_file: /data/pool-clean.json
|
||||||
|
refresh: 300
|
||||||
|
test_interval: 120
|
||||||
|
max_fails: 3
|
||||||
|
mitm:
|
||||||
|
sources:
|
||||||
|
- url: http://10.200.1.250:8081/proxies/all
|
||||||
|
mitm: true # adds ?mitm=1
|
||||||
|
state_file: /data/pool-mitm.json
|
||||||
|
refresh: 300
|
||||||
|
test_interval: 120
|
||||||
|
max_fails: 3
|
||||||
|
```
|
||||||
|
|
||||||
|
Singular `proxy_pool:` still works (becomes pool "default").
|
||||||
|
|
||||||
|
## Source Filters (proxy_pool sources)
|
||||||
|
|
||||||
|
| Filter | Values | Query param |
|
||||||
|
|--------|--------|-------------|
|
||||||
|
| `proto` | socks5/socks4/http | `?proto=...` |
|
||||||
|
| `country` | ISO alpha-2 | `?country=...` |
|
||||||
|
| `limit` | integer | `?limit=...` |
|
||||||
|
| `mitm` | true/false | `?mitm=1` / `?mitm=0` |
|
||||||
|
|
||||||
## Tor Control Port (config)
|
## Tor Control Port (config)
|
||||||
|
|
||||||
```yaml
|
```yaml
|
||||||
@@ -107,7 +277,7 @@ http://user:pass@host:port
|
|||||||
s5p --api 127.0.0.1:1081 -c config/s5p.yaml # enable API
|
s5p --api 127.0.0.1:1081 -c config/s5p.yaml # enable API
|
||||||
|
|
||||||
curl -s http://127.0.0.1:1081/status | jq . # runtime status
|
curl -s http://127.0.0.1:1081/status | jq . # runtime status
|
||||||
curl -s http://127.0.0.1:1081/metrics | jq . # full metrics
|
curl -s http://127.0.0.1:1081/metrics # prometheus metrics
|
||||||
curl -s http://127.0.0.1:1081/pool | jq . # all proxies
|
curl -s http://127.0.0.1:1081/pool | jq . # all proxies
|
||||||
curl -s http://127.0.0.1:1081/pool/alive | jq . # alive only
|
curl -s http://127.0.0.1:1081/pool/alive | jq . # alive only
|
||||||
curl -s http://127.0.0.1:1081/config | jq . # current config
|
curl -s http://127.0.0.1:1081/config | jq . # current config
|
||||||
@@ -142,9 +312,30 @@ python -m pstats ~/.cache/s5p/s5p.prof # container profile output
|
|||||||
## Metrics Log
|
## Metrics Log
|
||||||
|
|
||||||
```
|
```
|
||||||
metrics: conn=142 ok=98 fail=44 retries=88 active=3 in=1.2M out=4.5M up=0h05m12s pool=42/65
|
metrics: conn=1842 ok=1790 fail=52 retries=67 active=3 in=50.0M out=1.0G rate=4.72/s p50=198.3ms p95=890.1ms up=1h01m01s pool=42/65
|
||||||
```
|
```
|
||||||
|
|
||||||
|
## Prometheus Metrics (`/metrics`)
|
||||||
|
|
||||||
|
```bash
|
||||||
|
curl -s http://127.0.0.1:1081/metrics
|
||||||
|
```
|
||||||
|
|
||||||
|
```
|
||||||
|
# TYPE s5p_connections counter
|
||||||
|
s5p_connections_total 1842
|
||||||
|
# TYPE s5p_active_connections gauge
|
||||||
|
s5p_active_connections 3
|
||||||
|
# TYPE s5p_pool_proxies_alive gauge
|
||||||
|
s5p_pool_proxies_alive{pool="clean"} 30
|
||||||
|
# TYPE s5p_chain_latency_seconds summary
|
||||||
|
s5p_chain_latency_seconds{quantile="0.5"} 0.198300
|
||||||
|
s5p_chain_latency_seconds{quantile="0.95"} 0.890100
|
||||||
|
# EOF
|
||||||
|
```
|
||||||
|
|
||||||
|
OpenMetrics format. Use `/status` for JSON equivalent.
|
||||||
|
|
||||||
## Troubleshooting
|
## Troubleshooting
|
||||||
|
|
||||||
| Symptom | Check |
|
| Symptom | Check |
|
||||||
|
|||||||
@@ -38,8 +38,40 @@ make build # podman-compose build
|
|||||||
make up # podman-compose up -d
|
make up # podman-compose up -d
|
||||||
```
|
```
|
||||||
|
|
||||||
The Alpine-based image (~59MB) contains only Python and PyYAML.
|
The Alpine-based image (~59MB) contains Python, PyYAML, and baked-in
|
||||||
Application source and config are bind-mounted at runtime.
|
source. Config is mounted at runtime. The compose.yaml volume mount
|
||||||
|
overrides source for local dev.
|
||||||
|
|
||||||
|
## Systemd Service
|
||||||
|
|
||||||
|
Install s5p as a systemd service for automatic startup and restart.
|
||||||
|
|
||||||
|
```bash
|
||||||
|
# Install the binary
|
||||||
|
cd ~/git/s5p
|
||||||
|
source .venv/bin/activate
|
||||||
|
pip install -e .
|
||||||
|
|
||||||
|
# Copy config
|
||||||
|
sudo mkdir -p /etc/s5p
|
||||||
|
sudo cp config/example.yaml /etc/s5p/s5p.yaml
|
||||||
|
sudo nano /etc/s5p/s5p.yaml # edit with your settings
|
||||||
|
|
||||||
|
# Install the unit (copies service file + daemon-reload)
|
||||||
|
make install-service
|
||||||
|
|
||||||
|
# Enable and start
|
||||||
|
sudo systemctl enable --now s5p
|
||||||
|
|
||||||
|
# Check status
|
||||||
|
sudo systemctl status s5p
|
||||||
|
journalctl -u s5p -f
|
||||||
|
```
|
||||||
|
|
||||||
|
The service unit expects:
|
||||||
|
- Binary at `/usr/local/bin/s5p`
|
||||||
|
- Config at `/etc/s5p/s5p.yaml`
|
||||||
|
- Restarts on failure with 5-second delay
|
||||||
|
|
||||||
## Install Tor (optional)
|
## Install Tor (optional)
|
||||||
|
|
||||||
|
|||||||
759
docs/USAGE.md
759
docs/USAGE.md
@@ -39,7 +39,6 @@ cp config/example.yaml config/s5p.yaml
|
|||||||
| `config/s5p.yaml` | no (gitignored) | Live config with real proxy addresses |
|
| `config/s5p.yaml` | no (gitignored) | Live config with real proxy addresses |
|
||||||
|
|
||||||
```yaml
|
```yaml
|
||||||
listen: 127.0.0.1:1080
|
|
||||||
timeout: 10
|
timeout: 10
|
||||||
retries: 3
|
retries: 3
|
||||||
log_level: info
|
log_level: info
|
||||||
@@ -48,24 +47,318 @@ pool_size: 0 # pre-warmed TCP connections to first hop (0 = disable
|
|||||||
pool_max_idle: 30 # max idle time for pooled connections (seconds)
|
pool_max_idle: 30 # max idle time for pooled connections (seconds)
|
||||||
api_listen: "" # control API bind address (empty = disabled)
|
api_listen: "" # control API bind address (empty = disabled)
|
||||||
|
|
||||||
chain:
|
# Named proxy pools (each with its own sources and filters)
|
||||||
- socks5://127.0.0.1:9050
|
proxy_pools:
|
||||||
|
clean:
|
||||||
|
sources:
|
||||||
|
- url: http://10.200.1.250:8081/proxies/all
|
||||||
|
mitm: false
|
||||||
|
refresh: 300
|
||||||
|
test_interval: 120
|
||||||
|
test_timeout: 8
|
||||||
|
max_fails: 3
|
||||||
|
|
||||||
proxy_pool:
|
# Multi-listener (each port gets its own chain depth and pool)
|
||||||
sources:
|
listeners:
|
||||||
- url: http://10.200.1.250:8081/proxies
|
- listen: 0.0.0.0:1080
|
||||||
proto: socks5
|
pool: clean
|
||||||
limit: 1000
|
chain:
|
||||||
- file: /etc/s5p/proxies.txt
|
- socks5://127.0.0.1:9050
|
||||||
refresh: 300
|
- pool # Tor + 2 clean proxies
|
||||||
test_interval: 120
|
- pool
|
||||||
test_url: http://httpbin.org/ip
|
- listen: 0.0.0.0:1081
|
||||||
test_timeout: 15
|
pool: clean
|
||||||
test_concurrency: 5
|
chain:
|
||||||
max_fails: 3
|
- socks5://127.0.0.1:9050
|
||||||
state_file: "" # empty = ~/.cache/s5p/pool.json
|
- pool # Tor + 1 clean proxy
|
||||||
|
|
||||||
|
# Or single-listener (old format):
|
||||||
|
# listen: 127.0.0.1:1080
|
||||||
|
# chain:
|
||||||
|
# - socks5://127.0.0.1:9050
|
||||||
```
|
```
|
||||||
|
|
||||||
|
## Multi-Tor Round-Robin
|
||||||
|
|
||||||
|
Distribute traffic across multiple Tor nodes instead of funneling everything
|
||||||
|
through a single one. When `tor_nodes` is configured, the first hop in each
|
||||||
|
listener's chain is replaced at connection time by round-robin selection.
|
||||||
|
Health tests also rotate across all nodes.
|
||||||
|
|
||||||
|
```yaml
|
||||||
|
tor_nodes:
|
||||||
|
- socks5://10.200.1.1:9050
|
||||||
|
- socks5://10.200.1.254:9050
|
||||||
|
- socks5://10.200.1.250:9050
|
||||||
|
- socks5://10.200.1.13:9050
|
||||||
|
```
|
||||||
|
|
||||||
|
When `tor_nodes` is absent, listeners use their configured first hop as before.
|
||||||
|
When present, `tor_nodes` overrides the first hop everywhere.
|
||||||
|
|
||||||
|
If `pool_size > 0`, pre-warmed connection pools are created for all nodes
|
||||||
|
automatically.
|
||||||
|
|
||||||
|
### API
|
||||||
|
|
||||||
|
`tor_nodes` appears in both `/config` and `/status` responses:
|
||||||
|
|
||||||
|
```bash
|
||||||
|
curl -s http://127.0.0.1:1090/config | jq '.tor_nodes'
|
||||||
|
curl -s http://127.0.0.1:1090/status | jq '.tor_nodes'
|
||||||
|
```
|
||||||
|
|
||||||
|
## Named Proxy Pools
|
||||||
|
|
||||||
|
Define multiple proxy pools with different source filters. Each listener can
|
||||||
|
reference a specific pool by name via the `pool:` key.
|
||||||
|
|
||||||
|
```yaml
|
||||||
|
proxy_pools:
|
||||||
|
clean:
|
||||||
|
sources:
|
||||||
|
- url: http://10.200.1.250:8081/proxies/all
|
||||||
|
mitm: false
|
||||||
|
state_file: /data/pool-clean.json
|
||||||
|
refresh: 300
|
||||||
|
test_interval: 120
|
||||||
|
test_timeout: 8
|
||||||
|
max_fails: 3
|
||||||
|
mitm:
|
||||||
|
sources:
|
||||||
|
- url: http://10.200.1.250:8081/proxies/all
|
||||||
|
mitm: true
|
||||||
|
state_file: /data/pool-mitm.json
|
||||||
|
refresh: 300
|
||||||
|
test_interval: 120
|
||||||
|
test_timeout: 8
|
||||||
|
max_fails: 3
|
||||||
|
```
|
||||||
|
|
||||||
|
Each pool has independent health testing, state persistence, and source
|
||||||
|
refresh cycles. The `mitm` source filter adds `?mitm=0` or `?mitm=1` to
|
||||||
|
API requests.
|
||||||
|
|
||||||
|
### Pool protocol filter
|
||||||
|
|
||||||
|
Use `allowed_protos` to restrict a pool to specific proxy protocols.
|
||||||
|
Proxies not matching the list are silently dropped during merge, regardless
|
||||||
|
of source type (API or file).
|
||||||
|
|
||||||
|
```yaml
|
||||||
|
proxy_pools:
|
||||||
|
socks_only:
|
||||||
|
allowed_protos: [socks5] # reject http/socks4 proxies
|
||||||
|
sources:
|
||||||
|
- url: http://api:8081/proxies/all
|
||||||
|
any_proto:
|
||||||
|
sources:
|
||||||
|
- url: http://api:8081/proxies/all # no filter, accept all
|
||||||
|
```
|
||||||
|
|
||||||
|
Valid values: `socks5`, `socks4`, `http`. Visible in `/config` API response
|
||||||
|
when set.
|
||||||
|
|
||||||
|
### Backward compatibility
|
||||||
|
|
||||||
|
The singular `proxy_pool:` key still works -- it registers as pool `"default"`.
|
||||||
|
If both `proxy_pool:` and `proxy_pools:` are present, `proxy_pools:` wins;
|
||||||
|
the singular is registered as `"default"` only when not already defined.
|
||||||
|
|
||||||
|
## Multi-Listener Mode
|
||||||
|
|
||||||
|
Run multiple listeners on different ports, each with a different number
|
||||||
|
of proxy hops and pool assignment. Config-file only (not available via CLI).
|
||||||
|
|
||||||
|
```yaml
|
||||||
|
listeners:
|
||||||
|
- listen: 0.0.0.0:1080
|
||||||
|
pool: clean
|
||||||
|
chain:
|
||||||
|
- socks5://10.200.1.13:9050
|
||||||
|
- pool # Tor + 2 clean proxies
|
||||||
|
- pool
|
||||||
|
|
||||||
|
- listen: 0.0.0.0:1081
|
||||||
|
pool: clean
|
||||||
|
chain:
|
||||||
|
- socks5://10.200.1.13:9050
|
||||||
|
- pool # Tor + 1 clean proxy
|
||||||
|
|
||||||
|
- listen: 0.0.0.0:1082
|
||||||
|
chain:
|
||||||
|
- socks5://10.200.1.13:9050 # Tor only (no pool)
|
||||||
|
|
||||||
|
- listen: 0.0.0.0:1083
|
||||||
|
pool: mitm
|
||||||
|
chain:
|
||||||
|
- socks5://10.200.1.13:9050
|
||||||
|
- pool # Tor + 2 MITM proxies
|
||||||
|
- pool
|
||||||
|
```
|
||||||
|
|
||||||
|
### Per-hop pool references
|
||||||
|
|
||||||
|
Use `pool:name` to draw from a specific named pool at that hop position.
|
||||||
|
Bare `pool` uses the listener's `pool:` default. This lets a single listener
|
||||||
|
mix pools in one chain.
|
||||||
|
|
||||||
|
```yaml
|
||||||
|
listeners:
|
||||||
|
- listen: 0.0.0.0:1080
|
||||||
|
pool: clean # default for bare "pool"
|
||||||
|
chain:
|
||||||
|
- socks5://10.200.1.13:9050
|
||||||
|
- pool:clean # explicit: from clean pool
|
||||||
|
- pool:mitm # explicit: from mitm pool
|
||||||
|
|
||||||
|
- listen: 0.0.0.0:1081
|
||||||
|
pool: clean
|
||||||
|
chain:
|
||||||
|
- socks5://10.200.1.13:9050
|
||||||
|
- pool # bare: uses default "clean"
|
||||||
|
- pool:mitm # explicit: from mitm pool
|
||||||
|
```
|
||||||
|
|
||||||
|
| Syntax | Resolves to |
|
||||||
|
|--------|-------------|
|
||||||
|
| `pool` | Listener's `pool:` value, or `"default"` if unset |
|
||||||
|
| `pool:name` | Named pool `name` (case-sensitive) |
|
||||||
|
| `pool:` | Same as bare `pool` (empty name = default) |
|
||||||
|
| `Pool:name` | Prefix is case-insensitive; name is case-sensitive |
|
||||||
|
| `[pool:a, pool:b]` | Random choice from candidates `a` or `b` per connection |
|
||||||
|
|
||||||
|
The `pool` keyword in a chain means "append a random alive proxy from the
|
||||||
|
assigned pool". Multiple `pool` entries = multiple pool hops (deeper chaining).
|
||||||
|
|
||||||
|
### Multi-candidate pool hops
|
||||||
|
|
||||||
|
Use a YAML list to randomly pick from a set of candidate pools at each hop.
|
||||||
|
On each connection, one candidate is chosen at random per hop (independently).
|
||||||
|
|
||||||
|
```yaml
|
||||||
|
listeners:
|
||||||
|
- listen: 0.0.0.0:1080
|
||||||
|
chain:
|
||||||
|
- socks5://10.200.1.13:9050
|
||||||
|
- [pool:clean, pool:mitm] # hop 1: random choice
|
||||||
|
- [pool:clean, pool:mitm] # hop 2: random choice
|
||||||
|
```
|
||||||
|
|
||||||
|
Single-element pool references (`pool`, `pool:name`) and multi-candidate
|
||||||
|
lists can be mixed freely in the same chain. All existing syntax is unchanged.
|
||||||
|
|
||||||
|
When `pool:` is omitted on a listener with pool hops, it defaults to
|
||||||
|
`"default"`. A listener referencing an unknown pool name causes a fatal
|
||||||
|
error at startup. Listeners without pool hops ignore the `pool:` key.
|
||||||
|
|
||||||
|
| Resource | Scope | Notes |
|
||||||
|
|----------|-------|-------|
|
||||||
|
| ProxyPool | per name | Each named pool is independent |
|
||||||
|
| TorController | shared | One Tor instance |
|
||||||
|
| Metrics | shared | Aggregate stats across listeners |
|
||||||
|
| Semaphore | shared | Global `max_connections` cap |
|
||||||
|
| API server | shared | One control endpoint |
|
||||||
|
| FirstHopPool | per unique first hop | Listeners with same first hop share it |
|
||||||
|
| Chain + pool_hops | per listener | Each listener has its own chain depth |
|
||||||
|
|
||||||
|
## Listener Authentication
|
||||||
|
|
||||||
|
Per-listener SOCKS5 username/password authentication (RFC 1929). When `auth`
|
||||||
|
is configured on a listener, clients must authenticate before connecting.
|
||||||
|
Listeners without `auth` continue to accept unauthenticated connections.
|
||||||
|
|
||||||
|
```yaml
|
||||||
|
listeners:
|
||||||
|
- listen: 0.0.0.0:1080
|
||||||
|
auth:
|
||||||
|
alice: s3cret
|
||||||
|
bob: hunter2
|
||||||
|
chain:
|
||||||
|
- socks5://127.0.0.1:9050
|
||||||
|
- pool
|
||||||
|
```
|
||||||
|
|
||||||
|
### Testing with curl
|
||||||
|
|
||||||
|
```bash
|
||||||
|
curl --proxy socks5h://alice:s3cret@127.0.0.1:1080 https://example.com
|
||||||
|
```
|
||||||
|
|
||||||
|
### Behavior
|
||||||
|
|
||||||
|
| Client offers | Listener has `auth` | Result |
|
||||||
|
|---------------|---------------------|--------|
|
||||||
|
| `0x00` (no-auth) | yes | Rejected (`0xFF`) |
|
||||||
|
| `0x02` (user/pass) | yes | Subnegotiation, then accept/reject |
|
||||||
|
| `0x00` (no-auth) | no | Accepted (current behavior) |
|
||||||
|
| `0x02` (user/pass) | no | Rejected (`0xFF`) |
|
||||||
|
|
||||||
|
Authentication failures are logged and counted in the `auth_fail` metric.
|
||||||
|
The `/status` API endpoint includes `"auth": true` on authenticated listeners.
|
||||||
|
The `/config` endpoint shows `"auth_users": N` (passwords are never exposed).
|
||||||
|
|
||||||
|
### Mixed listeners
|
||||||
|
|
||||||
|
Different listeners can have different auth settings:
|
||||||
|
|
||||||
|
```yaml
|
||||||
|
listeners:
|
||||||
|
- listen: 0.0.0.0:1080 # public, no auth
|
||||||
|
chain:
|
||||||
|
- socks5://127.0.0.1:9050
|
||||||
|
- listen: 0.0.0.0:1081 # authenticated
|
||||||
|
auth:
|
||||||
|
alice: s3cret
|
||||||
|
chain:
|
||||||
|
- socks5://127.0.0.1:9050
|
||||||
|
- pool
|
||||||
|
```
|
||||||
|
|
||||||
|
## Bypass Rules
|
||||||
|
|
||||||
|
Per-listener rules to skip the chain for specific destinations. When a target
|
||||||
|
matches a bypass rule, s5p connects directly (no chain, no pool hops).
|
||||||
|
|
||||||
|
```yaml
|
||||||
|
listeners:
|
||||||
|
- listen: 0.0.0.0:1080
|
||||||
|
bypass:
|
||||||
|
- 127.0.0.0/8 # CIDR: loopback
|
||||||
|
- 10.0.0.0/8 # CIDR: RFC 1918
|
||||||
|
- 192.168.0.0/16 # CIDR: RFC 1918
|
||||||
|
- fc00::/7 # CIDR: IPv6 ULA
|
||||||
|
- localhost # exact hostname
|
||||||
|
- .local # domain suffix (matches *.local and local)
|
||||||
|
chain:
|
||||||
|
- socks5://127.0.0.1:9050
|
||||||
|
- pool
|
||||||
|
```
|
||||||
|
|
||||||
|
### Rule syntax
|
||||||
|
|
||||||
|
| Pattern | Type | Matches |
|
||||||
|
|---------|------|---------|
|
||||||
|
| `10.0.0.0/8` | CIDR | Any IP in the network |
|
||||||
|
| `127.0.0.1` | Exact IP | That IP only |
|
||||||
|
| `localhost` | Exact hostname | String-equal match |
|
||||||
|
| `.local` | Domain suffix | `*.local` and `local` itself |
|
||||||
|
|
||||||
|
CIDR rules only match IP addresses, not hostnames. Domain suffix rules only
|
||||||
|
match hostnames, not IPs. Exact rules match both (string compare for hostnames,
|
||||||
|
parsed compare for IPs).
|
||||||
|
|
||||||
|
When bypass is active, retries are disabled (direct connections are not retried).
|
||||||
|
|
||||||
|
### Backward compatibility
|
||||||
|
|
||||||
|
When no `listeners:` key is present, the old `listen`/`chain` format creates
|
||||||
|
a single listener. If `proxy_pool` is configured without explicit `pool` in
|
||||||
|
the chain, legacy behavior is preserved (1 pool hop auto-appended).
|
||||||
|
|
||||||
|
Settings that require a restart: `listeners`, `listen`, `chain`, `pool_size`,
|
||||||
|
`pool_max_idle`, `api_listen`.
|
||||||
|
|
||||||
## Proxy URL Format
|
## Proxy URL Format
|
||||||
|
|
||||||
```
|
```
|
||||||
@@ -104,14 +397,18 @@ proxy_pool:
|
|||||||
proto: socks5 # optional: filter by protocol
|
proto: socks5 # optional: filter by protocol
|
||||||
country: US # optional: filter by country
|
country: US # optional: filter by country
|
||||||
limit: 1000 # max proxies to fetch from API
|
limit: 1000 # max proxies to fetch from API
|
||||||
|
mitm: false # optional: filter by MITM status (true/false)
|
||||||
- file: /etc/s5p/proxies.txt # text file, one proxy URL per line
|
- file: /etc/s5p/proxies.txt # text file, one proxy URL per line
|
||||||
refresh: 300 # re-fetch sources every 300 seconds
|
refresh: 300 # re-fetch sources every 300 seconds
|
||||||
test_interval: 120 # health test cycle every 120 seconds
|
test_interval: 120 # health test cycle every 120 seconds
|
||||||
test_url: http://httpbin.org/ip # URL for health checks
|
test_targets: # TLS handshake targets (round-robin)
|
||||||
|
- www.google.com
|
||||||
|
- www.cloudflare.com
|
||||||
|
- www.amazon.com
|
||||||
test_timeout: 15 # per-test timeout (seconds)
|
test_timeout: 15 # per-test timeout (seconds)
|
||||||
test_concurrency: 5 # parallel health tests
|
test_concurrency: 25 # max parallel tests (auto-scales to ~10% of pool)
|
||||||
max_fails: 3 # evict after N consecutive failures
|
max_fails: 3 # evict after N consecutive failures
|
||||||
state_file: "" # empty = ~/.cache/s5p/pool.json
|
state_file: "" # empty = ~/.cache/s5p/pool[-name].json
|
||||||
report_url: "" # POST dead proxies here (optional)
|
report_url: "" # POST dead proxies here (optional)
|
||||||
```
|
```
|
||||||
|
|
||||||
@@ -122,6 +419,17 @@ proxy_pool:
|
|||||||
| HTTP API | `url` | JSON: `{"proxies": [{"proto": "socks5", "proxy": "host:port"}, ...]}` |
|
| HTTP API | `url` | JSON: `{"proxies": [{"proto": "socks5", "proxy": "host:port"}, ...]}` |
|
||||||
| Text file | `file` | One proxy URL per line, `#` comments, blank lines ignored |
|
| Text file | `file` | One proxy URL per line, `#` comments, blank lines ignored |
|
||||||
|
|
||||||
|
### Source filters
|
||||||
|
|
||||||
|
| Filter | Values | Effect |
|
||||||
|
|--------|--------|--------|
|
||||||
|
| `proto` | `socks5`, `socks4`, `http` | Adds `?proto=...` to API URL |
|
||||||
|
| `country` | ISO 3166-1 alpha-2 | Adds `?country=...` to API URL |
|
||||||
|
| `limit` | integer | Adds `?limit=...` to API URL |
|
||||||
|
| `mitm` | `true` / `false` | Adds `?mitm=1` / `?mitm=0` to API URL |
|
||||||
|
|
||||||
|
The `mitm` filter is silently ignored for file sources.
|
||||||
|
|
||||||
### Proxy file format
|
### Proxy file format
|
||||||
|
|
||||||
```
|
```
|
||||||
@@ -134,8 +442,13 @@ http://proxy.example.com:8080
|
|||||||
### Health testing
|
### Health testing
|
||||||
|
|
||||||
Each cycle tests all proxies through the full chain (static chain + proxy)
|
Each cycle tests all proxies through the full chain (static chain + proxy)
|
||||||
by sending an HTTP GET to `test_url`. Proxies are marked alive on `200` response.
|
by performing a TLS handshake against one of the `test_targets` (rotated
|
||||||
After `max_fails` consecutive failures, a proxy is evicted.
|
round-robin). A successful handshake marks the proxy alive. After `max_fails`
|
||||||
|
consecutive failures, a proxy is evicted.
|
||||||
|
|
||||||
|
Concurrency auto-scales to ~10% of the proxy count, capped by
|
||||||
|
`test_concurrency` (default 25, minimum 3). For example, a pool of 73 proxies
|
||||||
|
tests 7 at a time rather than saturating the upstream Tor node.
|
||||||
|
|
||||||
Before each health test cycle, the static chain is tested without any pool
|
Before each health test cycle, the static chain is tested without any pool
|
||||||
proxy. If the chain itself is unreachable (e.g., Tor is down), proxy tests
|
proxy. If the chain itself is unreachable (e.g., Tor is down), proxy tests
|
||||||
@@ -230,56 +543,344 @@ api_listen: 127.0.0.1:1081
|
|||||||
s5p --api 127.0.0.1:1081 -c config/s5p.yaml
|
s5p --api 127.0.0.1:1081 -c config/s5p.yaml
|
||||||
```
|
```
|
||||||
|
|
||||||
### Read endpoints
|
Responses are `application/json` unless noted otherwise. Errors return
|
||||||
|
`{"error": "message"}` with appropriate status code (400, 404, 405, 500).
|
||||||
|
|
||||||
| Method | Path | Description |
|
Settings that require a restart: `listen`, `chain`, `pool_size`, `pool_max_idle`, `api_listen`.
|
||||||
|--------|------|-------------|
|
|
||||||
| `GET` | `/status` | Combined summary: uptime, metrics, pool stats, chain |
|
|
||||||
| `GET` | `/metrics` | Full metrics counters (connections, bytes, uptime) |
|
|
||||||
| `GET` | `/pool` | All proxies with per-entry state |
|
|
||||||
| `GET` | `/pool/alive` | Alive proxies only |
|
|
||||||
| `GET` | `/config` | Current runtime config (sanitized) |
|
|
||||||
|
|
||||||
### Write endpoints
|
### API Reference
|
||||||
|
|
||||||
| Method | Path | Description |
|
#### `GET /status`
|
||||||
|--------|------|-------------|
|
|
||||||
| `POST` | `/reload` | Re-read config file (replaces SIGHUP) |
|
|
||||||
| `POST` | `/pool/test` | Trigger immediate health test cycle |
|
|
||||||
| `POST` | `/pool/refresh` | Trigger immediate source re-fetch |
|
|
||||||
|
|
||||||
All responses are `application/json`. Errors return `{"error": "message"}` with
|
Combined runtime summary: uptime, metrics, pool stats, listeners.
|
||||||
appropriate status code (400, 404, 405, 500).
|
|
||||||
|
|
||||||
### Examples
|
|
||||||
|
|
||||||
```bash
|
```bash
|
||||||
# Runtime status
|
|
||||||
curl -s http://127.0.0.1:1081/status | jq .
|
curl -s http://127.0.0.1:1081/status | jq .
|
||||||
|
```
|
||||||
|
|
||||||
# Full metrics
|
```json
|
||||||
curl -s http://127.0.0.1:1081/metrics | jq .
|
{
|
||||||
|
"uptime": 3661.2,
|
||||||
|
"connections": 1842,
|
||||||
|
"success": 1790,
|
||||||
|
"failed": 52,
|
||||||
|
"active": 3,
|
||||||
|
"bytes_in": 52428800,
|
||||||
|
"bytes_out": 1073741824,
|
||||||
|
"rate": 4.72,
|
||||||
|
"latency": {"count": 1000, "min": 45.2, "max": 2841.7, "avg": 312.4, "p50": 198.3, "p95": 890.1, "p99": 1523.6},
|
||||||
|
"pool": {"alive": 42, "total": 65},
|
||||||
|
"pools": {
|
||||||
|
"clean": {"alive": 30, "total": 45},
|
||||||
|
"mitm": {"alive": 12, "total": 20}
|
||||||
|
},
|
||||||
|
"tor_nodes": ["socks5://10.200.1.1:9050", "socks5://10.200.1.254:9050"],
|
||||||
|
"listeners": [
|
||||||
|
{
|
||||||
|
"listen": "0.0.0.0:1080",
|
||||||
|
"chain": ["socks5://10.200.1.13:9050"],
|
||||||
|
"pool_hops": 2,
|
||||||
|
"pool": "clean",
|
||||||
|
"auth": true,
|
||||||
|
"latency": {"count": 500, "p50": 1800.2, "p95": 8200.1, "p99": 10500.3, "...": "..."}
|
||||||
|
}
|
||||||
|
]
|
||||||
|
}
|
||||||
|
```
|
||||||
|
|
||||||
# Pool state (all proxies)
|
| Field | Type | Description |
|
||||||
|
|-------|------|-------------|
|
||||||
|
| `uptime` | float | Seconds since server start |
|
||||||
|
| `connections` | int | Total incoming connections |
|
||||||
|
| `success` | int | Successfully relayed |
|
||||||
|
| `failed` | int | All retries exhausted |
|
||||||
|
| `active` | int | Currently relaying |
|
||||||
|
| `bytes_in` | int | Bytes client -> remote |
|
||||||
|
| `bytes_out` | int | Bytes remote -> client |
|
||||||
|
| `rate` | float | Connections/sec (rolling window) |
|
||||||
|
| `latency` | object/null | Aggregate chain setup latency (ms), null if no samples |
|
||||||
|
| `pool` | object | Aggregate pool counts (present when pool active) |
|
||||||
|
| `pools` | object | Per-pool counts (present when multiple pools) |
|
||||||
|
| `tor_nodes` | array | Tor node URLs (present when configured) |
|
||||||
|
| `listeners` | array | Per-listener state with chain, pool, latency |
|
||||||
|
| `listeners[].auth` | bool | Present and `true` when auth is enabled |
|
||||||
|
|
||||||
|
#### `GET /metrics`
|
||||||
|
|
||||||
|
Prometheus/OpenMetrics exposition format. Content-Type:
|
||||||
|
`application/openmetrics-text; version=1.0.0; charset=utf-8`.
|
||||||
|
|
||||||
|
```bash
|
||||||
|
curl -s http://127.0.0.1:1081/metrics
|
||||||
|
```
|
||||||
|
|
||||||
|
```
|
||||||
|
# HELP s5p_connections Total connection attempts.
|
||||||
|
# TYPE s5p_connections counter
|
||||||
|
s5p_connections_total 1842
|
||||||
|
# HELP s5p_connections_success Connections successfully relayed.
|
||||||
|
# TYPE s5p_connections_success counter
|
||||||
|
s5p_connections_success_total 1790
|
||||||
|
# HELP s5p_connections_failed Connection failures.
|
||||||
|
# TYPE s5p_connections_failed counter
|
||||||
|
s5p_connections_failed_total 52
|
||||||
|
# HELP s5p_retries Connection retry attempts.
|
||||||
|
# TYPE s5p_retries counter
|
||||||
|
s5p_retries_total 67
|
||||||
|
# HELP s5p_auth_failures SOCKS5 authentication failures.
|
||||||
|
# TYPE s5p_auth_failures counter
|
||||||
|
s5p_auth_failures_total 0
|
||||||
|
# HELP s5p_bytes_in Bytes received from clients.
|
||||||
|
# TYPE s5p_bytes_in counter
|
||||||
|
s5p_bytes_in_total 52428800
|
||||||
|
# HELP s5p_bytes_out Bytes sent to clients.
|
||||||
|
# TYPE s5p_bytes_out counter
|
||||||
|
s5p_bytes_out_total 1073741824
|
||||||
|
# HELP s5p_active_connections Currently open connections.
|
||||||
|
# TYPE s5p_active_connections gauge
|
||||||
|
s5p_active_connections 3
|
||||||
|
# HELP s5p_uptime_seconds Seconds since server start.
|
||||||
|
# TYPE s5p_uptime_seconds gauge
|
||||||
|
s5p_uptime_seconds 3661.2
|
||||||
|
# HELP s5p_connection_rate Connections per second (rolling window).
|
||||||
|
# TYPE s5p_connection_rate gauge
|
||||||
|
s5p_connection_rate 4.72
|
||||||
|
# HELP s5p_pool_proxies_alive Alive proxies in pool.
|
||||||
|
# TYPE s5p_pool_proxies_alive gauge
|
||||||
|
s5p_pool_proxies_alive{pool="clean"} 30
|
||||||
|
s5p_pool_proxies_alive{pool="mitm"} 12
|
||||||
|
# HELP s5p_pool_proxies_total Total proxies in pool.
|
||||||
|
# TYPE s5p_pool_proxies_total gauge
|
||||||
|
s5p_pool_proxies_total{pool="clean"} 45
|
||||||
|
s5p_pool_proxies_total{pool="mitm"} 20
|
||||||
|
# HELP s5p_chain_latency_seconds Chain build latency in seconds.
|
||||||
|
# TYPE s5p_chain_latency_seconds summary
|
||||||
|
s5p_chain_latency_seconds{quantile="0.5"} 0.198300
|
||||||
|
s5p_chain_latency_seconds{quantile="0.95"} 0.890100
|
||||||
|
s5p_chain_latency_seconds{quantile="0.99"} 1.523600
|
||||||
|
s5p_chain_latency_seconds_count 1000
|
||||||
|
s5p_chain_latency_seconds_sum 312.400000
|
||||||
|
# EOF
|
||||||
|
```
|
||||||
|
|
||||||
|
**Metrics exposed:**
|
||||||
|
|
||||||
|
| Metric | Type | Labels | Description |
|
||||||
|
|--------|------|--------|-------------|
|
||||||
|
| `s5p_connections` | counter | -- | Total connection attempts |
|
||||||
|
| `s5p_connections_success` | counter | -- | Successfully relayed |
|
||||||
|
| `s5p_connections_failed` | counter | -- | Connection failures |
|
||||||
|
| `s5p_retries` | counter | -- | Retry attempts |
|
||||||
|
| `s5p_auth_failures` | counter | -- | SOCKS5 auth failures |
|
||||||
|
| `s5p_bytes_in` | counter | -- | Bytes received from clients |
|
||||||
|
| `s5p_bytes_out` | counter | -- | Bytes sent to clients |
|
||||||
|
| `s5p_active_connections` | gauge | -- | Currently open connections |
|
||||||
|
| `s5p_uptime_seconds` | gauge | -- | Seconds since server start |
|
||||||
|
| `s5p_connection_rate` | gauge | -- | Connections/sec (rolling window) |
|
||||||
|
| `s5p_pool_proxies_alive` | gauge | `pool` | Alive proxies per pool |
|
||||||
|
| `s5p_pool_proxies_total` | gauge | `pool` | Total proxies per pool |
|
||||||
|
| `s5p_chain_latency_seconds` | summary | `quantile` | Chain build latency (p50/p95/p99) |
|
||||||
|
| `s5p_listener_chain_latency_seconds` | summary | `listener`, `quantile` | Per-listener chain latency |
|
||||||
|
|
||||||
|
**Prometheus scrape config:**
|
||||||
|
|
||||||
|
```yaml
|
||||||
|
scrape_configs:
|
||||||
|
- job_name: s5p
|
||||||
|
metrics_path: /metrics
|
||||||
|
static_configs:
|
||||||
|
- targets: ["127.0.0.1:1081"]
|
||||||
|
```
|
||||||
|
|
||||||
|
#### `GET /pool`
|
||||||
|
|
||||||
|
All proxies with per-entry state.
|
||||||
|
|
||||||
|
```bash
|
||||||
curl -s http://127.0.0.1:1081/pool | jq .
|
curl -s http://127.0.0.1:1081/pool | jq .
|
||||||
|
```
|
||||||
|
|
||||||
# Alive proxies only
|
```json
|
||||||
|
{
|
||||||
|
"alive": 42,
|
||||||
|
"total": 65,
|
||||||
|
"pools": {
|
||||||
|
"clean": {"alive": 30, "total": 45},
|
||||||
|
"mitm": {"alive": 12, "total": 20}
|
||||||
|
},
|
||||||
|
"proxies": {
|
||||||
|
"socks5://1.2.3.4:1080": {
|
||||||
|
"alive": true,
|
||||||
|
"fails": 0,
|
||||||
|
"tests": 12,
|
||||||
|
"last_ok": 1708012345.6,
|
||||||
|
"last_test": 1708012345.6,
|
||||||
|
"last_seen": 1708012300.0,
|
||||||
|
"pool": "clean"
|
||||||
|
}
|
||||||
|
}
|
||||||
|
}
|
||||||
|
```
|
||||||
|
|
||||||
|
| Field | Type | Description |
|
||||||
|
|-------|------|-------------|
|
||||||
|
| `alive` | int | Total alive proxies across all pools |
|
||||||
|
| `total` | int | Total proxies across all pools |
|
||||||
|
| `pools` | object | Per-pool counts (present when multiple pools) |
|
||||||
|
| `proxies` | object | Keyed by proxy URL |
|
||||||
|
| `proxies[].alive` | bool | Currently passing health tests |
|
||||||
|
| `proxies[].fails` | int | Consecutive failures |
|
||||||
|
| `proxies[].tests` | int | Total health tests performed |
|
||||||
|
| `proxies[].last_ok` | float | Unix timestamp of last successful test |
|
||||||
|
| `proxies[].last_test` | float | Unix timestamp of last test (pass or fail) |
|
||||||
|
| `proxies[].last_seen` | float | Unix timestamp of last source refresh that included this proxy |
|
||||||
|
| `proxies[].pool` | string | Pool name (present when multiple pools) |
|
||||||
|
|
||||||
|
#### `GET /pool/alive`
|
||||||
|
|
||||||
|
Same schema as `/pool`, filtered to alive proxies only.
|
||||||
|
|
||||||
|
```bash
|
||||||
curl -s http://127.0.0.1:1081/pool/alive | jq '.proxies | length'
|
curl -s http://127.0.0.1:1081/pool/alive | jq '.proxies | length'
|
||||||
|
```
|
||||||
|
|
||||||
# Current config
|
#### `GET /config`
|
||||||
|
|
||||||
|
Current runtime config (sanitized -- passwords are never exposed).
|
||||||
|
|
||||||
|
```bash
|
||||||
curl -s http://127.0.0.1:1081/config | jq .
|
curl -s http://127.0.0.1:1081/config | jq .
|
||||||
|
```
|
||||||
|
|
||||||
# Reload config (like SIGHUP)
|
```json
|
||||||
|
{
|
||||||
|
"timeout": 10,
|
||||||
|
"retries": 3,
|
||||||
|
"log_level": "info",
|
||||||
|
"max_connections": 256,
|
||||||
|
"pool_size": 0,
|
||||||
|
"listeners": [
|
||||||
|
{
|
||||||
|
"listen": "0.0.0.0:1080",
|
||||||
|
"chain": ["socks5://10.200.1.13:9050"],
|
||||||
|
"pool_hops": 2,
|
||||||
|
"pool": "clean",
|
||||||
|
"auth_users": 2
|
||||||
|
}
|
||||||
|
],
|
||||||
|
"tor_nodes": ["socks5://10.200.1.1:9050"],
|
||||||
|
"proxy_pools": {
|
||||||
|
"clean": {
|
||||||
|
"sources": [{"url": "http://10.200.1.250:8081/proxies/all", "mitm": false}],
|
||||||
|
"refresh": 300,
|
||||||
|
"test_interval": 120,
|
||||||
|
"max_fails": 3
|
||||||
|
}
|
||||||
|
}
|
||||||
|
}
|
||||||
|
```
|
||||||
|
|
||||||
|
| Field | Type | Description |
|
||||||
|
|-------|------|-------------|
|
||||||
|
| `timeout` | float | Per-hop connection timeout (seconds) |
|
||||||
|
| `retries` | int | Max connection attempts per request |
|
||||||
|
| `log_level` | string | Current log level |
|
||||||
|
| `max_connections` | int | Concurrent connection cap |
|
||||||
|
| `pool_size` | int | Pre-warmed TCP connections to first hop |
|
||||||
|
| `listeners` | array | Listener configs |
|
||||||
|
| `listeners[].auth_users` | int | Number of auth users (present when auth enabled) |
|
||||||
|
| `tor_nodes` | array | Tor node URLs (present when configured) |
|
||||||
|
| `proxy_pools` | object | Pool configs (present when pools configured) |
|
||||||
|
|
||||||
|
#### `GET /tor`
|
||||||
|
|
||||||
|
Tor controller status.
|
||||||
|
|
||||||
|
```bash
|
||||||
|
curl -s http://127.0.0.1:1081/tor | jq .
|
||||||
|
```
|
||||||
|
|
||||||
|
```json
|
||||||
|
{"enabled": true, "connected": true, "last_newnym": 45.2, "newnym_interval": 60}
|
||||||
|
```
|
||||||
|
|
||||||
|
| Field | Type | Description |
|
||||||
|
|-------|------|-------------|
|
||||||
|
| `enabled` | bool | Whether Tor control is configured |
|
||||||
|
| `connected` | bool | Whether connected to Tor control port |
|
||||||
|
| `last_newnym` | float/null | Seconds since last NEWNYM signal |
|
||||||
|
| `newnym_interval` | int | Auto-rotation interval (0 = manual) |
|
||||||
|
|
||||||
|
Returns `{"enabled": false}` when Tor control is not configured.
|
||||||
|
|
||||||
|
#### `POST /reload`
|
||||||
|
|
||||||
|
Re-read config file (equivalent to SIGHUP).
|
||||||
|
|
||||||
|
```bash
|
||||||
curl -s -X POST http://127.0.0.1:1081/reload | jq .
|
curl -s -X POST http://127.0.0.1:1081/reload | jq .
|
||||||
|
```
|
||||||
|
|
||||||
# Trigger health tests now
|
```json
|
||||||
|
{"ok": true}
|
||||||
|
```
|
||||||
|
|
||||||
|
Returns `{"error": "..."}` (500) on failure.
|
||||||
|
|
||||||
|
#### `POST /pool/test`
|
||||||
|
|
||||||
|
Trigger immediate health test cycle for all pools.
|
||||||
|
|
||||||
|
```bash
|
||||||
curl -s -X POST http://127.0.0.1:1081/pool/test | jq .
|
curl -s -X POST http://127.0.0.1:1081/pool/test | jq .
|
||||||
|
```
|
||||||
|
|
||||||
# Re-fetch proxy sources now
|
```json
|
||||||
|
{"ok": true}
|
||||||
|
```
|
||||||
|
|
||||||
|
Returns `{"error": "no proxy pool configured"}` (400) when no pool is active.
|
||||||
|
|
||||||
|
#### `POST /pool/refresh`
|
||||||
|
|
||||||
|
Trigger immediate source re-fetch for all pools.
|
||||||
|
|
||||||
|
```bash
|
||||||
curl -s -X POST http://127.0.0.1:1081/pool/refresh | jq .
|
curl -s -X POST http://127.0.0.1:1081/pool/refresh | jq .
|
||||||
```
|
```
|
||||||
|
|
||||||
Settings that require a restart: `listen`, `chain`, `pool_size`, `pool_max_idle`, `api_listen`.
|
```json
|
||||||
|
{"ok": true}
|
||||||
|
```
|
||||||
|
|
||||||
|
Returns `{"error": "no proxy pool configured"}` (400) when no pool is active.
|
||||||
|
|
||||||
|
#### `POST /tor/newnym`
|
||||||
|
|
||||||
|
Request new Tor circuit (NEWNYM signal).
|
||||||
|
|
||||||
|
```bash
|
||||||
|
curl -s -X POST http://127.0.0.1:1081/tor/newnym | jq .
|
||||||
|
```
|
||||||
|
|
||||||
|
```json
|
||||||
|
{"ok": true}
|
||||||
|
```
|
||||||
|
|
||||||
|
Returns `{"ok": false, "reason": "rate-limited or not connected"}` when the
|
||||||
|
signal cannot be sent. Returns `{"error": "tor control not configured"}` (400)
|
||||||
|
when Tor control is not configured.
|
||||||
|
|
||||||
|
#### Error responses
|
||||||
|
|
||||||
|
All endpoints return errors as JSON with appropriate HTTP status codes:
|
||||||
|
|
||||||
|
| Status | Meaning | Example |
|
||||||
|
|--------|---------|---------|
|
||||||
|
| 400 | Bad request | `{"error": "no proxy pool configured"}` |
|
||||||
|
| 404 | Unknown path | `{"error": "not found"}` |
|
||||||
|
| 405 | Wrong method | `{"error": "use GET for /status"}` |
|
||||||
|
| 500 | Server error | `{"error": "reload not available"}` |
|
||||||
|
|
||||||
## Tor Control Port
|
## Tor Control Port
|
||||||
|
|
||||||
@@ -358,6 +959,31 @@ retries: 5 # try up to 5 different proxies per connection
|
|||||||
s5p -r 5 -C socks5://127.0.0.1:9050 -S http://api:8081/proxies
|
s5p -r 5 -C socks5://127.0.0.1:9050 -S http://api:8081/proxies
|
||||||
```
|
```
|
||||||
|
|
||||||
|
### Per-listener retry override
|
||||||
|
|
||||||
|
Each listener can override the global `retries` setting. Set `retries` on
|
||||||
|
a listener to use a different retry count for that port. A value of 0 (or
|
||||||
|
omitting the key) inherits the global setting.
|
||||||
|
|
||||||
|
```yaml
|
||||||
|
retries: 3 # global default
|
||||||
|
|
||||||
|
listeners:
|
||||||
|
- listen: 0.0.0.0:1080
|
||||||
|
retries: 5 # deep chain: more retries
|
||||||
|
chain:
|
||||||
|
- socks5://127.0.0.1:9050
|
||||||
|
- pool
|
||||||
|
- pool
|
||||||
|
- listen: 0.0.0.0:1082
|
||||||
|
chain:
|
||||||
|
- socks5://127.0.0.1:9050 # Tor only: uses global retries=3
|
||||||
|
```
|
||||||
|
|
||||||
|
The effective retry count for a listener is `listener.retries` if set,
|
||||||
|
otherwise `config.retries`. Visible in `/config` and `/status` API responses
|
||||||
|
when overridden.
|
||||||
|
|
||||||
## Hot Reload
|
## Hot Reload
|
||||||
|
|
||||||
Send `SIGHUP` to reload the config file without restarting:
|
Send `SIGHUP` to reload the config file without restarting:
|
||||||
@@ -378,7 +1004,7 @@ Settings reloaded on SIGHUP:
|
|||||||
| `max_connections` | Concurrent connection limit |
|
| `max_connections` | Concurrent connection limit |
|
||||||
| `proxy_pool.*` | Sources, intervals, thresholds |
|
| `proxy_pool.*` | Sources, intervals, thresholds |
|
||||||
|
|
||||||
Settings that require a restart: `listen`, `chain`, `pool_size`, `pool_max_idle`, `api_listen`.
|
Settings that require a restart: `listeners`, `listen`, `chain`, `pool_size`, `pool_max_idle`, `api_listen`.
|
||||||
|
|
||||||
Requires `-c` / `--config` to know which file to re-read. Without a
|
Requires `-c` / `--config` to know which file to re-read. Without a
|
||||||
config file, SIGHUP is ignored with a warning.
|
config file, SIGHUP is ignored with a warning.
|
||||||
@@ -389,7 +1015,7 @@ s5p tracks connection metrics and logs a summary every 60 seconds and on
|
|||||||
shutdown:
|
shutdown:
|
||||||
|
|
||||||
```
|
```
|
||||||
metrics: conn=142 ok=98 fail=44 retries=88 active=3 in=1.2M out=4.5M up=0h05m12s pool=42/65
|
metrics: conn=1842 ok=1790 fail=52 retries=67 active=3 in=50.0M out=1.0G rate=4.72/s p50=198.3ms p95=890.1ms up=1h01m01s pool=42/65
|
||||||
```
|
```
|
||||||
|
|
||||||
| Counter | Meaning |
|
| Counter | Meaning |
|
||||||
@@ -401,9 +1027,40 @@ metrics: conn=142 ok=98 fail=44 retries=88 active=3 in=1.2M out=4.5M up=0h05m12s
|
|||||||
| `active` | Currently relaying |
|
| `active` | Currently relaying |
|
||||||
| `in` | Bytes client -> remote |
|
| `in` | Bytes client -> remote |
|
||||||
| `out` | Bytes remote -> client |
|
| `out` | Bytes remote -> client |
|
||||||
|
| `rate` | Connection rate (events/sec, rolling window) |
|
||||||
|
| `p50` | Median chain setup latency in ms |
|
||||||
|
| `p95` | 95th percentile chain setup latency in ms |
|
||||||
| `up` | Server uptime |
|
| `up` | Server uptime |
|
||||||
| `pool` | Alive/total proxies (only when pool is active) |
|
| `pool` | Alive/total proxies (only when pool is active) |
|
||||||
|
|
||||||
|
### `/metrics` OpenMetrics endpoint
|
||||||
|
|
||||||
|
`GET /metrics` returns all counters, gauges, pool stats, and latency summaries
|
||||||
|
in OpenMetrics format (see [API Reference](#get-metrics) above). Use `/status`
|
||||||
|
for the JSON equivalent with aggregate data.
|
||||||
|
|
||||||
|
### Per-listener latency
|
||||||
|
|
||||||
|
Each listener tracks chain setup latency independently. The `/status`
|
||||||
|
endpoint includes a `latency` field on each listener entry:
|
||||||
|
|
||||||
|
```json
|
||||||
|
{
|
||||||
|
"listeners": [
|
||||||
|
{
|
||||||
|
"listen": "0.0.0.0:1080",
|
||||||
|
"chain": ["socks5://10.200.1.13:9050"],
|
||||||
|
"pool_hops": 2,
|
||||||
|
"latency": {"count": 500, "p50": 1800.2, "p95": 8200.1, "...": "..."}
|
||||||
|
}
|
||||||
|
]
|
||||||
|
}
|
||||||
|
```
|
||||||
|
|
||||||
|
The aggregate `latency` in `/metrics` combines all listeners. Use
|
||||||
|
`listener_latency` or the per-listener `latency` in `/status` to
|
||||||
|
isolate latency by chain depth.
|
||||||
|
|
||||||
## Profiling
|
## Profiling
|
||||||
|
|
||||||
```bash
|
```bash
|
||||||
|
|||||||
@@ -4,7 +4,7 @@ build-backend = "setuptools.build_meta"
|
|||||||
|
|
||||||
[project]
|
[project]
|
||||||
name = "s5p"
|
name = "s5p"
|
||||||
version = "0.1.0"
|
version = "0.3.0"
|
||||||
description = "SOCKS5 proxy with Tor and proxy-chain support"
|
description = "SOCKS5 proxy with Tor and proxy-chain support"
|
||||||
requires-python = ">=3.11"
|
requires-python = ">=3.11"
|
||||||
dependencies = ["pyyaml>=6.0"]
|
dependencies = ["pyyaml>=6.0"]
|
||||||
|
|||||||
1
requirements.txt
Normal file
1
requirements.txt
Normal file
@@ -0,0 +1 @@
|
|||||||
|
pyyaml>=6.0
|
||||||
@@ -1,3 +1,3 @@
|
|||||||
"""s5p -- SOCKS5 proxy with chain support."""
|
"""s5p -- SOCKS5 proxy with chain support."""
|
||||||
|
|
||||||
__version__ = "0.1.0"
|
__version__ = "0.3.0"
|
||||||
|
|||||||
317
src/s5p/api.py
317
src/s5p/api.py
@@ -30,23 +30,46 @@ def _parse_request(data: bytes) -> tuple[str, str]:
|
|||||||
return parts[0].upper(), parts[1].split("?", 1)[0]
|
return parts[0].upper(), parts[1].split("?", 1)[0]
|
||||||
|
|
||||||
|
|
||||||
|
def _http_response(
|
||||||
|
writer: asyncio.StreamWriter,
|
||||||
|
status: int,
|
||||||
|
payload: bytes,
|
||||||
|
content_type: str = "application/json",
|
||||||
|
) -> None:
|
||||||
|
"""Write an HTTP response and close."""
|
||||||
|
phrases = {200: "OK", 400: "Bad Request", 404: "Not Found",
|
||||||
|
405: "Method Not Allowed", 500: "Internal Server Error"}
|
||||||
|
header = (
|
||||||
|
f"HTTP/1.1 {status} {phrases.get(status, 'Error')}\r\n"
|
||||||
|
f"Content-Type: {content_type}\r\n"
|
||||||
|
f"Content-Length: {len(payload)}\r\n"
|
||||||
|
f"Connection: close\r\n"
|
||||||
|
f"\r\n"
|
||||||
|
)
|
||||||
|
writer.write(header.encode() + payload)
|
||||||
|
|
||||||
|
|
||||||
def _json_response(
|
def _json_response(
|
||||||
writer: asyncio.StreamWriter,
|
writer: asyncio.StreamWriter,
|
||||||
status: int,
|
status: int,
|
||||||
body: dict | list,
|
body: dict | list,
|
||||||
) -> None:
|
) -> None:
|
||||||
"""Write an HTTP response with JSON body and close."""
|
"""Write an HTTP response with JSON body and close."""
|
||||||
phrases = {200: "OK", 400: "Bad Request", 404: "Not Found",
|
|
||||||
405: "Method Not Allowed", 500: "Internal Server Error"}
|
|
||||||
payload = json.dumps(body, separators=(",", ":")).encode()
|
payload = json.dumps(body, separators=(",", ":")).encode()
|
||||||
header = (
|
_http_response(writer, status, payload, "application/json")
|
||||||
f"HTTP/1.1 {status} {phrases.get(status, 'Error')}\r\n"
|
|
||||||
f"Content-Type: application/json\r\n"
|
|
||||||
f"Content-Length: {len(payload)}\r\n"
|
# -- helpers -----------------------------------------------------------------
|
||||||
f"Connection: close\r\n"
|
|
||||||
f"\r\n"
|
|
||||||
)
|
def _multi_pool(lc) -> bool:
|
||||||
writer.write(header.encode() + payload)
|
"""Check if a listener uses more than one distinct pool."""
|
||||||
|
return len({n for c in lc.pool_seq for n in c}) > 1
|
||||||
|
|
||||||
|
|
||||||
|
def _pool_seq_entry(lc) -> dict:
|
||||||
|
"""Build pool_seq dict entry for API responses."""
|
||||||
|
return {"pool_seq": lc.pool_seq}
|
||||||
|
|
||||||
|
|
||||||
# -- route handlers ----------------------------------------------------------
|
# -- route handlers ----------------------------------------------------------
|
||||||
@@ -55,7 +78,7 @@ def _json_response(
|
|||||||
def _handle_status(ctx: dict) -> tuple[int, dict]:
|
def _handle_status(ctx: dict) -> tuple[int, dict]:
|
||||||
"""GET /status -- combined runtime summary."""
|
"""GET /status -- combined runtime summary."""
|
||||||
metrics: Metrics = ctx["metrics"]
|
metrics: Metrics = ctx["metrics"]
|
||||||
data = {
|
data: dict = {
|
||||||
"uptime": round(time.monotonic() - metrics.started, 1),
|
"uptime": round(time.monotonic() - metrics.started, 1),
|
||||||
"connections": metrics.connections,
|
"connections": metrics.connections,
|
||||||
"success": metrics.success,
|
"success": metrics.success,
|
||||||
@@ -63,44 +86,194 @@ def _handle_status(ctx: dict) -> tuple[int, dict]:
|
|||||||
"active": metrics.active,
|
"active": metrics.active,
|
||||||
"bytes_in": metrics.bytes_in,
|
"bytes_in": metrics.bytes_in,
|
||||||
"bytes_out": metrics.bytes_out,
|
"bytes_out": metrics.bytes_out,
|
||||||
|
"rate": round(metrics.conn_rate.rate(), 2),
|
||||||
|
"latency": metrics.latency.stats(),
|
||||||
}
|
}
|
||||||
pool = ctx.get("pool")
|
pools: dict = ctx.get("pools") or {}
|
||||||
if pool:
|
if pools:
|
||||||
|
total_alive = sum(p.alive_count for p in pools.values())
|
||||||
|
total_count = sum(p.count for p in pools.values())
|
||||||
|
data["pool"] = {"alive": total_alive, "total": total_count}
|
||||||
|
data["pools"] = {
|
||||||
|
name: {"alive": p.alive_count, "total": p.count}
|
||||||
|
for name, p in pools.items()
|
||||||
|
}
|
||||||
|
elif ctx.get("pool"):
|
||||||
|
pool = ctx["pool"]
|
||||||
data["pool"] = {"alive": pool.alive_count, "total": pool.count}
|
data["pool"] = {"alive": pool.alive_count, "total": pool.count}
|
||||||
config = ctx.get("config")
|
config = ctx.get("config")
|
||||||
|
if config and config.tor_nodes:
|
||||||
|
data["tor_nodes"] = [str(n) for n in config.tor_nodes]
|
||||||
if config:
|
if config:
|
||||||
data["chain"] = [str(h) for h in config.chain]
|
data["listeners"] = [
|
||||||
|
{
|
||||||
|
"listen": f"{lc.listen_host}:{lc.listen_port}",
|
||||||
|
"chain": [str(h) for h in lc.chain],
|
||||||
|
"pool_hops": lc.pool_hops,
|
||||||
|
**({"pool": lc.pool_name} if lc.pool_name else {}),
|
||||||
|
**(_pool_seq_entry(lc) if _multi_pool(lc) else {}),
|
||||||
|
**({"auth": True} if lc.auth else {}),
|
||||||
|
**({"retries": lc.retries} if lc.retries else {}),
|
||||||
|
"latency": metrics.get_listener_latency(
|
||||||
|
f"{lc.listen_host}:{lc.listen_port}"
|
||||||
|
).stats(),
|
||||||
|
}
|
||||||
|
for lc in config.listeners
|
||||||
|
]
|
||||||
return 200, data
|
return 200, data
|
||||||
|
|
||||||
|
|
||||||
def _handle_metrics(ctx: dict) -> tuple[int, dict]:
|
def _render_openmetrics(ctx: dict) -> str:
|
||||||
"""GET /metrics -- full metrics counters."""
|
"""Render all metrics in OpenMetrics text format."""
|
||||||
return 200, ctx["metrics"].to_dict()
|
m: Metrics = ctx["metrics"]
|
||||||
|
lines: list[str] = []
|
||||||
|
|
||||||
|
def _counter(name: str, help_text: str, value: int) -> None:
|
||||||
|
lines.append(f"# HELP {name} {help_text}")
|
||||||
|
lines.append(f"# TYPE {name} counter")
|
||||||
|
lines.append(f"{name}_total {value}")
|
||||||
|
|
||||||
|
def _gauge(name: str, help_text: str, value: float) -> None:
|
||||||
|
lines.append(f"# HELP {name} {help_text}")
|
||||||
|
lines.append(f"# TYPE {name} gauge")
|
||||||
|
lines.append(f"{name} {value}")
|
||||||
|
|
||||||
|
def _summary(name: str, help_text: str, q: dict,
|
||||||
|
labels: str = "") -> None:
|
||||||
|
lines.append(f"# HELP {name} {help_text}")
|
||||||
|
lines.append(f"# TYPE {name} summary")
|
||||||
|
lb = f"{{{labels}," if labels else "{"
|
||||||
|
for quantile in ("0.5", "0.95", "0.99"):
|
||||||
|
lines.append(f'{name}{lb}quantile="{quantile}"}} {q[quantile]:.6f}')
|
||||||
|
lw = f"{{{labels}}}" if labels else ""
|
||||||
|
lines.append(f"{name}_count{lw} {q['count']}")
|
||||||
|
lines.append(f"{name}_sum{lw} {q['sum']:.6f}")
|
||||||
|
|
||||||
|
# -- counters
|
||||||
|
_counter("s5p_connections", "Total connection attempts.", m.connections)
|
||||||
|
_counter("s5p_connections_success",
|
||||||
|
"Connections successfully relayed.", m.success)
|
||||||
|
_counter("s5p_connections_failed", "Connection failures.", m.failed)
|
||||||
|
_counter("s5p_retries", "Connection retry attempts.", m.retries)
|
||||||
|
_counter("s5p_auth_failures",
|
||||||
|
"SOCKS5 authentication failures.", m.auth_failures)
|
||||||
|
_counter("s5p_bytes_in",
|
||||||
|
"Bytes received from clients.", m.bytes_in)
|
||||||
|
_counter("s5p_bytes_out",
|
||||||
|
"Bytes sent to clients.", m.bytes_out)
|
||||||
|
|
||||||
|
# -- gauges
|
||||||
|
_gauge("s5p_active_connections",
|
||||||
|
"Currently open connections.", m.active)
|
||||||
|
_gauge("s5p_uptime_seconds",
|
||||||
|
"Seconds since server start.",
|
||||||
|
round(time.monotonic() - m.started, 1))
|
||||||
|
_gauge("s5p_connection_rate",
|
||||||
|
"Connections per second (rolling window).",
|
||||||
|
round(m.conn_rate.rate(), 4))
|
||||||
|
|
||||||
|
# -- pool gauges
|
||||||
|
pools: dict = ctx.get("pools") or {}
|
||||||
|
if pools:
|
||||||
|
lines.append("# HELP s5p_pool_proxies_alive Alive proxies in pool.")
|
||||||
|
lines.append("# TYPE s5p_pool_proxies_alive gauge")
|
||||||
|
for name, p in pools.items():
|
||||||
|
lines.append(f's5p_pool_proxies_alive{{pool="{name}"}} {p.alive_count}')
|
||||||
|
lines.append("# HELP s5p_pool_proxies_total Total proxies in pool.")
|
||||||
|
lines.append("# TYPE s5p_pool_proxies_total gauge")
|
||||||
|
for name, p in pools.items():
|
||||||
|
lines.append(f's5p_pool_proxies_total{{pool="{name}"}} {p.count}')
|
||||||
|
elif ctx.get("pool"):
|
||||||
|
p = ctx["pool"]
|
||||||
|
_gauge("s5p_pool_proxies_alive", "Alive proxies in pool.", p.alive_count)
|
||||||
|
_gauge("s5p_pool_proxies_total", "Total proxies in pool.", p.count)
|
||||||
|
|
||||||
|
# -- latency summary (global)
|
||||||
|
q = m.latency.quantiles()
|
||||||
|
if q:
|
||||||
|
_summary("s5p_chain_latency_seconds",
|
||||||
|
"Chain build latency in seconds.", q)
|
||||||
|
|
||||||
|
# -- per-listener latency summaries
|
||||||
|
if m.listener_latency:
|
||||||
|
lines.append(
|
||||||
|
"# HELP s5p_listener_chain_latency_seconds "
|
||||||
|
"Per-listener chain build latency in seconds.")
|
||||||
|
lines.append("# TYPE s5p_listener_chain_latency_seconds summary")
|
||||||
|
for key, tracker in m.listener_latency.items():
|
||||||
|
lq = tracker.quantiles()
|
||||||
|
if not lq:
|
||||||
|
continue
|
||||||
|
for quantile in ("0.5", "0.95", "0.99"):
|
||||||
|
lines.append(
|
||||||
|
f's5p_listener_chain_latency_seconds'
|
||||||
|
f'{{listener="{key}",quantile="{quantile}"}} '
|
||||||
|
f'{lq[quantile]:.6f}')
|
||||||
|
lines.append(
|
||||||
|
f's5p_listener_chain_latency_seconds_count'
|
||||||
|
f'{{listener="{key}"}} {lq["count"]}')
|
||||||
|
lines.append(
|
||||||
|
f's5p_listener_chain_latency_seconds_sum'
|
||||||
|
f'{{listener="{key}"}} {lq["sum"]:.6f}')
|
||||||
|
|
||||||
|
lines.append("# EOF")
|
||||||
|
return "\n".join(lines) + "\n"
|
||||||
|
|
||||||
|
|
||||||
|
_OPENMETRICS_CT = (
|
||||||
|
"application/openmetrics-text; version=1.0.0; charset=utf-8"
|
||||||
|
)
|
||||||
|
|
||||||
|
|
||||||
|
def _handle_metrics(ctx: dict) -> tuple[int, str]:
|
||||||
|
"""GET /metrics -- OpenMetrics exposition."""
|
||||||
|
return 200, _render_openmetrics(ctx)
|
||||||
|
|
||||||
|
|
||||||
def _handle_pool(ctx: dict, alive_only: bool = False) -> tuple[int, dict]:
|
def _handle_pool(ctx: dict, alive_only: bool = False) -> tuple[int, dict]:
|
||||||
"""GET /pool or /pool/alive -- proxy pool state."""
|
"""GET /pool or /pool/alive -- proxy pool state."""
|
||||||
pool = ctx.get("pool")
|
pools: dict = ctx.get("pools") or {}
|
||||||
if not pool:
|
pool_list = list(pools.values()) if pools else []
|
||||||
|
# backward compat: fall back to single "pool" key
|
||||||
|
if not pool_list and ctx.get("pool"):
|
||||||
|
pool_list = [ctx["pool"]]
|
||||||
|
|
||||||
|
if not pool_list:
|
||||||
return 200, {"alive": 0, "total": 0, "proxies": {}}
|
return 200, {"alive": 0, "total": 0, "proxies": {}}
|
||||||
|
|
||||||
|
multi = len(pool_list) > 1
|
||||||
proxies = {}
|
proxies = {}
|
||||||
for key, entry in pool._proxies.items():
|
total_alive = 0
|
||||||
if alive_only and not entry.alive:
|
total_count = 0
|
||||||
continue
|
for p in pool_list:
|
||||||
proxies[key] = {
|
total_alive += p.alive_count
|
||||||
"alive": entry.alive,
|
total_count += p.count
|
||||||
"fails": entry.fails,
|
for key, entry in p._proxies.items():
|
||||||
"tests": entry.tests,
|
if alive_only and not entry.alive:
|
||||||
"last_ok": entry.last_ok,
|
continue
|
||||||
"last_test": entry.last_test,
|
rec: dict = {
|
||||||
"last_seen": entry.last_seen,
|
"alive": entry.alive,
|
||||||
}
|
"fails": entry.fails,
|
||||||
return 200, {
|
"tests": entry.tests,
|
||||||
"alive": pool.alive_count,
|
"last_ok": entry.last_ok,
|
||||||
"total": pool.count,
|
"last_test": entry.last_test,
|
||||||
|
"last_seen": entry.last_seen,
|
||||||
|
}
|
||||||
|
if multi:
|
||||||
|
rec["pool"] = p.name
|
||||||
|
proxies[key] = rec
|
||||||
|
|
||||||
|
result: dict = {
|
||||||
|
"alive": total_alive,
|
||||||
|
"total": total_count,
|
||||||
"proxies": proxies,
|
"proxies": proxies,
|
||||||
}
|
}
|
||||||
|
if multi:
|
||||||
|
result["pools"] = {
|
||||||
|
p.name: {"alive": p.alive_count, "total": p.count}
|
||||||
|
for p in pool_list
|
||||||
|
}
|
||||||
|
return 200, result
|
||||||
|
|
||||||
|
|
||||||
def _handle_config(ctx: dict) -> tuple[int, dict]:
|
def _handle_config(ctx: dict) -> tuple[int, dict]:
|
||||||
@@ -110,19 +283,54 @@ def _handle_config(ctx: dict) -> tuple[int, dict]:
|
|||||||
return 500, {"error": "config unavailable"}
|
return 500, {"error": "config unavailable"}
|
||||||
|
|
||||||
data: dict = {
|
data: dict = {
|
||||||
"listen": f"{config.listen_host}:{config.listen_port}",
|
|
||||||
"timeout": config.timeout,
|
"timeout": config.timeout,
|
||||||
"retries": config.retries,
|
"retries": config.retries,
|
||||||
"log_level": config.log_level,
|
"log_level": config.log_level,
|
||||||
"max_connections": config.max_connections,
|
"max_connections": config.max_connections,
|
||||||
"pool_size": config.pool_size,
|
"pool_size": config.pool_size,
|
||||||
"chain": [str(h) for h in config.chain],
|
"listeners": [
|
||||||
|
{
|
||||||
|
"listen": f"{lc.listen_host}:{lc.listen_port}",
|
||||||
|
"chain": [str(h) for h in lc.chain],
|
||||||
|
"pool_hops": lc.pool_hops,
|
||||||
|
**({"pool": lc.pool_name} if lc.pool_name else {}),
|
||||||
|
**(_pool_seq_entry(lc) if _multi_pool(lc) else {}),
|
||||||
|
**({"auth_users": len(lc.auth)} if lc.auth else {}),
|
||||||
|
**({"retries": lc.retries} if lc.retries else {}),
|
||||||
|
}
|
||||||
|
for lc in config.listeners
|
||||||
|
],
|
||||||
}
|
}
|
||||||
if config.proxy_pool:
|
if config.tor_nodes:
|
||||||
|
data["tor_nodes"] = [str(n) for n in config.tor_nodes]
|
||||||
|
if config.proxy_pools:
|
||||||
|
pools_data: dict = {}
|
||||||
|
for name, pp in config.proxy_pools.items():
|
||||||
|
sources = []
|
||||||
|
for src in pp.sources:
|
||||||
|
s: dict = {}
|
||||||
|
if src.url:
|
||||||
|
s["url"] = src.url
|
||||||
|
if src.file:
|
||||||
|
s["file"] = src.file
|
||||||
|
if src.mitm is not None:
|
||||||
|
s["mitm"] = src.mitm
|
||||||
|
sources.append(s)
|
||||||
|
pool_entry: dict = {
|
||||||
|
"sources": sources,
|
||||||
|
"refresh": pp.refresh,
|
||||||
|
"test_interval": pp.test_interval,
|
||||||
|
"max_fails": pp.max_fails,
|
||||||
|
}
|
||||||
|
if pp.allowed_protos:
|
||||||
|
pool_entry["allowed_protos"] = pp.allowed_protos
|
||||||
|
pools_data[name] = pool_entry
|
||||||
|
data["proxy_pools"] = pools_data
|
||||||
|
elif config.proxy_pool:
|
||||||
pp = config.proxy_pool
|
pp = config.proxy_pool
|
||||||
sources = []
|
sources = []
|
||||||
for src in pp.sources:
|
for src in pp.sources:
|
||||||
s: dict = {}
|
s = {}
|
||||||
if src.url:
|
if src.url:
|
||||||
s["url"] = src.url
|
s["url"] = src.url
|
||||||
if src.file:
|
if src.file:
|
||||||
@@ -151,19 +359,27 @@ async def _handle_reload(ctx: dict) -> tuple[int, dict]:
|
|||||||
|
|
||||||
async def _handle_pool_test(ctx: dict) -> tuple[int, dict]:
|
async def _handle_pool_test(ctx: dict) -> tuple[int, dict]:
|
||||||
"""POST /pool/test -- trigger immediate health test."""
|
"""POST /pool/test -- trigger immediate health test."""
|
||||||
pool = ctx.get("pool")
|
pools: dict = ctx.get("pools") or {}
|
||||||
if not pool:
|
pool_list = list(pools.values()) if pools else []
|
||||||
|
if not pool_list and ctx.get("pool"):
|
||||||
|
pool_list = [ctx["pool"]]
|
||||||
|
if not pool_list:
|
||||||
return 400, {"error": "no proxy pool configured"}
|
return 400, {"error": "no proxy pool configured"}
|
||||||
asyncio.create_task(pool._run_health_tests())
|
for p in pool_list:
|
||||||
|
asyncio.create_task(p._run_health_tests())
|
||||||
return 200, {"ok": True}
|
return 200, {"ok": True}
|
||||||
|
|
||||||
|
|
||||||
async def _handle_pool_refresh(ctx: dict) -> tuple[int, dict]:
|
async def _handle_pool_refresh(ctx: dict) -> tuple[int, dict]:
|
||||||
"""POST /pool/refresh -- trigger immediate source re-fetch."""
|
"""POST /pool/refresh -- trigger immediate source re-fetch."""
|
||||||
pool = ctx.get("pool")
|
pools: dict = ctx.get("pools") or {}
|
||||||
if not pool:
|
pool_list = list(pools.values()) if pools else []
|
||||||
|
if not pool_list and ctx.get("pool"):
|
||||||
|
pool_list = [ctx["pool"]]
|
||||||
|
if not pool_list:
|
||||||
return 400, {"error": "no proxy pool configured"}
|
return 400, {"error": "no proxy pool configured"}
|
||||||
asyncio.create_task(pool._fetch_all_sources())
|
for p in pool_list:
|
||||||
|
asyncio.create_task(p._fetch_all_sources())
|
||||||
return 200, {"ok": True}
|
return 200, {"ok": True}
|
||||||
|
|
||||||
|
|
||||||
@@ -211,8 +427,13 @@ _POST_ROUTES: dict[str, str] = {
|
|||||||
}
|
}
|
||||||
|
|
||||||
|
|
||||||
async def _route(method: str, path: str, ctx: dict) -> tuple[int, dict]:
|
async def _route(
|
||||||
"""Dispatch request to the appropriate handler."""
|
method: str, path: str, ctx: dict,
|
||||||
|
) -> tuple[int, dict | str]:
|
||||||
|
"""Dispatch request to the appropriate handler.
|
||||||
|
|
||||||
|
Returns (status, body) where body is a dict (JSON) or str (text).
|
||||||
|
"""
|
||||||
if method == "GET" and path in _GET_ROUTES:
|
if method == "GET" and path in _GET_ROUTES:
|
||||||
name = _GET_ROUTES[path]
|
name = _GET_ROUTES[path]
|
||||||
if name == "status":
|
if name == "status":
|
||||||
@@ -267,7 +488,11 @@ async def _handle_connection(
|
|||||||
return
|
return
|
||||||
|
|
||||||
status, body = await _route(method, path, ctx)
|
status, body = await _route(method, path, ctx)
|
||||||
_json_response(writer, status, body)
|
if isinstance(body, str):
|
||||||
|
_http_response(writer, status, body.encode(),
|
||||||
|
_OPENMETRICS_CT)
|
||||||
|
else:
|
||||||
|
_json_response(writer, status, body)
|
||||||
await writer.drain()
|
await writer.drain()
|
||||||
except (TimeoutError, ConnectionError, OSError):
|
except (TimeoutError, ConnectionError, OSError):
|
||||||
pass
|
pass
|
||||||
|
|||||||
@@ -7,7 +7,14 @@ import asyncio
|
|||||||
import logging
|
import logging
|
||||||
|
|
||||||
from . import __version__
|
from . import __version__
|
||||||
from .config import Config, PoolSourceConfig, ProxyPoolConfig, load_config, parse_proxy_url
|
from .config import (
|
||||||
|
Config,
|
||||||
|
ListenerConfig,
|
||||||
|
PoolSourceConfig,
|
||||||
|
ProxyPoolConfig,
|
||||||
|
load_config,
|
||||||
|
parse_proxy_url,
|
||||||
|
)
|
||||||
from .server import serve
|
from .server import serve
|
||||||
|
|
||||||
|
|
||||||
@@ -115,6 +122,25 @@ def main(argv: list[str] | None = None) -> int:
|
|||||||
elif args.quiet:
|
elif args.quiet:
|
||||||
config.log_level = "error"
|
config.log_level = "error"
|
||||||
|
|
||||||
|
# ensure listeners list is populated (CLI-only mode, no config file)
|
||||||
|
if not config.listeners:
|
||||||
|
lc = ListenerConfig(
|
||||||
|
listen_host=config.listen_host,
|
||||||
|
listen_port=config.listen_port,
|
||||||
|
chain=list(config.chain),
|
||||||
|
)
|
||||||
|
if config.proxy_pool and config.proxy_pool.sources:
|
||||||
|
lc.pool_hops = 1
|
||||||
|
config.listeners.append(lc)
|
||||||
|
elif len(config.listeners) == 1:
|
||||||
|
# sync CLI overrides (-l, -C) to the single listener
|
||||||
|
lc = config.listeners[0]
|
||||||
|
if args.listen:
|
||||||
|
lc.listen_host = config.listen_host
|
||||||
|
lc.listen_port = config.listen_port
|
||||||
|
if args.chain:
|
||||||
|
lc.chain = list(config.chain)
|
||||||
|
|
||||||
_setup_logging(config.log_level)
|
_setup_logging(config.log_level)
|
||||||
logger = logging.getLogger("s5p")
|
logger = logging.getLogger("s5p")
|
||||||
|
|
||||||
|
|||||||
@@ -36,6 +36,7 @@ class PoolSourceConfig:
|
|||||||
proto: str | None = None
|
proto: str | None = None
|
||||||
country: str | None = None
|
country: str | None = None
|
||||||
limit: int | None = 1000
|
limit: int | None = 1000
|
||||||
|
mitm: bool | None = None
|
||||||
|
|
||||||
|
|
||||||
@dataclass
|
@dataclass
|
||||||
@@ -45,12 +46,26 @@ class ProxyPoolConfig:
|
|||||||
sources: list[PoolSourceConfig] = field(default_factory=list)
|
sources: list[PoolSourceConfig] = field(default_factory=list)
|
||||||
refresh: float = 300.0
|
refresh: float = 300.0
|
||||||
test_interval: float = 120.0
|
test_interval: float = 120.0
|
||||||
test_url: str = "http://httpbin.org/ip"
|
test_url: str = "" # deprecated, kept for backward compat
|
||||||
|
test_targets: list[str] = field(default_factory=lambda: [
|
||||||
|
"www.google.com",
|
||||||
|
"www.cloudflare.com",
|
||||||
|
"www.amazon.com",
|
||||||
|
])
|
||||||
test_timeout: float = 15.0
|
test_timeout: float = 15.0
|
||||||
test_concurrency: int = 5
|
test_concurrency: int = 25
|
||||||
max_fails: int = 3
|
max_fails: int = 3
|
||||||
state_file: str = ""
|
state_file: str = ""
|
||||||
report_url: str = ""
|
report_url: str = ""
|
||||||
|
allowed_protos: list[str] = field(default_factory=list)
|
||||||
|
|
||||||
|
def __post_init__(self) -> None:
|
||||||
|
"""Backward compat: extract hostname from legacy test_url."""
|
||||||
|
defaults = ["www.google.com", "www.cloudflare.com", "www.amazon.com"]
|
||||||
|
if self.test_url and self.test_targets == defaults:
|
||||||
|
host = urlparse(self.test_url).hostname
|
||||||
|
if host:
|
||||||
|
self.test_targets = [host]
|
||||||
|
|
||||||
|
|
||||||
@dataclass
|
@dataclass
|
||||||
@@ -64,6 +79,25 @@ class TorConfig:
|
|||||||
newnym_interval: float = 0.0 # 0 = manual only
|
newnym_interval: float = 0.0 # 0 = manual only
|
||||||
|
|
||||||
|
|
||||||
|
@dataclass
|
||||||
|
class ListenerConfig:
|
||||||
|
"""A single listener with its own address and chain."""
|
||||||
|
|
||||||
|
listen_host: str = "127.0.0.1"
|
||||||
|
listen_port: int = 1080
|
||||||
|
chain: list[ChainHop] = field(default_factory=list)
|
||||||
|
pool_seq: list[list[str]] = field(default_factory=list)
|
||||||
|
pool_name: str = ""
|
||||||
|
bypass: list[str] = field(default_factory=list)
|
||||||
|
auth: dict[str, str] = field(default_factory=dict)
|
||||||
|
retries: int = 0 # 0 = use global default
|
||||||
|
|
||||||
|
@property
|
||||||
|
def pool_hops(self) -> int:
|
||||||
|
"""Number of pool hops (backward compat)."""
|
||||||
|
return len(self.pool_seq)
|
||||||
|
|
||||||
|
|
||||||
@dataclass
|
@dataclass
|
||||||
class Config:
|
class Config:
|
||||||
"""Server configuration."""
|
"""Server configuration."""
|
||||||
@@ -71,6 +105,7 @@ class Config:
|
|||||||
listen_host: str = "127.0.0.1"
|
listen_host: str = "127.0.0.1"
|
||||||
listen_port: int = 1080
|
listen_port: int = 1080
|
||||||
chain: list[ChainHop] = field(default_factory=list)
|
chain: list[ChainHop] = field(default_factory=list)
|
||||||
|
listeners: list[ListenerConfig] = field(default_factory=list)
|
||||||
timeout: float = 10.0
|
timeout: float = 10.0
|
||||||
retries: int = 3
|
retries: int = 3
|
||||||
log_level: str = "info"
|
log_level: str = "info"
|
||||||
@@ -80,7 +115,9 @@ class Config:
|
|||||||
api_host: str = ""
|
api_host: str = ""
|
||||||
api_port: int = 0
|
api_port: int = 0
|
||||||
proxy_pool: ProxyPoolConfig | None = None
|
proxy_pool: ProxyPoolConfig | None = None
|
||||||
|
proxy_pools: dict[str, ProxyPoolConfig] = field(default_factory=dict)
|
||||||
tor: TorConfig | None = None
|
tor: TorConfig | None = None
|
||||||
|
tor_nodes: list[ChainHop] = field(default_factory=list)
|
||||||
config_file: str = ""
|
config_file: str = ""
|
||||||
|
|
||||||
|
|
||||||
@@ -127,6 +164,56 @@ def parse_api_proxies(data: dict) -> list[ChainHop]:
|
|||||||
return proxies
|
return proxies
|
||||||
|
|
||||||
|
|
||||||
|
def _parse_pool_config(pool_raw: dict) -> ProxyPoolConfig:
|
||||||
|
"""Parse a single proxy pool config block from YAML."""
|
||||||
|
sources = []
|
||||||
|
for src in pool_raw.get("sources", []):
|
||||||
|
mitm = src.get("mitm")
|
||||||
|
if mitm is not None:
|
||||||
|
mitm = bool(mitm)
|
||||||
|
sources.append(
|
||||||
|
PoolSourceConfig(
|
||||||
|
url=src.get("url"),
|
||||||
|
file=src.get("file"),
|
||||||
|
proto=src.get("proto"),
|
||||||
|
country=src.get("country"),
|
||||||
|
limit=src.get("limit", 1000),
|
||||||
|
mitm=mitm,
|
||||||
|
)
|
||||||
|
)
|
||||||
|
kwargs: dict = {
|
||||||
|
"sources": sources,
|
||||||
|
"refresh": float(pool_raw.get("refresh", 300)),
|
||||||
|
"test_interval": float(pool_raw.get("test_interval", 120)),
|
||||||
|
"test_url": pool_raw.get("test_url", ""),
|
||||||
|
"test_timeout": float(pool_raw.get("test_timeout", 15)),
|
||||||
|
"test_concurrency": int(pool_raw.get("test_concurrency", 25)),
|
||||||
|
"max_fails": int(pool_raw.get("max_fails", 3)),
|
||||||
|
"state_file": pool_raw.get("state_file", ""),
|
||||||
|
"report_url": pool_raw.get("report_url", ""),
|
||||||
|
}
|
||||||
|
if "test_targets" in pool_raw:
|
||||||
|
kwargs["test_targets"] = list(pool_raw["test_targets"])
|
||||||
|
if "allowed_protos" in pool_raw:
|
||||||
|
kwargs["allowed_protos"] = list(pool_raw["allowed_protos"])
|
||||||
|
return ProxyPoolConfig(**kwargs)
|
||||||
|
|
||||||
|
|
||||||
|
def _parse_pool_ref(item: str, default: str) -> str:
|
||||||
|
"""Resolve a pool reference string to a pool name.
|
||||||
|
|
||||||
|
``pool`` or ``pool:`` -> *default*; ``pool:name`` -> ``name``.
|
||||||
|
The ``pool`` prefix is matched case-insensitively.
|
||||||
|
"""
|
||||||
|
lower = item.lower()
|
||||||
|
if lower == "pool" or lower == "pool:":
|
||||||
|
return default
|
||||||
|
if lower.startswith("pool:"):
|
||||||
|
_, _, name = item.partition(":")
|
||||||
|
return name if name else default
|
||||||
|
raise ValueError(f"not a pool reference: {item!r}")
|
||||||
|
|
||||||
|
|
||||||
def load_config(path: str | Path) -> Config:
|
def load_config(path: str | Path) -> Config:
|
||||||
"""Load configuration from a YAML file."""
|
"""Load configuration from a YAML file."""
|
||||||
path = Path(path)
|
path = Path(path)
|
||||||
@@ -186,30 +273,16 @@ def load_config(path: str | Path) -> Config:
|
|||||||
)
|
)
|
||||||
)
|
)
|
||||||
|
|
||||||
|
# -- proxy pools (named) ------------------------------------------------
|
||||||
|
if "proxy_pools" in raw:
|
||||||
|
for name, pool_raw in raw["proxy_pools"].items():
|
||||||
|
config.proxy_pools[name] = _parse_pool_config(pool_raw)
|
||||||
|
|
||||||
if "proxy_pool" in raw:
|
if "proxy_pool" in raw:
|
||||||
pool_raw = raw["proxy_pool"]
|
config.proxy_pool = _parse_pool_config(raw["proxy_pool"])
|
||||||
sources = []
|
# register singular as "default" when proxy_pools doesn't already have it
|
||||||
for src in pool_raw.get("sources", []):
|
if "default" not in config.proxy_pools:
|
||||||
sources.append(
|
config.proxy_pools["default"] = config.proxy_pool
|
||||||
PoolSourceConfig(
|
|
||||||
url=src.get("url"),
|
|
||||||
file=src.get("file"),
|
|
||||||
proto=src.get("proto"),
|
|
||||||
country=src.get("country"),
|
|
||||||
limit=src.get("limit", 1000),
|
|
||||||
)
|
|
||||||
)
|
|
||||||
config.proxy_pool = ProxyPoolConfig(
|
|
||||||
sources=sources,
|
|
||||||
refresh=float(pool_raw.get("refresh", 300)),
|
|
||||||
test_interval=float(pool_raw.get("test_interval", 120)),
|
|
||||||
test_url=pool_raw.get("test_url", "http://httpbin.org/ip"),
|
|
||||||
test_timeout=float(pool_raw.get("test_timeout", 15)),
|
|
||||||
test_concurrency=int(pool_raw.get("test_concurrency", 5)),
|
|
||||||
max_fails=int(pool_raw.get("max_fails", 3)),
|
|
||||||
state_file=pool_raw.get("state_file", ""),
|
|
||||||
report_url=pool_raw.get("report_url", ""),
|
|
||||||
)
|
|
||||||
elif "proxy_source" in raw:
|
elif "proxy_source" in raw:
|
||||||
# backward compat: convert legacy proxy_source to proxy_pool
|
# backward compat: convert legacy proxy_source to proxy_pool
|
||||||
src_raw = raw["proxy_source"]
|
src_raw = raw["proxy_source"]
|
||||||
@@ -240,4 +313,71 @@ def load_config(path: str | Path) -> Config:
|
|||||||
newnym_interval=float(tor_raw.get("newnym_interval", 0)),
|
newnym_interval=float(tor_raw.get("newnym_interval", 0)),
|
||||||
)
|
)
|
||||||
|
|
||||||
|
# -- tor_nodes -------------------------------------------------------
|
||||||
|
if "tor_nodes" in raw:
|
||||||
|
config.tor_nodes = [parse_proxy_url(u) for u in raw["tor_nodes"]]
|
||||||
|
|
||||||
|
# -- listeners -------------------------------------------------------
|
||||||
|
if "listeners" in raw:
|
||||||
|
for entry in raw["listeners"]:
|
||||||
|
lc = ListenerConfig()
|
||||||
|
listen = entry.get("listen", "")
|
||||||
|
if isinstance(listen, str) and ":" in listen:
|
||||||
|
host, port_str = listen.rsplit(":", 1)
|
||||||
|
lc.listen_host = host
|
||||||
|
lc.listen_port = int(port_str)
|
||||||
|
elif isinstance(listen, (str, int)) and listen:
|
||||||
|
lc.listen_port = int(listen)
|
||||||
|
if "bypass" in entry:
|
||||||
|
lc.bypass = list(entry["bypass"])
|
||||||
|
if "auth" in entry:
|
||||||
|
auth_raw = entry["auth"]
|
||||||
|
if isinstance(auth_raw, dict):
|
||||||
|
lc.auth = {str(k): str(v) for k, v in auth_raw.items()}
|
||||||
|
if "retries" in entry:
|
||||||
|
lc.retries = int(entry["retries"])
|
||||||
|
if "pool" in entry:
|
||||||
|
lc.pool_name = entry["pool"]
|
||||||
|
default_pool = lc.pool_name or "default"
|
||||||
|
chain_raw = entry.get("chain", [])
|
||||||
|
for item in chain_raw:
|
||||||
|
if isinstance(item, str):
|
||||||
|
lower = item.lower()
|
||||||
|
if lower == "pool" or lower.startswith("pool:"):
|
||||||
|
lc.pool_seq.append([_parse_pool_ref(item, default_pool)])
|
||||||
|
else:
|
||||||
|
lc.chain.append(parse_proxy_url(item))
|
||||||
|
elif isinstance(item, dict):
|
||||||
|
# YAML parses "pool:" and "pool: name" as dicts
|
||||||
|
pool_key = next((k for k in item if k.lower() == "pool"), None)
|
||||||
|
if pool_key is not None and len(item) == 1:
|
||||||
|
name = item[pool_key]
|
||||||
|
lc.pool_seq.append([name if name else default_pool])
|
||||||
|
else:
|
||||||
|
lc.chain.append(
|
||||||
|
ChainHop(
|
||||||
|
proto=item.get("proto", "socks5"),
|
||||||
|
host=item["host"],
|
||||||
|
port=int(item["port"]),
|
||||||
|
username=item.get("username"),
|
||||||
|
password=item.get("password"),
|
||||||
|
)
|
||||||
|
)
|
||||||
|
elif isinstance(item, list):
|
||||||
|
# multi-candidate hop: [pool:clean, pool:mitm]
|
||||||
|
candidates = [_parse_pool_ref(str(el), default_pool) for el in item]
|
||||||
|
lc.pool_seq.append(candidates)
|
||||||
|
config.listeners.append(lc)
|
||||||
|
else:
|
||||||
|
# backward compat: build single listener from top-level fields
|
||||||
|
lc = ListenerConfig(
|
||||||
|
listen_host=config.listen_host,
|
||||||
|
listen_port=config.listen_port,
|
||||||
|
chain=list(config.chain),
|
||||||
|
)
|
||||||
|
# legacy behavior: if proxy_pool configured, auto-append 1 pool hop
|
||||||
|
if config.proxy_pool and config.proxy_pool.sources:
|
||||||
|
lc.pool_seq = [["default"]]
|
||||||
|
config.listeners.append(lc)
|
||||||
|
|
||||||
return config
|
return config
|
||||||
|
|||||||
@@ -3,6 +3,86 @@
|
|||||||
from __future__ import annotations
|
from __future__ import annotations
|
||||||
|
|
||||||
import time
|
import time
|
||||||
|
from collections import deque
|
||||||
|
|
||||||
|
|
||||||
|
class RateTracker:
|
||||||
|
"""Rolling window event rate (events/sec).
|
||||||
|
|
||||||
|
Stores up to ``maxlen`` monotonic timestamps. Rate is computed
|
||||||
|
on read as ``(n - 1) / span`` over the window -- no background timer.
|
||||||
|
"""
|
||||||
|
|
||||||
|
def __init__(self, maxlen: int = 256) -> None:
|
||||||
|
self._times: deque[float] = deque(maxlen=maxlen)
|
||||||
|
|
||||||
|
def record(self, now: float | None = None) -> None:
|
||||||
|
"""Record an event at *now* (default: ``time.monotonic()``)."""
|
||||||
|
self._times.append(now if now is not None else time.monotonic())
|
||||||
|
|
||||||
|
def rate(self) -> float:
|
||||||
|
"""Return events/sec over the window, 0.0 if < 2 events."""
|
||||||
|
n = len(self._times)
|
||||||
|
if n < 2:
|
||||||
|
return 0.0
|
||||||
|
span = self._times[-1] - self._times[0]
|
||||||
|
if span <= 0:
|
||||||
|
return 0.0
|
||||||
|
return (n - 1) / span
|
||||||
|
|
||||||
|
|
||||||
|
class LatencyTracker:
|
||||||
|
"""Circular buffer of latency samples with percentile stats.
|
||||||
|
|
||||||
|
Stores up to ``maxlen`` float-second samples. ``stats()`` sorts
|
||||||
|
a copy on read (~0.1 ms for 1000 floats) and returns millisecond values.
|
||||||
|
"""
|
||||||
|
|
||||||
|
def __init__(self, maxlen: int = 1000) -> None:
|
||||||
|
self._samples: deque[float] = deque(maxlen=maxlen)
|
||||||
|
|
||||||
|
def record(self, seconds: float) -> None:
|
||||||
|
"""Append a latency sample (in seconds)."""
|
||||||
|
self._samples.append(seconds)
|
||||||
|
|
||||||
|
@property
|
||||||
|
def count(self) -> int:
|
||||||
|
"""Number of samples currently stored."""
|
||||||
|
return len(self._samples)
|
||||||
|
|
||||||
|
def stats(self) -> dict | None:
|
||||||
|
"""Return ``{count, min, max, avg, p50, p95, p99}`` in ms, or None."""
|
||||||
|
n = len(self._samples)
|
||||||
|
if n == 0:
|
||||||
|
return None
|
||||||
|
s = sorted(self._samples)
|
||||||
|
ms = [v * 1000 for v in s]
|
||||||
|
return {
|
||||||
|
"count": n,
|
||||||
|
"min": round(ms[0], 1),
|
||||||
|
"max": round(ms[-1], 1),
|
||||||
|
"avg": round(sum(ms) / n, 1),
|
||||||
|
"p50": round(ms[int(n * 0.50)], 1),
|
||||||
|
"p95": round(ms[min(int(n * 0.95), n - 1)], 1),
|
||||||
|
"p99": round(ms[min(int(n * 0.99), n - 1)], 1),
|
||||||
|
}
|
||||||
|
|
||||||
|
def quantiles(self) -> dict | None:
|
||||||
|
"""Return ``{count, sum, 0.5, 0.95, 0.99}`` in seconds, or None.
|
||||||
|
|
||||||
|
Designed for Prometheus/OpenMetrics summary exposition.
|
||||||
|
"""
|
||||||
|
n = len(self._samples)
|
||||||
|
if n == 0:
|
||||||
|
return None
|
||||||
|
s = sorted(self._samples)
|
||||||
|
return {
|
||||||
|
"count": n,
|
||||||
|
"sum": sum(s),
|
||||||
|
"0.5": s[int(n * 0.50)],
|
||||||
|
"0.95": s[min(int(n * 0.95), n - 1)],
|
||||||
|
"0.99": s[min(int(n * 0.99), n - 1)],
|
||||||
|
}
|
||||||
|
|
||||||
|
|
||||||
class Metrics:
|
class Metrics:
|
||||||
@@ -19,18 +99,34 @@ class Metrics:
|
|||||||
self.retries: int = 0
|
self.retries: int = 0
|
||||||
self.bytes_in: int = 0
|
self.bytes_in: int = 0
|
||||||
self.bytes_out: int = 0
|
self.bytes_out: int = 0
|
||||||
|
self.auth_failures: int = 0
|
||||||
self.active: int = 0
|
self.active: int = 0
|
||||||
self.started: float = time.monotonic()
|
self.started: float = time.monotonic()
|
||||||
|
self.conn_rate: RateTracker = RateTracker()
|
||||||
|
self.latency: LatencyTracker = LatencyTracker()
|
||||||
|
self.listener_latency: dict[str, LatencyTracker] = {}
|
||||||
|
|
||||||
|
def get_listener_latency(self, key: str) -> LatencyTracker:
|
||||||
|
"""Get or create a per-listener latency tracker."""
|
||||||
|
if key not in self.listener_latency:
|
||||||
|
self.listener_latency[key] = LatencyTracker()
|
||||||
|
return self.listener_latency[key]
|
||||||
|
|
||||||
def summary(self) -> str:
|
def summary(self) -> str:
|
||||||
"""One-line log-friendly summary."""
|
"""One-line log-friendly summary."""
|
||||||
uptime = time.monotonic() - self.started
|
uptime = time.monotonic() - self.started
|
||||||
h, rem = divmod(int(uptime), 3600)
|
h, rem = divmod(int(uptime), 3600)
|
||||||
m, s = divmod(rem, 60)
|
m, s = divmod(rem, 60)
|
||||||
|
rate = self.conn_rate.rate()
|
||||||
|
lat = self.latency.stats()
|
||||||
|
p50 = f" p50={lat['p50']:.1f}ms" if lat else ""
|
||||||
|
p95 = f" p95={lat['p95']:.1f}ms" if lat else ""
|
||||||
|
auth = f" auth_fail={self.auth_failures}" if self.auth_failures else ""
|
||||||
return (
|
return (
|
||||||
f"conn={self.connections} ok={self.success} fail={self.failed} "
|
f"conn={self.connections} ok={self.success} fail={self.failed} "
|
||||||
f"retries={self.retries} active={self.active} "
|
f"retries={self.retries} active={self.active}{auth} "
|
||||||
f"in={_human_bytes(self.bytes_in)} out={_human_bytes(self.bytes_out)} "
|
f"in={_human_bytes(self.bytes_in)} out={_human_bytes(self.bytes_out)} "
|
||||||
|
f"rate={rate:.2f}/s{p50}{p95} "
|
||||||
f"up={h}h{m:02d}m{s:02d}s"
|
f"up={h}h{m:02d}m{s:02d}s"
|
||||||
)
|
)
|
||||||
|
|
||||||
@@ -41,10 +137,16 @@ class Metrics:
|
|||||||
"success": self.success,
|
"success": self.success,
|
||||||
"failed": self.failed,
|
"failed": self.failed,
|
||||||
"retries": self.retries,
|
"retries": self.retries,
|
||||||
|
"auth_failures": self.auth_failures,
|
||||||
"active": self.active,
|
"active": self.active,
|
||||||
"bytes_in": self.bytes_in,
|
"bytes_in": self.bytes_in,
|
||||||
"bytes_out": self.bytes_out,
|
"bytes_out": self.bytes_out,
|
||||||
"uptime": round(time.monotonic() - self.started, 1),
|
"uptime": round(time.monotonic() - self.started, 1),
|
||||||
|
"rate": round(self.conn_rate.rate(), 2),
|
||||||
|
"latency": self.latency.stats(),
|
||||||
|
"listener_latency": {
|
||||||
|
k: v.stats() for k, v in self.listener_latency.items()
|
||||||
|
},
|
||||||
}
|
}
|
||||||
|
|
||||||
|
|
||||||
|
|||||||
157
src/s5p/pool.py
157
src/s5p/pool.py
@@ -7,10 +7,11 @@ import json
|
|||||||
import logging
|
import logging
|
||||||
import os
|
import os
|
||||||
import random
|
import random
|
||||||
|
import ssl
|
||||||
import time
|
import time
|
||||||
from dataclasses import dataclass
|
from dataclasses import dataclass
|
||||||
from pathlib import Path
|
from pathlib import Path
|
||||||
from urllib.parse import urlencode, urlparse
|
from urllib.parse import urlencode
|
||||||
|
|
||||||
from .config import ChainHop, PoolSourceConfig, ProxyPoolConfig, parse_api_proxies, parse_proxy_url
|
from .config import ChainHop, PoolSourceConfig, ProxyPoolConfig, parse_api_proxies, parse_proxy_url
|
||||||
from .http import http_get_json, http_post_json
|
from .http import http_get_json, http_post_json
|
||||||
@@ -53,38 +54,52 @@ class ProxyPool:
|
|||||||
cfg: ProxyPoolConfig,
|
cfg: ProxyPoolConfig,
|
||||||
chain: list[ChainHop],
|
chain: list[ChainHop],
|
||||||
timeout: float,
|
timeout: float,
|
||||||
|
chain_nodes: list[ChainHop] | None = None,
|
||||||
|
name: str = "default",
|
||||||
) -> None:
|
) -> None:
|
||||||
self._cfg = cfg
|
self._cfg = cfg
|
||||||
self._chain = list(chain)
|
self._chain = list(chain)
|
||||||
|
self._chain_nodes = chain_nodes or []
|
||||||
|
self._chain_idx = 0
|
||||||
self._timeout = timeout
|
self._timeout = timeout
|
||||||
|
self._name = name
|
||||||
|
self._log_prefix = f"pool[{name}]" if name != "default" else "pool"
|
||||||
self._proxies: dict[str, ProxyEntry] = {}
|
self._proxies: dict[str, ProxyEntry] = {}
|
||||||
self._alive_keys: list[str] = []
|
self._alive_keys: list[str] = []
|
||||||
self._tasks: list[asyncio.Task] = []
|
self._tasks: list[asyncio.Task] = []
|
||||||
self._stop = asyncio.Event()
|
self._stop = asyncio.Event()
|
||||||
self._state_path = self._resolve_state_path()
|
self._state_path = self._resolve_state_path()
|
||||||
|
self._ssl_ctx = ssl.create_default_context()
|
||||||
|
self._target_idx = 0
|
||||||
|
|
||||||
|
def _effective_chain(self) -> list[ChainHop]:
|
||||||
|
"""Return chain with first hop rotated across tor_nodes (if configured)."""
|
||||||
|
if not self._chain_nodes or not self._chain:
|
||||||
|
return self._chain
|
||||||
|
chain = list(self._chain)
|
||||||
|
chain[0] = self._chain_nodes[self._chain_idx % len(self._chain_nodes)]
|
||||||
|
self._chain_idx += 1
|
||||||
|
return chain
|
||||||
|
|
||||||
|
@property
|
||||||
|
def name(self) -> str:
|
||||||
|
"""Pool name."""
|
||||||
|
return self._name
|
||||||
|
|
||||||
# -- public interface ----------------------------------------------------
|
# -- public interface ----------------------------------------------------
|
||||||
|
|
||||||
async def start(self) -> None:
|
async def start(self) -> None:
|
||||||
"""Load state, fetch sources, start background loops.
|
"""Load state, fetch sources, start background loops.
|
||||||
|
|
||||||
On warm start (state file has alive proxies), the pool begins
|
Always defers health testing to background so the server starts
|
||||||
serving immediately using cached state and defers all health
|
listening immediately. On warm start, cached alive proxies are
|
||||||
testing to background tasks. On cold start, a full health
|
available right away. On cold start, proxies become available
|
||||||
test runs before returning so the caller has live proxies.
|
as the background test progresses.
|
||||||
"""
|
"""
|
||||||
self._load_state()
|
self._load_state()
|
||||||
warm = bool(self._alive_keys)
|
|
||||||
await self._fetch_all_sources()
|
await self._fetch_all_sources()
|
||||||
|
self._save_state()
|
||||||
if warm:
|
self._tasks.append(asyncio.create_task(self._deferred_full_test()))
|
||||||
# trust persisted alive state, verify in background
|
|
||||||
self._save_state()
|
|
||||||
self._tasks.append(asyncio.create_task(self._deferred_full_test()))
|
|
||||||
else:
|
|
||||||
# cold start: test everything before serving
|
|
||||||
await self._run_health_tests()
|
|
||||||
self._save_state()
|
|
||||||
|
|
||||||
self._tasks.append(asyncio.create_task(self._refresh_loop()))
|
self._tasks.append(asyncio.create_task(self._refresh_loop()))
|
||||||
self._tasks.append(asyncio.create_task(self._health_loop()))
|
self._tasks.append(asyncio.create_task(self._health_loop()))
|
||||||
@@ -92,7 +107,7 @@ class ProxyPool:
|
|||||||
async def reload(self, cfg: ProxyPoolConfig) -> None:
|
async def reload(self, cfg: ProxyPoolConfig) -> None:
|
||||||
"""Update pool config and trigger source re-fetch."""
|
"""Update pool config and trigger source re-fetch."""
|
||||||
self._cfg = cfg
|
self._cfg = cfg
|
||||||
logger.info("pool: config reloaded, re-fetching sources")
|
logger.info("%s: config reloaded, re-fetching sources", self._log_prefix)
|
||||||
await self._fetch_all_sources()
|
await self._fetch_all_sources()
|
||||||
self._save_state()
|
self._save_state()
|
||||||
|
|
||||||
@@ -171,10 +186,11 @@ class ProxyPool:
|
|||||||
src = self._cfg.sources[i]
|
src = self._cfg.sources[i]
|
||||||
label = src.url or src.file or "?"
|
label = src.url or src.file or "?"
|
||||||
if isinstance(result, Exception):
|
if isinstance(result, Exception):
|
||||||
logger.warning("pool: source %s failed: %s", label, result)
|
err = str(result) or type(result).__name__
|
||||||
|
logger.warning("%s: source %s failed: %s", self._log_prefix, label, err)
|
||||||
else:
|
else:
|
||||||
kind = "fetched" if src.url else "loaded"
|
kind = "fetched" if src.url else "loaded"
|
||||||
logger.info("pool: %s %d proxies from %s", kind, len(result), label)
|
logger.info("%s: %s %d proxies from %s", self._log_prefix, kind, len(result), label)
|
||||||
proxies.extend(result)
|
proxies.extend(result)
|
||||||
self._merge(proxies)
|
self._merge(proxies)
|
||||||
|
|
||||||
@@ -187,6 +203,8 @@ class ProxyPool:
|
|||||||
params["proto"] = src.proto
|
params["proto"] = src.proto
|
||||||
if src.country:
|
if src.country:
|
||||||
params["country"] = src.country
|
params["country"] = src.country
|
||||||
|
if src.mitm is not None:
|
||||||
|
params["mitm"] = "1" if src.mitm else "0"
|
||||||
|
|
||||||
url = src.url
|
url = src.url
|
||||||
if params:
|
if params:
|
||||||
@@ -200,7 +218,7 @@ class ProxyPool:
|
|||||||
"""Parse a text file with one proxy URL per line (runs in executor)."""
|
"""Parse a text file with one proxy URL per line (runs in executor)."""
|
||||||
path = Path(src.file).expanduser()
|
path = Path(src.file).expanduser()
|
||||||
if not path.is_file():
|
if not path.is_file():
|
||||||
logger.warning("pool: file not found: %s", path)
|
logger.warning("%s: file not found: %s", self._log_prefix, path)
|
||||||
return []
|
return []
|
||||||
|
|
||||||
proxies: list[ChainHop] = []
|
proxies: list[ChainHop] = []
|
||||||
@@ -211,7 +229,7 @@ class ProxyPool:
|
|||||||
try:
|
try:
|
||||||
hop = parse_proxy_url(line)
|
hop = parse_proxy_url(line)
|
||||||
except ValueError as e:
|
except ValueError as e:
|
||||||
logger.debug("pool: skipping invalid line %r: %s", line, e)
|
logger.debug("%s: skipping invalid line %r: %s", self._log_prefix, line, e)
|
||||||
continue
|
continue
|
||||||
if src.proto and hop.proto != src.proto:
|
if src.proto and hop.proto != src.proto:
|
||||||
continue
|
continue
|
||||||
@@ -223,6 +241,8 @@ class ProxyPool:
|
|||||||
now = time.time()
|
now = time.time()
|
||||||
seen: set[str] = set()
|
seen: set[str] = set()
|
||||||
for hop in proxies:
|
for hop in proxies:
|
||||||
|
if self._cfg.allowed_protos and hop.proto not in self._cfg.allowed_protos:
|
||||||
|
continue
|
||||||
key = f"{hop.proto}://{hop.host}:{hop.port}"
|
key = f"{hop.proto}://{hop.host}:{hop.port}"
|
||||||
seen.add(key)
|
seen.add(key)
|
||||||
if key in self._proxies:
|
if key in self._proxies:
|
||||||
@@ -233,48 +253,52 @@ class ProxyPool:
|
|||||||
|
|
||||||
# -- health testing ------------------------------------------------------
|
# -- health testing ------------------------------------------------------
|
||||||
|
|
||||||
async def _http_check(self, chain: list[ChainHop]) -> bool:
|
async def _tls_check(self, chain: list[ChainHop]) -> bool:
|
||||||
"""Send an HTTP GET through *chain* and return True on 2xx."""
|
"""Perform a TLS handshake through *chain* and return True on success."""
|
||||||
parsed = urlparse(self._cfg.test_url)
|
targets = self._cfg.test_targets
|
||||||
host = parsed.hostname or "httpbin.org"
|
if not targets:
|
||||||
port = parsed.port or 80
|
return False
|
||||||
path = parsed.path or "/"
|
|
||||||
|
host = targets[self._target_idx % len(targets)]
|
||||||
|
self._target_idx += 1
|
||||||
|
|
||||||
try:
|
try:
|
||||||
reader, writer = await build_chain(
|
reader, writer = await build_chain(
|
||||||
chain, host, port, timeout=self._cfg.test_timeout,
|
chain, host, 443, timeout=self._cfg.test_timeout,
|
||||||
)
|
)
|
||||||
except (ProtoError, TimeoutError, ConnectionError, OSError, EOFError):
|
except (ProtoError, TimeoutError, ConnectionError, OSError, EOFError):
|
||||||
return False
|
return False
|
||||||
|
|
||||||
try:
|
try:
|
||||||
request = f"GET {path} HTTP/1.1\r\nHost: {host}\r\nConnection: close\r\n\r\n"
|
transport = writer.transport
|
||||||
writer.write(request.encode())
|
protocol = transport.get_protocol()
|
||||||
await writer.drain()
|
loop = asyncio.get_running_loop()
|
||||||
|
new_transport = await asyncio.wait_for(
|
||||||
line = await asyncio.wait_for(reader.readline(), timeout=self._cfg.test_timeout)
|
loop.start_tls(transport, protocol, self._ssl_ctx, server_hostname=host),
|
||||||
parts = line.decode("utf-8", errors="replace").split(None, 2)
|
timeout=self._cfg.test_timeout,
|
||||||
return len(parts) >= 2 and parts[1].startswith("2")
|
)
|
||||||
except (TimeoutError, ConnectionError, OSError, EOFError):
|
new_transport.close()
|
||||||
|
return True
|
||||||
|
except (ssl.SSLError, TimeoutError, ConnectionError, OSError, EOFError):
|
||||||
return False
|
return False
|
||||||
finally:
|
finally:
|
||||||
try:
|
try:
|
||||||
writer.close()
|
if not writer.is_closing():
|
||||||
await writer.wait_closed()
|
writer.close()
|
||||||
except OSError:
|
except OSError:
|
||||||
pass
|
pass
|
||||||
|
|
||||||
async def _test_proxy(self, key: str, entry: ProxyEntry) -> bool:
|
async def _test_proxy(self, key: str, entry: ProxyEntry) -> bool:
|
||||||
"""Test a single proxy by building the full chain and sending HTTP GET."""
|
"""Test a single proxy via TLS handshake through the full chain."""
|
||||||
entry.last_test = time.time()
|
entry.last_test = time.time()
|
||||||
entry.tests += 1
|
entry.tests += 1
|
||||||
return await self._http_check(self._chain + [entry.hop])
|
return await self._tls_check(self._effective_chain() + [entry.hop])
|
||||||
|
|
||||||
async def _test_chain(self) -> bool:
|
async def _test_chain(self) -> bool:
|
||||||
"""Test the static chain without any pool proxy."""
|
"""Test the static chain without any pool proxy."""
|
||||||
if not self._chain:
|
if not self._chain:
|
||||||
return True
|
return True
|
||||||
return await self._http_check(self._chain)
|
return await self._tls_check(self._effective_chain())
|
||||||
|
|
||||||
async def _run_health_tests(self, keys: list[str] | None = None) -> None:
|
async def _run_health_tests(self, keys: list[str] | None = None) -> None:
|
||||||
"""Test proxies with bounded concurrency.
|
"""Test proxies with bounded concurrency.
|
||||||
@@ -289,7 +313,10 @@ class ProxyPool:
|
|||||||
if self._chain:
|
if self._chain:
|
||||||
chain_ok = await self._test_chain()
|
chain_ok = await self._test_chain()
|
||||||
if not chain_ok:
|
if not chain_ok:
|
||||||
logger.warning("pool: static chain unreachable, skipping proxy tests")
|
logger.warning(
|
||||||
|
"%s: static chain unreachable, skipping proxy tests",
|
||||||
|
self._log_prefix,
|
||||||
|
)
|
||||||
return
|
return
|
||||||
|
|
||||||
target = (
|
target = (
|
||||||
@@ -300,15 +327,28 @@ class ProxyPool:
|
|||||||
if not target:
|
if not target:
|
||||||
return
|
return
|
||||||
|
|
||||||
sem = asyncio.Semaphore(self._cfg.test_concurrency)
|
effective = max(3, min(len(target) // 10, self._cfg.test_concurrency))
|
||||||
|
sem = asyncio.Semaphore(effective)
|
||||||
|
logger.debug(
|
||||||
|
"%s: testing %d proxies (concurrency=%d)",
|
||||||
|
self._log_prefix, len(target), effective,
|
||||||
|
)
|
||||||
results: dict[str, bool] = {}
|
results: dict[str, bool] = {}
|
||||||
|
|
||||||
async def _test(key: str, entry: ProxyEntry) -> None:
|
async def _test(key: str, entry: ProxyEntry) -> None:
|
||||||
async with sem:
|
async with sem:
|
||||||
try:
|
try:
|
||||||
results[key] = await self._test_proxy(key, entry)
|
ok = await self._test_proxy(key, entry)
|
||||||
except Exception:
|
except Exception:
|
||||||
results[key] = False
|
ok = False
|
||||||
|
results[key] = ok
|
||||||
|
# mark passing proxies alive immediately so they're
|
||||||
|
# available before the full batch completes
|
||||||
|
if ok:
|
||||||
|
entry.alive = True
|
||||||
|
entry.fails = 0
|
||||||
|
entry.last_ok = time.time()
|
||||||
|
self._alive_keys.append(key)
|
||||||
|
|
||||||
tasks = [_test(k, e) for k, e in target]
|
tasks = [_test(k, e) for k, e in target]
|
||||||
await asyncio.gather(*tasks)
|
await asyncio.gather(*tasks)
|
||||||
@@ -321,8 +361,8 @@ class ProxyPool:
|
|||||||
skip_eviction = fail_rate > 0.90 and total > 10
|
skip_eviction = fail_rate > 0.90 and total > 10
|
||||||
if skip_eviction:
|
if skip_eviction:
|
||||||
logger.warning(
|
logger.warning(
|
||||||
"pool: %d/%d tests failed (%.0f%%), skipping eviction",
|
"%s: %d/%d tests failed (%.0f%%), skipping eviction",
|
||||||
total - passed, total, fail_rate * 100,
|
self._log_prefix, total - passed, total, fail_rate * 100,
|
||||||
)
|
)
|
||||||
|
|
||||||
evict_keys: list[str] = []
|
evict_keys: list[str] = []
|
||||||
@@ -361,7 +401,8 @@ class ProxyPool:
|
|||||||
parts.append(f"stale {len(stale_keys)}")
|
parts.append(f"stale {len(stale_keys)}")
|
||||||
suffix = f" ({', '.join(parts)})" if parts else ""
|
suffix = f" ({', '.join(parts)})" if parts else ""
|
||||||
logger.info(
|
logger.info(
|
||||||
"pool: %d proxies, %d alive%s",
|
"%s: %d proxies, %d alive%s",
|
||||||
|
self._log_prefix,
|
||||||
len(self._proxies),
|
len(self._proxies),
|
||||||
len(self._alive_keys),
|
len(self._alive_keys),
|
||||||
suffix,
|
suffix,
|
||||||
@@ -385,9 +426,12 @@ class ProxyPool:
|
|||||||
|
|
||||||
try:
|
try:
|
||||||
await http_post_json(self._cfg.report_url, {"dead": dead})
|
await http_post_json(self._cfg.report_url, {"dead": dead})
|
||||||
logger.info("pool: reported %d dead proxies to %s", len(dead), self._cfg.report_url)
|
logger.info(
|
||||||
|
"%s: reported %d dead proxies to %s",
|
||||||
|
self._log_prefix, len(dead), self._cfg.report_url,
|
||||||
|
)
|
||||||
except Exception as e:
|
except Exception as e:
|
||||||
logger.debug("pool: report failed: %s", e)
|
logger.debug("%s: report failed: %s", self._log_prefix, e)
|
||||||
|
|
||||||
def _rebuild_alive(self) -> None:
|
def _rebuild_alive(self) -> None:
|
||||||
"""Rebuild the alive keys list from current state."""
|
"""Rebuild the alive keys list from current state."""
|
||||||
@@ -427,11 +471,12 @@ class ProxyPool:
|
|||||||
# -- persistence ---------------------------------------------------------
|
# -- persistence ---------------------------------------------------------
|
||||||
|
|
||||||
def _resolve_state_path(self) -> Path:
|
def _resolve_state_path(self) -> Path:
|
||||||
"""Resolve state file path, defaulting to ~/.cache/s5p/pool.json."""
|
"""Resolve state file path, defaulting to ~/.cache/s5p/pool[-name].json."""
|
||||||
if self._cfg.state_file:
|
if self._cfg.state_file:
|
||||||
return Path(self._cfg.state_file).expanduser()
|
return Path(self._cfg.state_file).expanduser()
|
||||||
cache_dir = Path.home() / ".cache" / "s5p"
|
cache_dir = Path.home() / ".cache" / "s5p"
|
||||||
return cache_dir / "pool.json"
|
filename = "pool.json" if self._name == "default" else f"pool-{self._name}.json"
|
||||||
|
return cache_dir / filename
|
||||||
|
|
||||||
def _load_state(self) -> None:
|
def _load_state(self) -> None:
|
||||||
"""Load proxy state from JSON file (warm start)."""
|
"""Load proxy state from JSON file (warm start)."""
|
||||||
@@ -440,7 +485,7 @@ class ProxyPool:
|
|||||||
try:
|
try:
|
||||||
data = json.loads(self._state_path.read_text())
|
data = json.loads(self._state_path.read_text())
|
||||||
if data.get("version") != STATE_VERSION:
|
if data.get("version") != STATE_VERSION:
|
||||||
logger.warning("pool: state file version mismatch, starting fresh")
|
logger.warning("%s: state file version mismatch, starting fresh", self._log_prefix)
|
||||||
return
|
return
|
||||||
for key, entry in data.get("proxies", {}).items():
|
for key, entry in data.get("proxies", {}).items():
|
||||||
hop = ChainHop(
|
hop = ChainHop(
|
||||||
@@ -461,11 +506,11 @@ class ProxyPool:
|
|||||||
)
|
)
|
||||||
self._rebuild_alive()
|
self._rebuild_alive()
|
||||||
logger.info(
|
logger.info(
|
||||||
"pool: loaded state (%d proxies, %d alive)",
|
"%s: loaded state (%d proxies, %d alive)",
|
||||||
len(self._proxies), len(self._alive_keys),
|
self._log_prefix, len(self._proxies), len(self._alive_keys),
|
||||||
)
|
)
|
||||||
except (json.JSONDecodeError, KeyError, TypeError, ValueError) as e:
|
except (json.JSONDecodeError, KeyError, TypeError, ValueError) as e:
|
||||||
logger.warning("pool: corrupt state file: %s", e)
|
logger.warning("%s: corrupt state file: %s", self._log_prefix, e)
|
||||||
self._proxies.clear()
|
self._proxies.clear()
|
||||||
self._alive_keys.clear()
|
self._alive_keys.clear()
|
||||||
|
|
||||||
@@ -497,4 +542,4 @@ class ProxyPool:
|
|||||||
tmp.write_text(json.dumps(data, indent=2))
|
tmp.write_text(json.dumps(data, indent=2))
|
||||||
os.replace(tmp, self._state_path)
|
os.replace(tmp, self._state_path)
|
||||||
except OSError as e:
|
except OSError as e:
|
||||||
logger.warning("pool: failed to save state: %s", e)
|
logger.warning("%s: failed to save state: %s", self._log_prefix, e)
|
||||||
|
|||||||
@@ -3,13 +3,15 @@
|
|||||||
from __future__ import annotations
|
from __future__ import annotations
|
||||||
|
|
||||||
import asyncio
|
import asyncio
|
||||||
|
import ipaddress
|
||||||
import logging
|
import logging
|
||||||
|
import random
|
||||||
import signal
|
import signal
|
||||||
import struct
|
import struct
|
||||||
import time
|
import time
|
||||||
|
|
||||||
from .api import start_api
|
from .api import start_api
|
||||||
from .config import Config, load_config
|
from .config import ChainHop, Config, ListenerConfig, load_config
|
||||||
from .connpool import FirstHopPool
|
from .connpool import FirstHopPool
|
||||||
from .metrics import Metrics
|
from .metrics import Metrics
|
||||||
from .pool import ProxyPool
|
from .pool import ProxyPool
|
||||||
@@ -21,6 +23,19 @@ logger = logging.getLogger("s5p")
|
|||||||
BUFFER_SIZE = 65536
|
BUFFER_SIZE = 65536
|
||||||
|
|
||||||
|
|
||||||
|
class _RoundRobin:
|
||||||
|
"""Simple round-robin selector (single-threaded asyncio, no lock)."""
|
||||||
|
|
||||||
|
def __init__(self, items: list[ChainHop]) -> None:
|
||||||
|
self._items = items
|
||||||
|
self._idx = 0
|
||||||
|
|
||||||
|
def next(self) -> ChainHop:
|
||||||
|
item = self._items[self._idx % len(self._items)]
|
||||||
|
self._idx += 1
|
||||||
|
return item
|
||||||
|
|
||||||
|
|
||||||
# -- relay -------------------------------------------------------------------
|
# -- relay -------------------------------------------------------------------
|
||||||
|
|
||||||
|
|
||||||
@@ -57,13 +72,48 @@ def _socks5_reply(rep: int) -> bytes:
|
|||||||
return struct.pack("!BBB", 0x05, rep, 0x00) + b"\x01\x00\x00\x00\x00\x00\x00"
|
return struct.pack("!BBB", 0x05, rep, 0x00) + b"\x01\x00\x00\x00\x00\x00\x00"
|
||||||
|
|
||||||
|
|
||||||
|
def _bypass_match(rules: list[str], host: str) -> bool:
|
||||||
|
"""Check if host matches any bypass rule (CIDR, suffix, or exact)."""
|
||||||
|
addr = None
|
||||||
|
try:
|
||||||
|
addr = ipaddress.ip_address(host)
|
||||||
|
except ValueError:
|
||||||
|
pass
|
||||||
|
|
||||||
|
for rule in rules:
|
||||||
|
if "/" in rule:
|
||||||
|
if addr is not None:
|
||||||
|
try:
|
||||||
|
if addr in ipaddress.ip_network(rule, strict=False):
|
||||||
|
return True
|
||||||
|
except ValueError:
|
||||||
|
pass
|
||||||
|
elif rule.startswith("."):
|
||||||
|
if addr is None and (host.endswith(rule) or host == rule[1:]):
|
||||||
|
return True
|
||||||
|
else:
|
||||||
|
if addr is not None:
|
||||||
|
try:
|
||||||
|
if addr == ipaddress.ip_address(rule):
|
||||||
|
return True
|
||||||
|
except ValueError:
|
||||||
|
pass
|
||||||
|
if host == rule:
|
||||||
|
return True
|
||||||
|
return False
|
||||||
|
|
||||||
|
|
||||||
async def _handle_client(
|
async def _handle_client(
|
||||||
client_reader: asyncio.StreamReader,
|
client_reader: asyncio.StreamReader,
|
||||||
client_writer: asyncio.StreamWriter,
|
client_writer: asyncio.StreamWriter,
|
||||||
config: Config,
|
listener: ListenerConfig,
|
||||||
proxy_pool: ProxyPool | None = None,
|
timeout: float,
|
||||||
|
retries: int,
|
||||||
|
pool_seq: list[list[ProxyPool]] | None = None,
|
||||||
metrics: Metrics | None = None,
|
metrics: Metrics | None = None,
|
||||||
first_hop_pool: FirstHopPool | None = None,
|
first_hop_pool: FirstHopPool | None = None,
|
||||||
|
tor_rr: _RoundRobin | None = None,
|
||||||
|
hop_pools: dict[tuple[str, int], FirstHopPool] | None = None,
|
||||||
) -> None:
|
) -> None:
|
||||||
"""Handle a single SOCKS5 client connection."""
|
"""Handle a single SOCKS5 client connection."""
|
||||||
peer = client_writer.get_extra_info("peername")
|
peer = client_writer.get_extra_info("peername")
|
||||||
@@ -71,6 +121,7 @@ async def _handle_client(
|
|||||||
|
|
||||||
if metrics:
|
if metrics:
|
||||||
metrics.connections += 1
|
metrics.connections += 1
|
||||||
|
metrics.conn_rate.record()
|
||||||
|
|
||||||
try:
|
try:
|
||||||
# -- greeting --
|
# -- greeting --
|
||||||
@@ -80,13 +131,50 @@ async def _handle_client(
|
|||||||
return
|
return
|
||||||
|
|
||||||
methods = await client_reader.readexactly(header[1])
|
methods = await client_reader.readexactly(header[1])
|
||||||
if 0x00 not in methods:
|
|
||||||
client_writer.write(b"\x05\xff")
|
|
||||||
await client_writer.drain()
|
|
||||||
return
|
|
||||||
|
|
||||||
client_writer.write(b"\x05\x00")
|
if listener.auth:
|
||||||
await client_writer.drain()
|
# require username/password auth (RFC 1929)
|
||||||
|
if 0x02 not in methods:
|
||||||
|
client_writer.write(b"\x05\xff")
|
||||||
|
await client_writer.drain()
|
||||||
|
return
|
||||||
|
|
||||||
|
client_writer.write(b"\x05\x02")
|
||||||
|
await client_writer.drain()
|
||||||
|
|
||||||
|
# subnegotiation: [ver, ulen, uname..., plen, passwd...]
|
||||||
|
ver = (await asyncio.wait_for(
|
||||||
|
client_reader.readexactly(1), timeout=10.0,
|
||||||
|
))[0]
|
||||||
|
if ver != 0x01:
|
||||||
|
client_writer.write(b"\x01\x01")
|
||||||
|
await client_writer.drain()
|
||||||
|
return
|
||||||
|
|
||||||
|
ulen = (await client_reader.readexactly(1))[0]
|
||||||
|
uname = (await client_reader.readexactly(ulen)).decode("utf-8", errors="replace")
|
||||||
|
plen = (await client_reader.readexactly(1))[0]
|
||||||
|
passwd = (await client_reader.readexactly(plen)).decode("utf-8", errors="replace")
|
||||||
|
|
||||||
|
if listener.auth.get(uname) != passwd:
|
||||||
|
logger.warning("[%s] auth failed for user %r", tag, uname)
|
||||||
|
if metrics:
|
||||||
|
metrics.auth_failures += 1
|
||||||
|
client_writer.write(b"\x01\x01")
|
||||||
|
await client_writer.drain()
|
||||||
|
return
|
||||||
|
|
||||||
|
client_writer.write(b"\x01\x00")
|
||||||
|
await client_writer.drain()
|
||||||
|
else:
|
||||||
|
# no auth required
|
||||||
|
if 0x00 not in methods:
|
||||||
|
client_writer.write(b"\x05\xff")
|
||||||
|
await client_writer.drain()
|
||||||
|
return
|
||||||
|
|
||||||
|
client_writer.write(b"\x05\x00")
|
||||||
|
await client_writer.drain()
|
||||||
|
|
||||||
# -- connect request --
|
# -- connect request --
|
||||||
req = await asyncio.wait_for(client_reader.readexactly(3), timeout=10.0)
|
req = await asyncio.wait_for(client_reader.readexactly(3), timeout=10.0)
|
||||||
@@ -100,32 +188,64 @@ async def _handle_client(
|
|||||||
target_host, target_port = await read_socks5_address(client_reader)
|
target_host, target_port = await read_socks5_address(client_reader)
|
||||||
logger.info("[%s] connect %s:%d", tag, target_host, target_port)
|
logger.info("[%s] connect %s:%d", tag, target_host, target_port)
|
||||||
|
|
||||||
|
# -- bypass / onion check --
|
||||||
|
bypass = bool(listener.bypass and _bypass_match(listener.bypass, target_host))
|
||||||
|
onion = target_host.endswith(".onion")
|
||||||
|
skip_pool = bypass or onion
|
||||||
|
if bypass:
|
||||||
|
logger.debug("[%s] bypass %s:%d", tag, target_host, target_port)
|
||||||
|
elif onion:
|
||||||
|
logger.debug("[%s] onion %s:%d (chain only)", tag, target_host, target_port)
|
||||||
|
|
||||||
# -- build chain (with retry) --
|
# -- build chain (with retry) --
|
||||||
attempts = config.retries if proxy_pool else 1
|
attempts = retries if pool_seq and not skip_pool else 1
|
||||||
last_err: Exception | None = None
|
last_err: Exception | None = None
|
||||||
|
|
||||||
for attempt in range(attempts):
|
for attempt in range(attempts):
|
||||||
effective_chain = list(config.chain)
|
if bypass:
|
||||||
pool_hop = None
|
effective_chain: list[ChainHop] = []
|
||||||
if proxy_pool:
|
fhp = None
|
||||||
pool_hop = await proxy_pool.get()
|
else:
|
||||||
if pool_hop:
|
effective_chain = list(listener.chain)
|
||||||
effective_chain.append(pool_hop)
|
fhp = first_hop_pool
|
||||||
logger.debug("[%s] +proxy %s", tag, pool_hop)
|
# tor_nodes round-robin overrides the listener's first hop
|
||||||
|
if tor_rr and effective_chain:
|
||||||
|
node = tor_rr.next()
|
||||||
|
effective_chain[0] = node
|
||||||
|
if hop_pools:
|
||||||
|
fhp = hop_pools.get((node.host, node.port))
|
||||||
|
|
||||||
|
pool_hops: list[tuple[ChainHop, ProxyPool]] = []
|
||||||
|
if pool_seq and not skip_pool:
|
||||||
|
for candidates in pool_seq:
|
||||||
|
weights = [max(pp.alive_count, 1) for pp in candidates]
|
||||||
|
pp = random.choices(candidates, weights=weights)[0]
|
||||||
|
hop = await pp.get()
|
||||||
|
if hop:
|
||||||
|
pool_hops.append((hop, pp))
|
||||||
|
effective_chain.append(hop)
|
||||||
|
if pool_hops:
|
||||||
|
logger.debug("[%s] +pool %s", tag, " ".join(str(h) for h, _ in pool_hops))
|
||||||
|
|
||||||
try:
|
try:
|
||||||
t0 = time.monotonic()
|
t0 = time.monotonic()
|
||||||
remote_reader, remote_writer = await build_chain(
|
remote_reader, remote_writer = await build_chain(
|
||||||
effective_chain, target_host, target_port,
|
effective_chain, target_host, target_port,
|
||||||
timeout=config.timeout, first_hop_pool=first_hop_pool,
|
timeout=timeout, first_hop_pool=fhp,
|
||||||
)
|
)
|
||||||
dt = time.monotonic() - t0
|
dt = time.monotonic() - t0
|
||||||
logger.debug("[%s] chain up in %.0fms", tag, dt * 1000)
|
logger.debug("[%s] chain up in %.0fms", tag, dt * 1000)
|
||||||
|
if metrics:
|
||||||
|
metrics.latency.record(dt)
|
||||||
|
metrics.get_listener_latency(
|
||||||
|
f"{listener.listen_host}:{listener.listen_port}"
|
||||||
|
).record(dt)
|
||||||
break
|
break
|
||||||
except (ProtoError, TimeoutError, ConnectionError, OSError) as e:
|
except (ProtoError, TimeoutError, ConnectionError, OSError) as e:
|
||||||
last_err = e
|
last_err = e
|
||||||
if pool_hop and proxy_pool:
|
if pool_hops:
|
||||||
proxy_pool.report_failure(pool_hop)
|
for hop, pp in pool_hops:
|
||||||
|
pp.report_failure(hop)
|
||||||
if metrics:
|
if metrics:
|
||||||
metrics.retries += 1
|
metrics.retries += 1
|
||||||
if attempt + 1 < attempts:
|
if attempt + 1 < attempts:
|
||||||
@@ -199,7 +319,7 @@ async def _handle_client(
|
|||||||
async def _metrics_logger(
|
async def _metrics_logger(
|
||||||
metrics: Metrics,
|
metrics: Metrics,
|
||||||
stop: asyncio.Event,
|
stop: asyncio.Event,
|
||||||
pool: ProxyPool | None = None,
|
pools: dict[str, ProxyPool] | None = None,
|
||||||
) -> None:
|
) -> None:
|
||||||
"""Log metrics summary every 60 seconds."""
|
"""Log metrics summary every 60 seconds."""
|
||||||
while not stop.is_set():
|
while not stop.is_set():
|
||||||
@@ -209,8 +329,13 @@ async def _metrics_logger(
|
|||||||
pass
|
pass
|
||||||
if not stop.is_set():
|
if not stop.is_set():
|
||||||
line = metrics.summary()
|
line = metrics.summary()
|
||||||
if pool:
|
if pools:
|
||||||
line += f" pool={pool.alive_count}/{pool.count}"
|
if len(pools) == 1:
|
||||||
|
p = next(iter(pools.values()))
|
||||||
|
line += f" pool={p.alive_count}/{p.count}"
|
||||||
|
else:
|
||||||
|
for name, p in pools.items():
|
||||||
|
line += f" pool[{name}]={p.alive_count}/{p.count}"
|
||||||
logger.info("metrics: %s", line)
|
logger.info("metrics: %s", line)
|
||||||
|
|
||||||
|
|
||||||
@@ -224,19 +349,85 @@ async def serve(config: Config) -> None:
|
|||||||
loop.add_signal_handler(sig, lambda s=sig: stop.set_result(s))
|
loop.add_signal_handler(sig, lambda s=sig: stop.set_result(s))
|
||||||
|
|
||||||
metrics = Metrics()
|
metrics = Metrics()
|
||||||
|
listeners = config.listeners
|
||||||
|
|
||||||
proxy_pool: ProxyPool | None = None
|
# -- tor_nodes round-robin -----------------------------------------------
|
||||||
if config.proxy_pool and config.proxy_pool.sources:
|
tor_rr: _RoundRobin | None = None
|
||||||
proxy_pool = ProxyPool(config.proxy_pool, config.chain, config.timeout)
|
if config.tor_nodes:
|
||||||
await proxy_pool.start()
|
tor_rr = _RoundRobin(config.tor_nodes)
|
||||||
|
nodes = ", ".join(str(n) for n in config.tor_nodes)
|
||||||
|
logger.info("tor_nodes: %s (round-robin)", nodes)
|
||||||
|
|
||||||
hop_pool: FirstHopPool | None = None
|
# -- named proxy pools ---------------------------------------------------
|
||||||
if config.pool_size > 0 and config.chain:
|
proxy_pools: dict[str, ProxyPool] = {}
|
||||||
hop_pool = FirstHopPool(
|
base_chain = listeners[0].chain if listeners else config.chain
|
||||||
config.chain[0], size=config.pool_size, max_idle=config.pool_max_idle,
|
for pool_name, pool_cfg in config.proxy_pools.items():
|
||||||
|
if not pool_cfg.sources:
|
||||||
|
continue
|
||||||
|
pool = ProxyPool(
|
||||||
|
pool_cfg, base_chain, config.timeout,
|
||||||
|
chain_nodes=config.tor_nodes or None,
|
||||||
|
name=pool_name,
|
||||||
)
|
)
|
||||||
await hop_pool.start()
|
await pool.start()
|
||||||
|
proxy_pools[pool_name] = pool
|
||||||
|
|
||||||
|
# backward compat: single proxy_pool -> "default"
|
||||||
|
if not proxy_pools and config.proxy_pool and config.proxy_pool.sources:
|
||||||
|
pool = ProxyPool(
|
||||||
|
config.proxy_pool, base_chain, config.timeout,
|
||||||
|
chain_nodes=config.tor_nodes or None,
|
||||||
|
)
|
||||||
|
await pool.start()
|
||||||
|
proxy_pools["default"] = pool
|
||||||
|
|
||||||
|
def _pools_for(lc: ListenerConfig) -> list[list[ProxyPool]]:
|
||||||
|
"""Resolve the ordered list of candidate proxy pools for a listener."""
|
||||||
|
result: list[list[ProxyPool]] = []
|
||||||
|
for candidates in lc.pool_seq:
|
||||||
|
resolved: list[ProxyPool] = []
|
||||||
|
for name in candidates:
|
||||||
|
if name not in proxy_pools:
|
||||||
|
raise RuntimeError(
|
||||||
|
f"listener {lc.listen_host}:{lc.listen_port} "
|
||||||
|
f"references unknown pool {name!r}"
|
||||||
|
)
|
||||||
|
resolved.append(proxy_pools[name])
|
||||||
|
result.append(resolved)
|
||||||
|
return result
|
||||||
|
|
||||||
|
# -- per-unique first-hop connection pools --------------------------------
|
||||||
|
hop_pools: dict[tuple[str, int], FirstHopPool] = {}
|
||||||
|
if config.pool_size > 0:
|
||||||
|
for lc in listeners:
|
||||||
|
if not lc.chain:
|
||||||
|
continue
|
||||||
|
first = lc.chain[0]
|
||||||
|
key = (first.host, first.port)
|
||||||
|
if key not in hop_pools:
|
||||||
|
hp = FirstHopPool(
|
||||||
|
first, size=config.pool_size, max_idle=config.pool_max_idle,
|
||||||
|
)
|
||||||
|
await hp.start()
|
||||||
|
hop_pools[key] = hp
|
||||||
|
# create pools for all tor_nodes
|
||||||
|
if config.tor_nodes:
|
||||||
|
for node in config.tor_nodes:
|
||||||
|
key = (node.host, node.port)
|
||||||
|
if key not in hop_pools:
|
||||||
|
hp = FirstHopPool(
|
||||||
|
node, size=config.pool_size,
|
||||||
|
max_idle=config.pool_max_idle,
|
||||||
|
)
|
||||||
|
await hp.start()
|
||||||
|
hop_pools[key] = hp
|
||||||
|
|
||||||
|
def _hop_pool_for(lc: ListenerConfig) -> FirstHopPool | None:
|
||||||
|
if not lc.chain:
|
||||||
|
return None
|
||||||
|
return hop_pools.get((lc.chain[0].host, lc.chain[0].port))
|
||||||
|
|
||||||
|
# -- tor controller ------------------------------------------------------
|
||||||
tor: TorController | None = None
|
tor: TorController | None = None
|
||||||
if config.tor:
|
if config.tor:
|
||||||
tc = config.tor
|
tc = config.tor
|
||||||
@@ -255,32 +446,65 @@ async def serve(config: Config) -> None:
|
|||||||
|
|
||||||
sem = asyncio.Semaphore(config.max_connections)
|
sem = asyncio.Semaphore(config.max_connections)
|
||||||
|
|
||||||
async def on_client(r: asyncio.StreamReader, w: asyncio.StreamWriter) -> None:
|
# -- start one server per listener ---------------------------------------
|
||||||
async with sem:
|
servers: list[asyncio.Server] = []
|
||||||
await _handle_client(r, w, config, proxy_pool, metrics, hop_pool)
|
for lc in listeners:
|
||||||
|
hp = _hop_pool_for(lc)
|
||||||
|
lc_pools = _pools_for(lc)
|
||||||
|
|
||||||
srv = await asyncio.start_server(on_client, config.listen_host, config.listen_port)
|
async def on_client(
|
||||||
addrs = ", ".join(str(s.getsockname()) for s in srv.sockets)
|
r: asyncio.StreamReader, w: asyncio.StreamWriter,
|
||||||
logger.info("listening on %s max_connections=%d", addrs, config.max_connections)
|
_lc: ListenerConfig = lc, _hp: FirstHopPool | None = hp,
|
||||||
|
_pools: list[list[ProxyPool]] = lc_pools,
|
||||||
|
) -> None:
|
||||||
|
async with sem:
|
||||||
|
await _handle_client(
|
||||||
|
r, w, _lc, config.timeout,
|
||||||
|
_lc.retries or config.retries,
|
||||||
|
_pools, metrics, _hp, tor_rr, hop_pools,
|
||||||
|
)
|
||||||
|
|
||||||
if config.chain:
|
srv = await asyncio.start_server(on_client, lc.listen_host, lc.listen_port)
|
||||||
for i, hop in enumerate(config.chain):
|
servers.append(srv)
|
||||||
logger.info(" chain[%d] %s", i, hop)
|
|
||||||
else:
|
|
||||||
logger.info(" mode: direct (no chain)")
|
|
||||||
|
|
||||||
if proxy_pool:
|
addr = f"{lc.listen_host}:{lc.listen_port}"
|
||||||
nsrc = len(config.proxy_pool.sources)
|
chain_desc = " -> ".join(str(h) for h in lc.chain) if lc.chain else "direct"
|
||||||
|
nhops = lc.pool_hops
|
||||||
|
pool_desc = ""
|
||||||
|
if nhops:
|
||||||
|
all_names = {n for cands in lc.pool_seq for n in cands}
|
||||||
|
hop_labels = ["|".join(cands) for cands in lc.pool_seq]
|
||||||
|
if len(all_names) == 1:
|
||||||
|
name = next(iter(all_names))
|
||||||
|
pool_desc = f" + {nhops} pool hop{'s' if nhops != 1 else ''}"
|
||||||
|
if name != "default":
|
||||||
|
pool_desc += f" [{name}]"
|
||||||
|
else:
|
||||||
|
pool_desc = f" + pool [{' -> '.join(hop_labels)}]"
|
||||||
|
bypass_desc = f" bypass: {len(lc.bypass)} rules" if lc.bypass else ""
|
||||||
|
auth_desc = f" auth: {len(lc.auth)} users" if lc.auth else ""
|
||||||
logger.info(
|
logger.info(
|
||||||
" pool: %d proxies, %d alive (from %d source%s)",
|
"listener %s chain: %s%s%s%s",
|
||||||
proxy_pool.count, proxy_pool.alive_count, nsrc, "s" if nsrc != 1 else "",
|
addr, chain_desc, pool_desc, bypass_desc, auth_desc,
|
||||||
)
|
)
|
||||||
logger.info(" retries: %d", config.retries)
|
|
||||||
|
logger.info("max_connections=%d", config.max_connections)
|
||||||
|
|
||||||
|
if proxy_pools:
|
||||||
|
for pname, pp in proxy_pools.items():
|
||||||
|
cfg = config.proxy_pools.get(pname, config.proxy_pool)
|
||||||
|
nsrc = len(cfg.sources) if cfg else 0
|
||||||
|
prefix = f"pool[{pname}]" if pname != "default" else "pool"
|
||||||
|
logger.info(
|
||||||
|
"%s: %d proxies, %d alive (from %d source%s)",
|
||||||
|
prefix, pp.count, pp.alive_count, nsrc, "s" if nsrc != 1 else "",
|
||||||
|
)
|
||||||
|
logger.info("retries: %d", config.retries)
|
||||||
|
|
||||||
if tor:
|
if tor:
|
||||||
extra = f", newnym every {tor.newnym_interval:.0f}s" if tor.newnym_interval else ""
|
extra = f", newnym every {tor.newnym_interval:.0f}s" if tor.newnym_interval else ""
|
||||||
logger.info(
|
logger.info(
|
||||||
" tor: control %s:%d%s", config.tor.control_host, config.tor.control_port, extra,
|
"tor: control %s:%d%s", config.tor.control_host, config.tor.control_port, extra,
|
||||||
)
|
)
|
||||||
|
|
||||||
# -- control API ---------------------------------------------------------
|
# -- control API ---------------------------------------------------------
|
||||||
@@ -289,8 +513,9 @@ async def serve(config: Config) -> None:
|
|||||||
api_ctx: dict = {
|
api_ctx: dict = {
|
||||||
"config": config,
|
"config": config,
|
||||||
"metrics": metrics,
|
"metrics": metrics,
|
||||||
"pool": proxy_pool,
|
"pools": proxy_pools,
|
||||||
"hop_pool": hop_pool,
|
"pool": next(iter(proxy_pools.values()), None), # backward compat
|
||||||
|
"hop_pools": hop_pools,
|
||||||
"tor": tor,
|
"tor": tor,
|
||||||
}
|
}
|
||||||
|
|
||||||
@@ -315,9 +540,14 @@ async def serve(config: Config) -> None:
|
|||||||
for h in root.handlers:
|
for h in root.handlers:
|
||||||
h.setLevel(level)
|
h.setLevel(level)
|
||||||
logging.getLogger("s5p").setLevel(level)
|
logging.getLogger("s5p").setLevel(level)
|
||||||
if proxy_pool and new.proxy_pool:
|
# reload named pools (match by name)
|
||||||
await proxy_pool.reload(new.proxy_pool)
|
for pname, pp in proxy_pools.items():
|
||||||
logger.info("reload: config reloaded")
|
new_cfg = new.proxy_pools.get(pname)
|
||||||
|
if new_cfg:
|
||||||
|
await pp.reload(new_cfg)
|
||||||
|
elif new.proxy_pool and pname == "default":
|
||||||
|
await pp.reload(new.proxy_pool)
|
||||||
|
logger.info("reload: config reloaded (listeners require restart)")
|
||||||
|
|
||||||
def _on_sighup() -> None:
|
def _on_sighup() -> None:
|
||||||
asyncio.create_task(_reload())
|
asyncio.create_task(_reload())
|
||||||
@@ -329,24 +559,41 @@ async def serve(config: Config) -> None:
|
|||||||
api_srv = await start_api(config.api_host, config.api_port, api_ctx)
|
api_srv = await start_api(config.api_host, config.api_port, api_ctx)
|
||||||
|
|
||||||
metrics_stop = asyncio.Event()
|
metrics_stop = asyncio.Event()
|
||||||
pool_ref = proxy_pool
|
metrics_task = asyncio.create_task(_metrics_logger(metrics, metrics_stop, proxy_pools or None))
|
||||||
metrics_task = asyncio.create_task(_metrics_logger(metrics, metrics_stop, pool_ref))
|
|
||||||
|
|
||||||
async with srv:
|
# keep all servers open until stop signal
|
||||||
|
try:
|
||||||
|
for srv in servers:
|
||||||
|
await srv.start_serving()
|
||||||
sig = await stop
|
sig = await stop
|
||||||
|
finally:
|
||||||
logger.info("received %s, shutting down", signal.Signals(sig).name)
|
logger.info("received %s, shutting down", signal.Signals(sig).name)
|
||||||
|
for srv in servers:
|
||||||
|
srv.close()
|
||||||
|
for srv in servers:
|
||||||
|
try:
|
||||||
|
await asyncio.wait_for(srv.wait_closed(), timeout=5.0)
|
||||||
|
except TimeoutError:
|
||||||
|
pass
|
||||||
|
if metrics.active:
|
||||||
|
logger.info("shutdown: %d connections still active", metrics.active)
|
||||||
if api_srv:
|
if api_srv:
|
||||||
api_srv.close()
|
api_srv.close()
|
||||||
await api_srv.wait_closed()
|
await api_srv.wait_closed()
|
||||||
if tor:
|
if tor:
|
||||||
await tor.stop()
|
await tor.stop()
|
||||||
if hop_pool:
|
for hp in hop_pools.values():
|
||||||
await hop_pool.stop()
|
await hp.stop()
|
||||||
if proxy_pool:
|
for pp in proxy_pools.values():
|
||||||
await proxy_pool.stop()
|
await pp.stop()
|
||||||
shutdown_line = metrics.summary()
|
shutdown_line = metrics.summary()
|
||||||
if pool_ref:
|
if proxy_pools:
|
||||||
shutdown_line += f" pool={pool_ref.alive_count}/{pool_ref.count}"
|
if len(proxy_pools) == 1:
|
||||||
|
p = next(iter(proxy_pools.values()))
|
||||||
|
shutdown_line += f" pool={p.alive_count}/{p.count}"
|
||||||
|
else:
|
||||||
|
for pname, p in proxy_pools.items():
|
||||||
|
shutdown_line += f" pool[{pname}]={p.alive_count}/{p.count}"
|
||||||
logger.info("metrics: %s", shutdown_line)
|
logger.info("metrics: %s", shutdown_line)
|
||||||
metrics_stop.set()
|
metrics_stop.set()
|
||||||
await metrics_task
|
await metrics_task
|
||||||
|
|||||||
138
tests/conftest.py
Normal file
138
tests/conftest.py
Normal file
@@ -0,0 +1,138 @@
|
|||||||
|
"""Shared helpers for integration tests."""
|
||||||
|
|
||||||
|
from __future__ import annotations
|
||||||
|
|
||||||
|
import asyncio
|
||||||
|
import socket
|
||||||
|
import struct
|
||||||
|
|
||||||
|
from s5p.proto import encode_address, read_socks5_address
|
||||||
|
|
||||||
|
|
||||||
|
def free_port() -> int:
|
||||||
|
"""Return an available TCP port."""
|
||||||
|
with socket.socket(socket.AF_INET, socket.SOCK_STREAM) as s:
|
||||||
|
s.bind(("127.0.0.1", 0))
|
||||||
|
return s.getsockname()[1]
|
||||||
|
|
||||||
|
|
||||||
|
# -- echo server -------------------------------------------------------------
|
||||||
|
|
||||||
|
|
||||||
|
async def _echo_handler(
|
||||||
|
reader: asyncio.StreamReader, writer: asyncio.StreamWriter,
|
||||||
|
) -> None:
|
||||||
|
"""Echo back everything received, then close."""
|
||||||
|
try:
|
||||||
|
while True:
|
||||||
|
data = await reader.read(65536)
|
||||||
|
if not data:
|
||||||
|
break
|
||||||
|
writer.write(data)
|
||||||
|
await writer.drain()
|
||||||
|
except (ConnectionError, asyncio.CancelledError):
|
||||||
|
pass
|
||||||
|
finally:
|
||||||
|
writer.close()
|
||||||
|
await writer.wait_closed()
|
||||||
|
|
||||||
|
|
||||||
|
async def start_echo_server() -> tuple[str, int, asyncio.Server]:
|
||||||
|
"""Start a TCP echo server. Returns (host, port, server)."""
|
||||||
|
host = "127.0.0.1"
|
||||||
|
port = free_port()
|
||||||
|
srv = await asyncio.start_server(_echo_handler, host, port)
|
||||||
|
await srv.start_serving()
|
||||||
|
return host, port, srv
|
||||||
|
|
||||||
|
|
||||||
|
# -- mock SOCKS5 proxy -------------------------------------------------------
|
||||||
|
|
||||||
|
|
||||||
|
async def _mock_socks5_handler(
|
||||||
|
reader: asyncio.StreamReader, writer: asyncio.StreamWriter,
|
||||||
|
) -> None:
|
||||||
|
"""Minimal SOCKS5 proxy: greeting, CONNECT, relay."""
|
||||||
|
remote_writer = None
|
||||||
|
try:
|
||||||
|
# greeting
|
||||||
|
header = await reader.readexactly(2)
|
||||||
|
if header[0] != 0x05:
|
||||||
|
return
|
||||||
|
await reader.readexactly(header[1]) # skip methods
|
||||||
|
writer.write(b"\x05\x00")
|
||||||
|
await writer.drain()
|
||||||
|
|
||||||
|
# connect request
|
||||||
|
req = await reader.readexactly(3)
|
||||||
|
if req[0] != 0x05 or req[1] != 0x01:
|
||||||
|
return
|
||||||
|
|
||||||
|
target_host, target_port = await read_socks5_address(reader)
|
||||||
|
|
||||||
|
# connect to actual target
|
||||||
|
try:
|
||||||
|
remote_reader, remote_writer = await asyncio.wait_for(
|
||||||
|
asyncio.open_connection(target_host, target_port),
|
||||||
|
timeout=5.0,
|
||||||
|
)
|
||||||
|
except (OSError, TimeoutError):
|
||||||
|
# connection refused reply
|
||||||
|
reply = struct.pack("!BBB", 0x05, 0x05, 0x00)
|
||||||
|
reply += b"\x01\x00\x00\x00\x00\x00\x00"
|
||||||
|
writer.write(reply)
|
||||||
|
await writer.drain()
|
||||||
|
return
|
||||||
|
|
||||||
|
# success reply
|
||||||
|
atyp, addr_bytes = encode_address(target_host)
|
||||||
|
reply = struct.pack("!BBB", 0x05, 0x00, 0x00)
|
||||||
|
reply += bytes([atyp]) + addr_bytes + struct.pack("!H", target_port)
|
||||||
|
writer.write(reply)
|
||||||
|
await writer.drain()
|
||||||
|
|
||||||
|
# relay both directions (close dst on EOF so peer sees shutdown)
|
||||||
|
async def _fwd(src: asyncio.StreamReader, dst: asyncio.StreamWriter) -> None:
|
||||||
|
try:
|
||||||
|
while True:
|
||||||
|
data = await src.read(65536)
|
||||||
|
if not data:
|
||||||
|
break
|
||||||
|
dst.write(data)
|
||||||
|
await dst.drain()
|
||||||
|
except (ConnectionError, asyncio.CancelledError):
|
||||||
|
pass
|
||||||
|
finally:
|
||||||
|
try:
|
||||||
|
dst.close()
|
||||||
|
await dst.wait_closed()
|
||||||
|
except OSError:
|
||||||
|
pass
|
||||||
|
|
||||||
|
await asyncio.gather(
|
||||||
|
_fwd(reader, remote_writer),
|
||||||
|
_fwd(remote_reader, writer),
|
||||||
|
)
|
||||||
|
except (ConnectionError, asyncio.IncompleteReadError, asyncio.CancelledError):
|
||||||
|
pass
|
||||||
|
finally:
|
||||||
|
if remote_writer:
|
||||||
|
remote_writer.close()
|
||||||
|
try:
|
||||||
|
await remote_writer.wait_closed()
|
||||||
|
except OSError:
|
||||||
|
pass
|
||||||
|
writer.close()
|
||||||
|
try:
|
||||||
|
await writer.wait_closed()
|
||||||
|
except OSError:
|
||||||
|
pass
|
||||||
|
|
||||||
|
|
||||||
|
async def start_mock_socks5() -> tuple[str, int, asyncio.Server]:
|
||||||
|
"""Start a mock SOCKS5 proxy. Returns (host, port, server)."""
|
||||||
|
host = "127.0.0.1"
|
||||||
|
port = free_port()
|
||||||
|
srv = await asyncio.start_server(_mock_socks5_handler, host, port)
|
||||||
|
await srv.start_serving()
|
||||||
|
return host, port, srv
|
||||||
@@ -13,9 +13,10 @@ from s5p.api import (
|
|||||||
_handle_tor_newnym,
|
_handle_tor_newnym,
|
||||||
_json_response,
|
_json_response,
|
||||||
_parse_request,
|
_parse_request,
|
||||||
|
_render_openmetrics,
|
||||||
_route,
|
_route,
|
||||||
)
|
)
|
||||||
from s5p.config import ChainHop, Config, PoolSourceConfig, ProxyPoolConfig
|
from s5p.config import ChainHop, Config, ListenerConfig, PoolSourceConfig, ProxyPoolConfig
|
||||||
from s5p.metrics import Metrics
|
from s5p.metrics import Metrics
|
||||||
|
|
||||||
# -- request parsing ---------------------------------------------------------
|
# -- request parsing ---------------------------------------------------------
|
||||||
@@ -82,6 +83,7 @@ class TestJsonResponse:
|
|||||||
def _make_ctx(
|
def _make_ctx(
|
||||||
config: Config | None = None,
|
config: Config | None = None,
|
||||||
pool: MagicMock | None = None,
|
pool: MagicMock | None = None,
|
||||||
|
pools: dict | None = None,
|
||||||
tor: MagicMock | None = None,
|
tor: MagicMock | None = None,
|
||||||
) -> dict:
|
) -> dict:
|
||||||
"""Build a mock context dict."""
|
"""Build a mock context dict."""
|
||||||
@@ -89,6 +91,7 @@ def _make_ctx(
|
|||||||
"config": config or Config(),
|
"config": config or Config(),
|
||||||
"metrics": Metrics(),
|
"metrics": Metrics(),
|
||||||
"pool": pool,
|
"pool": pool,
|
||||||
|
"pools": pools,
|
||||||
"hop_pool": None,
|
"hop_pool": None,
|
||||||
"tor": tor,
|
"tor": tor,
|
||||||
}
|
}
|
||||||
@@ -109,6 +112,8 @@ class TestHandleStatus:
|
|||||||
assert body["connections"] == 10
|
assert body["connections"] == 10
|
||||||
assert body["success"] == 8
|
assert body["success"] == 8
|
||||||
assert "uptime" in body
|
assert "uptime" in body
|
||||||
|
assert "rate" in body
|
||||||
|
assert "latency" in body
|
||||||
|
|
||||||
def test_with_pool(self):
|
def test_with_pool(self):
|
||||||
pool = MagicMock()
|
pool = MagicMock()
|
||||||
@@ -118,25 +123,387 @@ class TestHandleStatus:
|
|||||||
_, body = _handle_status(ctx)
|
_, body = _handle_status(ctx)
|
||||||
assert body["pool"] == {"alive": 5, "total": 10}
|
assert body["pool"] == {"alive": 5, "total": 10}
|
||||||
|
|
||||||
def test_with_chain(self):
|
def test_with_listeners(self):
|
||||||
config = Config(chain=[ChainHop("socks5", "127.0.0.1", 9050)])
|
config = Config(
|
||||||
|
listeners=[
|
||||||
|
ListenerConfig(
|
||||||
|
listen_host="0.0.0.0", listen_port=1080,
|
||||||
|
chain=[ChainHop("socks5", "127.0.0.1", 9050)],
|
||||||
|
),
|
||||||
|
ListenerConfig(
|
||||||
|
listen_host="0.0.0.0", listen_port=1081,
|
||||||
|
chain=[ChainHop("socks5", "127.0.0.1", 9050)],
|
||||||
|
pool_seq=[["default"]],
|
||||||
|
),
|
||||||
|
],
|
||||||
|
)
|
||||||
|
ctx = _make_ctx(config=config)
|
||||||
|
# record some latency for the first listener
|
||||||
|
ctx["metrics"].get_listener_latency("0.0.0.0:1080").record(0.2)
|
||||||
|
_, body = _handle_status(ctx)
|
||||||
|
assert len(body["listeners"]) == 2
|
||||||
|
assert body["listeners"][0]["chain"] == ["socks5://127.0.0.1:9050"]
|
||||||
|
assert body["listeners"][0]["pool_hops"] == 0
|
||||||
|
assert body["listeners"][1]["pool_hops"] == 1
|
||||||
|
# per-listener latency present on each entry
|
||||||
|
assert "latency" in body["listeners"][0]
|
||||||
|
assert body["listeners"][0]["latency"]["count"] == 1
|
||||||
|
assert "latency" in body["listeners"][1]
|
||||||
|
assert body["listeners"][1]["latency"] is None
|
||||||
|
|
||||||
|
|
||||||
|
class TestHandleStatusAuth:
|
||||||
|
"""Test auth flag in /status listener entries."""
|
||||||
|
|
||||||
|
def test_auth_flag_present(self):
|
||||||
|
config = Config(
|
||||||
|
listeners=[
|
||||||
|
ListenerConfig(
|
||||||
|
listen_host="0.0.0.0", listen_port=1080,
|
||||||
|
auth={"alice": "s3cret", "bob": "hunter2"},
|
||||||
|
),
|
||||||
|
],
|
||||||
|
)
|
||||||
ctx = _make_ctx(config=config)
|
ctx = _make_ctx(config=config)
|
||||||
_, body = _handle_status(ctx)
|
_, body = _handle_status(ctx)
|
||||||
assert body["chain"] == ["socks5://127.0.0.1:9050"]
|
assert body["listeners"][0]["auth"] is True
|
||||||
|
|
||||||
|
def test_auth_flag_absent_when_empty(self):
|
||||||
|
config = Config(
|
||||||
|
listeners=[
|
||||||
|
ListenerConfig(listen_host="0.0.0.0", listen_port=1080),
|
||||||
|
],
|
||||||
|
)
|
||||||
|
ctx = _make_ctx(config=config)
|
||||||
|
_, body = _handle_status(ctx)
|
||||||
|
assert "auth" not in body["listeners"][0]
|
||||||
|
|
||||||
|
|
||||||
|
class TestHandleConfigAuth:
|
||||||
|
"""Test auth_users in /config listener entries."""
|
||||||
|
|
||||||
|
def test_auth_users_count(self):
|
||||||
|
config = Config(
|
||||||
|
listeners=[
|
||||||
|
ListenerConfig(
|
||||||
|
listen_host="0.0.0.0", listen_port=1080,
|
||||||
|
auth={"alice": "s3cret", "bob": "hunter2"},
|
||||||
|
),
|
||||||
|
],
|
||||||
|
)
|
||||||
|
ctx = _make_ctx(config=config)
|
||||||
|
_, body = _handle_config(ctx)
|
||||||
|
assert body["listeners"][0]["auth_users"] == 2
|
||||||
|
|
||||||
|
def test_auth_users_absent_when_empty(self):
|
||||||
|
config = Config(
|
||||||
|
listeners=[
|
||||||
|
ListenerConfig(listen_host="0.0.0.0", listen_port=1080),
|
||||||
|
],
|
||||||
|
)
|
||||||
|
ctx = _make_ctx(config=config)
|
||||||
|
_, body = _handle_config(ctx)
|
||||||
|
assert "auth_users" not in body["listeners"][0]
|
||||||
|
|
||||||
|
def test_passwords_not_exposed(self):
|
||||||
|
config = Config(
|
||||||
|
listeners=[
|
||||||
|
ListenerConfig(
|
||||||
|
listen_host="0.0.0.0", listen_port=1080,
|
||||||
|
auth={"alice": "s3cret"},
|
||||||
|
),
|
||||||
|
],
|
||||||
|
)
|
||||||
|
ctx = _make_ctx(config=config)
|
||||||
|
_, body = _handle_config(ctx)
|
||||||
|
listener = body["listeners"][0]
|
||||||
|
# only count, never passwords
|
||||||
|
assert "auth_users" in listener
|
||||||
|
assert "auth" not in listener
|
||||||
|
assert "s3cret" not in str(body)
|
||||||
|
|
||||||
|
|
||||||
|
class TestHandleStatusRetries:
|
||||||
|
"""Test retries in /status listener entries."""
|
||||||
|
|
||||||
|
def test_retries_present_when_set(self):
|
||||||
|
config = Config(
|
||||||
|
listeners=[
|
||||||
|
ListenerConfig(
|
||||||
|
listen_host="0.0.0.0", listen_port=1080,
|
||||||
|
retries=5,
|
||||||
|
),
|
||||||
|
],
|
||||||
|
)
|
||||||
|
ctx = _make_ctx(config=config)
|
||||||
|
_, body = _handle_status(ctx)
|
||||||
|
assert body["listeners"][0]["retries"] == 5
|
||||||
|
|
||||||
|
def test_retries_absent_when_zero(self):
|
||||||
|
config = Config(
|
||||||
|
listeners=[
|
||||||
|
ListenerConfig(listen_host="0.0.0.0", listen_port=1080),
|
||||||
|
],
|
||||||
|
)
|
||||||
|
ctx = _make_ctx(config=config)
|
||||||
|
_, body = _handle_status(ctx)
|
||||||
|
assert "retries" not in body["listeners"][0]
|
||||||
|
|
||||||
|
|
||||||
|
class TestHandleConfigRetries:
|
||||||
|
"""Test retries in /config listener entries."""
|
||||||
|
|
||||||
|
def test_retries_present_when_set(self):
|
||||||
|
config = Config(
|
||||||
|
listeners=[
|
||||||
|
ListenerConfig(
|
||||||
|
listen_host="0.0.0.0", listen_port=1080,
|
||||||
|
retries=7,
|
||||||
|
),
|
||||||
|
],
|
||||||
|
)
|
||||||
|
ctx = _make_ctx(config=config)
|
||||||
|
_, body = _handle_config(ctx)
|
||||||
|
assert body["listeners"][0]["retries"] == 7
|
||||||
|
|
||||||
|
def test_retries_absent_when_zero(self):
|
||||||
|
config = Config(
|
||||||
|
listeners=[
|
||||||
|
ListenerConfig(listen_host="0.0.0.0", listen_port=1080),
|
||||||
|
],
|
||||||
|
)
|
||||||
|
ctx = _make_ctx(config=config)
|
||||||
|
_, body = _handle_config(ctx)
|
||||||
|
assert "retries" not in body["listeners"][0]
|
||||||
|
|
||||||
|
|
||||||
|
class TestHandleConfigAllowedProtos:
|
||||||
|
"""Test allowed_protos in /config pool entries."""
|
||||||
|
|
||||||
|
def test_allowed_protos_present(self):
|
||||||
|
pp = ProxyPoolConfig(
|
||||||
|
sources=[],
|
||||||
|
allowed_protos=["socks5"],
|
||||||
|
)
|
||||||
|
config = Config(
|
||||||
|
proxy_pools={"socks_only": pp},
|
||||||
|
listeners=[ListenerConfig()],
|
||||||
|
)
|
||||||
|
ctx = _make_ctx(config=config)
|
||||||
|
_, body = _handle_config(ctx)
|
||||||
|
assert body["proxy_pools"]["socks_only"]["allowed_protos"] == ["socks5"]
|
||||||
|
|
||||||
|
def test_allowed_protos_absent_when_empty(self):
|
||||||
|
pp = ProxyPoolConfig(sources=[])
|
||||||
|
config = Config(
|
||||||
|
proxy_pools={"default": pp},
|
||||||
|
listeners=[ListenerConfig()],
|
||||||
|
)
|
||||||
|
ctx = _make_ctx(config=config)
|
||||||
|
_, body = _handle_config(ctx)
|
||||||
|
assert "allowed_protos" not in body["proxy_pools"]["default"]
|
||||||
|
|
||||||
|
|
||||||
|
class TestHandleStatusPools:
|
||||||
|
"""Test GET /status with multiple named pools."""
|
||||||
|
|
||||||
|
def test_multi_pool_summary(self):
|
||||||
|
pool_a = MagicMock()
|
||||||
|
pool_a.alive_count = 5
|
||||||
|
pool_a.count = 10
|
||||||
|
pool_a.name = "clean"
|
||||||
|
pool_b = MagicMock()
|
||||||
|
pool_b.alive_count = 3
|
||||||
|
pool_b.count = 8
|
||||||
|
pool_b.name = "mitm"
|
||||||
|
ctx = _make_ctx(pools={"clean": pool_a, "mitm": pool_b})
|
||||||
|
_, body = _handle_status(ctx)
|
||||||
|
assert body["pool"] == {"alive": 8, "total": 18}
|
||||||
|
assert body["pools"]["clean"] == {"alive": 5, "total": 10}
|
||||||
|
assert body["pools"]["mitm"] == {"alive": 3, "total": 8}
|
||||||
|
|
||||||
|
|
||||||
|
class TestHandleStatusMultiPool:
|
||||||
|
"""Test pool_seq appears in /status only for multi-pool listeners."""
|
||||||
|
|
||||||
|
def test_single_pool_no_pool_seq(self):
|
||||||
|
"""Single-pool listener: no pool_seq in response."""
|
||||||
|
config = Config(
|
||||||
|
listeners=[
|
||||||
|
ListenerConfig(
|
||||||
|
listen_host="0.0.0.0", listen_port=1080,
|
||||||
|
chain=[ChainHop("socks5", "127.0.0.1", 9050)],
|
||||||
|
pool_seq=[["clean"], ["clean"]], pool_name="clean",
|
||||||
|
),
|
||||||
|
],
|
||||||
|
)
|
||||||
|
ctx = _make_ctx(config=config)
|
||||||
|
_, body = _handle_status(ctx)
|
||||||
|
assert "pool_seq" not in body["listeners"][0]
|
||||||
|
|
||||||
|
def test_multi_pool_has_pool_seq(self):
|
||||||
|
"""Multi-pool listener: pool_seq appears in response."""
|
||||||
|
config = Config(
|
||||||
|
listeners=[
|
||||||
|
ListenerConfig(
|
||||||
|
listen_host="0.0.0.0", listen_port=1080,
|
||||||
|
chain=[ChainHop("socks5", "127.0.0.1", 9050)],
|
||||||
|
pool_seq=[["clean"], ["mitm"]], pool_name="clean",
|
||||||
|
),
|
||||||
|
],
|
||||||
|
)
|
||||||
|
ctx = _make_ctx(config=config)
|
||||||
|
_, body = _handle_status(ctx)
|
||||||
|
assert body["listeners"][0]["pool_seq"] == [["clean"], ["mitm"]]
|
||||||
|
assert body["listeners"][0]["pool_hops"] == 2
|
||||||
|
|
||||||
|
def test_multi_pool_in_config(self):
|
||||||
|
"""Multi-pool listener: pool_seq appears in /config response."""
|
||||||
|
config = Config(
|
||||||
|
listeners=[
|
||||||
|
ListenerConfig(
|
||||||
|
listen_host="0.0.0.0", listen_port=1080,
|
||||||
|
chain=[ChainHop("socks5", "127.0.0.1", 9050)],
|
||||||
|
pool_seq=[["clean"], ["mitm"]], pool_name="clean",
|
||||||
|
),
|
||||||
|
],
|
||||||
|
)
|
||||||
|
ctx = _make_ctx(config=config)
|
||||||
|
_, body = _handle_config(ctx)
|
||||||
|
assert body["listeners"][0]["pool_seq"] == [["clean"], ["mitm"]]
|
||||||
|
|
||||||
|
|
||||||
class TestHandleMetrics:
|
class TestHandleMetrics:
|
||||||
"""Test GET /metrics handler."""
|
"""Test GET /metrics handler (OpenMetrics format)."""
|
||||||
|
|
||||||
def test_returns_dict(self):
|
def test_returns_openmetrics_string(self):
|
||||||
ctx = _make_ctx()
|
ctx = _make_ctx()
|
||||||
ctx["metrics"].connections = 42
|
ctx["metrics"].connections = 42
|
||||||
ctx["metrics"].bytes_in = 1024
|
ctx["metrics"].bytes_in = 1024
|
||||||
status, body = _handle_metrics(ctx)
|
status, body = _handle_metrics(ctx)
|
||||||
assert status == 200
|
assert status == 200
|
||||||
assert body["connections"] == 42
|
assert isinstance(body, str)
|
||||||
assert body["bytes_in"] == 1024
|
assert body.rstrip().endswith("# EOF")
|
||||||
assert "uptime" in body
|
|
||||||
|
def test_counter_values(self):
|
||||||
|
ctx = _make_ctx()
|
||||||
|
ctx["metrics"].connections = 42
|
||||||
|
ctx["metrics"].bytes_in = 1024
|
||||||
|
_, body = _handle_metrics(ctx)
|
||||||
|
assert "s5p_connections_total 42" in body
|
||||||
|
assert "s5p_bytes_in_total 1024" in body
|
||||||
|
|
||||||
|
|
||||||
|
class TestRenderOpenMetrics:
|
||||||
|
"""Test OpenMetrics text rendering."""
|
||||||
|
|
||||||
|
def test_eof_terminator(self):
|
||||||
|
ctx = _make_ctx()
|
||||||
|
text = _render_openmetrics(ctx)
|
||||||
|
assert text.rstrip().endswith("# EOF")
|
||||||
|
assert text.endswith("\n")
|
||||||
|
|
||||||
|
def test_type_declarations(self):
|
||||||
|
ctx = _make_ctx()
|
||||||
|
text = _render_openmetrics(ctx)
|
||||||
|
assert "# TYPE s5p_connections counter" in text
|
||||||
|
assert "# TYPE s5p_active_connections gauge" in text
|
||||||
|
assert "# TYPE s5p_uptime_seconds gauge" in text
|
||||||
|
|
||||||
|
def test_help_lines(self):
|
||||||
|
ctx = _make_ctx()
|
||||||
|
text = _render_openmetrics(ctx)
|
||||||
|
assert "# HELP s5p_connections Total connection attempts." in text
|
||||||
|
assert "# HELP s5p_active_connections Currently open connections." in text
|
||||||
|
|
||||||
|
def test_counter_values(self):
|
||||||
|
ctx = _make_ctx()
|
||||||
|
ctx["metrics"].connections = 100
|
||||||
|
ctx["metrics"].success = 95
|
||||||
|
ctx["metrics"].failed = 5
|
||||||
|
ctx["metrics"].retries = 10
|
||||||
|
ctx["metrics"].auth_failures = 2
|
||||||
|
ctx["metrics"].bytes_in = 4096
|
||||||
|
ctx["metrics"].bytes_out = 8192
|
||||||
|
text = _render_openmetrics(ctx)
|
||||||
|
assert "s5p_connections_total 100" in text
|
||||||
|
assert "s5p_connections_success_total 95" in text
|
||||||
|
assert "s5p_connections_failed_total 5" in text
|
||||||
|
assert "s5p_retries_total 10" in text
|
||||||
|
assert "s5p_auth_failures_total 2" in text
|
||||||
|
assert "s5p_bytes_in_total 4096" in text
|
||||||
|
assert "s5p_bytes_out_total 8192" in text
|
||||||
|
|
||||||
|
def test_gauge_values(self):
|
||||||
|
ctx = _make_ctx()
|
||||||
|
ctx["metrics"].active = 7
|
||||||
|
text = _render_openmetrics(ctx)
|
||||||
|
assert "s5p_active_connections 7" in text
|
||||||
|
assert "s5p_uptime_seconds " in text
|
||||||
|
assert "s5p_connection_rate " in text
|
||||||
|
|
||||||
|
def test_no_latency_when_empty(self):
|
||||||
|
ctx = _make_ctx()
|
||||||
|
text = _render_openmetrics(ctx)
|
||||||
|
assert "s5p_chain_latency_seconds" not in text
|
||||||
|
|
||||||
|
def test_latency_summary(self):
|
||||||
|
ctx = _make_ctx()
|
||||||
|
for i in range(1, 101):
|
||||||
|
ctx["metrics"].latency.record(i / 1000)
|
||||||
|
text = _render_openmetrics(ctx)
|
||||||
|
assert "# TYPE s5p_chain_latency_seconds summary" in text
|
||||||
|
assert 's5p_chain_latency_seconds{quantile="0.5"}' in text
|
||||||
|
assert 's5p_chain_latency_seconds{quantile="0.95"}' in text
|
||||||
|
assert 's5p_chain_latency_seconds{quantile="0.99"}' in text
|
||||||
|
assert "s5p_chain_latency_seconds_count 100" in text
|
||||||
|
assert "s5p_chain_latency_seconds_sum " in text
|
||||||
|
|
||||||
|
def test_listener_latency_summary(self):
|
||||||
|
ctx = _make_ctx()
|
||||||
|
tracker = ctx["metrics"].get_listener_latency("0.0.0.0:1080")
|
||||||
|
for i in range(1, 51):
|
||||||
|
tracker.record(i / 1000)
|
||||||
|
text = _render_openmetrics(ctx)
|
||||||
|
assert "# TYPE s5p_listener_chain_latency_seconds summary" in text
|
||||||
|
assert (
|
||||||
|
's5p_listener_chain_latency_seconds{listener="0.0.0.0:1080",'
|
||||||
|
'quantile="0.5"}'
|
||||||
|
) in text
|
||||||
|
assert (
|
||||||
|
's5p_listener_chain_latency_seconds_count{listener="0.0.0.0:1080"} 50'
|
||||||
|
) in text
|
||||||
|
|
||||||
|
def test_pool_gauges_multi(self):
|
||||||
|
pool_a = MagicMock()
|
||||||
|
pool_a.alive_count = 5
|
||||||
|
pool_a.count = 10
|
||||||
|
pool_a.name = "clean"
|
||||||
|
pool_b = MagicMock()
|
||||||
|
pool_b.alive_count = 3
|
||||||
|
pool_b.count = 8
|
||||||
|
pool_b.name = "mitm"
|
||||||
|
ctx = _make_ctx(pools={"clean": pool_a, "mitm": pool_b})
|
||||||
|
text = _render_openmetrics(ctx)
|
||||||
|
assert '# TYPE s5p_pool_proxies_alive gauge' in text
|
||||||
|
assert 's5p_pool_proxies_alive{pool="clean"} 5' in text
|
||||||
|
assert 's5p_pool_proxies_alive{pool="mitm"} 3' in text
|
||||||
|
assert 's5p_pool_proxies_total{pool="clean"} 10' in text
|
||||||
|
assert 's5p_pool_proxies_total{pool="mitm"} 8' in text
|
||||||
|
|
||||||
|
def test_pool_gauges_single(self):
|
||||||
|
pool = MagicMock()
|
||||||
|
pool.alive_count = 12
|
||||||
|
pool.count = 20
|
||||||
|
ctx = _make_ctx(pool=pool)
|
||||||
|
text = _render_openmetrics(ctx)
|
||||||
|
assert "s5p_pool_proxies_alive 12" in text
|
||||||
|
assert "s5p_pool_proxies_total 20" in text
|
||||||
|
|
||||||
|
def test_no_pool_metrics_when_unconfigured(self):
|
||||||
|
ctx = _make_ctx()
|
||||||
|
text = _render_openmetrics(ctx)
|
||||||
|
assert "s5p_pool_proxies" not in text
|
||||||
|
|
||||||
|
|
||||||
class TestHandlePool:
|
class TestHandlePool:
|
||||||
@@ -191,17 +558,73 @@ class TestHandlePool:
|
|||||||
assert "socks5://1.2.3.4:1080" in body["proxies"]
|
assert "socks5://1.2.3.4:1080" in body["proxies"]
|
||||||
|
|
||||||
|
|
||||||
|
class TestHandlePoolMulti:
|
||||||
|
"""Test GET /pool with multiple named pools."""
|
||||||
|
|
||||||
|
def test_merges_entries(self):
|
||||||
|
pool_a = MagicMock()
|
||||||
|
pool_a.alive_count = 1
|
||||||
|
pool_a.count = 1
|
||||||
|
pool_a.name = "clean"
|
||||||
|
entry_a = MagicMock(
|
||||||
|
alive=True, fails=0, tests=5,
|
||||||
|
last_ok=100.0, last_test=100.0, last_seen=100.0,
|
||||||
|
)
|
||||||
|
pool_a._proxies = {"socks5://1.2.3.4:1080": entry_a}
|
||||||
|
|
||||||
|
pool_b = MagicMock()
|
||||||
|
pool_b.alive_count = 1
|
||||||
|
pool_b.count = 1
|
||||||
|
pool_b.name = "mitm"
|
||||||
|
entry_b = MagicMock(
|
||||||
|
alive=True, fails=0, tests=3,
|
||||||
|
last_ok=90.0, last_test=90.0, last_seen=90.0,
|
||||||
|
)
|
||||||
|
pool_b._proxies = {"socks5://5.6.7.8:1080": entry_b}
|
||||||
|
|
||||||
|
ctx = _make_ctx(pools={"clean": pool_a, "mitm": pool_b})
|
||||||
|
_, body = _handle_pool(ctx)
|
||||||
|
assert body["alive"] == 2
|
||||||
|
assert body["total"] == 2
|
||||||
|
assert len(body["proxies"]) == 2
|
||||||
|
assert body["proxies"]["socks5://1.2.3.4:1080"]["pool"] == "clean"
|
||||||
|
assert body["proxies"]["socks5://5.6.7.8:1080"]["pool"] == "mitm"
|
||||||
|
assert "pools" in body
|
||||||
|
assert body["pools"]["clean"] == {"alive": 1, "total": 1}
|
||||||
|
|
||||||
|
def test_single_pool_no_pool_field(self):
|
||||||
|
"""Single pool: no 'pool' field on entries, no 'pools' summary."""
|
||||||
|
pool = MagicMock()
|
||||||
|
pool.alive_count = 1
|
||||||
|
pool.count = 1
|
||||||
|
pool.name = "default"
|
||||||
|
entry = MagicMock(
|
||||||
|
alive=True, fails=0, tests=5,
|
||||||
|
last_ok=100.0, last_test=100.0, last_seen=100.0,
|
||||||
|
)
|
||||||
|
pool._proxies = {"socks5://1.2.3.4:1080": entry}
|
||||||
|
ctx = _make_ctx(pools={"default": pool})
|
||||||
|
_, body = _handle_pool(ctx)
|
||||||
|
assert "pool" not in body["proxies"]["socks5://1.2.3.4:1080"]
|
||||||
|
assert "pools" not in body
|
||||||
|
|
||||||
|
|
||||||
class TestHandleConfig:
|
class TestHandleConfig:
|
||||||
"""Test GET /config handler."""
|
"""Test GET /config handler."""
|
||||||
|
|
||||||
def test_basic(self):
|
def test_basic(self):
|
||||||
config = Config(timeout=15.0, retries=5, log_level="debug")
|
config = Config(
|
||||||
|
timeout=15.0, retries=5, log_level="debug",
|
||||||
|
listeners=[ListenerConfig(listen_host="0.0.0.0", listen_port=1080)],
|
||||||
|
)
|
||||||
ctx = _make_ctx(config=config)
|
ctx = _make_ctx(config=config)
|
||||||
status, body = _handle_config(ctx)
|
status, body = _handle_config(ctx)
|
||||||
assert status == 200
|
assert status == 200
|
||||||
assert body["timeout"] == 15.0
|
assert body["timeout"] == 15.0
|
||||||
assert body["retries"] == 5
|
assert body["retries"] == 5
|
||||||
assert body["log_level"] == "debug"
|
assert body["log_level"] == "debug"
|
||||||
|
assert len(body["listeners"]) == 1
|
||||||
|
assert body["listeners"][0]["listen"] == "0.0.0.0:1080"
|
||||||
|
|
||||||
def test_with_proxy_pool(self):
|
def test_with_proxy_pool(self):
|
||||||
pp = ProxyPoolConfig(
|
pp = ProxyPoolConfig(
|
||||||
@@ -210,11 +633,91 @@ class TestHandleConfig:
|
|||||||
test_interval=60.0,
|
test_interval=60.0,
|
||||||
max_fails=5,
|
max_fails=5,
|
||||||
)
|
)
|
||||||
config = Config(proxy_pool=pp)
|
config = Config(
|
||||||
|
proxy_pool=pp,
|
||||||
|
listeners=[ListenerConfig(
|
||||||
|
chain=[ChainHop("socks5", "127.0.0.1", 9050)],
|
||||||
|
pool_seq=["default"],
|
||||||
|
)],
|
||||||
|
)
|
||||||
ctx = _make_ctx(config=config)
|
ctx = _make_ctx(config=config)
|
||||||
_, body = _handle_config(ctx)
|
_, body = _handle_config(ctx)
|
||||||
assert body["proxy_pool"]["refresh"] == 600.0
|
assert body["proxy_pool"]["refresh"] == 600.0
|
||||||
assert body["proxy_pool"]["sources"][0]["url"] == "http://api:8081/proxies"
|
assert body["proxy_pool"]["sources"][0]["url"] == "http://api:8081/proxies"
|
||||||
|
assert body["listeners"][0]["pool_hops"] == 1
|
||||||
|
|
||||||
|
def test_with_proxy_pools(self):
|
||||||
|
pp_clean = ProxyPoolConfig(
|
||||||
|
sources=[PoolSourceConfig(url="http://api:8081/proxies/all", mitm=False)],
|
||||||
|
refresh=300.0,
|
||||||
|
test_interval=120.0,
|
||||||
|
max_fails=3,
|
||||||
|
)
|
||||||
|
pp_mitm = ProxyPoolConfig(
|
||||||
|
sources=[PoolSourceConfig(url="http://api:8081/proxies/all", mitm=True)],
|
||||||
|
refresh=300.0,
|
||||||
|
test_interval=120.0,
|
||||||
|
max_fails=3,
|
||||||
|
)
|
||||||
|
config = Config(
|
||||||
|
proxy_pools={"clean": pp_clean, "mitm": pp_mitm},
|
||||||
|
listeners=[ListenerConfig(
|
||||||
|
listen_host="0.0.0.0", listen_port=1080,
|
||||||
|
chain=[ChainHop("socks5", "127.0.0.1", 9050)],
|
||||||
|
pool_seq=[["clean"], ["clean"]], pool_name="clean",
|
||||||
|
)],
|
||||||
|
)
|
||||||
|
ctx = _make_ctx(config=config)
|
||||||
|
_, body = _handle_config(ctx)
|
||||||
|
assert "proxy_pools" in body
|
||||||
|
assert body["proxy_pools"]["clean"]["sources"][0]["mitm"] is False
|
||||||
|
assert body["proxy_pools"]["mitm"]["sources"][0]["mitm"] is True
|
||||||
|
assert body["listeners"][0]["pool"] == "clean"
|
||||||
|
|
||||||
|
def test_with_tor_nodes(self):
|
||||||
|
config = Config(
|
||||||
|
tor_nodes=[
|
||||||
|
ChainHop("socks5", "10.200.1.1", 9050),
|
||||||
|
ChainHop("socks5", "10.200.1.13", 9050),
|
||||||
|
],
|
||||||
|
listeners=[ListenerConfig()],
|
||||||
|
)
|
||||||
|
ctx = _make_ctx(config=config)
|
||||||
|
_, body = _handle_config(ctx)
|
||||||
|
assert body["tor_nodes"] == [
|
||||||
|
"socks5://10.200.1.1:9050",
|
||||||
|
"socks5://10.200.1.13:9050",
|
||||||
|
]
|
||||||
|
|
||||||
|
def test_no_tor_nodes(self):
|
||||||
|
config = Config(listeners=[ListenerConfig()])
|
||||||
|
ctx = _make_ctx(config=config)
|
||||||
|
_, body = _handle_config(ctx)
|
||||||
|
assert "tor_nodes" not in body
|
||||||
|
|
||||||
|
|
||||||
|
class TestHandleStatusTorNodes:
|
||||||
|
"""Test tor_nodes in GET /status response."""
|
||||||
|
|
||||||
|
def test_tor_nodes_in_status(self):
|
||||||
|
config = Config(
|
||||||
|
tor_nodes=[
|
||||||
|
ChainHop("socks5", "10.200.1.1", 9050),
|
||||||
|
ChainHop("socks5", "10.200.1.13", 9050),
|
||||||
|
],
|
||||||
|
listeners=[ListenerConfig()],
|
||||||
|
)
|
||||||
|
ctx = _make_ctx(config=config)
|
||||||
|
_, body = _handle_status(ctx)
|
||||||
|
assert body["tor_nodes"] == [
|
||||||
|
"socks5://10.200.1.1:9050",
|
||||||
|
"socks5://10.200.1.13:9050",
|
||||||
|
]
|
||||||
|
|
||||||
|
def test_no_tor_nodes_in_status(self):
|
||||||
|
ctx = _make_ctx()
|
||||||
|
_, body = _handle_status(ctx)
|
||||||
|
assert "tor_nodes" not in body
|
||||||
|
|
||||||
|
|
||||||
# -- routing -----------------------------------------------------------------
|
# -- routing -----------------------------------------------------------------
|
||||||
|
|||||||
153
tests/test_cli.py
Normal file
153
tests/test_cli.py
Normal file
@@ -0,0 +1,153 @@
|
|||||||
|
"""Tests for CLI argument parsing."""
|
||||||
|
|
||||||
|
import pytest
|
||||||
|
|
||||||
|
from s5p import __version__
|
||||||
|
from s5p.cli import _parse_args
|
||||||
|
|
||||||
|
|
||||||
|
class TestDefaults:
|
||||||
|
"""Default argument values."""
|
||||||
|
|
||||||
|
def test_no_args(self):
|
||||||
|
args = _parse_args([])
|
||||||
|
assert args.config is None
|
||||||
|
assert args.listen is None
|
||||||
|
assert args.chain is None
|
||||||
|
assert args.timeout is None
|
||||||
|
assert args.retries is None
|
||||||
|
assert args.max_connections is None
|
||||||
|
assert args.verbose is False
|
||||||
|
assert args.quiet is False
|
||||||
|
assert args.proxy_source is None
|
||||||
|
assert args.api is None
|
||||||
|
assert args.cprofile is None
|
||||||
|
assert args.tracemalloc is None
|
||||||
|
|
||||||
|
|
||||||
|
class TestFlags:
|
||||||
|
"""Flag parsing."""
|
||||||
|
|
||||||
|
def test_verbose(self):
|
||||||
|
args = _parse_args(["-v"])
|
||||||
|
assert args.verbose is True
|
||||||
|
|
||||||
|
def test_quiet(self):
|
||||||
|
args = _parse_args(["-q"])
|
||||||
|
assert args.quiet is True
|
||||||
|
|
||||||
|
def test_config(self):
|
||||||
|
args = _parse_args(["-c", "s5p.yaml"])
|
||||||
|
assert args.config == "s5p.yaml"
|
||||||
|
|
||||||
|
def test_config_long(self):
|
||||||
|
args = _parse_args(["--config", "s5p.yaml"])
|
||||||
|
assert args.config == "s5p.yaml"
|
||||||
|
|
||||||
|
def test_listen(self):
|
||||||
|
args = _parse_args(["-l", "0.0.0.0:9999"])
|
||||||
|
assert args.listen == "0.0.0.0:9999"
|
||||||
|
|
||||||
|
def test_chain(self):
|
||||||
|
args = _parse_args(["-C", "socks5://127.0.0.1:9050"])
|
||||||
|
assert args.chain == "socks5://127.0.0.1:9050"
|
||||||
|
|
||||||
|
def test_chain_multi(self):
|
||||||
|
args = _parse_args(["-C", "socks5://a:1080,http://b:8080"])
|
||||||
|
assert args.chain == "socks5://a:1080,http://b:8080"
|
||||||
|
|
||||||
|
def test_timeout(self):
|
||||||
|
args = _parse_args(["-t", "30"])
|
||||||
|
assert args.timeout == 30.0
|
||||||
|
|
||||||
|
def test_retries(self):
|
||||||
|
args = _parse_args(["-r", "5"])
|
||||||
|
assert args.retries == 5
|
||||||
|
|
||||||
|
def test_max_connections(self):
|
||||||
|
args = _parse_args(["-m", "512"])
|
||||||
|
assert args.max_connections == 512
|
||||||
|
|
||||||
|
def test_proxy_source(self):
|
||||||
|
args = _parse_args(["-S", "http://api:8081/proxies"])
|
||||||
|
assert args.proxy_source == "http://api:8081/proxies"
|
||||||
|
|
||||||
|
def test_api(self):
|
||||||
|
args = _parse_args(["--api", "127.0.0.1:1081"])
|
||||||
|
assert args.api == "127.0.0.1:1081"
|
||||||
|
|
||||||
|
def test_cprofile_default(self):
|
||||||
|
args = _parse_args(["--cprofile"])
|
||||||
|
assert args.cprofile == "s5p.prof"
|
||||||
|
|
||||||
|
def test_cprofile_custom(self):
|
||||||
|
args = _parse_args(["--cprofile", "out.prof"])
|
||||||
|
assert args.cprofile == "out.prof"
|
||||||
|
|
||||||
|
def test_tracemalloc_default(self):
|
||||||
|
args = _parse_args(["--tracemalloc"])
|
||||||
|
assert args.tracemalloc == 10
|
||||||
|
|
||||||
|
def test_tracemalloc_custom(self):
|
||||||
|
args = _parse_args(["--tracemalloc", "20"])
|
||||||
|
assert args.tracemalloc == 20
|
||||||
|
|
||||||
|
|
||||||
|
class TestVersion:
|
||||||
|
"""--version flag."""
|
||||||
|
|
||||||
|
def test_version_output(self, capsys):
|
||||||
|
with pytest.raises(SystemExit, match="0"):
|
||||||
|
_parse_args(["--version"])
|
||||||
|
captured = capsys.readouterr()
|
||||||
|
assert captured.out.strip() == f"s5p {__version__}"
|
||||||
|
|
||||||
|
def test_version_short(self, capsys):
|
||||||
|
with pytest.raises(SystemExit, match="0"):
|
||||||
|
_parse_args(["-V"])
|
||||||
|
captured = capsys.readouterr()
|
||||||
|
assert "0.3.0" in captured.out
|
||||||
|
|
||||||
|
|
||||||
|
class TestCombinations:
|
||||||
|
"""Multiple flags together."""
|
||||||
|
|
||||||
|
def test_verbose_with_chain(self):
|
||||||
|
args = _parse_args(["-v", "-C", "socks5://tor:9050"])
|
||||||
|
assert args.verbose is True
|
||||||
|
assert args.chain == "socks5://tor:9050"
|
||||||
|
|
||||||
|
def test_config_with_api(self):
|
||||||
|
args = _parse_args(["-c", "s5p.yaml", "--api", "0.0.0.0:1090"])
|
||||||
|
assert args.config == "s5p.yaml"
|
||||||
|
assert args.api == "0.0.0.0:1090"
|
||||||
|
|
||||||
|
def test_listen_with_timeout_and_retries(self):
|
||||||
|
args = _parse_args(["-l", ":8080", "-t", "15", "-r", "3"])
|
||||||
|
assert args.listen == ":8080"
|
||||||
|
assert args.timeout == 15.0
|
||||||
|
assert args.retries == 3
|
||||||
|
|
||||||
|
|
||||||
|
class TestInvalid:
|
||||||
|
"""Invalid argument handling."""
|
||||||
|
|
||||||
|
def test_unknown_flag(self):
|
||||||
|
with pytest.raises(SystemExit, match="2"):
|
||||||
|
_parse_args(["--nonexistent"])
|
||||||
|
|
||||||
|
def test_timeout_non_numeric(self):
|
||||||
|
with pytest.raises(SystemExit, match="2"):
|
||||||
|
_parse_args(["-t", "abc"])
|
||||||
|
|
||||||
|
def test_retries_non_numeric(self):
|
||||||
|
with pytest.raises(SystemExit, match="2"):
|
||||||
|
_parse_args(["-r", "abc"])
|
||||||
|
|
||||||
|
def test_max_connections_non_numeric(self):
|
||||||
|
with pytest.raises(SystemExit, match="2"):
|
||||||
|
_parse_args(["-m", "abc"])
|
||||||
|
|
||||||
|
def test_tracemalloc_non_numeric(self):
|
||||||
|
with pytest.raises(SystemExit, match="2"):
|
||||||
|
_parse_args(["--tracemalloc", "abc"])
|
||||||
@@ -2,7 +2,15 @@
|
|||||||
|
|
||||||
import pytest
|
import pytest
|
||||||
|
|
||||||
from s5p.config import ChainHop, Config, load_config, parse_api_proxies, parse_proxy_url
|
from s5p.config import (
|
||||||
|
ChainHop,
|
||||||
|
Config,
|
||||||
|
ListenerConfig,
|
||||||
|
load_config,
|
||||||
|
parse_api_proxies,
|
||||||
|
parse_proxy_url,
|
||||||
|
)
|
||||||
|
from s5p.server import _bypass_match
|
||||||
|
|
||||||
|
|
||||||
class TestParseProxyUrl:
|
class TestParseProxyUrl:
|
||||||
@@ -128,6 +136,7 @@ class TestConfig:
|
|||||||
assert c.max_connections == 256
|
assert c.max_connections == 256
|
||||||
assert c.pool_size == 0
|
assert c.pool_size == 0
|
||||||
assert c.pool_max_idle == 30.0
|
assert c.pool_max_idle == 30.0
|
||||||
|
assert c.tor_nodes == []
|
||||||
|
|
||||||
def test_max_connections_from_yaml(self, tmp_path):
|
def test_max_connections_from_yaml(self, tmp_path):
|
||||||
cfg_file = tmp_path / "test.yaml"
|
cfg_file = tmp_path / "test.yaml"
|
||||||
@@ -175,3 +184,608 @@ class TestConfig:
|
|||||||
cfg_file.write_text("listen: 1080\n")
|
cfg_file.write_text("listen: 1080\n")
|
||||||
c = load_config(cfg_file)
|
c = load_config(cfg_file)
|
||||||
assert c.tor is None
|
assert c.tor is None
|
||||||
|
|
||||||
|
def test_proxy_pool_test_targets(self, tmp_path):
|
||||||
|
cfg_file = tmp_path / "test.yaml"
|
||||||
|
cfg_file.write_text(
|
||||||
|
"proxy_pool:\n"
|
||||||
|
" sources: []\n"
|
||||||
|
" test_targets:\n"
|
||||||
|
" - host-a.example.com\n"
|
||||||
|
" - host-b.example.com\n"
|
||||||
|
)
|
||||||
|
c = load_config(cfg_file)
|
||||||
|
assert c.proxy_pool is not None
|
||||||
|
assert c.proxy_pool.test_targets == ["host-a.example.com", "host-b.example.com"]
|
||||||
|
assert c.proxy_pool.test_url == ""
|
||||||
|
|
||||||
|
def test_proxy_pool_legacy_test_url(self, tmp_path):
|
||||||
|
cfg_file = tmp_path / "test.yaml"
|
||||||
|
cfg_file.write_text(
|
||||||
|
"proxy_pool:\n"
|
||||||
|
" sources: []\n"
|
||||||
|
" test_url: http://httpbin.org/ip\n"
|
||||||
|
)
|
||||||
|
c = load_config(cfg_file)
|
||||||
|
assert c.proxy_pool is not None
|
||||||
|
assert c.proxy_pool.test_targets == ["httpbin.org"]
|
||||||
|
|
||||||
|
def test_proxy_pool_defaults(self, tmp_path):
|
||||||
|
cfg_file = tmp_path / "test.yaml"
|
||||||
|
cfg_file.write_text(
|
||||||
|
"proxy_pool:\n"
|
||||||
|
" sources: []\n"
|
||||||
|
)
|
||||||
|
c = load_config(cfg_file)
|
||||||
|
assert c.proxy_pool is not None
|
||||||
|
assert c.proxy_pool.test_targets == [
|
||||||
|
"www.google.com", "www.cloudflare.com", "www.amazon.com",
|
||||||
|
]
|
||||||
|
|
||||||
|
|
||||||
|
class TestProxyPools:
|
||||||
|
"""Test named proxy_pools config parsing."""
|
||||||
|
|
||||||
|
def test_proxy_pools_from_yaml(self, tmp_path):
|
||||||
|
cfg_file = tmp_path / "test.yaml"
|
||||||
|
cfg_file.write_text(
|
||||||
|
"proxy_pools:\n"
|
||||||
|
" clean:\n"
|
||||||
|
" sources:\n"
|
||||||
|
" - url: http://api:8081/proxies/all\n"
|
||||||
|
" mitm: false\n"
|
||||||
|
" refresh: 300\n"
|
||||||
|
" state_file: /data/pool-clean.json\n"
|
||||||
|
" mitm:\n"
|
||||||
|
" sources:\n"
|
||||||
|
" - url: http://api:8081/proxies/all\n"
|
||||||
|
" mitm: true\n"
|
||||||
|
" state_file: /data/pool-mitm.json\n"
|
||||||
|
)
|
||||||
|
c = load_config(cfg_file)
|
||||||
|
assert "clean" in c.proxy_pools
|
||||||
|
assert "mitm" in c.proxy_pools
|
||||||
|
assert c.proxy_pools["clean"].sources[0].mitm is False
|
||||||
|
assert c.proxy_pools["mitm"].sources[0].mitm is True
|
||||||
|
assert c.proxy_pools["clean"].state_file == "/data/pool-clean.json"
|
||||||
|
assert c.proxy_pools["mitm"].state_file == "/data/pool-mitm.json"
|
||||||
|
|
||||||
|
def test_mitm_none_when_absent(self, tmp_path):
|
||||||
|
cfg_file = tmp_path / "test.yaml"
|
||||||
|
cfg_file.write_text(
|
||||||
|
"proxy_pool:\n"
|
||||||
|
" sources:\n"
|
||||||
|
" - url: http://api:8081/proxies\n"
|
||||||
|
)
|
||||||
|
c = load_config(cfg_file)
|
||||||
|
assert c.proxy_pool is not None
|
||||||
|
assert c.proxy_pool.sources[0].mitm is None
|
||||||
|
|
||||||
|
def test_singular_becomes_default(self, tmp_path):
|
||||||
|
cfg_file = tmp_path / "test.yaml"
|
||||||
|
cfg_file.write_text(
|
||||||
|
"proxy_pool:\n"
|
||||||
|
" sources:\n"
|
||||||
|
" - url: http://api:8081/proxies\n"
|
||||||
|
)
|
||||||
|
c = load_config(cfg_file)
|
||||||
|
assert "default" in c.proxy_pools
|
||||||
|
assert c.proxy_pools["default"] is c.proxy_pool
|
||||||
|
|
||||||
|
def test_proxy_pools_wins_over_singular(self, tmp_path):
|
||||||
|
cfg_file = tmp_path / "test.yaml"
|
||||||
|
cfg_file.write_text(
|
||||||
|
"proxy_pools:\n"
|
||||||
|
" default:\n"
|
||||||
|
" sources:\n"
|
||||||
|
" - url: http://api:8081/pools-default\n"
|
||||||
|
"proxy_pool:\n"
|
||||||
|
" sources:\n"
|
||||||
|
" - url: http://api:8081/singular\n"
|
||||||
|
)
|
||||||
|
c = load_config(cfg_file)
|
||||||
|
# proxy_pools "default" wins, singular does not overwrite
|
||||||
|
assert c.proxy_pools["default"].sources[0].url == "http://api:8081/pools-default"
|
||||||
|
|
||||||
|
def test_listener_pool_name(self, tmp_path):
|
||||||
|
cfg_file = tmp_path / "test.yaml"
|
||||||
|
cfg_file.write_text(
|
||||||
|
"listeners:\n"
|
||||||
|
" - listen: 0.0.0.0:1080\n"
|
||||||
|
" pool: clean\n"
|
||||||
|
" chain:\n"
|
||||||
|
" - socks5://127.0.0.1:9050\n"
|
||||||
|
" - pool\n"
|
||||||
|
" - listen: 0.0.0.0:1081\n"
|
||||||
|
" chain:\n"
|
||||||
|
" - socks5://127.0.0.1:9050\n"
|
||||||
|
)
|
||||||
|
c = load_config(cfg_file)
|
||||||
|
assert c.listeners[0].pool_name == "clean"
|
||||||
|
assert c.listeners[0].pool_hops == 1
|
||||||
|
assert c.listeners[1].pool_name == ""
|
||||||
|
assert c.listeners[1].pool_hops == 0
|
||||||
|
|
||||||
|
|
||||||
|
class TestAllowedProtos:
|
||||||
|
"""Test pool-level allowed_protos config."""
|
||||||
|
|
||||||
|
def test_allowed_protos_from_yaml(self, tmp_path):
|
||||||
|
cfg_file = tmp_path / "test.yaml"
|
||||||
|
cfg_file.write_text(
|
||||||
|
"proxy_pools:\n"
|
||||||
|
" socks_only:\n"
|
||||||
|
" sources: []\n"
|
||||||
|
" allowed_protos: [socks5]\n"
|
||||||
|
" any:\n"
|
||||||
|
" sources: []\n"
|
||||||
|
)
|
||||||
|
c = load_config(cfg_file)
|
||||||
|
assert c.proxy_pools["socks_only"].allowed_protos == ["socks5"]
|
||||||
|
assert c.proxy_pools["any"].allowed_protos == []
|
||||||
|
|
||||||
|
def test_allowed_protos_multiple(self, tmp_path):
|
||||||
|
cfg_file = tmp_path / "test.yaml"
|
||||||
|
cfg_file.write_text(
|
||||||
|
"proxy_pool:\n"
|
||||||
|
" sources: []\n"
|
||||||
|
" allowed_protos: [socks5, http]\n"
|
||||||
|
)
|
||||||
|
c = load_config(cfg_file)
|
||||||
|
assert c.proxy_pool.allowed_protos == ["socks5", "http"]
|
||||||
|
|
||||||
|
def test_allowed_protos_default_empty(self):
|
||||||
|
from s5p.config import ProxyPoolConfig
|
||||||
|
cfg = ProxyPoolConfig()
|
||||||
|
assert cfg.allowed_protos == []
|
||||||
|
|
||||||
|
|
||||||
|
class TestTorNodes:
|
||||||
|
"""Test tor_nodes config parsing."""
|
||||||
|
|
||||||
|
def test_tor_nodes_from_yaml(self, tmp_path):
|
||||||
|
cfg_file = tmp_path / "test.yaml"
|
||||||
|
cfg_file.write_text(
|
||||||
|
"tor_nodes:\n"
|
||||||
|
" - socks5://10.200.1.1:9050\n"
|
||||||
|
" - socks5://10.200.1.254:9050\n"
|
||||||
|
" - socks5://10.200.1.13:9050\n"
|
||||||
|
)
|
||||||
|
c = load_config(cfg_file)
|
||||||
|
assert len(c.tor_nodes) == 3
|
||||||
|
assert c.tor_nodes[0].host == "10.200.1.1"
|
||||||
|
assert c.tor_nodes[0].port == 9050
|
||||||
|
assert c.tor_nodes[1].host == "10.200.1.254"
|
||||||
|
assert c.tor_nodes[2].host == "10.200.1.13"
|
||||||
|
|
||||||
|
def test_no_tor_nodes(self, tmp_path):
|
||||||
|
cfg_file = tmp_path / "test.yaml"
|
||||||
|
cfg_file.write_text("listen: 1080\n")
|
||||||
|
c = load_config(cfg_file)
|
||||||
|
assert c.tor_nodes == []
|
||||||
|
|
||||||
|
|
||||||
|
class TestListenerConfig:
|
||||||
|
"""Test multi-listener config parsing."""
|
||||||
|
|
||||||
|
def test_defaults(self):
|
||||||
|
lc = ListenerConfig()
|
||||||
|
assert lc.listen_host == "127.0.0.1"
|
||||||
|
assert lc.listen_port == 1080
|
||||||
|
assert lc.chain == []
|
||||||
|
assert lc.pool_hops == 0
|
||||||
|
|
||||||
|
def test_listeners_from_yaml(self, tmp_path):
|
||||||
|
cfg_file = tmp_path / "test.yaml"
|
||||||
|
cfg_file.write_text(
|
||||||
|
"listeners:\n"
|
||||||
|
" - listen: 0.0.0.0:1080\n"
|
||||||
|
" chain:\n"
|
||||||
|
" - socks5://127.0.0.1:9050\n"
|
||||||
|
" - listen: 0.0.0.0:1081\n"
|
||||||
|
" chain:\n"
|
||||||
|
" - socks5://127.0.0.1:9050\n"
|
||||||
|
" - pool\n"
|
||||||
|
" - listen: 0.0.0.0:1082\n"
|
||||||
|
" chain:\n"
|
||||||
|
" - socks5://127.0.0.1:9050\n"
|
||||||
|
" - pool\n"
|
||||||
|
" - pool\n"
|
||||||
|
)
|
||||||
|
c = load_config(cfg_file)
|
||||||
|
assert len(c.listeners) == 3
|
||||||
|
|
||||||
|
# listener 0: no pool hops
|
||||||
|
assert c.listeners[0].listen_host == "0.0.0.0"
|
||||||
|
assert c.listeners[0].listen_port == 1080
|
||||||
|
assert len(c.listeners[0].chain) == 1
|
||||||
|
assert c.listeners[0].pool_hops == 0
|
||||||
|
|
||||||
|
# listener 1: 1 pool hop
|
||||||
|
assert c.listeners[1].listen_port == 1081
|
||||||
|
assert len(c.listeners[1].chain) == 1
|
||||||
|
assert c.listeners[1].pool_hops == 1
|
||||||
|
|
||||||
|
# listener 2: 2 pool hops
|
||||||
|
assert c.listeners[2].listen_port == 1082
|
||||||
|
assert len(c.listeners[2].chain) == 1
|
||||||
|
assert c.listeners[2].pool_hops == 2
|
||||||
|
|
||||||
|
def test_pool_keyword_stripped_from_chain(self, tmp_path):
|
||||||
|
cfg_file = tmp_path / "test.yaml"
|
||||||
|
cfg_file.write_text(
|
||||||
|
"listeners:\n"
|
||||||
|
" - listen: 1080\n"
|
||||||
|
" chain:\n"
|
||||||
|
" - socks5://tor:9050\n"
|
||||||
|
" - pool\n"
|
||||||
|
" - pool\n"
|
||||||
|
)
|
||||||
|
c = load_config(cfg_file)
|
||||||
|
lc = c.listeners[0]
|
||||||
|
# only the real hop remains in chain
|
||||||
|
assert len(lc.chain) == 1
|
||||||
|
assert lc.chain[0].host == "tor"
|
||||||
|
assert lc.pool_hops == 2
|
||||||
|
|
||||||
|
def test_pool_keyword_case_insensitive(self, tmp_path):
|
||||||
|
cfg_file = tmp_path / "test.yaml"
|
||||||
|
cfg_file.write_text(
|
||||||
|
"listeners:\n"
|
||||||
|
" - listen: 1080\n"
|
||||||
|
" chain:\n"
|
||||||
|
" - Pool\n"
|
||||||
|
)
|
||||||
|
c = load_config(cfg_file)
|
||||||
|
assert c.listeners[0].pool_hops == 1
|
||||||
|
assert c.listeners[0].chain == []
|
||||||
|
|
||||||
|
|
||||||
|
class TestPoolSeq:
|
||||||
|
"""Test per-hop pool references (pool:name syntax)."""
|
||||||
|
|
||||||
|
def test_bare_pool_uses_default_name(self, tmp_path):
|
||||||
|
"""Bare `pool` + `pool: clean` -> pool_seq=[["clean"]]."""
|
||||||
|
cfg_file = tmp_path / "test.yaml"
|
||||||
|
cfg_file.write_text(
|
||||||
|
"listeners:\n"
|
||||||
|
" - listen: 1080\n"
|
||||||
|
" pool: clean\n"
|
||||||
|
" chain:\n"
|
||||||
|
" - pool\n"
|
||||||
|
)
|
||||||
|
c = load_config(cfg_file)
|
||||||
|
assert c.listeners[0].pool_seq == [["clean"]]
|
||||||
|
|
||||||
|
def test_bare_pool_no_pool_name(self, tmp_path):
|
||||||
|
"""Bare `pool` with no `pool:` key -> pool_seq=[["default"]]."""
|
||||||
|
cfg_file = tmp_path / "test.yaml"
|
||||||
|
cfg_file.write_text(
|
||||||
|
"listeners:\n"
|
||||||
|
" - listen: 1080\n"
|
||||||
|
" chain:\n"
|
||||||
|
" - pool\n"
|
||||||
|
)
|
||||||
|
c = load_config(cfg_file)
|
||||||
|
assert c.listeners[0].pool_seq == [["default"]]
|
||||||
|
|
||||||
|
def test_pool_colon_name(self, tmp_path):
|
||||||
|
"""`pool:clean, pool:mitm` -> pool_seq=[["clean"], ["mitm"]]."""
|
||||||
|
cfg_file = tmp_path / "test.yaml"
|
||||||
|
cfg_file.write_text(
|
||||||
|
"listeners:\n"
|
||||||
|
" - listen: 1080\n"
|
||||||
|
" chain:\n"
|
||||||
|
" - pool:clean\n"
|
||||||
|
" - pool:mitm\n"
|
||||||
|
)
|
||||||
|
c = load_config(cfg_file)
|
||||||
|
assert c.listeners[0].pool_seq == [["clean"], ["mitm"]]
|
||||||
|
|
||||||
|
def test_mixed_bare_and_named(self, tmp_path):
|
||||||
|
"""Bare `pool` + `pool:mitm` with `pool: clean` -> [["clean"], ["mitm"]]."""
|
||||||
|
cfg_file = tmp_path / "test.yaml"
|
||||||
|
cfg_file.write_text(
|
||||||
|
"listeners:\n"
|
||||||
|
" - listen: 1080\n"
|
||||||
|
" pool: clean\n"
|
||||||
|
" chain:\n"
|
||||||
|
" - pool\n"
|
||||||
|
" - pool:mitm\n"
|
||||||
|
)
|
||||||
|
c = load_config(cfg_file)
|
||||||
|
assert c.listeners[0].pool_seq == [["clean"], ["mitm"]]
|
||||||
|
|
||||||
|
def test_pool_colon_case_insensitive_prefix(self, tmp_path):
|
||||||
|
"""`Pool:MyPool` -> pool_seq=[["MyPool"]] (prefix case-insensitive)."""
|
||||||
|
cfg_file = tmp_path / "test.yaml"
|
||||||
|
cfg_file.write_text(
|
||||||
|
"listeners:\n"
|
||||||
|
" - listen: 1080\n"
|
||||||
|
" chain:\n"
|
||||||
|
" - Pool:MyPool\n"
|
||||||
|
)
|
||||||
|
c = load_config(cfg_file)
|
||||||
|
assert c.listeners[0].pool_seq == [["MyPool"]]
|
||||||
|
|
||||||
|
def test_pool_colon_empty_is_bare(self, tmp_path):
|
||||||
|
"""`pool:` (empty name) -> treated as bare pool."""
|
||||||
|
cfg_file = tmp_path / "test.yaml"
|
||||||
|
cfg_file.write_text(
|
||||||
|
"listeners:\n"
|
||||||
|
" - listen: 1080\n"
|
||||||
|
" pool: clean\n"
|
||||||
|
" chain:\n"
|
||||||
|
" - pool:\n"
|
||||||
|
)
|
||||||
|
c = load_config(cfg_file)
|
||||||
|
assert c.listeners[0].pool_seq == [["clean"]]
|
||||||
|
|
||||||
|
def test_backward_compat_pool_hops_property(self):
|
||||||
|
"""pool_hops property returns len(pool_seq)."""
|
||||||
|
lc = ListenerConfig(pool_seq=[["clean"], ["mitm"]])
|
||||||
|
assert lc.pool_hops == 2
|
||||||
|
lc2 = ListenerConfig()
|
||||||
|
assert lc2.pool_hops == 0
|
||||||
|
|
||||||
|
def test_legacy_auto_append(self, tmp_path):
|
||||||
|
"""Singular `proxy_pool:` -> pool_seq=[["default"]]."""
|
||||||
|
cfg_file = tmp_path / "test.yaml"
|
||||||
|
cfg_file.write_text(
|
||||||
|
"listen: 0.0.0.0:1080\n"
|
||||||
|
"chain:\n"
|
||||||
|
" - socks5://127.0.0.1:9050\n"
|
||||||
|
"proxy_pool:\n"
|
||||||
|
" sources:\n"
|
||||||
|
" - url: http://api:8081/proxies\n"
|
||||||
|
)
|
||||||
|
c = load_config(cfg_file)
|
||||||
|
lc = c.listeners[0]
|
||||||
|
assert lc.pool_seq == [["default"]]
|
||||||
|
assert lc.pool_hops == 1
|
||||||
|
|
||||||
|
def test_list_candidates(self, tmp_path):
|
||||||
|
"""List in chain -> multi-candidate hop."""
|
||||||
|
cfg_file = tmp_path / "test.yaml"
|
||||||
|
cfg_file.write_text(
|
||||||
|
"listeners:\n"
|
||||||
|
" - listen: 1080\n"
|
||||||
|
" chain:\n"
|
||||||
|
" - socks5://tor:9050\n"
|
||||||
|
" - [pool:clean, pool:mitm]\n"
|
||||||
|
" - [pool:clean, pool:mitm]\n"
|
||||||
|
)
|
||||||
|
c = load_config(cfg_file)
|
||||||
|
lc = c.listeners[0]
|
||||||
|
assert len(lc.chain) == 1
|
||||||
|
assert lc.pool_hops == 2
|
||||||
|
assert lc.pool_seq == [["clean", "mitm"], ["clean", "mitm"]]
|
||||||
|
|
||||||
|
|
||||||
|
class TestListenerBackwardCompat:
|
||||||
|
"""Test backward-compatible single listener from old format."""
|
||||||
|
|
||||||
|
def test_old_format_creates_single_listener(self, tmp_path):
|
||||||
|
cfg_file = tmp_path / "test.yaml"
|
||||||
|
cfg_file.write_text(
|
||||||
|
"listen: 0.0.0.0:9999\n"
|
||||||
|
"chain:\n"
|
||||||
|
" - socks5://127.0.0.1:9050\n"
|
||||||
|
)
|
||||||
|
c = load_config(cfg_file)
|
||||||
|
assert len(c.listeners) == 1
|
||||||
|
lc = c.listeners[0]
|
||||||
|
assert lc.listen_host == "0.0.0.0"
|
||||||
|
assert lc.listen_port == 9999
|
||||||
|
assert len(lc.chain) == 1
|
||||||
|
assert lc.pool_hops == 0
|
||||||
|
|
||||||
|
def test_empty_config_creates_single_listener(self, tmp_path):
|
||||||
|
cfg_file = tmp_path / "test.yaml"
|
||||||
|
cfg_file.write_text("")
|
||||||
|
c = load_config(cfg_file)
|
||||||
|
assert len(c.listeners) == 1
|
||||||
|
lc = c.listeners[0]
|
||||||
|
assert lc.listen_host == "127.0.0.1"
|
||||||
|
assert lc.listen_port == 1080
|
||||||
|
|
||||||
|
|
||||||
|
class TestListenerPoolCompat:
|
||||||
|
"""Test that proxy_pool + old format auto-sets pool_hops=1."""
|
||||||
|
|
||||||
|
def test_pool_auto_appends(self, tmp_path):
|
||||||
|
cfg_file = tmp_path / "test.yaml"
|
||||||
|
cfg_file.write_text(
|
||||||
|
"listen: 0.0.0.0:1080\n"
|
||||||
|
"chain:\n"
|
||||||
|
" - socks5://127.0.0.1:9050\n"
|
||||||
|
"proxy_pool:\n"
|
||||||
|
" sources:\n"
|
||||||
|
" - url: http://api:8081/proxies\n"
|
||||||
|
)
|
||||||
|
c = load_config(cfg_file)
|
||||||
|
assert len(c.listeners) == 1
|
||||||
|
lc = c.listeners[0]
|
||||||
|
assert lc.pool_hops == 1
|
||||||
|
|
||||||
|
def test_explicit_listeners_no_auto_append(self, tmp_path):
|
||||||
|
cfg_file = tmp_path / "test.yaml"
|
||||||
|
cfg_file.write_text(
|
||||||
|
"listeners:\n"
|
||||||
|
" - listen: 0.0.0.0:1080\n"
|
||||||
|
" chain:\n"
|
||||||
|
" - socks5://127.0.0.1:9050\n"
|
||||||
|
"proxy_pool:\n"
|
||||||
|
" sources:\n"
|
||||||
|
" - url: http://api:8081/proxies\n"
|
||||||
|
)
|
||||||
|
c = load_config(cfg_file)
|
||||||
|
assert len(c.listeners) == 1
|
||||||
|
lc = c.listeners[0]
|
||||||
|
# explicit listeners: no auto pool_hops
|
||||||
|
assert lc.pool_hops == 0
|
||||||
|
|
||||||
|
|
||||||
|
class TestListenerRetries:
|
||||||
|
"""Test per-listener retry override config."""
|
||||||
|
|
||||||
|
def test_retries_default(self):
|
||||||
|
lc = ListenerConfig()
|
||||||
|
assert lc.retries == 0
|
||||||
|
|
||||||
|
def test_retries_from_yaml(self, tmp_path):
|
||||||
|
cfg_file = tmp_path / "test.yaml"
|
||||||
|
cfg_file.write_text(
|
||||||
|
"listeners:\n"
|
||||||
|
" - listen: 1080\n"
|
||||||
|
" retries: 5\n"
|
||||||
|
" chain:\n"
|
||||||
|
" - socks5://127.0.0.1:9050\n"
|
||||||
|
" - pool\n"
|
||||||
|
" - listen: 1081\n"
|
||||||
|
" chain:\n"
|
||||||
|
" - socks5://127.0.0.1:9050\n"
|
||||||
|
)
|
||||||
|
c = load_config(cfg_file)
|
||||||
|
assert c.listeners[0].retries == 5
|
||||||
|
assert c.listeners[1].retries == 0
|
||||||
|
|
||||||
|
def test_retries_absent_from_yaml(self, tmp_path):
|
||||||
|
cfg_file = tmp_path / "test.yaml"
|
||||||
|
cfg_file.write_text(
|
||||||
|
"listeners:\n"
|
||||||
|
" - listen: 1080\n"
|
||||||
|
)
|
||||||
|
c = load_config(cfg_file)
|
||||||
|
assert c.listeners[0].retries == 0
|
||||||
|
|
||||||
|
|
||||||
|
class TestAuthConfig:
|
||||||
|
"""Test auth field in listener config."""
|
||||||
|
|
||||||
|
def test_auth_from_yaml(self, tmp_path):
|
||||||
|
cfg_file = tmp_path / "test.yaml"
|
||||||
|
cfg_file.write_text(
|
||||||
|
"listeners:\n"
|
||||||
|
" - listen: 1080\n"
|
||||||
|
" auth:\n"
|
||||||
|
" alice: s3cret\n"
|
||||||
|
" bob: hunter2\n"
|
||||||
|
)
|
||||||
|
c = load_config(cfg_file)
|
||||||
|
assert c.listeners[0].auth == {"alice": "s3cret", "bob": "hunter2"}
|
||||||
|
|
||||||
|
def test_auth_empty_default(self):
|
||||||
|
lc = ListenerConfig()
|
||||||
|
assert lc.auth == {}
|
||||||
|
|
||||||
|
def test_auth_absent_from_yaml(self, tmp_path):
|
||||||
|
cfg_file = tmp_path / "test.yaml"
|
||||||
|
cfg_file.write_text(
|
||||||
|
"listeners:\n"
|
||||||
|
" - listen: 1080\n"
|
||||||
|
)
|
||||||
|
c = load_config(cfg_file)
|
||||||
|
assert c.listeners[0].auth == {}
|
||||||
|
|
||||||
|
def test_auth_numeric_password(self, tmp_path):
|
||||||
|
"""YAML parses `admin: 12345` as int; must be coerced to str."""
|
||||||
|
cfg_file = tmp_path / "test.yaml"
|
||||||
|
cfg_file.write_text(
|
||||||
|
"listeners:\n"
|
||||||
|
" - listen: 1080\n"
|
||||||
|
" auth:\n"
|
||||||
|
" admin: 12345\n"
|
||||||
|
)
|
||||||
|
c = load_config(cfg_file)
|
||||||
|
assert c.listeners[0].auth == {"admin": "12345"}
|
||||||
|
|
||||||
|
def test_auth_mixed_listeners(self, tmp_path):
|
||||||
|
"""One listener with auth, one without."""
|
||||||
|
cfg_file = tmp_path / "test.yaml"
|
||||||
|
cfg_file.write_text(
|
||||||
|
"listeners:\n"
|
||||||
|
" - listen: 1080\n"
|
||||||
|
" auth:\n"
|
||||||
|
" alice: pass\n"
|
||||||
|
" - listen: 1081\n"
|
||||||
|
)
|
||||||
|
c = load_config(cfg_file)
|
||||||
|
assert c.listeners[0].auth == {"alice": "pass"}
|
||||||
|
assert c.listeners[1].auth == {}
|
||||||
|
|
||||||
|
|
||||||
|
class TestBypassConfig:
|
||||||
|
"""Test bypass rules in listener config."""
|
||||||
|
|
||||||
|
def test_bypass_from_yaml(self, tmp_path):
|
||||||
|
cfg_file = tmp_path / "test.yaml"
|
||||||
|
cfg_file.write_text(
|
||||||
|
"listeners:\n"
|
||||||
|
" - listen: 1080\n"
|
||||||
|
" bypass:\n"
|
||||||
|
" - 127.0.0.0/8\n"
|
||||||
|
" - 192.168.0.0/16\n"
|
||||||
|
" - localhost\n"
|
||||||
|
" - .local\n"
|
||||||
|
" chain:\n"
|
||||||
|
" - socks5://127.0.0.1:9050\n"
|
||||||
|
)
|
||||||
|
c = load_config(cfg_file)
|
||||||
|
lc = c.listeners[0]
|
||||||
|
assert lc.bypass == ["127.0.0.0/8", "192.168.0.0/16", "localhost", ".local"]
|
||||||
|
|
||||||
|
def test_bypass_empty_default(self):
|
||||||
|
lc = ListenerConfig()
|
||||||
|
assert lc.bypass == []
|
||||||
|
|
||||||
|
def test_bypass_absent_from_yaml(self, tmp_path):
|
||||||
|
cfg_file = tmp_path / "test.yaml"
|
||||||
|
cfg_file.write_text(
|
||||||
|
"listeners:\n"
|
||||||
|
" - listen: 1080\n"
|
||||||
|
" chain:\n"
|
||||||
|
" - socks5://127.0.0.1:9050\n"
|
||||||
|
)
|
||||||
|
c = load_config(cfg_file)
|
||||||
|
assert c.listeners[0].bypass == []
|
||||||
|
|
||||||
|
|
||||||
|
class TestBypassMatch:
|
||||||
|
"""Test _bypass_match function."""
|
||||||
|
|
||||||
|
def test_cidr_ipv4(self):
|
||||||
|
assert _bypass_match(["10.0.0.0/8"], "10.1.2.3") is True
|
||||||
|
assert _bypass_match(["10.0.0.0/8"], "11.0.0.1") is False
|
||||||
|
|
||||||
|
def test_cidr_ipv6(self):
|
||||||
|
assert _bypass_match(["fc00::/7"], "fd00::1") is True
|
||||||
|
assert _bypass_match(["fc00::/7"], "2001:db8::1") is False
|
||||||
|
|
||||||
|
def test_exact_ip(self):
|
||||||
|
assert _bypass_match(["127.0.0.1"], "127.0.0.1") is True
|
||||||
|
assert _bypass_match(["127.0.0.1"], "127.0.0.2") is False
|
||||||
|
|
||||||
|
def test_exact_hostname(self):
|
||||||
|
assert _bypass_match(["localhost"], "localhost") is True
|
||||||
|
assert _bypass_match(["localhost"], "otherhost") is False
|
||||||
|
|
||||||
|
def test_domain_suffix(self):
|
||||||
|
assert _bypass_match([".local"], "myhost.local") is True
|
||||||
|
assert _bypass_match([".local"], "local") is True
|
||||||
|
assert _bypass_match([".local"], "notlocal") is False
|
||||||
|
assert _bypass_match([".example.com"], "api.example.com") is True
|
||||||
|
assert _bypass_match([".example.com"], "example.com") is True
|
||||||
|
|
||||||
|
def test_multiple_rules(self):
|
||||||
|
rules = ["10.0.0.0/8", "192.168.0.0/16", ".local"]
|
||||||
|
assert _bypass_match(rules, "10.1.2.3") is True
|
||||||
|
assert _bypass_match(rules, "192.168.1.1") is True
|
||||||
|
assert _bypass_match(rules, "host.local") is True
|
||||||
|
assert _bypass_match(rules, "8.8.8.8") is False
|
||||||
|
|
||||||
|
def test_empty_rules(self):
|
||||||
|
assert _bypass_match([], "anything") is False
|
||||||
|
|
||||||
|
def test_hostname_not_matched_by_cidr(self):
|
||||||
|
assert _bypass_match(["10.0.0.0/8"], "example.com") is False
|
||||||
|
|||||||
578
tests/test_integration.py
Normal file
578
tests/test_integration.py
Normal file
@@ -0,0 +1,578 @@
|
|||||||
|
"""End-to-end integration tests with mock SOCKS5 proxies."""
|
||||||
|
|
||||||
|
from __future__ import annotations
|
||||||
|
|
||||||
|
import asyncio
|
||||||
|
import struct
|
||||||
|
|
||||||
|
from s5p.config import ChainHop, ListenerConfig
|
||||||
|
from s5p.proto import encode_address
|
||||||
|
from s5p.server import _handle_client
|
||||||
|
|
||||||
|
from .conftest import free_port, start_echo_server, start_mock_socks5
|
||||||
|
|
||||||
|
# -- helpers -----------------------------------------------------------------
|
||||||
|
|
||||||
|
|
||||||
|
async def _socks5_connect(
|
||||||
|
host: str, port: int, target_host: str, target_port: int,
|
||||||
|
) -> tuple[asyncio.StreamReader, asyncio.StreamWriter]:
|
||||||
|
"""Connect as a SOCKS5 client, perform greeting + CONNECT."""
|
||||||
|
reader, writer = await asyncio.open_connection(host, port)
|
||||||
|
|
||||||
|
# greeting: version 5, 1 method (no-auth)
|
||||||
|
writer.write(b"\x05\x01\x00")
|
||||||
|
await writer.drain()
|
||||||
|
resp = await reader.readexactly(2)
|
||||||
|
assert resp == b"\x05\x00", f"greeting failed: {resp!r}"
|
||||||
|
|
||||||
|
# connect request
|
||||||
|
atyp, addr_bytes = encode_address(target_host)
|
||||||
|
writer.write(
|
||||||
|
struct.pack("!BBB", 0x05, 0x01, 0x00)
|
||||||
|
+ bytes([atyp])
|
||||||
|
+ addr_bytes
|
||||||
|
+ struct.pack("!H", target_port)
|
||||||
|
)
|
||||||
|
await writer.drain()
|
||||||
|
|
||||||
|
# read reply
|
||||||
|
rep_header = await reader.readexactly(3)
|
||||||
|
atyp_resp = (await reader.readexactly(1))[0]
|
||||||
|
if atyp_resp == 0x01:
|
||||||
|
await reader.readexactly(4)
|
||||||
|
elif atyp_resp == 0x03:
|
||||||
|
length = (await reader.readexactly(1))[0]
|
||||||
|
await reader.readexactly(length)
|
||||||
|
elif atyp_resp == 0x04:
|
||||||
|
await reader.readexactly(16)
|
||||||
|
await reader.readexactly(2) # port
|
||||||
|
|
||||||
|
if rep_header[1] != 0x00:
|
||||||
|
writer.close()
|
||||||
|
await writer.wait_closed()
|
||||||
|
raise ConnectionError(f"SOCKS5 reply={rep_header[1]:#x}")
|
||||||
|
|
||||||
|
return reader, writer
|
||||||
|
|
||||||
|
|
||||||
|
async def _socks5_connect_auth(
    host: str, port: int, target_host: str, target_port: int,
    username: str, password: str,
) -> tuple[asyncio.StreamReader, asyncio.StreamWriter]:
    """Open a SOCKS5 client connection using username/password auth (RFC 1929)."""
    reader, writer = await asyncio.open_connection(host, port)

    # Greeting: version 5, offering exactly one method (0x02 = user/pass).
    writer.write(b"\x05\x01\x02")
    await writer.drain()
    greeting = await reader.readexactly(2)
    assert greeting == b"\x05\x02", f"greeting failed: {greeting!r}"

    # RFC 1929 subnegotiation: VER ULEN UNAME PLEN PASSWD.
    uname = username.encode("utf-8")
    passwd = password.encode("utf-8")
    writer.write(
        b"\x01"
        + bytes([len(uname)]) + uname
        + bytes([len(passwd)]) + passwd
    )
    await writer.drain()
    status = await reader.readexactly(2)
    if status[1] != 0x00:
        writer.close()
        await writer.wait_closed()
        raise ConnectionError(f"auth failed: status={status[1]:#x}")

    # CONNECT request: VER CMD RSV ATYP DST.ADDR DST.PORT.
    atyp, addr = encode_address(target_host)
    writer.write(
        struct.pack("!BBB", 0x05, 0x01, 0x00)
        + bytes([atyp])
        + addr
        + struct.pack("!H", target_port)
    )
    await writer.drain()

    # Reply: consume the bound address so the stream sits at the payload.
    header = await reader.readexactly(3)
    reply_atyp = (await reader.readexactly(1))[0]
    if reply_atyp == 0x01:  # IPv4
        await reader.readexactly(4)
    elif reply_atyp == 0x03:  # domain name, length-prefixed
        name_len = (await reader.readexactly(1))[0]
        await reader.readexactly(name_len)
    elif reply_atyp == 0x04:  # IPv6
        await reader.readexactly(16)
    await reader.readexactly(2)  # bound port

    if header[1] != 0x00:
        writer.close()
        await writer.wait_closed()
        raise ConnectionError(f"SOCKS5 reply={header[1]:#x}")

    return reader, writer
|
||||||
|
|
||||||
|
|
||||||
|
async def _close_server(srv: asyncio.Server) -> None:
|
||||||
|
"""Close a server and wait."""
|
||||||
|
srv.close()
|
||||||
|
await srv.wait_closed()
|
||||||
|
|
||||||
|
|
||||||
|
# -- tests -------------------------------------------------------------------
|
||||||
|
|
||||||
|
|
||||||
|
class TestDirectNoChain:
    """Client -> s5p -> echo (empty chain)."""

    def test_echo(self):
        async def scenario():
            running = []
            try:
                echo_host, echo_port, echo_srv = await start_echo_server()
                running.append(echo_srv)

                cfg = ListenerConfig(listen_host="127.0.0.1", listen_port=free_port())
                proxy = await asyncio.start_server(
                    lambda r, w: _handle_client(r, w, cfg, timeout=5.0, retries=1),
                    cfg.listen_host, cfg.listen_port,
                )
                running.append(proxy)
                await proxy.start_serving()

                reader, writer = await _socks5_connect(
                    cfg.listen_host, cfg.listen_port, echo_host, echo_port,
                )
                writer.write(b"hello direct")
                await writer.drain()
                echoed = await asyncio.wait_for(reader.read(4096), timeout=2.0)
                assert echoed == b"hello direct"

                writer.close()
                await writer.wait_closed()
            finally:
                for srv in running:
                    await _close_server(srv)

        asyncio.run(scenario())
|
||||||
|
|
||||||
|
|
||||||
|
class TestSingleHop:
    """Client -> s5p -> mock socks5 -> echo."""

    def test_echo_through_one_hop(self):
        async def scenario():
            running = []
            try:
                echo_host, echo_port, echo_srv = await start_echo_server()
                running.append(echo_srv)
                mock_host, mock_port, mock_srv = await start_mock_socks5()
                running.append(mock_srv)

                cfg = ListenerConfig(
                    listen_host="127.0.0.1",
                    listen_port=free_port(),
                    chain=[ChainHop(proto="socks5", host=mock_host, port=mock_port)],
                )
                proxy = await asyncio.start_server(
                    lambda r, w: _handle_client(r, w, cfg, timeout=5.0, retries=1),
                    cfg.listen_host, cfg.listen_port,
                )
                running.append(proxy)
                await proxy.start_serving()

                reader, writer = await _socks5_connect(
                    cfg.listen_host, cfg.listen_port, echo_host, echo_port,
                )
                writer.write(b"hello one hop")
                await writer.drain()
                echoed = await asyncio.wait_for(reader.read(4096), timeout=2.0)
                assert echoed == b"hello one hop"

                writer.close()
                await writer.wait_closed()
            finally:
                for srv in running:
                    await _close_server(srv)

        asyncio.run(scenario())
|
||||||
|
|
||||||
|
|
||||||
|
class TestTwoHops:
    """Client -> s5p -> mock1 -> mock2 -> echo."""

    def test_echo_through_two_hops(self):
        async def scenario():
            running = []
            try:
                echo_host, echo_port, echo_srv = await start_echo_server()
                running.append(echo_srv)
                m1_host, m1_port, m1_srv = await start_mock_socks5()
                running.append(m1_srv)
                m2_host, m2_port, m2_srv = await start_mock_socks5()
                running.append(m2_srv)

                cfg = ListenerConfig(
                    listen_host="127.0.0.1",
                    listen_port=free_port(),
                    chain=[
                        ChainHop(proto="socks5", host=m1_host, port=m1_port),
                        ChainHop(proto="socks5", host=m2_host, port=m2_port),
                    ],
                )
                proxy = await asyncio.start_server(
                    lambda r, w: _handle_client(r, w, cfg, timeout=5.0, retries=1),
                    cfg.listen_host, cfg.listen_port,
                )
                running.append(proxy)
                await proxy.start_serving()

                reader, writer = await _socks5_connect(
                    cfg.listen_host, cfg.listen_port, echo_host, echo_port,
                )
                writer.write(b"hello two hops")
                await writer.drain()
                echoed = await asyncio.wait_for(reader.read(4096), timeout=2.0)
                assert echoed == b"hello two hops"

                writer.close()
                await writer.wait_closed()
            finally:
                for srv in running:
                    await _close_server(srv)

        asyncio.run(scenario())
|
||||||
|
|
||||||
|
|
||||||
|
class TestConnectionRefused:
    """A dead hop surfaces as a SOCKS5 error reply to the client."""

    def test_refused(self):
        async def scenario():
            running = []
            try:
                # a port with nothing listening behind it
                dead_port = free_port()

                cfg = ListenerConfig(
                    listen_host="127.0.0.1",
                    listen_port=free_port(),
                    chain=[ChainHop(proto="socks5", host="127.0.0.1", port=dead_port)],
                )
                proxy = await asyncio.start_server(
                    lambda r, w: _handle_client(r, w, cfg, timeout=3.0, retries=1),
                    cfg.listen_host, cfg.listen_port,
                )
                running.append(proxy)
                await proxy.start_serving()

                reader, writer = await asyncio.open_connection(
                    cfg.listen_host, cfg.listen_port,
                )
                # greeting
                writer.write(b"\x05\x01\x00")
                await writer.drain()
                greeting = await reader.readexactly(2)
                assert greeting == b"\x05\x00"

                # CONNECT toward a dummy target
                atyp, addr = encode_address("127.0.0.1")
                writer.write(
                    struct.pack("!BBB", 0x05, 0x01, 0x00)
                    + bytes([atyp])
                    + addr
                    + struct.pack("!H", 9999)
                )
                await writer.drain()

                # expect an error reply (non-zero REP field)
                reply = await asyncio.wait_for(reader.read(4096), timeout=5.0)
                assert len(reply) >= 3
                assert reply[1] != 0x00, "expected non-zero SOCKS5 reply code"

                writer.close()
                await writer.wait_closed()
            finally:
                for srv in running:
                    await _close_server(srv)

        asyncio.run(scenario())
|
||||||
|
|
||||||
|
|
||||||
|
class TestBypassDirectConnect:
    """Target matches bypass rule -> chain skipped, direct connect to echo."""

    def test_bypass_skips_chain(self):
        async def scenario():
            running = []
            try:
                echo_host, echo_port, echo_srv = await start_echo_server()
                running.append(echo_srv)

                # dead hop -- the test would fail if bypass did not skip it
                dead_port = free_port()

                cfg = ListenerConfig(
                    listen_host="127.0.0.1",
                    listen_port=free_port(),
                    chain=[ChainHop(proto="socks5", host="127.0.0.1", port=dead_port)],
                    bypass=["127.0.0.0/8"],
                )
                proxy = await asyncio.start_server(
                    lambda r, w: _handle_client(r, w, cfg, timeout=5.0, retries=1),
                    cfg.listen_host, cfg.listen_port,
                )
                running.append(proxy)
                await proxy.start_serving()

                reader, writer = await _socks5_connect(
                    cfg.listen_host, cfg.listen_port, echo_host, echo_port,
                )
                writer.write(b"hello bypass")
                await writer.drain()
                echoed = await asyncio.wait_for(reader.read(4096), timeout=2.0)
                assert echoed == b"hello bypass"

                writer.close()
                await writer.wait_closed()
            finally:
                for srv in running:
                    await _close_server(srv)

        asyncio.run(scenario())
|
||||||
|
|
||||||
|
|
||||||
|
class TestOnionChainOnly:
    """Onion target uses the static chain only; pool hops are skipped."""

    def test_onion_skips_pool(self):
        async def scenario():
            running = []
            try:
                # the mock socks5 plays the role of the "Tor" hop
                mock_host, mock_port, mock_srv = await start_mock_socks5()
                running.append(mock_srv)

                # a fake pool that would inject a dead hop if consulted
                from unittest.mock import AsyncMock, MagicMock

                dead_port = free_port()
                fake_pool = MagicMock()
                fake_pool.alive_count = 1
                fake_pool.get = AsyncMock(
                    return_value=ChainHop(
                        proto="socks5", host="127.0.0.1", port=dead_port,
                    ),
                )

                cfg = ListenerConfig(
                    listen_host="127.0.0.1",
                    listen_port=free_port(),
                    chain=[ChainHop(proto="socks5", host=mock_host, port=mock_port)],
                    pool_seq=[["default"]],
                )
                proxy = await asyncio.start_server(
                    lambda r, w: _handle_client(
                        r, w, cfg, timeout=5.0, retries=1,
                        pool_seq=[[fake_pool]],
                    ),
                    cfg.listen_host, cfg.listen_port,
                )
                running.append(proxy)
                await proxy.start_serving()

                # Connect with a .onion target -- the mock socks5 will fail
                # to resolve it, but the key assertion is that pool.get is
                # never called.
                reader, writer = await asyncio.open_connection(
                    cfg.listen_host, cfg.listen_port,
                )
                writer.write(b"\x05\x01\x00")
                await writer.drain()
                await reader.readexactly(2)

                atyp, addr = encode_address("fake.onion")
                writer.write(
                    struct.pack("!BBB", 0x05, 0x01, 0x00)
                    + bytes([atyp])
                    + addr
                    + struct.pack("!H", 80)
                )
                await writer.drain()
                await asyncio.wait_for(reader.read(4096), timeout=3.0)

                writer.close()
                await writer.wait_closed()

                # pool.get must NOT have been called (onion skips the pool)
                fake_pool.get.assert_not_called()

            finally:
                for srv in running:
                    await _close_server(srv)

        asyncio.run(scenario())
|
||||||
|
|
||||||
|
|
||||||
|
class TestAuthSuccess:
    """Authenticate with valid credentials, then relay echo data."""

    def test_auth_echo(self):
        async def scenario():
            running = []
            try:
                echo_host, echo_port, echo_srv = await start_echo_server()
                running.append(echo_srv)

                cfg = ListenerConfig(
                    listen_host="127.0.0.1",
                    listen_port=free_port(),
                    auth={"alice": "s3cret"},
                )
                proxy = await asyncio.start_server(
                    lambda r, w: _handle_client(r, w, cfg, timeout=5.0, retries=1),
                    cfg.listen_host, cfg.listen_port,
                )
                running.append(proxy)
                await proxy.start_serving()

                reader, writer = await _socks5_connect_auth(
                    cfg.listen_host, cfg.listen_port,
                    echo_host, echo_port, "alice", "s3cret",
                )
                writer.write(b"hello auth")
                await writer.drain()
                echoed = await asyncio.wait_for(reader.read(4096), timeout=2.0)
                assert echoed == b"hello auth"

                writer.close()
                await writer.wait_closed()
            finally:
                for srv in running:
                    await _close_server(srv)

        asyncio.run(scenario())
|
||||||
|
|
||||||
|
|
||||||
|
class TestAuthFailure:
    """A wrong password yields the RFC 1929 failure status."""

    def test_wrong_password(self):
        async def scenario():
            running = []
            try:
                cfg = ListenerConfig(
                    listen_host="127.0.0.1",
                    listen_port=free_port(),
                    auth={"alice": "s3cret"},
                )
                proxy = await asyncio.start_server(
                    lambda r, w: _handle_client(r, w, cfg, timeout=5.0, retries=1),
                    cfg.listen_host, cfg.listen_port,
                )
                running.append(proxy)
                await proxy.start_serving()

                reader, writer = await asyncio.open_connection(
                    cfg.listen_host, cfg.listen_port,
                )
                # greeting offering the user/pass method
                writer.write(b"\x05\x01\x02")
                await writer.drain()
                greeting = await reader.readexactly(2)
                assert greeting == b"\x05\x02"

                # subnegotiation with a bad password
                uname = b"alice"
                passwd = b"wrong"
                writer.write(
                    b"\x01"
                    + bytes([len(uname)]) + uname
                    + bytes([len(passwd)]) + passwd
                )
                await writer.drain()
                status = await reader.readexactly(2)
                assert status == b"\x01\x01", f"expected auth failure, got {status!r}"

                writer.close()
                await writer.wait_closed()
            finally:
                for srv in running:
                    await _close_server(srv)

        asyncio.run(scenario())
|
||||||
|
|
||||||
|
|
||||||
|
class TestAuthMethodNotOffered:
    """Client offers only no-auth when auth is required -> 0xFF rejection."""

    def test_no_auth_method_rejected(self):
        async def scenario():
            running = []
            try:
                cfg = ListenerConfig(
                    listen_host="127.0.0.1",
                    listen_port=free_port(),
                    auth={"alice": "s3cret"},
                )
                proxy = await asyncio.start_server(
                    lambda r, w: _handle_client(r, w, cfg, timeout=5.0, retries=1),
                    cfg.listen_host, cfg.listen_port,
                )
                running.append(proxy)
                await proxy.start_serving()

                reader, writer = await asyncio.open_connection(
                    cfg.listen_host, cfg.listen_port,
                )
                # greeting offering only the no-auth method (0x00)
                writer.write(b"\x05\x01\x00")
                await writer.drain()
                resp = await reader.readexactly(2)
                assert resp == b"\x05\xff", f"expected method rejection, got {resp!r}"

                writer.close()
                await writer.wait_closed()
            finally:
                for srv in running:
                    await _close_server(srv)

        asyncio.run(scenario())
|
||||||
|
|
||||||
|
|
||||||
|
class TestNoAuthListenerUnchanged:
    """No auth configured -- the 0x00 method keeps working as before."""

    def test_no_auth_still_works(self):
        async def scenario():
            running = []
            try:
                echo_host, echo_port, echo_srv = await start_echo_server()
                running.append(echo_srv)

                cfg = ListenerConfig(
                    listen_host="127.0.0.1",
                    listen_port=free_port(),
                )
                proxy = await asyncio.start_server(
                    lambda r, w: _handle_client(r, w, cfg, timeout=5.0, retries=1),
                    cfg.listen_host, cfg.listen_port,
                )
                running.append(proxy)
                await proxy.start_serving()

                reader, writer = await _socks5_connect(
                    cfg.listen_host, cfg.listen_port, echo_host, echo_port,
                )
                writer.write(b"hello no auth")
                await writer.drain()
                echoed = await asyncio.wait_for(reader.read(4096), timeout=2.0)
                assert echoed == b"hello no auth"

                writer.close()
                await writer.wait_closed()
            finally:
                for srv in running:
                    await _close_server(srv)

        asyncio.run(scenario())
|
||||||
198
tests/test_metrics.py
Normal file
198
tests/test_metrics.py
Normal file
@@ -0,0 +1,198 @@
|
|||||||
|
"""Tests for metrics trackers and helpers."""
|
||||||
|
|
||||||
|
from s5p.metrics import LatencyTracker, Metrics, RateTracker, _human_bytes
|
||||||
|
|
||||||
|
# -- LatencyTracker ----------------------------------------------------------
|
||||||
|
|
||||||
|
|
||||||
|
class TestLatencyTracker:
    """Latency sample collection and percentile statistics."""

    def test_empty(self):
        tracker = LatencyTracker()
        assert tracker.count == 0
        assert tracker.stats() is None

    def test_single(self):
        tracker = LatencyTracker()
        tracker.record(0.1)
        stats = tracker.stats()
        assert stats is not None
        assert stats["count"] == 1
        assert stats["min"] == stats["max"] == stats["avg"] == stats["p50"]

    def test_percentiles(self):
        tracker = LatencyTracker()
        # 100 evenly spaced samples: 0.001, 0.002, ..., 0.100 seconds
        for step in range(1, 101):
            tracker.record(step / 1000)
        stats = tracker.stats()
        assert stats["count"] == 100
        assert stats["min"] == 1.0  # 0.001s = 1.0ms
        assert stats["max"] == 100.0  # 0.100s = 100.0ms
        assert 49.0 <= stats["avg"] <= 52.0
        assert 50.0 <= stats["p50"] <= 52.0
        assert 95.0 <= stats["p95"] <= 97.0
        assert 99.0 <= stats["p99"] <= 101.0

    def test_bounded(self):
        tracker = LatencyTracker(maxlen=5)
        for step in range(10):
            tracker.record(step / 100)
        assert tracker.count == 5
        stats = tracker.stats()
        # only the last 5 samples remain: 0.05..0.09
        assert stats["min"] == 50.0
        assert stats["max"] == 90.0

    def test_milliseconds(self):
        tracker = LatencyTracker()
        tracker.record(0.5)  # 500ms
        stats = tracker.stats()
        assert stats["min"] == 500.0
        assert stats["max"] == 500.0

    def test_quantiles_empty(self):
        assert LatencyTracker().quantiles() is None

    def test_quantiles_seconds(self):
        tracker = LatencyTracker()
        for step in range(1, 101):
            tracker.record(step / 1000)
        quantiles = tracker.quantiles()
        assert quantiles is not None
        assert quantiles["count"] == 100
        assert 0.050 <= quantiles["0.5"] <= 0.052
        assert 0.095 <= quantiles["0.95"] <= 0.097
        assert 0.099 <= quantiles["0.99"] <= 0.101
        assert "sum" in quantiles
        assert quantiles["sum"] > 0
|
||||||
|
|
||||||
|
|
||||||
|
# -- RateTracker -------------------------------------------------------------
|
||||||
|
|
||||||
|
|
||||||
|
class TestRateTracker:
    """Rolling-window event-rate calculation."""

    def test_empty(self):
        assert RateTracker().rate() == 0.0

    def test_single(self):
        tracker = RateTracker()
        tracker.record()
        assert tracker.rate() == 0.0

    def test_known_rate(self):
        tracker = RateTracker()
        # 11 events at 0.1s intervals = 10 events over 1.0s = 10.0/s
        for step in range(11):
            tracker.record(now=100.0 + step * 0.1)
        assert abs(tracker.rate() - 10.0) < 0.01

    def test_bounded(self):
        tracker = RateTracker(maxlen=5)
        for tick in range(10):
            tracker.record(now=float(tick))
        # only the last 5 events survive: t=5..9, span=4, rate=4/4=1.0
        assert abs(tracker.rate() - 1.0) < 0.01

    def test_zero_span(self):
        tracker = RateTracker()
        tracker.record(now=1.0)
        tracker.record(now=1.0)
        assert tracker.rate() == 0.0
|
||||||
|
|
||||||
|
|
||||||
|
# -- Metrics -----------------------------------------------------------------
|
||||||
|
|
||||||
|
|
||||||
|
class TestMetrics:
    """Metrics aggregation, dict output, and one-line summaries."""

    def test_to_dict_includes_rate_and_latency(self):
        metrics = Metrics()
        metrics.connections = 10
        metrics.conn_rate.record(now=0.0)
        metrics.conn_rate.record(now=1.0)
        metrics.latency.record(0.2)
        metrics.latency.record(0.3)
        snapshot = metrics.to_dict()
        assert "rate" in snapshot
        assert isinstance(snapshot["rate"], float)
        assert "latency" in snapshot
        assert snapshot["latency"] is not None
        assert snapshot["latency"]["count"] == 2

    def test_to_dict_latency_none_when_empty(self):
        snapshot = Metrics().to_dict()
        assert snapshot["latency"] is None
        assert snapshot["rate"] == 0.0

    def test_summary_includes_rate(self):
        metrics = Metrics()
        metrics.conn_rate.record(now=0.0)
        metrics.conn_rate.record(now=1.0)
        text = metrics.summary()
        assert "rate=" in text
        assert "/s" in text

    def test_summary_includes_latency(self):
        metrics = Metrics()
        metrics.latency.record(0.2)
        text = metrics.summary()
        assert "p50=" in text
        assert "p95=" in text

    def test_summary_no_latency_when_empty(self):
        text = Metrics().summary()
        assert "p50=" not in text
        assert "p95=" not in text

    def test_listener_latency(self):
        metrics = Metrics()
        metrics.get_listener_latency("0.0.0.0:1080").record(0.5)
        metrics.get_listener_latency("0.0.0.0:1080").record(0.6)
        metrics.get_listener_latency("0.0.0.0:1081").record(0.1)
        snapshot = metrics.to_dict()
        assert "listener_latency" in snapshot
        per_listener = snapshot["listener_latency"]
        assert "0.0.0.0:1080" in per_listener
        assert "0.0.0.0:1081" in per_listener
        assert per_listener["0.0.0.0:1080"]["count"] == 2
        assert per_listener["0.0.0.0:1081"]["count"] == 1

    def test_listener_latency_empty(self):
        assert Metrics().to_dict()["listener_latency"] == {}
|
||||||
|
|
||||||
|
|
||||||
|
# -- _human_bytes ------------------------------------------------------------
|
||||||
|
|
||||||
|
|
||||||
|
class TestHumanBytes:
    """Byte-count formatting across unit boundaries."""

    def test_bytes(self):
        assert _human_bytes(0) == "0B"
        assert _human_bytes(512) == "512B"

    def test_kilobytes(self):
        assert _human_bytes(1024) == "1.0K"
        assert _human_bytes(1536) == "1.5K"

    def test_megabytes(self):
        assert _human_bytes(1024 ** 2) == "1.0M"

    def test_gigabytes(self):
        assert _human_bytes(1024 ** 3) == "1.0G"

    def test_terabytes(self):
        assert _human_bytes(1024 ** 4) == "1.0T"

    def test_petabytes(self):
        assert _human_bytes(1024 ** 5) == "1.0P"
|
||||||
@@ -1,6 +1,9 @@
|
|||||||
"""Tests for the managed proxy pool."""
|
"""Tests for the managed proxy pool."""
|
||||||
|
|
||||||
|
import asyncio
|
||||||
|
import ssl
|
||||||
import time
|
import time
|
||||||
|
from unittest.mock import AsyncMock, MagicMock, patch
|
||||||
|
|
||||||
import pytest
|
import pytest
|
||||||
|
|
||||||
@@ -8,6 +11,95 @@ from s5p.config import ChainHop, PoolSourceConfig, ProxyPoolConfig
|
|||||||
from s5p.pool import ProxyEntry, ProxyPool
|
from s5p.pool import ProxyEntry, ProxyPool
|
||||||
|
|
||||||
|
|
||||||
|
class TestProxyPoolName:
    """Pool name and state-file path derivation."""

    def test_default_name(self):
        pool = ProxyPool(ProxyPoolConfig(sources=[]), [], timeout=10.0)
        assert pool.name == "default"
        assert pool._log_prefix == "pool"

    def test_named_pool(self):
        pool = ProxyPool(ProxyPoolConfig(sources=[]), [], timeout=10.0, name="clean")
        assert pool.name == "clean"
        assert pool._log_prefix == "pool[clean]"

    def test_state_path_default(self):
        pool = ProxyPool(ProxyPoolConfig(sources=[]), [], timeout=10.0)
        assert pool._state_path.name == "pool.json"

    def test_state_path_named(self):
        pool = ProxyPool(ProxyPoolConfig(sources=[]), [], timeout=10.0, name="clean")
        assert pool._state_path.name == "pool-clean.json"

    def test_state_path_explicit_overrides_name(self):
        cfg = ProxyPoolConfig(sources=[], state_file="/data/custom.json")
        pool = ProxyPool(cfg, [], timeout=10.0, name="clean")
        assert str(pool._state_path) == "/data/custom.json"
|
||||||
|
|
||||||
|
|
||||||
|
class TestProxyPoolMitmQuery:
    """mitm query-parameter propagation in API fetch URLs."""

    @staticmethod
    def _fetched_url(mitm):
        """Run _fetch_api with http_get_json mocked; return the requested URL."""
        from unittest.mock import AsyncMock, patch

        cfg = ProxyPoolConfig(sources=[])
        pool = ProxyPool(cfg, [], timeout=10.0)
        src = PoolSourceConfig(url="http://api:8081/proxies/all", mitm=mitm)

        async def run():
            with patch(
                "s5p.pool.http_get_json",
                new_callable=AsyncMock, return_value={"proxies": []},
            ) as mock:
                await pool._fetch_api(src)
            return mock.call_args[0][0]

        return asyncio.run(run())

    def test_mitm_false(self):
        assert "mitm=0" in self._fetched_url(False)

    def test_mitm_true(self):
        assert "mitm=1" in self._fetched_url(True)

    def test_mitm_none_omitted(self):
        assert "mitm" not in self._fetched_url(None)
|
||||||
|
|
||||||
|
|
||||||
class TestProxyEntry:
|
class TestProxyEntry:
|
||||||
"""Test ProxyEntry defaults."""
|
"""Test ProxyEntry defaults."""
|
||||||
|
|
||||||
@@ -19,6 +111,38 @@ class TestProxyEntry:
|
|||||||
assert entry.tests == 0
|
assert entry.tests == 0
|
||||||
|
|
||||||
|
|
||||||
|
class TestEffectiveChain:
    """chain_nodes round-robin substitution in pool health tests."""

    def test_no_nodes_returns_original(self):
        chain = [ChainHop(proto="socks5", host="10.0.0.1", port=9050)]
        pool = ProxyPool(ProxyPoolConfig(sources=[]), chain, timeout=10.0)
        assert pool._effective_chain() == chain

    def test_round_robin_across_nodes(self):
        chain = [ChainHop(proto="socks5", host="original", port=9050)]
        nodes = [
            ChainHop(proto="socks5", host=name, port=9050)
            for name in ("node-a", "node-b", "node-c")
        ]
        pool = ProxyPool(
            ProxyPoolConfig(sources=[]), chain, timeout=10.0, chain_nodes=nodes,
        )

        seen = [pool._effective_chain()[0].host for _ in range(6)]
        assert seen == ["node-a", "node-b", "node-c"] * 2

    def test_empty_chain_no_replacement(self):
        nodes = [ChainHop(proto="socks5", host="node-a", port=9050)]
        pool = ProxyPool(
            ProxyPoolConfig(sources=[]), [], timeout=10.0, chain_nodes=nodes,
        )
        assert pool._effective_chain() == []
|
||||||
|
|
||||||
|
|
||||||
class TestProxyPoolMerge:
|
class TestProxyPoolMerge:
|
||||||
"""Test proxy deduplication and merge."""
|
"""Test proxy deduplication and merge."""
|
||||||
|
|
||||||
@@ -47,6 +171,46 @@ class TestProxyPoolMerge:
|
|||||||
assert pool.count == 1
|
assert pool.count == 1
|
||||||
|
|
||||||
|
|
||||||
|
class TestProxyPoolAllowedProtos:
    """Pool-level proxy protocol filtering during merge."""

    def test_allowed_protos_filters_merge(self):
        pool = ProxyPool(
            ProxyPoolConfig(sources=[], allowed_protos=["socks5"]), [], timeout=10.0,
        )
        pool._merge([
            ChainHop(proto="socks5", host="1.2.3.4", port=1080),
            ChainHop(proto="http", host="5.6.7.8", port=8080),
            ChainHop(proto="socks4", host="9.9.9.9", port=1080),
        ])
        assert pool.count == 1
        assert "socks5://1.2.3.4:1080" in pool._proxies

    def test_allowed_protos_multiple(self):
        pool = ProxyPool(
            ProxyPoolConfig(sources=[], allowed_protos=["socks5", "http"]),
            [], timeout=10.0,
        )
        pool._merge([
            ChainHop(proto="socks5", host="1.2.3.4", port=1080),
            ChainHop(proto="http", host="5.6.7.8", port=8080),
            ChainHop(proto="socks4", host="9.9.9.9", port=1080),
        ])
        assert pool.count == 2
        assert "socks5://1.2.3.4:1080" in pool._proxies
        assert "http://5.6.7.8:8080" in pool._proxies
        assert "socks4://9.9.9.9:1080" not in pool._proxies

    def test_empty_allowed_protos_accepts_all(self):
        pool = ProxyPool(
            ProxyPoolConfig(sources=[], allowed_protos=[]), [], timeout=10.0,
        )
        pool._merge([
            ChainHop(proto="socks5", host="1.2.3.4", port=1080),
            ChainHop(proto="http", host="5.6.7.8", port=8080),
        ])
        assert pool.count == 2
|
||||||
|
|
||||||
|
|
||||||
class TestProxyPoolGet:
|
class TestProxyPoolGet:
|
||||||
"""Test proxy selection."""
|
"""Test proxy selection."""
|
||||||
|
|
||||||
@@ -152,6 +316,102 @@ class TestProxyPoolWeight:
|
|||||||
pool.report_failure(hop) # should not raise
|
pool.report_failure(hop) # should not raise
|
||||||
|
|
||||||
|
|
||||||
|
class TestDynamicConcurrency:
|
||||||
|
"""Test dynamic health test concurrency scaling."""
|
||||||
|
|
||||||
|
def test_scales_to_ten_percent(self):
|
||||||
|
import asyncio
|
||||||
|
from unittest.mock import AsyncMock, patch
|
||||||
|
|
||||||
|
cfg = ProxyPoolConfig(sources=[], test_concurrency=25)
|
||||||
|
pool = ProxyPool(cfg, [], timeout=10.0)
|
||||||
|
|
||||||
|
now = time.time()
|
||||||
|
# add 100 proxies -> effective concurrency = max(3, min(100//10, 25)) = 10
|
||||||
|
for i in range(100):
|
||||||
|
hop = ChainHop(proto="socks5", host=f"10.0.{i // 256}.{i % 256}", port=1080)
|
||||||
|
key = f"socks5://10.0.{i // 256}.{i % 256}:1080"
|
||||||
|
pool._proxies[key] = ProxyEntry(hop=hop, alive=False, last_seen=now)
|
||||||
|
|
||||||
|
captured = {}
|
||||||
|
|
||||||
|
original_semaphore = asyncio.Semaphore
|
||||||
|
|
||||||
|
def capture_semaphore(value):
|
||||||
|
captured["concurrency"] = value
|
||||||
|
return original_semaphore(value)
|
||||||
|
|
||||||
|
with (
|
||||||
|
patch.object(pool, "_test_proxy", new_callable=AsyncMock, return_value=True),
|
||||||
|
patch("s5p.pool.asyncio.Semaphore", side_effect=capture_semaphore),
|
||||||
|
):
|
||||||
|
asyncio.run(pool._run_health_tests())
|
||||||
|
|
||||||
|
assert captured["concurrency"] == 10
|
||||||
|
|
||||||
|
def test_minimum_of_three(self):
|
||||||
|
import asyncio
|
||||||
|
from unittest.mock import AsyncMock, patch
|
||||||
|
|
||||||
|
cfg = ProxyPoolConfig(sources=[], test_concurrency=25)
|
||||||
|
pool = ProxyPool(cfg, [], timeout=10.0)
|
||||||
|
|
||||||
|
now = time.time()
|
||||||
|
# 5 proxies -> 5//10=0, but min is 3
|
||||||
|
for i in range(5):
|
||||||
|
hop = ChainHop(proto="socks5", host=f"10.0.0.{i}", port=1080)
|
||||||
|
pool._proxies[f"socks5://10.0.0.{i}:1080"] = ProxyEntry(
|
||||||
|
hop=hop, alive=False, last_seen=now,
|
||||||
|
)
|
||||||
|
|
||||||
|
captured = {}
|
||||||
|
|
||||||
|
original_semaphore = asyncio.Semaphore
|
||||||
|
|
||||||
|
def capture_semaphore(value):
|
||||||
|
captured["concurrency"] = value
|
||||||
|
return original_semaphore(value)
|
||||||
|
|
||||||
|
with (
|
||||||
|
patch.object(pool, "_test_proxy", new_callable=AsyncMock, return_value=True),
|
||||||
|
patch("s5p.pool.asyncio.Semaphore", side_effect=capture_semaphore),
|
||||||
|
):
|
||||||
|
asyncio.run(pool._run_health_tests())
|
||||||
|
|
||||||
|
assert captured["concurrency"] == 3
|
||||||
|
|
||||||
|
def test_capped_by_config(self):
|
||||||
|
import asyncio
|
||||||
|
from unittest.mock import AsyncMock, patch
|
||||||
|
|
||||||
|
cfg = ProxyPoolConfig(sources=[], test_concurrency=5)
|
||||||
|
pool = ProxyPool(cfg, [], timeout=10.0)
|
||||||
|
|
||||||
|
now = time.time()
|
||||||
|
# 1000 proxies -> 1000//10=100, capped at 5
|
||||||
|
for i in range(1000):
|
||||||
|
h = f"10.{i // 65536}.{(i // 256) % 256}.{i % 256}"
|
||||||
|
hop = ChainHop(proto="socks5", host=h, port=1080)
|
||||||
|
key = str(hop)
|
||||||
|
pool._proxies[key] = ProxyEntry(hop=hop, alive=False, last_seen=now)
|
||||||
|
|
||||||
|
captured = {}
|
||||||
|
|
||||||
|
original_semaphore = asyncio.Semaphore
|
||||||
|
|
||||||
|
def capture_semaphore(value):
|
||||||
|
captured["concurrency"] = value
|
||||||
|
return original_semaphore(value)
|
||||||
|
|
||||||
|
with (
|
||||||
|
patch.object(pool, "_test_proxy", new_callable=AsyncMock, return_value=True),
|
||||||
|
patch("s5p.pool.asyncio.Semaphore", side_effect=capture_semaphore),
|
||||||
|
):
|
||||||
|
asyncio.run(pool._run_health_tests())
|
||||||
|
|
||||||
|
assert captured["concurrency"] == 5
|
||||||
|
|
||||||
|
|
||||||
class TestProxyPoolHealthTests:
|
class TestProxyPoolHealthTests:
|
||||||
"""Test selective health testing."""
|
"""Test selective health testing."""
|
||||||
|
|
||||||
@@ -468,3 +728,118 @@ class TestProxyPoolPersistence:
|
|||||||
entry = pool2._proxies["socks5://1.2.3.4:1080"]
|
entry = pool2._proxies["socks5://1.2.3.4:1080"]
|
||||||
assert entry.hop.username == "user"
|
assert entry.hop.username == "user"
|
||||||
assert entry.hop.password == "pass"
|
assert entry.hop.password == "pass"
|
||||||
|
|
||||||
|
|
||||||
|
class TestTlsCheck:
|
||||||
|
"""Test TLS handshake health check."""
|
||||||
|
|
||||||
|
def _make_pool(self, **kwargs):
|
||||||
|
cfg = ProxyPoolConfig(sources=[], **kwargs)
|
||||||
|
return ProxyPool(cfg, [], timeout=10.0)
|
||||||
|
|
||||||
|
def test_success(self):
|
||||||
|
pool = self._make_pool(test_targets=["www.example.com"])
|
||||||
|
|
||||||
|
mock_writer = MagicMock()
|
||||||
|
mock_writer.is_closing.return_value = False
|
||||||
|
mock_transport = MagicMock()
|
||||||
|
mock_protocol = MagicMock()
|
||||||
|
mock_transport.get_protocol.return_value = mock_protocol
|
||||||
|
mock_writer.transport = mock_transport
|
||||||
|
|
||||||
|
new_transport = MagicMock()
|
||||||
|
|
||||||
|
chain_ret = (MagicMock(), mock_writer)
|
||||||
|
with (
|
||||||
|
patch("s5p.pool.build_chain", new_callable=AsyncMock, return_value=chain_ret),
|
||||||
|
patch("asyncio.get_running_loop") as mock_loop_fn,
|
||||||
|
):
|
||||||
|
mock_loop = MagicMock()
|
||||||
|
mock_loop.start_tls = AsyncMock(return_value=new_transport)
|
||||||
|
mock_loop_fn.return_value = mock_loop
|
||||||
|
|
||||||
|
result = asyncio.run(pool._tls_check([]))
|
||||||
|
|
||||||
|
assert result is True
|
||||||
|
mock_loop.start_tls.assert_called_once_with(
|
||||||
|
mock_transport, mock_protocol, pool._ssl_ctx,
|
||||||
|
server_hostname="www.example.com",
|
||||||
|
)
|
||||||
|
new_transport.close.assert_called_once()
|
||||||
|
|
||||||
|
def test_build_chain_failure(self):
|
||||||
|
pool = self._make_pool(test_targets=["www.example.com"])
|
||||||
|
|
||||||
|
with patch(
|
||||||
|
"s5p.pool.build_chain", new_callable=AsyncMock,
|
||||||
|
side_effect=ConnectionError("refused"),
|
||||||
|
):
|
||||||
|
result = asyncio.run(pool._tls_check([]))
|
||||||
|
|
||||||
|
assert result is False
|
||||||
|
|
||||||
|
def test_handshake_failure(self):
|
||||||
|
pool = self._make_pool(test_targets=["www.example.com"])
|
||||||
|
|
||||||
|
mock_writer = MagicMock()
|
||||||
|
mock_writer.is_closing.return_value = False
|
||||||
|
mock_transport = MagicMock()
|
||||||
|
mock_transport.get_protocol.return_value = MagicMock()
|
||||||
|
mock_writer.transport = mock_transport
|
||||||
|
|
||||||
|
chain_ret = (MagicMock(), mock_writer)
|
||||||
|
with (
|
||||||
|
patch("s5p.pool.build_chain", new_callable=AsyncMock, return_value=chain_ret),
|
||||||
|
patch("asyncio.get_running_loop") as mock_loop_fn,
|
||||||
|
):
|
||||||
|
mock_loop = MagicMock()
|
||||||
|
mock_loop.start_tls = AsyncMock(
|
||||||
|
side_effect=ssl.SSLError("handshake failed"),
|
||||||
|
)
|
||||||
|
mock_loop_fn.return_value = mock_loop
|
||||||
|
|
||||||
|
result = asyncio.run(pool._tls_check([]))
|
||||||
|
|
||||||
|
assert result is False
|
||||||
|
|
||||||
|
def test_round_robin_rotation(self):
|
||||||
|
targets = ["host-a.example.com", "host-b.example.com", "host-c.example.com"]
|
||||||
|
pool = self._make_pool(test_targets=targets)
|
||||||
|
|
||||||
|
selected: list[str] = []
|
||||||
|
|
||||||
|
async def fake_build_chain(chain, host, port, timeout=None):
|
||||||
|
selected.append(host)
|
||||||
|
raise ConnectionError("test")
|
||||||
|
|
||||||
|
with patch("s5p.pool.build_chain", side_effect=fake_build_chain):
|
||||||
|
for _ in range(6):
|
||||||
|
asyncio.run(pool._tls_check([]))
|
||||||
|
|
||||||
|
assert selected == ["host-a.example.com", "host-b.example.com", "host-c.example.com",
|
||||||
|
"host-a.example.com", "host-b.example.com", "host-c.example.com"]
|
||||||
|
|
||||||
|
def test_empty_targets(self):
|
||||||
|
pool = self._make_pool(test_targets=[])
|
||||||
|
result = asyncio.run(pool._tls_check([]))
|
||||||
|
assert result is False
|
||||||
|
|
||||||
|
|
||||||
|
class TestProxyPoolConfigCompat:
|
||||||
|
"""Test backward compatibility for test_url -> test_targets."""
|
||||||
|
|
||||||
|
def test_legacy_test_url_converts(self):
|
||||||
|
cfg = ProxyPoolConfig(test_url="http://httpbin.org/ip")
|
||||||
|
assert cfg.test_targets == ["httpbin.org"]
|
||||||
|
|
||||||
|
def test_explicit_test_targets_wins(self):
|
||||||
|
cfg = ProxyPoolConfig(
|
||||||
|
test_url="http://httpbin.org/ip",
|
||||||
|
test_targets=["custom.example.com"],
|
||||||
|
)
|
||||||
|
assert cfg.test_targets == ["custom.example.com"]
|
||||||
|
|
||||||
|
def test_defaults_when_neither_set(self):
|
||||||
|
cfg = ProxyPoolConfig()
|
||||||
|
assert cfg.test_targets == ["www.google.com", "www.cloudflare.com", "www.amazon.com"]
|
||||||
|
assert cfg.test_url == ""
|
||||||
|
|||||||
@@ -1,6 +1,66 @@
|
|||||||
"""Tests for protocol helpers."""
|
"""Tests for protocol helpers."""
|
||||||
|
|
||||||
from s5p.proto import Socks5AddrType, encode_address
|
import asyncio
|
||||||
|
|
||||||
|
import pytest
|
||||||
|
|
||||||
|
from s5p.proto import (
|
||||||
|
ProtoError,
|
||||||
|
Socks5AddrType,
|
||||||
|
Socks5Reply,
|
||||||
|
encode_address,
|
||||||
|
http_connect,
|
||||||
|
socks4_connect,
|
||||||
|
socks5_connect,
|
||||||
|
)
|
||||||
|
|
||||||
|
# -- helpers -----------------------------------------------------------------
|
||||||
|
|
||||||
|
|
||||||
|
class _MockTransport(asyncio.Transport):
|
||||||
|
"""Minimal transport that captures writes and supports drain."""
|
||||||
|
|
||||||
|
def __init__(self):
|
||||||
|
super().__init__()
|
||||||
|
self.written = bytearray()
|
||||||
|
self._closing = False
|
||||||
|
|
||||||
|
def write(self, data):
|
||||||
|
self.written.extend(data)
|
||||||
|
|
||||||
|
def is_closing(self):
|
||||||
|
return self._closing
|
||||||
|
|
||||||
|
def close(self):
|
||||||
|
self._closing = True
|
||||||
|
|
||||||
|
def get_extra_info(self, name, default=None):
|
||||||
|
return default
|
||||||
|
|
||||||
|
|
||||||
|
def _make_streams(response_data: bytes):
|
||||||
|
"""Create mock reader/writer for protocol tests.
|
||||||
|
|
||||||
|
Must be called from within a running event loop.
|
||||||
|
"""
|
||||||
|
reader = asyncio.StreamReader()
|
||||||
|
reader.feed_data(response_data)
|
||||||
|
reader.feed_eof()
|
||||||
|
|
||||||
|
protocol = asyncio.StreamReaderProtocol(reader)
|
||||||
|
transport = _MockTransport()
|
||||||
|
protocol.connection_made(transport)
|
||||||
|
|
||||||
|
writer = asyncio.StreamWriter(transport, protocol, reader, asyncio.get_running_loop())
|
||||||
|
return reader, writer
|
||||||
|
|
||||||
|
|
||||||
|
def _run(coro):
|
||||||
|
"""Run a coroutine in a fresh event loop."""
|
||||||
|
asyncio.run(coro)
|
||||||
|
|
||||||
|
|
||||||
|
# -- encode_address ----------------------------------------------------------
|
||||||
|
|
||||||
|
|
||||||
class TestEncodeAddress:
|
class TestEncodeAddress:
|
||||||
@@ -11,12 +71,265 @@ class TestEncodeAddress:
|
|||||||
assert atyp == Socks5AddrType.IPV4
|
assert atyp == Socks5AddrType.IPV4
|
||||||
assert data == b"\x7f\x00\x00\x01"
|
assert data == b"\x7f\x00\x00\x01"
|
||||||
|
|
||||||
|
def test_ipv4_zeros(self):
|
||||||
|
atyp, data = encode_address("0.0.0.0")
|
||||||
|
assert atyp == Socks5AddrType.IPV4
|
||||||
|
assert data == b"\x00\x00\x00\x00"
|
||||||
|
|
||||||
def test_ipv6(self):
|
def test_ipv6(self):
|
||||||
atyp, data = encode_address("::1")
|
atyp, data = encode_address("::1")
|
||||||
assert atyp == Socks5AddrType.IPV6
|
assert atyp == Socks5AddrType.IPV6
|
||||||
assert len(data) == 16
|
assert len(data) == 16
|
||||||
|
assert data[-1] == 1
|
||||||
|
|
||||||
|
def test_ipv6_full(self):
|
||||||
|
atyp, data = encode_address("2001:db8::1")
|
||||||
|
assert atyp == Socks5AddrType.IPV6
|
||||||
|
assert len(data) == 16
|
||||||
|
|
||||||
def test_domain(self):
|
def test_domain(self):
|
||||||
atyp, data = encode_address("example.com")
|
atyp, data = encode_address("example.com")
|
||||||
assert atyp == Socks5AddrType.DOMAIN
|
assert atyp == Socks5AddrType.DOMAIN
|
||||||
assert data == bytes([11]) + b"example.com"
|
assert data == bytes([11]) + b"example.com"
|
||||||
|
|
||||||
|
def test_domain_short(self):
|
||||||
|
atyp, data = encode_address("a.co")
|
||||||
|
assert atyp == Socks5AddrType.DOMAIN
|
||||||
|
assert data == bytes([4]) + b"a.co"
|
||||||
|
|
||||||
|
def test_domain_long(self):
|
||||||
|
host = "sub.domain.example.com"
|
||||||
|
atyp, data = encode_address(host)
|
||||||
|
assert atyp == Socks5AddrType.DOMAIN
|
||||||
|
assert data[0] == len(host)
|
||||||
|
assert data[1:] == host.encode("ascii")
|
||||||
|
|
||||||
|
|
||||||
|
# -- socks5_connect ----------------------------------------------------------
|
||||||
|
|
||||||
|
|
||||||
|
class TestSocks5Connect:
|
||||||
|
"""Test SOCKS5 handshake building."""
|
||||||
|
|
||||||
|
def test_no_auth_success(self):
|
||||||
|
"""Successful SOCKS5 connect without auth."""
|
||||||
|
bind_addr = b"\x01\x00\x00\x00\x00\x00\x00" # IPv4 0.0.0.0:0
|
||||||
|
response = b"\x05\x00" + b"\x05\x00\x00" + bind_addr
|
||||||
|
|
||||||
|
async def run():
|
||||||
|
reader, writer = _make_streams(response)
|
||||||
|
await socks5_connect(reader, writer, "example.com", 80)
|
||||||
|
|
||||||
|
_run(run())
|
||||||
|
|
||||||
|
def test_auth_success(self):
|
||||||
|
"""Successful SOCKS5 connect with username/password auth."""
|
||||||
|
bind_addr = b"\x01\x00\x00\x00\x00\x00\x00"
|
||||||
|
response = b"\x05\x02" + b"\x01\x00" + b"\x05\x00\x00" + bind_addr
|
||||||
|
|
||||||
|
async def run():
|
||||||
|
reader, writer = _make_streams(response)
|
||||||
|
await socks5_connect(reader, writer, "example.com", 80, "user", "pass")
|
||||||
|
|
||||||
|
_run(run())
|
||||||
|
|
||||||
|
def test_auth_failure(self):
|
||||||
|
"""SOCKS5 auth rejected by server."""
|
||||||
|
response = b"\x05\x02" + b"\x01\x01"
|
||||||
|
|
||||||
|
async def run():
|
||||||
|
reader, writer = _make_streams(response)
|
||||||
|
with pytest.raises(ProtoError, match="authentication failed"):
|
||||||
|
await socks5_connect(reader, writer, "example.com", 80, "user", "bad")
|
||||||
|
|
||||||
|
_run(run())
|
||||||
|
|
||||||
|
def test_no_acceptable_methods(self):
|
||||||
|
"""Server rejects all auth methods (0xFF)."""
|
||||||
|
response = b"\x05\xff"
|
||||||
|
|
||||||
|
async def run():
|
||||||
|
reader, writer = _make_streams(response)
|
||||||
|
with pytest.raises(ProtoError, match="no acceptable"):
|
||||||
|
await socks5_connect(reader, writer, "example.com", 80)
|
||||||
|
|
||||||
|
_run(run())
|
||||||
|
|
||||||
|
def test_connect_refused(self):
|
||||||
|
"""SOCKS5 connect reply with connection refused."""
|
||||||
|
bind_addr = b"\x01\x00\x00\x00\x00\x00\x00"
|
||||||
|
response = b"\x05\x00" + b"\x05\x05\x00" + bind_addr
|
||||||
|
|
||||||
|
async def run():
|
||||||
|
reader, writer = _make_streams(response)
|
||||||
|
with pytest.raises(ProtoError, match="connect failed"):
|
||||||
|
await socks5_connect(reader, writer, "example.com", 80)
|
||||||
|
|
||||||
|
_run(run())
|
||||||
|
|
||||||
|
def test_wrong_version(self):
|
||||||
|
"""Server responds with wrong SOCKS version."""
|
||||||
|
response = b"\x04\x00"
|
||||||
|
|
||||||
|
async def run():
|
||||||
|
reader, writer = _make_streams(response)
|
||||||
|
with pytest.raises(ProtoError, match="unexpected version"):
|
||||||
|
await socks5_connect(reader, writer, "example.com", 80)
|
||||||
|
|
||||||
|
_run(run())
|
||||||
|
|
||||||
|
def test_server_requires_auth_no_creds(self):
|
||||||
|
"""Server demands auth but no credentials provided."""
|
||||||
|
response = b"\x05\x02"
|
||||||
|
|
||||||
|
async def run():
|
||||||
|
reader, writer = _make_streams(response)
|
||||||
|
with pytest.raises(ProtoError, match="requires auth"):
|
||||||
|
await socks5_connect(reader, writer, "example.com", 80)
|
||||||
|
|
||||||
|
_run(run())
|
||||||
|
|
||||||
|
|
||||||
|
# -- socks4_connect ----------------------------------------------------------
|
||||||
|
|
||||||
|
|
||||||
|
class TestSocks4Connect:
|
||||||
|
"""Test SOCKS4/4a request building."""
|
||||||
|
|
||||||
|
def test_ip_success(self):
|
||||||
|
"""SOCKS4 connect with IP address."""
|
||||||
|
response = b"\x00\x5a" + b"\x00" * 6
|
||||||
|
|
||||||
|
async def run():
|
||||||
|
reader, writer = _make_streams(response)
|
||||||
|
await socks4_connect(reader, writer, "1.2.3.4", 80)
|
||||||
|
|
||||||
|
_run(run())
|
||||||
|
|
||||||
|
def test_domain_success(self):
|
||||||
|
"""SOCKS4a connect with domain name."""
|
||||||
|
response = b"\x00\x5a" + b"\x00" * 6
|
||||||
|
|
||||||
|
async def run():
|
||||||
|
reader, writer = _make_streams(response)
|
||||||
|
await socks4_connect(reader, writer, "example.com", 80)
|
||||||
|
|
||||||
|
_run(run())
|
||||||
|
|
||||||
|
def test_rejected(self):
|
||||||
|
"""SOCKS4 request rejected."""
|
||||||
|
response = b"\x00\x5b" + b"\x00" * 6
|
||||||
|
|
||||||
|
async def run():
|
||||||
|
reader, writer = _make_streams(response)
|
||||||
|
with pytest.raises(ProtoError, match="rejected"):
|
||||||
|
await socks4_connect(reader, writer, "1.2.3.4", 80)
|
||||||
|
|
||||||
|
_run(run())
|
||||||
|
|
||||||
|
|
||||||
|
# -- http_connect ------------------------------------------------------------
|
||||||
|
|
||||||
|
|
||||||
|
class TestHttpConnect:
|
||||||
|
"""Test HTTP CONNECT request building."""
|
||||||
|
|
||||||
|
def test_success_200(self):
|
||||||
|
"""HTTP CONNECT with 200 response."""
|
||||||
|
response = b"HTTP/1.1 200 Connection Established\r\n\r\n"
|
||||||
|
|
||||||
|
async def run():
|
||||||
|
reader, writer = _make_streams(response)
|
||||||
|
await http_connect(reader, writer, "example.com", 443)
|
||||||
|
|
||||||
|
_run(run())
|
||||||
|
|
||||||
|
def test_success_with_headers(self):
|
||||||
|
"""HTTP CONNECT with extra headers in response."""
|
||||||
|
response = b"HTTP/1.1 200 OK\r\nX-Proxy: test\r\n\r\n"
|
||||||
|
|
||||||
|
async def run():
|
||||||
|
reader, writer = _make_streams(response)
|
||||||
|
await http_connect(reader, writer, "example.com", 443)
|
||||||
|
|
||||||
|
_run(run())
|
||||||
|
|
||||||
|
def test_auth_success(self):
|
||||||
|
"""HTTP CONNECT with proxy authentication."""
|
||||||
|
response = b"HTTP/1.1 200 OK\r\n\r\n"
|
||||||
|
|
||||||
|
async def run():
|
||||||
|
reader, writer = _make_streams(response)
|
||||||
|
await http_connect(reader, writer, "example.com", 443, "user", "pass")
|
||||||
|
|
||||||
|
_run(run())
|
||||||
|
|
||||||
|
def test_forbidden(self):
|
||||||
|
"""HTTP CONNECT with 403 response."""
|
||||||
|
response = b"HTTP/1.1 403 Forbidden\r\n\r\n"
|
||||||
|
|
||||||
|
async def run():
|
||||||
|
reader, writer = _make_streams(response)
|
||||||
|
with pytest.raises(ProtoError, match="connect failed"):
|
||||||
|
await http_connect(reader, writer, "example.com", 443)
|
||||||
|
|
||||||
|
_run(run())
|
||||||
|
|
||||||
|
def test_proxy_auth_required(self):
|
||||||
|
"""HTTP CONNECT with 407 response."""
|
||||||
|
response = b"HTTP/1.1 407 Proxy Authentication Required\r\n\r\n"
|
||||||
|
|
||||||
|
async def run():
|
||||||
|
reader, writer = _make_streams(response)
|
||||||
|
with pytest.raises(ProtoError, match="connect failed"):
|
||||||
|
await http_connect(reader, writer, "example.com", 443)
|
||||||
|
|
||||||
|
_run(run())
|
||||||
|
|
||||||
|
def test_empty_response(self):
|
||||||
|
"""HTTP CONNECT with empty response."""
|
||||||
|
|
||||||
|
async def run():
|
||||||
|
reader, writer = _make_streams(b"")
|
||||||
|
with pytest.raises(ProtoError, match="empty response"):
|
||||||
|
await http_connect(reader, writer, "example.com", 443)
|
||||||
|
|
||||||
|
_run(run())
|
||||||
|
|
||||||
|
|
||||||
|
# -- Socks5Reply enum -------------------------------------------------------
|
||||||
|
|
||||||
|
|
||||||
|
class TestSocks5Reply:
|
||||||
|
"""Test SOCKS5 reply code values."""
|
||||||
|
|
||||||
|
def test_succeeded(self):
|
||||||
|
assert Socks5Reply.SUCCEEDED == 0x00
|
||||||
|
|
||||||
|
def test_general_failure(self):
|
||||||
|
assert Socks5Reply.GENERAL_FAILURE == 0x01
|
||||||
|
|
||||||
|
def test_connection_refused(self):
|
||||||
|
assert Socks5Reply.CONNECTION_REFUSED == 0x05
|
||||||
|
|
||||||
|
def test_command_not_supported(self):
|
||||||
|
assert Socks5Reply.COMMAND_NOT_SUPPORTED == 0x07
|
||||||
|
|
||||||
|
def test_address_type_not_supported(self):
|
||||||
|
assert Socks5Reply.ADDRESS_TYPE_NOT_SUPPORTED == 0x08
|
||||||
|
|
||||||
|
|
||||||
|
# -- ProtoError --------------------------------------------------------------
|
||||||
|
|
||||||
|
|
||||||
|
class TestProtoError:
|
||||||
|
"""Test ProtoError exception."""
|
||||||
|
|
||||||
|
def test_default_reply(self):
|
||||||
|
err = ProtoError("test error")
|
||||||
|
assert str(err) == "test error"
|
||||||
|
assert err.reply == Socks5Reply.GENERAL_FAILURE
|
||||||
|
|
||||||
|
def test_custom_reply(self):
|
||||||
|
err = ProtoError("refused", Socks5Reply.CONNECTION_REFUSED)
|
||||||
|
assert err.reply == Socks5Reply.CONNECTION_REFUSED
|
||||||
|
|||||||
Reference in New Issue
Block a user