feat: Add server management and database migrations

- Add start/stop/restart/status commands to Makefile
- Add health endpoint with uptime tracking
- Add CLI module (esp32-web command)
- Add initial database migration
- Listen on all interfaces (0.0.0.0:5500)

Bump version to 0.1.1
This commit is contained in:
user
2026-02-05 21:03:35 +01:00
parent a676136f5d
commit a8f616970a
9 changed files with 550 additions and 8 deletions

View File

@@ -1,13 +1,56 @@
# Phony targets: commands, not files. (Stale pre-change .PHONY line from the
# diff render removed; declaring it twice is redundant and confusing.)
.PHONY: build run dev stop logs test migrate clean install start restart status

# User-tunable settings (override on the command line: make PORT=8080 start).
APP_NAME := esp32-web
PORT := 5500
HOST := 0.0.0.0
PIDFILE := /tmp/esp32-web.pid
LOGFILE := /tmp/esp32-web.log
# Install the package in editable mode, including dev extras.
install:
	pip install -e ".[dev]"
# Server management
# Start the server as a background daemon; records the PID in $(PIDFILE).
# After the grace period, verify the process is still alive — previously a
# flask that crashed on startup still produced "Server started" and a stale
# pidfile.
start:
	@if [ -f $(PIDFILE) ] && kill -0 $$(cat $(PIDFILE)) 2>/dev/null; then \
		echo "Server already running (PID $$(cat $(PIDFILE)))"; \
	else \
		echo "Starting server on $(HOST):$(PORT)..."; \
		nohup flask --app src/esp32_web run --host $(HOST) --port $(PORT) > $(LOGFILE) 2>&1 & \
		echo $$! > $(PIDFILE); \
		sleep 1; \
		if kill -0 $$(cat $(PIDFILE)) 2>/dev/null; then \
			echo "Server started (PID $$(cat $(PIDFILE)))"; \
		else \
			echo "Server failed to start; see $(LOGFILE)"; \
			rm -f $(PIDFILE); \
			exit 1; \
		fi; \
	fi
# Stop the background server via the PID recorded in $(PIDFILE).
# kill failure is tolerated (|| true) so a stale pidfile is still cleaned up.
stop:
	@if [ -f $(PIDFILE) ]; then \
		echo "Stopping server (PID $$(cat $(PIDFILE)))..."; \
		kill $$(cat $(PIDFILE)) 2>/dev/null || true; \
		rm -f $(PIDFILE); \
		echo "Server stopped"; \
	else \
		echo "Server not running"; \
	fi
# Restart: run stop (as a prerequisite), pause briefly, then re-enter make
# for start — $(MAKE) rather than bare `make` so flags propagate.
restart: stop
	@sleep 1
	@$(MAKE) start
# Report whether the server is alive (kill -0 = existence probe) and, if so,
# pretty-print the /health endpoint; a dead pidfile is removed as a side effect.
status:
	@if [ -f $(PIDFILE) ] && kill -0 $$(cat $(PIDFILE)) 2>/dev/null; then \
		echo "Server running (PID $$(cat $(PIDFILE)))"; \
		curl -s http://localhost:$(PORT)/health 2>/dev/null | \
		python3 -c "import sys,json; d=json.load(sys.stdin); print(f\"Status: {d['status']}\nUptime: {d['uptime']}\")" 2>/dev/null \
		|| echo "Health: unreachable"; \
	else \
		echo "Server not running"; \
		rm -f $(PIDFILE) 2>/dev/null; \
	fi
# Follow the daemon's log file (Ctrl-C to exit).
logs:
	@tail -f $(LOGFILE)
# Run the Flask dev server in the foreground with the reloader/debugger.
# The stale pre-change recipe line (without --host) was left in by the diff
# render; keeping both would start a second server after the first exits.
dev:
	flask --app src/esp32_web run --host $(HOST) --port $(PORT) --debug
# Run the test suite verbosely; pytest's exit code fails the target on error.
test:
	pytest -v
@@ -21,20 +64,21 @@ migrate-init:
# Generate a new migration; pass the message as msg="...".
migrate-create:
	flask --app src/esp32_web db migrate -m "$(msg)"

# Container targets
build:
	podman build -t $(APP_NAME) .

# Stale pre-rename headers (run:, stop:, logs:) removed: they were the old
# diff lines and would duplicate-define stop/logs, overriding the server
# management rules above ("overriding recipe for target" warnings).
container-run:
	podman run -d --name $(APP_NAME) \
		-p $(PORT):$(PORT) \
		-p $(PORT):$(PORT)/udp \
		-v ./instance:/app/instance:Z \
		$(APP_NAME)

container-stop:
	podman stop $(APP_NAME) && podman rm $(APP_NAME)

container-logs:
	podman logs -f $(APP_NAME)
clean:

1
migrations/README Normal file
View File

@@ -0,0 +1 @@
Single-database configuration for Flask.

50
migrations/alembic.ini Normal file
View File

@@ -0,0 +1,50 @@
# A generic, single database configuration.
[alembic]
# template used to generate migration files
# file_template = %%(rev)s_%%(slug)s
# set to 'true' to run the environment during
# the 'revision' command, regardless of autogenerate
# revision_environment = false
# Logging configuration
[loggers]
keys = root,sqlalchemy,alembic,flask_migrate
[handlers]
keys = console
[formatters]
keys = generic
[logger_root]
level = WARN
handlers = console
qualname =
[logger_sqlalchemy]
level = WARN
handlers =
qualname = sqlalchemy.engine
[logger_alembic]
level = INFO
handlers =
qualname = alembic
[logger_flask_migrate]
level = INFO
handlers =
qualname = flask_migrate
[handler_console]
class = StreamHandler
args = (sys.stderr,)
level = NOTSET
formatter = generic
[formatter_generic]
format = %(levelname)-5.5s [%(name)s] %(message)s
datefmt = %H:%M:%S

113
migrations/env.py Normal file
View File

@@ -0,0 +1,113 @@
import logging
from logging.config import fileConfig

from flask import current_app

from alembic import context

# this is the Alembic Config object, which provides
# access to the values within the .ini file in use.
config = context.config

# Interpret the config file for Python logging.
# This line sets up loggers basically.
fileConfig(config.config_file_name)
logger = logging.getLogger('alembic.env')
def get_engine():
    """Return the SQLAlchemy engine registered by Flask-Migrate.

    Tries the old accessor first and falls back to the property, so both
    Flask-SQLAlchemy major versions are supported.
    """
    try:
        # this works with Flask-SQLAlchemy<3 and Alchemical
        return current_app.extensions['migrate'].db.get_engine()
    except (TypeError, AttributeError):
        # this works with Flask-SQLAlchemy>=3
        return current_app.extensions['migrate'].db.engine
def get_engine_url():
    """Return the database URL as a string safe for alembic.ini.

    '%' is doubled because the value is fed through ConfigParser
    interpolation; hide_password=False keeps credentials in the URL.
    """
    try:
        return get_engine().url.render_as_string(hide_password=False).replace(
            '%', '%%')
    except AttributeError:
        # older SQLAlchemy: URL objects have no render_as_string()
        return str(get_engine().url).replace('%', '%%')
# add your model's MetaData object here
# for 'autogenerate' support
# from myapp import mymodel
# target_metadata = mymodel.Base.metadata

# Point Alembic at the Flask app's database and keep a handle on the db
# object Flask-Migrate registered (used by get_metadata below).
config.set_main_option('sqlalchemy.url', get_engine_url())
target_db = current_app.extensions['migrate'].db

# other values from the config, defined by the needs of env.py,
# can be acquired:
# my_important_option = config.get_main_option("my_important_option")
# ... etc.
def get_metadata():
    """Return the MetaData that autogenerate should diff against.

    Flask-SQLAlchemy >= 3 exposes a per-bind mapping `metadatas`;
    the default (unnamed) bind lives under the None key.
    """
    return target_db.metadatas[None] if hasattr(target_db, 'metadatas') else target_db.metadata
def run_migrations_offline():
    """Run migrations in 'offline' mode.

    Configures the context with just a database URL rather than an
    Engine, so no DBAPI needs to be importable. context.execute()
    emits the generated SQL to the script output instead of running it.
    """
    context.configure(
        url=config.get_main_option("sqlalchemy.url"),
        target_metadata=get_metadata(),
        literal_binds=True,
    )

    with context.begin_transaction():
        context.run_migrations()
def run_migrations_online():
    """Run migrations in 'online' mode.

    In this scenario we need to create an Engine
    and associate a connection with the context.
    """

    # this callback is used to prevent an auto-migration from being generated
    # when there are no changes to the schema
    # reference: http://alembic.zzzcomputing.com/en/latest/cookbook.html
    def process_revision_directives(context, revision, directives):
        if getattr(config.cmd_opts, 'autogenerate', False):
            script = directives[0]
            if script.upgrade_ops.is_empty():
                # drop the would-be revision entirely so no empty file is written
                directives[:] = []
                logger.info('No changes in schema detected.')

    # Install the callback only if the application did not configure its own.
    conf_args = current_app.extensions['migrate'].configure_args
    if conf_args.get("process_revision_directives") is None:
        conf_args["process_revision_directives"] = process_revision_directives

    connectable = get_engine()

    with connectable.connect() as connection:
        context.configure(
            connection=connection,
            target_metadata=get_metadata(),
            **conf_args
        )

        with context.begin_transaction():
            context.run_migrations()
# Entry point: Alembic selects offline mode for `--sql` invocations.
if context.is_offline_mode():
    run_migrations_offline()
else:
    run_migrations_online()

24
migrations/script.py.mako Normal file
View File

@@ -0,0 +1,24 @@
"""${message}
Revision ID: ${up_revision}
Revises: ${down_revision | comma,n}
Create Date: ${create_date}
"""
from alembic import op
import sqlalchemy as sa
${imports if imports else ""}
# revision identifiers, used by Alembic.
revision = ${repr(up_revision)}
down_revision = ${repr(down_revision)}
branch_labels = ${repr(branch_labels)}
depends_on = ${repr(depends_on)}
def upgrade():
${upgrades if upgrades else "pass"}
def downgrade():
${downgrades if downgrades else "pass"}

View File

@@ -0,0 +1,151 @@
"""initial schema
Revision ID: 80ccb7597566
Revises:
Create Date: 2026-02-05 20:58:36.484000
"""
from alembic import op
import sqlalchemy as sa
# revision identifiers, used by Alembic.
revision = '80ccb7597566'
down_revision = None
branch_labels = None
depends_on = None
def upgrade():
    """Create the initial schema: devices, sensors, alerts, events, probes, sightings.

    batch_alter_table is used for index creation so the DDL also works on
    backends with limited ALTER support (e.g. SQLite).
    """
    # ### commands auto generated by Alembic - please adjust! ###
    # Devices observed by the fleet; unique per MAC address (see index below).
    op.create_table('devices',
        sa.Column('id', sa.Integer(), nullable=False),
        sa.Column('mac', sa.String(length=17), nullable=False),
        sa.Column('device_type', sa.String(length=8), nullable=False),
        sa.Column('vendor', sa.String(length=64), nullable=True),
        sa.Column('name', sa.String(length=64), nullable=True),
        sa.Column('first_seen', sa.DateTime(), nullable=False),
        sa.Column('last_seen', sa.DateTime(), nullable=False),
        sa.Column('company_id', sa.Integer(), nullable=True),
        sa.Column('tx_power', sa.Integer(), nullable=True),
        sa.PrimaryKeyConstraint('id')
    )
    with op.batch_alter_table('devices', schema=None) as batch_op:
        batch_op.create_index(batch_op.f('ix_devices_mac'), ['mac'], unique=True)

    # Sensor nodes reporting in; unique per hostname.
    op.create_table('sensors',
        sa.Column('id', sa.Integer(), nullable=False),
        sa.Column('hostname', sa.String(length=32), nullable=False),
        sa.Column('ip', sa.String(length=15), nullable=False),
        sa.Column('last_seen', sa.DateTime(), nullable=False),
        sa.Column('status', sa.String(length=16), nullable=False),
        sa.Column('config_json', sa.Text(), nullable=True),
        sa.PrimaryKeyConstraint('id')
    )
    with op.batch_alter_table('sensors', schema=None) as batch_op:
        batch_op.create_index(batch_op.f('ix_sensors_hostname'), ['hostname'], unique=True)

    # Alerts raised by a sensor (FK -> sensors).
    op.create_table('alerts',
        sa.Column('id', sa.Integer(), nullable=False),
        sa.Column('sensor_id', sa.Integer(), nullable=False),
        sa.Column('alert_type', sa.String(length=16), nullable=False),
        sa.Column('source_mac', sa.String(length=17), nullable=True),
        sa.Column('target_mac', sa.String(length=17), nullable=True),
        sa.Column('rssi', sa.Integer(), nullable=True),
        sa.Column('flood_count', sa.Integer(), nullable=True),
        sa.Column('flood_window', sa.Integer(), nullable=True),
        sa.Column('timestamp', sa.DateTime(), nullable=False),
        sa.ForeignKeyConstraint(['sensor_id'], ['sensors.id'], ),
        sa.PrimaryKeyConstraint('id')
    )
    with op.batch_alter_table('alerts', schema=None) as batch_op:
        batch_op.create_index(batch_op.f('ix_alerts_alert_type'), ['alert_type'], unique=False)
        batch_op.create_index(batch_op.f('ix_alerts_sensor_id'), ['sensor_id'], unique=False)
        batch_op.create_index(batch_op.f('ix_alerts_timestamp'), ['timestamp'], unique=False)

    # Generic sensor events with a JSON payload (FK -> sensors).
    op.create_table('events',
        sa.Column('id', sa.Integer(), nullable=False),
        sa.Column('sensor_id', sa.Integer(), nullable=False),
        sa.Column('event_type', sa.String(length=32), nullable=False),
        sa.Column('payload_json', sa.Text(), nullable=True),
        sa.Column('timestamp', sa.DateTime(), nullable=False),
        sa.ForeignKeyConstraint(['sensor_id'], ['sensors.id'], ),
        sa.PrimaryKeyConstraint('id')
    )
    with op.batch_alter_table('events', schema=None) as batch_op:
        batch_op.create_index(batch_op.f('ix_events_event_type'), ['event_type'], unique=False)
        batch_op.create_index(batch_op.f('ix_events_sensor_id'), ['sensor_id'], unique=False)
        batch_op.create_index(batch_op.f('ix_events_timestamp'), ['timestamp'], unique=False)

    # Probe records per device/sensor pair (FKs -> devices, sensors).
    op.create_table('probes',
        sa.Column('id', sa.Integer(), nullable=False),
        sa.Column('device_id', sa.Integer(), nullable=False),
        sa.Column('sensor_id', sa.Integer(), nullable=False),
        sa.Column('ssid', sa.String(length=32), nullable=False),
        sa.Column('rssi', sa.Integer(), nullable=False),
        sa.Column('channel', sa.Integer(), nullable=False),
        sa.Column('timestamp', sa.DateTime(), nullable=False),
        sa.ForeignKeyConstraint(['device_id'], ['devices.id'], ),
        sa.ForeignKeyConstraint(['sensor_id'], ['sensors.id'], ),
        sa.PrimaryKeyConstraint('id')
    )
    with op.batch_alter_table('probes', schema=None) as batch_op:
        batch_op.create_index(batch_op.f('ix_probes_device_id'), ['device_id'], unique=False)
        batch_op.create_index(batch_op.f('ix_probes_sensor_id'), ['sensor_id'], unique=False)
        batch_op.create_index(batch_op.f('ix_probes_ssid'), ['ssid'], unique=False)
        batch_op.create_index(batch_op.f('ix_probes_timestamp'), ['timestamp'], unique=False)

    # Sightings of a device by a sensor (FKs -> devices, sensors).
    op.create_table('sightings',
        sa.Column('id', sa.Integer(), nullable=False),
        sa.Column('device_id', sa.Integer(), nullable=False),
        sa.Column('sensor_id', sa.Integer(), nullable=False),
        sa.Column('rssi', sa.Integer(), nullable=False),
        sa.Column('timestamp', sa.DateTime(), nullable=False),
        sa.ForeignKeyConstraint(['device_id'], ['devices.id'], ),
        sa.ForeignKeyConstraint(['sensor_id'], ['sensors.id'], ),
        sa.PrimaryKeyConstraint('id')
    )
    with op.batch_alter_table('sightings', schema=None) as batch_op:
        batch_op.create_index(batch_op.f('ix_sightings_device_id'), ['device_id'], unique=False)
        batch_op.create_index(batch_op.f('ix_sightings_sensor_id'), ['sensor_id'], unique=False)
        batch_op.create_index(batch_op.f('ix_sightings_timestamp'), ['timestamp'], unique=False)
    # ### end Alembic commands ###
def downgrade():
    """Drop the initial schema in reverse dependency order.

    Child tables (sightings, probes, events, alerts) go first; their
    indexes are dropped before each table, mirroring upgrade().
    """
    # ### commands auto generated by Alembic - please adjust! ###
    with op.batch_alter_table('sightings', schema=None) as batch_op:
        batch_op.drop_index(batch_op.f('ix_sightings_timestamp'))
        batch_op.drop_index(batch_op.f('ix_sightings_sensor_id'))
        batch_op.drop_index(batch_op.f('ix_sightings_device_id'))
    op.drop_table('sightings')

    with op.batch_alter_table('probes', schema=None) as batch_op:
        batch_op.drop_index(batch_op.f('ix_probes_timestamp'))
        batch_op.drop_index(batch_op.f('ix_probes_ssid'))
        batch_op.drop_index(batch_op.f('ix_probes_sensor_id'))
        batch_op.drop_index(batch_op.f('ix_probes_device_id'))
    op.drop_table('probes')

    with op.batch_alter_table('events', schema=None) as batch_op:
        batch_op.drop_index(batch_op.f('ix_events_timestamp'))
        batch_op.drop_index(batch_op.f('ix_events_sensor_id'))
        batch_op.drop_index(batch_op.f('ix_events_event_type'))
    op.drop_table('events')

    with op.batch_alter_table('alerts', schema=None) as batch_op:
        batch_op.drop_index(batch_op.f('ix_alerts_timestamp'))
        batch_op.drop_index(batch_op.f('ix_alerts_sensor_id'))
        batch_op.drop_index(batch_op.f('ix_alerts_alert_type'))
    op.drop_table('alerts')

    with op.batch_alter_table('sensors', schema=None) as batch_op:
        batch_op.drop_index(batch_op.f('ix_sensors_hostname'))
    op.drop_table('sensors')

    with op.batch_alter_table('devices', schema=None) as batch_op:
        batch_op.drop_index(batch_op.f('ix_devices_mac'))
    op.drop_table('devices')
    # ### end Alembic commands ###

View File

@@ -1,6 +1,6 @@
[project]
name = "esp32-web"
version = "0.1.0"
version = "0.1.1"
description = "REST API backend for ESP32 sensor fleet"
requires-python = ">=3.11"
dependencies = [
@@ -23,6 +23,9 @@ dev = [
requires = ["setuptools>=68"]
build-backend = "setuptools.build_meta"
[project.scripts]
esp32-web = "esp32_web.cli:cli"
[tool.setuptools.packages.find]
where = ["src"]

View File

@@ -1,12 +1,19 @@
"""ESP32-Web Flask Application."""
from datetime import datetime, UTC
from flask import Flask
from .config import Config
from .extensions import db, migrate
# Track app start time
_start_time = None
def create_app(config_class=Config):
"""Application factory."""
global _start_time
_start_time = datetime.now(UTC)
app = Flask(__name__)
app.config.from_object(config_class)
@@ -18,10 +25,20 @@ def create_app(config_class=Config):
from .api import bp as api_bp
app.register_blueprint(api_bp, url_prefix='/api/v1')
# Health check
# Health check with uptime
@app.route('/health')
def health():
return {'status': 'ok'}
uptime_seconds = int((datetime.now(UTC) - _start_time).total_seconds())
days, remainder = divmod(uptime_seconds, 86400)
hours, remainder = divmod(remainder, 3600)
minutes, seconds = divmod(remainder, 60)
if days > 0:
uptime_str = f'{days}d{hours}h{minutes}m'
elif hours > 0:
uptime_str = f'{hours}h{minutes}m{seconds}s'
else:
uptime_str = f'{minutes}m{seconds}s'
return {'status': 'ok', 'uptime': uptime_str, 'uptime_seconds': uptime_seconds}
# Start UDP collector in non-testing mode
if not app.config.get('TESTING'):

139
src/esp32_web/cli.py Normal file
View File

@@ -0,0 +1,139 @@
"""CLI commands for managing the application."""
import os
import signal
import sys
from pathlib import Path
import click
PIDFILE = Path('/tmp/esp32-web.pid')
LOGFILE = Path('/tmp/esp32-web.log')
def get_pid() -> int | None:
    """Return the PID from the pidfile if that process is alive, else None.

    Side effect: removes the pidfile when it is unreadable/unparsable or
    names a dead process.

    Fix: kill(pid, 0) raising PermissionError (EPERM) means the process
    EXISTS but belongs to another user; the old code treated that as
    "not running" and deleted a valid pidfile.
    """
    if not PIDFILE.exists():
        return None
    try:
        pid = int(PIDFILE.read_text().strip())
    except (OSError, ValueError):
        # Unreadable or corrupt pidfile: discard it.
        PIDFILE.unlink(missing_ok=True)
        return None
    try:
        # Signal 0 probes for existence without delivering a signal.
        os.kill(pid, 0)
    except ProcessLookupError:
        # Stale pidfile: the process is gone.
        PIDFILE.unlink(missing_ok=True)
        return None
    except PermissionError:
        # Process exists but is owned by another user -> still running.
        pass
    return pid
def is_running() -> bool:
    """Report whether a live server process is recorded in the pidfile."""
    pid = get_pid()
    return pid is not None
@click.group()
def cli():
    """ESP32-Web server management."""
    # Container group only; the subcommands below attach themselves via
    # the @cli.command() decorator.
@cli.command()
@click.option('--port', default=5500, help='Port to listen on')
@click.option('--host', default='0.0.0.0', help='Host to bind to')
@click.option('--debug', is_flag=True, help='Enable debug mode')
def start(port: int, host: str, debug: bool):
    """Start the server as a background daemon.

    Forks: the parent writes the child's PID to the pidfile and exits;
    the child detaches from the terminal and runs Flask.

    Fixes: the duplicated log fd is now closed after dup2 (it leaked),
    and stdin is detached to /dev/null so the daemon never reads the
    controlling terminal.
    """
    if is_running():
        click.echo(f'Server already running (PID {get_pid()})')
        sys.exit(1)

    click.echo(f'Starting server on {host}:{port}...')
    pid = os.fork()
    if pid > 0:
        # Parent - write pidfile and exit
        PIDFILE.write_text(str(pid))
        click.echo(f'Server started (PID {pid})')
        click.echo(f'Logs: {LOGFILE}')
        sys.exit(0)

    # Child - become session leader so the daemon outlives the terminal.
    os.setsid()

    # Detach stdin; redirect stdout/stderr to the logfile.
    devnull_fd = os.open(os.devnull, os.O_RDONLY)
    os.dup2(devnull_fd, sys.stdin.fileno())
    os.close(devnull_fd)
    log_fd = os.open(str(LOGFILE), os.O_WRONLY | os.O_CREAT | os.O_TRUNC)
    os.dup2(log_fd, sys.stdout.fileno())
    os.dup2(log_fd, sys.stderr.fileno())
    os.close(log_fd)  # dup2 made copies; the original fd would otherwise leak

    # Import lazily so unrelated CLI commands don't pay app-startup cost.
    from esp32_web import create_app
    app = create_app()
    app.run(host=host, port=port, debug=debug, use_reloader=False)
@cli.command()
def stop():
    """Stop the server."""
    pid = get_pid()
    if pid is None:
        click.echo('Server not running')
        sys.exit(1)
    click.echo(f'Stopping server (PID {pid})...')
    try:
        os.kill(pid, signal.SIGTERM)
    except ProcessLookupError:
        # Process vanished between the liveness check and the kill.
        click.echo('Server was not running')
    else:
        click.echo('Server stopped')
    finally:
        # Always clear the pidfile once a stop was attempted.
        PIDFILE.unlink(missing_ok=True)
@cli.command()
@click.option('--port', default=5500, help='Port to listen on')
@click.option('--host', default='0.0.0.0', help='Host to bind to')
@click.option('--debug', is_flag=True, help='Enable debug mode')
@click.pass_context
def restart(ctx, port: int, host: str, debug: bool):
    """Restart the server."""
    import time

    # Only stop (and wait for shutdown) when something is actually running.
    if is_running():
        ctx.invoke(stop)
        time.sleep(1)
    ctx.invoke(start, port=port, host=host, debug=debug)
@cli.command()
@click.option('--port', default=5500, help='Port the server listens on')
def status(port: int):
    """Show server status.

    Fix/generalization: the health-check URL previously hard-coded port
    5500 even though `start` accepts --port; status now takes the same
    option (default unchanged, so existing invocations behave identically).
    """
    pid = get_pid()
    if pid:
        click.echo(f'Server running (PID {pid})')
        click.echo(f'Logs: {LOGFILE}')
        # Best-effort health probe; any failure is reported as unreachable.
        try:
            import urllib.request
            with urllib.request.urlopen(f'http://localhost:{port}/health', timeout=2) as resp:
                click.echo(f'Health: {resp.read().decode()}')
        except Exception:
            click.echo('Health: unreachable')
    else:
        click.echo('Server not running')
        sys.exit(1)
@cli.command()
@click.option('-n', '--lines', default=50, help='Number of lines to show')
@click.option('-f', '--follow', is_flag=True, help='Follow log output')
def logs(lines: int, follow: bool):
    """Show server logs."""
    if not LOGFILE.exists():
        click.echo('No logs found')
        sys.exit(1)
    # exec replaces this process with tail, which inherits the terminal.
    if follow:
        argv = ['tail', '-f', str(LOGFILE)]
    else:
        argv = ['tail', '-n', str(lines), str(LOGFILE)]
    os.execvp('tail', argv)
# Allow invoking this module directly (python -m esp32_web.cli) in
# addition to the `esp32-web` console script from pyproject.toml.
if __name__ == '__main__':
    cli()