feat: indie status page MVP -- FastAPI + SQLite

- 8 DB models (services, incidents, monitors, subscribers, etc.)
- Full CRUD API for services, incidents, monitors
- Public status page with live data
- Incident detail page with timeline
- API key authentication
- Uptime monitoring scheduler
- 13 tests passing
- TECHNICAL_DESIGN.md with full spec
This commit is contained in:
IndieStatusBot 2026-04-25 05:00:00 +00:00
commit 902133edd3
4655 changed files with 1342691 additions and 0 deletions

View file

@ -0,0 +1,72 @@
import shlex
import subprocess
import sys
import textwrap
import uuid
from collections.abc import Iterable
from dataclasses import dataclass
from pathlib import Path
from mypy.test.config import test_data_prefix
@dataclass
class PytestResult:
    """Captured outcome of running a data suite through a nested pytest process."""

    # The dedented .test file contents that were fed to pytest.
    input: str
    # File contents after the run; differs from `input` only when pytest
    # was invoked with --update-data.
    input_updated: str  # any updates made by --update-data
    # Raw decoded stdout/stderr of the nested pytest process.
    stdout: str
    stderr: str
def dedent_docstring(s: str) -> str:
    """Remove the common leading indentation from *s*, then any leading whitespace.

    Handy for triple-quoted test-data literals written inline in test methods.
    """
    dedented = textwrap.dedent(s)
    return dedented.lstrip()
def run_pytest_data_suite(
    data_suite: str,
    *,
    data_file_prefix: str = "check",
    pytest_node_prefix: str = "mypy/test/testcheck.py::TypeCheckSuite",
    extra_args: Iterable[str],
    max_attempts: int,
) -> PytestResult:
    """
    Runs a suite of data test cases through pytest until either tests pass
    or until a maximum number of attempts (needed for incremental tests).

    :param data_suite: the actual "suite" i.e. the contents of a .test file
    :param data_file_prefix: prefix for the temporary .test file name
    :param pytest_node_prefix: pytest node id of the suite that discovers the file
    :param extra_args: extra command-line arguments to pass to pytest
    :param max_attempts: number of runs before giving up; must be at least 1
    :raises ValueError: if max_attempts is less than 1
    """
    # Fail fast instead of hitting an UnboundLocalError on `proc` below.
    if max_attempts < 1:
        raise ValueError(f"max_attempts must be >= 1, got {max_attempts}")

    p_test_data = Path(test_data_prefix)
    p_root = p_test_data.parent.parent
    # Unique name so concurrent runs never collide on the same file.
    p = p_test_data / f"{data_file_prefix}-meta-{uuid.uuid4()}.test"
    assert not p.exists()
    data_suite = dedent_docstring(data_suite)

    def print_prefixed(label: str, text: str) -> None:
        # First line carries the label; continuation lines align under it.
        prefix = label
        for line in text.splitlines():
            print(f"{prefix}: {line}")
            prefix = " " * len(prefix)

    try:
        p.write_text(data_suite)

        test_nodeid = f"{pytest_node_prefix}::{p.name}"
        # Note: do not reuse/overwrite the `extra_args` parameter here;
        # build the full command in its own variable.
        cmd_args = [sys.executable, "-m", "pytest", "-n", "0", "-s", *extra_args, test_nodeid]
        cmd = shlex.join(cmd_args)
        # Count down so `i` is the number of attempts remaining after this one.
        for i in range(max_attempts - 1, -1, -1):
            print(f">> {cmd}")
            proc = subprocess.run(cmd_args, capture_output=True, check=False, cwd=p_root)
            if proc.returncode == 0:
                break
            print_prefixed("NESTED PYTEST STDOUT", proc.stdout.decode())
            print_prefixed("NESTED PYTEST STDERR", proc.stderr.decode())
            print(f"Exit code {proc.returncode} ({i} attempts remaining)")

        return PytestResult(
            input=data_suite,
            input_updated=p.read_text(),
            stdout=proc.stdout.decode(),
            stderr=proc.stderr.decode(),
        )
    finally:
        # missing_ok: write_text may have failed before the file was created.
        p.unlink(missing_ok=True)

View file

@ -0,0 +1,47 @@
import io
from mypy.test.helpers import Suite, diff_ranges, render_diff_range
class DiffHelperSuite(Suite):
    """Tests for the expected-vs-actual diff rendering helpers."""

    def test_render_diff_range(self) -> None:
        expected_lines = ["hello", "world"]
        actual_lines = ["goodbye", "world"]
        expected_ranges, actual_ranges = diff_ranges(expected_lines, actual_lines)

        buf = io.StringIO()
        render_diff_range(expected_ranges, expected_lines, output=buf)
        assert buf.getvalue() == " hello (diff)\n world\n"

        buf = io.StringIO()
        render_diff_range(actual_ranges, actual_lines, output=buf)
        assert buf.getvalue() == " goodbye (diff)\n world\n"

        # Longer sequences: unchanged middles are elided with "...".
        expected_lines = ["a", "b", "c", "d", "e", "f", "g", "h", "circle", "i", "j"]
        actual_lines = ["a", "b", "c", "d", "e", "f", "g", "h", "square", "i", "j"]
        expected_ranges, actual_ranges = diff_ranges(expected_lines, actual_lines)

        buf = io.StringIO()
        render_diff_range(expected_ranges, expected_lines, output=buf, indent=0)
        assert buf.getvalue() == "a\nb\nc\n...\nf\ng\nh\ncircle (diff)\ni\nj\n"

        buf = io.StringIO()
        render_diff_range(actual_ranges, actual_lines, output=buf, indent=0)
        assert buf.getvalue() == "a\nb\nc\n...\nf\ng\nh\nsquare (diff)\ni\nj\n"

    def test_diff_ranges(self) -> None:
        # Identical inputs yield matching range pairs.
        identical = (
            [(0, 0), (0, 2), (2, 2), (2, 2)],
            [(0, 0), (0, 2), (2, 2), (2, 2)],
        )
        assert diff_ranges(["hello", "world"], ["hello", "world"]) == identical

        # A differing first element shows up in the leading range.
        differing = (
            [(0, 1), (1, 2), (2, 2), (2, 2)],
            [(0, 1), (1, 2), (2, 2), (2, 2)],
        )
        assert diff_ranges(["hello", "world"], ["goodbye", "world"]) == differing

View file

@ -0,0 +1,69 @@
"""
A "meta test" which tests the parsing of .test files. This is not meant to become exhaustive
but to ensure we maintain a basic level of ergonomics for mypy contributors.
"""
from mypy.test.helpers import Suite
from mypy.test.meta._pytest import PytestResult, run_pytest_data_suite
def _run_pytest(data_suite: str) -> PytestResult:
    """Run *data_suite* through pytest exactly once, with no extra arguments."""
    result = run_pytest_data_suite(data_suite, extra_args=[], max_attempts=1)
    return result
class ParseTestDataSuite(Suite):
    """Checks the diagnostics emitted for malformed .test files."""

    def test_parse_invalid_case(self) -> None:
        result = _run_pytest(
            """
            [case abc]
            s: str
            [case foo-XFAIL]
            s: str
            """
        )
        # Testcase ids may not contain a dash-suffix other than known markers.
        assert "Invalid testcase id 'foo-XFAIL'" in result.stdout

    def test_parse_invalid_section(self) -> None:
        result = _run_pytest(
            """
            [case abc]
            s: str
            [unknownsection]
            abc
            """
        )
        # The reported line number must point at the offending header.
        lineno = result.input.splitlines().index("[unknownsection]") + 1
        assert (
            f".test:{lineno}: Invalid section header [unknownsection] in case 'abc'"
            in result.stdout
        )

    def test_bad_ge_version_check(self) -> None:
        result = _run_pytest(
            """
            [case abc]
            s: str
            [out version>=3.10]
            abc
            """
        )
        assert (
            "version>=3.10 always true since minimum runtime version is (3, 10)"
            in result.stdout
        )

    def test_bad_eq_version_check(self) -> None:
        result = _run_pytest(
            """
            [case abc]
            s: str
            [out version==3.7]
            abc
            """
        )
        assert (
            "version==3.7 always false since minimum runtime version is (3, 10)"
            in result.stdout
        )

View file

@ -0,0 +1,131 @@
"""
A "meta test" which tests the `--update-data` feature for updating .test files.
Updating the expected output, especially when it's in the form of inline (comment) assertions,
can be brittle, which is why we're "meta-testing" here.
"""
from mypy.test.helpers import Suite
from mypy.test.meta._pytest import PytestResult, dedent_docstring, run_pytest_data_suite
def _run_pytest_update_data(data_suite: str) -> PytestResult:
    """
    Run a suite of data test cases through 'pytest --update-data', retrying up to
    three times until the tests pass (retries are needed for incremental tests).
    """
    args = ["--update-data"]
    return run_pytest_data_suite(data_suite, extra_args=args, max_attempts=3)
class UpdateDataSuite(Suite):
    # Meta-test: runs a nested pytest with --update-data and compares the
    # rewritten .test file contents against the expected text, byte for byte.
    def test_update_data(self) -> None:
        """Verify --update-data rewrites expected outputs across many testcase shapes."""
        # Note: We test multiple testcases rather than 'test case per test case'
        # so we could also exercise rewriting multiple testcases at once.
        # NOTE(review): a trailing `\` inside this non-raw triple-quoted string is
        # a Python line continuation (joins the lines); `\\` in the expected text
        # below is a literal backslash — the .test multiline-comment marker.
        result = _run_pytest_update_data(
            """
            [case testCorrect]
            s: str = 42 # E: Incompatible types in assignment (expression has type "int", variable has type "str")
            [case testWrong]
            s: str = 42 # E: wrong error
            [case testXfail-xfail]
            s: str = 42 # E: wrong error
            [case testWrongMultiline]
            s: str = 42 # E: foo \
                # N: bar
            [case testMissingMultiline]
            s: str = 42; i: int = 'foo'
            [case testExtraneous]
            s: str = 'foo' # E: wrong error
            [case testExtraneousMultiline]
            s: str = 'foo' # E: foo \
                # E: bar
            [case testExtraneousMultilineNonError]
            s: str = 'foo' # W: foo \
                # N: bar
            [case testOutCorrect]
            s: str = 42
            [out]
            main:1: error: Incompatible types in assignment (expression has type "int", variable has type "str")
            [case testOutWrong]
            s: str = 42
            [out]
            main:1: error: foobar
            [case testOutWrongIncremental]
            s: str = 42
            [out]
            main:1: error: foobar
            [out2]
            main:1: error: foobar
            [case testWrongMultipleFiles]
            import a, b
            s: str = 42 # E: foo
            [file a.py]
            s1: str = 42 # E: bar
            [file b.py]
            s2: str = 43 # E: baz
            [builtins fixtures/list.pyi]
            """
        )

        # Assert
        expected = dedent_docstring(
            """
            [case testCorrect]
            s: str = 42 # E: Incompatible types in assignment (expression has type "int", variable has type "str")
            [case testWrong]
            s: str = 42 # E: Incompatible types in assignment (expression has type "int", variable has type "str")
            [case testXfail-xfail]
            s: str = 42 # E: wrong error
            [case testWrongMultiline]
            s: str = 42 # E: Incompatible types in assignment (expression has type "int", variable has type "str")
            [case testMissingMultiline]
            s: str = 42; i: int = 'foo' # E: Incompatible types in assignment (expression has type "int", variable has type "str") \\
                # E: Incompatible types in assignment (expression has type "str", variable has type "int")
            [case testExtraneous]
            s: str = 'foo'
            [case testExtraneousMultiline]
            s: str = 'foo'
            [case testExtraneousMultilineNonError]
            s: str = 'foo'
            [case testOutCorrect]
            s: str = 42
            [out]
            main:1: error: Incompatible types in assignment (expression has type "int", variable has type "str")
            [case testOutWrong]
            s: str = 42
            [out]
            main:1: error: Incompatible types in assignment (expression has type "int", variable has type "str")
            [case testOutWrongIncremental]
            s: str = 42
            [out]
            main:1: error: Incompatible types in assignment (expression has type "int", variable has type "str")
            [out2]
            main:1: error: Incompatible types in assignment (expression has type "int", variable has type "str")
            [case testWrongMultipleFiles]
            import a, b
            s: str = 42 # E: Incompatible types in assignment (expression has type "int", variable has type "str")
            [file a.py]
            s1: str = 42 # E: Incompatible types in assignment (expression has type "int", variable has type "str")
            [file b.py]
            s2: str = 43 # E: Incompatible types in assignment (expression has type "int", variable has type "str")
            [builtins fixtures/list.pyi]
            """
        )
        assert result.input_updated == expected