feat: indie status page MVP -- FastAPI + SQLite

- 8 DB models (services, incidents, monitors, subscribers, etc.)
- Full CRUD API for services, incidents, monitors
- Public status page with live data
- Incident detail page with timeline
- API key authentication
- Uptime monitoring scheduler
- 13 tests passing
- TECHNICAL_DESIGN.md with full spec
This commit is contained in:
IndieStatusBot 2026-04-25 05:00:00 +00:00
commit 902133edd3
4655 changed files with 1342691 additions and 0 deletions

View file

@ -0,0 +1,13 @@
from __future__ import annotations
import os
# Root of the mypy repository. MYPY_TEST_PREFIX overrides the automatic
# detection, which is useful when tests run from an unusual location.
provided_prefix = os.getenv("MYPY_TEST_PREFIX", None)
if provided_prefix:
    PREFIX = provided_prefix
else:
    # This file lives two directory levels below the repo root
    # (<root>/mypyc/test/), so strip three path components from the file path.
    PREFIX = os.path.dirname(os.path.dirname(os.path.dirname(os.path.realpath(__file__))))

# Location of test data files such as test case descriptions.
test_data_prefix = os.path.join(PREFIX, "mypyc", "test-data")

View file

@ -0,0 +1,236 @@
"""Build and cache librt for use in tests.
This module provides a way to build librt extension modules once and cache
them across test runs, and across different test cases in a single run. The
cache is invalidated when source files or details of the build environment change.
Note: Tests must run in a subprocess to use the cached librt, since importing
this module also triggers the import of the regular installed librt.
Usage:
from mypyc.test.librt_cache import get_librt_path, run_with_librt
# Get path to built librt (builds if needed)
path = get_librt_path()
# Run a test file in subprocess with built librt
result = run_with_librt("test_librt.py")
"""
from __future__ import annotations
import hashlib
import os
import shutil
import subprocess
import sys
import sysconfig
from typing import Any
import filelock
from mypyc.build import LIBRT_MODULES, get_cflags, include_dir
from mypyc.common import RUNTIME_C_FILES
from mypyc.test.config import PREFIX
def _librt_build_hash(experimental: bool, opt_level: str) -> str:
    """Compute hash for librt build, including sources and build environment.

    The feed order below is part of the cache-key format: changing it would
    invalidate every existing cache entry, so keep it stable.
    """
    # Import lazily to ensure mypyc.build has ensured that distutils is correctly set up
    from distutils import ccompiler

    hasher = hashlib.sha256()

    def feed(data: bytes) -> None:
        hasher.update(data)

    def feed_file(path: str) -> None:
        # Hash raw file bytes so any source edit changes the key.
        with open(path, "rb") as f:
            hasher.update(f.read())

    # Experimental-features flag and optimization level.
    feed(b"exp" if experimental else b"noexp")
    feed(f"opt={opt_level}".encode())
    # Full Python version string (includes git hash for dev builds).
    feed(sys.version.encode())
    # Debug build status (gettotalrefcount only exists in debug builds).
    feed(b"debug" if hasattr(sys, "gettotalrefcount") else b"release")
    # Free-threading status (Python 3.13+).
    if sysconfig.get_config_var("Py_GIL_DISABLED"):
        feed(b"freethreaded")
    else:
        feed(b"gil")
    # Compiler type (e.g., "unix" or "msvc").
    compiler: Any = ccompiler.new_compiler()
    feed(compiler.compiler_type.encode())
    # Environment variables that affect C compilation.
    for var in ("CC", "CXX", "CFLAGS", "CPPFLAGS", "LDFLAGS"):
        feed(f"{var}={os.environ.get(var, '')}".encode())
    # Shared runtime C file contents.
    for name in RUNTIME_C_FILES:
        feed(name.encode() + b"|")
        feed_file(os.path.join(include_dir(), name))
    # Per-module librt source file contents.
    for mod, files, extra, includes in LIBRT_MODULES:
        for fname in files + extra:
            feed(fname.encode() + b"|")
            feed_file(os.path.join(include_dir(), fname))
    return hasher.hexdigest()[:16]
def _generate_setup_py(build_dir: str, experimental: bool, opt_level: str) -> str:
    """Generate setup.py content for building librt directly.

    We inline LIBRT_MODULES/RUNTIME_C_FILES/include_dir/cflags values to avoid
    importing mypyc.build, which recursively imports lots of things.

    Args:
        build_dir: Directory the generated script copies sources into and builds in.
        experimental: Whether experimental features are enabled (affects cflags).
        opt_level: C optimization level ("0".."3") forwarded to get_cflags().

    Returns:
        Full text of a self-contained setup.py script.
    """
    lib_rt_dir = include_dir()
    # Get compiler flags using the shared helper
    cflags = get_cflags(opt_level=opt_level, experimental_features=experimental)
    # Serialize values to inline in generated setup.py
    librt_modules_repr = repr(
        [(m.module, m.c_files, m.other_files, m.include_dirs) for m in LIBRT_MODULES]
    )
    runtime_files_repr = repr(RUNTIME_C_FILES)
    cflags_repr = repr(cflags)
    # NOTE: everything below is the *generated* script, not code executed here.
    # It must only import setuptools and the adjacent build_setup helper;
    # all other values are baked in via the repr() strings above.
    return f"""\
import os
from setuptools import setup, Extension
import build_setup  # noqa: F401  # Monkey-patches compiler for per-file SIMD flags
build_dir = {build_dir!r}
lib_rt_dir = {lib_rt_dir!r}
RUNTIME_C_FILES = {runtime_files_repr}
LIBRT_MODULES = {librt_modules_repr}
CFLAGS = {cflags_repr}
def write_file(path, contents):
    os.makedirs(os.path.dirname(path), exist_ok=True)
    with open(path, "wb") as f:
        f.write(contents)
# Copy runtime C files
for name in RUNTIME_C_FILES:
    src = os.path.join(lib_rt_dir, name)
    dst = os.path.join(build_dir, name)
    with open(src, "rb") as f:
        write_file(dst, f.read())
# Build extensions for each librt module
extensions = []
for mod, file_names, extra_files, includes in LIBRT_MODULES:
    # Copy source files
    for fname in file_names + extra_files:
        src = os.path.join(lib_rt_dir, fname)
        dst = os.path.join(build_dir, fname)
        with open(src, "rb") as f:
            write_file(dst, f.read())
    extensions.append(Extension(
        mod,
        sources=[os.path.join(build_dir, f) for f in file_names + RUNTIME_C_FILES],
        include_dirs=[lib_rt_dir] + [os.path.join(lib_rt_dir, d) for d in includes],
        extra_compile_args=CFLAGS,
    ))
setup(name='librt_cached', ext_modules=extensions)
"""
def get_librt_path(experimental: bool = True, opt_level: str = "0") -> str:
    """Get path to librt built from the repository, building and caching if necessary.

    Uses build/librt-cache/ under the repo root (gitignored). The cache is
    keyed by a hash of sources and build environment, so it auto-invalidates
    when relevant factors change.

    Safe to call from multiple parallel pytest workers - uses file locking.

    Args:
        experimental: Whether to enable experimental features.
        opt_level: Optimization level ("0".."3") used when building librt.

    Returns:
        Path to directory containing built librt modules.
    """
    # Use build/librt-cache/ under the repo root (gitignored)
    cache_root = os.path.join(PREFIX, "build", "librt-cache")
    build_hash = _librt_build_hash(experimental, opt_level)
    build_dir = os.path.join(cache_root, f"librt-{build_hash}")
    lock_file = os.path.join(cache_root, f"librt-{build_hash}.lock")
    # The marker file is written only after a fully successful build; its
    # presence means build_dir is complete and can be reused as-is.
    marker = os.path.join(build_dir, ".complete")
    os.makedirs(cache_root, exist_ok=True)
    with filelock.FileLock(lock_file, timeout=300):  # 5 min timeout
        if os.path.exists(marker):
            return build_dir
        # Clean up any partial build (dir present but no marker means a
        # previous build died midway).
        if os.path.exists(build_dir):
            shutil.rmtree(build_dir)
        os.makedirs(build_dir)
        # Create librt package directory for --inplace to copy .so files into
        librt_pkg = os.path.join(build_dir, "librt")
        os.makedirs(librt_pkg)
        with open(os.path.join(librt_pkg, "__init__.py"), "w") as f:
            pass
        # Copy build_setup.py for per-file SIMD compiler flags
        build_setup_src = os.path.join(
            os.path.dirname(os.path.dirname(os.path.abspath(__file__))), "build_setup.py"
        )
        build_setup_dst = os.path.join(build_dir, "build_setup.py")
        shutil.copy(build_setup_src, build_setup_dst)
        # Write setup.py
        setup_py = os.path.join(build_dir, "setup.py")
        with open(setup_py, "w") as f:
            f.write(_generate_setup_py(build_dir, experimental, opt_level))
        # Build (parallel builds don't work well because multiple extensions
        # share the same runtime C files, causing race conditions)
        result = subprocess.run(
            [sys.executable, setup_py, "build_ext", "--inplace"],
            cwd=build_dir,
            capture_output=True,
            text=True,
        )
        if result.returncode != 0:
            raise RuntimeError(f"librt build failed:\n{result.stdout}\n{result.stderr}")
        # Mark complete
        with open(marker, "w") as f:
            f.write("ok")
    return build_dir
def run_with_librt(
    file_path: str, experimental: bool = True, check: bool = True, opt_level: str = "0"
) -> subprocess.CompletedProcess[str]:
    """Run a Python file in a subprocess with built librt available.

    The file runs in a fresh Python process where the built librt is at the
    front of sys.path, so it shadows any installed librt.

    Args:
        file_path: Path to Python file to execute.
        experimental: Whether to use experimental features.
        check: If True, raise CalledProcessError on non-zero exit.
        opt_level: Optimization level ("0".."3") used when building librt.

    Returns:
        CompletedProcess with stdout, stderr, and returncode.
    """
    librt_path = get_librt_path(experimental, opt_level=opt_level)
    env = os.environ.copy()
    # Put the built librt first on PYTHONPATH, keeping any existing entries.
    path_entries = [librt_path]
    if env.get("PYTHONPATH"):
        path_entries.append(env["PYTHONPATH"])
    env["PYTHONPATH"] = os.pathsep.join(path_entries)
    return subprocess.run(
        [sys.executable, file_path], capture_output=True, text=True, check=check, env=env
    )

View file

@ -0,0 +1,46 @@
"""Test cases for inferring always defined attributes in classes."""
from __future__ import annotations
import os.path
from mypy.errors import CompileError
from mypy.test.config import test_temp_dir
from mypy.test.data import DataDrivenTestCase
from mypyc.test.testutil import (
ICODE_GEN_BUILTINS,
MypycDataSuite,
assert_test_output,
build_ir_for_single_file2,
infer_ir_build_options_from_test_name,
use_custom_builtins,
)
files = ["alwaysdefined.test"]


class TestAlwaysDefined(MypycDataSuite):
    files = files
    base_path = test_temp_dir

    def run_case(self, testcase: DataDrivenTestCase) -> None:
        """Perform a runtime checking transformation test case."""
        options = infer_ir_build_options_from_test_name(testcase.name)
        if options is None:
            # Test case is skipped for this configuration.
            return
        with use_custom_builtins(os.path.join(self.data_prefix, ICODE_GEN_BUILTINS), testcase):
            try:
                module_ir = build_ir_for_single_file2(testcase.input, options)[0]
            except CompileError as e:
                actual = e.messages
            else:
                # One line per public class: "<name>: [attr, attr, ...]",
                # with the attribute names sorted for determinism.
                actual = [
                    "{}: [{}]".format(
                        class_ir.name, ", ".join(sorted(class_ir._always_initialized_attrs))
                    )
                    for class_ir in module_ir.classes
                    if not class_ir.name.startswith("_")
                ]
            assert_test_output(testcase, actual, "Invalid test output", testcase.output)

View file

@ -0,0 +1,77 @@
"""Test runner for data-flow analysis test cases."""
from __future__ import annotations
import os.path
from mypy.errors import CompileError
from mypy.test.config import test_temp_dir
from mypy.test.data import DataDrivenTestCase
from mypyc.analysis import dataflow
from mypyc.common import TOP_LEVEL_NAME
from mypyc.ir.func_ir import all_values
from mypyc.ir.ops import Value
from mypyc.ir.pprint import format_func, generate_names_for_ir
from mypyc.test.testutil import (
ICODE_GEN_BUILTINS,
MypycDataSuite,
assert_test_output,
build_ir_for_single_file,
use_custom_builtins,
)
from mypyc.transform import exceptions
files = ["analysis.test"]


class TestAnalysis(MypycDataSuite):
    """Data-driven tests that print per-op before/after sets of an analysis."""

    files = files
    base_path = test_temp_dir
    optional_out = True

    def run_case(self, testcase: DataDrivenTestCase) -> None:
        """Perform a data-flow analysis test case."""
        with use_custom_builtins(os.path.join(self.data_prefix, ICODE_GEN_BUILTINS), testcase):
            try:
                ir = build_ir_for_single_file(testcase.input)
            except CompileError as e:
                actual = e.messages
            else:
                actual = []
                for fn in ir:
                    # Skip the module top level unless the test opts in by name.
                    if fn.name == TOP_LEVEL_NAME and not testcase.name.endswith("_toplevel"):
                        continue
                    exceptions.insert_exception_handling(fn, True)
                    actual.extend(format_func(fn))
                    cfg = dataflow.get_cfg(fn.blocks)
                    args: set[Value] = set(fn.arg_regs)
                    name = testcase.name
                    # The test-name suffix selects which analysis to run.
                    if name.endswith("_MaybeDefined"):
                        # Forward, maybe
                        analysis_result = dataflow.analyze_maybe_defined_regs(fn.blocks, cfg, args)
                    elif name.endswith("_Liveness"):
                        # Backward, maybe
                        analysis_result = dataflow.analyze_live_regs(fn.blocks, cfg)
                    elif name.endswith("_MustDefined"):
                        # Forward, must
                        analysis_result = dataflow.analyze_must_defined_regs(
                            fn.blocks, cfg, args, regs=all_values(fn.arg_regs, fn.blocks)
                        )
                    elif name.endswith("_BorrowedArgument"):
                        # Forward, must
                        analysis_result = dataflow.analyze_borrowed_arguments(fn.blocks, cfg, args)
                    else:
                        assert False, "No recognized _AnalysisName suffix in test case"
                    names = generate_names_for_ir(fn.arg_regs, fn.blocks)
                    # Emit one "(block-label, op-index) {before} {after}" row per
                    # op, sorted by position for deterministic output.
                    for key in sorted(
                        analysis_result.before.keys(), key=lambda x: (x[0].label, x[1])
                    ):
                        pre = ", ".join(sorted(names[reg] for reg in analysis_result.before[key]))
                        post = ", ".join(sorted(names[reg] for reg in analysis_result.after[key]))
                        actual.append(
                            "%-8s %-23s %s" % ((key[0].label, key[1]), "{%s}" % pre, "{%s}" % post)
                        )
            assert_test_output(testcase, actual, "Invalid source code output")

View file

@ -0,0 +1,71 @@
"""Test cases for annotating source code to highlight inefficiencies."""
from __future__ import annotations
import os.path
from mypy.errors import CompileError
from mypy.test.config import test_temp_dir
from mypy.test.data import DataDrivenTestCase
from mypyc.annotate import generate_annotations, get_max_prio
from mypyc.ir.pprint import format_func
from mypyc.test.testutil import (
ICODE_GEN_BUILTINS,
MypycDataSuite,
assert_test_output,
build_ir_for_single_file2,
infer_ir_build_options_from_test_name,
remove_comment_lines,
use_custom_builtins,
)
files = ["annotate-basic.test"]


class TestReport(MypycDataSuite):
    files = files
    base_path = test_temp_dir
    optional_out = True

    def run_case(self, testcase: DataDrivenTestCase) -> None:
        """Perform a runtime checking transformation test case."""
        options = infer_ir_build_options_from_test_name(testcase.name)
        if options is None:
            # Skipped test case
            return
        with use_custom_builtins(os.path.join(self.data_prefix, ICODE_GEN_BUILTINS), testcase):
            expected_output = remove_comment_lines(testcase.output)
            # Parse "# A: <message>" comments.
            for i, line in enumerate(testcase.input):
                if "# A:" in line:
                    msg = line.rpartition("# A:")[2].strip()
                    expected_output.append(f"main:{i + 1}: {msg}")
            # ir stays None if the build raises, so the failure handler below
            # knows whether there is IR to dump.
            ir = None
            try:
                ir, tree, type_map, mapper = build_ir_for_single_file2(testcase.input, options)
            except CompileError as e:
                actual = e.messages
            else:
                annotations = generate_annotations("native.py", tree, ir, type_map, mapper)
                actual = []
                # One output line per annotated source line, in line-number order.
                for line_num, line_anns in sorted(
                    annotations.annotations.items(), key=lambda it: it[0]
                ):
                    # Keep only the highest-priority annotations for the line.
                    anns = get_max_prio(line_anns)
                    str_anns = [a.message for a in anns]
                    s = " ".join(str_anns)
                    actual.append(f"main:{line_num}: {s}")
            try:
                assert_test_output(testcase, actual, "Invalid source code output", expected_output)
            except BaseException:
                # On failure, dump the generated IR to help with debugging.
                if ir:
                    print("Generated IR:\n")
                    for fn in ir.functions:
                        if fn.name == "__top_level__":
                            continue
                        for s in format_func(fn):
                            print(s)
                raise

View file

@ -0,0 +1,55 @@
"""Test cases for capsule dependency analysis."""
from __future__ import annotations
import os.path
from mypy.errors import CompileError
from mypy.test.config import test_temp_dir
from mypy.test.data import DataDrivenTestCase
from mypyc.analysis.capsule_deps import find_class_dependencies, find_implicit_op_dependencies
from mypyc.options import CompilerOptions
from mypyc.test.testutil import (
ICODE_GEN_BUILTINS,
MypycDataSuite,
assert_test_output,
build_ir_for_single_file2,
infer_ir_build_options_from_test_name,
use_custom_builtins,
)
from mypyc.transform.lower import lower_ir
files = ["capsule-deps.test"]


class TestCapsuleDeps(MypycDataSuite):
    files = files
    base_path = test_temp_dir

    def run_case(self, testcase: DataDrivenTestCase) -> None:
        """Collect and check capsule dependencies of functions and classes."""
        options = infer_ir_build_options_from_test_name(testcase.name)
        if options is None:
            # Skipped test case
            return
        with use_custom_builtins(os.path.join(self.data_prefix, ICODE_GEN_BUILTINS), testcase):
            try:
                module_ir, _, _, _ = build_ir_for_single_file2(testcase.input, options)
            except CompileError as e:
                actual = e.messages
            else:
                # CompilerOptions is loop-invariant; the original re-created it
                # on every iteration of the functions loop.
                compiler_options = CompilerOptions()
                all_deps: set[str] = set()
                for fn in module_ir.functions:
                    # Lower first: implicit op dependencies are found in lowered IR.
                    lower_ir(fn, compiler_options)
                    deps = find_implicit_op_dependencies(fn)
                    if deps:
                        all_deps.update(repr(dep) for dep in deps)
                for cl in module_ir.classes:
                    deps = find_class_dependencies(cl)
                    if deps:
                        all_deps.update(repr(dep) for dep in deps)
                actual = sorted(all_deps) if all_deps else ["No deps"]
            assert_test_output(testcase, actual, "Invalid test output", testcase.output)

View file

@ -0,0 +1,97 @@
"""Test that C functions used in primitives are declared in a header such as CPy.h."""
from __future__ import annotations
import glob
import os
import re
import unittest
from mypyc.ir.deps import SourceDep
from mypyc.ir.ops import PrimitiveDescription
from mypyc.primitives import (
bytearray_ops,
bytes_ops,
dict_ops,
exc_ops,
float_ops,
generic_ops,
int_ops,
librt_strings_ops,
librt_vecs_ops,
list_ops,
misc_ops,
registry,
set_ops,
str_ops,
tuple_ops,
weakref_ops,
)
class TestHeaderInclusion(unittest.TestCase):
    def test_primitives_included_in_header(self) -> None:
        """Verify every CPy* function used by primitives is declared in a header.

        Reads CPy.h and pythonsupport.h (plus any extra headers reachable via
        SourceDep dependencies) and checks each c_function_name collected from
        the primitive registries and modules against that combined text.
        """
        base_dir = os.path.join(os.path.dirname(__file__), "..", "lib-rt")
        with open(os.path.join(base_dir, "CPy.h")) as f:
            header = f.read()
        with open(os.path.join(base_dir, "pythonsupport.h")) as f:
            header += f.read()

        def check_name(name: str) -> None:
            # Only CPy-prefixed names are expected to be declared in our headers.
            if name.startswith("CPy"):
                assert re.search(
                    rf"\b{name}\b", header
                ), f'"{name}" is used in mypyc.primitives but not declared in CPy.h'

        all_ops = []
        for values in [
            registry.method_call_ops.values(),
            registry.binary_ops.values(),
            registry.unary_ops.values(),
            registry.function_ops.values(),
        ]:
            for ops in values:
                all_ops.extend(ops)
        for module in [
            bytes_ops,
            str_ops,
            dict_ops,
            list_ops,
            bytearray_ops,
            generic_ops,
            int_ops,
            misc_ops,
            tuple_ops,
            exc_ops,
            float_ops,
            set_ops,
            weakref_ops,
            librt_vecs_ops,
            librt_strings_ops,
        ]:
            for name in dir(module):
                val = getattr(module, name, None)
                if isinstance(val, PrimitiveDescription):
                    all_ops.append(val)
        # Find additional headers via extra C source file dependencies.
        for op in all_ops:
            if op.dependencies:
                for dep in op.dependencies:
                    if isinstance(dep, SourceDep):
                        header_fnam = os.path.join(base_dir, dep.get_header())
                        if os.path.isfile(header_fnam):
                            # Fix: header_fnam already includes base_dir, so it
                            # must not be joined with base_dir again -- the old
                            # os.path.join(base_dir, header_fnam) doubled the
                            # path whenever base_dir was relative.
                            with open(header_fnam) as f:
                                header += f.read()
        for op in all_ops:
            if op.c_function_name is not None:
                check_name(op.c_function_name)
        primitives_path = os.path.join(os.path.dirname(__file__), "..", "primitives")
        for fnam in glob.glob(f"{primitives_path}/*.py"):
            with open(fnam) as f:
                content = f.read()
            for name in re.findall(r'c_function_name=["\'](CPy[A-Z_a-z0-9]+)', content):
                check_name(name)

View file

@ -0,0 +1,82 @@
"""Test cases for invoking mypyc on the command line.
These are slow -- do not add test cases unless you have a very good reason to do so.
"""
from __future__ import annotations
import glob
import os
import os.path
import re
import subprocess
import sys
from mypy.test.config import test_temp_dir
from mypy.test.data import DataDrivenTestCase
from mypy.test.helpers import normalize_error_messages
from mypyc.test.testutil import MypycDataSuite, assert_test_output
files = ["commandline.test"]

# Repo root, put on PYTHONPATH so the compiled program can import mypyc support code.
base_path = os.path.join(os.path.dirname(__file__), "..", "..")

python3_path = sys.executable


class TestCommandLine(MypycDataSuite):
    files = files
    base_path = test_temp_dir
    optional_out = True

    def run_case(self, testcase: DataDrivenTestCase) -> None:
        """Compile a test program via `python -m mypyc` and run the result."""
        # Parse options from test case description (arguments must not have spaces)
        text = "\n".join(testcase.input)
        m = re.search(r"# *cmd: *(.*)", text)
        assert m is not None, 'Test case missing "# cmd: <files>" section'
        args = m.group(1).split()
        # Write main program to run (not compiled)
        program = "_%s.py" % testcase.name
        program_path = os.path.join(test_temp_dir, program)
        with open(program_path, "w") as f:
            f.write(text)
        env = os.environ.copy()
        env["PYTHONPATH"] = base_path
        out = b""
        try:
            # Compile program
            cmd = subprocess.run(
                [sys.executable, "-m", "mypyc", *args],
                stdout=subprocess.PIPE,
                stderr=subprocess.STDOUT,
                cwd="tmp",
                env=env,
            )
            if "ErrorOutput" in testcase.name or cmd.returncode != 0:
                out += cmd.stdout
            elif "WarningOutput" in testcase.name:
                # Strip out setuptools build related output since we're only
                # interested in the messages emitted during compilation.
                messages, _, _ = cmd.stdout.partition(b"running build_ext")
                out += messages
            if cmd.returncode == 0:
                # Run main program
                out += subprocess.check_output([python3_path, program], cwd="tmp")
        finally:
            # Always delete compiled extension modules so later test cases
            # don't pick up stale builds.
            suffix = "pyd" if sys.platform == "win32" else "so"
            so_paths = glob.glob(f"tmp/**/*.{suffix}", recursive=True)
            for path in so_paths:
                os.remove(path)
        # Strip out 'tmp/' from error message paths in the testcase output,
        # due to a mismatch between this test and mypy's test suite.
        expected = [x.replace("tmp/", "") for x in testcase.output]
        # Verify output
        actual = normalize_error_messages(out.decode().splitlines())
        assert_test_output(testcase, actual, "Invalid output", expected=expected)

View file

@ -0,0 +1,164 @@
from __future__ import annotations
import unittest
from mypyc.codegen.emit import Emitter, EmitterContext
from mypyc.common import HAVE_IMMORTAL
from mypyc.ir.class_ir import ClassIR
from mypyc.ir.ops import BasicBlock, Register, Value
from mypyc.ir.rtypes import (
RInstance,
RTuple,
RUnion,
bool_rprimitive,
int_rprimitive,
list_rprimitive,
none_rprimitive,
object_rprimitive,
str_rprimitive,
)
from mypyc.irbuild.vtable import compute_vtable
from mypyc.namegen import NameGenerator
class TestEmitter(unittest.TestCase):
    """Unit tests for low-level C code emission helpers in mypyc.codegen.emit."""

    def setUp(self) -> None:
        # Fixtures: one int register, an emitter, and a minimal class IR
        # ("A" in module "mod") used for RInstance inc/dec ref tests.
        self.n = Register(int_rprimitive, "n")
        self.context = EmitterContext(NameGenerator([["mod"]]), True)
        self.emitter = Emitter(self.context, {})
        ir = ClassIR("A", "mod")
        compute_vtable(ir)
        ir.mro = [ir]
        self.instance_a = RInstance(ir)

    def test_label(self) -> None:
        assert self.emitter.label(BasicBlock(4)) == "CPyL4"

    def test_reg(self) -> None:
        names: dict[Value, str] = {self.n: "n"}
        emitter = Emitter(self.context, names)
        assert emitter.reg(self.n) == "cpy_r_n"

    def test_object_annotation(self) -> None:
        # Short annotations fit on one line; long ones are wrapped.
        assert self.emitter.object_annotation("hello, world", "line;") == " /* 'hello, world' */"
        assert self.emitter.object_annotation(list(range(30)), "line;") == """\
 /* [0, 1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11, 12, 13, 14, 15, 16, 17, 18, 19, 20, 21, 22,
23, 24, 25, 26, 27, 28, 29] */"""

    def test_emit_line(self) -> None:
        # emit_line tracks brace nesting and indents accordingly.
        emitter = self.emitter
        emitter.emit_line("line;")
        emitter.emit_line("a {")
        emitter.emit_line("f();")
        emitter.emit_line("}")
        assert emitter.fragments == ["line;\n", "a {\n", "    f();\n", "}\n"]
        emitter = Emitter(self.context, {})
        emitter.emit_line("CPyStatics[0];", ann="hello, world")
        emitter.emit_line("CPyStatics[1];", ann=list(range(30)))
        assert emitter.fragments[0] == "CPyStatics[0]; /* 'hello, world' */\n"
        assert emitter.fragments[1] == """\
CPyStatics[1]; /* [0, 1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11, 12, 13, 14, 15, 16, 17, 18, 19, 20,
21, 22, 23, 24, 25, 26, 27, 28, 29] */\n"""

    def test_emit_undefined_value_for_simple_type(self) -> None:
        emitter = self.emitter
        assert emitter.c_undefined_value(int_rprimitive) == "CPY_INT_TAG"
        assert emitter.c_undefined_value(str_rprimitive) == "NULL"
        assert emitter.c_undefined_value(bool_rprimitive) == "2"

    def test_emit_undefined_value_for_tuple(self) -> None:
        # Tuple undefined values are compound literals built from the
        # element types' undefined values (including nested tuples).
        emitter = self.emitter
        assert (
            emitter.c_undefined_value(RTuple([str_rprimitive, int_rprimitive, bool_rprimitive]))
            == "(tuple_T3OIC) { NULL, CPY_INT_TAG, 2 }"
        )
        assert emitter.c_undefined_value(RTuple([str_rprimitive])) == "(tuple_T1O) { NULL }"
        assert (
            emitter.c_undefined_value(RTuple([RTuple([str_rprimitive]), bool_rprimitive]))
            == "(tuple_T2T1OC) { { NULL }, 2 }"
        )

    def test_emit_inc_ref_object(self) -> None:
        self.emitter.emit_inc_ref("x", object_rprimitive)
        self.assert_output("CPy_INCREF(x);\n")

    def test_emit_inc_ref_int(self) -> None:
        self.emitter.emit_inc_ref("x", int_rprimitive)
        self.assert_output("CPyTagged_INCREF(x);\n")

    def test_emit_inc_ref_rare(self) -> None:
        # rare=True selects the out-of-line helper for tagged ints.
        self.emitter.emit_inc_ref("x", object_rprimitive, rare=True)
        self.assert_output("CPy_INCREF(x);\n")
        self.emitter.emit_inc_ref("x", int_rprimitive, rare=True)
        self.assert_output("CPyTagged_IncRef(x);\n")

    def test_emit_inc_ref_list(self) -> None:
        # On builds with immortal objects, lists use the NO_IMM variant.
        self.emitter.emit_inc_ref("x", list_rprimitive)
        if HAVE_IMMORTAL:
            self.assert_output("CPy_INCREF_NO_IMM(x);\n")
        else:
            self.assert_output("CPy_INCREF(x);\n")

    def test_emit_inc_ref_instance(self) -> None:
        self.emitter.emit_inc_ref("x", self.instance_a)
        if HAVE_IMMORTAL:
            self.assert_output("CPy_INCREF_NO_IMM(x);\n")
        else:
            self.assert_output("CPy_INCREF(x);\n")

    def test_emit_inc_ref_optional(self) -> None:
        # Unions fall back to the generic object incref.
        optional = RUnion([self.instance_a, none_rprimitive])
        self.emitter.emit_inc_ref("o", optional)
        self.assert_output("CPy_INCREF(o);\n")

    def test_emit_dec_ref_object(self) -> None:
        self.emitter.emit_dec_ref("x", object_rprimitive)
        self.assert_output("CPy_DECREF(x);\n")
        self.emitter.emit_dec_ref("x", object_rprimitive, is_xdec=True)
        self.assert_output("CPy_XDECREF(x);\n")

    def test_emit_dec_ref_int(self) -> None:
        self.emitter.emit_dec_ref("x", int_rprimitive)
        self.assert_output("CPyTagged_DECREF(x);\n")
        self.emitter.emit_dec_ref("x", int_rprimitive, is_xdec=True)
        self.assert_output("CPyTagged_XDECREF(x);\n")

    def test_emit_dec_ref_rare(self) -> None:
        self.emitter.emit_dec_ref("x", object_rprimitive, rare=True)
        self.assert_output("CPy_DecRef(x);\n")
        self.emitter.emit_dec_ref("x", int_rprimitive, rare=True)
        self.assert_output("CPyTagged_DecRef(x);\n")

    def test_emit_dec_ref_list(self) -> None:
        self.emitter.emit_dec_ref("x", list_rprimitive)
        if HAVE_IMMORTAL:
            self.assert_output("CPy_DECREF_NO_IMM(x);\n")
        else:
            self.assert_output("CPy_DECREF(x);\n")
        self.emitter.emit_dec_ref("x", list_rprimitive, is_xdec=True)
        if HAVE_IMMORTAL:
            self.assert_output("CPy_XDECREF_NO_IMM(x);\n")
        else:
            self.assert_output("CPy_XDECREF(x);\n")

    def test_emit_dec_ref_instance(self) -> None:
        self.emitter.emit_dec_ref("x", self.instance_a)
        if HAVE_IMMORTAL:
            self.assert_output("CPy_DECREF_NO_IMM(x);\n")
        else:
            self.assert_output("CPy_DECREF(x);\n")
        self.emitter.emit_dec_ref("x", self.instance_a, is_xdec=True)
        if HAVE_IMMORTAL:
            self.assert_output("CPy_XDECREF_NO_IMM(x);\n")
        else:
            self.assert_output("CPy_XDECREF(x);\n")

    def test_emit_dec_ref_optional(self) -> None:
        optional = RUnion([self.instance_a, none_rprimitive])
        self.emitter.emit_dec_ref("o", optional)
        self.assert_output("CPy_DECREF(o);\n")

    def assert_output(self, expected: str) -> None:
        # Check accumulated fragments, then reset them for the next assertion.
        assert "".join(self.emitter.fragments) == expected
        self.emitter.fragments = []

View file

@ -0,0 +1,35 @@
from __future__ import annotations
import unittest
from mypyc.codegen.emitclass import getter_name, setter_name, slot_key
from mypyc.ir.class_ir import ClassIR
from mypyc.namegen import NameGenerator
class TestEmitClass(unittest.TestCase):
    """Unit tests for naming/ordering helpers in mypyc.codegen.emitclass."""

    def test_slot_key(self) -> None:
        attrs = ["__add__", "__radd__", "__rshift__", "__rrshift__", "__setitem__", "__delitem__"]
        ordered = sorted(attrs, key=slot_key)
        # __delitem__ and reverse methods should come last.
        assert ordered == [
            "__add__",
            "__rshift__",
            "__setitem__",
            "__delitem__",
            "__radd__",
            "__rrshift__",
        ]

    def test_setter_name(self) -> None:
        class_ir = ClassIR(module_name="testing", name="SomeClass")
        names = NameGenerator([["mod"]])
        # This should never be `setup`, as it will conflict with the class `setup`
        assert setter_name(class_ir, "up", names) == "testing___SomeClass_set_up"

    def test_getter_name(self) -> None:
        class_ir = ClassIR(module_name="testing", name="SomeClass")
        names = NameGenerator([["mod"]])
        assert getter_name(class_ir, "down", names) == "testing___SomeClass_get_down"

File diff suppressed because it is too large Load diff

View file

@ -0,0 +1,60 @@
from __future__ import annotations
import unittest
from mypy.test.helpers import assert_string_arrays_equal
from mypyc.codegen.emit import Emitter, EmitterContext, ReturnHandler
from mypyc.codegen.emitwrapper import generate_arg_check
from mypyc.ir.rtypes import int_rprimitive, list_rprimitive
from mypyc.namegen import NameGenerator
class TestArgCheck(unittest.TestCase):
    """Tests for the C code emitted to check and convert wrapper arguments."""

    def setUp(self) -> None:
        self.context = EmitterContext(NameGenerator([["mod"]]), True)

    def test_check_list(self) -> None:
        emitter = Emitter(self.context)
        generate_arg_check("x", list_rprimitive, emitter, ReturnHandler("NULL"))
        lines = emitter.fragments
        self.assert_lines(
            [
                "PyObject *arg_x;",
                "if (likely(PyList_Check(obj_x)))",
                " arg_x = obj_x;",
                "else {",
                ' CPy_TypeError("list", obj_x);',
                " return NULL;",
                "}",
            ],
            lines,
        )

    def test_check_int(self) -> None:
        # The optional=True variant accepts a missing (NULL) argument and
        # substitutes the undefined tag value.
        emitter = Emitter(self.context)
        generate_arg_check("x", int_rprimitive, emitter, ReturnHandler("NULL"))
        generate_arg_check("y", int_rprimitive, emitter, ReturnHandler("NULL"), optional=True)
        lines = emitter.fragments
        self.assert_lines(
            [
                "CPyTagged arg_x;",
                "if (likely(PyLong_Check(obj_x)))",
                " arg_x = CPyTagged_BorrowFromObject(obj_x);",
                "else {",
                ' CPy_TypeError("int", obj_x); return NULL;',
                "}",
                "CPyTagged arg_y;",
                "if (obj_y == NULL) {",
                " arg_y = CPY_INT_TAG;",
                "} else if (likely(PyLong_Check(obj_y)))",
                " arg_y = CPyTagged_BorrowFromObject(obj_y);",
                "else {",
                ' CPy_TypeError("int", obj_y); return NULL;',
                "}",
            ],
            lines,
        )

    def assert_lines(self, expected: list[str], actual: list[str]) -> None:
        # Fragments end with newlines; strip them before comparing.
        actual = [line.rstrip("\n") for line in actual]
        assert_string_arrays_equal(expected, actual, "Invalid output")

View file

@ -0,0 +1,56 @@
"""Test runner for exception handling transform test cases.
The transform inserts exception handling branch operations to IR.
"""
from __future__ import annotations
import os.path
from mypy.errors import CompileError
from mypy.test.config import test_temp_dir
from mypy.test.data import DataDrivenTestCase
from mypyc.analysis.blockfreq import frequently_executed_blocks
from mypyc.common import TOP_LEVEL_NAME
from mypyc.ir.pprint import format_func
from mypyc.test.testutil import (
ICODE_GEN_BUILTINS,
MypycDataSuite,
assert_test_output,
build_ir_for_single_file,
remove_comment_lines,
use_custom_builtins,
)
from mypyc.transform.exceptions import insert_exception_handling
from mypyc.transform.refcount import insert_ref_count_opcodes
from mypyc.transform.uninit import insert_uninit_checks
files = ["exceptions.test", "exceptions-freq.test"]


class TestExceptionTransform(MypycDataSuite):
    files = files
    base_path = test_temp_dir

    def run_case(self, testcase: DataDrivenTestCase) -> None:
        """Perform a runtime checking transformation test case."""
        with use_custom_builtins(os.path.join(self.data_prefix, ICODE_GEN_BUILTINS), testcase):
            expected_output = remove_comment_lines(testcase.output)
            try:
                ir = build_ir_for_single_file(testcase.input)
            except CompileError as e:
                actual = e.messages
            else:
                actual = []
                for fn in ir:
                    # Skip the module top level unless the test opts in by name.
                    if fn.name == TOP_LEVEL_NAME and not testcase.name.endswith("_toplevel"):
                        continue
                    # Apply the transforms in this order: uninit checks, then
                    # exception handling, then refcount opcodes.
                    insert_uninit_checks(fn, True)
                    insert_exception_handling(fn, True)
                    insert_ref_count_opcodes(fn)
                    actual.extend(format_func(fn))
                    if testcase.name.endswith("_freq"):
                        # For exceptions-freq.test cases, also report which
                        # blocks are considered frequently executed.
                        common = frequently_executed_blocks(fn.blocks[0])
                        actual.append("hot blocks: %s" % sorted(b.label for b in common))
            assert_test_output(testcase, actual, "Invalid source code output", expected_output)

View file

@ -0,0 +1,104 @@
"""Test cases that run tests as subprocesses."""
from __future__ import annotations
import os
import subprocess
import sys
import tempfile
import unittest
from distutils import ccompiler, sysconfig
from typing import Any
from mypyc.build import get_cflags, include_dir
from .config import PREFIX
# lib-rt C files that are not expected to compile as standalone translation units.
EXCLUDED_LIB_RT_COMPILE_FILES = ["static_data.c"]


class TestExternal(unittest.TestCase):
    """Tests that invoke a C compiler or run builds in subprocesses."""

    def test_lib_rt_c_files_compile_individually(self) -> None:
        """Compile each top-level lib-rt C file as its own translation unit."""
        lib_rt_dir = include_dir()
        source_names = sorted(
            name
            for name in os.listdir(lib_rt_dir)
            if name.endswith(".c")
            and os.path.isfile(os.path.join(lib_rt_dir, name))
            and name not in EXCLUDED_LIB_RT_COMPILE_FILES
        )
        compiler: Any = ccompiler.new_compiler()
        sysconfig.customize_compiler(compiler)
        include_dirs = [lib_rt_dir]
        # Add both the platform-independent and platform-specific Python
        # include directories (they can differ).
        for plat_specific in (False, True):
            path = sysconfig.get_python_inc(plat_specific=plat_specific)
            if path and path not in include_dirs:
                include_dirs.append(path)
        with tempfile.TemporaryDirectory() as tmpdir:
            # Compile everything twice: with and without experimental
            # features, each configuration into its own output directory.
            for experimental_features in (False, True):
                cflags = get_cflags(
                    compiler_type=compiler.compiler_type,
                    opt_level="0",
                    experimental_features=experimental_features,
                )
                output_dir = os.path.join(
                    tmpdir, "experimental" if experimental_features else "default"
                )
                for source_name in source_names:
                    source_path = os.path.join(lib_rt_dir, source_name)
                    with self.subTest(source=source_name, experimental=experimental_features):
                        try:
                            compiler.compile(
                                [source_path],
                                output_dir=output_dir,
                                include_dirs=include_dirs,
                                extra_postargs=cflags,
                            )
                        except Exception as err:
                            raise AssertionError(
                                f"failed to compile {source_name} "
                                f"(experimental={experimental_features})"
                            ) from err

    # TODO: Get this to work on Windows.
    # (Or don't. It is probably not a good use of time.)
    @unittest.skipIf(sys.platform.startswith("win"), "rt tests don't work on windows")
    def test_c_unit_test(self) -> None:
        """Run C unit tests in a subprocess."""
        cppflags: list[str] = []
        env = os.environ.copy()
        if sys.platform == "darwin":
            cppflags += ["-O0", "-mmacosx-version-min=10.10", "-stdlib=libc++"]
        elif sys.platform == "linux":
            cppflags += ["-O0"]
        env["CPPFLAGS"] = " ".join(cppflags)
        # Build Python wrapper for C unit tests.
        with tempfile.TemporaryDirectory() as tmpdir:
            # NOTE: check_call raises CalledProcessError on failure, so the
            # returned status here is only reached on success.
            status = subprocess.check_call(
                [
                    sys.executable,
                    "setup.py",
                    "build_ext",
                    f"--build-lib={tmpdir}",
                    f"--build-temp={tmpdir}",
                    "--run-capi-tests",
                ],
                env=env,
                cwd=os.path.join(PREFIX, "mypyc", "lib-rt"),
            )
            # Run C unit tests.
            env = os.environ.copy()
            if "GTEST_COLOR" not in os.environ:
                env["GTEST_COLOR"] = "yes"  # Use fancy colors
            status = subprocess.call(
                [sys.executable, "-c", "import sys, test_capi; sys.exit(test_capi.run_tests())"],
                env=env,
                cwd=tmpdir,
            )
            if status != 0:
                raise AssertionError("make test: C unit test failure")

View file

@ -0,0 +1,100 @@
"""Test cases for IR generation."""
from __future__ import annotations
import os.path
import sys
from mypy.errors import CompileError
from mypy.test.config import test_temp_dir
from mypy.test.data import DataDrivenTestCase
from mypyc.common import IS_FREE_THREADED, TOP_LEVEL_NAME
from mypyc.ir.pprint import format_func
from mypyc.test.testutil import (
ICODE_GEN_BUILTINS,
MypycDataSuite,
assert_test_output,
build_ir_for_single_file,
infer_ir_build_options_from_test_name,
remove_comment_lines,
replace_word_size,
use_custom_builtins,
)
# Data-driven test case files for IR generation. Each .test file contains
# multiple cases consisting of Python source plus the expected IR dump.
files = [
    "irbuild-basic.test",
    "irbuild-int.test",
    "irbuild-bool.test",
    "irbuild-lists.test",
    "irbuild-tuple.test",
    "irbuild-dict.test",
    "irbuild-set.test",
    "irbuild-str.test",
    "irbuild-bytes.test",
    "irbuild-float.test",
    "irbuild-frozenset.test",
    "irbuild-statements.test",
    "irbuild-nested.test",
    "irbuild-classes.test",
    "irbuild-optional.test",
    "irbuild-any.test",
    "irbuild-generics.test",
    "irbuild-try.test",
    "irbuild-strip-asserts.test",
    "irbuild-i64.test",
    "irbuild-i32.test",
    "irbuild-i16.test",
    "irbuild-u8.test",
    "irbuild-vec-i64.test",
    "irbuild-vec-misc.test",
    "irbuild-vec-t.test",
    "irbuild-vec-nested.test",
    "irbuild-vectorcall.test",
    "irbuild-unreachable.test",
    "irbuild-isinstance.test",
    "irbuild-dunders.test",
    "irbuild-singledispatch.test",
    "irbuild-constant-fold.test",
    "irbuild-glue-methods.test",
    "irbuild-math.test",
    "irbuild-weakref.test",
    "irbuild-librt-strings.test",
    "irbuild-base64.test",
    "irbuild-time.test",
    "irbuild-match.test",
]

# Cases relying on Python 3.14+ features only run on new enough interpreters.
if sys.version_info >= (3, 14):
    files.append("irbuild-python314.test")
class TestGenOps(MypycDataSuite):
    """Data-driven test cases for IR generation."""

    files = files
    base_path = test_temp_dir
    optional_out = True

    def run_case(self, testcase: DataDrivenTestCase) -> None:
        """Perform a runtime checking transformation test case."""
        options = infer_ir_build_options_from_test_name(testcase.name)
        if options is None:
            # This configuration skips the test case.
            return
        if IS_FREE_THREADED and "_withgil" in testcase.name:
            # Test case should only run on a non-free-threaded build.
            return
        builtins_path = os.path.join(self.data_prefix, ICODE_GEN_BUILTINS)
        with use_custom_builtins(builtins_path, testcase):
            expected_output = replace_word_size(remove_comment_lines(testcase.output))
            name = testcase.name
            try:
                ir = build_ir_for_single_file(testcase.input, options)
            except CompileError as e:
                actual = e.messages
            else:
                actual = []
                include_top_level = name.endswith("_toplevel")
                for fn in ir:
                    # The module top level is only dumped for *_toplevel cases.
                    if fn.name != TOP_LEVEL_NAME or include_top_level:
                        actual.extend(format_func(fn))
            assert_test_output(testcase, actual, "Invalid source code output", expected_output)

View file

@ -0,0 +1,199 @@
from __future__ import annotations
import unittest
from mypyc.analysis.ircheck import FnError, can_coerce_to, check_func_ir
from mypyc.ir.class_ir import ClassIR
from mypyc.ir.func_ir import FuncDecl, FuncIR, FuncSignature
from mypyc.ir.ops import (
Assign,
BasicBlock,
Goto,
Integer,
LoadAddress,
LoadLiteral,
Op,
Register,
Return,
)
from mypyc.ir.pprint import format_func
from mypyc.ir.rtypes import (
RInstance,
RType,
RUnion,
bytes_rprimitive,
int32_rprimitive,
int64_rprimitive,
none_rprimitive,
object_rprimitive,
pointer_rprimitive,
str_rprimitive,
)
def assert_has_error(fn: FuncIR, error: FnError) -> None:
    """Assert that checking *fn* reports exactly the single given error."""
    assert check_func_ir(fn) == [error]


def assert_no_errors(fn: FuncIR) -> None:
    """Assert that *fn* passes the IR checker with no errors at all."""
    assert check_func_ir(fn) == []


# Reusable 'None' operand used when building test IR.
NONE_VALUE = Integer(0, rtype=none_rprimitive)
class TestIrcheck(unittest.TestCase):
    """Unit tests for the IR consistency checker (check_func_ir)."""

    def setUp(self) -> None:
        # Counter used to hand out unique basic block labels.
        self.label = 0

    def basic_block(self, ops: list[Op]) -> BasicBlock:
        """Create a basic block with a fresh label containing the given ops."""
        self.label += 1
        block = BasicBlock(self.label)
        block.ops = ops
        return block

    def func_decl(self, name: str, ret_type: RType | None = None) -> FuncDecl:
        """Create a minimal no-argument function declaration.

        The return type defaults to None (none_rprimitive).
        """
        if ret_type is None:
            ret_type = none_rprimitive
        return FuncDecl(
            name=name,
            class_name=None,
            module_name="module",
            sig=FuncSignature(args=[], ret_type=ret_type),
        )

    def test_valid_fn(self) -> None:
        """A trivial function that just returns None passes the checker."""
        assert_no_errors(
            FuncIR(
                decl=self.func_decl(name="func_1"),
                arg_regs=[],
                blocks=[self.basic_block(ops=[Return(value=NONE_VALUE)])],
            )
        )

    def test_block_not_terminated_empty_block(self) -> None:
        """An empty block lacks a terminator and is flagged."""
        block = self.basic_block([])
        fn = FuncIR(decl=self.func_decl(name="func_1"), arg_regs=[], blocks=[block])
        assert_has_error(fn, FnError(source=block, desc="Block not terminated"))

    def test_valid_goto(self) -> None:
        """A goto whose target block is part of the function is valid."""
        block_1 = self.basic_block([Return(value=NONE_VALUE)])
        block_2 = self.basic_block([Goto(label=block_1)])
        fn = FuncIR(decl=self.func_decl(name="func_1"), arg_regs=[], blocks=[block_1, block_2])
        assert_no_errors(fn)

    def test_invalid_goto(self) -> None:
        """A goto targeting a block missing from the function is an error."""
        block_1 = self.basic_block([Return(value=NONE_VALUE)])
        goto = Goto(label=block_1)
        block_2 = self.basic_block([goto])
        fn = FuncIR(
            decl=self.func_decl(name="func_1"),
            arg_regs=[],
            # block_1 omitted
            blocks=[block_2],
        )
        assert_has_error(fn, FnError(source=goto, desc="Invalid control operation target: 1"))

    def test_invalid_register_source(self) -> None:
        """Referencing a register the function never declares is an error."""
        ret = Return(value=Register(type=none_rprimitive, name="r1"))
        block = self.basic_block([ret])
        fn = FuncIR(decl=self.func_decl(name="func_1"), arg_regs=[], blocks=[block])
        assert_has_error(fn, FnError(source=ret, desc="Invalid op reference to register 'r1'"))

    def test_invalid_op_source(self) -> None:
        """Referencing an op value that appears in no block is an error."""
        ret = Return(value=LoadLiteral(value="foo", rtype=str_rprimitive))
        block = self.basic_block([ret])
        fn = FuncIR(decl=self.func_decl(name="func_1"), arg_regs=[], blocks=[block])
        assert_has_error(
            fn, FnError(source=ret, desc="Invalid op reference to op of type LoadLiteral")
        )

    def test_invalid_return_type(self) -> None:
        """Returning a value that cannot coerce to the declared type is an error."""
        ret = Return(value=Integer(value=5, rtype=int32_rprimitive))
        fn = FuncIR(
            decl=self.func_decl(name="func_1", ret_type=int64_rprimitive),
            arg_regs=[],
            blocks=[self.basic_block([ret])],
        )
        assert_has_error(
            fn, FnError(source=ret, desc="Cannot coerce source type i32 to dest type i64")
        )

    def test_invalid_assign(self) -> None:
        """Assigning a source that cannot coerce to the target type is an error."""
        arg_reg = Register(type=int64_rprimitive, name="r1")
        assign = Assign(dest=arg_reg, src=Integer(value=5, rtype=int32_rprimitive))
        ret = Return(value=NONE_VALUE)
        fn = FuncIR(
            decl=self.func_decl(name="func_1"),
            arg_regs=[arg_reg],
            blocks=[self.basic_block([assign, ret])],
        )
        assert_has_error(
            fn, FnError(source=assign, desc="Cannot coerce source type i32 to dest type i64")
        )

    def test_can_coerce_to(self) -> None:
        """Spot-check the coercion compatibility predicate."""
        cls = ClassIR(name="Cls", module_name="cls")
        valid_cases = [
            (int64_rprimitive, int64_rprimitive),
            (str_rprimitive, str_rprimitive),
            (str_rprimitive, object_rprimitive),
            (object_rprimitive, str_rprimitive),
            (RUnion([bytes_rprimitive, str_rprimitive]), str_rprimitive),
            (str_rprimitive, RUnion([bytes_rprimitive, str_rprimitive])),
            (RInstance(cls), object_rprimitive),
        ]
        invalid_cases = [
            (int64_rprimitive, int32_rprimitive),
            (RInstance(cls), str_rprimitive),
            (str_rprimitive, bytes_rprimitive),
        ]
        for src, dest in valid_cases:
            assert can_coerce_to(src, dest)
        for src, dest in invalid_cases:
            assert not can_coerce_to(src, dest)

    def test_duplicate_op(self) -> None:
        """The same op object appearing twice in a block is an error."""
        arg_reg = Register(type=int32_rprimitive, name="r1")
        assign = Assign(dest=arg_reg, src=Integer(value=5, rtype=int32_rprimitive))
        block = self.basic_block([assign, assign, Return(value=NONE_VALUE)])
        fn = FuncIR(decl=self.func_decl(name="func_1"), arg_regs=[], blocks=[block])
        assert_has_error(fn, FnError(source=assign, desc="Func has a duplicate op"))

    def test_pprint(self) -> None:
        """Checker errors are rendered inline by format_func with marker glyphs."""
        block_1 = self.basic_block([Return(value=NONE_VALUE)])
        goto = Goto(label=block_1)
        block_2 = self.basic_block([goto])
        fn = FuncIR(
            decl=self.func_decl(name="func_1"),
            arg_regs=[],
            # block_1 omitted
            blocks=[block_2],
        )
        errors = [(goto, "Invalid control operation target: 1")]
        formatted = format_func(fn, errors)
        # \U0000274c is the cross-mark glyph; \U0001f446 the pointing-up glyph.
        assert formatted == [
            "def func_1():",
            "L0:",
            " \U0000274c goto L1",
            " \U0001f446 ERROR: Invalid control operation target: 1",
        ]

    def test_load_address_declares_register(self) -> None:
        """Taking the address of a register is accepted as a valid reference."""
        rx = Register(str_rprimitive, "x")
        ry = Register(pointer_rprimitive, "y")
        load_addr = LoadAddress(pointer_rprimitive, rx)
        assert_no_errors(
            FuncIR(
                decl=self.func_decl(name="func_1"),
                arg_regs=[],
                blocks=[
                    self.basic_block(
                        ops=[load_addr, Assign(ry, load_addr), Return(value=NONE_VALUE)]
                    )
                ],
            )
        )

View file

@ -0,0 +1,90 @@
"""Test code geneneration for literals."""
from __future__ import annotations
import unittest
from mypyc.codegen.literals import (
Literals,
_encode_bytes_values,
_encode_int_values,
_encode_str_values,
format_str_literal,
)
class TestLiterals(unittest.TestCase):
    """Tests for encoding literal values into the C static data format."""

    def test_format_str_literal(self) -> None:
        """Short strings get a 1-byte length prefix; >=128 uses a 2-byte form."""
        assert format_str_literal("") == b"\x00"
        assert format_str_literal("xyz") == b"\x03xyz"
        assert format_str_literal("x" * 127) == b"\x7f" + b"x" * 127
        assert format_str_literal("x" * 128) == b"\x81\x00" + b"x" * 128
        assert format_str_literal("x" * 131) == b"\x81\x03" + b"x" * 131

    def test_encode_str_values(self) -> None:
        """Values are chunked; each chunk is prefixed with its item count."""
        assert _encode_str_values({}) == [b""]
        assert _encode_str_values({"foo": 0}) == [b"\x01\x03foo", b""]
        assert _encode_str_values({"foo": 0, "b": 1}) == [b"\x02\x03foo\x01b", b""]
        assert _encode_str_values({"foo": 0, "x" * 70: 1}) == [
            b"\x01\x03foo",
            bytes([1, 70]) + b"x" * 70,
            b"",
        ]
        assert _encode_str_values({"y" * 100: 0}) == [bytes([1, 100]) + b"y" * 100, b""]

    def test_encode_bytes_values(self) -> None:
        """Bytes literals use the same chunked encoding as strings."""
        assert _encode_bytes_values({}) == [b""]
        assert _encode_bytes_values({b"foo": 0}) == [b"\x01\x03foo", b""]
        assert _encode_bytes_values({b"foo": 0, b"b": 1}) == [b"\x02\x03foo\x01b", b""]
        assert _encode_bytes_values({b"foo": 0, b"x" * 70: 1}) == [
            b"\x01\x03foo",
            bytes([1, 70]) + b"x" * 70,
            b"",
        ]
        assert _encode_bytes_values({b"y" * 100: 0}) == [bytes([1, 100]) + b"y" * 100, b""]

    def test_encode_int_values(self) -> None:
        """Ints are encoded as ASCII digits, NUL-separated within a chunk."""
        assert _encode_int_values({}) == [b""]
        assert _encode_int_values({123: 0}) == [b"\x01123", b""]
        assert _encode_int_values({123: 0, 9: 1}) == [b"\x02123\x009", b""]
        assert _encode_int_values({123: 0, 45: 1, 5 * 10**70: 2}) == [
            b"\x02123\x0045",
            b"\x015" + b"0" * 70,
            b"",
        ]
        assert _encode_int_values({6 * 10**100: 0}) == [b"\x016" + b"0" * 100, b""]

    def test_simple_literal_index(self) -> None:
        """Indices are assigned in a fixed order: None, False, True, then others."""
        lit = Literals()
        lit.record_literal(1)
        lit.record_literal("y")
        lit.record_literal(True)
        lit.record_literal(None)
        lit.record_literal(False)
        assert lit.literal_index(None) == 0
        assert lit.literal_index(False) == 1
        assert lit.literal_index(True) == 2
        assert lit.literal_index("y") == 3
        assert lit.literal_index(1) == 4

    def test_tuple_literal(self) -> None:
        """Tuples are encoded as a count followed by length/item-index pairs."""
        lit = Literals()
        lit.record_literal((1, "y", None, (b"a", "b")))
        lit.record_literal((b"a", "b"))
        lit.record_literal(())
        assert lit.literal_index((b"a", "b")) == 7
        assert lit.literal_index((1, "y", None, (b"a", "b"))) == 8
        assert lit.literal_index(()) == 9
        # (A stray debug print of encoded_tuple_values() was removed here; it
        # polluted test output and the assertion below already checks the data.)
        assert lit.encoded_tuple_values() == [
            "3",  # Number of tuples
            "2",
            "5",
            "4",  # First tuple (length=2)
            "4",
            "6",
            "3",
            "0",
            "7",  # Second tuple (length=4)
            "0",  # Third tuple (length=0)
        ]

View file

@ -0,0 +1,61 @@
"""Runner for lowering transform tests."""
from __future__ import annotations
import os.path
from mypy.errors import CompileError
from mypy.test.config import test_temp_dir
from mypy.test.data import DataDrivenTestCase
from mypyc.common import TOP_LEVEL_NAME
from mypyc.ir.pprint import format_func
from mypyc.options import CompilerOptions
from mypyc.test.testutil import (
ICODE_GEN_BUILTINS,
MypycDataSuite,
assert_test_output,
build_ir_for_single_file,
infer_ir_build_options_from_test_name,
remove_comment_lines,
replace_word_size,
use_custom_builtins,
)
from mypyc.transform.exceptions import insert_exception_handling
from mypyc.transform.flag_elimination import do_flag_elimination
from mypyc.transform.lower import lower_ir
from mypyc.transform.refcount import insert_ref_count_opcodes
from mypyc.transform.uninit import insert_uninit_checks
class TestLowering(MypycDataSuite):
    """Data-driven tests for the IR lowering transform."""

    files = ["lowering-int.test", "lowering-list.test"]
    base_path = test_temp_dir

    def run_case(self, testcase: DataDrivenTestCase) -> None:
        """Build IR, run the pre-lowering transforms plus lowering, and compare output."""
        options = infer_ir_build_options_from_test_name(testcase.name)
        if options is None:
            # Skipped test case
            return
        with use_custom_builtins(os.path.join(self.data_prefix, ICODE_GEN_BUILTINS), testcase):
            expected_output = remove_comment_lines(testcase.output)
            expected_output = replace_word_size(expected_output)
            try:
                ir = build_ir_for_single_file(testcase.input, options)
            except CompileError as e:
                actual = e.messages
            else:
                # The transform options are loop-invariant, so build them once.
                # Use a distinct name so the IR build `options` above is not
                # shadowed (the original rebound `options` inside the loop).
                compiler_options = CompilerOptions(strict_traceback_checks=True)
                actual = []
                for fn in ir:
                    if fn.name == TOP_LEVEL_NAME and not testcase.name.endswith("_toplevel"):
                        continue
                    # Lowering happens after exception handling and ref count opcodes have
                    # been added. Any changes must maintain reference counting semantics.
                    insert_uninit_checks(fn, True)
                    insert_exception_handling(fn, True)
                    insert_ref_count_opcodes(fn)
                    lower_ir(fn, compiler_options)
                    do_flag_elimination(fn, compiler_options)
                    actual.extend(format_func(fn))
            assert_test_output(testcase, actual, "Invalid source code output", expected_output)

View file

@ -0,0 +1,22 @@
from __future__ import annotations
import unittest
from mypyc.ir.ops import BasicBlock
from mypyc.ir.pprint import format_blocks, generate_names_for_ir
from mypyc.irbuild.ll_builder import LowLevelIRBuilder
from mypyc.options import CompilerOptions
class TestMisc(unittest.TestCase):
    """Miscellaneous small tests for the low-level IR builder."""

    def test_debug_op(self) -> None:
        """debug_print emits a literal load followed by a CPyDebug call."""
        builder = LowLevelIRBuilder(
            errors=None, options=CompilerOptions(strict_traceback_checks=True)
        )
        block = BasicBlock()
        builder.activate_block(block)
        builder.debug_print("foo")
        names = generate_names_for_ir([], [block])
        formatted = format_blocks([block], names, {})
        # The trailing line (block terminator rendering) is not checked.
        assert formatted[:-1] == ["L0:", " r0 = 'foo'", " CPyDebug_PrintObject(r0)"]

View file

@ -0,0 +1,68 @@
from __future__ import annotations
import unittest
from mypyc.namegen import (
NameGenerator,
candidate_suffixes,
exported_name,
make_module_translation_map,
)
class TestNameGen(unittest.TestCase):
    """Tests for C name mangling and module-name shortening helpers."""

    def test_candidate_suffixes(self) -> None:
        """Candidates run from the empty suffix up to the full dotted prefix."""
        assert candidate_suffixes("foo") == ["", "foo."]
        assert candidate_suffixes("foo.bar") == ["", "bar.", "foo.bar."]

    def test_exported_name(self) -> None:
        """Dots in module names become triple underscores in exported names."""
        assert exported_name("foo") == "foo"
        assert exported_name("foo.bar") == "foo___bar"

    def test_make_module_translation_map(self) -> None:
        """Each module maps to its shortest suffix that is unique in the set."""
        assert make_module_translation_map(["foo", "bar"]) == {"foo": "foo.", "bar": "bar."}
        assert make_module_translation_map(["foo.bar", "foo.baz"]) == {
            "foo.bar": "bar.",
            "foo.baz": "baz.",
        }
        assert make_module_translation_map(["zar", "foo.bar", "foo.baz"]) == {
            "foo.bar": "bar.",
            "foo.baz": "baz.",
            "zar": "zar.",
        }
        # When the last component collides ("bar"), longer prefixes are used.
        assert make_module_translation_map(["foo.bar", "fu.bar", "foo.baz"]) == {
            "foo.bar": "foo.bar.",
            "fu.bar": "fu.bar.",
            "foo.baz": "baz.",
        }
        assert make_module_translation_map(["foo", "foo.foo", "bar.foo", "bar.foo.bar.foo"]) == {
            "foo": "foo.",
            "foo.foo": "foo.foo.",
            "bar.foo": "bar.foo.",
            "bar.foo.bar.foo": "foo.bar.foo.",
        }

    def test_name_generator(self) -> None:
        """Private names get the shortest unique module prefix, '___'-joined."""
        g = NameGenerator([["foo", "foo.zar"]])
        assert g.private_name("foo", "f") == "foo___f"
        assert g.private_name("foo", "C.x.y") == "foo___C___x___y"
        # Repeated queries are stable (same result the second time).
        assert g.private_name("foo", "C.x.y") == "foo___C___x___y"
        assert g.private_name("foo.zar", "C.x.y") == "zar___C___x___y"
        assert g.private_name("foo", "C.x_y") == "foo___C___x_y"
        assert g.private_name("foo", "C_x_y") == "foo___C_x_y"
        assert g.private_name("foo", "C_x_y") == "foo___C_x_y"
        # Underscore runs in the source name are escaped to stay reversible.
        assert g.private_name("foo", "___") == "foo______3_"
        # A single-module group needs no prefix at all.
        g = NameGenerator([["foo.zar"]])
        assert g.private_name("foo.zar", "f") == "f"

    def test_name_generator_with_separate(self) -> None:
        """With separate compilation, the full module prefix is always used."""
        g = NameGenerator([["foo", "foo.zar"]], separate=True)
        assert g.private_name("foo", "f") == "foo___f"
        assert g.private_name("foo", "C.x.y") == "foo___C___x___y"
        assert g.private_name("foo.zar", "C.x.y") == "foo___zar___C___x___y"
        assert g.private_name("foo", "C.x_y") == "foo___C___x_y"
        assert g.private_name("foo", "___") == "foo______3_"
        g = NameGenerator([["foo.zar"]], separate=True)
        assert g.private_name("foo.zar", "f") == "foo___zar___f"

View file

@ -0,0 +1,68 @@
"""Runner for IR optimization tests."""
from __future__ import annotations
import os.path
from mypy.errors import CompileError
from mypy.test.config import test_temp_dir
from mypy.test.data import DataDrivenTestCase
from mypyc.common import TOP_LEVEL_NAME
from mypyc.ir.func_ir import FuncIR
from mypyc.ir.pprint import format_func
from mypyc.options import CompilerOptions
from mypyc.test.testutil import (
ICODE_GEN_BUILTINS,
MypycDataSuite,
assert_test_output,
build_ir_for_single_file,
remove_comment_lines,
use_custom_builtins,
)
from mypyc.transform.copy_propagation import do_copy_propagation
from mypyc.transform.flag_elimination import do_flag_elimination
from mypyc.transform.uninit import insert_uninit_checks
class OptimizationSuite(MypycDataSuite):
    """Base class for IR optimization test suites.

    To use this, add a base class and define "files" and "do_optimizations".
    """

    base_path = test_temp_dir

    def run_case(self, testcase: DataDrivenTestCase) -> None:
        """Build IR, apply the optimization under test, and compare output."""
        builtins_path = os.path.join(self.data_prefix, ICODE_GEN_BUILTINS)
        with use_custom_builtins(builtins_path, testcase):
            expected_output = remove_comment_lines(testcase.output)
            try:
                ir = build_ir_for_single_file(testcase.input)
            except CompileError as e:
                actual = e.messages
            else:
                actual = []
                keep_top_level = testcase.name.endswith("_toplevel")
                for fn in ir:
                    if fn.name == TOP_LEVEL_NAME and not keep_top_level:
                        continue
                    insert_uninit_checks(fn, True)
                    self.do_optimizations(fn)
                    actual.extend(format_func(fn))
            assert_test_output(testcase, actual, "Invalid source code output", expected_output)

    def do_optimizations(self, fn: FuncIR) -> None:
        """Apply the optimization being tested to *fn* in place (subclass hook)."""
        raise NotImplementedError
class TestCopyPropagation(OptimizationSuite):
    """Run the copy propagation optimization over data-driven test cases."""

    files = ["opt-copy-propagation.test"]

    def do_optimizations(self, fn: FuncIR) -> None:
        opts = CompilerOptions(strict_traceback_checks=True)
        do_copy_propagation(fn, opts)
class TestFlagElimination(OptimizationSuite):
    """Run the flag elimination optimization over data-driven test cases."""

    files = ["opt-flag-elimination.test"]

    def do_optimizations(self, fn: FuncIR) -> None:
        opts = CompilerOptions(strict_traceback_checks=True)
        do_flag_elimination(fn, opts)

View file

@ -0,0 +1,42 @@
from __future__ import annotations
import unittest
from mypyc.ir.ops import Assign, BasicBlock, Integer, IntOp, Op, Register, Unreachable
from mypyc.ir.pprint import generate_names_for_ir
from mypyc.ir.rtypes import int_rprimitive
def register(name: str) -> Register:
    """Create an argument register with the given name.

    The ``name`` parameter was previously ignored (every register was
    hard-coded to "foo"); it is now honored. All existing callers pass
    "foo", so their behavior is unchanged.
    """
    return Register(int_rprimitive, name, is_arg=True)
def make_block(ops: list[Op]) -> BasicBlock:
    """Return a new basic block containing the given ops in order."""
    result = BasicBlock()
    for op in ops:
        result.ops.append(op)
    return result
class TestGenerateNames(unittest.TestCase):
    """Tests for generate_names_for_ir, which names registers and op values."""

    def test_empty(self) -> None:
        # No args and no blocks produce no names.
        assert generate_names_for_ir([], []) == {}

    def test_arg(self) -> None:
        arg = register("foo")
        assert generate_names_for_ir([arg], []) == {arg: "foo"}

    def test_int_op(self) -> None:
        two = Integer(2)
        four = Integer(4)
        first = IntOp(int_rprimitive, two, four, IntOp.ADD)
        second = IntOp(int_rprimitive, first, four, IntOp.ADD)
        blk = make_block([first, second, Unreachable()])
        # Op values receive sequential temporary names r0, r1, ...
        assert generate_names_for_ir([], [blk]) == {first: "r0", second: "r1"}

    def test_assign(self) -> None:
        target = register("foo")
        value = Integer(2)
        blk = make_block([Assign(target, value), Assign(target, value)])
        # Both assignments reuse the register's own name; no temporaries.
        assert generate_names_for_ir([target], [blk]) == {target: "foo"}

View file

@ -0,0 +1,48 @@
"""Unit tests for RArray types."""
from __future__ import annotations
import unittest
from mypyc.common import PLATFORM_SIZE
from mypyc.ir.rtypes import (
RArray,
bool_rprimitive,
compute_rtype_alignment,
compute_rtype_size,
int_rprimitive,
)
class TestRArray(unittest.TestCase):
    """Unit tests for the RArray fixed-length array type."""

    def test_basics(self) -> None:
        arr = RArray(int_rprimitive, 10)
        assert arr.item_type == int_rprimitive
        assert arr.length == 10

    def test_str_conversion(self) -> None:
        arr = RArray(int_rprimitive, 10)
        assert str(arr) == "int[10]"
        assert repr(arr) == "<RArray <RPrimitive builtins.int>[10]>"

    def test_eq(self) -> None:
        arr = RArray(int_rprimitive, 10)
        # Equality requires both the same item type and the same length.
        assert arr == RArray(int_rprimitive, 10)
        assert arr != RArray(bool_rprimitive, 10)
        assert arr != RArray(int_rprimitive, 9)

    def test_hash(self) -> None:
        # Equal arrays must hash equal.
        assert hash(RArray(int_rprimitive, 10)) == hash(RArray(int_rprimitive, 10))
        assert hash(RArray(bool_rprimitive, 5)) == hash(RArray(bool_rprimitive, 5))

    def test_alignment(self) -> None:
        # Arrays align to their item type: word-sized ints vs one-byte bools.
        assert compute_rtype_alignment(RArray(int_rprimitive, 10)) == PLATFORM_SIZE
        assert compute_rtype_alignment(RArray(bool_rprimitive, 55)) == 1

    def test_size(self) -> None:
        assert compute_rtype_size(RArray(int_rprimitive, 9)) == 9 * PLATFORM_SIZE
        assert compute_rtype_size(RArray(bool_rprimitive, 3)) == 3

View file

@ -0,0 +1,59 @@
"""Test runner for reference count opcode insertion transform test cases.
The transform inserts needed reference count increment/decrement
operations to IR.
"""
from __future__ import annotations
import os.path
from mypy.errors import CompileError
from mypy.test.config import test_temp_dir
from mypy.test.data import DataDrivenTestCase
from mypyc.common import TOP_LEVEL_NAME
from mypyc.ir.pprint import format_func
from mypyc.test.testutil import (
ICODE_GEN_BUILTINS,
MypycDataSuite,
assert_test_output,
build_ir_for_single_file,
infer_ir_build_options_from_test_name,
remove_comment_lines,
replace_word_size,
use_custom_builtins,
)
from mypyc.transform.refcount import insert_ref_count_opcodes
from mypyc.transform.uninit import insert_uninit_checks
# Data-driven test case files for the reference counting transform.
files = ["refcount.test"]


class TestRefCountTransform(MypycDataSuite):
    """Insert reference count opcodes into IR and compare formatted output."""

    files = files
    base_path = test_temp_dir
    optional_out = True

    def run_case(self, testcase: DataDrivenTestCase) -> None:
        """Perform a runtime checking transformation test case."""
        options = infer_ir_build_options_from_test_name(testcase.name)
        if options is None:
            # This configuration skips the test case.
            return
        builtins_path = os.path.join(self.data_prefix, ICODE_GEN_BUILTINS)
        with use_custom_builtins(builtins_path, testcase):
            expected_output = replace_word_size(remove_comment_lines(testcase.output))
            try:
                ir = build_ir_for_single_file(testcase.input, options)
            except CompileError as e:
                actual = e.messages
            else:
                actual = []
                keep_top_level = testcase.name.endswith("_toplevel")
                for fn in ir:
                    if fn.name == TOP_LEVEL_NAME and not keep_top_level:
                        continue
                    insert_uninit_checks(fn, True)
                    insert_ref_count_opcodes(fn)
                    actual.extend(format_func(fn))
            assert_test_output(testcase, actual, "Invalid source code output", expected_output)

View file

@ -0,0 +1,534 @@
"""Test cases for building an C extension and running it."""
from __future__ import annotations
import ast
import contextlib
import glob
import os.path
import re
import shutil
import subprocess
import sys
import time
from collections.abc import Iterator
from typing import Any
import pytest
from mypy import build
from mypy.errors import CompileError
from mypy.options import Options
from mypy.test.config import mypyc_output_dir, test_temp_dir
from mypy.test.data import DataDrivenTestCase
from mypy.test.helpers import assert_module_equivalence, perform_file_operations
from mypyc.build import construct_groups
from mypyc.codegen import emitmodule
from mypyc.codegen.emitmodule import collect_source_dependencies
from mypyc.errors import Errors
from mypyc.options import CompilerOptions
from mypyc.test.config import test_data_prefix
from mypyc.test.librt_cache import get_librt_path
from mypyc.test.test_serialization import check_serialization_roundtrip
from mypyc.test.testutil import (
ICODE_GEN_BUILTINS,
TESTUTIL_PATH,
MypycDataSuite,
assert_test_output,
fudge_dir_mtimes,
has_test_name_tag,
show_c,
use_custom_builtins,
)
# Data-driven test case files that compile a C extension and run it. Each
# .test file holds multiple cases with source, expected output, and optional
# incremental steps.
files = [
    "run-async.test",
    "run-misc.test",
    "run-functions.test",
    "run-integers.test",
    "run-i64.test",
    "run-i32.test",
    "run-i16.test",
    "run-u8.test",
    "run-floats.test",
    "run-math.test",
    "run-bools.test",
    "run-strings.test",
    "run-bytes.test",
    "run-tuples.test",
    "run-lists.test",
    "run-dicts.test",
    "run-sets.test",
    "run-primitives.test",
    "run-loops.test",
    "run-exceptions.test",
    "run-imports.test",
    "run-classes.test",
    "run-traits.test",
    "run-generators.test",
    "run-generics.test",
    "run-multimodule.test",
    "run-bench.test",
    "run-mypy-sim.test",
    "run-dunders.test",
    "run-dunders-special.test",
    "run-singledispatch.test",
    "run-attrs.test",
    "run-signatures.test",
    "run-weakref.test",
    "run-python37.test",
    "run-python38.test",
    "run-librt-strings.test",
    "run-base64.test",
    "run-librt-time.test",
    "run-match.test",
    "run-vecs-i64-interp.test",
    "run-vecs-misc-interp.test",
    "run-vecs-t-interp.test",
    "run-vecs-nested-interp.test",
    "run-vecs-i64.test",
    "run-vecs-misc.test",
    "run-vecs-t.test",
    "run-vecs-nested.test",
]

# Cases relying on Python 3.12+ features only run on new enough interpreters.
if sys.version_info >= (3, 12):
    files.append("run-python312.test")
# Template for the generated setup.py used to build test extension modules.
# Filled via str.format with: module list, separate, cgen input path,
# multi_file, opt level, install_librt, experimental_features.
setup_format = """\
from setuptools import setup
from mypyc.build import mypycify
setup(name='test_run_output',
ext_modules=mypycify({}, separate={}, skip_cgen_input={!r}, strip_asserts=False,
multi_file={}, opt_level='{}', install_librt={},
experimental_features={}),
)
"""

# Directory (under tmp/) where build artifacts are placed between steps.
WORKDIR = "build"
def run_setup(script_name: str, script_args: list[str]) -> bool:
    """Run a setup script in a somewhat controlled environment.

    This is adapted from code in distutils and our goal here is that is
    faster to not need to spin up a python interpreter to run it.

    We had to fork it because the real run_setup swallows errors
    and KeyboardInterrupt with no way to recover them (!).

    The real version has some extra features that we removed since
    we weren't using them.

    Returns whether the setup succeeded.
    """
    saved_argv = sys.argv.copy()
    script_globals = {"__file__": script_name}
    try:
        try:
            sys.argv[0] = script_name
            sys.argv[1:] = script_args
            with open(script_name, "rb") as f:
                source = f.read()
            exec(source, script_globals)
        finally:
            # Always restore the interpreter's original argv.
            sys.argv = saved_argv
    except SystemExit as e:
        # distutils converts KeyboardInterrupt into a SystemExit with
        # "interrupted" as the argument. Convert it back so that
        # pytest will exit instead of just failing the test.
        if e.code == "interrupted":
            raise KeyboardInterrupt from e
        return e.code == 0 or e.code is None
    return True
@contextlib.contextmanager
def chdir_manager(target: str) -> Iterator[None]:
    """Temporarily change the working directory to *target*.

    The previous working directory is restored on exit, even if the
    body raises.
    """
    original = os.getcwd()
    os.chdir(target)
    try:
        yield
    finally:
        os.chdir(original)
class TestRun(MypycDataSuite):
"""Test cases that build a C extension and run code."""
files = files
base_path = test_temp_dir
optional_out = True
multi_file = False
separate = False # If True, using separate (incremental) compilation
strict_dunder_typing = False
def run_case(self, testcase: DataDrivenTestCase) -> None:
with pytest.MonkeyPatch.context() as mp:
mp.delenv("CFLAGS", raising=False)
# setup.py wants to be run from the root directory of the package, which we accommodate
# by chdiring into tmp/
with (
use_custom_builtins(os.path.join(self.data_prefix, ICODE_GEN_BUILTINS), testcase),
chdir_manager("tmp"),
):
self.run_case_inner(testcase)
def run_case_inner(self, testcase: DataDrivenTestCase) -> None:
if not os.path.isdir(WORKDIR): # (one test puts something in build...)
os.mkdir(WORKDIR)
text = "\n".join(testcase.input)
with open("native.py", "w", encoding="utf-8") as f:
f.write(text)
with open("interpreted.py", "w", encoding="utf-8") as f:
f.write(text)
shutil.copyfile(TESTUTIL_PATH, "testutil.py")
step = 1
self.run_case_step(testcase, step)
steps = testcase.find_steps()
if steps == [[]]:
steps = []
for operations in steps:
# To make sure that any new changes get picked up as being
# new by distutils, shift the mtime of all of the
# generated artifacts back by a second.
fudge_dir_mtimes(WORKDIR, -1)
# On some OS, changing the mtime doesn't work reliably. As
# a workaround, sleep.
# TODO: Figure out a better approach, since this slows down tests.
time.sleep(1.0)
step += 1
with chdir_manager(".."):
perform_file_operations(operations)
self.run_case_step(testcase, step)
def run_case_step(self, testcase: DataDrivenTestCase, incremental_step: int) -> None:
    """Compile and run one step of a run-style test case.

    Type checks and compiles the test program (native.py plus any other*
    modules) to a C extension, builds it via a generated setup.py, then
    runs driver.py in a subprocess and compares its output and exit
    status against the expected output for this step.

    Args:
        testcase: the data-driven test case being executed
        incremental_step: 1-based step number (steps > 1 reuse build
            artifacts from earlier steps when running incrementally)
    """
    benchmark_build = has_test_name_tag(testcase.name, "benchmark")
    bench = testcase.config.getoption("--bench", False) and (
        benchmark_build or "Benchmark" in testcase.name
    )

    # Mypy options used to type check the test program.
    options = Options()
    options.use_builtins_fixtures = True
    options.show_traceback = True
    options.strict_optional = True
    options.strict_bytes = True
    options.disable_bytearray_promotion = True
    options.disable_memoryview_promotion = True
    options.python_version = sys.version_info[:2]
    options.export_types = True
    options.preserve_asts = True
    options.allow_empty_bodies = True
    options.incremental = self.separate
    options.check_untyped_defs = True

    # Avoid checking modules/packages named 'unchecked', to provide a way
    # to test interacting with code we don't have types for.
    options.per_module_options["unchecked.*"] = {"follow_imports": "error"}

    source = build.BuildSource("native.py", "native", None)
    sources = [source]
    module_names = ["native"]
    module_paths = ["native.py"]

    # Hard code another module name to compile in the same compilation unit.
    to_delete = []
    for fn, text in testcase.files:
        fn = os.path.relpath(fn, test_temp_dir)

        if os.path.basename(fn).startswith("other") and fn.endswith(".py"):
            name = fn.split(".")[0].replace(os.sep, ".")
            module_names.append(name)
            sources.append(build.BuildSource(fn, name, None))
            to_delete.append(fn)
            module_paths.append(fn)

            # Keep an interpreted twin of each extra module for comparison runs.
            shutil.copyfile(fn, os.path.join(os.path.dirname(fn), name + "_interpreted.py"))
        elif fn.endswith("__init__.py"):
            pkg_dir = os.path.dirname(fn)
            if os.path.basename(pkg_dir).startswith("other"):
                name = pkg_dir.replace(os.sep, ".")
                module_names.append(name)
                sources.append(build.BuildSource(fn, name, None))
                to_delete.append(fn)
                module_paths.append(fn)

    for source in sources:
        options.per_module_options.setdefault(source.module, {})["mypyc"] = True

    separate = (
        self.get_separate("\n".join(testcase.input), incremental_step)
        if self.separate
        else False
    )

    groups = construct_groups(sources, separate, len(module_names) > 1, None)

    # Use _librt_internal to test mypy-specific parts of librt (they have
    # some special-casing in mypyc), for everything else use _librt suffix.
    librt_internal = has_test_name_tag(testcase.name, "librt_internal")
    librt = has_test_name_tag(testcase.name, "librt")
    # Enable experimental features (local librt build also includes experimental features)
    experimental_features = has_test_name_tag(testcase.name, "experimental")

    try:
        compiler_options = CompilerOptions(
            multi_file=self.multi_file,
            separate=self.separate,
            strict_dunder_typing=self.strict_dunder_typing,
            depends_on_librt_internal=librt_internal,
            experimental_features=experimental_features,
            strict_traceback_checks=True,
        )
        result = emitmodule.parse_and_typecheck(
            sources=sources,
            options=options,
            compiler_options=compiler_options,
            groups=groups,
            alt_lib_path=".",
        )
        errors = Errors(options)
        ir, cfiles, _ = emitmodule.compile_modules_to_c(
            result, compiler_options=compiler_options, errors=errors, groups=groups
        )
        deps = sorted(dep.path for dep in collect_source_dependencies(ir))
        if errors.num_errors:
            errors.flush_errors()
            assert False, "Compile error"
    except CompileError as e:
        # Translate line numbers so errors point at the .test file.
        for line in e.messages:
            print(fix_native_line_number(line, testcase.file, testcase.line))
        assert False, "Compile error"
    finally:
        result.manager.metastore.close()

    # Check that serialization works on this IR. (Only on the first
    # step because the returned ir only includes updated code.)
    if incremental_step == 1:
        check_serialization_roundtrip(ir)

    opt_level = 3 if benchmark_build else int(os.environ.get("MYPYC_OPT_LEVEL", 0))

    setup_file = os.path.abspath(os.path.join(WORKDIR, "setup.py"))
    # We pass the C file information to the build script via setup.py unfortunately
    # Note: install_librt is always False since we use cached librt from librt_cache
    with open(setup_file, "w", encoding="utf-8") as f:
        f.write(
            setup_format.format(
                module_paths,
                separate,
                (cfiles, deps),
                self.multi_file,
                opt_level,
                False,  # install_librt - use cached version instead
                experimental_features,
            )
        )

    if librt:
        # Use cached pre-built librt instead of rebuilding for each test
        cached_librt = get_librt_path(experimental_features, opt_level=str(opt_level))
        shutil.copytree(os.path.join(cached_librt, "librt"), "librt")

    if not run_setup(setup_file, ["build_ext", "--inplace"]):
        if testcase.config.getoption("--mypyc-showc"):
            show_c(cfiles)
        copy_output_files(mypyc_output_dir)
        assert False, "Compilation failed"

    # Assert that an output file got created
    suffix = "pyd" if sys.platform == "win32" else "so"
    assert glob.glob(f"native.*.{suffix}") or glob.glob(f"native.{suffix}")

    driver_path = "driver.py"
    if not os.path.isfile(driver_path):
        # No driver.py provided by test case. Use the default one
        # (mypyc/test-data/driver/driver.py) that calls each
        # function named test_*.
        default_driver = os.path.join(test_data_prefix, "driver", "driver.py")
        shutil.copy(default_driver, driver_path)

    env = os.environ.copy()
    env["MYPYC_RUN_BENCH"] = "1" if bench else "0"

    debugger = testcase.config.getoption("debugger")
    if debugger:
        if debugger == "lldb":
            subprocess.check_call(["lldb", "--", sys.executable, driver_path], env=env)
        elif debugger == "gdb":
            subprocess.check_call(["gdb", "--args", sys.executable, driver_path], env=env)
        else:
            assert False, "Unsupported debugger"
        # TODO: find a way to automatically disable capturing
        # stdin/stdout when in debugging mode
        assert False, (
            "Test can't pass in debugging mode. "
            "(Make sure to pass -s to pytest to interact with the debugger)"
        )
    proc = subprocess.Popen(
        [sys.executable, driver_path],
        stdout=subprocess.PIPE,
        stderr=subprocess.STDOUT,
        env=env,
    )
    if sys.version_info >= (3, 12):
        # TODO: testDecorators1 hangs on 3.12, remove this once fixed
        proc.wait(timeout=30)
    output = proc.communicate()[0].decode("utf8")
    # Strip the cwd prefix from traceback paths so output is stable.
    output = output.replace(f' File "{os.getcwd()}{os.sep}', ' File "')
    outlines = output.splitlines()

    if testcase.config.getoption("--mypyc-showc"):
        show_c(cfiles)
    if proc.returncode != 0:
        print()
        # -11 is SIGSEGV on POSIX systems.
        signal = proc.returncode == -11
        extra = ""
        if signal:
            extra = " (likely segmentation fault)"
        print(f"*** Exit status: {proc.returncode}{extra}")
        if signal and not sys.platform.startswith("win"):
            print()
            if sys.platform == "darwin":
                debugger = "lldb"
            else:
                debugger = "gdb"
            print(
                f'hint: Use "pytest -n0 -s --mypyc-debug={debugger} -k <name-substring>" to run test in debugger'
            )
            print("hint: You may need to build a debug version of Python first and use it")
            print('hint: See also "Debugging Segfaults" in mypyc/doc/dev-intro.md')
        copy_output_files(mypyc_output_dir)

    # Verify output.
    if bench:
        print("Test output:")
        print(output)
    else:
        if incremental_step == 1:
            msg = "Invalid output"
            expected = testcase.output
        else:
            msg = f"Invalid output (step {incremental_step})"
            expected = testcase.output2.get(incremental_step, [])

        if not expected:
            # Tweak some line numbers, but only if the expected output is empty,
            # as tweaked output might not match expected output.
            outlines = [
                fix_native_line_number(line, testcase.file, testcase.line) for line in outlines
            ]
        assert_test_output(testcase, outlines, msg, expected)

    if incremental_step > 1 and options.incremental:
        suffix = "" if incremental_step == 2 else str(incremental_step - 1)
        expected_rechecked = testcase.expected_rechecked_modules.get(incremental_step - 1)
        if expected_rechecked is not None:
            assert_module_equivalence(
                "rechecked" + suffix, expected_rechecked, result.manager.rechecked_modules
            )
        expected_stale = testcase.expected_stale_modules.get(incremental_step - 1)
        if expected_stale is not None:
            assert_module_equivalence(
                "stale" + suffix, expected_stale, result.manager.stale_modules
            )

    assert proc.returncode == 0
def get_separate(self, program_text: str, incremental_step: int) -> Any:
    """Extract the `# separateN: [...]` magic comment from a test program.

    First looks for a comment specific to this incremental step, then for
    the generic `# separate:` form. Returns the literal-eval'd group
    specification, or True when no such comment exists (meaning: use the
    default one-module-per-group separation).
    """
    pattern = r"# separate{}: (\[.*\])$"
    for step_suffix in (incremental_step, ""):
        match = re.search(pattern.format(step_suffix), program_text, flags=re.MULTILINE)
        if match:
            return ast.literal_eval(match.group(1))
    return True
class TestRunMultiFile(TestRun):
    """Run the main multi-module tests in multi-file compilation mode.

    In multi-file mode each module gets compiled into a separate C file,
    but all modules (C files) are compiled together.
    """

    # Passed through to CompilerOptions(multi_file=...) in run_case_step.
    multi_file = True
    test_name_suffix = "_multi"
    files = ["run-multimodule.test", "run-mypy-sim.test"]
class TestRunSeparate(TestRun):
    """Run the main multi-module tests in separate compilation mode.

    In this mode there are multiple compilation groups, which are compiled
    incrementally. Each group is compiled to a separate C file, and these C
    files are compiled separately.

    Each compiled module is placed into a separate compilation group, unless
    overridden by a special comment. Consider this example:

      # separate: [(["other.py", "other_b.py"], "stuff")]

    This puts other.py and other_b.py into a compilation group named "stuff".
    Any files not mentioned in the comment will get single-file groups.
    """

    # Picked up by run_case_step (and get_separate) to drive incremental builds.
    separate = True
    test_name_suffix = "_separate"
    files = ["run-multimodule.test", "run-mypy-sim.test"]
class TestRunStrictDunderTyping(TestRun):
    """Run the tests with strict dunder typing."""

    # Passed through to CompilerOptions(strict_dunder_typing=...) in run_case_step.
    strict_dunder_typing = True
    test_name_suffix = "_dunder_typing"
    files = ["run-dunders.test", "run-floats.test"]
def fix_native_line_number(message: str, fnam: str, delta: int) -> str:
    """Update code locations in test case output to point to the .test file.

    The test case body is written to native.py, so line numbers in test
    output are usually relative to native.py. Rewrite those locations so
    they are relative to the .test file that contains the case, and swap
    in the .test file's name.

    Args:
        message: message to update
        fnam: path of the .test file
        delta: line number of the beginning of the test case in the .test file

    Returns the updated message (unchanged if no location was found).
    """
    base = os.path.basename(fnam)

    def shift_error_loc(m: re.Match[str]) -> str:
        return "%s:%d:" % (base, int(m.group(1)) + delta)

    def shift_traceback_loc(m: re.Match[str]) -> str:
        return '"%s", line %d,' % (base, int(m.group(1)) + delta)

    # mypy-style "file:line:" locations.
    message = re.sub(r"native\.py:([0-9]+):", shift_error_loc, message)
    # Python traceback-style locations.
    return re.sub(r'"native.py", line ([0-9]+),', shift_traceback_loc, message)
def copy_output_files(target_dir: str) -> None:
    """Copy generated C and header files into target_dir for inspection.

    Creating target_dir doubles as a latch: if the directory already
    exists (or cannot be created), a previous failure already captured
    its output and this call does nothing.
    """
    try:
        os.mkdir(target_dir)
    except OSError:
        # Only copy data for the first failure, to avoid excessive output in case
        # many tests fail
        return

    generated = glob.glob("build/*.[ch]")
    for path in generated:
        shutil.copy(path, target_dir)

    sys.stderr.write(f"\nGenerated files: {target_dir} (for first failure only)\n\n")

View file

@ -0,0 +1,104 @@
"""Functions to check that serialization round-tripped properly."""
# This file is named test_serialization.py even though it doesn't
# contain its own tests so that pytest will rewrite the asserts...
from __future__ import annotations
from collections.abc import Iterable
from typing import Any
from mypyc.ir.class_ir import ClassIR
from mypyc.ir.func_ir import FuncDecl, FuncIR, FuncSignature
from mypyc.ir.module_ir import ModuleIR, deserialize_modules
from mypyc.ir.ops import DeserMaps
from mypyc.ir.rtypes import RType
from mypyc.sametype import is_same_signature, is_same_type
def get_dict(x: Any) -> dict[str, Any]:
    """Return a name -> value mapping of x's attributes.

    Native (mypyc-compiled) objects advertise their attributes via
    __mypyc_attrs__; for those, collect only the attributes that are
    actually set. Plain Python objects just get a copy of __dict__.
    """
    if not hasattr(x, "__mypyc_attrs__"):
        return dict(x.__dict__)
    return {name: getattr(x, name) for name in x.__mypyc_attrs__ if hasattr(x, name)}
def get_function_dict(x: FuncIR) -> dict[str, Any]:
    """Get a dict of function attributes safe to compare across serialization.

    IR bodies ("blocks") and environments are not preserved by
    serialization, so strip them before comparing.
    """
    attrs = get_dict(x)
    for volatile in ("blocks", "env"):
        attrs.pop(volatile, None)
    return attrs
def assert_blobs_same(x: Any, y: Any, trail: tuple[Any, ...]) -> None:
    """Compare two blobs of IR as best we can.

    FuncDecls, FuncIRs, and ClassIRs are compared by fullname to avoid
    infinite recursion.
    (More detailed comparisons should be done manually.)

    Types and signatures are compared using mypyc.sametype.

    Containers are compared recursively.

    Anything else is compared with ==.

    The `trail` argument is used in error messages.
    """
    # NOTE: the isinstance chain below is order-sensitive; in particular the
    # dict case must come before the generic Iterable case.
    assert type(x) is type(y), (f"Type mismatch at {trail}", type(x), type(y))
    if isinstance(x, (FuncDecl, FuncIR, ClassIR)):
        # Compared by name only, to avoid recursing through the whole IR graph.
        assert x.fullname == y.fullname, f"Name mismatch at {trail}"
    elif isinstance(x, dict):
        assert len(x.keys()) == len(y.keys()), f"Keys mismatch at {trail}"
        # Compare entries pairwise in insertion order.
        for (xk, xv), (yk, yv) in zip(x.items(), y.items()):
            assert_blobs_same(xk, yk, trail + ("keys",))
            assert_blobs_same(xv, yv, trail + (xk,))
    elif isinstance(x, Iterable) and not isinstance(x, (str, set)):
        # Special case iterables to generate better assert error messages.
        # We can't use this for sets since the ordering is unpredictable,
        # and strings should be treated as atomic values.
        for i, (xv, yv) in enumerate(zip(x, y)):
            assert_blobs_same(xv, yv, trail + (i,))
    elif isinstance(x, RType):
        assert is_same_type(x, y), f"RType mismatch at {trail}"
    elif isinstance(x, FuncSignature):
        assert is_same_signature(x, y), f"Signature mismatch at {trail}"
    else:
        assert x == y, f"Value mismatch at {trail}"
def assert_modules_same(ir1: ModuleIR, ir2: ModuleIR) -> None:
    """Assert that two module IRs are the same (*).

    * Or rather, as much as we care about preserving across
    serialization. We drop the actual IR bodies of functions but try
    to preserve everything else.
    """
    assert ir1.fullname == ir2.fullname
    assert ir1.imports == ir2.imports

    for cls_a, cls_b in zip(ir1.classes, ir2.classes):
        assert_blobs_same(get_dict(cls_a), get_dict(cls_b), (ir1.fullname, cls_a.fullname))

    for fn_a, fn_b in zip(ir1.functions, ir2.functions):
        trail = (ir1.fullname, fn_a.fullname)
        # Compare both the function and its declaration, sans IR bodies.
        assert_blobs_same(get_function_dict(fn_a), get_function_dict(fn_b), trail)
        assert_blobs_same(get_dict(fn_a.decl), get_dict(fn_b.decl), trail)

    assert_blobs_same(ir1.final_names, ir2.final_names, (ir1.fullname, "final_names"))
def check_serialization_roundtrip(irs: dict[str, ModuleIR]) -> None:
    """Check that we can serialize modules out and deserialize them to the same thing."""
    blobs = {name: module.serialize() for name, module in irs.items()}
    restored = deserialize_modules(blobs, DeserMaps({}, {}))
    assert irs.keys() == restored.keys()
    for name in irs:
        assert_modules_same(irs[name], restored[name])

View file

@ -0,0 +1,149 @@
from __future__ import annotations
import unittest
from typing import cast
from mypy.build import Graph
from mypy.nodes import Import, MypyFile
from mypy.options import Options
from mypyc.errors import Errors
from mypyc.irbuild.builder import IRBuilder
from mypyc.irbuild.mapper import Mapper
from mypyc.irbuild.prebuildvisitor import PreBuildVisitor
from mypyc.irbuild.statement import (
IMPORT_NATIVE_ATTR,
IMPORT_NATIVE_SUBMODULE,
IMPORT_NON_NATIVE,
classify_import_from,
group_consecutive,
import_globals_id_and_name,
split_import_group_to_python_and_native,
)
from mypyc.irbuild.visitor import IRBuilderVisitor
from mypyc.options import CompilerOptions
def make_builder(
    *,
    module_name: str = "pkg.current",
    native_modules: set[str] | None = None,
    same_group_modules: set[str] | None = None,
    graph: set[str] | None = None,
) -> IRBuilder:
    """Construct a minimal IRBuilder for exercising import-handling helpers.

    Modules in native_modules are registered in the group map; those also
    listed in same_group_modules share the current module's compilation
    group, the rest go into a different group. Names in graph appear as
    (dummy) entries in the build graph.
    """
    native = native_modules or set()
    same_group = same_group_modules or set()

    group_map: dict[str, str | None] = {module_name: "current-group"}
    for mod in native:
        group_map[mod] = "current-group" if mod in same_group else "other-group"

    errors = Errors(Options())
    current_file = MypyFile([], [])
    current_file._fullname = module_name
    visitor = PreBuildVisitor(errors, current_file, {}, {})
    # The graph entries only need to exist; their values are never inspected.
    dummy_graph = cast(Graph, {name: object() for name in (graph or set())})
    builder = IRBuilder(
        module_name,
        {},
        dummy_graph,
        errors,
        Mapper(group_map),
        visitor,
        IRBuilderVisitor(),
        CompilerOptions(),
        {},
    )
    builder.set_module(module_name, module_name.replace(".", "/") + ".py")
    return builder
class TestStatementHelpers(unittest.TestCase):
    """Unit tests for the import-classification helpers in mypyc.irbuild.statement."""

    def test_import_globals_id_and_name_for_plain_import(self) -> None:
        # Plain `import foo.bar` binds the top-level package name `foo`.
        assert import_globals_id_and_name("foo.bar", None) == ("foo", "foo")

    def test_import_globals_id_and_name_for_import_as(self) -> None:
        # `import foo.bar as baz` binds the full module path to the alias.
        assert import_globals_id_and_name("foo.bar", "baz") == ("foo.bar", "baz")

    def test_split_import_group_to_python_and_native_preserves_runs(self) -> None:
        builder = make_builder(
            native_modules={"pkg.alpha", "pkg.beta", "pkg.gamma"},
            same_group_modules={"pkg.alpha", "pkg.beta", "pkg.gamma"},
        )
        group = [
            Import([("pkg.alpha", None), ("py_mod", None)]),
            Import([("pkg.beta", "beta_alias"), ("foreign.mod", None), ("pkg.gamma", None)]),
        ]
        group[0].line = 10
        group[1].line = 20
        result = split_import_group_to_python_and_native(builder, group)
        # Runs alternate between native (True) and non-native (False) imports,
        # preserving source order, aliases and line numbers.
        assert result == [
            ([("pkg.alpha", None, 10)], True),
            ([("py_mod", None, 10)], False),
            ([("pkg.beta", "beta_alias", 20)], True),
            ([("foreign.mod", None, 20)], False),
            ([("pkg.gamma", None, 20)], True),
        ]

    def test_group_consecutive_groups_by_kind_and_preserves_aliases(self) -> None:
        buckets = group_consecutive(
            [
                (IMPORT_NATIVE_SUBMODULE, "a", "a"),
                (IMPORT_NATIVE_SUBMODULE, "b", "b_alias"),
                (IMPORT_NON_NATIVE, "c", "c"),
                (IMPORT_NATIVE_ATTR, "d", "d_alias"),
                (IMPORT_NATIVE_ATTR, "e", "e"),
            ]
        )
        # Consecutive entries of the same kind are merged into one bucket.
        assert [(bucket.kind, bucket.names, bucket.as_names) for bucket in buckets] == [
            (IMPORT_NATIVE_SUBMODULE, ["a", "b"], ["a", "b_alias"]),
            (IMPORT_NON_NATIVE, ["c"], ["c"]),
            (IMPORT_NATIVE_ATTR, ["d", "e"], ["d_alias", "e"]),
        ]

    def test_classify_import_from_groups_consecutive_kinds(self) -> None:
        builder = make_builder(
            native_modules={"pkg.native_a", "pkg.native_b"},
            same_group_modules={"pkg.native_a", "pkg.native_b"},
            graph={"pkg.native_a", "pkg.native_b", "pkg.foreign_a", "pkg.foreign_b"},
        )
        buckets = classify_import_from(
            builder,
            "pkg",
            ["native_a", "native_b", "foreign_a", "foreign_b"],
            ["native_a", "native_b_alias", "foreign_a", "foreign_b_alias"],
            parent_is_native=True,
        )
        assert [(bucket.kind, bucket.names, bucket.as_names) for bucket in buckets] == [
            (IMPORT_NATIVE_SUBMODULE, ["native_a", "native_b"], ["native_a", "native_b_alias"]),
            (IMPORT_NON_NATIVE, ["foreign_a", "foreign_b"], ["foreign_a", "foreign_b_alias"]),
        ]

    def test_classify_import_from_treats_missing_name_under_native_parent_as_attr(self) -> None:
        builder = make_builder(graph={"pkg.foreign"})
        buckets = classify_import_from(
            builder,
            "pkg",
            ["attr_name", "foreign"],
            ["attr_alias", "foreign_alias"],
            parent_is_native=True,
        )
        # A name that isn't a module in the graph is treated as an attribute of
        # the (native) parent package.
        assert [(bucket.kind, bucket.names, bucket.as_names) for bucket in buckets] == [
            (IMPORT_NATIVE_ATTR, ["attr_name"], ["attr_alias"]),
            (IMPORT_NON_NATIVE, ["foreign"], ["foreign_alias"]),
        ]

    def test_classify_import_from_without_native_parent_never_uses_native_attr(self) -> None:
        builder = make_builder()
        buckets = classify_import_from(
            builder, "pkg", ["attr_name"], ["attr_alias"], parent_is_native=False
        )
        assert [(bucket.kind, bucket.names, bucket.as_names) for bucket in buckets] == [
            (IMPORT_NON_NATIVE, ["attr_name"], ["attr_alias"])
        ]

View file

@ -0,0 +1,112 @@
from __future__ import annotations
import unittest
from mypyc.ir.rtypes import (
RStruct,
bool_rprimitive,
int32_rprimitive,
int64_rprimitive,
int_rprimitive,
object_rprimitive,
)
from mypyc.rt_subtype import is_runtime_subtype
class TestStruct(unittest.TestCase):
    """Tests for RStruct layout (size/offsets), formatting, subtyping and equality."""

    def test_struct_offsets(self) -> None:
        # test per-member alignment
        r = RStruct("", [], [bool_rprimitive, int32_rprimitive, int64_rprimitive])
        assert r.size == 16
        assert r.offsets == [0, 4, 8]

        # test final alignment
        r1 = RStruct("", [], [bool_rprimitive, bool_rprimitive])
        assert r1.size == 2
        assert r1.offsets == [0, 1]
        r2 = RStruct("", [], [int32_rprimitive, bool_rprimitive])
        r3 = RStruct("", [], [int64_rprimitive, bool_rprimitive])
        assert r2.offsets == [0, 4]
        assert r3.offsets == [0, 8]
        assert r2.size == 8
        assert r3.size == 16
        r4 = RStruct("", [], [bool_rprimitive, bool_rprimitive, bool_rprimitive, int32_rprimitive])
        assert r4.size == 8
        assert r4.offsets == [0, 1, 2, 4]

        # test nested struct
        r5 = RStruct("", [], [bool_rprimitive, r])
        assert r5.offsets == [0, 8]
        assert r5.size == 24
        r6 = RStruct("", [], [int32_rprimitive, r5])
        assert r6.offsets == [0, 8]
        assert r6.size == 32

        # test nested struct with alignment less than 8
        r7 = RStruct("", [], [bool_rprimitive, r4])
        assert r7.offsets == [0, 4]
        assert r7.size == 12

    def test_struct_str(self) -> None:
        r = RStruct("Foo", ["a", "b"], [bool_rprimitive, object_rprimitive])
        assert str(r) == "Foo{a:bool, b:object}"
        assert (
            repr(r) == "<RStruct Foo{a:<RPrimitive builtins.bool>, "
            "b:<RPrimitive builtins.object>}>"
        )
        r1 = RStruct("Bar", ["c"], [int32_rprimitive])
        assert str(r1) == "Bar{c:i32}"
        assert repr(r1) == "<RStruct Bar{c:<RPrimitive i32>}>"
        r2 = RStruct("Baz", [], [])
        assert str(r2) == "Baz{}"
        assert repr(r2) == "<RStruct Baz{}>"

    def test_runtime_subtype(self) -> None:
        # right type to check with
        r = RStruct("Foo", ["a", "b"], [bool_rprimitive, int_rprimitive])

        # using the exact same fields
        r1 = RStruct("Foo", ["a", "b"], [bool_rprimitive, int_rprimitive])

        # names different
        r2 = RStruct("Bar", ["c", "b"], [bool_rprimitive, int_rprimitive])

        # name different
        r3 = RStruct("Baz", ["a", "b"], [bool_rprimitive, int_rprimitive])

        # type different
        r4 = RStruct("FooBar", ["a", "b"], [bool_rprimitive, int32_rprimitive])

        # number of types different
        r5 = RStruct(
            "FooBarBaz", ["a", "b", "c"], [bool_rprimitive, int_rprimitive, bool_rprimitive]
        )

        # Only a struct with an identical name, field names, and field types
        # counts as a runtime subtype.
        assert is_runtime_subtype(r1, r) is True
        assert is_runtime_subtype(r2, r) is False
        assert is_runtime_subtype(r3, r) is False
        assert is_runtime_subtype(r4, r) is False
        assert is_runtime_subtype(r5, r) is False

    def test_eq_and_hash(self) -> None:
        r = RStruct("Foo", ["a", "b"], [bool_rprimitive, int_rprimitive])

        # using the exact same fields
        r1 = RStruct("Foo", ["a", "b"], [bool_rprimitive, int_rprimitive])
        assert hash(r) == hash(r1)
        assert r == r1

        # different name
        r2 = RStruct("Foq", ["a", "b"], [bool_rprimitive, int_rprimitive])
        assert hash(r) != hash(r2)
        assert r != r2

        # different names
        r3 = RStruct("Foo", ["a", "c"], [bool_rprimitive, int_rprimitive])
        assert hash(r) != hash(r3)
        assert r != r3

        # different type
        r4 = RStruct("Foo", ["a", "b"], [bool_rprimitive, int_rprimitive, bool_rprimitive])
        assert hash(r) != hash(r4)
        assert r != r4

View file

@ -0,0 +1,33 @@
from __future__ import annotations
import unittest
from mypyc.ir.class_ir import ClassIR
from mypyc.ir.rtypes import (
RInstance,
RTuple,
RUnion,
bool_rprimitive,
int_rprimitive,
list_rprimitive,
object_rprimitive,
)
class TestTupleNames(unittest.TestCase):
    """Check the compact unique_id encoding generated for RTuple types."""

    def setUp(self) -> None:
        self.inst_a = RInstance(ClassIR("A", "__main__"))
        self.inst_b = RInstance(ClassIR("B", "__main__"))

    def test_names(self) -> None:
        def uid(items: list) -> str:
            return RTuple(items).unique_id

        # Primitive ints keep their own letter; all boxed types collapse to "O",
        # so tuples differing only in instance types share an id.
        assert uid([int_rprimitive, int_rprimitive]) == "T2II"
        assert uid([list_rprimitive, object_rprimitive, self.inst_a]) == "T3OOO"
        assert uid([list_rprimitive, object_rprimitive, self.inst_b]) == "T3OOO"
        assert uid([]) == "T0"
        # Nested tuples embed the encoding of their elements.
        assert uid([RTuple([]), RTuple([int_rprimitive, int_rprimitive])]) == "T2T0T2II"
        assert uid([bool_rprimitive, RUnion([bool_rprimitive, int_rprimitive])]) == "T2CO"

View file

@ -0,0 +1,97 @@
"""Test cases for various RType operations."""
from __future__ import annotations
import unittest
from mypyc.ir.rtypes import (
RUnion,
bit_rprimitive,
bool_rprimitive,
int16_rprimitive,
int32_rprimitive,
int64_rprimitive,
int_rprimitive,
object_rprimitive,
short_int_rprimitive,
str_rprimitive,
)
from mypyc.rt_subtype import is_runtime_subtype
from mypyc.subtype import is_subtype
# The fixed-width native integer primitives exercised by the subtype tests below.
native_int_types = [int64_rprimitive, int32_rprimitive, int16_rprimitive]
class TestSubtype(unittest.TestCase):
    """Static subtyping relations between primitive RTypes."""

    def test_bit(self) -> None:
        # bit is the narrowest type: a subtype of every integer-like primitive.
        assert is_subtype(bit_rprimitive, bool_rprimitive)
        assert is_subtype(bit_rprimitive, int_rprimitive)
        assert is_subtype(bit_rprimitive, short_int_rprimitive)
        for rt in native_int_types:
            assert is_subtype(bit_rprimitive, rt)

    def test_bool(self) -> None:
        assert not is_subtype(bool_rprimitive, bit_rprimitive)
        assert is_subtype(bool_rprimitive, int_rprimitive)
        assert is_subtype(bool_rprimitive, short_int_rprimitive)
        for rt in native_int_types:
            assert is_subtype(bool_rprimitive, rt)

    def test_int64(self) -> None:
        # Native int types are subtypes of int but never of each other.
        assert is_subtype(int64_rprimitive, int64_rprimitive)
        assert is_subtype(int64_rprimitive, int_rprimitive)
        assert not is_subtype(int64_rprimitive, short_int_rprimitive)
        assert not is_subtype(int64_rprimitive, int32_rprimitive)
        assert not is_subtype(int64_rprimitive, int16_rprimitive)

    def test_int32(self) -> None:
        assert is_subtype(int32_rprimitive, int32_rprimitive)
        assert is_subtype(int32_rprimitive, int_rprimitive)
        assert not is_subtype(int32_rprimitive, short_int_rprimitive)
        assert not is_subtype(int32_rprimitive, int64_rprimitive)
        assert not is_subtype(int32_rprimitive, int16_rprimitive)

    def test_int16(self) -> None:
        assert is_subtype(int16_rprimitive, int16_rprimitive)
        assert is_subtype(int16_rprimitive, int_rprimitive)
        assert not is_subtype(int16_rprimitive, short_int_rprimitive)
        assert not is_subtype(int16_rprimitive, int64_rprimitive)
        assert not is_subtype(int16_rprimitive, int32_rprimitive)
class TestRuntimeSubtype(unittest.TestCase):
    """Runtime subtyping, which is stricter than static subtyping."""

    def test_bit(self) -> None:
        assert is_runtime_subtype(bit_rprimitive, bool_rprimitive)
        assert not is_runtime_subtype(bit_rprimitive, int_rprimitive)

    def test_bool(self) -> None:
        assert not is_runtime_subtype(bool_rprimitive, bit_rprimitive)
        assert not is_runtime_subtype(bool_rprimitive, int_rprimitive)

    def test_union(self) -> None:
        mixed = RUnion([bool_rprimitive, int_rprimitive])
        # A union is never a runtime subtype of a plain primitive,
        # nor the other way around.
        for plain in (short_int_rprimitive, int_rprimitive):
            assert not is_runtime_subtype(mixed, plain)
            assert not is_runtime_subtype(plain, mixed)
class TestUnionSimplification(unittest.TestCase):
    """RUnion.make_simplified_union flattening and deduplication."""

    def test_simple_type_result(self) -> None:
        # A singleton union collapses to the item itself.
        assert RUnion.make_simplified_union([int_rprimitive]) == int_rprimitive

    def test_remove_duplicate(self) -> None:
        assert RUnion.make_simplified_union([int_rprimitive, int_rprimitive]) == int_rprimitive

    def test_cannot_simplify(self) -> None:
        assert RUnion.make_simplified_union(
            [int_rprimitive, str_rprimitive, object_rprimitive]
        ) == RUnion([int_rprimitive, str_rprimitive, object_rprimitive])

    def test_nested(self) -> None:
        # Nested unions are flattened and duplicates removed recursively.
        assert RUnion.make_simplified_union(
            [int_rprimitive, RUnion([str_rprimitive, int_rprimitive])]
        ) == RUnion([int_rprimitive, str_rprimitive])
        assert RUnion.make_simplified_union(
            [int_rprimitive, RUnion([str_rprimitive, RUnion([int_rprimitive])])]
        ) == RUnion([int_rprimitive, str_rprimitive])

View file

@ -0,0 +1,300 @@
"""Helpers for writing tests"""
from __future__ import annotations
import contextlib
import os
import os.path
import re
import shutil
from collections.abc import Callable, Iterator
from mypy import build
from mypy.errors import CompileError
from mypy.nodes import Expression, MypyFile
from mypy.options import Options
from mypy.test.config import test_temp_dir
from mypy.test.data import DataDrivenTestCase, DataSuite
from mypy.test.helpers import assert_string_arrays_equal
from mypy.types import Type
from mypyc.analysis.ircheck import assert_func_ir_valid
from mypyc.common import IS_32_BIT_PLATFORM, PLATFORM_SIZE
from mypyc.errors import Errors
from mypyc.ir.func_ir import FuncIR
from mypyc.ir.module_ir import ModuleIR
from mypyc.irbuild.main import build_ir
from mypyc.irbuild.mapper import Mapper
from mypyc.options import CompilerOptions
from mypyc.test.config import test_data_prefix
# The builtins stub used during icode generation test cases.
ICODE_GEN_BUILTINS = os.path.join(test_data_prefix, "fixtures/ir.py")

# The testutil support library (copied next to compiled test programs as testutil.py).
TESTUTIL_PATH = os.path.join(test_data_prefix, "fixtures/testutil.py")
class MypycDataSuite(DataSuite):
    """Base class for mypyc data-driven test suites (see mypy.test.data)."""

    # Need to list no files, since this will be picked up as a suite of tests
    files: list[str] = []
    data_prefix = test_data_prefix
def builtins_wrapper(
    func: Callable[[DataDrivenTestCase], None], path: str
) -> Callable[[DataDrivenTestCase], None]:
    """Decorate a function that implements a data-driven test case to copy an
    alternative builtins module implementation in place before performing the
    test case. Clean up after executing the test case.
    """

    def wrapped(testcase: DataDrivenTestCase) -> None:
        perform_test(func, path, testcase)

    return wrapped
@contextlib.contextmanager
def use_custom_builtins(builtins_path: str, testcase: DataDrivenTestCase) -> Iterator[None]:
    """Temporarily install builtins_path as builtins.pyi for the test case.

    If the test case ships its own builtins.pyi fixture, nothing is copied;
    otherwise the default stub is copied in and always removed afterwards.
    """
    has_own_stub = any(
        os.path.basename(path) == "builtins.pyi" for path, _ in testcase.files
    )
    copied_stub = None
    if not has_own_stub:
        # Use default builtins.
        copied_stub = os.path.abspath(os.path.join(test_temp_dir, "builtins.pyi"))
        shutil.copyfile(builtins_path, copied_stub)

    # Actually perform the test case.
    try:
        yield None
    finally:
        if copied_stub is not None:
            # Clean up.
            os.remove(copied_stub)
def perform_test(
    func: Callable[[DataDrivenTestCase], None], builtins_path: str, testcase: DataDrivenTestCase
) -> None:
    """Run a data-driven test case with builtins_path installed as builtins.py.

    If the test case provides its own builtins.py fixture, nothing is
    copied. Otherwise the default stub is copied into the temp dir and --
    consistent with use_custom_builtins above -- removed in a finally
    block, so a raising test case no longer leaks a stray builtins.py
    into the shared temp directory.
    """
    for path, _ in testcase.files:
        if os.path.basename(path) == "builtins.py":
            default_builtins = False
            break
    else:
        # Use default builtins.
        builtins = os.path.join(test_temp_dir, "builtins.py")
        shutil.copyfile(builtins_path, builtins)
        default_builtins = True

    # Actually perform the test case.
    try:
        func(testcase)
    finally:
        if default_builtins:
            # Clean up even when func raised.
            os.remove(builtins)
def build_ir_for_single_file(
    input_lines: list[str], compiler_options: CompilerOptions | None = None
) -> list[FuncIR]:
    """Build IR for a one-file program and return just its function IRs."""
    module, _, _, _ = build_ir_for_single_file2(input_lines, compiler_options)
    return module.functions
def build_ir_for_single_file2(
    input_lines: list[str], compiler_options: CompilerOptions | None = None
) -> tuple[ModuleIR, MypyFile, dict[Expression, Type], Mapper]:
    """Type check a one-file program and build mypyc IR for it.

    Returns the module IR, the parsed tree, the inferred expression types
    and the type-to-IR mapper. Raises CompileError on any mypy or mypyc
    error.
    """
    program_text = "\n".join(input_lines)

    # By default generate IR compatible with the earliest supported Python C API.
    # If a test needs more recent API features, this should be overridden.
    compiler_options = compiler_options or CompilerOptions(capi_version=(3, 9))
    options = Options()
    options.show_traceback = True
    options.hide_error_codes = True
    options.use_builtins_fixtures = True
    options.strict_optional = True
    options.python_version = compiler_options.python_version or (3, 9)
    options.export_types = True
    options.preserve_asts = True
    options.allow_empty_bodies = True
    options.strict_bytes = True
    options.disable_bytearray_promotion = True
    options.disable_memoryview_promotion = True
    # Only the main module is compiled natively.
    options.per_module_options["__main__"] = {"mypyc": True}

    # Construct the input as a single file.
    source = build.BuildSource("main", "__main__", program_text)

    # Parse and type check the input program.
    result = build.build(sources=[source], options=options, alt_lib_path=test_temp_dir)
    result.manager.metastore.close()
    if result.errors:
        raise CompileError(result.errors)
    errors = Errors(options)
    mapper = Mapper({"__main__": None})
    modules = build_ir(
        [result.files["__main__"]], result.graph, result.types, mapper, compiler_options, errors
    )
    if errors.num_errors:
        raise CompileError(errors.new_messages())
    module = list(modules.values())[0]
    # Sanity-check the generated IR before handing it to the test.
    for fn in module.functions:
        assert_func_ir_valid(fn)
    tree = result.graph[module.fullname].tree
    assert tree is not None
    return module, tree, result.types, mapper
def update_testcase_output(testcase: DataDrivenTestCase, output: list[str]) -> None:
    """Rewrite the [out] section of a test case in its .test file.

    Used by --update-data to replace the expected output of a failing
    test case with its actual output. Does nothing if the case header
    appears more than once (we can't tell which occurrence to edit).
    """
    # TODO: backport this to mypy
    assert testcase.old_cwd is not None, "test was not properly set up"
    testcase_path = os.path.join(testcase.old_cwd, testcase.file)
    with open(testcase_path) as f:
        data_lines = f.read().splitlines()

    # We can't rely on the test line numbers to *find* the test, since
    # we might fix multiple tests in a run. So find it by the case
    # header. Give up if there are multiple tests with the same name.
    test_slug = f"[case {testcase.name}]"
    if data_lines.count(test_slug) != 1:
        return
    start_idx = data_lines.index(test_slug)
    # BUG FIX: scan must start on the line right after the header
    # (start_idx + 1). The previous "+ 11" could begin the scan past the
    # next "[case ...]" header and clobber the following test case.
    stop_idx = start_idx + 1
    while stop_idx < len(data_lines) and not data_lines[stop_idx].startswith("[case "):
        stop_idx += 1

    test = data_lines[start_idx:stop_idx]
    out_start = test.index("[out]")
    test[out_start + 1 :] = output
    data_lines[start_idx:stop_idx] = test + [""]
    data = "\n".join(data_lines)
    with open(testcase_path, "w") as f:
        print(data, file=f)
def assert_test_output(
    testcase: DataDrivenTestCase,
    actual: list[str],
    message: str,
    expected: list[str] | None = None,
    formatted: list[str] | None = None,
) -> None:
    """Assert that a test case's actual output matches the expected output.

    When --update-data is given, a mismatch first rewrites the .test file
    with the actual output. `formatted` is accepted for interface
    compatibility but is not used here.
    """
    __tracebackhide__ = True
    want = testcase.output if expected is None else expected
    if want != actual and testcase.config.getoption("--update-data", False):
        update_testcase_output(testcase, actual)
    assert_string_arrays_equal(
        want, actual, f"{message} ({testcase.file}, line {testcase.line})"
    )
def get_func_names(expected: list[str]) -> list[str]:
    """Extract the function names from `def NAME(` lines, in order."""
    pattern = re.compile(r"def ([_a-zA-Z0-9.*$]+)\(")
    return [m.group(1) for line in expected if (m := pattern.match(line))]
def remove_comment_lines(a: list[str]) -> list[str]:
    """Return a copy of array with comments removed.

    Lines starting with '--' (but not with '---') are removed.
    """

    def is_comment(line: str) -> bool:
        stripped = line.strip()
        return stripped.startswith("--") and not stripped.startswith("---")

    return [line for line in a if not is_comment(line)]
def print_with_line_numbers(s: str) -> None:
    """Print s with each line prefixed by its 1-based line number."""
    for lineno, line in enumerate(s.splitlines(), start=1):
        print("%-4d %s" % (lineno, line))
def heading(text: str) -> None:
    """Print text framed by '=' bars, marking a section of test output."""
    bar = "=" * 20
    print(bar + " " + text + " " + bar)
def show_c(cfiles: list[list[tuple[str, str]]]) -> None:
    """Dump all generated C files (grouped by compilation group) to stdout."""
    heading("Generated C")
    for group in cfiles:
        for file_name, file_text in group:
            print(f"== {file_name} ==")
            print_with_line_numbers(file_text)
    heading("End C")
def fudge_dir_mtimes(dir: str, delta: int) -> None:
    """Shift the access/modification time of every file under dir by delta seconds."""
    for root, _, files in os.walk(dir):
        for filename in files:
            full_path = os.path.join(root, filename)
            bumped = os.stat(full_path).st_mtime + delta
            os.utime(full_path, times=(bumped, bumped))
def replace_word_size(text: list[str]) -> list[str]:
    """Replace WORDSIZE with platform specific word sizes.

    A token of the form 'WORD_SIZE*n' is replaced by the numeric value of
    the platform word size multiplied by n.
    """
    out = []
    for line in text:
        pos = line.find("WORD_SIZE")
        if pos == -1:
            out.append(line)
        else:
            # Grab the whole 'WORD_SIZE*n' token (up to the next whitespace).
            token = line[pos:].split()[0]
            multiplier = int(token[len("WORD_SIZE*") :])
            out.append(line.replace(token, str(PLATFORM_SIZE * multiplier)))
    return out
def infer_ir_build_options_from_test_name(name: str) -> CompilerOptions | None:
    """Look for magic substrings in test case name to set compiler options.

    Return None if the test case should be skipped (always pass).

    Supported naming conventions:
      *_64bit*:
          Run test case only on 64-bit platforms
      *_32bit*:
          Run test case only on 32-bit platforms
      *_python3_8* (or for any Python version):
          Use Python 3.8+ C API features (default: lowest supported version)
      *StripAssert*:
          Don't generate code for assert statements
    """
    # Skip tests that target a different word size than this platform's.
    if IS_32_BIT_PLATFORM and "_64bit" in name:
        return None
    if not IS_32_BIT_PLATFORM and "_32bit" in name:
        return None
    options = CompilerOptions(
        strip_asserts="StripAssert" in name,
        capi_version=(3, 9),
        strict_traceback_checks=True,
    )
    # A suffix like _python3_9 overrides the target C API version.
    version_match = re.search(r"_python([3-9]+)_([0-9]+)(_|\b)", name)
    if version_match:
        major = int(version_match.group(1))
        minor = int(version_match.group(2))
        options.capi_version = (major, minor)
        options.python_version = options.capi_version
    elif "_py" in name or "_Python" in name:
        assert False, f"Invalid _py* suffix (should be _pythonX_Y): {name}"
    if has_test_name_tag(name, "experimental"):
        options.experimental_features = True
    return options
def has_test_name_tag(name: str, tag: str) -> bool:
    """Check if a test case name contains a tag token like ``_experimental``.

    A tag matches if it appears as a full underscore-delimited token:
    ``foo_tag_bar`` or ``foo_tag``.
    """
    pattern = rf"(?:^|_){re.escape(tag)}(?:_|$)"
    return bool(re.search(pattern, name))