diff --git a/.github/copilot-instructions.md b/.github/copilot-instructions.md index bafa3e7f8..62dee182c 100644 --- a/.github/copilot-instructions.md +++ b/.github/copilot-instructions.md @@ -168,3 +168,60 @@ All Markdown files must strictly follow these markdownlint rules: - **MD034**: No bare URLs (for example, use a markdown link like `[text](destination)` instead of a plain URL) - **MD036**: Use # headings, not **Bold:** for titles - **MD040**: Always specify code block language (for example, use '```bash', '```python', '```text', etc.) + +## Development & Testing Workflows + +### Build and Test + +- **Environment**: Always source `setup-env.sh` before building or testing. This applies to all environments (Dev Container, local machine, HPC). +- **Configuration**: + - **Presets**: Prefer `CMakePresets.json` workflows (e.g., `cmake --preset default`). + - **Generator**: Prefer `Ninja` over `Makefiles` when available (`-G Ninja`). +- **Build**: + - **Parallelism**: Always use multiple cores. Ninja does this by default. For `make`, use `cmake --build build -j $(nproc)`. +- **Test**: + - **Parallelism**: Run tests in parallel using `ctest -j $(nproc)` or `ctest --parallel `. + - **Selection**: Run specific tests with `ctest -R "regex"` (e.g., `ctest -R "py:*"`). + - **Debugging**: Use `ctest --output-on-failure` to see logs for failed tests. + - **Guard against known or suspected stalling tests**: Use `ctest --test-timeout` to set the per-test time limit (e.g. `90`) for 90s, *vs* the default of 1500s. + +### Python Integration + +- **Naming**: Avoid naming Python test scripts `types.py` or other names that shadow standard library modules. This causes obscure import errors (e.g., `ModuleNotFoundError: No module named 'numpy'`). +- **PYTHONPATH**: Only include paths that contain user Python modules loaded by Phlex (for example, the source directory and any build output directory that houses generated modules). 
Do not append system/Spack/venv `site-packages`; `pymodule.cpp` handles CMAKE_PREFIX_PATH and virtual-environment path adjustments. +- **Test Structure**: + - **C++ Driver**: Provides data streams (e.g., `test/python/driver.cpp`). + - **Jsonnet Config**: Wires the graph (e.g., `test/python/pytypes.jsonnet`). + - **Python Script**: Implements algorithms (e.g., `test/python/test_types.py`). +- **Type Conversion**: `plugins/python/src/modulewrap.cpp` handles C++ ↔ Python conversion. + - **Mechanism**: Uses substring matching on type names (for example, `"float64]]"`). This is brittle. + - **Requirement**: Ensure converters exist for all types used in tests (e.g., `float`, `double`, `unsigned int`, and their vector equivalents). + - **Warning**: Exact type matches are required. `numpy.float32` != `float`. + +### Coverage Analysis + +- **Tooling**: The project uses LLVM source-based coverage. +- **Requirement**: The `phlex` binary must catch exceptions in `main` to ensure coverage data is flushed to disk even when tests fail/crash. +- **Generation**: + - **CMake Targets**: `coverage-xml`, `coverage-html` (if configured). + - **Manual**: + 1. Run tests with `LLVM_PROFILE_FILE` set (e.g., `export LLVM_PROFILE_FILE="profraw/%m-%p.profraw"`). + 2. Merge profiles: `llvm-profdata merge -sparse profraw/*.profraw -o coverage.profdata`. + 3. Generate report: `llvm-cov show -instr-profile=coverage.profdata -format=html ...` + +### Local GitHub Actions Testing (`act`) + +- **Tool**: Use `act` to run GitHub Actions workflows locally. +- **Configuration**: Ensure `.actrc` exists in the workspace root with the following content to use a compatible runner image: + + ```text + -P ubuntu-latest=catthehacker/ubuntu:act-latest + ``` + +- **Usage**: + - List jobs: `act -l` + - Run specific job: `act -j ` (e.g., `act -j python-check`) + - Run specific event: `act pull_request` +- **Troubleshooting**: + - **Docker Socket**: `act` requires access to the Docker socket. 
In dev containers, this may require specific mount configurations or permissions. + - **Artifacts**: `act` creates a `phlex-src` directory (or similar) for checkout. Ensure this is cleaned up or ignored by tools like `mypy`. diff --git a/.gitignore b/.gitignore index dc994c55d..d021cd45e 100644 --- a/.gitignore +++ b/.gitignore @@ -1,23 +1,27 @@ # Build directories build/ +build-cov/ _build/ *.dir/ -phlex-src -phlex-build/ -CMakeCache.txt +/phlex-src/ +/phlex-build/ +/CMakeCache.txt CMakeFiles/ -_deps/ +/_deps/ _codeql_detected_source_root # CMake user-specific presets (not generated by Spack) -CMakeUserPresets.json +/CMakeUserPresets.json # Coverage reports -coverage.xml -coverage.info -coverage-html/ -.coverage-generated/ -.coverage-artifacts/ +/coverage.profdata +/coverage_*.txt +/coverage.xml +/coverage.info +/coverage-html/ +/profraw/ +/.coverage-generated/ +/.coverage-artifacts/ *.gcda *.gcno *.gcov @@ -45,5 +49,5 @@ __pycache__/ .DS_Store # act (local workflow testing) .act-artifacts/ -.secretsactionlint +.secrets actionlint diff --git a/CMakeLists.txt b/CMakeLists.txt index 59ff4d81a..a5547316f 100644 --- a/CMakeLists.txt +++ b/CMakeLists.txt @@ -49,6 +49,11 @@ project(phlex VERSION 0.1.0 LANGUAGES CXX) cet_cmake_env() # ############################################################################## +# Set CI/test timeouts to a conservative value to avoid long stalls in CI. +# Use cache variables so generated CTest/Dart files pick this up when configured. +set(DART_TESTING_TIMEOUT 90 CACHE STRING "Timeout (s) for Dart/CTest runs") +set(CTEST_TEST_TIMEOUT 90 CACHE STRING "Per-test timeout (s) for CTest") + # Make tools available FetchContent_MakeAvailable(Catch2 GSL mimicpp) @@ -70,13 +75,13 @@ add_compile_options( ) if(CMAKE_CXX_COMPILER_ID STREQUAL "GNU") - if( - CMAKE_CXX_COMPILER_VERSION VERSION_GREATER_EQUAL "14.1" - AND CMAKE_COMPILER_VERSION VERSION_LESS "15" - ) - # GCC 14.1 issues many false positives re. 
array-bounds and - # stringop-overflow - add_compile_options(-Wno-array-bounds -Wno-stringop-overflow) + if(CMAKE_CXX_COMPILER_VERSION VERSION_GREATER_EQUAL "14.1") + if(CMAKE_CXX_COMPILER_VERSION VERSION_LESS "15") + add_compile_options(-Wno-stringop-overflow) + endif() + if(CMAKE_CXX_COMPILER_VERSION VERSION_LESS "16") + add_compile_options(-Wno-array-bounds) + endif() endif() endif() @@ -108,7 +113,8 @@ if(ENABLE_TSAN) -g -O1 # Ensure no optimizations interfere with TSan - "$<$:-fno-omit-frame-pointer -fno-optimize-sibling-calls>" + "$<$:-fno-omit-frame-pointer>" + "$<$:-fno-optimize-sibling-calls>" ) add_link_options(-fsanitize=thread) else() @@ -130,7 +136,8 @@ if(ENABLE_ASAN) -g -O1 # Ensure no optimizations interfere with ASan - "$<$:-fno-omit-frame-pointer -fno-optimize-sibling-calls>" + "$<$:-fno-omit-frame-pointer>" + "$<$:-fno-optimize-sibling-calls>" ) add_link_options(-fsanitize=address) else() diff --git a/CMakePresets.json b/CMakePresets.json index dc1d73c27..1cea31f53 100644 --- a/CMakePresets.json +++ b/CMakePresets.json @@ -4,6 +4,7 @@ "name": "default", "hidden": false, "cacheVariables": { + "PHLEX_USE_FORM": "ON", "CMAKE_EXPORT_COMPILE_COMMANDS": "YES", "CMAKE_CXX_STANDARD": "23", "CMAKE_CXX_STANDARD_REQUIRED": "YES", diff --git a/phlex/core/edge_maker.cpp b/phlex/core/edge_maker.cpp index 9f9e3362c..8a91300d7 100644 --- a/phlex/core/edge_maker.cpp +++ b/phlex/core/edge_maker.cpp @@ -28,6 +28,10 @@ namespace phlex::experimental { provider.full_name(), node_name, port.product_label.to_string()); + if (port.port == nullptr) { + throw std::runtime_error("Unexpected null port while connecting provider " + + provider.full_name() + " to node " + node_name); + } make_edge(provider.sender(), *(port.port)); found_match = true; break; diff --git a/phlex/core/edge_maker.hpp b/phlex/core/edge_maker.hpp index f837231f8..55541aeab 100644 --- a/phlex/core/edge_maker.hpp +++ b/phlex/core/edge_maker.hpp @@ -73,6 +73,10 @@ namespace phlex::experimental { 
continue; } + if (producer->port == nullptr or receiver_port == nullptr) { + throw std::runtime_error("Unexpected null port while connecting " + + producer->node.full() + " to " + node_name); + } make_edge(*producer->port, *receiver_port); } } @@ -93,6 +97,9 @@ namespace phlex::experimental { for (auto const& [output_name, output_node] : outputs) { make_edge(source, output_node->port()); for (auto const& named_port : producers_.values()) { + if (named_port.to_output == nullptr) { + throw std::runtime_error("Unexpected null output port for " + named_port.node.full()); + } make_edge(*named_port.to_output, output_node->port()); } } diff --git a/plugins/python/CMakeLists.txt b/plugins/python/CMakeLists.txt index 2554a6dce..047869d09 100644 --- a/plugins/python/CMakeLists.txt +++ b/plugins/python/CMakeLists.txt @@ -21,3 +21,8 @@ target_link_libraries(pymodule PRIVATE phlex::module Python::Python Python::NumP target_compile_definitions(pymodule PRIVATE NPY_NO_DEPRECATED_API=NPY_1_7_API_VERSION) install(TARGETS pymodule LIBRARY DESTINATION lib) + +install( + DIRECTORY python/phlex + DESTINATION lib/python${Python_VERSION_MAJOR}.${Python_VERSION_MINOR}/site-packages +) diff --git a/plugins/python/README.md b/plugins/python/README.md new file mode 100644 index 000000000..60cd9fc93 --- /dev/null +++ b/plugins/python/README.md @@ -0,0 +1,57 @@ +# Phlex Python Plugin Architecture + +This directory contains the C++ source code for the Phlex Python plugin, which enables Phlex to execute Python code as part of its computation graph. + +## Architecture Overview + +The integration is built on the **Python C API** (not `pybind11`) to maintain strict control over the interpreter lifecycle and memory management. + +### 1. The "Type Bridge" (`modulewrap.cpp`) + +The core of the integration is the type conversion layer in `src/modulewrap.cpp`. This layer is responsible for: + +- Converting Phlex `Product` objects (C++) into Python objects (e.g., `PyObject*`, `numpy.ndarray`). 
+- Converting Python return values back into Phlex `Product` objects. + +**Critical Implementation Detail:** +The type mapping relies on **string comparison** of type names. + +- **Mechanism**: The C++ code checks whether `type_name()` contains `"float64]]"` to identify a 2D array of doubles. +- **Brittleness**: This is a fragile contract. If the type name changes (e.g., `numpy` changes its string representation) or if a user provides a slightly different type (e.g., `float` vs `np.float32`), the bridge may fail. +- **Extension**: When adding support for new types, you must explicitly add converters in `modulewrap.cpp` for both scalar and vector/array versions. + +### 2. Hybrid Configuration + +Phlex uses a hybrid configuration model involving three languages: + +1. **Jsonnet** (`*.jsonnet`): Defines the computation graph structure. It specifies: + - The nodes in the graph. + - The Python module/class to load for specific nodes. + - Configuration parameters passed to the Python object. +2. **C++ Driver**: The executable that: + - Parses the Jsonnet configuration. + - Initializes the Phlex core. + - Loads the Python interpreter and the specified plugin. +3. **Python Code** (`*.py`): Implements the algorithmic logic. + +### 3. Environment & Testing + +Because the Python interpreter is embedded within the C++ application, the runtime environment is critical. + +- **PYTHONPATH**: Must be set correctly to include: + - The build directory (for generated modules). + - The source directory (for user scripts). + - Do not append system/Spack `site-packages`; `pymodule.cpp` adjusts `sys.path` based on `CMAKE_PREFIX_PATH` and active virtual environments. +- **Naming Collisions**: + - **Warning**: Do not name test files `types.py`, `test.py`, `code.py`, or other names that shadow standard library modules. 
+ - **Consequence**: Shadowing can cause obscure failures in internal libraries (e.g., `numpy` failing to import because it tries to import `types` from the standard library but gets your local file instead). + +## Development Guidelines + +1. **Adding New Types**: + - Update `src/modulewrap.cpp` to handle the new C++ type. + - Add a corresponding test case in `test/python/` to verify the round-trip conversion. +2. **Testing**: + - Use `ctest` to run tests. + - Tests are integration tests: they run the full C++ application which loads the Python script. + - Debugging: Use `ctest --output-on-failure` to see Python exceptions. diff --git a/plugins/python/python/phlex/__init__.py b/plugins/python/python/phlex/__init__.py new file mode 100644 index 000000000..b21384ff4 --- /dev/null +++ b/plugins/python/python/phlex/__init__.py @@ -0,0 +1,109 @@ +"""Annotation helper for C++ typing variants. + +Python algorithms are generic, like C++ templates, but the Phlex registration +process requires a single unique signature. These helpers generate annotated +functions for registration with the proper C++ types. +""" + +import collections +import copy +import inspect +from typing import Any, Callable + + +class MissingAnnotation(Exception): + """Exception noting the missing of an argument in the provied annotations.""" + + def __init__(self, arg: str): + """Construct exception from the name of the argument without annotation.""" + self.arg = arg + + def __str__(self): + """Report the argument that is missing an annotation.""" + return "argument '%s' is not annotated" % self.arg + + +class Variant: + """Wrapper to associate custom annotations with a callable. + + This class wraps a callable and provides custom ``__annotations__`` and + ``__name__`` attributes, allowing the same underlying function or callable + object to be registered multiple times with different type annotations. + + By default, the provided callable is kept by reference, but can be cloned + (e.g. 
for callable instances) if requested. + + Phlex will recognize the "phlex_callable" data member, allowing an unwrap + and thus saving an indirection. To detect performance degradation, the + wrapper is not callable by default. + + Attributes: + phlex_callable (Callable): The underlying callable (public). + __annotations__ (dict): Type information of arguments and return product. + __name__ (str): The name associated with this variant. + + Examples: + >>> def add(i: Number, j: Number) -> Number: + ... return i + j + ... + >>> int_adder = Variant(add, {"i": int, "j": int, "return": int}, "iadd") + """ + + def __init__( + self, + f: Callable, + annotations: dict[str, str | type | Any], + name: str, + clone: bool | str = False, + allow_call: bool = False, + ): + """Annotate the callable F. + + Args: + f (Callable): Annotable function. + annotations (dict): Type information of arguments and return product. + name (str): Name to assign to this variant. + clone (bool|str): If True (or "deep"), creates a shallow (deep) copy + of the callable. + allow_call (bool): Allow this wrapper to forward to the callable. 
+ """ + if clone == "deep": + self.phlex_callable = copy.deepcopy(f) + elif clone: + self.phlex_callable = copy.copy(f) + else: + self.phlex_callable = f + + # annotions are expected as an ordinary dict and should be ordered, but + # we do not require it, so re-order based on the function's co_varnames + self.__annotations__ = collections.OrderedDict() + + sig = inspect.signature(self.phlex_callable) + for k, v in sig.parameters.items(): + try: + self.__annotations__[k] = annotations[k] + except KeyError as e: + if v.default is inspect.Parameter.empty: + raise MissingAnnotation(k) from e + + self.__annotations__["return"] = annotations.get("return", None) + + self.__name__ = name + self.__code__ = getattr(self.phlex_callable, "__code__", None) + self.__defaults__ = getattr(self.phlex_callable, "__defaults__", None) + self._allow_call = allow_call + + def __call__(self, *args, **kwargs): + """Raises an error if called directly. + + Variant instances should not be called directly. The framework should + extract ``phlex_callable`` instead and call that. + + Raises: + AssertionError: To indicate incorrect usage, unless overridden. + """ + assert self._allow_call, ( + f"Variant '{self.__name__}' was called directly. " + f"The framework should extract phlex_callable instead." 
+ ) + return self.phlex_callable(*args, **kwargs) # type: ignore diff --git a/plugins/python/src/configwrap.cpp b/plugins/python/src/configwrap.cpp index d019725f9..7484d2b45 100644 --- a/plugins/python/src/configwrap.cpp +++ b/plugins/python/src/configwrap.cpp @@ -131,7 +131,7 @@ static PyObject* pcm_subscript(py_config_map* pycmap, PyObject* pykey) } } } catch (std::runtime_error const&) { - PyErr_Format(PyExc_TypeError, "property \"%s\" does not exist", ckey.c_str()); + PyErr_Format(PyExc_KeyError, "property \"%s\" does not exist", ckey.c_str()); } // cache if found diff --git a/plugins/python/src/lifelinewrap.cpp b/plugins/python/src/lifelinewrap.cpp index 0f81e6bb1..a00e1d72f 100644 --- a/plugins/python/src/lifelinewrap.cpp +++ b/plugins/python/src/lifelinewrap.cpp @@ -31,9 +31,14 @@ static int ll_clear(py_lifeline_t* pyobj) static void ll_dealloc(py_lifeline_t* pyobj) { + // This type participates in GC; untrack before clearing references so the + // collector does not traverse a partially torn-down object during dealloc. + PyObject_GC_UnTrack(pyobj); Py_CLEAR(pyobj->m_view); typedef std::shared_ptr generic_shared_t; pyobj->m_source.~generic_shared_t(); + // Use tp_free to pair with tp_alloc for GC-tracked Python objects. 
+ Py_TYPE(pyobj)->tp_free((PyObject*)pyobj); } // clang-format off diff --git a/plugins/python/src/modulewrap.cpp b/plugins/python/src/modulewrap.cpp index 00b123d6a..bfc475876 100644 --- a/plugins/python/src/modulewrap.cpp +++ b/plugins/python/src/modulewrap.cpp @@ -57,35 +57,48 @@ namespace { static inline PyObject* lifeline_transform(intptr_t arg) { - if (Py_TYPE((PyObject*)arg) == &PhlexLifeline_Type) { - return ((py_lifeline_t*)arg)->m_view; + PyObject* pyobj = (PyObject*)arg; + if (pyobj && PyObject_TypeCheck(pyobj, &PhlexLifeline_Type)) { + return ((py_lifeline_t*)pyobj)->m_view; } - return (PyObject*)arg; + return pyobj; } // callable object managing the callback template struct py_callback { - PyObject const* m_callable; // owned + PyObject* m_callable; // owned - py_callback(PyObject const* callable) + py_callback(PyObject* callable) : m_callable(callable) { - Py_INCREF(callable); - m_callable = callable; + // callable is always non-null here (validated before py_callback construction) + PyGILRAII gil; + Py_INCREF(m_callable); } - py_callback(py_callback const& pc) + py_callback(py_callback const& pc) : m_callable(pc.m_callable) { - Py_INCREF(pc.m_callable); - m_callable = pc.m_callable; + // Must hold GIL when manipulating reference counts + PyGILRAII gil; + Py_INCREF(m_callable); } py_callback& operator=(py_callback const& pc) { if (this != &pc) { + // Must hold GIL when manipulating reference counts + PyGILRAII gil; Py_INCREF(pc.m_callable); + Py_DECREF(m_callable); m_callable = pc.m_callable; } + return *this; + } + ~py_callback() + { + // TODO: cleanup deferred to Phlex shutdown hook + // Cannot safely Py_DECREF during arbitrary destruction due to: + // - TOCTOU race on Py_IsInitialized() without GIL + // - Module offloading in interpreter cleanup phase 2 } - ~py_callback() { Py_DECREF(m_callable); } template intptr_t call(Args... 
args) @@ -95,7 +108,7 @@ namespace { PyGILRAII gil; PyObject* result = - PyObject_CallFunctionObjArgs((PyObject*)m_callable, lifeline_transform(args)..., nullptr); + PyObject_CallFunctionObjArgs(m_callable, lifeline_transform(args)..., nullptr); std::string error_msg; if (!result) { @@ -105,8 +118,9 @@ namespace { decref_all(args...); - if (!error_msg.empty()) + if (!error_msg.empty()) { throw std::runtime_error(error_msg.c_str()); + } return (intptr_t)result; } @@ -118,8 +132,7 @@ namespace { PyGILRAII gil; - PyObject* result = - PyObject_CallFunctionObjArgs((PyObject*)m_callable, (PyObject*)args..., nullptr); + PyObject* result = PyObject_CallFunctionObjArgs(m_callable, (PyObject*)args..., nullptr); std::string error_msg; if (!result) { @@ -130,8 +143,9 @@ namespace { decref_all(args...); - if (!error_msg.empty()) + if (!error_msg.empty()) { throw std::runtime_error(error_msg.c_str()); + } } private: @@ -174,25 +188,30 @@ namespace { static std::vector cseq(PyObject* coll) { - size_t len = coll ? 
(size_t)PySequence_Size(coll) : 0; - std::vector cargs{len}; - - for (size_t i = 0; i < len; ++i) { - PyObject* item = PySequence_GetItem(coll, i); - if (item) { - char const* p = PyUnicode_AsUTF8(item); - if (p) { - Py_ssize_t sz = PyUnicode_GetLength(item); - cargs[i].assign(p, (std::string::size_type)sz); - } - Py_DECREF(item); + if (!coll) { + return std::vector{}; + } - if (!p) { - PyErr_Format(PyExc_TypeError, "could not convert item %d to string", (int)i); - break; - } - } else - break; // Python error already set + // coll is guaranteed to be a list or tuple (from PySequence_Fast) + Py_ssize_t len = PySequence_Fast_GET_SIZE(coll); + std::vector cargs; + cargs.reserve(static_cast(len)); + + PyObject** items = PySequence_Fast_ITEMS(coll); + for (Py_ssize_t i = 0; i < len; ++i) { + PyObject* item = items[i]; // borrowed reference + if (!PyUnicode_Check(item)) { + PyErr_Format(PyExc_TypeError, "item %d must be a string", (int)i); + return std::vector{}; // Error set + } + + char const* p = PyUnicode_AsUTF8(item); + if (!p) { + return std::vector{}; // Error already set + } + + Py_ssize_t sz = PyUnicode_GetLength(item); + cargs.emplace_back(p, static_cast(sz)); } return cargs; @@ -207,28 +226,42 @@ namespace { std::string ann; if (!PyUnicode_Check(pyobj)) { PyObject* pystr = PyObject_GetAttrString(pyobj, "__name__"); // eg. 
for classes + + // generics like Union have a __name__ that is not useful for our purposes + if (pystr) { + char const* cstr = PyUnicode_AsUTF8(pystr); + if (cstr && (strcmp(cstr, "Union") == 0 || strcmp(cstr, "Optional") == 0)) { + Py_DECREF(pystr); + pystr = nullptr; + } + } + if (!pystr) { PyErr_Clear(); pystr = PyObject_Str(pyobj); } - char const* cstr = PyUnicode_AsUTF8(pystr); - if (cstr) - ann = cstr; - Py_DECREF(pystr); + if (pystr) { + char const* cstr = PyUnicode_AsUTF8(pystr); + if (cstr) + ann = cstr; + Py_DECREF(pystr); + } // for numpy typing, there's no useful way of figuring out the type from the // name of the type, only from its string representation, so fall through and // let this method return str() - if (ann != "ndarray") + if (ann != "ndarray" && ann != "list") return ann; // start over for numpy type using result from str() pystr = PyObject_Str(pyobj); - cstr = PyUnicode_AsUTF8(pystr); - if (cstr) // if failed, ann will remain "ndarray" - ann = cstr; - Py_DECREF(pystr); + if (pystr) { + char const* cstr = PyUnicode_AsUTF8(pystr); + if (cstr) // if failed, ann will remain "ndarray" + ann = cstr; + Py_DECREF(pystr); + } return ann; } @@ -302,6 +335,11 @@ namespace { { \ PyGILRAII gil; \ cpptype i = (cpptype)frompy((PyObject*)pyobj); \ + std::string msg; \ + if (msg_from_py_error(msg, true)) { \ + Py_DECREF((PyObject*)pyobj); \ + throw std::runtime_error("Python conversion error for type " #name ": " + msg); \ + } \ Py_DECREF((PyObject*)pyobj); \ return i; \ } @@ -319,14 +357,17 @@ namespace { { \ PyGILRAII gil; \ \ + if (!v) \ + return (intptr_t)nullptr; \ + \ /* use a numpy view with the shared pointer tied up in a lifeline object (note: this */ \ /* is just a demonstrator; alternatives are still being considered) */ \ npy_intp dims[] = {static_cast(v->size())}; \ \ - PyObject* np_view = PyArray_SimpleNewFromData(1, /* 1-D array */ \ - dims, /* dimension sizes */ \ - nptype, /* numpy C type */ \ - v->data() /* raw buffer */ \ + PyObject* 
np_view = PyArray_SimpleNewFromData(1, /* 1-D array */ \ + dims, /* dimension sizes */ \ + nptype, /* numpy C type */ \ + (void*)(v->data()) /* raw buffer */ \ ); \ \ if (!np_view) \ @@ -340,8 +381,12 @@ namespace { /* when passing it to the registered Python function */ \ py_lifeline_t* pyll = \ (py_lifeline_t*)PhlexLifeline_Type.tp_new(&PhlexLifeline_Type, nullptr, nullptr); \ - pyll->m_view = np_view; /* steals reference */ \ + if (!pyll) { \ + Py_DECREF(np_view); \ + return (intptr_t)nullptr; \ + } \ pyll->m_source = v; \ + pyll->m_view = np_view; /* steals reference */ \ \ return (intptr_t)pyll; \ } @@ -353,7 +398,7 @@ namespace { VECTOR_CONVERTER(vfloat, float, NPY_FLOAT) VECTOR_CONVERTER(vdouble, double, NPY_DOUBLE) -#define NUMPY_ARRAY_CONVERTER(name, cpptype, nptype) \ +#define NUMPY_ARRAY_CONVERTER(name, cpptype, nptype, frompy) \ static std::shared_ptr> py_to_##name(intptr_t pyobj) \ { \ PyGILRAII gil; \ @@ -361,36 +406,48 @@ namespace { auto vec = std::make_shared>(); \ \ /* TODO: because of unresolved ownership issues, copy the full array contents */ \ - if (!pyobj || !PyArray_Check((PyObject*)pyobj)) { \ - PyErr_Clear(); /* how to report an error? 
*/ \ - Py_DECREF((PyObject*)pyobj); \ - return vec; \ - } \ - \ - PyArrayObject* arr = (PyArrayObject*)pyobj; \ + if (PyArray_Check((PyObject*)pyobj)) { \ + PyArrayObject* arr = (PyArrayObject*)pyobj; \ \ - /* TODO: flattening the array here seems to be the only workable solution */ \ - npy_intp* dims = PyArray_DIMS(arr); \ - int nd = PyArray_NDIM(arr); \ - size_t total = 1; \ - for (int i = 0; i < nd; ++i) \ - total *= static_cast(dims[i]); \ + /* TODO: flattening the array here seems to be the only workable solution */ \ + npy_intp* dims = PyArray_DIMS(arr); \ + int nd = PyArray_NDIM(arr); \ + size_t total = 1; \ + for (int i = 0; i < nd; ++i) \ + total *= static_cast(dims[i]); \ \ - /* copy the array info; note that this assumes C continuity */ \ - cpptype* raw = static_cast(PyArray_DATA(arr)); \ - vec->reserve(total); \ - vec->insert(vec->end(), raw, raw + total); \ + /* copy the array info; note that this assumes C continuity */ \ + cpptype* raw = static_cast(PyArray_DATA(arr)); \ + vec->reserve(total); \ + vec->insert(vec->end(), raw, raw + total); \ + } else if (PyList_Check((PyObject*)pyobj)) { \ + Py_ssize_t total = PyList_Size((PyObject*)pyobj); \ + vec->reserve(total); \ + for (Py_ssize_t i = 0; i < total; ++i) { \ + PyObject* item = PyList_GetItem((PyObject*)pyobj, i); \ + vec->push_back((cpptype)frompy(item)); \ + if (PyErr_Occurred()) { \ + PyErr_Clear(); \ + break; \ + } \ + } \ + } else { \ + std::string msg; \ + if (msg_from_py_error(msg, true)) { \ + throw std::runtime_error("List conversion error: " + msg); \ + } \ + } \ \ Py_DECREF((PyObject*)pyobj); \ return vec; \ } - NUMPY_ARRAY_CONVERTER(vint, int, NPY_INT) - NUMPY_ARRAY_CONVERTER(vuint, unsigned int, NPY_UINT) - NUMPY_ARRAY_CONVERTER(vlong, long, NPY_LONG) - NUMPY_ARRAY_CONVERTER(vulong, unsigned long, NPY_ULONG) - NUMPY_ARRAY_CONVERTER(vfloat, float, NPY_FLOAT) - NUMPY_ARRAY_CONVERTER(vdouble, double, NPY_DOUBLE) + NUMPY_ARRAY_CONVERTER(vint, int, NPY_INT, PyLong_AsLong) + 
NUMPY_ARRAY_CONVERTER(vuint, unsigned int, NPY_UINT, pylong_or_int_as_ulong) + NUMPY_ARRAY_CONVERTER(vlong, long, NPY_LONG, pylong_as_strictlong) + NUMPY_ARRAY_CONVERTER(vulong, unsigned long, NPY_ULONG, pylong_or_int_as_ulong) + NUMPY_ARRAY_CONVERTER(vfloat, float, NPY_FLOAT, PyFloat_AsDouble) + NUMPY_ARRAY_CONVERTER(vdouble, double, NPY_DOUBLE, PyFloat_AsDouble) } // unnamed namespace @@ -442,8 +499,10 @@ static PyObject* parse_args(PyObject* args, // AttributeError already set return nullptr; } - } else + } else { Py_INCREF(pyname); + } + functor_name = PyUnicode_AsUTF8(pyname); Py_DECREF(pyname); @@ -452,14 +511,29 @@ static PyObject* parse_args(PyObject* args, return nullptr; } - if (!PySequence_Check(input) || (output && !PySequence_Check(output))) { - PyErr_SetString(PyExc_TypeError, "input and output need to be sequences"); - return nullptr; + // Accept any sequence type (list, tuple, custom sequences) + PyObject* input_fast = PySequence_Fast(input, "input_family must be a sequence"); + if (!input_fast) { + return nullptr; // TypeError already set by PySequence_Fast + } + + PyObject* output_fast = nullptr; + if (output) { + output_fast = PySequence_Fast(output, "output_products must be a sequence"); + if (!output_fast) { + Py_DECREF(input_fast); + return nullptr; + } } // convert input and output declarations, to be able to pass them to Phlex - input_labels = cseq(input); - output_labels = cseq(output); + input_labels = cseq(input_fast); + output_labels = cseq(output_fast); + + // Clean up fast sequences + Py_DECREF(input_fast); + Py_XDECREF(output_fast); + if (output_labels.size() > 1) { PyErr_SetString(PyExc_TypeError, "only a single output supported"); return nullptr; @@ -481,20 +555,19 @@ static PyObject* parse_args(PyObject* args, } Py_DECREF(sann); - if (annot && PyDict_Check(annot) && PyDict_Size(annot)) { - PyObject* ret = PyDict_GetItemString(annot, "return"); - if (ret) - output_types.push_back(annotation_as_text(ret)); - - // dictionary is 
ordered with return last if provide (note: the keys here - // could be used as input labels, instead of the ones from the configuration, - // but that is probably not practical in actual use, so they are ignored) - PyObject* values = PyDict_Values(annot); - for (Py_ssize_t i = 0; i < (PyList_GET_SIZE(values) - (ret ? 1 : 0)); ++i) { - PyObject* item = PyList_GET_ITEM(values, i); - input_types.push_back(annotation_as_text(item)); + if (annot && PyDict_Check(annot)) { + // Variant guarantees OrderedDict with "return" last + PyObject *key, *value; + Py_ssize_t pos = 0; + + while (PyDict_Next(annot, &pos, &key, &value)) { + char const* key_str = PyUnicode_AsUTF8(key); + if (strcmp(key_str, "return") == 0) { + output_types.push_back(annotation_as_text(value)); + } else { + input_types.push_back(annotation_as_text(value)); + } } - Py_DECREF(values); } Py_XDECREF(annot); @@ -514,8 +587,18 @@ static PyObject* parse_args(PyObject* args, return nullptr; } + // special case of Phlex Variant wrapper + PyObject* wrapped_callable = PyObject_GetAttrString(callable, "phlex_callable"); + if (wrapped_callable) { + // PyObject_GetAttrString returns a new reference, which we return + callable = wrapped_callable; + } else { + // No wrapper, use the original callable with incremented reference count + PyErr_Clear(); + Py_INCREF(callable); + } + // no common errors detected; actual registration may have more checks - Py_INCREF(callable); return callable; } @@ -557,31 +640,31 @@ static bool insert_input_converters(py_phlex_module* mod, } pos += 18; - std::string py_out = cname + "_" + inp + "py"; - if (inp_type.compare(pos, std::string::npos, "int32]]") == 0) { - mod->ph_module->transform("pyvint_" + inp + "_" + cname, vint_to_py, concurrency::serial) - .input_family(product_query{product_specification::create(inp), LAYER}) - .output_products(py_out); - } else if (inp_type.compare(pos, std::string::npos, "uint32]]") == 0) { + + if (inp_type.compare(pos, 8, "uint32]]") == 0) { 
mod->ph_module->transform("pyvuint_" + inp + "_" + cname, vuint_to_py, concurrency::serial) .input_family(product_query{product_specification::create(inp), LAYER}) .output_products(py_out); - } else if (inp_type.compare(pos, std::string::npos, "int64]]") == 0) { // need not be true - mod->ph_module->transform("pyvlong_" + inp + "_" + cname, vlong_to_py, concurrency::serial) + } else if (inp_type.compare(pos, 7, "int32]]") == 0) { + mod->ph_module->transform("pyvint_" + inp + "_" + cname, vint_to_py, concurrency::serial) .input_family(product_query{product_specification::create(inp), LAYER}) .output_products(py_out); - } else if (inp_type.compare(pos, std::string::npos, "uint64]]") == 0) { // id. + } else if (inp_type.compare(pos, 8, "uint64]]") == 0) { // id. mod->ph_module ->transform("pyvulong_" + inp + "_" + cname, vulong_to_py, concurrency::serial) .input_family(product_query{product_specification::create(inp), LAYER}) .output_products(py_out); - } else if (inp_type.compare(pos, std::string::npos, "float32]]") == 0) { + } else if (inp_type.compare(pos, 7, "int64]]") == 0) { // need not be true + mod->ph_module->transform("pyvlong_" + inp + "_" + cname, vlong_to_py, concurrency::serial) + .input_family(product_query{product_specification::create(inp), LAYER}) + .output_products(py_out); + } else if (inp_type.compare(pos, 9, "float32]]") == 0) { mod->ph_module ->transform("pyvfloat_" + inp + "_" + cname, vfloat_to_py, concurrency::serial) .input_family(product_query{product_specification::create(inp), LAYER}) .output_products(py_out); - } else if (inp_type.compare(pos, std::string::npos, "double64]]") == 0) { + } else if (inp_type.compare(pos, 9, "float64]]") == 0) { mod->ph_module ->transform("pyvdouble_" + inp + "_" + cname, vdouble_to_py, concurrency::serial) .input_family(product_query{product_specification::create(inp), LAYER}) @@ -590,6 +673,37 @@ static bool insert_input_converters(py_phlex_module* mod, PyErr_Format(PyExc_TypeError, "unsupported array 
input type \"%s\"", inp_type.c_str()); return false; } + } else if (inp_type == "list[int]") { + std::string py_out = cname + "_" + inp + "py"; + mod->ph_module->transform("pyvint_" + inp + "_" + cname, vint_to_py, concurrency::serial) + .input_family(product_query{product_specification::create(inp), LAYER}) + .output_products(py_out); + } else if (inp_type == "list[unsigned int]" || inp_type == "list['unsigned int']") { + std::string py_out = cname + "_" + inp + "py"; + mod->ph_module->transform("pyvuint_" + inp + "_" + cname, vuint_to_py, concurrency::serial) + .input_family(product_query{product_specification::create(inp), LAYER}) + .output_products(py_out); + } else if (inp_type == "list[long]" || inp_type == "list['long']") { + std::string py_out = cname + "_" + inp + "py"; + mod->ph_module->transform("pyvlong_" + inp + "_" + cname, vlong_to_py, concurrency::serial) + .input_family(product_query{product_specification::create(inp), LAYER}) + .output_products(py_out); + } else if (inp_type == "list[unsigned long]" || inp_type == "list['unsigned long']") { + std::string py_out = cname + "_" + inp + "py"; + mod->ph_module->transform("pyvulong_" + inp + "_" + cname, vulong_to_py, concurrency::serial) + .input_family(product_query{product_specification::create(inp), LAYER}) + .output_products(py_out); + } else if (inp_type == "list[float]") { + std::string py_out = cname + "_" + inp + "py"; + mod->ph_module->transform("pyvfloat_" + inp + "_" + cname, vfloat_to_py, concurrency::serial) + .input_family(product_query{product_specification::create(inp), LAYER}) + .output_products(py_out); + } else if (inp_type == "list[double]" || inp_type == "list['double']") { + std::string py_out = cname + "_" + inp + "py"; + mod->ph_module + ->transform("pyvdouble_" + inp + "_" + cname, vdouble_to_py, concurrency::serial) + .input_family(product_query{product_specification::create(inp), LAYER}) + .output_products(py_out); } else { PyErr_Format(PyExc_TypeError, "unsupported input 
type \"%s\"", inp_type.c_str()); return false; @@ -613,6 +727,7 @@ static PyObject* md_transform(py_phlex_module* mod, PyObject* args, PyObject* kw if (output_types.empty()) { PyErr_Format(PyExc_TypeError, "a transform should have an output type"); + Py_DECREF(callable); return nullptr; } @@ -622,8 +737,10 @@ static PyObject* md_transform(py_phlex_module* mod, PyObject* args, PyObject* kw std::string output = output_labels[0]; std::string output_type = output_types[0]; - if (!insert_input_converters(mod, cname, input_labels, input_types)) + if (!insert_input_converters(mod, cname, input_labels, input_types)) { + Py_DECREF(callable); return nullptr; // error already set + } // register Python transform std::string py_out = "py" + output + "_" + cname; @@ -633,6 +750,7 @@ static PyObject* md_transform(py_phlex_module* mod, PyObject* args, PyObject* kw .input_family( product_query{product_specification::create(cname + "_" + input_labels[0] + "py"), LAYER}) .output_products(py_out); + Py_DECREF(callable); } else if (input_labels.size() == 2) { auto* pyc = new py_callback_2{callable}; mod->ph_module->transform(cname, *pyc, concurrency::serial) @@ -640,6 +758,7 @@ static PyObject* md_transform(py_phlex_module* mod, PyObject* args, PyObject* kw product_query{product_specification::create(cname + "_" + input_labels[0] + "py"), LAYER}, product_query{product_specification::create(cname + "_" + input_labels[1] + "py"), LAYER}) .output_products(py_out); + Py_DECREF(callable); } else if (input_labels.size() == 3) { auto* pyc = new py_callback_3{callable}; mod->ph_module->transform(cname, *pyc, concurrency::serial) @@ -648,8 +767,10 @@ static PyObject* md_transform(py_phlex_module* mod, PyObject* args, PyObject* kw product_query{product_specification::create(cname + "_" + input_labels[1] + "py"), LAYER}, product_query{product_specification::create(cname + "_" + input_labels[2] + "py"), LAYER}) .output_products(py_out); + Py_DECREF(callable); } else { 
PyErr_SetString(PyExc_TypeError, "unsupported number of inputs"); + Py_DECREF(callable); return nullptr; } @@ -682,29 +803,29 @@ static PyObject* md_transform(py_phlex_module* mod, PyObject* args, PyObject* kw pos += 18; auto py_in = "py" + output + "_" + cname; - if (output_type.compare(pos, std::string::npos, "int32]]") == 0) { + if (output_type.compare(pos, 7, "int32]]") == 0) { mod->ph_module->transform("pyvint_" + output + "_" + cname, py_to_vint, concurrency::serial) .input_family(product_query{product_specification::create(py_in), LAYER}) .output_products(output); - } else if (output_type.compare(pos, std::string::npos, "uint32]]") == 0) { + } else if (output_type.compare(pos, 8, "uint32]]") == 0) { mod->ph_module->transform("pyvuint_" + output + "_" + cname, py_to_vuint, concurrency::serial) .input_family(product_query{product_specification::create(py_in), LAYER}) .output_products(output); - } else if (output_type.compare(pos, std::string::npos, "int64]]") == 0) { // need not be true + } else if (output_type.compare(pos, 7, "int64]]") == 0) { // need not be true mod->ph_module->transform("pyvlong_" + output + "_" + cname, py_to_vlong, concurrency::serial) .input_family(product_query{product_specification::create(py_in), LAYER}) .output_products(output); - } else if (output_type.compare(pos, std::string::npos, "uint64]]") == 0) { // id. + } else if (output_type.compare(pos, 8, "uint64]]") == 0) { // id. 
mod->ph_module ->transform("pyvulong_" + output + "_" + cname, py_to_vulong, concurrency::serial) .input_family(product_query{product_specification::create(py_in), LAYER}) .output_products(output); - } else if (output_type.compare(pos, std::string::npos, "float32]]") == 0) { + } else if (output_type.compare(pos, 9, "float32]]") == 0) { mod->ph_module ->transform("pyvfloat_" + output + "_" + cname, py_to_vfloat, concurrency::serial) .input_family(product_query{product_specification::create(py_in), LAYER}) .output_products(output); - } else if (output_type.compare(pos, std::string::npos, "double64]]") == 0) { + } else if (output_type.compare(pos, 9, "float64]]") == 0) { mod->ph_module ->transform("pyvdouble_" + output + "_" + cname, py_to_vdouble, concurrency::serial) .input_family(product_query{product_specification::create(py_in), LAYER}) @@ -713,6 +834,37 @@ static PyObject* md_transform(py_phlex_module* mod, PyObject* args, PyObject* kw PyErr_Format(PyExc_TypeError, "unsupported array output type \"%s\"", output_type.c_str()); return nullptr; } + } else if (output_type == "list[int]") { + auto py_in = "py" + output + "_" + cname; + mod->ph_module->transform("pyvint_" + output + "_" + cname, py_to_vint, concurrency::serial) + .input_family(product_query{product_specification::create(py_in), LAYER}) + .output_products(output); + } else if (output_type == "list[unsigned int]" || output_type == "list['unsigned int']") { + auto py_in = "py" + output + "_" + cname; + mod->ph_module->transform("pyvuint_" + output + "_" + cname, py_to_vuint, concurrency::serial) + .input_family(product_query{product_specification::create(py_in), LAYER}) + .output_products(output); + } else if (output_type == "list[long]" || output_type == "list['long']") { + auto py_in = "py" + output + "_" + cname; + mod->ph_module->transform("pyvlong_" + output + "_" + cname, py_to_vlong, concurrency::serial) + .input_family(product_query{product_specification::create(py_in), LAYER}) + 
.output_products(output); + } else if (output_type == "list[unsigned long]" || output_type == "list['unsigned long']") { + auto py_in = "py" + output + "_" + cname; + mod->ph_module->transform("pyvulong_" + output + "_" + cname, py_to_vulong, concurrency::serial) + .input_family(product_query{product_specification::create(py_in), LAYER}) + .output_products(output); + } else if (output_type == "list[float]") { + auto py_in = "py" + output + "_" + cname; + mod->ph_module->transform("pyvfloat_" + output + "_" + cname, py_to_vfloat, concurrency::serial) + .input_family(product_query{product_specification::create(py_in), LAYER}) + .output_products(output); + } else if (output_type == "list[double]" || output_type == "list['double']") { + auto py_in = "py" + output + "_" + cname; + mod->ph_module + ->transform("pyvdouble_" + output + "_" + cname, py_to_vdouble, concurrency::serial) + .input_family(product_query{product_specification::create(py_in), LAYER}) + .output_products(output); } else { PyErr_Format(PyExc_TypeError, "unsupported output type \"%s\"", output_type.c_str()); return nullptr; @@ -738,8 +890,10 @@ static PyObject* md_observe(py_phlex_module* mod, PyObject* args, PyObject* kwds return nullptr; } - if (!insert_input_converters(mod, cname, input_labels, input_types)) + if (!insert_input_converters(mod, cname, input_labels, input_types)) { + Py_DECREF(callable); return nullptr; // error already set + } // register Python observer if (input_labels.size() == 1) { @@ -747,12 +901,14 @@ static PyObject* md_observe(py_phlex_module* mod, PyObject* args, PyObject* kwds mod->ph_module->observe(cname, *pyc, concurrency::serial) .input_family( product_query{product_specification::create(cname + "_" + input_labels[0] + "py"), LAYER}); + Py_DECREF(callable); } else if (input_labels.size() == 2) { auto* pyc = new py_callback_2v{callable}; mod->ph_module->observe(cname, *pyc, concurrency::serial) .input_family( product_query{product_specification::create(cname + "_" + 
input_labels[0] + "py"), LAYER}, product_query{product_specification::create(cname + "_" + input_labels[1] + "py"), LAYER}); + Py_DECREF(callable); } else if (input_labels.size() == 3) { auto* pyc = new py_callback_3v{callable}; mod->ph_module->observe(cname, *pyc, concurrency::serial) @@ -760,8 +916,10 @@ static PyObject* md_observe(py_phlex_module* mod, PyObject* args, PyObject* kwds product_query{product_specification::create(cname + "_" + input_labels[0] + "py"), LAYER}, product_query{product_specification::create(cname + "_" + input_labels[1] + "py"), LAYER}, product_query{product_specification::create(cname + "_" + input_labels[2] + "py"), LAYER}); + Py_DECREF(callable); } else { PyErr_SetString(PyExc_TypeError, "unsupported number of inputs"); + Py_DECREF(callable); return nullptr; } diff --git a/scripts/README.md b/scripts/README.md index 6a32f118f..5d6cfdc18 100644 --- a/scripts/README.md +++ b/scripts/README.md @@ -156,12 +156,27 @@ Provides convenient commands for managing code coverage analysis. ```bash # From repository root -./scripts/coverage.sh [COMMAND] [COMMAND...] +./scripts/coverage.sh [--preset ] [COMMAND] [COMMAND...] # Multiple commands in sequence ./scripts/coverage.sh setup test xml html ``` +#### Presets + +The `--preset` flag controls the toolchain and instrumentation method: + +- **`coverage-clang`** (Default): + - Uses LLVM source-based coverage. + - Best for local development (fast, accurate). + - Generates high-fidelity HTML reports. + - Key commands: `setup`, `test`, `html`, `view`, `summary`. + +- **`coverage-gcc`**: + - Uses `gcov` instrumentation. + - Best for CI pipelines requiring XML output (e.g., Codecov). + - Key commands: `setup`, `test`, `xml`, `upload`. 
+ #### Commands | Command | Description | diff --git a/test/python/CMakeLists.txt b/test/python/CMakeLists.txt index c2f73917f..7e97c8167 100644 --- a/test/python/CMakeLists.txt +++ b/test/python/CMakeLists.txt @@ -9,15 +9,30 @@ function(check_python_module_version MODULE_NAME MIN_VERSION OUT_VAR) "import sys try: import ${MODULE_NAME} - from packaging.version import parse as parse_version installed_version = getattr(${MODULE_NAME}, '__version__', None) - if parse_version(installed_version) >= parse_version('${MIN_VERSION}'): + if not installed_version: + sys.exit(2) + + def parse(v): + return tuple(map(int, v.split('.')[:3])) + + if parse(installed_version) >= parse('${MIN_VERSION}'): sys.exit(0) else: sys.exit(2) # Version too low -except ImportError: +except ImportError as e: + print(f'ImportError: {e}') + sys.exit(1) +except Exception as e: + print(f'Exception: {e}') sys.exit(1)" RESULT_VARIABLE _module_check_result + OUTPUT_VARIABLE _module_check_out + ERROR_VARIABLE _module_check_err + ) + message( + STATUS + "Check ${MODULE_NAME}: Res=${_module_check_result} Out=${_module_check_out} Err=${_module_check_err}" ) if(_module_check_result EQUAL 0) @@ -53,6 +68,8 @@ if(HAS_CPPYY) set(PYTHON_TEST_PHLEX_INSTALL ${CMAKE_SOURCE_DIR}) endif() + set(PYTHON_TEST_FILES test_phlex.py unit_test_variant.py) + # Determine pytest command based on coverage support if(HAS_PYTEST_COV AND ENABLE_COVERAGE) set( @@ -65,11 +82,11 @@ if(HAS_CPPYY) --cov-report=xml:${CMAKE_BINARY_DIR}/coverage-python.xml --cov-report=html:${CMAKE_BINARY_DIR}/coverage-python-html --cov-config=${CMAKE_CURRENT_SOURCE_DIR}/.coveragerc - test_phlex.py + ${PYTHON_TEST_FILES} ) message(STATUS "Python tests will run with coverage reporting (pytest-cov)") else() - set(PYTEST_COMMAND ${PYTHON_TEST_EXECUTABLE} -m pytest test_phlex.py) + set(PYTEST_COMMAND ${PYTHON_TEST_EXECUTABLE} -m pytest ${PYTHON_TEST_FILES}) if(ENABLE_COVERAGE AND NOT HAS_PYTEST_COV) message(WARNING "ENABLE_COVERAGE is ON but pytest-cov not 
found; Python coverage disabled") endif() @@ -79,18 +96,88 @@ if(HAS_CPPYY) add_test(NAME py:phlex COMMAND ${PYTEST_COMMAND} WORKING_DIRECTORY ${CMAKE_CURRENT_SOURCE_DIR}) set_property(TEST py:phlex PROPERTY ENVIRONMENT "PHLEX_INSTALL=${PYTHON_TEST_PHLEX_INSTALL}") + + if(HAS_PYTEST_COV) + add_custom_target( + coverage-python + COMMAND ${PYTEST_COMMAND} + WORKING_DIRECTORY ${CMAKE_CURRENT_SOURCE_DIR} + COMMENT "Running Python coverage report" + ) + endif() endif() set(ACTIVE_PY_CPHLEX_TESTS "") +# numpy support if installed +if(HAS_NUMPY) + # phlex-based tests that require numpy support + add_test(NAME py:vec COMMAND phlex -c ${CMAKE_CURRENT_SOURCE_DIR}/pyvec.jsonnet) + list(APPEND ACTIVE_PY_CPHLEX_TESTS py:vec) + + add_test(NAME py:vectypes COMMAND phlex -c ${CMAKE_CURRENT_SOURCE_DIR}/pyvectypes.jsonnet) + list(APPEND ACTIVE_PY_CPHLEX_TESTS py:vectypes) + + add_test(NAME py:callback3 COMMAND phlex -c ${CMAKE_CURRENT_SOURCE_DIR}/pycallback3.jsonnet) + list(APPEND ACTIVE_PY_CPHLEX_TESTS py:callback3) + + # Expect failure for these tests (check for error propagation and type checking) + add_test(NAME py:raise COMMAND phlex -c ${CMAKE_CURRENT_SOURCE_DIR}/pyraise.jsonnet) + set_tests_properties( + py:raise + PROPERTIES PASS_REGULAR_EXPRESSION "RuntimeError: Intentional failure" + ) + list(APPEND ACTIVE_PY_CPHLEX_TESTS py:raise) + + add_test(NAME py:badbool COMMAND phlex -c ${CMAKE_CURRENT_SOURCE_DIR}/pybadbool.jsonnet) + set_tests_properties( + py:badbool + PROPERTIES + PASS_REGULAR_EXPRESSION + "Python conversion error for type bool: boolean value should be bool, or integer 1 or 0" + ) + list(APPEND ACTIVE_PY_CPHLEX_TESTS py:badbool) + + add_test(NAME py:badint COMMAND phlex -c ${CMAKE_CURRENT_SOURCE_DIR}/pybadint.jsonnet) + set_tests_properties( + py:badint + PROPERTIES + PASS_REGULAR_EXPRESSION + "Python conversion error for type long: int/long conversion expects an integer object" + ) + list(APPEND ACTIVE_PY_CPHLEX_TESTS py:badint) + + add_test(NAME py:baduint 
COMMAND phlex -c ${CMAKE_CURRENT_SOURCE_DIR}/pybaduint.jsonnet) + set_tests_properties( + py:baduint + PROPERTIES + PASS_REGULAR_EXPRESSION + "Python conversion error for type uint: can't convert negative value to unsigned long" + ) + list(APPEND ACTIVE_PY_CPHLEX_TESTS py:baduint) + + add_test( + NAME py:mismatch_variant + COMMAND phlex -c ${CMAKE_CURRENT_SOURCE_DIR}/pymismatch_variant.jsonnet + ) + set_tests_properties( + py:mismatch_variant + PROPERTIES + PASS_REGULAR_EXPRESSION "number of inputs .* does not match number of annotation types" + ) + list(APPEND ACTIVE_PY_CPHLEX_TESTS py:mismatch_variant) + + add_test(NAME py:veclists COMMAND phlex -c ${CMAKE_CURRENT_SOURCE_DIR}/pyveclists.jsonnet) + list(APPEND ACTIVE_PY_CPHLEX_TESTS py:veclists) + + add_test(NAME py:types COMMAND phlex -c ${CMAKE_CURRENT_SOURCE_DIR}/pytypes.jsonnet) + list(APPEND ACTIVE_PY_CPHLEX_TESTS py:types) +endif() + # C++ helper to provide a driver add_library(cppsource4py MODULE source.cpp) target_link_libraries(cppsource4py PRIVATE phlex::module) -# phlex-based tests that require numpy support -add_test(NAME py:vec COMMAND phlex -c ${CMAKE_CURRENT_SOURCE_DIR}/pyvec.jsonnet) -list(APPEND ACTIVE_PY_CPHLEX_TESTS py:vec) - # phlex-based tests (no cppyy dependency) add_test(NAME py:add COMMAND phlex -c ${CMAKE_CURRENT_SOURCE_DIR}/pyadd.jsonnet) list(APPEND ACTIVE_PY_CPHLEX_TESTS py:add) @@ -101,12 +188,24 @@ list(APPEND ACTIVE_PY_CPHLEX_TESTS py:config) add_test(NAME py:reduce COMMAND phlex -c ${CMAKE_CURRENT_SOURCE_DIR}/pyreduce.jsonnet) list(APPEND ACTIVE_PY_CPHLEX_TESTS py:reduce) +add_test(NAME py:coverage COMMAND phlex -c ${CMAKE_CURRENT_SOURCE_DIR}/pycoverage.jsonnet) +list(APPEND ACTIVE_PY_CPHLEX_TESTS py:coverage) + +add_test( + NAME py:mismatch + COMMAND ${PROJECT_BINARY_DIR}/bin/phlex -c ${CMAKE_CURRENT_SOURCE_DIR}/pymismatch.jsonnet +) +set_tests_properties( + py:mismatch + PROPERTIES + PASS_REGULAR_EXPRESSION "number of inputs .* does not match number of annotation types" +) 
+list(APPEND ACTIVE_PY_CPHLEX_TESTS py:mismatch) + # "failing" tests for checking error paths add_test( NAME py:failure - COMMAND - ${CMAKE_CURRENT_SOURCE_DIR}/failing_test_wrap.sh ${PROJECT_BINARY_DIR}/bin/phlex -c - ${CMAKE_CURRENT_SOURCE_DIR}/pyfailure.jsonnet + COMMAND ${PROJECT_BINARY_DIR}/bin/phlex -c ${CMAKE_CURRENT_SOURCE_DIR}/pyfailure.jsonnet ) set_tests_properties( py:failure @@ -114,8 +213,26 @@ set_tests_properties( ) list(APPEND ACTIVE_PY_CPHLEX_TESTS py:failure) +set(TEST_PYTHONPATH ${CMAKE_CURRENT_SOURCE_DIR}) +# Add the python plugin source directory to PYTHONPATH so tests can use phlex package +set(TEST_PYTHONPATH ${TEST_PYTHONPATH}:${PROJECT_SOURCE_DIR}/plugins/python/python) + +# Always add site-packages to PYTHONPATH for tests, as embedded python might +# not find them especially in spack environments where they are in +# non-standard locations +if(Python_SITELIB) + set(TEST_PYTHONPATH ${TEST_PYTHONPATH}:${Python_SITELIB}) +endif() +if(Python_SITEARCH AND NOT "${Python_SITEARCH}" STREQUAL "${Python_SITELIB}") + set(TEST_PYTHONPATH ${TEST_PYTHONPATH}:${Python_SITEARCH}) +endif() + +if(DEFINED ENV{VIRTUAL_ENV}) + # Keep this for backward compatibility or if it adds something else +endif() +set(TEST_PYTHONPATH ${TEST_PYTHONPATH}:$ENV{PYTHONPATH}) + # Environment variables required: -set(TEST_PYTHONPATH ${CMAKE_CURRENT_SOURCE_DIR}:$ENV{PYTHONPATH}) set( PYTHON_TEST_ENVIRONMENT "SPDLOG_LEVEL=debug;PHLEX_PLUGIN_PATH=${PROJECT_BINARY_DIR};PYTHONPATH=${TEST_PYTHONPATH};PHLEX_INSTALL=${PYTHON_TEST_PHLEX_INSTALL}" @@ -137,3 +254,10 @@ set_tests_properties( ENVIRONMENT "${PYTHON_TEST_ENVIRONMENT};VIRTUAL_ENV=${PY_VIRTUAL_ENV_DIR}" ENVIRONMENT_MODIFICATION "PATH=path_list_prepend:${PY_VIRTUAL_ENV_DIR}/bin" ) + +# Unit tests for the phlex python package +add_test( + NAME py:unit_variant + COMMAND ${Python_EXECUTABLE} ${CMAKE_CURRENT_SOURCE_DIR}/unit_test_variant.py +) +set_tests_properties(py:unit_variant PROPERTIES ENVIRONMENT "${PYTHON_TEST_ENVIRONMENT}") 
diff --git a/test/python/adder.py b/test/python/adder.py index 549dcdab9..9508636db 100644 --- a/test/python/adder.py +++ b/test/python/adder.py @@ -4,19 +4,33 @@ real. It serves as a "Hello, World" equivalent for running Python code. """ +from typing import Protocol, TypeVar -def add(i: int, j: int) -> int: +from phlex import Variant + + +class AddableProtocol[T](Protocol): + """Typer bound for any types that can be added.""" + + def __add__(self, other: T) -> T: # noqa: D105 + ... # codeql[py/ineffectual-statement] + + +Addable = TypeVar("Addable", bound=AddableProtocol) + + +def add(i: Addable, j: Addable) -> Addable: """Add the inputs together and return the sum total. Use the standard `+` operator to add the two inputs together to arrive at their total. Args: - i (int): First input. - j (int): Second input. + i (Addable): First input. + j (Addable): Second input. Returns: - int: Sum of the two inputs. + Addable: Sum of the two inputs. Examples: >>> add(1, 2) @@ -40,4 +54,5 @@ def PHLEX_REGISTER_ALGORITHMS(m, config): Returns: None """ - m.transform(add, input_family=config["input"], output_products=config["output"]) + int_adder = Variant(add, {"i": int, "j": int, "return": int}, "iadd") + m.transform(int_adder, input_family=config["input"], output_products=config["output"]) diff --git a/test/python/failing_test_wrap.sh b/test/python/failing_test_wrap.sh deleted file mode 100755 index ee8081316..000000000 --- a/test/python/failing_test_wrap.sh +++ /dev/null @@ -1,7 +0,0 @@ -#!/bin/bash -"$@" -exit_code=$? 
-if [ $exit_code -ne 0 ]; then - exit 1 -fi -exit 0 diff --git a/test/python/pybadbool.jsonnet b/test/python/pybadbool.jsonnet new file mode 100644 index 000000000..97bd2821f --- /dev/null +++ b/test/python/pybadbool.jsonnet @@ -0,0 +1,26 @@ +{ + driver: { + cpp: 'generate_layers', + layers: { + event: { parent: 'job', total: 1, starting_number: 1 }, + }, + }, + sources: { + provider: { + cpp: 'cppsource4py', + }, + }, + modules: { + test_bad_bool: { + py: 'test_callbacks', + mode: 'bad_bool', + input: ['i'], + output: ['out_bool'], + }, + verify_bool: { + py: 'verify', + input: ['out_bool'], + expected_bool: true, + }, + }, +} diff --git a/test/python/pybadint.jsonnet b/test/python/pybadint.jsonnet new file mode 100644 index 000000000..7bfbb8659 --- /dev/null +++ b/test/python/pybadint.jsonnet @@ -0,0 +1,21 @@ +{ + driver: { + cpp: 'generate_layers', + layers: { + event: { parent: 'job', total: 1, starting_number: 1 }, + }, + }, + sources: { + provider: { + cpp: 'cppsource4py', + }, + }, + modules: { + test_bad_long: { + py: 'test_callbacks', + mode: 'bad_long', + input: ['i'], + output: ['out_long'], + }, + }, +} diff --git a/test/python/pybaduint.jsonnet b/test/python/pybaduint.jsonnet new file mode 100644 index 000000000..0616e7fdc --- /dev/null +++ b/test/python/pybaduint.jsonnet @@ -0,0 +1,21 @@ +{ + driver: { + cpp: 'generate_layers', + layers: { + event: { parent: 'job', total: 1, starting_number: 1 }, + }, + }, + sources: { + provider: { + cpp: 'cppsource4py', + }, + }, + modules: { + test_bad_uint: { + py: 'test_callbacks', + mode: 'bad_uint', + input: ['i'], + output: ['out_uint'], + }, + }, +} diff --git a/test/python/pycallback3.jsonnet b/test/python/pycallback3.jsonnet new file mode 100644 index 000000000..c6893fd80 --- /dev/null +++ b/test/python/pycallback3.jsonnet @@ -0,0 +1,29 @@ +{ + driver: { + cpp: 'generate_layers', + layers: { + event: { parent: 'job', total: 1, starting_number: 1 }, + }, + }, + sources: { + provider: { + cpp: 'cppsource4py', 
+ }, + }, + modules: { + // 1. Test 3-arg callback (success case) + test_three_args: { + py: 'test_callbacks', + mode: 'three_args', + input: ['i', 'j', 'k'], + output: ['sum_ijk'], + }, + verify_three: { + py: 'verify', + input: ['sum_ijk'], + sum_total: 1, // 1 event * (0+0+0? wait, i=event_num-1. event1->0. sum=0. ) + // provider generates i, j starting at 0? + // cppsource4py probably uses event number. + }, + }, +} diff --git a/test/python/pycoverage.jsonnet b/test/python/pycoverage.jsonnet new file mode 100644 index 000000000..3d4ccbe66 --- /dev/null +++ b/test/python/pycoverage.jsonnet @@ -0,0 +1,18 @@ +{ + driver: { + cpp: 'generate_layers', + layers: { + event: { parent: 'job', total: 1, starting_number: 1 }, + }, + }, + sources: { + cppdriver: { + cpp: 'cppsource4py', + }, + }, + modules: { + coverage: { + py: 'test_coverage', + }, + }, +} diff --git a/test/python/pymismatch.jsonnet b/test/python/pymismatch.jsonnet new file mode 100644 index 000000000..4098dd630 --- /dev/null +++ b/test/python/pymismatch.jsonnet @@ -0,0 +1,13 @@ +{ + driver: { + cpp: 'generate_layers', + layers: { + event: { total: 1 }, + }, + }, + modules: { + mismatch: { + py: 'test_mismatch', + }, + }, +} diff --git a/test/python/pymismatch_variant.jsonnet b/test/python/pymismatch_variant.jsonnet new file mode 100644 index 000000000..07823340b --- /dev/null +++ b/test/python/pymismatch_variant.jsonnet @@ -0,0 +1,22 @@ +{ + driver: { + cpp: 'generate_layers', + layers: { + event: { parent: 'job', total: 1, starting_number: 1 }, + }, + }, + sources: { + provider: { + cpp: 'cppsource4py', + }, + }, + modules: { + test_mismatch: { + py: 'test_callbacks', + mode: 'mismatch', + // Providing 3 inputs for a 2-arg function + input: ['i', 'j', 'k'], + output: ['sum_out'], + }, + }, +} diff --git a/test/python/pyraise.jsonnet b/test/python/pyraise.jsonnet new file mode 100644 index 000000000..cd08ce5b5 --- /dev/null +++ b/test/python/pyraise.jsonnet @@ -0,0 +1,21 @@ +{ + driver: { + cpp: 
'generate_layers', + layers: { + event: { parent: 'job', total: 1, starting_number: 1 }, + }, + }, + sources: { + provider: { + cpp: 'cppsource4py', + }, + }, + modules: { + test_exception: { + py: 'test_callbacks', + mode: 'exception', + input: ['i'], + output: ['out'], + }, + }, +} diff --git a/test/python/pytypes.jsonnet b/test/python/pytypes.jsonnet new file mode 100644 index 000000000..4c401a1a7 --- /dev/null +++ b/test/python/pytypes.jsonnet @@ -0,0 +1,33 @@ +{ + driver: { + cpp: 'generate_layers', + layers: { + event: { parent: 'job', total: 10, starting_number: 1 }, + }, + }, + sources: { + cppdriver: { + cpp: 'cppsource4py', + }, + }, + modules: { + pytypes: { + py: 'test_types', + input_float: ['f1', 'f2'], + output_float: ['sum_f'], + input_double: ['d1', 'd2'], + output_double: ['sum_d'], + input_uint: ['u1', 'u2'], + output_uint: ['sum_u'], + input_bool: ['b1', 'b2'], + output_bool: ['and_b'], + output_vfloat: ['vec_f'], + output_vdouble: ['vec_d'], + }, + verify_bool: { + py: 'verify_extended', + input_bool: ['and_b'], + expected_bool: false, + }, + }, +} diff --git a/test/python/pyveclists.jsonnet b/test/python/pyveclists.jsonnet new file mode 100644 index 000000000..4d09979fd --- /dev/null +++ b/test/python/pyveclists.jsonnet @@ -0,0 +1,61 @@ +{ + driver: { + cpp: 'generate_layers', + layers: { + event: { parent: 'job', total: 10, starting_number: 1 }, + }, + }, + sources: { + cppdriver: { + cpp: 'cppsource4py', + }, + }, + modules: { + vectypes: { + py: 'vectypes', + use_lists: true, + input_int32: ['i', 'j'], + output_int32: ['sum_int32'], + input_uint32: ['u1', 'u2'], + output_uint32: ['sum_uint32'], + input_int64: ['l1', 'l2'], + output_int64: ['sum_int64'], + input_uint64: ['ul1', 'ul2'], + output_uint64: ['sum_uint64'], + input_float32: ['f1', 'f2'], + output_float32: ['sum_float32'], + input_float64: ['d1', 'd2'], + output_float64: ['sum_float64'], + }, + verify_int32: { + py: 'verify_extended', + input_int: ['sum_int32'], + sum_total: 1, + 
}, + verify_uint32: { + py: 'verify_extended', + input_uint: ['sum_uint32'], + sum_total: 1, + }, + verify_int64: { + py: 'verify_extended', + input_long: ['sum_int64'], + sum_total: 1, + }, + verify_uint64: { + py: 'verify_extended', + input_ulong: ['sum_uint64'], + sum_total: 100, + }, + verify_float32: { + py: 'verify_extended', + input_float: ['sum_float32'], + sum_total: 1.0, + }, + verify_double: { + py: 'verify_extended', + input_double: ['sum_float64'], + sum_total: 1.0, + }, + }, +} diff --git a/test/python/pyvectypes.jsonnet b/test/python/pyvectypes.jsonnet new file mode 100644 index 000000000..3740cd802 --- /dev/null +++ b/test/python/pyvectypes.jsonnet @@ -0,0 +1,60 @@ +{ + driver: { + cpp: 'generate_layers', + layers: { + event: { parent: 'job', total: 10, starting_number: 1 }, + }, + }, + sources: { + cppdriver: { + cpp: 'cppsource4py', + }, + }, + modules: { + vectypes: { + py: 'vectypes', + input_int32: ['i', 'j'], + output_int32: ['sum_int32'], + input_uint32: ['u1', 'u2'], + output_uint32: ['sum_uint32'], + input_int64: ['l1', 'l2'], + output_int64: ['sum_int64'], + input_uint64: ['ul1', 'ul2'], + output_uint64: ['sum_uint64'], + input_float32: ['f1', 'f2'], + output_float32: ['sum_float32'], + input_float64: ['d1', 'd2'], + output_float64: ['sum_float64'], + }, + verify_int32: { + py: 'verify_extended', + input_int: ['sum_int32'], + sum_total: 1, + }, + verify_uint32: { + py: 'verify_extended', + input_uint: ['sum_uint32'], + sum_total: 1, + }, + verify_int64: { + py: 'verify_extended', + input_long: ['sum_int64'], + sum_total: 1, + }, + verify_uint64: { + py: 'verify_extended', + input_ulong: ['sum_uint64'], + sum_total: 100, + }, + verify_float32: { + py: 'verify_extended', + input_float: ['sum_float32'], + sum_total: 1.0, + }, + verify_double: { + py: 'verify_extended', + input_double: ['sum_float64'], + sum_total: 1.0, + }, + }, +} diff --git a/test/python/reducer.py b/test/python/reducer.py index 2ced48de7..855d35313 100644 --- 
a/test/python/reducer.py +++ b/test/python/reducer.py @@ -32,6 +32,21 @@ def add(i: int, j: int) -> int: return i + j +def add_sum01(sum0: int, sum1: int) -> int: + """Add sum0 and sum1.""" + return sum0 + sum1 + + +def add_sum23(sum2: int, sum3: int) -> int: + """Add sum2 and sum3.""" + return sum2 + sum3 + + +def add_final(sum01: int, sum23: int) -> int: + """Add sum01 and sum23.""" + return sum01 + sum23 + + def PHLEX_REGISTER_ALGORITHMS(m, config): """Register a series of `add` algorithm as transformations. @@ -55,8 +70,12 @@ def PHLEX_REGISTER_ALGORITHMS(m, config): ) # now reduce them pair-wise - m.transform(add, name="reduce01", input_family=["sum0", "sum1"], output_products=["sum01"]) - m.transform(add, name="reduce23", input_family=["sum2", "sum3"], output_products=["sum23"]) + m.transform( + add_sum01, name="reduce01", input_family=["sum0", "sum1"], output_products=["sum01"] + ) + m.transform( + add_sum23, name="reduce23", input_family=["sum2", "sum3"], output_products=["sum23"] + ) # once more (and the configuration will add a verifier) - m.transform(add, name="reduce", input_family=["sum01", "sum23"], output_products=["sum"]) + m.transform(add_final, name="reduce", input_family=["sum01", "sum23"], output_products=["sum"]) diff --git a/test/python/source.cpp b/test/python/source.cpp index 2a6aac8fd..a08cb01cf 100644 --- a/test/python/source.cpp +++ b/test/python/source.cpp @@ -1,12 +1,64 @@ #include "phlex/source.hpp" #include "phlex/model/data_cell_index.hpp" +#include using namespace phlex; PHLEX_REGISTER_PROVIDERS(s) { - s.provide("provide_i", [](data_cell_index const& id) -> int { return id.number(); }) + s.provide("provide_i", [](data_cell_index const& id) -> int { return id.number() % 2; }) .output_product("i"_in("job")); - s.provide("provide_j", [](data_cell_index const& id) -> int { return -id.number() + 1; }) + s.provide("provide_j", + [](data_cell_index const& id) -> int { return 1 - (int)(id.number() % 2); }) .output_product("j"_in("job")); + 
s.provide("provide_k", [](data_cell_index const&) -> int { return 0; }) + .output_product("k"_in("job")); + + s.provide("provide_f1", + [](data_cell_index const& id) -> float { return (float)((id.number() % 100) / 100.0); }) + .output_product("f1"_in("job")); + s.provide( + "provide_f2", + [](data_cell_index const& id) -> float { return 1.0f - (float)((id.number() % 100) / 100.0); }) + .output_product("f2"_in("job")); + + s.provide( + "provide_d1", + [](data_cell_index const& id) -> double { return (double)((id.number() % 100) / 100.0); }) + .output_product("d1"_in("job")); + s.provide("provide_d2", + [](data_cell_index const& id) -> double { + return 1.0 - (double)((id.number() % 100) / 100.0); + }) + .output_product("d2"_in("job")); + + s.provide( + "provide_u1", + [](data_cell_index const& id) -> unsigned int { return (unsigned int)(id.number() % 2); }) + .output_product("u1"_in("job")); + s.provide( + "provide_u2", + [](data_cell_index const& id) -> unsigned int { return 1 - (unsigned int)(id.number() % 2); }) + .output_product("u2"_in("job")); + + s.provide("provide_l1", [](data_cell_index const& id) -> long { return (long)(id.number() % 2); }) + .output_product("l1"_in("job")); + s.provide("provide_l2", + [](data_cell_index const& id) -> long { return 1 - (long)(id.number() % 2); }) + .output_product("l2"_in("job")); + + s.provide( + "provide_ul1", + [](data_cell_index const& id) -> unsigned long { return (unsigned long)(id.number() % 101); }) + .output_product("ul1"_in("job")); + s.provide("provide_ul2", + [](data_cell_index const& id) -> unsigned long { + return 100 - (unsigned long)(id.number() % 101); + }) + .output_product("ul2"_in("job")); + + s.provide("provide_b1", [](data_cell_index const& id) -> bool { return (id.number() % 2) == 0; }) + .output_product("b1"_in("job")); + s.provide("provide_b2", [](data_cell_index const& id) -> bool { return (id.number() % 2) != 0; }) + .output_product("b2"_in("job")); } diff --git a/test/python/test_callbacks.py 
b/test/python/test_callbacks.py
new file mode 100644
index 000000000..b9e92c5c5
--- /dev/null
+++ b/test/python/test_callbacks.py
@@ -0,0 +1,58 @@
+"""Test coverage gaps in modulewrap.cpp."""
+
+
+# 3-argument function to trigger py_callback<3>
+def sum_three(i: int, j: int, k: int) -> int:
+    """Sum three integers."""
+    return i + j + k
+
+
+# Function that raises exception to test error handling
+def raise_error(i: int) -> int:
+    """Raise a RuntimeError."""
+    raise RuntimeError("Intentional failure")
+
+
+# Invalid bool return (2)
+def bad_bool(i: int) -> bool:
+    """Return an invalid boolean value."""
+    return 2  # type: ignore
+
+
+# Invalid long return (float)
+def bad_long(i: int) -> "long":  # type: ignore # noqa: F821
+    """Return a float instead of an int."""
+    return 1.5  # type: ignore
+
+
+# Invalid uint return (negative)
+def bad_uint(i: int) -> "unsigned int":  # type: ignore # noqa: F722
+    """Return a negative value for unsigned int."""
+    return -5  # type: ignore
+
+
+# Function with mismatching annotation count vs config inputs
+def two_args(i: int, j: int) -> int:
+    """Sum two integers while config provides three inputs (tests parameter count mismatch)."""
+    return i + j
+
+
+def PHLEX_REGISTER_ALGORITHMS(m, config):
+    """Register algorithms based on configuration."""
+    try:
+        mode = config["mode"]
+    except (KeyError, TypeError):
+        mode = "three_args"
+
+    if mode == "three_args":
+        m.transform(sum_three, input_family=config["input"], output_products=config["output"])
+    elif mode == "exception":
+        m.transform(raise_error, input_family=config["input"], output_products=config["output"])
+    elif mode == "bad_bool":
+        m.transform(bad_bool, input_family=config["input"], output_products=config["output"])
+    elif mode == "bad_long":
+        m.transform(bad_long, input_family=config["input"], output_products=config["output"])
+    elif mode == "bad_uint":
+        m.transform(bad_uint, input_family=config["input"], output_products=config["output"])
+    elif mode == "mismatch":
+        m.transform(two_args, input_family=config["input"], output_products=config["output"])
diff --git a/test/python/test_coverage.py b/test/python/test_coverage.py
new file mode 100644
index 000000000..40f890899
--- /dev/null
+++ b/test/python/test_coverage.py
@@ -0,0 +1,51 @@
+"""Test coverage for list input converters."""
+
+
+class double(float):  # noqa: N801
+    """Dummy class for C++ double type."""
+
+    pass
+
+
+def list_int_func(lst: list[int]) -> int:
+    """Sum a list of integers."""
+    return sum(lst)
+
+
+def list_float_func(lst: list[float]) -> float:
+    """Sum a list of floats."""
+    return sum(lst)
+
+
+# Use a string annotation for double so the C++ wrapper sees the exact type name
+def list_double_func(lst: "list[double]") -> float:  # type: ignore
+    """Sum a list of doubles."""
+    return sum(lst)
+
+
+def collect_int(i: int) -> list[int]:
+    """Collect an integer into a list."""
+    return [i]
+
+
+def collect_float(f1: float) -> list[float]:
+    """Collect a float into a list."""
+    return [f1]
+
+
+def collect_double(d1: "double") -> "list[double]":  # type: ignore
+    """Collect a double into a list."""
+    return [d1]
+
+
+def PHLEX_REGISTER_ALGORITHMS(m, config):
+    """Register algorithms."""
+    # We need to transform scalar inputs to lists first
+    # i, f1, d1 come from cppsource4py
+    m.transform(collect_int, input_family=["i"], output_products=["l_int"])
+    m.transform(collect_float, input_family=["f1"], output_products=["l_float"])
+    m.transform(collect_double, input_family=["d1"], output_products=["l_double"])
+
+    m.transform(list_int_func, input_family=["l_int"], output_products=["sum_int"])
+    m.transform(list_float_func, input_family=["l_float"], output_products=["sum_float"])
+    m.transform(list_double_func, input_family=["l_double"], output_products=["sum_double"])
diff --git a/test/python/test_mismatch.py b/test/python/test_mismatch.py
new file mode 100644
index 000000000..d84f27e8c
--- /dev/null
+++ b/test/python/test_mismatch.py
@@ -0,0 +1,13 @@
+"""Test a mismatch between the number of input labels and function parameters."""
+
+
+def mismatch_func(a: int, b: int) -> int:
+    """Add two integers."""
+    return a + b
+
+
+def PHLEX_REGISTER_ALGORITHMS(m, config):
+    """Register algorithms."""
+    # input_family has 1 element, but function takes 2 arguments
+    # This should trigger the error in modulewrap.cpp
+    m.transform(mismatch_func, input_family=["a"], output_products=["sum"])
diff --git a/test/python/test_types.py b/test/python/test_types.py
new file mode 100644
index 000000000..d2189a709
--- /dev/null
+++ b/test/python/test_types.py
@@ -0,0 +1,127 @@
+"""Algorithms exercising various C++ types.
+
+This test code implements algorithms that use types other than the standard
+int/string to ensure that the Python bindings correctly handle them.
+"""
+
+import numpy as np
+import numpy.typing as npt
+
+
+class double(float):  # noqa: N801
+    """Dummy class for C++ double type."""
+
+    pass
+
+
+def add_float(f1: float, f2: float) -> float:
+    """Add two floats.
+
+    Args:
+        f1 (float): First input.
+        f2 (float): Second input.
+
+    Returns:
+        float: Sum of the two inputs.
+    """
+    return f1 + f2
+
+
+def add_double(d1: double, d2: double) -> double:
+    """Add two doubles.
+
+    Args:
+        d1 (double): First input.
+        d2 (double): Second input.
+
+    Returns:
+        double: Sum of the two inputs.
+    """
+    return double(d1 + d2)
+
+
+def add_unsigned(u1: "unsigned int", u2: "unsigned int") -> "unsigned int":  # type: ignore # noqa: F722
+    """Add two unsigned integers.
+
+    Args:
+        u1 (unsigned int): First input.
+        u2 (unsigned int): Second input.
+
+    Returns:
+        unsigned int: Sum of the two inputs.
+    """
+    return u1 + u2
+
+
+def collect_float(f1: float, f2: float) -> npt.NDArray[np.float32]:
+    """Combine floats into a numpy array.
+
+    Args:
+        f1 (float): First input.
+        f2 (float): Second input.
+
+    Returns:
+        ndarray: Array of floats.
+    """
+    return np.array([f1, f2], dtype=np.float32)
+
+
+def collect_double(d1: double, d2: double) -> npt.NDArray[np.float64]:
+    """Combine doubles into a numpy array.
+
+    Args:
+        d1 (double): First input.
+        d2 (double): Second input.
+
+    Returns:
+        ndarray: Array of doubles.
+    """
+    return np.array([d1, d2], dtype=np.float64)
+
+
+def and_bool(b1: bool, b2: bool) -> bool:
+    """And two booleans.
+
+    Args:
+        b1 (bool): First input.
+        b2 (bool): Second input.
+
+    Returns:
+        bool: Logical AND of the two inputs.
+    """
+    return b1 and b2
+
+
+def PHLEX_REGISTER_ALGORITHMS(m, config):
+    """Register algorithms.
+
+    Args:
+        m (internal): Phlex registrar representation.
+        config (internal): Phlex configuration representation.
+
+    Returns:
+        None
+    """
+    m.transform(
+        add_float, input_family=config["input_float"], output_products=config["output_float"]
+    )
+
+    m.transform(
+        add_double, input_family=config["input_double"], output_products=config["output_double"]
+    )
+
+    m.transform(
+        add_unsigned, input_family=config["input_uint"], output_products=config["output_uint"]
+    )
+
+    m.transform(and_bool, input_family=config["input_bool"], output_products=config["output_bool"])
+
+    m.transform(
+        collect_float, input_family=config["input_float"], output_products=config["output_vfloat"]
+    )
+
+    m.transform(
+        collect_double,
+        input_family=config["input_double"],
+        output_products=config["output_vdouble"],
+    )
diff --git a/test/python/unit_test_variant.py b/test/python/unit_test_variant.py
new file mode 100644
index 000000000..a2cd27dea
--- /dev/null
+++ b/test/python/unit_test_variant.py
@@ -0,0 +1,78 @@
+#!/usr/bin/env python3
+"""Unit tests for the phlex.Variant class."""
+
+import unittest
+
+from phlex import Variant
+
+
+def example_func(a, b=1):
+    """Example function for testing."""
+    return a + b
+
+
+ann = {"a": int, "b": int, "return": int}
+
+
+class TestVariant(unittest.TestCase):
+    """Tests for Variant wrapper."""
+
+    def test_initialization(self):
+        """Test proper initialization and attribute exposure."""
+        wrapper = Variant(example_func, ann, "example_wrapper")
+
+        self.assertEqual(wrapper.__name__, "example_wrapper")
+        self.assertEqual(wrapper.__annotations__, ann)
+        self.assertEqual(wrapper.phlex_callable, example_func)
+        # Check introspection attributes are exposed
+        self.assertEqual(wrapper.__code__, example_func.__code__)
+        self.assertEqual(wrapper.__defaults__, example_func.__defaults__)
+
+    def test_call_by_default_raises(self):
+        """Test that calling the wrapper raises AssertionError by default."""
+        wrapper = Variant(example_func, ann, "no_call")
+        with self.assertRaises(AssertionError) as cm:
+            wrapper(1)
+        self.assertIn("was called directly", str(cm.exception))
+
+    def test_allow_call(self):
+        """Test that calling is allowed when configured."""
+        wrapper = Variant(example_func, ann, "yes_call", allow_call=True)
+        self.assertEqual(wrapper(10, 20), 30)
+
+    def test_clone_shallow(self):
+        """Test shallow cloning behavior."""
+        # copy.copy of a function returns the function itself, so this
+        # exercises the clone flag logic in Variant, not object duplication.
+        wrapper = Variant(example_func, ann, "clone_shallow", clone=True)
+        # function copy is same object
+        self.assertEqual(wrapper.phlex_callable, example_func)
+
+        # Test valid copy logic with a mutable callable
+        class CallableObj:
+            def __call__(self):
+                pass
+
+        obj = CallableObj()
+        wrapper_obj = Variant(obj, {}, "obj_clone", clone=True)
+        self.assertIsNot(wrapper_obj.phlex_callable, obj)
+        # copy.copy of a generic instance yields a distinct object
+
+    def test_clone_deep(self):
+        """Test deep cloning behavior."""
+
+        class Container:
+            def __init__(self):
+                self.data = [1]
+
+            def __call__(self):
+                return self.data[0]
+
+        c = Container()
+        wrapper = Variant(c, {}, "deep_clone", clone="deep")
+        self.assertIsNot(wrapper.phlex_callable, c)
+        self.assertIsNot(wrapper.phlex_callable.data, c.data)
+
+
+if __name__ == "__main__":
+    unittest.main()
diff --git a/test/python/vectypes.py b/test/python/vectypes.py
new file mode 100644
index 000000000..4763621b7
--- /dev/null
+++ b/test/python/vectypes.py
@@ -0,0 +1,268 @@
+"""Algorithms exercising various numpy array types.
+
+This test code implements algorithms that use numpy arrays of different types
+to ensure that the Python bindings correctly handle them.
+"""
+
+import numpy as np
+import numpy.typing as npt
+
+# Type aliases for C++ types that don't have Python equivalents
+# These are used by the C++ wrapper to identify the correct converter
+
+
+class _CppTypeMeta(type):
+    """Metaclass to allow overriding __name__ for C++ type identification."""
+
+    def __new__(mcs, name, bases, namespace, cpp_name=None):
+        cls = super().__new__(mcs, name, bases, namespace)
+        cls._cpp_name = cpp_name if cpp_name else name
+        return cls
+
+    @property
+    def __name__(cls):
+        return cls._cpp_name
+
+
+class unsigned_int(int, metaclass=_CppTypeMeta, cpp_name="unsigned int"):  # noqa: N801
+    """Type alias for C++ unsigned int."""
+
+    pass
+
+
+class unsigned_long(int, metaclass=_CppTypeMeta, cpp_name="unsigned long"):  # noqa: N801
+    """Type alias for C++ unsigned long."""
+
+    pass
+
+
+class long(int, metaclass=_CppTypeMeta, cpp_name="long"):  # noqa: N801, A001
+    """Type alias for C++ long."""
+
+    pass
+
+
+class double(float, metaclass=_CppTypeMeta, cpp_name="double"):  # noqa: N801
+    """Type alias for C++ double."""
+
+    pass
+
+
+def collectify_int32(i: int, j: int) -> npt.NDArray[np.int32]:
+    """Create an int32 array from two integers."""
+    return np.array([i, j], dtype=np.int32)
+
+
+def sum_array_int32(coll: npt.NDArray[np.int32]) -> int:
+    """Sum an int32 array."""
+    return int(sum(int(x) for x in coll))
+
+
+def collectify_uint32(
+    u1: unsigned_int,
+    u2: unsigned_int,
+) -> npt.NDArray[np.uint32]:
+    """Create a uint32 array from two integers."""
+    return np.array([u1, u2], dtype=np.uint32)
+
+
+def sum_array_uint32(coll: npt.NDArray[np.uint32]) -> unsigned_int:
+    """Sum a uint32 array."""
+    return unsigned_int(sum(int(x) for x in coll))
+
+
+def collectify_int64(l1: long, l2: long) -> npt.NDArray[np.int64]:
+    """Create an int64 array from two integers."""
+    return np.array([l1, l2], dtype=np.int64)
+
+
+def sum_array_int64(coll: npt.NDArray[np.int64]) -> long:
+    """Sum an int64 array."""
+    return long(sum(int(x) for x in coll))
+
+
+def collectify_uint64(
+    ul1: unsigned_long,
+    ul2: unsigned_long,
+) -> npt.NDArray[np.uint64]:
+    """Create a uint64 array from two integers."""
+    return np.array([ul1, ul2], dtype=np.uint64)
+
+
+def sum_array_uint64(coll: npt.NDArray[np.uint64]) -> unsigned_long:
+    """Sum a uint64 array."""
+    return unsigned_long(sum(int(x) for x in coll))
+
+
+def collectify_float32(f1: float, f2: float) -> npt.NDArray[np.float32]:
+    """Create a float32 array from two floats."""
+    return np.array([f1, f2], dtype=np.float32)
+
+
+def sum_array_float32(coll: npt.NDArray[np.float32]) -> float:
+    """Sum a float32 array."""
+    return float(sum(coll))
+
+
+def collectify_float64(d1: double, d2: double) -> npt.NDArray[np.float64]:
+    """Create a float64 array from two floats."""
+    return np.array([d1, d2], dtype=np.float64)
+
+
+def collectify_float32_list(f1: float, f2: float) -> list[float]:
+    """Create a float32 list from two floats."""
+    return [f1, f2]
+
+
+def collectify_float64_list(d1: double, d2: double) -> list["double"]:
+    """Create a float64 list from two floats."""
+    return [d1, d2]
+
+
+def sum_array_float64(coll: npt.NDArray[np.float64]) -> double:
+    """Sum a float64 array."""
+    return double(sum(coll))
+
+
+def collectify_int32_list(i: int, j: int) -> list[int]:
+    """Create an int32 list from two integers."""
+    return [i, j]
+
+
+def collectify_uint32_list(
+    u1: unsigned_int,
+    u2: unsigned_int,
+) -> "list[unsigned int]":  # type: ignore # noqa: F722
+    """Create a uint32 list from two integers."""
+    return [unsigned_int(u1), unsigned_int(u2)]
+
+
+def collectify_int64_list(l1: long, l2: long) -> "list[long]":  # type: ignore # noqa: F722
+    """Create an int64 list from two integers."""
+    return [long(l1), long(l2)]
+
+
+def collectify_uint64_list(
+    ul1: unsigned_long,
+    ul2: unsigned_long,
+) -> "list[unsigned long]":  # type: ignore # noqa: F722
+    """Create a uint64 list from two integers."""
+    return [unsigned_long(ul1), unsigned_long(ul2)]
+
+
+def sum_list_int32(coll: list[int]) -> int:
+    """Sum a list of ints."""
+    return sum(coll)
+
+
+def sum_list_uint32(coll: "list[unsigned int]") -> unsigned_int:  # type: ignore # noqa: F722
+    """Sum a list of uints."""
+    return unsigned_int(sum(coll))
+
+
+def sum_list_int64(coll: "list[long]") -> long:  # type: ignore # noqa: F722
+    """Sum a list of longs."""
+    return long(sum(coll))
+
+
+def sum_list_uint64(coll: "list[unsigned long]") -> unsigned_long:  # type: ignore # noqa: F722
+    """Sum a list of ulongs."""
+    return unsigned_long(sum(coll))
+
+
+def sum_list_float(coll: list[float]) -> float:
+    """Sum a list of floats."""
+    return sum(coll)
+
+
+def sum_list_double(coll: list["double"]) -> double:
+    """Sum a list of doubles."""
+    return double(sum(coll))
+
+
+def PHLEX_REGISTER_ALGORITHMS(m, config):
+    """Register algorithms for the test."""
+    try:
+        use_lists = config["use_lists"]
+    except (KeyError, TypeError):
+        use_lists = False
+
+    specs = [
+        (
+            "int32",
+            collectify_int32_list,
+            collectify_int32,
+            sum_list_int32,
+            sum_array_int32,
+            "input_int32",
+            "output_int32",
+            "sum_int32",
+        ),
+        (
+            "uint32",
+            collectify_uint32_list,
+            collectify_uint32,
+            sum_list_uint32,
+            sum_array_uint32,
+            "input_uint32",
+            "output_uint32",
+            "sum_uint32",
+        ),
+        (
+            "int64",
+            collectify_int64_list,
+            collectify_int64,
+            sum_list_int64,
+            sum_array_int64,
+            "input_int64",
+            "output_int64",
+            "sum_int64",
+        ),
+        (
+            "uint64",
+            collectify_uint64_list,
+            collectify_uint64,
+            sum_list_uint64,
+            sum_array_uint64,
+            "input_uint64",
+            "output_uint64",
+            "sum_uint64",
+        ),
+        (
+            "float32",
+            collectify_float32_list,
+            collectify_float32,
+            sum_list_float,
+            sum_array_float32,
+            "input_float32",
+            "output_float32",
+            None,
+        ),
+        (
+            "float64",
+            collectify_float64_list,
+            collectify_float64,
+            sum_list_double,
+            sum_array_float64,
+            "input_float64",
+            "output_float64",
+            None,
+        ),
+    ]
+
+    for name, list_collect, arr_collect, list_sum, arr_sum, in_key, out_key, sum_name in specs:
+        arr_name = f"arr_{name}"
+        m.transform(
+            list_collect if use_lists else arr_collect,
+            input_family=config[in_key],
+            output_products=[arr_name],
+        )
+
+        sum_kwargs = {
+            "input_family": [arr_name],
+            "output_products": config[out_key],
+        }
+        if sum_name:
+            sum_kwargs["name"] = sum_name
+
+        m.transform(list_sum if use_lists else arr_sum, **sum_kwargs)
diff --git a/test/python/verify.py b/test/python/verify.py
index 936f5a81d..f7e94c656 100644
--- a/test/python/verify.py
+++ b/test/python/verify.py
@@ -4,6 +4,8 @@
 this observer verifies its result against the expected value.
 """
 
+from typing import Any
+
 
 class Verifier:
     """A callable class that can assert an expected value.
@@ -13,31 +15,25 @@ class Verifier:
 
     Examples:
         >>> v = Verifier(42)
-        >>> v.(42)
-        >>> v.(21)
+        >>> v(42)
+        >>> v(21)
         Traceback (most recent call last):
-          File "", line 1, in 
-          File "verify.py", line 22, in __call__
-            assert value == self._sum_total
-                   ^^^^^^^^^^^^^^^^^^^^^^^^
+        ...
         AssertionError
     """
 
     __name__ = "verifier"
 
-    def __init__(self, sum_total: int):
+    def __init__(self, expected_value: Any):
         """Create a verifier object.
 
         Args:
-            sum_total (int): The expected value.
-
-        Returns:
-            None
+            expected_value (Any): The expected value.
         """
-        self._sum_total = sum_total
+        self._expected_value = expected_value
 
     def __call__(self, value: int) -> None:
-        """Verify a the `value`.
+        """Verify the `value`.
 
         Check that `value` matches the pre-registered value.
 
@@ -45,21 +41,32 @@ def __call__(self, value: int) -> None:
             value (int): The value to verify.
 
         Raises:
-            AssertionError: if the provided value does not matches the
-            pre-registed value.
-
-        Returns:
-            None
+            AssertionError: if the provided value does not match the
+            pre-registered value.
         """
-        assert value == self._sum_total
+        assert value == self._expected_value
+
+
+class BoolVerifier:
+    """Verifier for boolean values."""
+
+    __name__ = "bool_verifier"
+
+    def __init__(self, expected: bool):
+        """Create a boolean verifier."""
+        self._expected = expected
+
+    def __call__(self, value: bool) -> None:
+        """Verify the boolean value."""
+        assert value == self._expected
 
 
 def PHLEX_REGISTER_ALGORITHMS(m, config):
     """Register an instance of `Verifier` as an observer.
 
     Use the standard Phlex `observe` registration to insert a node in
-    the execution graph that receives a summed total to check against an
-    expected value. The expected total is taken from the configuration.
+    the execution graph that receives a value to check against an
+    expected value. The expected value is taken from the configuration.
 
     Args:
         m (internal): Phlex registrar representation.
@@ -68,5 +75,13 @@ def PHLEX_REGISTER_ALGORITHMS(m, config):
     Returns:
         None
     """
+    try:
+        expected = config["expected_bool"]
+    except (KeyError, TypeError):
+        pass
+    else:
+        # Fail loudly if "input" is missing once a boolean check was requested.
+        m.observe(BoolVerifier(expected), input_family=config["input"])
+        return
     assert_sum = Verifier(config["sum_total"])
     m.observe(assert_sum, input_family=config["input"])
diff --git a/test/python/verify_extended.py b/test/python/verify_extended.py
new file mode 100644
index 000000000..c456cde1a
--- /dev/null
+++ b/test/python/verify_extended.py
@@ -0,0 +1,124 @@
+"""Observers to check for various types in tests."""
+
+
+class VerifierInt:
+    """Verify int values."""
+
+    __name__ = "verifier_int"
+
+    def __init__(self, sum_total: int):
+        """Initialize with expected sum."""
+        self._sum_total = sum_total
+
+    def __call__(self, value: int) -> None:
+        """Check if value matches expected sum."""
+        assert value == self._sum_total
+
+
+class VerifierUInt:
+    """Verify unsigned int values."""
+
+    __name__ = "verifier_uint"
+
+    def __init__(self, sum_total: int):
+        """Initialize with expected sum."""
+        self._sum_total = sum_total
+
+    def __call__(self, value: "unsigned int") -> None:  # type: ignore # noqa: F722
+        """Check if value matches expected sum."""
+        assert value == self._sum_total
+
+
+class VerifierLong:
+    """Verify long values."""
+
+    __name__ = "verifier_long"
+
+    def __init__(self, sum_total: int):
+        """Initialize with expected sum."""
+        self._sum_total = sum_total
+
+    def __call__(self, value: "long") -> None:  # type: ignore # noqa: F821
+        """Check if value matches expected sum."""
+        assert value == self._sum_total
+
+
+class VerifierULong:
+    """Verify unsigned long values."""
+
+    __name__ = "verifier_ulong"
+
+    def __init__(self, sum_total: int):
+        """Initialize with expected sum."""
+        self._sum_total = sum_total
+
+    def __call__(self, value: "unsigned long") -> None:  # type: ignore # noqa: F722
+        """Check if value matches expected sum."""
+        assert value == self._sum_total
+
+
+class VerifierFloat:
+    """Verify float values."""
+
+    __name__ = "verifier_float"
+
+    def __init__(self, sum_total: float):
+        """Initialize with expected sum."""
+        self._sum_total = sum_total
+
+    def __call__(self, value: "float") -> None:
+        """Check if value matches expected sum."""
+        assert abs(value - self._sum_total) < 1e-5
+
+
+class VerifierDouble:
+    """Verify double values."""
+
+    __name__ = "verifier_double"
+
+    def __init__(self, sum_total: float):
+        """Initialize with expected sum."""
+        self._sum_total = sum_total
+
+    def __call__(self, value: "double") -> None:  # type: ignore # noqa: F821
+        """Check if value matches expected sum."""
+        assert abs(value - self._sum_total) < 1e-5
+
+
+class VerifierBool:
+    """Verify bool values."""
+
+    __name__ = "verifier_bool"
+
+    def __init__(self, expected: bool):
+        """Initialize with expected value."""
+        self._expected = expected
+
+    def __call__(self, value: bool) -> None:
+        """Check if value matches expected."""
+        assert value == self._expected
+
+
+def _register_optional(m, config, make_verifier, input_key):
+    """Observe `input_key` with a freshly built verifier when configured.
+
+    Only the configuration lookups are guarded: errors raised by
+    `m.observe` itself propagate instead of being silently swallowed.
+    """
+    try:
+        verifier = make_verifier()
+        family = config[input_key]
+    except (KeyError, TypeError):
+        return
+    m.observe(verifier, input_family=family)
+
+
+def PHLEX_REGISTER_ALGORITHMS(m, config):
+    """Register observers for the test."""
+    _register_optional(m, config, lambda: VerifierInt(config["sum_total"]), "input_int")
+    _register_optional(m, config, lambda: VerifierBool(config["expected_bool"]), "input_bool")
+    _register_optional(m, config, lambda: VerifierUInt(config["sum_total"]), "input_uint")
+    _register_optional(m, config, lambda: VerifierLong(config["sum_total"]), "input_long")
+    _register_optional(m, config, lambda: VerifierULong(config["sum_total"]), "input_ulong")
+    _register_optional(m, config, lambda: VerifierFloat(config["sum_total"]), "input_float")
+    _register_optional(m, config, lambda: VerifierDouble(config["sum_total"]), "input_double")