From df830459e5eb4a1b021dbc48c58a64793eb47402 Mon Sep 17 00:00:00 2001 From: Marcel R Date: Thu, 21 Mar 2024 19:41:12 +0100 Subject: [PATCH 1/3] Enable interface tests, remove dev workflow. --- .../TensorFlowAOT/scripts/compile_model.py | 201 ------------- .../TensorFlowAOT/scripts/create_wrapper.py | 283 ------------------ .../TensorFlowAOT/templates/wrapper.h.in | 156 ---------- PhysicsTools/TensorFlowAOT/test/BuildFile.xml | 14 +- .../TensorFlowAOT/test/create_model.py | 90 ------ PhysicsTools/TensorFlowAOT/test/testBase.h | 67 ----- .../TensorFlowAOT/test/testInterface.cc | 21 +- .../test/test_models/create_models.sh | 47 --- 8 files changed, 12 insertions(+), 867 deletions(-) delete mode 100644 PhysicsTools/TensorFlowAOT/scripts/compile_model.py delete mode 100644 PhysicsTools/TensorFlowAOT/scripts/create_wrapper.py delete mode 100644 PhysicsTools/TensorFlowAOT/templates/wrapper.h.in delete mode 100644 PhysicsTools/TensorFlowAOT/test/create_model.py delete mode 100644 PhysicsTools/TensorFlowAOT/test/testBase.h delete mode 100755 PhysicsTools/TensorFlowAOT/test/test_models/create_models.sh diff --git a/PhysicsTools/TensorFlowAOT/scripts/compile_model.py b/PhysicsTools/TensorFlowAOT/scripts/compile_model.py deleted file mode 100644 index bd19d589f3189..0000000000000 --- a/PhysicsTools/TensorFlowAOT/scripts/compile_model.py +++ /dev/null @@ -1,201 +0,0 @@ -# coding: utf-8 - -""" -Script to simplify the development workflow for compiling and integrating TF AOT models into CMSSW. -""" - -from __future__ import annotations - -import os -import shutil -import tempfile - - -tool_name_template = "tfaot-dev-{subsystem}-{package}-{model_name}" - -ld_flag_template = "" - -tool_file_template = """\ - - - - - - - - {ld_flags} - -""" # noqa - - -def compile_model( - subsystem: str, - package: str, - model_path: str, - model_name: str | None = None, - model_version: str = "1.0.0", - batch_sizes: tuple[int] = (1,), - output_path: str | None = None, -): - # default output path - if not output_path: - # check that we are located in a cmssw env - cmssw_base = os.getenv("CMSSW_BASE") - if not cmssw_base or not os.path.isdir(cmssw_base): - raise Exception("CMSSW_BASE is not set or points to a non-existing directory") - - output_path = os.path.join("$CMSSW_BASE", "src", subsystem, package, "tfaot_dev") - output_path = os.path.expandvars(os.path.expanduser(output_path)) - - # check that the model exists - model_path = os.path.expandvars(os.path.expanduser(model_path)) - model_path = os.path.normpath(os.path.abspath(model_path)) - if not os.path.exists(model_path): - raise Exception(f"model_path '{model_path}' does not exist") - - # infer the model name when none was provided - if not model_name: - model_name = os.path.splitext(os.path.basename(model_path))[0] - - # prepare directories - lib_dir = os.path.join(output_path, "lib") - if not os.path.exists(lib_dir): - os.makedirs(lib_dir) - inc_dir = os.path.join(output_path, "include") - if not os.path.exists(inc_dir): - os.makedirs(inc_dir) - - # compile the model into a temporary directory - from cmsml.scripts.compile_tf_graph import compile_tf_graph - with tempfile.TemporaryDirectory() as tmp_dir: - compile_tf_graph( - model_path=model_path, - output_path=tmp_dir, - batch_sizes=batch_sizes, - compile_prefix=f"{model_name}_bs{{}}", - compile_class=f"{subsystem}_{package}::{model_name}_bs{{}}", - ) - - # copy files - header_files = [] - for bs in batch_sizes: - header_name = f"{model_name}_bs{bs}.h" - shutil.copy2(os.path.join(tmp_dir, "aot", header_name), 
inc_dir) - shutil.copy2(os.path.join(tmp_dir, "aot", f"{model_name}_bs{bs}.o"), lib_dir) - header_files.append(os.path.join(inc_dir, header_name)) - - # create the wrapper header - from create_wrapper import create_wrapper - create_wrapper( - header_files=header_files, - model_path=model_path, - subsystem=subsystem, - package=package, - output_path=os.path.join(inc_dir, f"{model_name}.h"), - ) - - # create the toolfile - tool_vars = { - "subsystem": subsystem, - "subsystem_uc": subsystem.upper(), - "package": package, - "package_uc": package.upper(), - "model_name": model_name, - "model_name_uc": model_name.upper(), - "model_version": model_version, - "lib_dir_name": os.path.basename(lib_dir), - "inc_dir_name": os.path.basename(inc_dir), - "tool_name": tool_name_template.format( - subsystem=subsystem.lower(), - package=package.lower(), - model_name=model_name.lower(), - ), - "ld_flags": "\n ".join([ - ld_flag_template.format(model_name=model_name, bs=bs) - for bs in batch_sizes - ]), - } - tool_path = os.path.join(output_path, f"{tool_vars['tool_name']}.xml") - with open(tool_path, "w") as f: - f.write(tool_file_template.format(**tool_vars)) - - # print a message - tool_path_repr = os.path.relpath(tool_path) - if tool_path_repr.startswith(".."): - tool_path_repr = tool_path - inc_path = f"{output_path}/include/{model_name}.h" - if "CMSSW_BASE" in os.environ and os.path.exists(os.environ["CMSSW_BASE"]): - inc_path_rel = os.path.relpath(inc_path, os.path.join(os.environ["CMSSW_BASE"], "src")) - if not inc_path_rel.startswith(".."): - inc_path = inc_path_rel - - print("\n" + 80 * "-" + "\n") - print(f"created custom tool file for AOT compiled model '{model_name}'") - print("to register it to scram, run") - print(f"\n> scram setup {tool_path_repr}\n") - print("and use the following to include it in your code") - print(f"\n#include \"{inc_path}\"\n") - - -def main() -> None: - from argparse import ArgumentParser - - parser = ArgumentParser( - description=__doc__.strip(), - ) - parser.add_argument( - "--subsystem", - "-s", - required=True, - help="the CMSSW subsystem that the plugin belongs to", - ) - parser.add_argument( - "--package", - "-p", - required=True, - help="the CMSSW package that the plugin belongs to", - ) - parser.add_argument( - "--model-path", - "-m", - required=True, - help="the path to the model to compile", - ) - parser.add_argument( - "--model-name", - default=None, - help="a custom model name; when empty, a name is inferred from --model-path", - ) - parser.add_argument( - "--model-version", - default="1.0.0", - help="a custom model version; default: 1.0.0", - ) - parser.add_argument( - "--batch-sizes", - "-b", - default=(1,), - type=(lambda s: tuple(map(int, s.strip().split(",")))), - help="comma-separated list of batch sizes to compile the model for; default: 1", - ) - parser.add_argument( - "--output-path", - "-o", - help="path where the outputs should be saved; default: " - "$CMSSW_BASE/src/SUBSYSTEM/PACKAGE/tfaot_dev", - ) - args = parser.parse_args() - - compile_model( - subsystem=args.subsystem, - package=args.package, - model_path=args.model_path, - model_name=args.model_name, - model_version=args.model_version, - batch_sizes=args.batch_sizes, - output_path=args.output_path, - ) - - -if __name__ == "__main__": - main() diff --git a/PhysicsTools/TensorFlowAOT/scripts/create_wrapper.py b/PhysicsTools/TensorFlowAOT/scripts/create_wrapper.py deleted file mode 100644 index 4eceb4754f0cd..0000000000000 --- a/PhysicsTools/TensorFlowAOT/scripts/create_wrapper.py +++ /dev/null 
@@ -1,283 +0,0 @@ -# coding: utf-8 -# flake8: noqa - -""" -Script that parses header files created by the AOT compilation and creates another header file -containing a wrapper class (inheriting from tfaot::Wrapper) for models with different batch sizes. -""" - -from __future__ import annotations - -import os -import re -from collections import namedtuple - - -HeaderData = namedtuple("HeaderData", [ - "batch_size", - "prefix", - "namespace", - "class_name", - "n_args", - "arg_counts", - "arg_counts_no_batch", - "n_res", - "res_counts", - "res_counts_no_batch", -]) - -common_header_data = [ - "prefix", - "namespace", - "class_name", - "n_args", - "n_res", - "arg_counts_no_batch", - "res_counts_no_batch", -] - - -def create_wrapper( - header_files: list[str], - model_path: str, - subsystem: str, - package: str, - output_path: str | None = None, - template: str = "$CMSSW_BASE/src/PhysicsTools/TensorFlowAOT/templates/wrapper.h.in", -) -> None: - # read header data - header_data = {} - for path in header_files: - data = parse_header(path) - header_data[data.batch_size] = data - - # sorted batch sizes - batch_sizes = sorted(data.batch_size for data in header_data.values()) - - # set common variables - variables = { - "cmssw_version": os.environ["CMSSW_VERSION"], - "scram_arch": os.environ["SCRAM_ARCH"], - "model_path": model_path, - "batch_sizes": batch_sizes, - "subsystem": subsystem, - "package": package, - } - for key in common_header_data: - values = set(getattr(d, key) for d in header_data.values()) - if len(values) > 1: - raise ValueError(f"found more than one possible {key} values: {', '.join(values)}") - variables[key] = values.pop() - - # helper for variable replacement - def substituter(variables): - # insert upper-case variants of strings, csv variants of lists - variables_ = {} - for key, value in variables.items(): - key = key.upper() - variables_[key] = str(value) - if isinstance(value, str) and not key.endswith("_UC"): - variables_[f"{key}_UC"] = value.upper() - elif isinstance(value, (list, tuple)) and not key.endswith("_CSV"): - variables_[f"{key}_CSV"] = ", ".join(map(str, value)) - - def repl(m): - key = m.group(1) - if key not in variables_: - raise KeyError(f"template contains unknown variable {key}") - return variables_[key] - - return lambda line: re.sub(r"\$\{([A-Z0-9_]+)\}", repl, line) - - # substituter for common variables and per-model variables - common_sub = substituter(variables) - model_subs = { - batch_size : substituter({ - **variables, - **dict(zip(HeaderData._fields, header_data[batch_size])), - }) - for batch_size in batch_sizes - } - - # read template lines - template = os.path.expandvars(os.path.expanduser(str(template))) - with open(template, "r") as f: - input_lines = [line.rstrip() for line in f.readlines()] - - # go through lines and define new ones - output_lines = [] - while input_lines: - line = input_lines.pop(0) - - # loop statement? 
- m = re.match(r"^\/\/\s+foreach=([^\s]+)\s+lines=(\d+)$", line.strip()) - if m: - loop = m.group(1) - n_lines = int(m.group(2)) - - if loop == "MODEL": - # repeat the next n lines for each batch size and replace model variables - batch_lines, input_lines = input_lines[:n_lines], input_lines[n_lines:] - for batch_size in batch_sizes: - for line in batch_lines: - output_lines.append(model_subs[batch_size](line)) - else: - raise ValueError(f"unknown loop target '{loop}'") - - continue - - # just make common substitutions - output_lines.append(common_sub(line)) - - # prepare the output - if not output_path: - output_path = f"$CMSSW_BASE/src/{subsystem}/{package}/tfaot_dev/{variables['prefix']}.h" - output_path = os.path.expandvars(os.path.expanduser(str(output_path))) - output_dir = os.path.dirname(output_path) - if not os.path.exists(output_dir): - os.makedirs(output_dir) - - # write lines - with open(output_path, "w") as f: - f.writelines("\n".join(map(str, output_lines)) + "\n") - - -def parse_header(path: str) -> HeaderData: - # read all non-empty lines - path = os.path.expandvars(os.path.expanduser(str(path))) - with open(path, "r") as f: - lines = [line for line in (line.strip() for line in f.readlines()) if line] - - # prepare HeaderData - data = HeaderData(*([None] * len(HeaderData._fields))) - - # helper to set data fields - set_ = lambda key, value: data._replace(**{key: value}) - - # extract data - arg_counts = {} - res_counts = {} - while lines: - line = lines.pop(0) - - # read the namespace - m = re.match(r"^namespace\s+([^\s]+)\s*\{$", line) - if m: - data = set_("namespace", m.group(1)) - continue - - # read the class name and batch size - m = re.match(rf"^class\s+([^\s]+)_bs(\d+)\s+final\s+\:\s+public\stensorflow\:\:XlaCompiledCpuFunction\s+.*$", line) # noqa - if m: - data = set_("class_name", m.group(1)) - data = set_("batch_size", int(m.group(2))) - - # read argument and result counts - m = re.match(r"^int\s+(arg|result)(\d+)_count\(\).+$", line) - if m: - # get kind and index - kind = m.group(1) - index = int(m.group(2)) - - # parse the next line - m = re.match(r"^return\s+(\d+)\s*\;.*$", lines.pop(0)) - if not m: - raise Exception(f"corrupted header file {path}") - count = int(m.group(1)) - - # store the count - (arg_counts if kind == "arg" else res_counts)[index] = count - continue - - # helper to flatten counts to lists - def flatten(counts: dict[int, int], name: str) -> list[int]: - if set(counts) != set(range(len(counts))): - raise ValueError( - f"non-contiguous indices in {name} counts: {', '.join(map(str, counts))}", - ) - return [counts[index] for index in sorted(counts)] - - - # helper to enforce integer division by batch size - def no_batch(count: int, index: int, name: str) -> int: - if count % data.batch_size != 0: - raise ValueError( - f"{name} count of {count} at index {index} is not dividable by batch size " - f"{data.batch_size}", - ) - return count // data.batch_size - - # store the prefix - base = os.path.basename(path) - postfix = f"_bs{data.batch_size}.h" - if not base.endswith(postfix): - raise ValueError(f"header '{path}' does not end with expected postfix '{postfix}'") - data = set_("prefix", base[:-len(postfix)]) - - # set counts - data = set_("n_args", len(arg_counts)) - data = set_("n_res", len(res_counts)) - data = set_("arg_counts", flatten(arg_counts, "argument")) - data = set_("res_counts", flatten(res_counts, "result")) - data = set_("arg_counts_no_batch", tuple( - no_batch(c, i, "argument") - for i, c in enumerate(data.arg_counts) - )) - 
data = set_("res_counts_no_batch", tuple( - no_batch(c, i, "result") - for i, c in enumerate(data.res_counts) - )) - - return data - - -def main() -> None: - from argparse import ArgumentParser - - parser = ArgumentParser( - description=__doc__.strip(), - ) - parser.add_argument( - "--subsystem", - "-s", - required=True, - help="the CMSSW subsystem that the plugin belongs to", - ) - parser.add_argument( - "--package", - "-p", - required=True, - help="the CMSSW package that the plugin belongs to", - ) - parser.add_argument( - "--model-path", - "-m", - required=True, - help="path of the initial model file for provenance purposes", - ) - parser.add_argument( - "--header-files", - "-f", - required=True, - nargs="+", - help="comma-separated list of AOT header files that define the models to wrap", - ) - parser.add_argument( - "--output-path", - "-o", - help="path where the created header file should be saved; default: " - "$CMSSW_BASE/src/SUBSYSTEM/PACKAGE/tfaot_dev/PREFIX.h" - ) - args = parser.parse_args() - - create_wrapper( - header_files=args.header_files, - model_path=args.model_path, - subsystem=args.subsystem, - package=args.package, - output_path=args.output_path, - ) - - -if __name__ == "__main__": - main() diff --git a/PhysicsTools/TensorFlowAOT/templates/wrapper.h.in b/PhysicsTools/TensorFlowAOT/templates/wrapper.h.in deleted file mode 100644 index 1ce44d08bc868..0000000000000 --- a/PhysicsTools/TensorFlowAOT/templates/wrapper.h.in +++ /dev/null @@ -1,156 +0,0 @@ -#ifndef ${SUBSYSTEM_UC}_${PACKAGE_UC}_GENERATED_AOT_${PREFIX_UC}_H -#define ${SUBSYSTEM_UC}_${PACKAGE_UC}_GENERATED_AOT_${PREFIX_UC}_H - -/* - * Auto-generated AOT wrapper for - * model path : ${MODEL_PATH} - * prefix : ${PREFIX} - * namespace : ${NAMESPACE} - * class name : ${CLASS_NAME} - * batch sizes : ${BATCH_SIZES_CSV} - */ - -#include "PhysicsTools/TensorFlowAOT/interface/Wrapper.h" - -// model headers -// foreach=MODEL lines=1 -#include "${PREFIX}_bs${BATCH_SIZE}.h" - -namespace ${NAMESPACE} { - - class ${CLASS_NAME} : public tfaot::Wrapper { - public: - // default constructor - explicit ${CLASS_NAME}(const std::string& name = "${NAMESPACE}_${CLASS_NAME}") - : tfaot::Wrapper(name) - // foreach=MODEL lines=1 - , xla_function_bs${BATCH_SIZE}_(allocMode()) - { - } - - // disable copy constructor - ${CLASS_NAME}(const ${CLASS_NAME}&) = delete; - - // disable assigment operator - ${CLASS_NAME}& operator=(const ${CLASS_NAME}&) = delete; - - // disable move operator - ${CLASS_NAME}& operator=(${CLASS_NAME}&&) = delete; - - // destructor - ~${CLASS_NAME}() = default; - - // registered batch sizes, sorted by default - const std::vector& batchSizes() const { - static const std::vector batchSizes = {${BATCH_SIZES_CSV}}; - return batchSizes; - } - - // number of input arguments - size_t nArgs() const { return ${N_ARGS}; } - - // number of elements in arguments per batch size - const std::map>& argCounts() const { - static const std::map> argCounts = { - // foreach=MODEL lines=1 - {${BATCH_SIZE}, {${ARG_COUNTS_CSV}}}, - }; - return argCounts; - } - - // number of elements in arguments, divided by batch size - const std::vector& argCountsNoBatch() const { - static const std::vector argCountsNoBatch = {${ARG_COUNTS_NO_BATCH_CSV}}; - return argCountsNoBatch; - } - - // pointer to argument data - template - T* argData(size_t batchSize, size_t argIndex); - - // const pointer to argument data - template - const T* argData(size_t batchSize, size_t argIndex) const; - - // number for output results - size_t nResults() const { return 
${N_RES}; } - - // number of elements in results per batch size - const std::map>& resultCounts() const { - static const std::map> resultCounts = { - // foreach=MODEL lines=1 - {${BATCH_SIZE}, {${RES_COUNTS_CSV}}}, - }; - return resultCounts; - } - - // number of elements in results, divided by batch size - const std::vector& resultCountsNoBatch() const { - static const std::vector resultCountsNoBatch = {${RES_COUNTS_NO_BATCH_CSV}}; - return resultCountsNoBatch; - } - - // pointer to result data - template - T* resultData(size_t batchSize, size_t resultIndex); - - // const pointer to result data - template - const T* resultData(size_t batchSize, size_t resultIndex) const; - - // model evaluation - bool runSilent(size_t batchSize); - - private: - // xla function instances - // foreach=MODEL lines=1 - ${NAMESPACE}::${CLASS_NAME}_bs${BATCH_SIZE} xla_function_bs${BATCH_SIZE}_; - }; - - template - T* ${CLASS_NAME}::argData(size_t batchSize, size_t argIndex) { - // foreach=MODEL lines=2 - if (batchSize == ${BATCH_SIZE}) - return static_cast(xla_function_bs${BATCH_SIZE}_.arg_data(argIndex)); - unknownBatchSize(batchSize, "argData()"); - return nullptr; - } - - template - const T* ${CLASS_NAME}::argData(size_t batchSize, size_t argIndex) const { - // foreach=MODEL lines=2 - if (batchSize == ${BATCH_SIZE}) - return static_cast(xla_function_bs${BATCH_SIZE}_.arg_data(argIndex)); - unknownBatchSize(batchSize, "const argData()"); - return nullptr; - } - - template - T* ${CLASS_NAME}::resultData(size_t batchSize, size_t resultIndex) { - // foreach=MODEL lines=2 - if (batchSize == ${BATCH_SIZE}) - return static_cast(xla_function_bs${BATCH_SIZE}_.result_data(resultIndex)); - unknownBatchSize(batchSize, "resultData()"); - return nullptr; - } - - template - const T* ${CLASS_NAME}::resultData(size_t batchSize, size_t resultIndex) const { - // foreach=MODEL lines=2 - if (batchSize == ${BATCH_SIZE}) - return static_cast(xla_function_bs${BATCH_SIZE}_.result_data(resultIndex)); - unknownBatchSize(batchSize, "const resultData()"); - return nullptr; - } - - bool ${CLASS_NAME}::runSilent(size_t batchSize) { - // foreach=MODEL lines=2 - if (batchSize == ${BATCH_SIZE}) - return xla_function_bs${BATCH_SIZE}_.Run(); - unknownBatchSize(batchSize, "runSilent()"); - return false; - } - -} // namespace ${NAMESPACE} - -#endif // ${SUBSYSTEM_UC}_${PACKAGE_UC}_GENERATED_AOT_${PREFIX_UC}_H diff --git a/PhysicsTools/TensorFlowAOT/test/BuildFile.xml b/PhysicsTools/TensorFlowAOT/test/BuildFile.xml index bebd82a54582d..2f31bd96bac27 100644 --- a/PhysicsTools/TensorFlowAOT/test/BuildFile.xml +++ b/PhysicsTools/TensorFlowAOT/test/BuildFile.xml @@ -1,18 +1,8 @@ - diff --git a/PhysicsTools/TensorFlowAOT/test/create_model.py b/PhysicsTools/TensorFlowAOT/test/create_model.py deleted file mode 100644 index 15ced9ca3dec2..0000000000000 --- a/PhysicsTools/TensorFlowAOT/test/create_model.py +++ /dev/null @@ -1,90 +0,0 @@ -# coding: utf-8 - -""" -Test script to create a simple model for AOT compilation. - -By default, a simple float32 -> float32 is created. When "--multi-tensor" is defined, the signature -is (float32, float64) -> (float32, bool). 
-""" - -import os - -import cmsml - - -def create_model(model_dir, multi_tensor=False): - # get tensorflow (suppressing the usual device warnings and logs) - tf = cmsml.tensorflow.import_tf()[0] - - # set random seeds to get deterministic results for testing - tf.keras.utils.set_random_seed(1) - - # define architecture - n_in, n_out, n_layers, n_units = 4, 2, 5, 128 - - # define input layer(s) - if multi_tensor: - x1 = tf.keras.Input(shape=(n_in,), dtype=tf.float32, name="input1") - x2 = tf.keras.Input(shape=(n_in,), dtype=tf.float64, name="input2") - x = tf.keras.layers.Concatenate(axis=1)([x1, x2]) - else: - x1 = tf.keras.Input(shape=(n_in,), dtype=tf.float32, name="input1") - x = x1 - - # model layers - a = tf.keras.layers.BatchNormalization(axis=1, renorm=True)(x) - for _ in range(n_layers): - a = tf.keras.layers.Dense(n_units, activation="tanh")(a) - a = tf.keras.layers.BatchNormalization(axis=1, renorm=True)(a) - y1 = tf.keras.layers.Dense(n_out, activation="softmax", name="output1", dtype=tf.float32)(a) - - # define output layer(s) - if multi_tensor: - y2 = tf.keras.layers.Reshape((n_out,), name="output2")(y1 > 0.5) - - # define the model - inputs, outputs = [x1], [y1] - if multi_tensor: - inputs.append(x2) - outputs.append(y2) - model = tf.keras.Model(inputs=inputs, outputs=outputs) - - # test evaluation - inputs = [ - tf.constant([list(range(n_in))], dtype=tf.float32), - ] - if multi_tensor: - inputs.append(tf.constant([list(range(n_in))], dtype=tf.float64)) - print(model(inputs)) - - # save it - tf.saved_model.save(model, model_dir) - - -def main(): - from argparse import ArgumentParser - - this_dir = os.path.dirname(os.path.abspath(__file__)) - aot_dir = os.path.dirname(this_dir) - - parser = ArgumentParser( - description="create a simple model for AOT compilation", - ) - parser.add_argument( - "--model-dir", - "-d", - default=os.path.join(aot_dir, "data", "testmodel"), - help="the model directory; default: %(default)s", - ) - parser.add_argument( - "--multi-tensor", - "-m", - action="store_true", - help="create a model with multiple inputs and outputs", - ) - args = parser.parse_args() - create_model(args.model_dir, multi_tensor=args.multi_tensor) - - -if __name__ == "__main__": - main() diff --git a/PhysicsTools/TensorFlowAOT/test/testBase.h b/PhysicsTools/TensorFlowAOT/test/testBase.h deleted file mode 100644 index 1ea89138cce20..0000000000000 --- a/PhysicsTools/TensorFlowAOT/test/testBase.h +++ /dev/null @@ -1,67 +0,0 @@ -/* - * Base class for tests. - */ - -#ifndef PHYSICSTOOLS_TENSORFLOWAOT_TEST_TESTBASE_H -#define PHYSICSTOOLS_TENSORFLOWAOT_TEST_TESTBASE_H - -#include -#include -#include -#include - -class testBase : public CppUnit::TestFixture { -public: - std::string dataPath_; - - void setUp(); - void tearDown(); - std::string cmsswPath(std::string path); - void runCmd(const std::string& cmd); - - virtual void test() = 0; -}; - -void testBase::setUp() { - dataPath_ = - cmsswPath("/test/" + std::string(std::getenv("SCRAM_ARCH")) + "/" + boost::filesystem::unique_path().string()); -} - -void testBase::tearDown() { - if (std::filesystem::exists(dataPath_)) { - std::filesystem::remove_all(dataPath_); - } -} - -std::string testBase::cmsswPath(std::string path) { - if (path.size() > 0 && path.substr(0, 1) != "/") { - path = "/" + path; - } - - std::string base = std::string(std::getenv("CMSSW_BASE")); - std::string releaseBase = std::string(std::getenv("CMSSW_RELEASE_BASE")); - - return (std::filesystem::exists(base.c_str()) ? 
base : releaseBase) + path; -} - -void testBase::runCmd(const std::string& cmd) { - // popen - std::array buffer; - std::string result; - std::shared_ptr pipe(popen(cmd.c_str(), "r"), pclose); - - // catch errors - if (!pipe) { - throw std::runtime_error("popen() failed!"); - } - - // print the result - while (!feof(pipe.get())) { - if (fgets(buffer.data(), 128, pipe.get()) != NULL) { - result += buffer.data(); - } - } - std::cout << std::endl << result << std::endl; -} - -#endif // PHYSICSTOOLS_TENSORFLOWAOT_TEST_TESTBASE_H diff --git a/PhysicsTools/TensorFlowAOT/test/testInterface.cc b/PhysicsTools/TensorFlowAOT/test/testInterface.cc index 0d98087bbb0f2..a8c83d17d27f3 100644 --- a/PhysicsTools/TensorFlowAOT/test/testInterface.cc +++ b/PhysicsTools/TensorFlowAOT/test/testInterface.cc @@ -5,20 +5,20 @@ #include #include -#include "testBase.h" - #include "PhysicsTools/TensorFlowAOT/interface/Model.h" -#include "test_models/simplemodel/include/simplemodel.h" -#include "test_models/multimodel/include/multimodel.h" +#include "tfaot-model-test-simple/model.h" +#include "tfaot-model-test-multi/model.h" -class testInterface : public testBase { +class testInterface : public CppUnit::TestFixture { CPPUNIT_TEST_SUITE(testInterface); CPPUNIT_TEST(test); CPPUNIT_TEST_SUITE_END(); public: - void test() override; + void setUp() {}; + void tearDown() {}; + void test(); void test_simple(); void test_multi(); }; @@ -32,10 +32,10 @@ void testInterface::test() { void testInterface::test_simple() { std::cout << std::endl; - std::cout << "tesing simplemodel" << std::endl; + std::cout << "tesing simple model" << std::endl; // initialize the model - auto model = tfaot::Model(); + auto model = tfaot::Model(); // register (optional) batch rules model.setBatchRule(1, {1}); @@ -57,7 +57,6 @@ void testInterface::test_simple() { std::tie(output_bs1) = model.run(1, input_bs1); CPPUNIT_ASSERT(output_bs1.size() == 1); CPPUNIT_ASSERT(output_bs1[0].size() == 2); - std::cout << std::endl; std::cout << "output_bs1[0]: " << output_bs1[0][0] << ", " << output_bs1[0][1] << std::endl; // evaluate batch size 2 @@ -91,10 +90,10 @@ void testInterface::test_simple() { void testInterface::test_multi() { std::cout << std::endl; - std::cout << "tesing multimodel" << std::endl; + std::cout << "tesing multi model" << std::endl; // initialize the model - auto model = tfaot::Model(); + auto model = tfaot::Model(); // there should be no batch rule for size 2 yet CPPUNIT_ASSERT(!model.getBatchStrategy().hasRule(2)); diff --git a/PhysicsTools/TensorFlowAOT/test/test_models/create_models.sh b/PhysicsTools/TensorFlowAOT/test/test_models/create_models.sh deleted file mode 100755 index b4ec475be6047..0000000000000 --- a/PhysicsTools/TensorFlowAOT/test/test_models/create_models.sh +++ /dev/null @@ -1,47 +0,0 @@ -#!/usr/bin/env bash - -# Script to create simple AOT compiled models for testing purposes. - -action() { - local shell_is_zsh="$( [ -z "${ZSH_VERSION}" ] && echo "false" || echo "true" )" - local this_file="$( ${shell_is_zsh} && echo "${(%):-%x}" || echo "${BASH_SOURCE[0]}" )" - local this_dir="$( cd "$( dirname "${this_file}" )" && pwd )" - local aot_dir="${CMSSW_BASE}/src/PhysicsTools/TensorFlowAOT" - - # remove existing models - rm -rf "${this_dir}"/*model - - # create saved models - python3 -W ignore "${aot_dir}/test/create_model.py" \ - --model-dir "${this_dir}/saved_simplemodel"\ - || return "$?" - python3 -W ignore "${aot_dir}/test/create_model.py" \ - --model-dir "${this_dir}/saved_multimodel" \ - --multi-tensor \ - || return "$?" 
- - # comple them - python3 -W ignore "${aot_dir}/scripts/compile_model.py" \ - --model-path "${this_dir}/saved_simplemodel" \ - --model-name "simplemodel" \ - --subsystem PhysicsTools \ - --package TensorFlowAOT \ - --batch-sizes "1,2" \ - --output-path "${this_dir}/simplemodel" \ - || return "$?" - python3 -W ignore "${aot_dir}/scripts/compile_model.py" \ - --model-path "${this_dir}/saved_multimodel" \ - --model-name "multimodel" \ - --subsystem PhysicsTools \ - --package TensorFlowAOT \ - --batch-sizes "1,2" \ - --output-path "${this_dir}/multimodel" \ - || return "$?" - - # remove custom tool files - rm "${this_dir}/"*model/tfaot-dev-physicstools-tensorflowaot-*model.xml || return "$?" - - # remove saved models again - rm -rf "${this_dir}"/saved_*model -} -action "$@" From e07f8bd35cace163132e733688b65b2d8c30cf19 Mon Sep 17 00:00:00 2001 From: Marcel R Date: Fri, 22 Mar 2024 18:18:11 +0100 Subject: [PATCH 2/3] Code format. --- PhysicsTools/TensorFlowAOT/test/testInterface.cc | 4 ++-- 1 file changed, 2 insertions(+), 2 deletions(-) diff --git a/PhysicsTools/TensorFlowAOT/test/testInterface.cc b/PhysicsTools/TensorFlowAOT/test/testInterface.cc index a8c83d17d27f3..10c19c746a790 100644 --- a/PhysicsTools/TensorFlowAOT/test/testInterface.cc +++ b/PhysicsTools/TensorFlowAOT/test/testInterface.cc @@ -16,8 +16,8 @@ class testInterface : public CppUnit::TestFixture { CPPUNIT_TEST_SUITE_END(); public: - void setUp() {}; - void tearDown() {}; + void setUp(){}; + void tearDown(){}; void test(); void test_simple(); void test_multi(); From 7c477f1b0df76cbd7d9b78e9424976e946b984e0 Mon Sep 17 00:00:00 2001 From: Marcel R Date: Wed, 27 Mar 2024 10:42:21 +0100 Subject: [PATCH 3/3] Update aot tool tests. --- .../TensorFlowAOT/test/testAOTTools.py | 76 ++++++------------- 1 file changed, 23 insertions(+), 53 deletions(-) diff --git a/PhysicsTools/TensorFlowAOT/test/testAOTTools.py b/PhysicsTools/TensorFlowAOT/test/testAOTTools.py index 91fcc1cc02837..cd598c0499544 100644 --- a/PhysicsTools/TensorFlowAOT/test/testAOTTools.py +++ b/PhysicsTools/TensorFlowAOT/test/testAOTTools.py @@ -5,7 +5,7 @@ """ import os -import sys +import re import shlex import subprocess import tempfile @@ -33,66 +33,36 @@ def wrapper(self): class TFAOTTests(unittest.TestCase): @run_in_tmp - def test_compilation(self, tmp_dir): - # create the test model - cmd = [ - sys.executable, - "-W", "ignore", - os.path.join(this_dir, "create_model.py"), - "-d", os.path.join(tmp_dir, "testmodel"), - ] - run_cmd(cmd) - - # compile it - cmd = [ - "PYTHONWARNINGS=ignore", - "cmsml_compile_tf_graph", - os.path.join(tmp_dir, "testmodel"), - os.path.join(tmp_dir, "testmodel_compiled"), - "-c", r"testmodel_bs{}", r"testmodel_bs{}", - "-b", "1,2", - ] - run_cmd(cmd) + def test_dev_workflow(self, tmp_dir): + import cms_tfaot - # check files - exists = lambda *p: os.path.exists(os.path.join(tmp_dir, "testmodel_compiled", "aot", *p)) - self.assertTrue(exists("testmodel_bs1.h")) - self.assertTrue(exists("testmodel_bs1.o")) - self.assertTrue(exists("testmodel_bs2.h")) - self.assertTrue(exists("testmodel_bs2.o")) + # find the cms_tfaot install dir to locate the test model + m = re.match(r"(.+/\d+\.\d+\.\d+\-[^/]+)/lib/.+$", cms_tfaot.__file__) + self.assertIsNotNone(m) + config_file = os.path.join(m.group(1), "share", "test_models", "simple", "aot_config.yaml") + self.assertTrue(os.path.exists(config_file)) - @run_in_tmp - def test_dev_workflow(self, tmp_dir): + # run the dev workflow # create the test model cmd = [ - sys.executable, - "-W", "ignore", - 
os.path.join(this_dir, "create_model.py"), - "-d", os.path.join(tmp_dir, "testmodel"), - ] - run_cmd(cmd) - - # compile it - cmd = [ - sys.executable, - "-W", "ignore", - os.path.normpath(os.path.join(this_dir, "..", "scripts", "compile_model.py")), - "-m", os.path.join(tmp_dir, "testmodel"), - "-s", "PhysicsTools", - "-p", "TensorFlowAOT", - "-b", "1,2", - "-o", os.path.join(tmp_dir, "testmodel_compiled"), + "cms_tfaot_compile", + "-c", config_file, + "-o", tmp_dir, + "--tool-name", "tfaot-model-test", + "--dev", ] run_cmd(cmd) # check files - exists = lambda *p: os.path.exists(os.path.join(tmp_dir, "testmodel_compiled", *p)) - self.assertTrue(exists("tfaot-dev-physicstools-tensorflowaot-testmodel.xml")) - self.assertTrue(exists("include", "testmodel.h")) - self.assertTrue(exists("include", "testmodel_bs1.h")) - self.assertTrue(exists("include", "testmodel_bs2.h")) - self.assertTrue(exists("lib", "testmodel_bs1.o")) - self.assertTrue(exists("lib", "testmodel_bs2.o")) + exists = lambda *p: os.path.exists(os.path.join(tmp_dir, *p)) + self.assertTrue(exists("tfaot-model-test.xml")) + self.assertTrue(exists("include", "tfaot-model-test")) + self.assertTrue(exists("include", "tfaot-model-test", "test_simple_bs1.h")) + self.assertTrue(exists("include", "tfaot-model-test", "test_simple_bs2.h")) + self.assertTrue(exists("include", "tfaot-model-test", "test_simple.h")) + self.assertTrue(exists("include", "tfaot-model-test", "model.h")) + self.assertTrue(exists("lib", "test_simple_bs1.o")) + self.assertTrue(exists("lib", "test_simple_bs2.o")) if __name__ == "__main__":
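
Taken together, the three patches drop the in-tree dev scripts (`compile_model.py`, `create_wrapper.py`, `wrapper.h.in`, `create_models.sh`) and drive both `testInterface.cc` and `testAOTTools.py` through the external `cms_tfaot` tooling (`cms_tfaot_compile --dev`, which emits a scram tool file plus `include/tfaot-model-test/model.h` and per-batch-size object files). The sketch below condenses the updated `testInterface.cc` flow into a standalone example. It is illustrative only: the wrapper type name `tfaot_model::test_simple` and the nested-vector input/output types are assumptions, since the concrete names and any template arguments on `Model`/`run()` are generated by the tooling and come from the produced `model.h` header rather than being spelled out in this patch.

```cpp
// Usage sketch for the AOT interface exercised in testInterface.cc above.
// Assumptions (hypothetical, not taken verbatim from the patch): the wrapper class
// provided by the "tfaot-model-test-simple" tool is named tfaot_model::test_simple,
// batched inputs/outputs are nested float vectors, and any template arguments that
// Model or run() require in practice are defined by the generated model.h header.
#include <iostream>
#include <tuple>
#include <vector>

#include "PhysicsTools/TensorFlowAOT/interface/Model.h"
#include "tfaot-model-test-simple/model.h"

int main() {
  // wrap the XLA functions that were AOT-compiled for the registered batch sizes
  auto model = tfaot::Model<tfaot_model::test_simple>();

  // optional batch rule, mirroring the test: serve batch size 1 with one size-1 call
  model.setBatchRule(1, {1});

  // one event with four input features
  std::vector<std::vector<float>> input = {{0.f, 1.f, 2.f, 3.f}};

  // evaluate at batch size 1; the test unpacks a single output via std::tie
  std::vector<std::vector<float>> output;
  std::tie(output) = model.run(1, input);

  // the simple test model produces two values per event
  std::cout << "output[0]: " << output[0][0] << ", " << output[0][1] << std::endl;
  return 0;
}
```

As with the tool file written by the removed `compile_model.py`, the XML produced by `cms_tfaot_compile` is expected to be registered via `scram setup` before code including the generated header is built.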