From 50c535ffa9751713d9b1c0eff85dbb5710ba1cbb Mon Sep 17 00:00:00 2001 From: Ed Savage Date: Fri, 27 Jun 2025 16:20:02 +1200 Subject: [PATCH 01/25] [ML] Add a script to run each unit test separately Add a script to provide a wrapper around the call to "cmake" that runs the test cases and provides some flexibility as to how the tests should be run in terms of how they are spread across processes. This is necessary when trying to isolate the impact memory usage of tests have upon one another. --- 3rd_party/3rd_party.cmake | 19 +++-- 3rd_party/CMakeLists.txt | 2 +- cmake/compiler/clang.cmake | 2 +- cmake/functions.cmake | 9 ++ cmake/test-runner.cmake | 53 ++++++++++-- dev-tools/docker/docker_entrypoint.sh | 2 +- generate_test_names.py | 77 ++++++++++++++++++ run_tests_as_seperate_processes.sh | 113 ++++++++++++++++++++++++++ test/CMakeLists.txt | 9 ++ 9 files changed, 269 insertions(+), 17 deletions(-) create mode 100755 generate_test_names.py create mode 100755 run_tests_as_seperate_processes.sh diff --git a/3rd_party/3rd_party.cmake b/3rd_party/3rd_party.cmake index 2c9d79622..5878d7f86 100644 --- a/3rd_party/3rd_party.cmake +++ b/3rd_party/3rd_party.cmake @@ -25,6 +25,10 @@ if(NOT INSTALL_DIR) message(FATAL_ERROR "INSTALL_DIR not specified") endif() +STRING(REPLACE "//" "/" INSTALL_DIR ${INSTALL_DIR}) + +message(STATUS "3rd_party: CMAKE_CXX_COMPILER_VERSION_MAJOR=${CMAKE_CXX_COMPILER_VERSION_MAJOR}") + string(TOLOWER ${CMAKE_HOST_SYSTEM_NAME} HOST_SYSTEM_NAME) message(STATUS "3rd_party: HOST_SYSTEM_NAME=${HOST_SYSTEM_NAME}") @@ -43,7 +47,9 @@ set(ARCH ${HOST_SYSTEM_PROCESSOR}) if ("${HOST_SYSTEM_NAME}" STREQUAL "darwin") message(STATUS "3rd_party: Copying macOS 3rd party libraries") set(BOOST_LOCATION "/usr/local/lib") - set(BOOST_COMPILER "clang") + set(BOOST_COMPILER "clang-darwin${CMAKE_CXX_COMPILER_VERSION_MAJOR}") + message(STATUS "3rd_party: BOOST_COMPILER=${BOOST_COMPILER}") + if( "${ARCH}" STREQUAL "x86_64" ) set(BOOST_ARCH "x64") else() @@ -63,7 +69,7 @@ 
elseif ("${HOST_SYSTEM_NAME}" STREQUAL "linux") if(NOT DEFINED ENV{CPP_CROSS_COMPILE} OR "$ENV{CPP_CROSS_COMPILE}" STREQUAL "") message(STATUS "3rd_party: NOT cross compiling. Copying Linux 3rd party libraries") set(BOOST_LOCATION "/usr/local/gcc133/lib") - set(BOOST_COMPILER "gcc") + set(BOOST_COMPILER "gcc${CMAKE_CXX_COMPILER_VERSION_MAJOR}") if( "${ARCH}" STREQUAL "aarch64" ) set(BOOST_ARCH "a64") else() @@ -93,7 +99,7 @@ elseif ("${HOST_SYSTEM_NAME}" STREQUAL "linux") message(STATUS "3rd_party: Cross compile for macosx: Copying macOS 3rd party libraries") set(SYSROOT "/usr/local/sysroot-x86_64-apple-macosx10.14") set(BOOST_LOCATION "${SYSROOT}/usr/local/lib") - set(BOOST_COMPILER "clang") + set(BOOST_COMPILER "clang-darwin${CMAKE_CXX_COMPILER_VERSION_MAJOR}") set(BOOST_EXTENSION "mt-x64-1_86.dylib") set(BOOST_LIBRARIES "atomic" "chrono" "date_time" "filesystem" "iostreams" "log" "log_setup" "program_options" "regex" "system" "thread" "unit_test_framework") set(XML_LOCATION) @@ -108,7 +114,7 @@ elseif ("${HOST_SYSTEM_NAME}" STREQUAL "linux") message(STATUS "3rd_party: Cross compile for linux-aarch64: Copying Linux 3rd party libraries") set(SYSROOT "/usr/local/sysroot-$ENV{CPP_CROSS_COMPILE}-linux-gnu") set(BOOST_LOCATION "${SYSROOT}/usr/local/gcc133/lib") - set(BOOST_COMPILER "gcc") + set(BOOST_COMPILER "gcc${CMAKE_CXX_COMPILER_VERSION_MAJOR}") if("$ENV{CPP_CROSS_COMPILE}" STREQUAL "aarch64") set(BOOST_ARCH "a64") else() @@ -188,6 +194,9 @@ function(install_libs _target _source_dir _prefix _postfix) set(LIBRARIES ${ARGN}) + message(STATUS "_target=${_target} _source_dir=${_source_dir} _prefix=${_prefix} _postfix=${_postfix} LIBRARIES=${LIBRARIES}") + + file(GLOB _LIBS ${_source_dir}/*${_prefix}*${_postfix}) if(_LIBS) @@ -219,7 +228,7 @@ function(install_libs _target _source_dir _prefix _postfix) endif() file(CHMOD ${INSTALL_DIR}/${_LIB} PERMISSIONS OWNER_READ OWNER_WRITE OWNER_EXECUTE GROUP_READ GROUP_EXECUTE WORLD_READ WORLD_EXECUTE) else() - file(COPY 
${_RESOLVED_PATH} DESTINATION ${INSTALL_DIR}) + file(COPY ${_RESOLVED_PATH} DESTINATION "${INSTALL_DIR}") file(CHMOD ${INSTALL_DIR}/${_RESOLVED_LIB} PERMISSIONS OWNER_READ OWNER_WRITE OWNER_EXECUTE GROUP_READ GROUP_EXECUTE WORLD_READ WORLD_EXECUTE) endif() endforeach() diff --git a/3rd_party/CMakeLists.txt b/3rd_party/CMakeLists.txt index ffdb8e093..f2b092f91 100644 --- a/3rd_party/CMakeLists.txt +++ b/3rd_party/CMakeLists.txt @@ -22,7 +22,7 @@ add_custom_target(licenses ALL # as part of the CMake configuration step - avoiding # the need for it to be done on every build execute_process( - COMMAND ${CMAKE_COMMAND} -DINSTALL_DIR=${INSTALL_DIR} -P ./3rd_party.cmake + COMMAND ${CMAKE_COMMAND} -DINSTALL_DIR=${INSTALL_DIR} -DCMAKE_CXX_COMPILER_VERSION_MAJOR=${CMAKE_CXX_COMPILER_VERSION_MAJOR} -P ./3rd_party.cmake WORKING_DIRECTORY ${CMAKE_CURRENT_SOURCE_DIR} ) diff --git a/cmake/compiler/clang.cmake b/cmake/compiler/clang.cmake index 1749ad0a8..cc4042dbc 100644 --- a/cmake/compiler/clang.cmake +++ b/cmake/compiler/clang.cmake @@ -16,7 +16,7 @@ set(CMAKE_RANLIB "ranlib") set(CMAKE_STRIP "strip") -list(APPEND ML_C_FLAGS +list(APPEND ML_C_FLAGS ${CROSS_FLAGS} ${ARCHCFLAGS} "-fstack-protector" diff --git a/cmake/functions.cmake b/cmake/functions.cmake index c39a86089..ea8070a71 100644 --- a/cmake/functions.cmake +++ b/cmake/functions.cmake @@ -392,6 +392,13 @@ function(ml_add_test_executable _target) COMMENT "Running test: ml_test_${_target}" WORKING_DIRECTORY ${CMAKE_CURRENT_SOURCE_DIR} ) + + add_custom_target(test_${_target}_individually + DEPENDS ml_test_${_target} + COMMAND ${CMAKE_SOURCE_DIR}/run_tests_as_seperate_processes.sh ${CMAKE_BINARY_DIR} ${CMAKE_CURRENT_BINARY_DIR} test_${_target} + COMMENT "Running test: ml_test_${_target}_individually" + WORKING_DIRECTORY ${CMAKE_CURRENT_SOURCE_DIR} + ) endif() endfunction() @@ -420,8 +427,10 @@ function(ml_add_test _directory _target) add_subdirectory(../${_directory} ${_directory}) list(APPEND ML_BUILD_TEST_DEPENDS 
ml_test_${_target}) list(APPEND ML_TEST_DEPENDS test_${_target}) + list(APPEND ML_TEST_INDIVIDUALLY_DEPENDS test_${_target}_individually) set(ML_BUILD_TEST_DEPENDS ${ML_BUILD_TEST_DEPENDS} PARENT_SCOPE) set(ML_TEST_DEPENDS ${ML_TEST_DEPENDS} PARENT_SCOPE) + set(ML_TEST_INDIVIDUALLY_DEPENDS ${ML_TEST_INDIVIDUALLY_DEPENDS} PARENT_SCOPE) endfunction() diff --git a/cmake/test-runner.cmake b/cmake/test-runner.cmake index 2bba0cb5c..7c13d59f0 100644 --- a/cmake/test-runner.cmake +++ b/cmake/test-runner.cmake @@ -9,6 +9,13 @@ # limitation. # +execute_process(COMMAND ${CMAKE_COMMAND} -E rm -f ${TEST_DIR}/*.out) +execute_process(COMMAND ${CMAKE_COMMAND} -E rm -f ${TEST_DIR}/*.failed) +execute_process(COMMAND ${CMAKE_COMMAND} -E rm -f boost_test_results*.xml) +execute_process(COMMAND ${CMAKE_COMMAND} -E rm -f boost_test_results*.junit) + +set(INDIVIDUAL_TEST "CAnnotationJsonWriterTest/testWrite") + if(TEST_NAME STREQUAL "ml_test_seccomp") execute_process(COMMAND ${TEST_DIR}/${TEST_NAME} $ENV{BOOST_TEST_OUTPUT_FORMAT_FLAGS} --logger=HRF,all --report_format=HRF --show_progress=no --no_color_output OUTPUT_FILE ${TEST_DIR}/${TEST_NAME}.out ERROR_FILE ${TEST_DIR}/${TEST_NAME}.out RESULT_VARIABLE TEST_SUCCESS) else() @@ -17,22 +24,50 @@ else() string(REPLACE " " ";" TEST_FLAGS $ENV{TEST_FLAGS}) endif() + + set(SAFE_TEST_NAME "") + set(OUTPUT_FILE "${TEST_DIR}/${TEST_NAME}.out") + set(FAILED_FILE "${TEST_DIR}/${TEST_NAME}.failed") # Special case for specifying a subset of tests to run (can be regex) if (DEFINED ENV{TESTS} AND NOT "$ENV{TESTS}" STREQUAL "") set(TESTS "--run_test=$ENV{TESTS}") + string(REGEX REPLACE "[^a-zA-Z0-9_]" "_" SAFE_TEST_NAME "$ENV{TESTS}") + set(SAFE_TEST_NAME "_${SAFE_TEST_NAME}") endif() - # If any special command line args are present run the tests in the foreground + # If env var RUN_BOOST_TESTS_IN_BACKGROUND is defined run the tests in the background + message(STATUS "RUN_BOOST_TESTS_IN_BACKGROUND=$ENV{RUN_BOOST_TESTS_IN_BACKGROUND}") + if (DEFINED 
TEST_FLAGS OR DEFINED TESTS) - message(STATUS "executing process ${TEST_DIR}/${TEST_NAME} ${TEST_FLAGS} ${TESTS} $ENV{BOOST_TEST_OUTPUT_FORMAT_FLAGS}") - execute_process(COMMAND ${TEST_DIR}/${TEST_NAME} ${TEST_FLAGS} ${TESTS} $ENV{BOOST_TEST_OUTPUT_FORMAT_FLAGS} RESULT_VARIABLE TEST_SUCCESS) + string(REPLACE "boost_test_results" "boost_test_results${SAFE_TEST_NAME}" BOOST_TEST_OUTPUT_FORMAT_FLAGS "$ENV{BOOST_TEST_OUTPUT_FORMAT_FLAGS}") + set(OUTPUT_FILE "${TEST_DIR}/${TEST_NAME}${SAFE_TEST_NAME}.out") + set(FAILED_FILE "${TEST_DIR}/${TEST_NAME}${SAFE_TEST_NAME}.failed") + + if(DEFINED ENV{RUN_BOOST_TESTS_IN_BACKGROUND}) + message(STATUS "executing process ${TEST_DIR}/${TEST_NAME} ${TEST_FLAGS} ${TESTS} ${BOOST_TEST_OUTPUT_FORMAT_FLAGS} --no_color_output") + execute_process(COMMAND ${TEST_DIR}/${TEST_NAME} ${TEST_FLAGS} ${TESTS} ${BOOST_TEST_OUTPUT_FORMAT_FLAGS} --no_color_output OUTPUT_FILE ${OUTPUT_FILE} ERROR_FILE ${OUTPUT_FILE} RESULT_VARIABLE TEST_SUCCESS) + message(STATUS "TESTS EXITED WITH SUCCESS ${TEST_SUCCESS}") + else() + message(STATUS "executing process ${TEST_DIR}/${TEST_NAME} ${TEST_FLAGS} ${TESTS} ${BOOST_TEST_OUTPUT_FORMAT_FLAGS}") + execute_process(COMMAND ${TEST_DIR}/${TEST_NAME} ${TEST_FLAGS} ${TESTS} ${BOOST_TEST_OUTPUT_FORMAT_FLAGS} RESULT_VARIABLE TEST_SUCCESS) + message(STATUS "TESTS EXITED WITH SUCCESS ${TEST_SUCCESS}") + endif() else() - execute_process(COMMAND ${TEST_DIR}/${TEST_NAME} $ENV{TEST_FLAGS} $ENV{BOOST_TEST_OUTPUT_FORMAT_FLAGS} - --no_color_output OUTPUT_FILE ${TEST_DIR}/${TEST_NAME}.out ERROR_FILE ${TEST_DIR}/${TEST_NAME}.out RESULT_VARIABLE TEST_SUCCESS) + if(DEFINED ENV{RUN_BOOST_TESTS_IN_BACKGROUND}) + execute_process(COMMAND ${TEST_DIR}/${TEST_NAME} $ENV{TEST_FLAGS} $ENV{BOOST_TEST_OUTPUT_FORMAT_FLAGS} + --no_color_output OUTPUT_FILE ${OUTPUT_FILE} ERROR_FILE ${OUTPUT_FILE} RESULT_VARIABLE TEST_SUCCESS) + else() + execute_process(COMMAND ${TEST_DIR}/${TEST_NAME} ${TEST_FLAGS} ${TESTS} ${BOOST_TEST_OUTPUT_FORMAT_FLAGS} 
RESULT_VARIABLE TEST_SUCCESS) + endif() + endif() + + if (NOT TEST_SUCCESS EQUAL 0) + if (EXISTS ${TEST_DIR}/${TEST_NAME}) + execute_process(COMMAND ${CMAKE_COMMAND} -E cat ${OUTPUT_FILE}) + file(WRITE "${TEST_DIR}/${FAILED_FILE}" "") + endif() + message(FATAL_ERROR "Exiting with status ${TEST_SUCCESS}") endif() -endif() -if (NOT TEST_SUCCESS EQUAL 0) - execute_process(COMMAND ${CMAKE_COMMAND} -E cat ${TEST_DIR}/${TEST_NAME}.out) - file(WRITE "${TEST_DIR}/${TEST_NAME}.failed" "") endif() + diff --git a/dev-tools/docker/docker_entrypoint.sh b/dev-tools/docker/docker_entrypoint.sh index 475c05344..dba029e51 100755 --- a/dev-tools/docker/docker_entrypoint.sh +++ b/dev-tools/docker/docker_entrypoint.sh @@ -66,6 +66,6 @@ if [ "x$1" = "x--test" ] ; then # failure is the unit tests, and then the detailed test results can be # copied from the image echo passed > build/test_status.txt - cmake --build cmake-build-docker ${CMAKE_VERBOSE} -j`nproc` -t test || echo failed > build/test_status.txt + cmake --build cmake-build-docker ${CMAKE_VERBOSE} -j`nproc` -t test_individually || echo failed > build/test_status.txt fi diff --git a/generate_test_names.py b/generate_test_names.py new file mode 100755 index 000000000..805c169d6 --- /dev/null +++ b/generate_test_names.py @@ -0,0 +1,77 @@ +#!/usr/bin/env python3 +# Copyright Elasticsearch B.V. and/or licensed to Elasticsearch B.V. under one +# or more contributor license agreements. Licensed under the Elastic License +# 2.0 and the following additional limitation. Functionality enabled by the +# files subject to the Elastic License 2.0 may only be used in production when +# invoked by an Elasticsearch process with a license key installed that permits +# use of machine learning features. You may not use this file except in +# compliance with the Elastic License 2.0 and the foregoing additional +# limitation. 
+ +# This script provides a wrapper around a call to a BOOST test executable +# to return a formatted list of tests such that each fully qualified test +# name would be in a form suitable to being passed to BOOST test's "--run_test" +# parameter. +# It takes precisely one positional parameter, the path to a BOOST test executable. + + +import argparse +import re +import subprocess +import sys + + +def parse_arguments(): + parser = argparse.ArgumentParser() + parser.add_argument('exec_path', help='The path to the ml_test suite executable') + return parser.parse_args() + +def get_qualified_test_names(executable_path): + + cmd = [args.exec_path, "--list_content"] + process = subprocess.run(cmd, capture_output=True, text=True, check=True) + output_lines = process.stderr.splitlines() + + test_names = [] + current_suite_stack = [] + + for line in output_lines: + match_suite = re.match(r'^( *)(C.*Test)\*$', line) + match_case = re.match(r'^( *)(test.*)\*$', line) + + if match_suite: + indent_level = len(match_suite.group(1)) + suite_name = match_suite.group(2) + + # Pop suites from stack if current indent is less or equal + while current_suite_stack and len(current_suite_stack[-1][0]) >= indent_level: + current_suite_stack.pop() + + current_suite_stack.append((match_suite.group(1), suite_name)) + elif match_case: + indent_level = len(match_case.group(1)) + case_name = match_case.group(2) + + # Pop suites from stack if current indent is less (for sibling suites/cases) + while current_suite_stack and len(current_suite_stack[-1][0]) >= indent_level: + current_suite_stack.pop() + + full_path = "/".join([s[1] for s in current_suite_stack] + [case_name]) + test_names.append(full_path) + return test_names + +if __name__ == "__main__": + args = parse_arguments() + try: + names = get_qualified_test_names(args.exec_path) + for name in names: + print(name) + except subprocess.CalledProcessError as e: + print(f"Error listing tests: {e.stderr}", file=sys.stderr) + sys.exit(1) + except 
FileNotFoundError: + print(f"Error: Test executable '{args.exec_path}' not found.", file=sys.stderr) + sys.exit(1) + except Exception as e: + print(f"An unexpected error occurred: {e}", file=sys.stderr) + sys.exit(1) diff --git a/run_tests_as_seperate_processes.sh b/run_tests_as_seperate_processes.sh new file mode 100755 index 000000000..6f565ce7c --- /dev/null +++ b/run_tests_as_seperate_processes.sh @@ -0,0 +1,113 @@ +#!/bin/bash +# +# Copyright Elasticsearch B.V. and/or licensed to Elasticsearch B.V. under one +# or more contributor license agreements. Licensed under the Elastic License +# 2.0 and the following additional limitation. Functionality enabled by the +# files subject to the Elastic License 2.0 may only be used in production when +# invoked by an Elasticsearch process with a license key installed that permits +# use of machine learning features. You may not use this file except in +# compliance with the Elastic License 2.0 and the foregoing additional +# limitation. +# + +# This script ultimately gets called from within the docker entry point script. +# It provides a wrapper around the call to "cmake" that runs the test cases +# and provides some flexibility as to how the tests should be run in terms of how they +# are spread across processes. This is necessary when trying to isolate the impact memory +# usage of tests have upon one another. +# +# It is intended to be called as part of the CI build/test process but should be able to be run manually. +# +# It should be called with 3 parameters +# cmake_build_dir: The directory that cmake is using for build outputs, i.e. that passed to cmake's --build argument +# cmake_current_binary_dir: The directory containing the current test suite executable e.g. /test/lib/api/unittest +# test_suite: The name of the test suite to run, minus any leading "ml_", e.g. 
"test_api" +# +# In addition to the required parameters there are several environment variables that control the script's behaviour +# BOOST_TEST_MAX_ARGS: The maximum number of test cases to be passed off to a sub shell +# BOOST_TEST_MAX_PROCS: The maximum number of sub shells to use +# BOOST_TEST_MIXED_MODE: If set to "true" then rather than iterating over each individual test passed to a sub-shell +# run them all in the same BOOST test executable process. +# + +if [ $# -lt 3 ]; then + echo "Usage: $0 " + echo "e.g.: $0 ${CPP_SRC_HOME}/cmake-build-relwithdebinfo-local ${CPP_SRC_HOME}/cmake-build-relwithdebinfo-local/test/lib/api/unittest test_api" + exit +fi + +export BUILD_DIR=$1 +export BINARY_DIR=$2 +export TEST_SUITE=$3 + +export TEST_EXECUTABLE="$2/ml_$3" +export LOG_DIR="$2/test_logs" + +MAX_ARGS=2 +MAX_PROCS=4 + +if [[ -n "$BOOST_TEST_MAX_ARGS" ]]; then + MAX_ARGS=$BOOST_TEST_MAX_ARGS +fi + +if [[ -n "$BOOST_TEST_MAX_PROCS" ]]; then + MAX_PROCS=$BOOST_TEST_MAX_PROCS +fi + +rm -rf "$LOG_DIR" +mkdir -p "$LOG_DIR" + +echo "Discovering tests..." +# Use the Python script to get the fully qualified test names +ALL_TEST_NAMES=$(python3 ${CPP_SRC_HOME}/generate_test_names.py "$TEST_EXECUTABLE") + +if [ -z "$ALL_TEST_NAMES" ]; then + echo "No tests found to run or error in test discovery." 
+ exit 1 +fi + +EXIT_CODE=0 +export RUN_BOOST_TESTS_IN_BACKGROUND=1 + +function execute_tests() { + + if [[ "$BOOST_TEST_MIXED_MODE" == "true" ]]; then + TEST_CASES=$(sed 's/ /:/g' <<< $@) + else + TEST_CASES=$@ + fi + + # Loop through each test + for TEST_NAME in $TEST_CASES; do + echo "--------------------------------------------------" + echo "Running test: $TEST_NAME" + + # Replace slashes and potentially other special chars for a safe filename + SAFE_TEST_LOG_FILENAME=$(echo "$TEST_NAME" | sed 's/[^a-zA-Z0-9_]/_/g' | cut -c-100) + LOG_FILE="$LOG_DIR/${SAFE_TEST_LOG_FILENAME}.log" + + # Execute the test in a separate process + TESTS=$TEST_NAME cmake --build $BUILD_DIR -t $TEST_SUITE > "$LOG_FILE" 2>&1 + TEST_STATUS=$? + + if [ $TEST_STATUS -eq 0 ]; then + echo "Test '$TEST_NAME' PASSED." + else + echo "Test '$TEST_NAME' FAILED with exit code $TEST_STATUS. Check '$LOG_FILE' for details." + EXIT_CODE=1 # Indicate overall failure if any test fails + fi + done +} + +export -f execute_tests + +echo $ALL_TEST_NAMES | xargs -n $MAX_ARGS -P $MAX_PROCS bash -c 'execute_tests "$@"' _ + +echo "--------------------------------------------------" +if [ $EXIT_CODE -eq 0 ]; then + echo "All individual tests PASSED." +else + echo "Some individual tests FAILED. Check logs in '$LOG_DIR'." 
+fi + +exit $EXIT_CODE diff --git a/test/CMakeLists.txt b/test/CMakeLists.txt index 1877e64b5..b96518a8a 100644 --- a/test/CMakeLists.txt +++ b/test/CMakeLists.txt @@ -31,8 +31,17 @@ add_custom_target(run_tests DEPENDS clean_test_results ${ML_TEST_DEPENDS} WORKING_DIRECTORY ${CMAKE_SOURCE_DIR} ) +add_custom_target(run_tests_individually + DEPENDS clean_test_results ${ML_TEST_INDIVIDUALLY_DEPENDS} + WORKING_DIRECTORY ${CMAKE_SOURCE_DIR} +) add_custom_target(test DEPENDS run_tests COMMAND ${CMAKE_COMMAND} -DTEST_DIR=${CMAKE_BINARY_DIR} -P ${CMAKE_SOURCE_DIR}/cmake/test-check-success.cmake WORKING_DIRECTORY ${CMAKE_SOURCE_DIR} ) +add_custom_target(test_individually + DEPENDS run_tests_individually + COMMAND ${CMAKE_COMMAND} -DTEST_DIR=${CMAKE_BINARY_DIR} -P ${CMAKE_SOURCE_DIR}/cmake/test-check-success.cmake + WORKING_DIRECTORY ${CMAKE_SOURCE_DIR} +) \ No newline at end of file From 1c5c46e00ddd94a440cf0bff79df00fc23205cd8 Mon Sep 17 00:00:00 2001 From: Ed Savage Date: Mon, 30 Jun 2025 11:40:11 +1200 Subject: [PATCH 02/25] Slight tweak to reduce load on linux builds --- dev-tools/docker/docker_entrypoint.sh | 2 +- dev-tools/docker_test.sh | 2 +- 2 files changed, 2 insertions(+), 2 deletions(-) diff --git a/dev-tools/docker/docker_entrypoint.sh b/dev-tools/docker/docker_entrypoint.sh index dba029e51..1585356c4 100755 --- a/dev-tools/docker/docker_entrypoint.sh +++ b/dev-tools/docker/docker_entrypoint.sh @@ -66,6 +66,6 @@ if [ "x$1" = "x--test" ] ; then # failure is the unit tests, and then the detailed test results can be # copied from the image echo passed > build/test_status.txt - cmake --build cmake-build-docker ${CMAKE_VERBOSE} -j`nproc` -t test_individually || echo failed > build/test_status.txt + cmake --build cmake-build-docker ${CMAKE_VERBOSE} -j $(( `nproc`/2 )) -t test_individually || echo failed > build/test_status.txt fi diff --git a/dev-tools/docker_test.sh b/dev-tools/docker_test.sh index aed18fa60..44a972b6c 100755 --- a/dev-tools/docker_test.sh +++ 
b/dev-tools/docker_test.sh @@ -25,7 +25,7 @@ usage() { } PLATFORMS= -EXTRACT_FIND="-name boost_test_results.xml -o -name boost_test_results.junit" +EXTRACT_FIND="-name boost_test_results\*.xml -o -name boost_test_results\*.junit" EXTRACT_EXPLICIT="build/distributions build/test_status.txt" while [ -n "$1" ] From 0109cce78d8bce18fdf185d3705607286ab59271 Mon Sep 17 00:00:00 2001 From: Ed Savage Date: Mon, 30 Jun 2025 17:00:37 +1200 Subject: [PATCH 03/25] Replace use of python script with bash function --- run_tests_as_seperate_processes.sh | 23 +++++++++++++++++++++-- 1 file changed, 21 insertions(+), 2 deletions(-) diff --git a/run_tests_as_seperate_processes.sh b/run_tests_as_seperate_processes.sh index 6f565ce7c..7715bdd57 100755 --- a/run_tests_as_seperate_processes.sh +++ b/run_tests_as_seperate_processes.sh @@ -57,9 +57,28 @@ fi rm -rf "$LOG_DIR" mkdir -p "$LOG_DIR" +function get_qualified_test_names() { + executable_path=$1 + + output_lines=$($executable_path --list_content 2>&1) + + while IFS= read -r line; do + match=$(grep -w '^[ ]*C.*Test' <<< "$line"); + if [ $? -eq 0 ]; then + suite=$match + continue + fi + match=$(grep -w 'test.*\*$' <<< "$line"); + if [ $? -eq 0 ]; then + case=$(sed 's/[ \*]//g' <<< "$suite/$match") + echo "$case" + fi + done <<< "$output_lines" +} + +# get the fully qualified test names echo "Discovering tests..." -# Use the Python script to get the fully qualified test names -ALL_TEST_NAMES=$(python3 ${CPP_SRC_HOME}/generate_test_names.py "$TEST_EXECUTABLE") +ALL_TEST_NAMES=$(get_qualified_test_names "$TEST_EXECUTABLE") if [ -z "$ALL_TEST_NAMES" ]; then echo "No tests found to run or error in test discovery." 
From 01c1e6b4835ed5373555c09c97b7ed4084c8c71a Mon Sep 17 00:00:00 2001 From: Ed Savage Date: Tue, 1 Jul 2025 10:22:11 +1200 Subject: [PATCH 04/25] Pass all JUNIT result files to test collector --- .buildkite/pipelines/build_linux.json.py | 3 +-- 1 file changed, 1 insertion(+), 2 deletions(-) diff --git a/.buildkite/pipelines/build_linux.json.py b/.buildkite/pipelines/build_linux.json.py index 56c6002aa..78fc7afea 100755 --- a/.buildkite/pipelines/build_linux.json.py +++ b/.buildkite/pipelines/build_linux.json.py @@ -72,10 +72,9 @@ def main(args): "RUN_TESTS": "true", "BOOST_TEST_OUTPUT_FORMAT_FLAGS": "--logger=JUNIT,error,boost_test_results.junit", }, - "artifact_paths": "*/**/unittest/boost_test_results.junit", "plugins": { "test-collector#v1.2.0": { - "files": "*/*/unittest/boost_test_results.junit", + "files": "*/*/unittest/boost_test_results_*.junit", "format": "junit" } }, From e2d2d17ef58fd81b17b49920558beb5606ea9dbb Mon Sep 17 00:00:00 2001 From: Ed Savage Date: Wed, 2 Jul 2025 16:27:47 +1200 Subject: [PATCH 05/25] Better reporting of unit test results Combine multiple JUNIT results files from individual test runs into one file, omitting any skipped tests --- merge_results.sh | 41 ++++++++++++++++++++++++++++++ run_tests_as_seperate_processes.sh | 16 +++++++----- 2 files changed, 50 insertions(+), 7 deletions(-) create mode 100755 merge_results.sh diff --git a/merge_results.sh b/merge_results.sh new file mode 100755 index 000000000..c1d544609 --- /dev/null +++ b/merge_results.sh @@ -0,0 +1,41 @@ +#!/bin/bash +# +# Copyright Elasticsearch B.V. and/or licensed to Elasticsearch B.V. under one +# or more contributor license agreements. Licensed under the Elastic License +# 2.0 and the following additional limitation. Functionality enabled by the +# files subject to the Elastic License 2.0 may only be used in production when +# invoked by an Elasticsearch process with a license key installed that permits +# use of machine learning features. 
You may not use this file except in +# compliance with the Elastic License 2.0 and the foregoing additional +# limitation. +# + +# This script amalgamates multiple JUNIT results files from individual tests +# into one, omitting any test cases that have been skipped. The result is +# output to stdout. + +if [ $# -lt 1 ]; then + echo "Usage: $0 " + exit 1 +fi + +JUNIT_FILES="$@" + +echo "" +cat $JUNIT_FILES | \ + gawk -n ' + BEGIN{tests=0; skipped=0; errors=0; failures=0; id=""; time=0.0; name=""} + { + where=match($0, /"}' + +cat $JUNIT_FILES | sed -e '/xml/d' -e '/testsuite/d' -e '//{H;d;};x;/skipped/d' | grep '.' +echo "" +echo + diff --git a/run_tests_as_seperate_processes.sh b/run_tests_as_seperate_processes.sh index 7715bdd57..601fc389c 100755 --- a/run_tests_as_seperate_processes.sh +++ b/run_tests_as_seperate_processes.sh @@ -20,7 +20,7 @@ # # It should be called with 3 parameters # cmake_build_dir: The directory that cmake is using for build outputs, i.e. that passed to cmake's --build argument -# cmake_current_binary_dir: The directory containing the current test suite executable e.g. /test/lib/api/unittest +# unit_test_dir: The relative directory containing the current test suite e.g. lib/api/unittest # test_suite: The name of the test suite to run, minus any leading "ml_", e.g. 
"test_api" # # In addition to the required parameters there are several environment variables that control the script's behaviour @@ -32,16 +32,16 @@ if [ $# -lt 3 ]; then echo "Usage: $0 " - echo "e.g.: $0 ${CPP_SRC_HOME}/cmake-build-relwithdebinfo-local ${CPP_SRC_HOME}/cmake-build-relwithdebinfo-local/test/lib/api/unittest test_api" + echo "e.g.: $0 ${CPP_SRC_HOME}/cmake-build-relwithdebinfo-local lib/api/unittest test_api" exit fi export BUILD_DIR=$1 -export BINARY_DIR=$2 +export TEST_DIR=$2 export TEST_SUITE=$3 -export TEST_EXECUTABLE="$2/ml_$3" -export LOG_DIR="$2/test_logs" +export TEST_EXECUTABLE="$1/test/$2/ml_$3" +export LOG_DIR="$1/test/$2/test_logs" MAX_ARGS=2 MAX_PROCS=4 @@ -124,9 +124,11 @@ echo $ALL_TEST_NAMES | xargs -n $MAX_ARGS -P $MAX_PROCS bash -c 'execute_tests " echo "--------------------------------------------------" if [ $EXIT_CODE -eq 0 ]; then - echo "All individual tests PASSED." + echo "$TEST_SUITE: All individual tests PASSED." else - echo "Some individual tests FAILED. Check logs in '$LOG_DIR'." + echo "$TEST_SUITE: Some individual tests FAILED. Check logs in '$LOG_DIR'." fi +./merge_results.sh $TEST_DIR/boost_test_results_C*.junit > $TEST_DIR/boost_test_results.junit + exit $EXIT_CODE From 7904e76ebb74cfd14c2049f0096f401b67f72697 Mon Sep 17 00:00:00 2001 From: Ed Savage Date: Wed, 2 Jul 2025 17:07:11 +1200 Subject: [PATCH 06/25] Slight tweak to parameters to test script --- run_tests_as_seperate_processes.sh | 16 +++++++++------- 1 file changed, 9 insertions(+), 7 deletions(-) diff --git a/run_tests_as_seperate_processes.sh b/run_tests_as_seperate_processes.sh index 601fc389c..0bb718526 100755 --- a/run_tests_as_seperate_processes.sh +++ b/run_tests_as_seperate_processes.sh @@ -20,7 +20,7 @@ # # It should be called with 3 parameters # cmake_build_dir: The directory that cmake is using for build outputs, i.e. that passed to cmake's --build argument -# unit_test_dir: The relative directory containing the current test suite e.g. 
lib/api/unittest +# cmake_current_binary_dir: The directory containing the current test suite executable e.g. /test/lib/api/unittest # test_suite: The name of the test suite to run, minus any leading "ml_", e.g. "test_api" # # In addition to the required parameters there are several environment variables that control the script's behaviour @@ -32,16 +32,18 @@ if [ $# -lt 3 ]; then echo "Usage: $0 " - echo "e.g.: $0 ${CPP_SRC_HOME}/cmake-build-relwithdebinfo-local lib/api/unittest test_api" + echo "e.g.: $0 ${CPP_SRC_HOME}/cmake-build-relwithdebinfo-local ${CPP_SRC_HOME}/cmake-build-relwithdebinfo-local/test/lib/api/unittest test_api" exit fi export BUILD_DIR=$1 -export TEST_DIR=$2 +export BINARY_DIR=$2 export TEST_SUITE=$3 -export TEST_EXECUTABLE="$1/test/$2/ml_$3" -export LOG_DIR="$1/test/$2/test_logs" +TEST_DIR=$(echo $BINARY_DIR | sed "s|$BUILD_DIR/test/||") + +export TEST_EXECUTABLE="$2/ml_$3" +export LOG_DIR="$2/test_logs" MAX_ARGS=2 MAX_PROCS=4 @@ -124,9 +126,9 @@ echo $ALL_TEST_NAMES | xargs -n $MAX_ARGS -P $MAX_PROCS bash -c 'execute_tests " echo "--------------------------------------------------" if [ $EXIT_CODE -eq 0 ]; then - echo "$TEST_SUITE: All individual tests PASSED." + echo "All individual tests PASSED." else - echo "$TEST_SUITE: Some individual tests FAILED. Check logs in '$LOG_DIR'." + echo "Some individual tests FAILED. Check logs in '$LOG_DIR'." 
fi ./merge_results.sh $TEST_DIR/boost_test_results_C*.junit > $TEST_DIR/boost_test_results.junit From 185a0e09bf4daf70d156bb1c3d75346e1781450f Mon Sep 17 00:00:00 2001 From: Ed Savage Date: Thu, 3 Jul 2025 09:44:52 +1200 Subject: [PATCH 07/25] Tweaks and typos --- .buildkite/pipelines/build_linux.json.py | 2 +- run_tests_as_seperate_processes.sh | 4 ++-- 2 files changed, 3 insertions(+), 3 deletions(-) diff --git a/.buildkite/pipelines/build_linux.json.py b/.buildkite/pipelines/build_linux.json.py index 78fc7afea..efe313b15 100755 --- a/.buildkite/pipelines/build_linux.json.py +++ b/.buildkite/pipelines/build_linux.json.py @@ -74,7 +74,7 @@ def main(args): }, "plugins": { "test-collector#v1.2.0": { - "files": "*/*/unittest/boost_test_results_*.junit", + "files": "*/*/unittest/boost_test_results.junit", "format": "junit" } }, diff --git a/run_tests_as_seperate_processes.sh b/run_tests_as_seperate_processes.sh index 0bb718526..4d15de5ee 100755 --- a/run_tests_as_seperate_processes.sh +++ b/run_tests_as_seperate_processes.sh @@ -126,9 +126,9 @@ echo $ALL_TEST_NAMES | xargs -n $MAX_ARGS -P $MAX_PROCS bash -c 'execute_tests " echo "--------------------------------------------------" if [ $EXIT_CODE -eq 0 ]; then - echo "All individual tests PASSED." + echo "$TEST_SUITE: All individual tests PASSED." else - echo "Some individual tests FAILED. Check logs in '$LOG_DIR'." + echo "$TEST_SUITE: Some individual tests FAILED. Check logs in '$LOG_DIR'." 
fi ./merge_results.sh $TEST_DIR/boost_test_results_C*.junit > $TEST_DIR/boost_test_results.junit From 0a6a08695466692811ca71cc30f0fa3df5666649 Mon Sep 17 00:00:00 2001 From: Ed Savage Date: Thu, 3 Jul 2025 14:46:44 +1200 Subject: [PATCH 08/25] Portability fixes --- run_tests_as_seperate_processes.sh | 38 ++++++++++++++++++++++++++++-- 1 file changed, 36 insertions(+), 2 deletions(-) diff --git a/run_tests_as_seperate_processes.sh b/run_tests_as_seperate_processes.sh index 4d15de5ee..8011bc0fe 100755 --- a/run_tests_as_seperate_processes.sh +++ b/run_tests_as_seperate_processes.sh @@ -29,6 +29,9 @@ # BOOST_TEST_MIXED_MODE: If set to "true" then rather than iterating over each individual test passed to a sub-shell # run them all in the same BOOST test executable process. # +# Design decisions: The script relies upon the simplest tools available on most unix like platforms - bash, sed and +# awk (the awk script does not use any GNU extensions for maximum portability). This is to keep the number of dependencies +# required by CI build images to a minimum (so e.g. no python etc.) if [ $# -lt 3 ]; then echo "Usage: $0 " @@ -40,7 +43,7 @@ export BUILD_DIR=$1 export BINARY_DIR=$2 export TEST_SUITE=$3 -TEST_DIR=$(echo $BINARY_DIR | sed "s|$BUILD_DIR/test/||") +TEST_DIR=${CPP_SRC_HOME}/$(echo $BINARY_DIR | sed "s|$BUILD_DIR/test/||") export TEST_EXECUTABLE="$2/ml_$3" export LOG_DIR="$2/test_logs" @@ -131,6 +134,37 @@ else echo "$TEST_SUITE: Some individual tests FAILED. Check logs in '$LOG_DIR'." fi -./merge_results.sh $TEST_DIR/boost_test_results_C*.junit > $TEST_DIR/boost_test_results.junit +function merge_junit_results() { + JUNIT_FILES="$@" + echo "" + cat $JUNIT_FILES | \ + awk ' + BEGIN{tests=0; skipped=0; errors=0; failures=0; id=""; time=0.0; name=""} + $0 ~ /"}' + + cat $JUNIT_FILES | sed -e '/xml/d' -e '/testsuite/d' -e '//{H;d;};x;/skipped/d' | grep '.' 
+echo "" +echo +} + +merge_junit_results $TEST_DIR/boost_test_results_C*.junit > $TEST_DIR/boost_test_results.junit exit $EXIT_CODE From 6c97572660a37a0919a7672da4c6a090c58861ec Mon Sep 17 00:00:00 2001 From: Ed Savage Date: Fri, 4 Jul 2025 09:36:08 +1200 Subject: [PATCH 09/25] Fix failing test cases --- lib/core/unittest/CNamedPipeFactoryTest.cc | 43 +++++++++++++--------- 1 file changed, 25 insertions(+), 18 deletions(-) diff --git a/lib/core/unittest/CNamedPipeFactoryTest.cc b/lib/core/unittest/CNamedPipeFactoryTest.cc index 39aef5e07..da05bb634 100644 --- a/lib/core/unittest/CNamedPipeFactoryTest.cc +++ b/lib/core/unittest/CNamedPipeFactoryTest.cc @@ -38,9 +38,9 @@ const std::size_t MAX_ATTEMPTS{100}; const std::size_t TEST_SIZE{10000}; const char TEST_CHAR{'a'}; #ifdef Windows -const char* const TEST_PIPE_NAME{"\\\\.\\pipe\\testpipe"}; +static const std::string TEST_PIPE_NAME{"\\\\.\\pipe\\testpipe"}; #else -const char* const TEST_PIPE_NAME{"testfiles/testpipe"}; +static const std::string TEST_PIPE_NAME{"testfiles/testpipe"}; #endif class CThreadBlockCanceller : public ml::core::CThread { @@ -71,13 +71,14 @@ class CThreadBlockCanceller : public ml::core::CThread { } BOOST_AUTO_TEST_CASE(testServerIsCppReader) { - ml::test::CThreadDataWriter threadWriter{SLEEP_TIME_MS, TEST_PIPE_NAME, + const std::string pipeName=TEST_PIPE_NAME+"testServerIsCppReader"; + ml::test::CThreadDataWriter threadWriter{SLEEP_TIME_MS, pipeName, TEST_CHAR, TEST_SIZE}; BOOST_TEST_REQUIRE(threadWriter.start()); std::atomic_bool dummy{false}; ml::core::CNamedPipeFactory::TIStreamP strm{ - ml::core::CNamedPipeFactory::openPipeStreamRead(TEST_PIPE_NAME, dummy)}; + ml::core::CNamedPipeFactory::openPipeStreamRead(pipeName, dummy)}; BOOST_TEST_REQUIRE(strm); static const std::streamsize BUF_SIZE{512}; @@ -100,13 +101,15 @@ BOOST_AUTO_TEST_CASE(testServerIsCppReader) { } BOOST_AUTO_TEST_CASE(testServerIsCReader) { - ml::test::CThreadDataWriter threadWriter{SLEEP_TIME_MS, TEST_PIPE_NAME, + const 
std::string pipeName=TEST_PIPE_NAME+"testServerIsCReader"; + + ml::test::CThreadDataWriter threadWriter{SLEEP_TIME_MS, pipeName, TEST_CHAR, TEST_SIZE}; BOOST_TEST_REQUIRE(threadWriter.start()); std::atomic_bool dummy{false}; ml::core::CNamedPipeFactory::TFileP file{ - ml::core::CNamedPipeFactory::openPipeFileRead(TEST_PIPE_NAME, dummy)}; + ml::core::CNamedPipeFactory::openPipeFileRead(pipeName, dummy)}; BOOST_TEST_REQUIRE(file); static const std::size_t BUF_SIZE{512}; @@ -129,12 +132,14 @@ BOOST_AUTO_TEST_CASE(testServerIsCReader) { } BOOST_AUTO_TEST_CASE(testServerIsCppWriter) { - ml::test::CThreadDataReader threadReader{PAUSE_TIME_MS, MAX_ATTEMPTS, TEST_PIPE_NAME}; + const std::string pipeName=TEST_PIPE_NAME+"testServerIsCppWriter"; + + ml::test::CThreadDataReader threadReader{PAUSE_TIME_MS, MAX_ATTEMPTS, pipeName}; BOOST_TEST_REQUIRE(threadReader.start()); std::atomic_bool dummy{false}; ml::core::CNamedPipeFactory::TOStreamP strm{ - ml::core::CNamedPipeFactory::openPipeStreamWrite(TEST_PIPE_NAME, dummy)}; + ml::core::CNamedPipeFactory::openPipeStreamWrite(pipeName, dummy)}; BOOST_TEST_REQUIRE(strm); std::size_t charsLeft{TEST_SIZE}; @@ -159,12 +164,14 @@ BOOST_AUTO_TEST_CASE(testServerIsCppWriter) { } BOOST_AUTO_TEST_CASE(testServerIsCWriter) { - ml::test::CThreadDataReader threadReader{PAUSE_TIME_MS, MAX_ATTEMPTS, TEST_PIPE_NAME}; + const std::string pipeName=TEST_PIPE_NAME+"testServerIsCWriter"; + + ml::test::CThreadDataReader threadReader{PAUSE_TIME_MS, MAX_ATTEMPTS, pipeName}; BOOST_TEST_REQUIRE(threadReader.start()); std::atomic_bool dummy{false}; ml::core::CNamedPipeFactory::TFileP file{ - ml::core::CNamedPipeFactory::openPipeFileWrite(TEST_PIPE_NAME, dummy)}; + ml::core::CNamedPipeFactory::openPipeFileWrite(pipeName, dummy)}; BOOST_TEST_REQUIRE(file); std::size_t charsLeft{TEST_SIZE}; @@ -200,7 +207,7 @@ BOOST_AUTO_TEST_CASE(testCancelBlock) { } BOOST_AUTO_TEST_CASE(testErrorIfRegularFile) { - std::atomic_bool dummy{false}; + const std::atomic_bool 
dummy{false}; ml::core::CNamedPipeFactory::TIStreamP strm{ ml::core::CNamedPipeFactory::openPipeStreamRead("Main.cc", dummy)}; BOOST_TEST_REQUIRE(strm == nullptr); @@ -215,23 +222,23 @@ BOOST_AUTO_TEST_CASE(testErrorIfSymlink) { // Suppress the error about no assertions in this case BOOST_REQUIRE(BOOST_IS_DEFINED(Windows)); #else - static const char* const TEST_SYMLINK_NAME{"test_symlink"}; + const std::string TEST_SYMLINK_NAME{"test_symlink"}; // Remove any files left behind by a previous failed test, but don't check // the return codes as these calls will usually fail - ::unlink(TEST_SYMLINK_NAME); - ::unlink(TEST_PIPE_NAME); + ::unlink(TEST_SYMLINK_NAME.c_str()); + ::unlink(TEST_PIPE_NAME.c_str()); - BOOST_REQUIRE_EQUAL(0, ::mkfifo(TEST_PIPE_NAME, S_IRUSR | S_IWUSR)); - BOOST_REQUIRE_EQUAL(0, ::symlink(TEST_PIPE_NAME, TEST_SYMLINK_NAME)); + BOOST_REQUIRE_EQUAL(0, ::mkfifo(TEST_PIPE_NAME.c_str(), S_IRUSR | S_IWUSR)); + BOOST_REQUIRE_EQUAL(0, ::symlink(TEST_PIPE_NAME.c_str(), TEST_SYMLINK_NAME.c_str())); std::atomic_bool dummy{false}; ml::core::CNamedPipeFactory::TIStreamP strm{ ml::core::CNamedPipeFactory::openPipeStreamRead(TEST_SYMLINK_NAME, dummy)}; BOOST_TEST_REQUIRE(strm == nullptr); - BOOST_REQUIRE_EQUAL(0, ::unlink(TEST_SYMLINK_NAME)); - BOOST_REQUIRE_EQUAL(0, ::unlink(TEST_PIPE_NAME)); + BOOST_REQUIRE_EQUAL(0, ::unlink(TEST_SYMLINK_NAME.c_str())); + BOOST_REQUIRE_EQUAL(0, ::unlink(TEST_PIPE_NAME.c_str())); #endif } From 733b03871bfb1552c1e16d80964163ec854c00da Mon Sep 17 00:00:00 2001 From: Ed Savage Date: Fri, 4 Jul 2025 10:43:19 +1200 Subject: [PATCH 10/25] Formatting --- lib/core/unittest/CNamedPipeFactoryTest.cc | 18 ++++++++---------- 1 file changed, 8 insertions(+), 10 deletions(-) diff --git a/lib/core/unittest/CNamedPipeFactoryTest.cc b/lib/core/unittest/CNamedPipeFactoryTest.cc index da05bb634..ae90cf48d 100644 --- a/lib/core/unittest/CNamedPipeFactoryTest.cc +++ b/lib/core/unittest/CNamedPipeFactoryTest.cc @@ -38,9 +38,9 @@ const std::size_t 
MAX_ATTEMPTS{100}; const std::size_t TEST_SIZE{10000}; const char TEST_CHAR{'a'}; #ifdef Windows -static const std::string TEST_PIPE_NAME{"\\\\.\\pipe\\testpipe"}; +const std::string TEST_PIPE_NAME{"\\\\.\\pipe\\testpipe"}; #else -static const std::string TEST_PIPE_NAME{"testfiles/testpipe"}; +const std::string TEST_PIPE_NAME{"testfiles/testpipe"}; #endif class CThreadBlockCanceller : public ml::core::CThread { @@ -71,9 +71,8 @@ class CThreadBlockCanceller : public ml::core::CThread { } BOOST_AUTO_TEST_CASE(testServerIsCppReader) { - const std::string pipeName=TEST_PIPE_NAME+"testServerIsCppReader"; - ml::test::CThreadDataWriter threadWriter{SLEEP_TIME_MS, pipeName, - TEST_CHAR, TEST_SIZE}; + const std::string pipeName = TEST_PIPE_NAME + "testServerIsCppReader"; + ml::test::CThreadDataWriter threadWriter{SLEEP_TIME_MS, pipeName, TEST_CHAR, TEST_SIZE}; BOOST_TEST_REQUIRE(threadWriter.start()); std::atomic_bool dummy{false}; @@ -101,10 +100,9 @@ BOOST_AUTO_TEST_CASE(testServerIsCppReader) { } BOOST_AUTO_TEST_CASE(testServerIsCReader) { - const std::string pipeName=TEST_PIPE_NAME+"testServerIsCReader"; + const std::string pipeName = TEST_PIPE_NAME + "testServerIsCReader"; - ml::test::CThreadDataWriter threadWriter{SLEEP_TIME_MS, pipeName, - TEST_CHAR, TEST_SIZE}; + ml::test::CThreadDataWriter threadWriter{SLEEP_TIME_MS, pipeName, TEST_CHAR, TEST_SIZE}; BOOST_TEST_REQUIRE(threadWriter.start()); std::atomic_bool dummy{false}; @@ -132,7 +130,7 @@ BOOST_AUTO_TEST_CASE(testServerIsCReader) { } BOOST_AUTO_TEST_CASE(testServerIsCppWriter) { - const std::string pipeName=TEST_PIPE_NAME+"testServerIsCppWriter"; + const std::string pipeName = TEST_PIPE_NAME + "testServerIsCppWriter"; ml::test::CThreadDataReader threadReader{PAUSE_TIME_MS, MAX_ATTEMPTS, pipeName}; BOOST_TEST_REQUIRE(threadReader.start()); @@ -164,7 +162,7 @@ BOOST_AUTO_TEST_CASE(testServerIsCppWriter) { } BOOST_AUTO_TEST_CASE(testServerIsCWriter) { - const std::string 
pipeName=TEST_PIPE_NAME+"testServerIsCWriter"; + const std::string pipeName = TEST_PIPE_NAME + "testServerIsCWriter"; ml::test::CThreadDataReader threadReader{PAUSE_TIME_MS, MAX_ATTEMPTS, pipeName}; BOOST_TEST_REQUIRE(threadReader.start()); From 62dce12f9cbc51917aaf95af9e5db434d5b1c19c Mon Sep 17 00:00:00 2001 From: Ed Savage Date: Fri, 4 Jul 2025 12:35:55 +1200 Subject: [PATCH 11/25] Tidy up of scripts --- cmake/test-runner.cmake | 8 ++-- dev-tools/docker_test.sh | 2 +- generate_test_names.py | 77 ------------------------------ merge_results.sh | 41 ---------------- run_tests_as_seperate_processes.sh | 1 - 5 files changed, 5 insertions(+), 124 deletions(-) delete mode 100755 generate_test_names.py delete mode 100755 merge_results.sh diff --git a/cmake/test-runner.cmake b/cmake/test-runner.cmake index 7c13d59f0..c0acac469 100644 --- a/cmake/test-runner.cmake +++ b/cmake/test-runner.cmake @@ -35,15 +35,15 @@ else() set(SAFE_TEST_NAME "_${SAFE_TEST_NAME}") endif() - # If env var RUN_BOOST_TESTS_IN_BACKGROUND is defined run the tests in the background - message(STATUS "RUN_BOOST_TESTS_IN_BACKGROUND=$ENV{RUN_BOOST_TESTS_IN_BACKGROUND}") + # If env var RUN_BOOST_TESTS_IN_FOREGROUND is defined run the tests in the foreground + message(STATUS "RUN_BOOST_TESTS_IN_FOREGROUND=$ENV{RUN_BOOST_TESTS_IN_FOREGROUND}") if (DEFINED TEST_FLAGS OR DEFINED TESTS) string(REPLACE "boost_test_results" "boost_test_results${SAFE_TEST_NAME}" BOOST_TEST_OUTPUT_FORMAT_FLAGS "$ENV{BOOST_TEST_OUTPUT_FORMAT_FLAGS}") set(OUTPUT_FILE "${TEST_DIR}/${TEST_NAME}${SAFE_TEST_NAME}.out") set(FAILED_FILE "${TEST_DIR}/${TEST_NAME}${SAFE_TEST_NAME}.failed") - if(DEFINED ENV{RUN_BOOST_TESTS_IN_BACKGROUND}) + if(NOT DEFINED ENV{RUN_BOOST_TESTS_IN_FOREGROUND}) message(STATUS "executing process ${TEST_DIR}/${TEST_NAME} ${TEST_FLAGS} ${TESTS} ${BOOST_TEST_OUTPUT_FORMAT_FLAGS} --no_color_output") execute_process(COMMAND ${TEST_DIR}/${TEST_NAME} ${TEST_FLAGS} ${TESTS} ${BOOST_TEST_OUTPUT_FORMAT_FLAGS} 
--no_color_output OUTPUT_FILE ${OUTPUT_FILE} ERROR_FILE ${OUTPUT_FILE} RESULT_VARIABLE TEST_SUCCESS) message(STATUS "TESTS EXITED WITH SUCCESS ${TEST_SUCCESS}") @@ -53,7 +53,7 @@ else() message(STATUS "TESTS EXITED WITH SUCCESS ${TEST_SUCCESS}") endif() else() - if(DEFINED ENV{RUN_BOOST_TESTS_IN_BACKGROUND}) + if(NOT DEFINED ENV{RUN_BOOST_TESTS_IN_FOREGROUND}) execute_process(COMMAND ${TEST_DIR}/${TEST_NAME} $ENV{TEST_FLAGS} $ENV{BOOST_TEST_OUTPUT_FORMAT_FLAGS} --no_color_output OUTPUT_FILE ${OUTPUT_FILE} ERROR_FILE ${OUTPUT_FILE} RESULT_VARIABLE TEST_SUCCESS) else() diff --git a/dev-tools/docker_test.sh b/dev-tools/docker_test.sh index 44a972b6c..aed18fa60 100755 --- a/dev-tools/docker_test.sh +++ b/dev-tools/docker_test.sh @@ -25,7 +25,7 @@ usage() { } PLATFORMS= -EXTRACT_FIND="-name boost_test_results\*.xml -o -name boost_test_results\*.junit" +EXTRACT_FIND="-name boost_test_results.xml -o -name boost_test_results.junit" EXTRACT_EXPLICIT="build/distributions build/test_status.txt" while [ -n "$1" ] diff --git a/generate_test_names.py b/generate_test_names.py deleted file mode 100755 index 805c169d6..000000000 --- a/generate_test_names.py +++ /dev/null @@ -1,77 +0,0 @@ -#!/usr/bin/env python3 -# Copyright Elasticsearch B.V. and/or licensed to Elasticsearch B.V. under one -# or more contributor license agreements. Licensed under the Elastic License -# 2.0 and the following additional limitation. Functionality enabled by the -# files subject to the Elastic License 2.0 may only be used in production when -# invoked by an Elasticsearch process with a license key installed that permits -# use of machine learning features. You may not use this file except in -# compliance with the Elastic License 2.0 and the foregoing additional -# limitation. 
- -# This script provides a wrapper around a call to a BOOST test executable -# to return a formatted list of tests such that each fully qualified test -# name would be in a form suitable to being passed to BOOST test's "--run_test" -# parameter. -# It takes precisely one positional parameter, the path to a BOOST test executable. - - -import argparse -import re -import subprocess -import sys - - -def parse_arguments(): - parser = argparse.ArgumentParser() - parser.add_argument('exec_path', help='The path to the ml_test suite executable') - return parser.parse_args() - -def get_qualified_test_names(executable_path): - - cmd = [args.exec_path, "--list_content"] - process = subprocess.run(cmd, capture_output=True, text=True, check=True) - output_lines = process.stderr.splitlines() - - test_names = [] - current_suite_stack = [] - - for line in output_lines: - match_suite = re.match(r'^( *)(C.*Test)\*$', line) - match_case = re.match(r'^( *)(test.*)\*$', line) - - if match_suite: - indent_level = len(match_suite.group(1)) - suite_name = match_suite.group(2) - - # Pop suites from stack if current indent is less or equal - while current_suite_stack and len(current_suite_stack[-1][0]) >= indent_level: - current_suite_stack.pop() - - current_suite_stack.append((match_suite.group(1), suite_name)) - elif match_case: - indent_level = len(match_case.group(1)) - case_name = match_case.group(2) - - # Pop suites from stack if current indent is less (for sibling suites/cases) - while current_suite_stack and len(current_suite_stack[-1][0]) >= indent_level: - current_suite_stack.pop() - - full_path = "/".join([s[1] for s in current_suite_stack] + [case_name]) - test_names.append(full_path) - return test_names - -if __name__ == "__main__": - args = parse_arguments() - try: - names = get_qualified_test_names(args.exec_path) - for name in names: - print(name) - except subprocess.CalledProcessError as e: - print(f"Error listing tests: {e.stderr}", file=sys.stderr) - sys.exit(1) - except 
FileNotFoundError: - print(f"Error: Test executable '{args.exec_path}' not found.", file=sys.stderr) - sys.exit(1) - except Exception as e: - print(f"An unexpected error occurred: {e}", file=sys.stderr) - sys.exit(1) diff --git a/merge_results.sh b/merge_results.sh deleted file mode 100755 index c1d544609..000000000 --- a/merge_results.sh +++ /dev/null @@ -1,41 +0,0 @@ -#!/bin/bash -# -# Copyright Elasticsearch B.V. and/or licensed to Elasticsearch B.V. under one -# or more contributor license agreements. Licensed under the Elastic License -# 2.0 and the following additional limitation. Functionality enabled by the -# files subject to the Elastic License 2.0 may only be used in production when -# invoked by an Elasticsearch process with a license key installed that permits -# use of machine learning features. You may not use this file except in -# compliance with the Elastic License 2.0 and the foregoing additional -# limitation. -# - -# This script amalgamates multiple JUNIT results files from individual tests -# into one, omitting any test cases that have been skipped. The result is -# output to stdout. - -if [ $# -lt 1 ]; then - echo "Usage: $0 " - exit 1 -fi - -JUNIT_FILES="$@" - -echo "" -cat $JUNIT_FILES | \ - gawk -n ' - BEGIN{tests=0; skipped=0; errors=0; failures=0; id=""; time=0.0; name=""} - { - where=match($0, /"}' - -cat $JUNIT_FILES | sed -e '/xml/d' -e '/testsuite/d' -e '//{H;d;};x;/skipped/d' | grep '.' 
-echo "" -echo - diff --git a/run_tests_as_seperate_processes.sh b/run_tests_as_seperate_processes.sh index 8011bc0fe..43daf0819 100755 --- a/run_tests_as_seperate_processes.sh +++ b/run_tests_as_seperate_processes.sh @@ -91,7 +91,6 @@ if [ -z "$ALL_TEST_NAMES" ]; then fi EXIT_CODE=0 -export RUN_BOOST_TESTS_IN_BACKGROUND=1 function execute_tests() { From 55875c001d293c6325001c0132584911356dca3a Mon Sep 17 00:00:00 2001 From: Ed Savage Date: Fri, 4 Jul 2025 14:40:50 +1200 Subject: [PATCH 12/25] Further tidy up of scripts --- .buildkite/scripts/steps/build_and_test.sh | 2 +- cmake/test-runner.cmake | 81 +++++++++------------- run_tests_as_seperate_processes.sh | 4 +- 3 files changed, 38 insertions(+), 49 deletions(-) diff --git a/.buildkite/scripts/steps/build_and_test.sh b/.buildkite/scripts/steps/build_and_test.sh index 5fc133874..023d4c748 100755 --- a/.buildkite/scripts/steps/build_and_test.sh +++ b/.buildkite/scripts/steps/build_and_test.sh @@ -106,7 +106,7 @@ fi if [[ -z "$CPP_CROSS_COMPILE" ]] ; then OS=$(uname -s | tr "A-Z" "a-z") TEST_RESULTS_ARCHIVE=${OS}-${HARDWARE_ARCH}-unit_test_results.tgz - find . -path "*/**/ml_test_*.out" -o -path "*/**/*.junit" | xargs tar cvzf ${TEST_RESULTS_ARCHIVE} + find . 
\( -path "*/**/ml_test_*.out" -o -path "*/**/*.junit" \) -print0 | tar czf ${TEST_RESULTS_ARCHIVE} --null -T - buildkite-agent artifact upload "${TEST_RESULTS_ARCHIVE}" fi diff --git a/cmake/test-runner.cmake b/cmake/test-runner.cmake index c0acac469..481b2dd09 100644 --- a/cmake/test-runner.cmake +++ b/cmake/test-runner.cmake @@ -14,60 +14,47 @@ execute_process(COMMAND ${CMAKE_COMMAND} -E rm -f ${TEST_DIR}/*.failed) execute_process(COMMAND ${CMAKE_COMMAND} -E rm -f boost_test_results*.xml) execute_process(COMMAND ${CMAKE_COMMAND} -E rm -f boost_test_results*.junit) -set(INDIVIDUAL_TEST "CAnnotationJsonWriterTest/testWrite") - -if(TEST_NAME STREQUAL "ml_test_seccomp") - execute_process(COMMAND ${TEST_DIR}/${TEST_NAME} $ENV{BOOST_TEST_OUTPUT_FORMAT_FLAGS} --logger=HRF,all --report_format=HRF --show_progress=no --no_color_output OUTPUT_FILE ${TEST_DIR}/${TEST_NAME}.out ERROR_FILE ${TEST_DIR}/${TEST_NAME}.out RESULT_VARIABLE TEST_SUCCESS) -else() - # Turn the TEST_FLAGS environment variable into a CMake list variable - if (DEFINED ENV{TEST_FLAGS} AND NOT "$ENV{TEST_FLAGS}" STREQUAL "") - string(REPLACE " " ";" TEST_FLAGS $ENV{TEST_FLAGS}) - endif() - +# Turn the TEST_FLAGS environment variable into a CMake list variable +if (DEFINED ENV{TEST_FLAGS} AND NOT "$ENV{TEST_FLAGS}" STREQUAL "") + string(REPLACE " " ";" TEST_FLAGS $ENV{TEST_FLAGS}) +endif() - set(SAFE_TEST_NAME "") - set(OUTPUT_FILE "${TEST_DIR}/${TEST_NAME}.out") - set(FAILED_FILE "${TEST_DIR}/${TEST_NAME}.failed") - # Special case for specifying a subset of tests to run (can be regex) - if (DEFINED ENV{TESTS} AND NOT "$ENV{TESTS}" STREQUAL "") - set(TESTS "--run_test=$ENV{TESTS}") - string(REGEX REPLACE "[^a-zA-Z0-9_]" "_" SAFE_TEST_NAME "$ENV{TESTS}") - set(SAFE_TEST_NAME "_${SAFE_TEST_NAME}") - endif() +set(SAFE_TEST_NAME "") +set(TESTS "") +# Special case for specifying a subset of tests to run (can be regex) +if (DEFINED ENV{TESTS} AND NOT "$ENV{TESTS}" STREQUAL "") + set(TESTS "--run_test=$ENV{TESTS}") 
+ string(REGEX REPLACE "[^a-zA-Z0-9_]" "_" SAFE_TEST_NAME "$ENV{TESTS}") + set(SAFE_TEST_NAME "_${SAFE_TEST_NAME}") +endif() - # If env var RUN_BOOST_TESTS_IN_FOREGROUND is defined run the tests in the foreground - message(STATUS "RUN_BOOST_TESTS_IN_FOREGROUND=$ENV{RUN_BOOST_TESTS_IN_FOREGROUND}") +message(STATUS "SAFE_TEST_NAME=${SAFE_TEST_NAME}") +string(REPLACE "boost_test_results" "boost_test_results${SAFE_TEST_NAME}" BOOST_TEST_OUTPUT_FORMAT_FLAGS "$ENV{BOOST_TEST_OUTPUT_FORMAT_FLAGS}") +set(OUTPUT_FILE "${TEST_DIR}/${TEST_NAME}${SAFE_TEST_NAME}.out") +set(FAILED_FILE "${TEST_DIR}/${TEST_NAME}${SAFE_TEST_NAME}.failed") - if (DEFINED TEST_FLAGS OR DEFINED TESTS) - string(REPLACE "boost_test_results" "boost_test_results${SAFE_TEST_NAME}" BOOST_TEST_OUTPUT_FORMAT_FLAGS "$ENV{BOOST_TEST_OUTPUT_FORMAT_FLAGS}") - set(OUTPUT_FILE "${TEST_DIR}/${TEST_NAME}${SAFE_TEST_NAME}.out") - set(FAILED_FILE "${TEST_DIR}/${TEST_NAME}${SAFE_TEST_NAME}.failed") +# If env var RUN_BOOST_TESTS_IN_FOREGROUND is defined run the tests in the foreground +message(STATUS "RUN_BOOST_TESTS_IN_FOREGROUND=$ENV{RUN_BOOST_TESTS_IN_FOREGROUND}") - if(NOT DEFINED ENV{RUN_BOOST_TESTS_IN_FOREGROUND}) - message(STATUS "executing process ${TEST_DIR}/${TEST_NAME} ${TEST_FLAGS} ${TESTS} ${BOOST_TEST_OUTPUT_FORMAT_FLAGS} --no_color_output") - execute_process(COMMAND ${TEST_DIR}/${TEST_NAME} ${TEST_FLAGS} ${TESTS} ${BOOST_TEST_OUTPUT_FORMAT_FLAGS} --no_color_output OUTPUT_FILE ${OUTPUT_FILE} ERROR_FILE ${OUTPUT_FILE} RESULT_VARIABLE TEST_SUCCESS) - message(STATUS "TESTS EXITED WITH SUCCESS ${TEST_SUCCESS}") - else() - message(STATUS "executing process ${TEST_DIR}/${TEST_NAME} ${TEST_FLAGS} ${TESTS} ${BOOST_TEST_OUTPUT_FORMAT_FLAGS}") - execute_process(COMMAND ${TEST_DIR}/${TEST_NAME} ${TEST_FLAGS} ${TESTS} ${BOOST_TEST_OUTPUT_FORMAT_FLAGS} RESULT_VARIABLE TEST_SUCCESS) - message(STATUS "TESTS EXITED WITH SUCCESS ${TEST_SUCCESS}") - endif() +if(TEST_NAME STREQUAL "ml_test_seccomp") + execute_process(COMMAND 
${TEST_DIR}/${TEST_NAME} ${TEST_FLAGS} ${TESTS} ${BOOST_TEST_OUTPUT_FORMAT_FLAGS} --logger=HRF,all --report_format=HRF --show_progress=no --no_color_output OUTPUT_FILE ${OUTPUT_FILE} ERROR_FILE ${OUTPUT_FILE} RESULT_VARIABLE TEST_SUCCESS) +else() + if(NOT DEFINED ENV{RUN_BOOST_TESTS_IN_FOREGROUND}) + message(STATUS "executing process ${TEST_DIR}/${TEST_NAME} ${TEST_FLAGS} ${TESTS} ${BOOST_TEST_OUTPUT_FORMAT_FLAGS} --no_color_output") + execute_process(COMMAND ${TEST_DIR}/${TEST_NAME} ${TEST_FLAGS} ${TESTS} ${BOOST_TEST_OUTPUT_FORMAT_FLAGS} --no_color_output OUTPUT_FILE ${OUTPUT_FILE} ERROR_FILE ${OUTPUT_FILE} RESULT_VARIABLE TEST_SUCCESS) else() - if(NOT DEFINED ENV{RUN_BOOST_TESTS_IN_FOREGROUND}) - execute_process(COMMAND ${TEST_DIR}/${TEST_NAME} $ENV{TEST_FLAGS} $ENV{BOOST_TEST_OUTPUT_FORMAT_FLAGS} - --no_color_output OUTPUT_FILE ${OUTPUT_FILE} ERROR_FILE ${OUTPUT_FILE} RESULT_VARIABLE TEST_SUCCESS) - else() - execute_process(COMMAND ${TEST_DIR}/${TEST_NAME} ${TEST_FLAGS} ${TESTS} ${BOOST_TEST_OUTPUT_FORMAT_FLAGS} RESULT_VARIABLE TEST_SUCCESS) - endif() + message(STATUS "executing process ${TEST_DIR}/${TEST_NAME} ${TEST_FLAGS} ${TESTS} ${BOOST_TEST_OUTPUT_FORMAT_FLAGS}") + execute_process(COMMAND ${TEST_DIR}/${TEST_NAME} ${TEST_FLAGS} ${TESTS} ${BOOST_TEST_OUTPUT_FORMAT_FLAGS} RESULT_VARIABLE TEST_SUCCESS) endif() +endif() - if (NOT TEST_SUCCESS EQUAL 0) - if (EXISTS ${TEST_DIR}/${TEST_NAME}) - execute_process(COMMAND ${CMAKE_COMMAND} -E cat ${OUTPUT_FILE}) - file(WRITE "${TEST_DIR}/${FAILED_FILE}" "") - endif() - message(FATAL_ERROR "Exiting with status ${TEST_SUCCESS}") - endif() +message(STATUS "TESTS EXITED WITH SUCCESS ${TEST_SUCCESS}") +if (NOT TEST_SUCCESS EQUAL 0) + if (EXISTS ${TEST_DIR}/${TEST_NAME}) + execute_process(COMMAND ${CMAKE_COMMAND} -E cat ${OUTPUT_FILE}) + file(WRITE "${TEST_DIR}/${FAILED_FILE}" "") + endif() + message(FATAL_ERROR "Exiting with status ${TEST_SUCCESS}") endif() diff --git a/run_tests_as_seperate_processes.sh 
b/run_tests_as_seperate_processes.sh index 43daf0819..641552f12 100755 --- a/run_tests_as_seperate_processes.sh +++ b/run_tests_as_seperate_processes.sh @@ -164,6 +164,8 @@ echo "" echo } -merge_junit_results $TEST_DIR/boost_test_results_C*.junit > $TEST_DIR/boost_test_results.junit +if [ "$TEST_SUITE" != "test_seccomp" ]; then + merge_junit_results $TEST_DIR/boost_test_results_C*.junit > $TEST_DIR/boost_test_results.junit +fi exit $EXIT_CODE From 9c6735380ba53609eecc1df22adab0af5f80a8e9 Mon Sep 17 00:00:00 2001 From: Ed Savage Date: Fri, 4 Jul 2025 15:59:08 +1200 Subject: [PATCH 13/25] Tweaks for linux aarch64 --- dev-tools/docker/docker_entrypoint.sh | 2 +- dev-tools/docker_test.sh | 5 ++++- 2 files changed, 5 insertions(+), 2 deletions(-) diff --git a/dev-tools/docker/docker_entrypoint.sh b/dev-tools/docker/docker_entrypoint.sh index 1585356c4..8311efffe 100755 --- a/dev-tools/docker/docker_entrypoint.sh +++ b/dev-tools/docker/docker_entrypoint.sh @@ -66,6 +66,6 @@ if [ "x$1" = "x--test" ] ; then # failure is the unit tests, and then the detailed test results can be # copied from the image echo passed > build/test_status.txt - cmake --build cmake-build-docker ${CMAKE_VERBOSE} -j $(( `nproc`/2 )) -t test_individually || echo failed > build/test_status.txt + cmake --build cmake-build-docker ${CMAKE_VERBOSE} -j $(nproc) -t test_individually || echo failed > build/test_status.txt fi diff --git a/dev-tools/docker_test.sh b/dev-tools/docker_test.sh index aed18fa60..016795ce7 100755 --- a/dev-tools/docker_test.sh +++ b/dev-tools/docker_test.sh @@ -92,7 +92,10 @@ do # Using tar to copy the build and test artifacts out of the container seems # more reliable than docker cp, and also means the files end up with the # correct uid/gid - docker run --rm --workdir=/ml-cpp $TEMP_TAG bash -c "find . $EXTRACT_FIND | xargs tar cf - $EXTRACT_EXPLICIT && sleep 30" | tar xvf - + docker run --rm --workdir=/ml-cpp $TEMP_TAG bash -c "find . 
\( $EXTRACT_FIND \) -print0 | tar cf - $EXTRACT_EXPLICIT --null -T - && sleep 60" | tar xf - + if [ $? != 0 ]; then + echo "Copying build and test artifacts from docker container failed" + fi docker rmi --force $TEMP_TAG # The image build is set to return zero (i.e. succeed as far as Docker is # concerned) when the only problem is that the unit tests fail, as this From d265102884d237d9af28a3628dc54e89fcacc6d0 Mon Sep 17 00:00:00 2001 From: Ed Savage Date: Mon, 7 Jul 2025 09:49:35 +1200 Subject: [PATCH 14/25] Tweak for linux aarch64 seccomp test --- dev-tools/docker_test.sh | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/dev-tools/docker_test.sh b/dev-tools/docker_test.sh index 016795ce7..013a3f3ec 100755 --- a/dev-tools/docker_test.sh +++ b/dev-tools/docker_test.sh @@ -92,7 +92,7 @@ do # Using tar to copy the build and test artifacts out of the container seems # more reliable than docker cp, and also means the files end up with the # correct uid/gid - docker run --rm --workdir=/ml-cpp $TEMP_TAG bash -c "find . \( $EXTRACT_FIND \) -print0 | tar cf - $EXTRACT_EXPLICIT --null -T - && sleep 60" | tar xf - + docker run --rm --workdir=/ml-cpp $TEMP_TAG bash -c "find . \( $EXTRACT_FIND \) -print0 | tar cf - $EXTRACT_EXPLICIT --null -T - | tar xvf - if [ $? 
!= 0 ]; then echo "Copying build and test artifacts from docker container failed" fi From 2cf57527bc5bfb388560428345c7aaa7f5744ed9 Mon Sep 17 00:00:00 2001 From: Ed Savage Date: Mon, 7 Jul 2025 10:30:54 +1200 Subject: [PATCH 15/25] Typo --- dev-tools/docker_test.sh | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/dev-tools/docker_test.sh b/dev-tools/docker_test.sh index 013a3f3ec..22a46f4c9 100755 --- a/dev-tools/docker_test.sh +++ b/dev-tools/docker_test.sh @@ -92,7 +92,7 @@ do # Using tar to copy the build and test artifacts out of the container seems # more reliable than docker cp, and also means the files end up with the # correct uid/gid - docker run --rm --workdir=/ml-cpp $TEMP_TAG bash -c "find . \( $EXTRACT_FIND \) -print0 | tar cf - $EXTRACT_EXPLICIT --null -T - | tar xvf - + docker run --rm --workdir=/ml-cpp $TEMP_TAG bash -c "find . \( $EXTRACT_FIND \) -print0 | tar cf - $EXTRACT_EXPLICIT --null -T -" | tar xvf - if [ $? != 0 ]; then echo "Copying build and test artifacts from docker container failed" fi From 3ace6f454a724695158307ee05a3baf9f46902d9 Mon Sep 17 00:00:00 2001 From: Ed Savage Date: Tue, 8 Jul 2025 13:32:21 +1200 Subject: [PATCH 16/25] Better isolation of tests --- lib/api/unittest/CMultiFileDataAdderTest.cc | 17 ++++++++++- lib/core/unittest/CLoggerTest.cc | 20 +++++++------ lib/core/unittest/CNamedPipeFactoryTest.cc | 21 +++++++------- run_tests_as_seperate_processes.sh | 31 +++++++++++++-------- 4 files changed, 59 insertions(+), 30 deletions(-) diff --git a/lib/api/unittest/CMultiFileDataAdderTest.cc b/lib/api/unittest/CMultiFileDataAdderTest.cc index 96231c567..cd86625d0 100644 --- a/lib/api/unittest/CMultiFileDataAdderTest.cc +++ b/lib/api/unittest/CMultiFileDataAdderTest.cc @@ -37,6 +37,8 @@ #include #include #include +#include +#include // For random number generation facilities #include #include @@ -112,10 +114,18 @@ void detectorPersistHelper(const std::string& configFileName, std::string origBaseDocId(JOB_ID + '_' 
+ CTestAnomalyJob::STATE_TYPE + '_' + origSnapshotId); + // Create a random number to use to generate a unique file name for each test + // this allows tests to be run successfully in parallel + std::random_device rd; + std::mt19937 gen(rd()); + std::uniform_int_distribution<> distrib(1, 100); + std::ostringstream oss; + oss << distrib(gen); + std::string temp; TStrVec origFileContents(numOrigDocs); for (size_t index = 0; index < numOrigDocs; ++index) { - std::string expectedOrigFilename(baseOrigOutputFilename); + std::string expectedOrigFilename(baseOrigOutputFilename+"_"+oss.str()+"_"); expectedOrigFilename += "/_index/"; expectedOrigFilename += ml::core::CDataAdder::makeCurrentDocId(origBaseDocId, 1 + index); @@ -247,6 +257,10 @@ BOOST_AUTO_TEST_CASE(testSimpleWrite) { BOOST_REQUIRE_NO_THROW(boost::filesystem::remove_all(workDir)); } +#ifndef Linux // These disabled tests all fail when run as part of a full ml_test_api run on Linux, due to hard memory limits being hit. +// This is due to the ResourceMonitor.totalMemory() returning max_rss on that platform +// which means, as it never decreases for the lifetime of the ml_test_api process, that +// prior test cases can affect latter ones. 
BOOST_AUTO_TEST_CASE(testDetectorPersistBy) { detectorPersistHelper("testfiles/new_mlfields.json", "testfiles/big_ascending.txt", 0, "%d/%b/%Y:%T %z"); @@ -271,5 +285,6 @@ BOOST_AUTO_TEST_CASE(testDetectorPersistCount) { detectorPersistHelper("testfiles/new_persist_count.json", "testfiles/files_users_programs.csv", 5); } +#endif BOOST_AUTO_TEST_SUITE_END() diff --git a/lib/core/unittest/CLoggerTest.cc b/lib/core/unittest/CLoggerTest.cc index 1abec95da..d41d4cce3 100644 --- a/lib/core/unittest/CLoggerTest.cc +++ b/lib/core/unittest/CLoggerTest.cc @@ -49,12 +49,13 @@ class CTestFixture { } }; -std::function makeReader(std::ostringstream& loggedData) { - return [&loggedData] { +std::function makeReader(std::ostringstream& loggedData, const std::string& pipeName) { + return [&loggedData, &pipeName]() { + for (std::size_t attempt = 1; attempt <= 100; ++attempt) { // wait a bit so that pipe has been created std::this_thread::sleep_for(std::chrono::milliseconds(50)); - std::ifstream strm(TEST_PIPE_NAME); + std::ifstream strm(pipeName); if (strm.is_open()) { std::copy(std::istreambuf_iterator(strm), std::istreambuf_iterator(), @@ -62,7 +63,7 @@ std::function makeReader(std::ostringstream& loggedData) { return; } } - BOOST_FAIL("Failed to connect to logging pipe within a reasonable time"); + BOOST_FAIL("Failed to connect to logging pipe " + pipeName + " within a reasonable time"); }; } @@ -204,12 +205,13 @@ BOOST_FIXTURE_TEST_CASE(testNonAsciiJsonLogging, CTestFixture) { "Non-iso8859-15: 编码 test", "surrogate pair: 𐐷 test"}; std::ostringstream loggedData; - std::thread reader(makeReader(loggedData)); + const std::string& pipeName = std::string{TEST_PIPE_NAME}+"_testNonAsciiJsonLogging"; + std::thread reader(makeReader(loggedData, pipeName)); ml::core::CLogger& logger = ml::core::CLogger::instance(); // logger might have been reconfigured in previous tests, so reset and reconfigure it logger.reset(); - logger.reconfigure(TEST_PIPE_NAME, ""); + logger.reconfigure(pipeName, 
""); for (const auto& m : messages) { LOG_INFO(<< m); @@ -225,14 +227,16 @@ BOOST_FIXTURE_TEST_CASE(testNonAsciiJsonLogging, CTestFixture) { BOOST_FIXTURE_TEST_CASE(testWarnAndErrorThrottling, CTestFixture) { std::ostringstream loggedData; - std::thread reader{makeReader(loggedData)}; + const std::string& pipeName = std::string{TEST_PIPE_NAME}+"_testWarnAndErrorThrottling"; + + std::thread reader{makeReader(loggedData, pipeName)}; TStrVec messages{"Warn should only be seen once", "Error should only be seen once"}; ml::core::CLogger& logger = ml::core::CLogger::instance(); // logger might have been reconfigured in previous tests, so reset and reconfigure it logger.reset(); - logger.reconfigure(TEST_PIPE_NAME, ""); + logger.reconfigure(pipeName, ""); for (std::size_t i = 0; i < 10; ++i) { LOG_WARN(<< messages[0]); diff --git a/lib/core/unittest/CNamedPipeFactoryTest.cc b/lib/core/unittest/CNamedPipeFactoryTest.cc index ae90cf48d..e94fcd689 100644 --- a/lib/core/unittest/CNamedPipeFactoryTest.cc +++ b/lib/core/unittest/CNamedPipeFactoryTest.cc @@ -71,7 +71,7 @@ class CThreadBlockCanceller : public ml::core::CThread { } BOOST_AUTO_TEST_CASE(testServerIsCppReader) { - const std::string pipeName = TEST_PIPE_NAME + "testServerIsCppReader"; + const std::string pipeName = TEST_PIPE_NAME + "_testServerIsCppReader"; ml::test::CThreadDataWriter threadWriter{SLEEP_TIME_MS, pipeName, TEST_CHAR, TEST_SIZE}; BOOST_TEST_REQUIRE(threadWriter.start()); @@ -100,7 +100,7 @@ BOOST_AUTO_TEST_CASE(testServerIsCppReader) { } BOOST_AUTO_TEST_CASE(testServerIsCReader) { - const std::string pipeName = TEST_PIPE_NAME + "testServerIsCReader"; + const std::string pipeName = TEST_PIPE_NAME + "_testServerIsCReader"; ml::test::CThreadDataWriter threadWriter{SLEEP_TIME_MS, pipeName, TEST_CHAR, TEST_SIZE}; BOOST_TEST_REQUIRE(threadWriter.start()); @@ -130,7 +130,7 @@ BOOST_AUTO_TEST_CASE(testServerIsCReader) { } BOOST_AUTO_TEST_CASE(testServerIsCppWriter) { - const std::string pipeName = 
TEST_PIPE_NAME + "testServerIsCppWriter"; + const std::string pipeName = TEST_PIPE_NAME + "_testServerIsCppWriter"; ml::test::CThreadDataReader threadReader{PAUSE_TIME_MS, MAX_ATTEMPTS, pipeName}; BOOST_TEST_REQUIRE(threadReader.start()); @@ -162,7 +162,7 @@ BOOST_AUTO_TEST_CASE(testServerIsCppWriter) { } BOOST_AUTO_TEST_CASE(testServerIsCWriter) { - const std::string pipeName = TEST_PIPE_NAME + "testServerIsCWriter"; + const std::string pipeName = TEST_PIPE_NAME + "_testServerIsCWriter"; ml::test::CThreadDataReader threadReader{PAUSE_TIME_MS, MAX_ATTEMPTS, pipeName}; BOOST_TEST_REQUIRE(threadReader.start()); @@ -198,7 +198,7 @@ BOOST_AUTO_TEST_CASE(testCancelBlock) { BOOST_TEST_REQUIRE(cancellerThread.start()); ml::core::CNamedPipeFactory::TOStreamP strm{ml::core::CNamedPipeFactory::openPipeStreamWrite( - TEST_PIPE_NAME, cancellerThread.hasCancelledBlockingCall())}; + TEST_PIPE_NAME+"_testCancelBlock", cancellerThread.hasCancelledBlockingCall())}; BOOST_TEST_REQUIRE(strm == nullptr); BOOST_TEST_REQUIRE(cancellerThread.stop()); @@ -220,15 +220,16 @@ BOOST_AUTO_TEST_CASE(testErrorIfSymlink) { // Suppress the error about no assertions in this case BOOST_REQUIRE(BOOST_IS_DEFINED(Windows)); #else - const std::string TEST_SYMLINK_NAME{"test_symlink"}; + const std::string TEST_SYMLINK_NAME{"test_symlink_testErrorIfSymlink"}; + const std::string testPipeName{TEST_PIPE_NAME+"_test_symlink_testErrorIfSymlink"}; // Remove any files left behind by a previous failed test, but don't check // the return codes as these calls will usually fail ::unlink(TEST_SYMLINK_NAME.c_str()); - ::unlink(TEST_PIPE_NAME.c_str()); + ::unlink(testPipeName.c_str()); - BOOST_REQUIRE_EQUAL(0, ::mkfifo(TEST_PIPE_NAME.c_str(), S_IRUSR | S_IWUSR)); - BOOST_REQUIRE_EQUAL(0, ::symlink(TEST_PIPE_NAME.c_str(), TEST_SYMLINK_NAME.c_str())); + BOOST_REQUIRE_EQUAL(0, ::mkfifo(testPipeName.c_str(), S_IRUSR | S_IWUSR)); + BOOST_REQUIRE_EQUAL(0, ::symlink(testPipeName.c_str(), TEST_SYMLINK_NAME.c_str())); 
std::atomic_bool dummy{false}; ml::core::CNamedPipeFactory::TIStreamP strm{ @@ -236,7 +237,7 @@ BOOST_AUTO_TEST_CASE(testErrorIfSymlink) { BOOST_TEST_REQUIRE(strm == nullptr); BOOST_REQUIRE_EQUAL(0, ::unlink(TEST_SYMLINK_NAME.c_str())); - BOOST_REQUIRE_EQUAL(0, ::unlink(TEST_PIPE_NAME.c_str())); + BOOST_REQUIRE_EQUAL(0, ::unlink(testPipeName.c_str())); #endif } diff --git a/run_tests_as_seperate_processes.sh b/run_tests_as_seperate_processes.sh index 641552f12..888efcc53 100755 --- a/run_tests_as_seperate_processes.sh +++ b/run_tests_as_seperate_processes.sh @@ -48,9 +48,6 @@ TEST_DIR=${CPP_SRC_HOME}/$(echo $BINARY_DIR | sed "s|$BUILD_DIR/test/||") export TEST_EXECUTABLE="$2/ml_$3" export LOG_DIR="$2/test_logs" -MAX_ARGS=2 -MAX_PROCS=4 - if [[ -n "$BOOST_TEST_MAX_ARGS" ]]; then MAX_ARGS=$BOOST_TEST_MAX_ARGS fi @@ -62,6 +59,17 @@ fi rm -rf "$LOG_DIR" mkdir -p "$LOG_DIR" +function num_procs() { + if [ `uname` = "Darwin" ]; then + sysctl -n hw.logicalcpu + else + nproc + fi +} + +MAX_ARGS=1 +MAX_PROCS=$(num_procs) + function get_qualified_test_names() { executable_path=$1 @@ -90,8 +98,6 @@ if [ -z "$ALL_TEST_NAMES" ]; then exit 1 fi -EXIT_CODE=0 - function execute_tests() { if [[ "$BOOST_TEST_MIXED_MODE" == "true" ]]; then @@ -117,7 +123,8 @@ function execute_tests() { echo "Test '$TEST_NAME' PASSED." else echo "Test '$TEST_NAME' FAILED with exit code $TEST_STATUS. Check '$LOG_FILE' for details." - EXIT_CODE=1 # Indicate overall failure if any test fails + echo "touch $SAFE_TEST_LOG_FILENAME.failed" + touch $SAFE_TEST_LOG_FILENAME.failed fi done } @@ -125,12 +132,15 @@ function execute_tests() { export -f execute_tests echo $ALL_TEST_NAMES | xargs -n $MAX_ARGS -P $MAX_PROCS bash -c 'execute_tests "$@"' _ - + echo "--------------------------------------------------" -if [ $EXIT_CODE -eq 0 ]; then - echo "$TEST_SUITE: All individual tests PASSED." -else + +if test -n "$(find . 
-maxdepth 1 -name '*.failed' -print -quit)" +then echo "$TEST_SUITE: Some individual tests FAILED. Check logs in '$LOG_DIR'." + echo found +else + echo "$TEST_SUITE: All individual tests PASSED." fi function merge_junit_results() { @@ -168,4 +178,3 @@ if [ "$TEST_SUITE" != "test_seccomp" ]; then merge_junit_results $TEST_DIR/boost_test_results_C*.junit > $TEST_DIR/boost_test_results.junit fi -exit $EXIT_CODE From 4014a721397f66e37183f7ee03b878cd8f3b4680 Mon Sep 17 00:00:00 2001 From: Ed Savage Date: Tue, 8 Jul 2025 15:02:57 +1200 Subject: [PATCH 17/25] Formatting --- lib/api/unittest/CMultiFileDataAdderTest.cc | 4 ++-- 1 file changed, 2 insertions(+), 2 deletions(-) diff --git a/lib/api/unittest/CMultiFileDataAdderTest.cc b/lib/api/unittest/CMultiFileDataAdderTest.cc index cd86625d0..1d718b9eb 100644 --- a/lib/api/unittest/CMultiFileDataAdderTest.cc +++ b/lib/api/unittest/CMultiFileDataAdderTest.cc @@ -37,8 +37,8 @@ #include #include #include -#include #include // For random number generation facilities +#include #include #include @@ -125,7 +125,7 @@ void detectorPersistHelper(const std::string& configFileName, std::string temp; TStrVec origFileContents(numOrigDocs); for (size_t index = 0; index < numOrigDocs; ++index) { - std::string expectedOrigFilename(baseOrigOutputFilename+"_"+oss.str()+"_"); + std::string expectedOrigFilename(baseOrigOutputFilename + "_" + oss.str() + "_"); expectedOrigFilename += "/_index/"; expectedOrigFilename += ml::core::CDataAdder::makeCurrentDocId(origBaseDocId, 1 + index); From e3d2f8adcbc431a8603e14a09e562318ff8b5b51 Mon Sep 17 00:00:00 2001 From: Ed Savage Date: Tue, 8 Jul 2025 15:07:15 +1200 Subject: [PATCH 18/25] Formatting --- lib/core/unittest/CLoggerTest.cc | 4 ++-- lib/core/unittest/CNamedPipeFactoryTest.cc | 4 ++-- 2 files changed, 4 insertions(+), 4 deletions(-) diff --git a/lib/core/unittest/CLoggerTest.cc b/lib/core/unittest/CLoggerTest.cc index d41d4cce3..e626fb8fc 100644 --- a/lib/core/unittest/CLoggerTest.cc +++ 
b/lib/core/unittest/CLoggerTest.cc @@ -205,7 +205,7 @@ BOOST_FIXTURE_TEST_CASE(testNonAsciiJsonLogging, CTestFixture) { "Non-iso8859-15: 编码 test", "surrogate pair: 𐐷 test"}; std::ostringstream loggedData; - const std::string& pipeName = std::string{TEST_PIPE_NAME}+"_testNonAsciiJsonLogging"; + const std::string& pipeName = std::string{TEST_PIPE_NAME} + "_testNonAsciiJsonLogging"; std::thread reader(makeReader(loggedData, pipeName)); ml::core::CLogger& logger = ml::core::CLogger::instance(); @@ -227,7 +227,7 @@ BOOST_FIXTURE_TEST_CASE(testNonAsciiJsonLogging, CTestFixture) { BOOST_FIXTURE_TEST_CASE(testWarnAndErrorThrottling, CTestFixture) { std::ostringstream loggedData; - const std::string& pipeName = std::string{TEST_PIPE_NAME}+"_testWarnAndErrorThrottling"; + const std::string& pipeName = std::string{TEST_PIPE_NAME} + "_testWarnAndErrorThrottling"; std::thread reader{makeReader(loggedData, pipeName)}; diff --git a/lib/core/unittest/CNamedPipeFactoryTest.cc b/lib/core/unittest/CNamedPipeFactoryTest.cc index e94fcd689..19dc01323 100644 --- a/lib/core/unittest/CNamedPipeFactoryTest.cc +++ b/lib/core/unittest/CNamedPipeFactoryTest.cc @@ -198,7 +198,7 @@ BOOST_AUTO_TEST_CASE(testCancelBlock) { BOOST_TEST_REQUIRE(cancellerThread.start()); ml::core::CNamedPipeFactory::TOStreamP strm{ml::core::CNamedPipeFactory::openPipeStreamWrite( - TEST_PIPE_NAME+"_testCancelBlock", cancellerThread.hasCancelledBlockingCall())}; + TEST_PIPE_NAME + "_testCancelBlock", cancellerThread.hasCancelledBlockingCall())}; BOOST_TEST_REQUIRE(strm == nullptr); BOOST_TEST_REQUIRE(cancellerThread.stop()); @@ -221,7 +221,7 @@ BOOST_AUTO_TEST_CASE(testErrorIfSymlink) { BOOST_REQUIRE(BOOST_IS_DEFINED(Windows)); #else const std::string TEST_SYMLINK_NAME{"test_symlink_testErrorIfSymlink"}; - const std::string testPipeName{TEST_PIPE_NAME+"_test_symlink_testErrorIfSymlink"}; + const std::string testPipeName{TEST_PIPE_NAME + "_test_symlink_testErrorIfSymlink"}; // Remove any files left behind by a previous 
failed test, but don't check // the return codes as these calls will usually fail From 7b398999b76e5ffc59b3e1b8ed93041617543939 Mon Sep 17 00:00:00 2001 From: Ed Savage Date: Tue, 8 Jul 2025 16:39:02 +1200 Subject: [PATCH 19/25] Reworking test case --- lib/api/unittest/CMultiFileDataAdderTest.cc | 26 +++++++++------------ 1 file changed, 11 insertions(+), 15 deletions(-) diff --git a/lib/api/unittest/CMultiFileDataAdderTest.cc b/lib/api/unittest/CMultiFileDataAdderTest.cc index 1d718b9eb..f03dd81fd 100644 --- a/lib/api/unittest/CMultiFileDataAdderTest.cc +++ b/lib/api/unittest/CMultiFileDataAdderTest.cc @@ -102,7 +102,16 @@ void detectorPersistHelper(const std::string& configFileName, // Persist the detector state to file(s) - std::string baseOrigOutputFilename(ml::test::CTestTmpDir::tmpDir() + "/orig"); + // Create a random number to use to generate a unique file name for each test + // this allows tests to be run successfully in parallel + std::random_device rd; + std::mt19937 gen(rd()); + std::uniform_int_distribution<> distrib(1, 100); + std::ostringstream oss; + oss << distrib(gen); + + std::string baseOrigOutputFilename(ml::test::CTestTmpDir::tmpDir() + + "/orig_" + oss.str()); { // Clean up any leftovers of previous failures boost::filesystem::path origDir(baseOrigOutputFilename); @@ -114,18 +123,10 @@ void detectorPersistHelper(const std::string& configFileName, std::string origBaseDocId(JOB_ID + '_' + CTestAnomalyJob::STATE_TYPE + '_' + origSnapshotId); - // Create a random number to use to generate a unique file name for each test - // this allows tests to be run successfully in parallel - std::random_device rd; - std::mt19937 gen(rd()); - std::uniform_int_distribution<> distrib(1, 100); - std::ostringstream oss; - oss << distrib(gen); - std::string temp; TStrVec origFileContents(numOrigDocs); for (size_t index = 0; index < numOrigDocs; ++index) { - std::string expectedOrigFilename(baseOrigOutputFilename + "_" + oss.str() + "_"); + std::string 
expectedOrigFilename(baseOrigOutputFilename); expectedOrigFilename += "/_index/"; expectedOrigFilename += ml::core::CDataAdder::makeCurrentDocId(origBaseDocId, 1 + index); @@ -257,10 +258,6 @@ BOOST_AUTO_TEST_CASE(testSimpleWrite) { BOOST_REQUIRE_NO_THROW(boost::filesystem::remove_all(workDir)); } -#ifndef Linux // These disabled tests all fail when run as part of a full ml_test_api run on Linux, due to hard memory limits being hit. -// This is due to the ResourceMonitor.totalMemory() returning max_rss on that platform -// which means, as it never decreases for the lifetime of the ml_test_api process, that -// prior test cases can affect latter ones. BOOST_AUTO_TEST_CASE(testDetectorPersistBy) { detectorPersistHelper("testfiles/new_mlfields.json", "testfiles/big_ascending.txt", 0, "%d/%b/%Y:%T %z"); @@ -285,6 +282,5 @@ BOOST_AUTO_TEST_CASE(testDetectorPersistCount) { detectorPersistHelper("testfiles/new_persist_count.json", "testfiles/files_users_programs.csv", 5); } -#endif BOOST_AUTO_TEST_SUITE_END() From 58fafb40bca5cfb7f4e0767861a8a11f1fa1d0da Mon Sep 17 00:00:00 2001 From: Ed Savage Date: Wed, 9 Jul 2025 15:45:14 +1200 Subject: [PATCH 20/25] Small tidy up --- cmake/test-runner.cmake | 10 +--------- run_tests_as_seperate_processes.sh | 23 +++++++++++------------ 2 files changed, 12 insertions(+), 21 deletions(-) diff --git a/cmake/test-runner.cmake b/cmake/test-runner.cmake index 481b2dd09..b17d85ba6 100644 --- a/cmake/test-runner.cmake +++ b/cmake/test-runner.cmake @@ -28,33 +28,25 @@ if (DEFINED ENV{TESTS} AND NOT "$ENV{TESTS}" STREQUAL "") set(SAFE_TEST_NAME "_${SAFE_TEST_NAME}") endif() -message(STATUS "SAFE_TEST_NAME=${SAFE_TEST_NAME}") string(REPLACE "boost_test_results" "boost_test_results${SAFE_TEST_NAME}" BOOST_TEST_OUTPUT_FORMAT_FLAGS "$ENV{BOOST_TEST_OUTPUT_FORMAT_FLAGS}") set(OUTPUT_FILE "${TEST_DIR}/${TEST_NAME}${SAFE_TEST_NAME}.out") set(FAILED_FILE "${TEST_DIR}/${TEST_NAME}${SAFE_TEST_NAME}.failed") # If env var RUN_BOOST_TESTS_IN_FOREGROUND is 
defined run the tests in the foreground -message(STATUS "RUN_BOOST_TESTS_IN_FOREGROUND=$ENV{RUN_BOOST_TESTS_IN_FOREGROUND}") - if(TEST_NAME STREQUAL "ml_test_seccomp") execute_process(COMMAND ${TEST_DIR}/${TEST_NAME} ${TEST_FLAGS} ${TESTS} ${BOOST_TEST_OUTPUT_FORMAT_FLAGS} --logger=HRF,all --report_format=HRF --show_progress=no --no_color_output OUTPUT_FILE ${OUTPUT_FILE} ERROR_FILE ${OUTPUT_FILE} RESULT_VARIABLE TEST_SUCCESS) else() if(NOT DEFINED ENV{RUN_BOOST_TESTS_IN_FOREGROUND}) - message(STATUS "executing process ${TEST_DIR}/${TEST_NAME} ${TEST_FLAGS} ${TESTS} ${BOOST_TEST_OUTPUT_FORMAT_FLAGS} --no_color_output") execute_process(COMMAND ${TEST_DIR}/${TEST_NAME} ${TEST_FLAGS} ${TESTS} ${BOOST_TEST_OUTPUT_FORMAT_FLAGS} --no_color_output OUTPUT_FILE ${OUTPUT_FILE} ERROR_FILE ${OUTPUT_FILE} RESULT_VARIABLE TEST_SUCCESS) else() - message(STATUS "executing process ${TEST_DIR}/${TEST_NAME} ${TEST_FLAGS} ${TESTS} ${BOOST_TEST_OUTPUT_FORMAT_FLAGS}") execute_process(COMMAND ${TEST_DIR}/${TEST_NAME} ${TEST_FLAGS} ${TESTS} ${BOOST_TEST_OUTPUT_FORMAT_FLAGS} RESULT_VARIABLE TEST_SUCCESS) endif() endif() -message(STATUS "TESTS EXITED WITH SUCCESS ${TEST_SUCCESS}") - if (NOT TEST_SUCCESS EQUAL 0) if (EXISTS ${TEST_DIR}/${TEST_NAME}) execute_process(COMMAND ${CMAKE_COMMAND} -E cat ${OUTPUT_FILE}) - file(WRITE "${TEST_DIR}/${FAILED_FILE}" "") + file(WRITE "${FAILED_FILE}" "") endif() - message(FATAL_ERROR "Exiting with status ${TEST_SUCCESS}") endif() diff --git a/run_tests_as_seperate_processes.sh b/run_tests_as_seperate_processes.sh index 888efcc53..a59149a60 100755 --- a/run_tests_as_seperate_processes.sh +++ b/run_tests_as_seperate_processes.sh @@ -48,17 +48,6 @@ TEST_DIR=${CPP_SRC_HOME}/$(echo $BINARY_DIR | sed "s|$BUILD_DIR/test/||") export TEST_EXECUTABLE="$2/ml_$3" export LOG_DIR="$2/test_logs" -if [[ -n "$BOOST_TEST_MAX_ARGS" ]]; then - MAX_ARGS=$BOOST_TEST_MAX_ARGS -fi - -if [[ -n "$BOOST_TEST_MAX_PROCS" ]]; then - MAX_PROCS=$BOOST_TEST_MAX_PROCS -fi - -rm -rf 
"$LOG_DIR" -mkdir -p "$LOG_DIR" - function num_procs() { if [ `uname` = "Darwin" ]; then sysctl -n hw.logicalcpu @@ -70,6 +59,17 @@ function num_procs() { MAX_ARGS=1 MAX_PROCS=$(num_procs) +if [[ -n "$BOOST_TEST_MAX_ARGS" ]]; then + MAX_ARGS=$BOOST_TEST_MAX_ARGS +fi + +if [[ -n "$BOOST_TEST_MAX_PROCS" ]]; then + MAX_PROCS=$BOOST_TEST_MAX_PROCS +fi + +rm -rf "$LOG_DIR" +mkdir -p "$LOG_DIR" + function get_qualified_test_names() { executable_path=$1 @@ -123,7 +123,6 @@ function execute_tests() { echo "Test '$TEST_NAME' PASSED." else echo "Test '$TEST_NAME' FAILED with exit code $TEST_STATUS. Check '$LOG_FILE' for details." - echo "touch $SAFE_TEST_LOG_FILENAME.failed" touch $SAFE_TEST_LOG_FILENAME.failed fi done From 033adef3829f672d826466d9202fe290ea9db92e Mon Sep 17 00:00:00 2001 From: Ed Savage Date: Fri, 11 Jul 2025 16:41:47 +1200 Subject: [PATCH 21/25] Minor tweaks to test runner script --- run_tests_as_seperate_processes.sh | 14 +++++++------- 1 file changed, 7 insertions(+), 7 deletions(-) diff --git a/run_tests_as_seperate_processes.sh b/run_tests_as_seperate_processes.sh index a59149a60..deb60a429 100755 --- a/run_tests_as_seperate_processes.sh +++ b/run_tests_as_seperate_processes.sh @@ -39,11 +39,11 @@ if [ $# -lt 3 ]; then exit fi -export BUILD_DIR=$1 -export BINARY_DIR=$2 +export BUILD_DIR=$( echo $1 | sed 's|/$||' ) +export BINARY_DIR=$( echo $2 | sed 's|/$||' ) export TEST_SUITE=$3 -TEST_DIR=${CPP_SRC_HOME}/$(echo $BINARY_DIR | sed "s|$BUILD_DIR/test/||") +TEST_DIR=${CPP_SRC_HOME}/$(echo $BINARY_DIR | sed -e "s|$BUILD_DIR/test/||" -e 's|unittest.*|unittest|') export TEST_EXECUTABLE="$2/ml_$3" export LOG_DIR="$2/test_logs" @@ -123,18 +123,18 @@ function execute_tests() { echo "Test '$TEST_NAME' PASSED." else echo "Test '$TEST_NAME' FAILED with exit code $TEST_STATUS. Check '$LOG_FILE' for details." 
- touch $SAFE_TEST_LOG_FILENAME.failed fi done } export -f execute_tests -echo $ALL_TEST_NAMES | xargs -n $MAX_ARGS -P $MAX_PROCS bash -c 'execute_tests "$@"' _ +RESULTS=$(echo $ALL_TEST_NAMES | xargs -n $MAX_ARGS -P $MAX_PROCS bash -c 'execute_tests "$@"' _) echo "--------------------------------------------------" -if test -n "$(find . -maxdepth 1 -name '*.failed' -print -quit)" +grep 'FAILED with exit code' <<< $RESULT +if [ $? -eq 0 ] then echo "$TEST_SUITE: Some individual tests FAILED. Check logs in '$LOG_DIR'." echo found @@ -173,7 +173,7 @@ echo "" echo } -if [ "$TEST_SUITE" != "test_seccomp" ]; then +if [[ $BOOST_TEST_OUTPUT_FORMAT_FLAGS =~ junit ]]; then merge_junit_results $TEST_DIR/boost_test_results_C*.junit > $TEST_DIR/boost_test_results.junit fi From fe71d2abb1617462056792fa9b5afc91945e8e74 Mon Sep 17 00:00:00 2001 From: Ed Savage Date: Mon, 14 Jul 2025 09:56:33 +1200 Subject: [PATCH 22/25] typo --- run_tests_as_seperate_processes.sh | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/run_tests_as_seperate_processes.sh b/run_tests_as_seperate_processes.sh index deb60a429..4208be8fd 100755 --- a/run_tests_as_seperate_processes.sh +++ b/run_tests_as_seperate_processes.sh @@ -129,7 +129,7 @@ function execute_tests() { export -f execute_tests -RESULTS=$(echo $ALL_TEST_NAMES | xargs -n $MAX_ARGS -P $MAX_PROCS bash -c 'execute_tests "$@"' _) +RESULT=$(echo $ALL_TEST_NAMES | xargs -n $MAX_ARGS -P $MAX_PROCS bash -c 'execute_tests "$@"' _) echo "--------------------------------------------------" From 98bfc7ff22f62e10caaf248a68697978b5e255b0 Mon Sep 17 00:00:00 2001 From: Ed Savage Date: Fri, 25 Jul 2025 14:56:45 +1200 Subject: [PATCH 23/25] Fix flaky test --- lib/api/unittest/CMultiFileDataAdderTest.cc | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/lib/api/unittest/CMultiFileDataAdderTest.cc b/lib/api/unittest/CMultiFileDataAdderTest.cc index f03dd81fd..b72f5eccf 100644 --- a/lib/api/unittest/CMultiFileDataAdderTest.cc +++ 
b/lib/api/unittest/CMultiFileDataAdderTest.cc @@ -163,7 +163,7 @@ void detectorPersistHelper(const std::string& configFileName, // Finally, persist the new detector state to a file - std::string baseRestoredOutputFilename(ml::test::CTestTmpDir::tmpDir() + "/restored"); + std::string baseRestoredOutputFilename(ml::test::CTestTmpDir::tmpDir() + "/restored_" + oss.str()); { // Clean up any leftovers of previous failures boost::filesystem::path restoredDir(baseRestoredOutputFilename); From 183e797b6d8c27e4ec7a8f950716b74b6ae5a1cd Mon Sep 17 00:00:00 2001 From: Ed Savage Date: Fri, 25 Jul 2025 16:04:18 +1200 Subject: [PATCH 24/25] Formatting --- lib/api/unittest/CMultiFileDataAdderTest.cc | 3 ++- 1 file changed, 2 insertions(+), 1 deletion(-) diff --git a/lib/api/unittest/CMultiFileDataAdderTest.cc b/lib/api/unittest/CMultiFileDataAdderTest.cc index b72f5eccf..f37d11c1f 100644 --- a/lib/api/unittest/CMultiFileDataAdderTest.cc +++ b/lib/api/unittest/CMultiFileDataAdderTest.cc @@ -163,7 +163,8 @@ void detectorPersistHelper(const std::string& configFileName, // Finally, persist the new detector state to a file - std::string baseRestoredOutputFilename(ml::test::CTestTmpDir::tmpDir() + "/restored_" + oss.str()); + std::string baseRestoredOutputFilename(ml::test::CTestTmpDir::tmpDir() + + "/restored_" + oss.str()); { // Clean up any leftovers of previous failures boost::filesystem::path restoredDir(baseRestoredOutputFilename); From 0629d0915ba7043fb8bb00dd27cce464284b5a31 Mon Sep 17 00:00:00 2001 From: Ed Savage Date: Tue, 29 Jul 2025 11:47:08 +1200 Subject: [PATCH 25/25] Update documentation. Update the CONTRIBUTING.md doc with details on the new feature enabling tests to be run individually. 
---
 CONTRIBUTING.md | 11 +++++++++++
 1 file changed, 11 insertions(+)

diff --git a/CONTRIBUTING.md b/CONTRIBUTING.md
index 5b435d0ce..880c59943 100644
--- a/CONTRIBUTING.md
+++ b/CONTRIBUTING.md
@@ -92,6 +92,17 @@ Note that we configure the build to be of type `RelWithDebInfo` in order to obta
 1. It is also possible to control the behaviour of the test framework by passing any other arbitrary flags via the `TEST_FLAGS` environment variable , e.g. `TEST_FLAGS="--random" cmake --build cmake-build-relwithdebinfo -t test` (use TEST_FLAGS="--help" to see the full list).
+1. On Linux and macOS it is possible to run individual tests within a Boost Test suite in separate processes.
+   1. This is convenient for several reasons:
+      1. Isolation: Prevent one test's failures (e.g., memory corruption, unhandled exceptions) from affecting subsequent tests.
+      1. Resource Management: Clean up resources (memory, file handles, network connections) between tests more effectively.
+      1. Stability: Improve the robustness of test suites, especially for long-running or complex tests.
+   1. Parallelization: A means to run individual test cases in parallel has been provided:
+      1. For all tests associated with a library or executable, e.g.
+         `cmake --build cmake-build-relwithdebinfo -j 8 -t test_api_individually`
+      1. For all tests in the `ml-cpp` repo:
+         `cmake --build cmake-build-relwithdebinfo -j 8 -t test_individually`
+   1. **Care should be taken that tests don't modify common resources.**
 1. As a convenience, there exists a `precommit` target that both formats the code and runs the entire test suite, e.g.
    1. `./gradlew precommit`
    1. `cmake --build cmake-build-relwithdebinfo -j 8 -t precommit`