diff --git a/common/proto/BUILD.bazel b/common/proto/BUILD.bazel
index f91736c91ef4..b58dc1572f7b 100644
--- a/common/proto/BUILD.bazel
+++ b/common/proto/BUILD.bazel
@@ -5,6 +5,12 @@ load(
     "drake_cc_googletest",
     "drake_cc_library",
 )
+load(
+    "//tools/skylark:drake_py.bzl",
+    "drake_py_binary",
+    "drake_py_library",
+    "drake_py_unittest",
+)
 load(
     "@drake//tools/skylark:drake_proto.bzl",
     "drake_cc_proto_library",
@@ -60,7 +66,7 @@ drake_cc_library(
     ],
 )
 
-py_library(
+drake_py_library(
     name = "call_python_client",
     srcs = ["call_python_client.py"],
     imports = ["."],
@@ -69,7 +75,7 @@ py_library(
     ],
 )
 
-py_binary(
+drake_py_binary(
     name = "call_python_client_cli",
     srcs = ["call_python_client.py"],
     main = "call_python_client.py",
@@ -123,8 +129,7 @@ drake_cc_googletest(
 )
 
 drake_cc_googletest(
-    name = "call_python_test",
-    srcs = ["test/call_python_test.cc"],
+    name = "call_python_server_test",
     tags = ["manual"],
     deps = [
         ":call_python",
@@ -133,17 +138,18 @@ )
 
 # TODO(eric.cousineau): Add a test which will use an interactive matplotlib
 # backend on CI only.
-sh_test(
-    name = "call_python_full_test",
+drake_py_unittest(
+    name = "call_python_test",
     size = "small",
-    srcs = ["test/call_python_full_test.sh"],
     data = [
         ":call_python_client_cli",
-        ":call_python_test",
+        ":call_python_server_test",
     ],
+    # Do not isolate, as we wish to access neighboring files.
+    isolate = False,
     # TODO(eric.cousineau): Find the source of (more) sporadic CI failures, but
     # after refactoring the script into Python.
-    flaky = 1,
+    # flaky = 1,
 )
 
 drake_cc_googletest(
diff --git a/common/proto/test/call_python_full_test.sh b/common/proto/test/call_python_full_test.sh
deleted file mode 100755
index b4a084794370..000000000000
--- a/common/proto/test/call_python_full_test.sh
+++ /dev/null
@@ -1,230 +0,0 @@
-#!/bin/bash
-set -e -u
-
-# @file
-# @brief Tests the `call_python_client` CLI and `call_python` test together.
-
-# TODO(eric.cousineau): Rewrite this in Python for an easier-to-understand
-# testing API (#7703).
-
-if [[ "${OSTYPE}" == "darwin"* ]]; then
-    echo "Skipping $(basename $0) on Mac" >&2
-    exit 0
-fi
-
-no_plotting=
-# By default, set backend so that the test does not open windows.
-export MPLBACKEND="ps"
-
-while [[ $# -gt 0 ]]; do
-    case ${1} in
-        --no_plotting)
-            no_plotting=1
-            shift;;
-        --matplotlib_backend)
-            export MPLBACKEND=${2}
-            shift; shift;;
-        *)
-            echo "Bad argument: ${1}" >&2
-            exit 1;;
-    esac
-done
-
-cur=$(dirname $0)
-cc_bin=${cur}/call_python_test
-py_client_cli=${cur}/call_python_client_cli
-# TODO(eric.cousineau): Use `tempfile` once we can choose which file C++
-# uses.
-filename="${TEST_TMPDIR}/python_rpc"
-done_file=${filename}_done
-
-cc_bin_flags=
-if [[ ${no_plotting} == 1 ]]; then
-    cc_bin_flags='--gtest_filter=-TestCallPython.Plot*'
-fi
-
-py-error() {
-    echo "ERROR: Python client did not exit successfully."
-    exit 1
-}
-
-pause() {
-    # General busy-spinning.
-    sleep 0.05
-}
-
-should-fail() {
-    echo "This should have failed!"
-    exit 2
-}
-
-sub-tests() {
-    # Execute sub-cases.
-    func=${1}
-    # Sub-case 1: Nominal
-    # @note This setup assumes other things succeeded.
-    echo -e "\n\n\n[ ${func}: nominal ]"
-    do-setup 0 0
-    ${func}
-    # Sub-case 2: With Error
-    echo -e "\n\n\n[ ${func}: with_error ]"
-    do-setup 1 0
-    ${func}
-    # Sub-case 3: With Error + Stop on Error
-    echo -e "\n\n\n[ ${func}: with_error + stop_on_error ]"
-    do-setup 1 1
-    ${func}
-}
-
-py-check() {
-    # Check the status of the Python executable (either `wait ...` or the
-    # executable itself).
-    if [[ ${py_fail} -eq 0 ]]; then
-        # Should succeed.
-        "$@" || py-error
-    else
-        # Should fail.
-        # TODO(eric.cousineau): File / find bug in Bash for this; this behaves
-        # differently depending on how this is placed in a function.
-        { "$@" && should-fail; } || :
-    fi
-}
-
-SIGPIPE_STATUS=141
-
-cc-check() {
-    if [[ ${py_fail} -eq 0 ]]; then
-        "$@" || { echo "C++ binary failed"; exit 1; }
-    else
-        # If the C++ binary has not finished by the time the Python client
-        # exits due to failure, then the C++ binary will fail with SIGPIPE.
-        set +e
-        "$@"
-        status=$?
-        set -e
-        if [[ ${status} -eq 0 ]]; then
-            :
-        elif [[ ${status} -eq ${SIGPIPE_STATUS} ]]; then
-            echo "C++ binary failed with SIGPIPE; expected behavior, continuing."
-        else
-            echo "C++ binary failed"
-            exit ${status}
-        fi
-    fi
-}
-
-do-setup() {
-    py_fail=${1}
-    py_stop_on_error=${2}
-
-    cc_flags="--file=${filename} --done_file=${done_file}"
-    if [[ ${py_fail} -eq 1 ]]; then
-        cc_flags="${cc_flags} --with_error"
-    fi
-    py_flags="--file=${filename}"
-    if [[ ${py_stop_on_error} -eq 1 ]]; then
-        py_flags="${py_flags} --stop_on_error"
-    fi
-
-    rm -f ${filename}
-    if [[ ${use_fifo} -eq 1 ]]; then
-        mkfifo ${filename}
-    fi
-    echo 0 > ${done_file}
-}
-
-do-kill-after() {
-    pid=${1}
-    done_count_max=${2}
-    # Ensure that we wait until the client is fully done with both
-    # executions.
-    done_count=0
-    while [[ ${done_count} -lt ${done_count_max} ]]; do
-        done_count=$(cat ${done_file})
-        pause
-    done
-    # Kill the client with Ctrl+C.
-    # TODO(eric.cousineau): In script form, this generally works well (only
-    # one interrupt needed); however, interactively we need a few more.
-    while ps -p ${pid} > /dev/null; do
-        kill -INT ${pid} || :
-        pause
-    done
-}
-
-# Execute tests using FIFO.
-use_fifo=1
-
-no_threading-no_wait() {
-    # Start Python binary in the background.
-    ${py_client_cli} --no_threading --no_wait ${py_flags} &
-    pid=$!
-    # Execute C++.
-    cc-check ${cc_bin} ${cc_bin_flags} ${cc_flags}
-    # When this is done, Python client should exit.
-    py-check wait ${pid}
-}
-sub-tests no_threading-no_wait
-
-threading-no_wait() {
-    ${py_client_cli} --no_wait ${py_flags} &
-    pid=$!
-    cc-check ${cc_bin} ${cc_bin_flags} ${cc_flags}
-    py-check wait ${pid}
-}
-sub-tests threading-no_wait
-
-threading-wait() {
-    ${py_client_cli} ${py_flags} &
-    pid=$!
-    cc-check ${cc_bin} ${cc_bin_flags} ${cc_flags}
-    if [[ ${py_stop_on_error} -ne 1 ]]; then
-        # If the client will not halt execution based on an error, execute C++
-        # client once more.
-        cc-check ${cc_bin} ${cc_bin_flags} ${cc_flags}
-        do-kill-after ${pid} 2
-    fi
-    py-check wait ${pid}
-}
-sub-tests threading-wait
-
-# Execute tests without FIFO.
-use_fifo=0
-
-no_fifo-no_threading-no_wait() {
-    # Execute C++ first.
-    ${cc_bin} ${cc_bin_flags} ${cc_flags}
-    # Start Python binary to consume generated file.
-    py-check ${py_client_cli} --no_threading --no_wait ${py_flags}
-}
-sub-tests no_fifo-no_threading-no_wait
-
-no_fifo-threading-no_wait() {
-    ${cc_bin} ${cc_bin_flags} ${cc_flags}
-    py-check ${py_client_cli} --no_wait ${py_flags}
-}
-sub-tests no_fifo-threading-no_wait
-
-no_fifo-no_threading-wait() {
-    # Execute C++ first.
-    ${cc_bin} ${cc_bin_flags} ${cc_flags}
-    # Start Python binary to consume generated file.
-    ${py_client_cli} --no_threading ${py_flags} &
-    pid=$!
-    if [[ ${py_stop_on_error} -ne 1 ]]; then
-        do-kill-after ${pid} 1
-    fi
-    py-check wait ${pid}
-}
-sub-tests no_fifo-no_threading-wait
-
-no_fifo-threading-wait() {
-    ${cc_bin} ${cc_bin_flags} ${cc_flags}
-    ${py_client_cli} ${py_flags} &
-    pid=$!
-    if [[ ${py_stop_on_error} -ne 1 ]]; then
-        do-kill-after ${pid} 1
-    fi
-    py-check wait ${pid}
-}
-sub-tests no_fifo-threading-wait
diff --git a/common/proto/test/call_python_test.cc b/common/proto/test/call_python_server_test.cc
similarity index 100%
rename from common/proto/test/call_python_test.cc
rename to common/proto/test/call_python_server_test.cc
index ee366da73c69..713742e7d222 100644
--- a/common/proto/test/call_python_test.cc
+++ b/common/proto/test/call_python_server_test.cc
@@ -1,5 +1,3 @@
-#include "drake/common/proto/call_python.h"
-
 #include
 #include
 #include
@@ -8,6 +6,8 @@
 #include
 #include
 
+#include "drake/common/proto/call_python.h"
+
 DEFINE_string(file, "/tmp/python_rpc",
               "File written to by this binary, read by client.");
 // This file signals to `call_python_full_test.sh` that a full execution has
diff --git a/common/proto/test/call_python_test.py b/common/proto/test/call_python_test.py
new file mode 100644
index 000000000000..8dcb3ef271e8
--- /dev/null
+++ b/common/proto/test/call_python_test.py
@@ -0,0 +1,109 @@
+"""Tests the `call_python_client` CLI and `call_python_server_test`
+together."""
+
+from contextlib import contextmanager
+import os
+import signal
+import subprocess
+import time
+import unittest
+
+
+@contextmanager
+def scoped_file(filepath, is_fifo=False):
+    # Ensures a file does not exist, creates it, and then destroys it upon
+    # exiting the context.
+    assert not os.path.exists(filepath)
+    try:
+        if is_fifo:
+            os.mkfifo(filepath)
+        else:
+            with open(filepath, 'w'):
+                pass
+        yield
+    finally:
+        os.unlink(filepath)
+
+
+# TODO(eric.cousineau): Elevate to top-level unittest once it's clear that
+# `bazel test` can cover the same capabilities as `bazel run` (debugging,
+# passing command-line arguments, etc.).
+assert "TEST_TMPDIR" in os.environ, "Must run under `bazel test`"
+
+# By default, set backend so that the test does not open windows.
+os.environ["MPLBACKEND"] = "ps"
+
+SIGPIPE_STATUS = 141
+
+cur_dir = os.path.dirname(os.path.abspath(__file__))
+# N.B. Need parent directories because this is under `test/*.py`, but the
+# Bazel-generated script is one level above.
+server_bin = os.path.join(cur_dir, "../call_python_server_test")
+client_bin = os.path.join(cur_dir, "../call_python_client_cli")
+# Change this if you wish to skip the plotting tests (useful when debugging).
+no_plotting = False
+
+file = os.path.join(os.environ["TEST_TMPDIR"], "python_rpc")
+done_file = file + "_done"
+
+
+def wait_for_done_count(num_expected, attempt_max=100):
+    done_count = -1
+    attempt = 0
+    while done_count < num_expected:
+        assert done_count <= num_expected
+        with open(done_file) as f:
+            done_count = int(f.read().strip())
+        time.sleep(0.005)
+        attempt += 1
+        if attempt == attempt_max:
+            raise RuntimeError("Did not get updated 'done count'")
+
+
+class TestCallPython(unittest.TestCase):
+    def run_server_and_client(self, with_error):
+        """Runs and tests server and client in parallel."""
+        server_flags = ["--file=" + file, "--done_file=" + done_file]
+        client_flags = ["--file=" + file]
+        if with_error:
+            server_flags += ["--with_error"]
+            client_flags += ["--stop_on_error"]
+        if no_plotting:
+            server_flags += ["--gtest_filter=-TestCallPython.Plot*"]
+
+        with scoped_file(file, is_fifo=True), scoped_file(done_file):
+            with open(done_file, 'w') as f:
+                f.write("0\n")
+            # Start client.
+            client = subprocess.Popen([client_bin] + client_flags)
+            # Start server.
+            server = subprocess.Popen([server_bin] + server_flags)
+            # Join with processes, check return codes.
+            server_valid_statuses = [0]
+            if with_error:
+                # If the C++ binary has not finished by the time the Python
+                # client exits due to failure, then the C++ binary will fail
+                # with SIGPIPE.
+                server_valid_statuses.append(SIGPIPE_STATUS)
+            self.assertIn(server.wait(), server_valid_statuses)
+            if not with_error:
+                # Execute once more.
+                server = subprocess.Popen([server_bin] + server_flags)
+                self.assertIn(server.wait(), server_valid_statuses)
+                # Wait until the client has indicated that the server process
+                # has run twice.
+                wait_for_done_count(2)
+                # Kill the client with SIGINT since it's waiting.
+                client.send_signal(signal.SIGINT)
+            client_status = client.wait()
+            if not with_error:
+                self.assertEqual(client_status, 0)
+            else:
+                self.assertEqual(client_status, 1)
+
+    def test_basic(self):
+        for with_error in [False, True]:
+            print("[ with_error: {} ]".format(with_error))
+            self.run_server_and_client(with_error)
+        # TODO(eric.cousineau): Cover other use cases if it's useful, or prune
+        # them from the code.