Enable reuse of launch testing functionality #236

Merged · 4 commits · May 17, 2019
2 changes: 1 addition & 1 deletion launch_testing/launch_testing/asserts/assert_output.py
@@ -45,7 +45,7 @@ def _match(expected, actual):
return lambda expected, actual: expected in actual
elif hasattr(expected_output, 'search'):
return lambda expected, actual: (
expected.match(actual.replace(os.linesep, '\n')) is not None
expected.search(actual.replace(os.linesep, '\n')) is not None
@pbaughman (Contributor) commented on May 15, 2019:
I encountered this same issue while trying to improve the test coverage on launch_testing. A simple test that catches this issue and prevents a regression would be something like:

def test_works_with_regex(self):
    assertInStdout(
        self.proc_output,
        re.compile(r'Called with arguments \S+'),
        'terminating_proc-2'
    )

You can add it to the bottom of test_io_handler_and_assertions.py. You'll also need to import re at the top of that file.

The pull request author (Contributor) replied:
Sounds good!

)
raise ValueError('Unknown format for expected output')

147 changes: 68 additions & 79 deletions launch_testing/launch_testing/launch_test.py
@@ -34,133 +34,122 @@ def _load_python_file_as_module(test_module_name, python_file_path):
return loader.load_module()


def main():

logging.basicConfig()

parser = argparse.ArgumentParser(
description='Launch integration testing tool'
def add_arguments(parser):
"""Add arguments to the CLI parser."""
parser.add_argument('launch_test_file', help='Path to the launch test.')
parser.add_argument(
'--package-name', action='store', default=None,
help='Name of the package the test is in. Useful to aggregate xUnit reports.'
)
parser.add_argument(
'-v', '--verbose', action='store_true', default=False, help='Run with verbose output'
)
parser.add_argument(
'-s', '--show-args', '--show-arguments', action='store_true', default=False,
help='Show arguments that may be given to the launch test.'
)

parser.add_argument('test_file')

parser.add_argument('-v', '--verbose',
action='store_true',
default=False,
help='Run with verbose output')

parser.add_argument('-s', '--show-args', '--show-arguments',
action='store_true',
default=False,
help='Show arguments that may be given to the test file.')

# TODO(hidmic): Provide this option for rostests only.
parser.add_argument('-i', '--isolated',
action='store_true',
default=False,
help=('Isolate tests using a custom ROS_DOMAIN_ID.'
'Useful for test parallelization.'))

parser.add_argument(
'launch_arguments',
nargs='*',
help="Arguments to the launch file; '<name>:=<value>' (for duplicates, last one wins)"
'-i', '--isolated', action='store_true', default=False,
help='Isolate tests using a custom ROS_DOMAIN_ID. Useful for test parallelization.'
)

parser.add_argument(
'--junit-xml',
action='store',
dest='xmlpath',
default=None,
help='write junit XML style report to specified path'
'launch_arguments', nargs='*',
help="Arguments in '<name>:=<value>' format (for duplicates, last one wins)."
)

parser.add_argument(
'--package-name',
action='store',
default=None,
help='a name for the test'
'--junit-xml', action='store', dest='xmlpath', default=None,
help='Do write xUnit reports to specified path.'
)
args = parser.parse_args()

if args.verbose:
_logger_.setLevel(logging.DEBUG)
_logger_.debug('Running with verbose output')

def parse_arguments():
parser = argparse.ArgumentParser(
description='Launch integration testing tool.'
)
add_arguments(parser)
return parser, parser.parse_args()


def run(parser, args, test_runner_cls=LaunchTestRunner):
if args.isolated:
domain_id = get_coordinated_domain_id() # Must copy this to a local to keep it alive
_logger_.debug('Running with ROS_DOMAIN_ID {}'.format(domain_id))
os.environ['ROS_DOMAIN_ID'] = str(domain_id)

# Load the test file as a module and make sure it has the required
# components to run it as a launch test
_logger_.debug("Loading tests from file '{}'".format(args.test_file))
if not os.path.isfile(args.test_file):
_logger_.debug("Loading tests from file '{}'".format(args.launch_test_file))
if not os.path.isfile(args.launch_test_file):
# Note to future reader: parser.error also exits as a side effect
parser.error("Test file '{}' does not exist".format(args.test_file))
parser.error("Test file '{}' does not exist".format(args.launch_test_file))

args.test_file = os.path.abspath(args.test_file)
test_file_basename = os.path.splitext(os.path.basename(args.test_file))[0]
args.launch_test_file = os.path.abspath(args.launch_test_file)
launch_test_file_basename = os.path.splitext(os.path.basename(args.launch_test_file))[0]
if not args.package_name:
args.package_name = test_file_basename
test_module = _load_python_file_as_module(args.package_name, args.test_file)

_logger_.debug('Checking for generate_test_description')
if not hasattr(test_module, 'generate_test_description'):
parser.error(
"Test file '{}' is missing generate_test_description function".format(args.test_file)
)
args.package_name = launch_test_file_basename
test_module = _load_python_file_as_module(args.package_name, args.launch_test_file)

# This is a list of TestRun objects. Each run corresponds to one launch. There may be
# multiple runs if the launch is parametrized
test_runs = LoadTestsFromPythonModule(
test_module, name='{}.{}.launch_tests'.format(
args.package_name, test_file_basename
args.package_name, launch_test_file_basename
)
)

# The runner handles sequencing the launches
runner = LaunchTestRunner(
runner = test_runner_cls(
test_runs=test_runs,
launch_file_arguments=args.launch_arguments,
debug=args.verbose
)

_logger_.debug('Validating test configuration')
try:
runner.validate()
except Exception as e:
parser.error(e)

runner.validate()

if args.show_args:
# TODO pete: Handle the case where different launch descriptions take different args?
print_arguments_of_launch_description(
launch_description=test_runs[0].get_launch_description()
)
sys.exit(0)
return

_logger_.debug('Running integration test')
try:
results = runner.run()
_logger_.debug('Done running integration test')

if args.xmlpath:
xml_report = unittestResultsToXml(
test_results=results, name='{}.{}'.format(
args.package_name, test_file_basename
)

results = runner.run()

_logger_.debug('Done running integration test')

if args.xmlpath:
xml_report = unittestResultsToXml(
test_results=results, name='{}.{}'.format(
args.package_name, launch_test_file_basename
)
xml_report.write(args.xmlpath, encoding='utf-8', xml_declaration=True)
)
xml_report.write(args.xmlpath, encoding='utf-8', xml_declaration=True)

# There will be one result for every test run (see above where we load the tests)
if not all(result.wasSuccessful() for result in results.values()):
return 1
return 0


def main():
logging.basicConfig()

# There will be one result for every test run (see above where we load the tests)
for result in results.values():
if not result.wasSuccessful():
sys.exit(1)
parser, args = parse_arguments()

if args.verbose:
_logger_.setLevel(logging.DEBUG)
_logger_.debug('Running with verbose output')

try:
sys.exit(run(parser, args))
except Exception as e:
import traceback
traceback.print_exc()
parser.error(e)
sys.exit(1)


if __name__ == '__main__':
13 changes: 12 additions & 1 deletion launch_testing/launch_testing/test_runner.py
@@ -37,9 +37,11 @@ class _RunnerWorker():

def __init__(self,
test_run,
test_run_preamble,
launch_file_arguments=[],
debug=False):
self._test_run = test_run
self._test_run_preamble = test_run_preamble
self._launch_service = LaunchService(debug=debug)
self._processes_launched = threading.Event() # To signal when all processes started
self._tests_completed = threading.Event() # To signal when all the tests have finished
@@ -119,6 +121,7 @@ def run(self):
# Wrap the test_ld in another launch description so we can bind command line arguments to
# the test and add our own event handlers for process IO and process exit:
launch_description = LaunchDescription([
*self._test_run_preamble,
launch.actions.IncludeLaunchDescription(
launch.LaunchDescriptionSource(launch_description=test_ld),
launch_arguments=parsed_launch_arguments
@@ -212,6 +215,10 @@ def __init__(self,
self._launch_file_arguments = launch_file_arguments
self._debug = debug

def generate_preamble(self):
"""Generate a launch description preamble for a test to be run with."""
return []

def run(self):
"""
Launch the processes under test and run the tests.
@@ -226,7 +233,11 @@ def run(self):
if len(self._test_runs) > 1:
print('\nStarting test run {}'.format(run))
try:
worker = _RunnerWorker(run, self._launch_file_arguments, self._debug)
worker = _RunnerWorker(
run,
self.generate_preamble(),
self._launch_file_arguments,
self._debug)
results[run] = worker.run()
except unittest.case.SkipTest as skip_exception:
# If a 'skip' decorator was placed on the generate_test_description function,
test_io_handler_and_assertions.py
@@ -13,6 +13,7 @@
# limitations under the License.

import os
import re
import sys
import unittest

@@ -157,6 +158,13 @@ def test_strict_proc_matching_false(self):
strict_proc_matching=False
)

def test_regex_matching(self):
assertInStdout(
self.proc_output,
re.compile(r'Called with arguments \S+'),
'terminating_proc-2'
)

def test_arguments_disambiguate_processes(self):
txt = self.EXPECTED_TEXT
assertInStdout(self.proc_output, txt, 'terminating_proc', '--extra')
84 changes: 44 additions & 40 deletions launch_testing_ament_cmake/cmake/add_launch_test.cmake
@@ -47,77 +47,81 @@
# ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
# POSSIBILITY OF SUCH DAMAGE.

#
# Add a launch test
#
# :param file: The launch test file containing the test to run
# :type file: string
# :param TARGET: The test target name
# :type TARGET: string
# :param PYTHON_EXECUTABLE: The python executable to use for the test
# :type PYTHON_EXECUTABLE: string
# :param TIMEOUT: The test timeout in seconds
# :type TIMEOUT: integer
# :param ARGS: Launch arguments to pass to the launch test
# :type ARGS: string
function(add_launch_test file)

cmake_parse_arguments(_add_launch_test
macro(parse_launch_test_arguments namespace filename)
cmake_parse_arguments(${namespace}
""
"TARGET;TIMEOUT;PYTHON_EXECUTABLE"
"ARGS"
${ARGN})

if(NOT _add_launch_test_TIMEOUT)
set(_add_launch_test_TIMEOUT 60)
if(NOT ${namespace}_TIMEOUT)
set(${namespace}_TIMEOUT 60)
endif()

if(NOT _add_launch_test_PYTHON_EXECUTABLE)
set(_add_launch_test_PYTHON_EXECUTABLE "${PYTHON_EXECUTABLE}")
if(NOT ${namespace}_PYTHON_EXECUTABLE)
set(${namespace}_PYTHON_EXECUTABLE "${PYTHON_EXECUTABLE}")
endif()

set(_file_name _file_name-NOTFOUND)
if(IS_ABSOLUTE ${file})
set(_file_name ${file})
set(${namespace}_FILE_NAME NOTFOUND)
if(IS_ABSOLUTE ${filename})
set(${namespace}_FILE_NAME ${filename})
else()
find_file(_file_name ${file}
find_file(${namespace}_FILE_NAME ${filename}
PATHS ${CMAKE_CURRENT_SOURCE_DIR}
NO_DEFAULT_PATH
NO_CMAKE_FIND_ROOT_PATH)
if(NOT _file_name)
message(FATAL_ERROR "Can't find launch test file \"${file}\"")
if(NOT ${namespace}_FILE_NAME)
message(FATAL_ERROR "Can't find launch test file \"${filename}\"")
endif()
endif()

if (NOT _add_launch_test_TARGET)
if (NOT ${namespace}_TARGET)
# strip PROJECT_SOURCE_DIR and PROJECT_BINARY_DIR from absolute filename to get unique test name (as rostest does it internally)
set(_add_launch_test_TARGET ${_file_name})
rostest__strip_prefix(_add_launch_test_TARGET "${PROJECT_SOURCE_DIR}/")
rostest__strip_prefix(_add_launch_test_TARGET "${PROJECT_BINARY_DIR}/")
string(REPLACE "/" "_" _add_launch_test_TARGET ${_add_launch_test_TARGET})
set(${namespace}_TARGET ${${namespace}_FILE_NAME})
rostest__strip_prefix(${namespace}_TARGET "${PROJECT_SOURCE_DIR}/")
rostest__strip_prefix(${namespace}_TARGET "${PROJECT_BINARY_DIR}/")
string(REPLACE "/" "_" ${namespace}_TARGET ${${namespace}_TARGET})
endif()

set(result_file "${AMENT_TEST_RESULTS_DIR}/${PROJECT_NAME}/${_add_launch_test_TARGET}.xunit.xml")
set(${namespace}_RESULT_FILE "${AMENT_TEST_RESULTS_DIR}/${PROJECT_NAME}/${${namespace}_TARGET}.xunit.xml")
endmacro()


#
# Add a launch test
#
# :param filename: The launch test file containing the test to run
# :type filename: string
# :param TARGET: The test target name
# :type TARGET: string
# :param PYTHON_EXECUTABLE: The python executable to use for the test
# :type PYTHON_EXECUTABLE: string
# :param TIMEOUT: The test timeout in seconds
# :type TIMEOUT: integer
# :param ARGS: Launch arguments to pass to the launch test
# :type ARGS: string
function(add_launch_test filename)
parse_launch_test_arguments(_launch_test ${filename} ${ARGN})

set(cmd
"${_add_launch_test_PYTHON_EXECUTABLE}"
"${_launch_test_PYTHON_EXECUTABLE}"
"-m"
"launch_testing.launch_test"
"${_file_name}"
"${_add_launch_test_ARGS}"
"--junit-xml=${result_file}"
"${_launch_test_FILE_NAME}"
"${_launch_test_ARGS}"
"--junit-xml=${_launch_test_RESULT_FILE}"
"--package-name=${PROJECT_NAME}"
)

ament_add_test(
"${_add_launch_test_TARGET}"
"${_launch_test_TARGET}"
COMMAND ${cmd}
OUTPUT_FILE "${CMAKE_BINARY_DIR}/launch_test/CHANGEME.txt"
RESULT_FILE "${result_file}"
TIMEOUT "${_add_launch_test_TIMEOUT}"
${_add_launch_test_UNPARSED_ARGUMENTS}
RESULT_FILE "${_launch_test_RESULT_FILE}"
TIMEOUT "${_launch_test_TIMEOUT}"
${_launch_test_UNPARSED_ARGUMENTS}
)

endfunction()

macro(rostest__strip_prefix var prefix)