#! /usr/bin/env python ## -*- Mode: python; py-indent-offset: 4; indent-tabs-mode: nil; coding: utf-8; -*- # # Copyright (c) 2009 University of Washington # # This program is free software; you can redistribute it and/or modify # it under the terms of the GNU General Public License version 2 as # published by the Free Software Foundation; # # This program is distributed in the hope that it will be useful, # but WITHOUT ANY WARRANTY; without even the implied warranty of # MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the # GNU General Public License for more details. # # You should have received a copy of the GNU General Public License # along with this program; if not, write to the Free Software # Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA # import os import sys import time import optparse import subprocess import threading import Queue import signal import xml.dom.minidom import shutil import re # # XXX This should really be part of a waf command to list the configuration # items relative to optional ns-3 pieces. # # A list of interesting configuration items in the waf configuration # cache which we may be interested in when deciding on which examples # to run and how to run them. These are set by waf during the # configuration phase and the corresponding assignments are usually # found in the associated subdirectory wscript files. # interesting_config_items = [ "NS3_BUILDDIR", "NS3_MODULE_PATH", "NSC_ENABLED", "ENABLE_REAL_TIME", "ENABLE_EXAMPLES", "ENABLE_PYTHON_BINDINGS", ] NSC_ENABLED = False ENABLE_REAL_TIME = False ENABLE_EXAMPLES = True # # If the user has constrained us to run certain kinds of tests, we can tell waf # to only build # core_kinds = ["bvt", "core", "system", "unit"] # # There are some special cases for test suites that kill valgrind. This is # because NSC causes illegal instruction crashes when run under valgrind. 
#
core_valgrind_skip_tests = [
    "ns3-tcp-cwnd",
    "nsc-tcp-loss",
    "ns3-tcp-interoperability",
]

#
# There are some special cases for test suites that fail when NSC is
# missing.
#
# NOTE: currently the same three NSC-dependent suites as the valgrind skip
# list above; keep the two lists in sync if NSC coverage changes.
#
core_nsc_missing_skip_tests = [
    "ns3-tcp-cwnd",
    "nsc-tcp-loss",
    "ns3-tcp-interoperability",
]

#
# A list of examples to run as smoke tests just to ensure that they remain
# buildable and runnable over time.  Also a condition under which to run
# the example (from the waf configuration), and a condition under which to
# run the example under valgrind.  This is because NSC causes illegal
# instruction crashes when run under valgrind.
#
# Each entry is (shell command, run-condition, valgrind-condition).  The two
# condition fields are strings of python code evaluated against the waf
# configuration globals (e.g. ENABLE_REAL_TIME, NSC_ENABLED).
#
# XXX Should this not be read from a configuration file somewhere and not
# hardcoded.
#
example_tests = [
    ("src/devices/bridge/examples/csma-bridge", "True", "True"),
    ("src/devices/bridge/examples/csma-bridge-one-hop", "True", "True"),
    ("examples/csma/csma-broadcast", "True", "True"),
    ("examples/csma/csma-multicast", "True", "True"),
    ("examples/csma/csma-one-subnet", "True", "True"),
    ("examples/csma/csma-packet-socket", "True", "True"),
    ("examples/csma/csma-ping", "True", "True"),
    ("examples/csma/csma-raw-ip-socket", "True", "True"),
    ("examples/csma/csma-star", "True", "True"),

    # Emulation examples need real network devices; never run, valgrind OK.
    ("examples/emulation/emu-ping", "False", "True"),
    ("examples/emulation/emu-udp-echo", "False", "True"),

    ("examples/error-model/simple-error-model", "True", "True"),

    ("examples/ipv6/icmpv6-redirect", "True", "True"),
    ("examples/ipv6/ping6", "True", "True"),
    ("examples/ipv6/radvd", "True", "True"),
    ("examples/ipv6/radvd-two-prefix", "True", "True"),
    ("examples/ipv6/test-ipv6", "True", "True"),

    ("examples/mesh/mesh", "True", "True"),

    ("examples/naming/object-names", "True", "True"),

    ("examples/realtime/realtime-udp-echo", "ENABLE_REAL_TIME == True", "True"),

    ("examples/routing/dynamic-global-routing", "True", "True"),
    ("examples/routing/global-injection-slash32", "True", "True"),
    ("examples/routing/global-routing-slash32", "True", "True"),
    ("examples/routing/mixed-global-routing", "True", "True"),
    ("examples/routing/nix-simple", "True", "True"),
    ("examples/routing/nms-p2p-nix", "False", "True"), # Takes too long to run
    ("examples/routing/simple-alternate-routing", "True", "True"),
    ("examples/routing/simple-global-routing", "True", "True"),
    ("src/routing/olsr/examples/simple-point-to-point-olsr", "True", "True"),
    ("examples/routing/simple-routing-ping6", "True", "True"),
    ("examples/routing/static-routing-slash32", "True", "True"),
    ("examples/routing/aodv", "True", "True"),

    ("examples/spectrum/adhoc-aloha-ideal-phy", "True", "True"),
    ("examples/spectrum/adhoc-aloha-ideal-phy-with-microwave-oven", "True", "True"),

    ("examples/stats/wifi-example-sim", "True", "True"),

    ("examples/tap/tap-wifi-dumbbell", "False", "True"), # Requires manual configuration

    ("examples/tcp/star", "True", "True"),
    ("examples/tcp/tcp-large-transfer", "True", "True"),
    # The NSC examples only run when NSC was configured in, and NSC crashes
    # under valgrind, hence the "False" valgrind-condition.
    ("examples/tcp/tcp-nsc-lfn", "NSC_ENABLED == True", "False"),
    ("examples/tcp/tcp-nsc-zoo", "NSC_ENABLED == True", "False"),
    ("examples/tcp/tcp-star-server", "True", "True"),

    ("examples/topology-read/topology-read --input=../../examples/topology-read/Inet_small_toposample.txt", "True", "True"),
    ("examples/topology-read/topology-read --format=Rocketfuel --input=../../examples/topology-read/RocketFuel_toposample_1239_weights.txt", "True", "True"),

    ("src/devices/virtual-net-device/examples/virtual-net-device", "True", "True"),

    ("examples/tutorial/first", "True", "True"),
    ("examples/tutorial/hello-simulator", "True", "True"),
    ("examples/tutorial/second", "True", "True"),
    ("examples/tutorial/third", "True", "True"),
    ("examples/tutorial/fourth", "True", "True"),
    ("examples/tutorial/fifth", "True", "True"),
    ("examples/tutorial/sixth", "True", "True"),

    ("examples/udp/udp-echo", "True", "True"),

    ("examples/wireless/mixed-wireless", "True", "True"),
    ("examples/wireless/multirate --totalTime=0.3s --rateManager=ns3::AarfcdWifiManager", "True", "True"),
    ("examples/wireless/multirate --totalTime=0.3s --rateManager=ns3::AmrrWifiManager", "True", "True"),
    ("examples/wireless/multirate --totalTime=0.3s --rateManager=ns3::CaraWifiManager", "True", "True"),
    ("examples/wireless/multirate --totalTime=0.3s --rateManager=ns3::IdealWifiManager", "True", "True"),
    ("examples/wireless/multirate --totalTime=0.3s --rateManager=ns3::MinstrelWifiManager", "True", "True"),
    ("examples/wireless/multirate --totalTime=0.3s --rateManager=ns3::OnoeWifiManager", "True", "True"),
    ("examples/wireless/multirate --totalTime=0.3s --rateManager=ns3::RraaWifiManager", "True", "True"),
    ("examples/wireless/simple-wifi-frame-aggregation", "True", "True"),
    ("examples/wireless/wifi-adhoc", "False", "True"), # Takes too long to run
    ("examples/wireless/wifi-ap --verbose=0", "True", "True"), # Don't let it spew to stdout
    ("examples/wireless/wifi-clear-channel-cmu", "False", "True"), # Requires specific hardware
    ("examples/wireless/wifi-simple-adhoc", "True", "True"),
    ("examples/wireless/wifi-simple-adhoc-grid", "True", "True"),
    ("examples/wireless/wifi-simple-infra", "True", "True"),
    ("examples/wireless/wifi-simple-interference", "True", "True"),
    ("examples/wireless/wifi-wired-bridging", "True", "True"),

    ("examples/wimax/wimax-simple", "True", "True"),
    ("examples/wimax/wimax-ipv4", "True", "True"),
    ("examples/wimax/wimax-multicast", "True", "True"),
]

#
# A list of python examples to run as smoke tests just to ensure that they
# runnable over time.  Also a condition under which to run the example (from
# the waf configuration)
#
# XXX Should this not be read from a configuration file somewhere and not
# hardcoded.
#
# python_tests = [ ("src/devices/bridge/examples/csma-bridge.py", "True"), ("src/contrib/flow-monitor/examples/wifi-olsr-flowmon.py", "True"), ("examples/routing/simple-routing-ping6.py", "True"), ("examples/tap/tap-csma-virtual-machine.py", "False"), # requires enable-sudo ("examples/tap/tap-wifi-virtual-machine.py", "False"), # requires enable-sudo ("examples/tutorial/first.py", "True"), ("examples/wireless/wifi-ap.py", "True"), ("examples/wireless/mixed-wireless.py", "True"), ] # # The test suites are going to want to output status. They are running # concurrently. This means that unless we are careful, the output of # the test suites will be interleaved. Rather than introducing a lock # file that could unintentionally start serializing execution, we ask # the tests to write their output to a temporary directory and then # put together the final output file when we "join" the test tasks back # to the main thread. In addition to this issue, the example programs # often write lots and lots of trace files which we will just ignore. # We put all of them into the temp directory as well, so they can be # easily deleted. # TMP_OUTPUT_DIR = "testpy-output" def get_node_text(node): for child in node.childNodes: if child.nodeType == child.TEXT_NODE: return child.nodeValue return "None" # # A simple example of writing a text file with a test result summary. It is # expected that this output will be fine for developers looking for problems. 
#
def translate_to_text(results_file, text_file):
    """Render the XML test results in results_file as a plain-text summary.

    One line per test suite, indented lines per test case, and indented
    failure details for failing cases; examples are listed at the end.
    """
    def text_of(parent, tag):
        # Text content of the first <tag> child element of parent.
        return get_node_text(parent.getElementsByTagName(tag)[0])

    with open(text_file, 'w') as out:
        dom = xml.dom.minidom.parse(results_file)

        for suite in dom.getElementsByTagName("TestSuite"):
            suite_result = text_of(suite, "SuiteResult")
            suite_name = text_of(suite, "SuiteName")
            suite_time = text_of(suite, "SuiteTime")
            out.write("%s: Test Suite \"%s\" (%s)\n" % (suite_result, suite_name, suite_time))

            # A crashed suite produced no per-case records worth reporting.
            if suite_result == "CRASH":
                continue

            for case in suite.getElementsByTagName("TestCase"):
                case_result = text_of(case, "CaseResult")
                case_name = text_of(case, "CaseName")
                case_time = text_of(case, "CaseTime")
                out.write(" %s: Test Case \"%s\" (%s)\n" % (case_result, case_name, case_time))

                if case_result != "FAIL":
                    continue

                # A failing case may carry several FailureDetails records.
                for details in case.getElementsByTagName("FailureDetails"):
                    out.write(" Details:\n")
                    out.write(" Message: %s\n" % text_of(details, "Message"))
                    out.write(" Condition: %s\n" % text_of(details, "Condition"))
                    out.write(" Actual: %s\n" % text_of(details, "Actual"))
                    out.write(" Limit: %s\n" % text_of(details, "Limit"))
                    out.write(" File: %s\n" % text_of(details, "File"))
                    out.write(" Line: %s\n" % text_of(details, "Line"))

        for example in dom.getElementsByTagName("Example"):
            out.write("%s: Example \"%s\" (%s)\n" % (text_of(example, "Result"),
                                                     text_of(example, "Name"),
                                                     text_of(example, "ElapsedTime")))
This may end up being moved to a separate module # since it will probably grow over time. # def translate_to_html(results_file, html_file): f = open(html_file, 'w') f.write("\n") f.write("\n") f.write("

ns-3 Test Results

\n") # # Read and parse the whole results file. # dom = xml.dom.minidom.parse(results_file) # # Iterate through the test suites # f.write("

Test Suites

\n") for suite in dom.getElementsByTagName("TestSuite"): # # For each test suite, get its name, result and execution time info # name = get_node_text(suite.getElementsByTagName("SuiteName")[0]) result = get_node_text(suite.getElementsByTagName("SuiteResult")[0]) time = get_node_text(suite.getElementsByTagName("SuiteTime")[0]) # # Print a level three header with the result, name and time. If the # test suite passed, the header is printed in green. If the suite was # skipped, print it in orange, otherwise assume something bad happened # and print in red. # if result == "PASS": f.write("

%s: %s (%s)

\n" % (result, name, time)) elif result == "SKIP": f.write("

%s: %s (%s)

\n" % (result, name, time)) else: f.write("

%s: %s (%s)

\n" % (result, name, time)) # # The test case information goes in a table. # f.write("\n") # # The first column of the table has the heading Result # f.write("\n") # # If the suite crashed or is skipped, there is no further information, so just # delare a new table row with the result (CRASH or SKIP) in it. Looks like: # # +--------+ # | Result | # +--------+ # | CRASH | # +--------+ # # Then go on to the next test suite. Valgrind and skipped errors look the same. # if result in ["CRASH", "SKIP", "VALGR"]: f.write("\n") if result == "SKIP": f.write("\n" % result) else: f.write("\n" % result) f.write("\n") f.write("
Result
%s%s
\n") continue # # If the suite didn't crash, we expect more information, so fill out # the table heading row. Like, # # +--------+----------------+------+ # | Result | Test Case Name | Time | # +--------+----------------+------+ # f.write("Test Case Name\n") f.write(" Time \n") # # If the test case failed, we need to print out some failure details # so extend the heading row again. Like, # # +--------+----------------+------+-----------------+ # | Result | Test Case Name | Time | Failure Details | # +--------+----------------+------+-----------------+ # if result == "FAIL": f.write("Failure Details\n") # # Now iterate through all of the test cases. # for case in suite.getElementsByTagName("TestCase"): # # Get the name, result and timing information from xml to use in # printing table below. # name = get_node_text(case.getElementsByTagName("CaseName")[0]) result = get_node_text(case.getElementsByTagName("CaseResult")[0]) time = get_node_text(case.getElementsByTagName("CaseTime")[0]) # # If the test case failed, we iterate through possibly multiple # failure details # if result == "FAIL": # # There can be multiple failures for each test case. The first # row always gets the result, name and timing information along # with the failure details. Remaining failures don't duplicate # this information but just get blanks for readability. 
Like, # # +--------+----------------+------+-----------------+ # | Result | Test Case Name | Time | Failure Details | # +--------+----------------+------+-----------------+ # | FAIL | The name | time | It's busted | # +--------+----------------+------+-----------------+ # | | | | Really broken | # +--------+----------------+------+-----------------+ # | | | | Busted bad | # +--------+----------------+------+-----------------+ # first_row = True for details in case.getElementsByTagName("FailureDetails"): # # Start a new row in the table for each possible Failure Detail # f.write("\n") if first_row: first_row = False f.write("%s\n" % result) f.write("%s\n" % name) f.write("%s\n" % time) else: f.write("\n") f.write("\n") f.write("\n") f.write("") f.write("Message: %s, " % get_node_text(details.getElementsByTagName("Message")[0])) f.write("Condition: %s, " % get_node_text(details.getElementsByTagName("Condition")[0])) f.write("Actual: %s, " % get_node_text(details.getElementsByTagName("Actual")[0])) f.write("Limit: %s, " % get_node_text(details.getElementsByTagName("Limit")[0])) f.write("File: %s, " % get_node_text(details.getElementsByTagName("File")[0])) f.write("Line: %s" % get_node_text(details.getElementsByTagName("Line")[0])) f.write("\n") # # End the table row # f.write("\n") else: # # If this particular test case passed, then we just print the PASS # result in green, followed by the test case name and its execution # time information. These go off in ... table data. # The details table entry is left blank. # # +--------+----------------+------+---------+ # | Result | Test Case Name | Time | Details | # +--------+----------------+------+---------+ # | PASS | The name | time | | # +--------+----------------+------+---------+ # f.write("\n") f.write("%s\n" % result) f.write("%s\n" % name) f.write("%s\n" % time) f.write("\n") f.write("\n") # # All of the rows are written, so we need to end the table. # f.write("\n") # # That's it for all of the test suites. 
Now we have to do something about # our examples. # f.write("

Examples

\n") # # Example status is rendered in a table just like the suites. # f.write("\n") # # The table headings look like, # # +--------+--------------+--------------+ # | Result | Example Name | Elapsed Time | # +--------+--------------+--------------+ # f.write("\n") f.write("\n") f.write("\n") # # Now iterate through all of the examples # for example in dom.getElementsByTagName("Example"): # # Start a new row for each example # f.write("\n") # # Get the result and name of the example in question # result = get_node_text(example.getElementsByTagName("Result")[0]) name = get_node_text(example.getElementsByTagName("Name")[0]) time = get_node_text(example.getElementsByTagName("ElapsedTime")[0]) # # If the example either failed or crashed, print its result status # in red; otherwise green. This goes in a table data # if result == "PASS": f.write("\n" % result) elif result == "SKIP": f.write("\n" % result) # # Write the example name as a new tag data. # f.write("\n" % name) # # Write the elapsed time as a new tag data. # f.write("\n" % time) # # That's it for the current example, so terminate the row. # f.write("\n") # # That's it for the table of examples, so terminate the table. # f.write("
Result Example NameElapsed Time
... %s%s\n" % result) else: f.write("%s%s%s
\n") # # And that's it for the report, so finish up. # f.write("\n") f.write("\n") f.close() # # Python Control-C handling is broken in the presence of multiple threads. # Signals get delivered to the runnable/running thread by default and if # it is blocked, the signal is simply ignored. So we hook sigint and set # a global variable telling the system to shut down gracefully. # thread_exit = False def sigint_hook(signal, frame): global thread_exit thread_exit = True return 0 # # Waf can be configured to compile in debug or optimized modes. In each # case, the resulting built goes into a different directory. If we want # test tests to run from the correct code-base, we have to figure out which # mode waf is running in. This is called its active variant. # # XXX This function pokes around in the waf internal state file. To be a # little less hacky, we should add a commmand to waf to return this info # and use that result. # def read_waf_active_variant(): for line in open("build/c4che/default.cache.py").readlines(): if line.startswith("NS3_ACTIVE_VARIANT"): exec(line, globals()) break if options.verbose: print "NS3_ACTIVE_VARIANT == %s" % NS3_ACTIVE_VARIANT # # In general, the build process itself naturally takes care of figuring out # which tests are built into the test runner. For example, if waf configure # determines that ENABLE_EMU is false due to some missing dependency, # the tests for the emu net device simply will not be built and will # therefore not be included in the built test runner. # # Examples, however, are a different story. In that case, we are just given # a list of examples that could be run. Instead of just failing, for example, # nsc-tcp-zoo if NSC is not present, we look into the waf saved configuration # for relevant configuration items. # # XXX This function pokes around in the waf internal state file. To be a # little less hacky, we should add a commmand to waf to return this info # and use that result. 
# def read_waf_config(): for line in open("build/c4che/%s.cache.py" % NS3_ACTIVE_VARIANT).readlines(): for item in interesting_config_items: if line.startswith(item): exec(line, globals()) if options.verbose: for item in interesting_config_items: print "%s ==" % item, eval(item) # # It seems pointless to fork a process to run waf to fork a process to run # the test runner, so we just run the test runner directly. The main thing # that waf would do for us would be to sort out the shared library path but # we can deal with that easily and do here. # # There can be many different ns-3 repositories on a system, and each has # its own shared libraries, so ns-3 doesn't hardcode a shared library search # path -- it is cooked up dynamically, so we do that too. # def make_paths(): have_DYLD_LIBRARY_PATH = False have_LD_LIBRARY_PATH = False have_PATH = False have_PYTHONPATH = False keys = os.environ.keys() for key in keys: if key == "DYLD_LIBRARY_PATH": have_DYLD_LIBRARY_PATH = True if key == "LD_LIBRARY_PATH": have_LD_LIBRARY_PATH = True if key == "PATH": have_PATH = True if key == "PYTHONPATH": have_PYTHONPATH = True pypath = os.environ["PYTHONPATH"] = os.path.join (NS3_BUILDDIR, NS3_ACTIVE_VARIANT, "bindings", "python") if not have_PYTHONPATH: os.environ["PYTHONPATH"] = pypath else: os.environ["PYTHONPATH"] += ":" + pypath if options.verbose: print "os.environ[\"PYTHONPATH\"] == %s" % os.environ["PYTHONPATH"] if sys.platform == "darwin": if not have_DYLD_LIBRARY_PATH: os.environ["DYLD_LIBRARY_PATH"] = "" for path in NS3_MODULE_PATH: os.environ["DYLD_LIBRARY_PATH"] += ":" + path if options.verbose: print "os.environ[\"DYLD_LIBRARY_PATH\"] == %s" % os.environ["DYLD_LIBRARY_PATH"] elif sys.platform == "win32": if not have_PATH: os.environ["PATH"] = "" for path in NS3_MODULE_PATH: os.environ["PATH"] += ';' + path if options.verbose: print "os.environ[\"PATH\"] == %s" % os.environ["PATH"] elif sys.platform == "cygwin": if not have_PATH: os.environ["PATH"] = "" for path in 
NS3_MODULE_PATH: os.environ["PATH"] += ":" + path if options.verbose: print "os.environ[\"PATH\"] == %s" % os.environ["PATH"] else: if not have_LD_LIBRARY_PATH: os.environ["LD_LIBRARY_PATH"] = "" for path in NS3_MODULE_PATH: os.environ["LD_LIBRARY_PATH"] += ":" + path if options.verbose: print "os.environ[\"LD_LIBRARY_PATH\"] == %s" % os.environ["LD_LIBRARY_PATH"] # # Short note on generating suppressions: # # See the valgrind documentation for a description of suppressions. The easiest # way to generate a suppression expression is by using the valgrind # --gen-suppressions option. To do that you have to figure out how to run the # test in question. # # If you do "test.py -v -g -s then test.py will output most of what # you need. For example, if you are getting a valgrind error in the # devices-mesh-dot11s-regression test suite, you can run: # # ./test.py -v -g -s devices-mesh-dot11s-regression # # You should see in the verbose output something that looks like: # # Synchronously execute valgrind --suppressions=/home/craigdo/repos/ns-3-allinone-dev/ns-3-dev/testpy.supp # --leak-check=full --error-exitcode=2 /home/craigdo/repos/ns-3-allinone-dev/ns-3-dev/build/debug/utils/test-runner # --suite=devices-mesh-dot11s-regression --basedir=/home/craigdo/repos/ns-3-allinone-dev/ns-3-dev # --tempdir=testpy-output/2010-01-12-22-47-50-CUT # --out=testpy-output/2010-01-12-22-47-50-CUT/devices-mesh-dot11s-regression.xml # # You need to pull out the useful pieces, and so could run the following to # reproduce your error: # # valgrind --suppressions=/home/craigdo/repos/ns-3-allinone-dev/ns-3-dev/testpy.supp # --leak-check=full --error-exitcode=2 /home/craigdo/repos/ns-3-allinone-dev/ns-3-dev/build/debug/utils/test-runner # --suite=devices-mesh-dot11s-regression --basedir=/home/craigdo/repos/ns-3-allinone-dev/ns-3-dev # --tempdir=testpy-output # # Hint: Use the first part of the command as is, and point the "tempdir" to # somewhere real. You don't need to specify an "out" file. 
#
# When you run the above command you should see your valgrind error.  The
# suppression expression(s) can be generated by adding the --gen-suppressions=yes
# option to valgrind.  Use something like:
#
#   valgrind --gen-suppressions=yes --suppressions=/home/craigdo/repos/ns-3-allinone-dev/ns-3-dev/testpy.supp
#   --leak-check=full --error-exitcode=2 /home/craigdo/repos/ns-3-allinone-dev/ns-3-dev/build/debug/utils/test-runner
#   --suite=devices-mesh-dot11s-regression --basedir=/home/craigdo/repos/ns-3-allinone-dev/ns-3-dev
#   --tempdir=testpy-output
#
# Now when valgrind detects an error it will ask:
#
#   ==27235== ---- Print suppression ? --- [Return/N/n/Y/y/C/c] ----
#
# to which you just enter 'y'.
#
# You will be provided with a suppression expression that looks something like
# the following:
#   {
#     <insert_a_suppression_name_here>
#     Memcheck:Addr8
#     fun:_ZN3ns36dot11s15HwmpProtocolMac8SendPreqESt6vectorINS0_6IePreqESaIS3_EE
#     fun:_ZN3ns36dot11s15HwmpProtocolMac10SendMyPreqEv
#     fun:_ZN3ns36dot11s15HwmpProtocolMac18RequestDestinationENS_12Mac48AddressEjj
#     ...
#     the rest of the stack frame
#     ...
#   }
#
# You need to add a suppression name which will only be printed out by valgrind in
# verbose mode (but it needs to be there in any case).  The entire stack frame is
# shown to completely characterize the error, but in most cases you won't need
# all of that info.  For example, if you want to turn off all errors that happen
# when the function (fun:) is called, you can just delete the rest of the stack
# frame.  You can also use wildcards to make the mangled signatures more readable.
#
# I added the following to the testpy.supp file for this particular error:
#
#   {
#     Suppress invalid read size errors in SendPreq() when using HwmpProtocolMac
#     Memcheck:Addr8
#     fun:*HwmpProtocolMac*SendPreq*
#   }
#
# Now, when you run valgrind the error will be suppressed.
# VALGRIND_SUPPRESSIONS_FILE = "testpy.supp" def run_job_synchronously(shell_command, directory, valgrind, is_python): (base, build) = os.path.split (NS3_BUILDDIR) suppressions_path = os.path.join (base, VALGRIND_SUPPRESSIONS_FILE) if is_python: path_cmd = "python " + os.path.join (base, shell_command) else: path_cmd = os.path.join (NS3_BUILDDIR, NS3_ACTIVE_VARIANT, shell_command) if valgrind: cmd = "valgrind --suppressions=%s --leak-check=full --show-reachable=yes --error-exitcode=2 %s" % (suppressions_path, path_cmd) else: cmd = path_cmd if options.verbose: print "Synchronously execute %s" % cmd start_time = time.time() proc = subprocess.Popen(cmd, shell = True, cwd = directory, stdout=subprocess.PIPE, stderr=subprocess.PIPE) stdout_results, stderr_results = proc.communicate() elapsed_time = time.time() - start_time retval = proc.returncode # # valgrind sometimes has its own idea about what kind of memory management # errors are important. We want to detect *any* leaks, so the way to do # that is to look for the presence of a valgrind leak summary section. # # If another error has occurred (like a test suite has failed), we don't # want to trump that error, so only do the valgrind output scan if the # test has otherwise passed (return code was zero). # if valgrind and retval == 0 and "== LEAK SUMMARY:" in stderr_results: retval = 2 if options.verbose: print "Return code = ", retval print "stderr = ", stderr_results return (retval, stdout_results, stderr_results, elapsed_time) # # This class defines a unit of testing work. It will typically refer to # a test suite to run using the test-runner, or an example to run directly. 
#
class Job:
    """A unit of testing work: one test suite, example, or python example.

    Instances are passed through a Queue to worker threads, which fill in
    the result fields (returncode, elapsed_time) and send them back.
    """
    def __init__(self):
        self.is_break = False        # sentinel: tells a worker thread to exit
        self.is_skip = False         # run through the pipeline but don't execute
        self.is_example = False      # compiled example, not a test suite
        self.is_pyexample = False    # python example, run via the interpreter
        self.shell_command = ""      # command to execute
        self.display_name = ""       # name shown in reports
        self.basedir = ""            # repository root (for test vectors)
        self.tempdir = ""            # where the test writes temp files
        self.cwd = ""                # working directory for the child process
        self.tmp_file_name = ""      # per-job XML output file
        self.returncode = False      # child exit status (undefined if is_skip)
        self.elapsed_time = 0        # wall-clock seconds for the run

    #
    # A job is either a standard job or a special job indicating that a worker
    # thread should exit.  This special job is indicated by setting is_break
    # to true.
    #
    def set_is_break(self, is_break):
        self.is_break = is_break

    #
    # If a job is to be skipped, we actually run it through the worker threads
    # to keep the PASS, FAIL, CRASH and SKIP processing all in one place.
    #
    def set_is_skip(self, is_skip):
        self.is_skip = is_skip

    #
    # Examples are treated differently than standard test suites.  This is
    # mostly because they are completely unaware that they are being run as
    # tests.  So we have to do some special case processing to make them look
    # like tests.
    #
    def set_is_example(self, is_example):
        self.is_example = is_example

    # Same special-case treatment as set_is_example, for python examples.
    def set_is_pyexample(self, is_pyexample):
        self.is_pyexample = is_pyexample

    #
    # This is the shell command that will be executed in the job.  For example,
    #
    #   "utils/test-runner --suite=some-test-suite"
    #
    def set_shell_command(self, shell_command):
        self.shell_command = shell_command

    #
    # This is the display name of the job, typically the test suite or example
    # name, e.g. "some-test-suite" or "udp-echo".
    #
    def set_display_name(self, display_name):
        self.display_name = display_name

    #
    # This is the base directory of the repository out of which the tests are
    # being run.  It will be used deep down in the testing framework to
    # determine where the source directory of the test was, and therefore
    # where to find provided test vectors.  For example,
    #
    #   "/home/user/repos/ns-3-dev"
    #
    def set_basedir(self, basedir):
        self.basedir = basedir

    #
    # This is the directory to which a running test suite should write any
    # temporary files.
    #
    def set_tempdir(self, tempdir):
        self.tempdir = tempdir

    #
    # This is the current working directory that will be given to an executing
    # test as it is being run.  It will be used for examples to tell them where
    # to write all of the pcap files that we will be carefully ignoring.  For
    # example,
    #
    #   "/tmp/unchecked-traces"
    #
    def set_cwd(self, cwd):
        self.cwd = cwd

    #
    # This is the temporary results file name that will be given to an
    # executing test as it is being run.  We will be running all of our tests
    # in parallel so there must be multiple temporary output files.  These
    # will be collected into a single XML file at the end and then be deleted.
    #
    def set_tmp_file_name(self, tmp_file_name):
        self.tmp_file_name = tmp_file_name

    # The return code received when the job process is executed.
    def set_returncode(self, returncode):
        self.returncode = returncode

    # The elapsed real time for the job execution.
    def set_elapsed_time(self, elapsed_time):
        self.elapsed_time = elapsed_time

#
# The worker thread class that handles the actual running of a given test.
# Once spawned, it receives requests for work through its input_queue and
# ships the results back through the output_queue.
#
class worker_thread(threading.Thread):
    def __init__(self, input_queue, output_queue):
        threading.Thread.__init__(self)
        self.input_queue = input_queue
        self.output_queue = output_queue

    def run(self):
        while True:
            job = self.input_queue.get()

            #
            # Worker threads continue running until explicitly told to stop
            # with a special "break" job.
            #
            if job.is_break:
                return

            #
            # If the global interrupt handler sets the thread_exit variable,
            # we stop doing real work and just report back a "break" in the
            # normal command processing has happened.
            #
            if thread_exit == True:
                job.set_is_break(True)
                self.output_queue.put(job)
                continue

            #
            # If we are actually supposed to skip this job, do so.  Note that
            # if is_skip is true, returncode is undefined.
            #
            if job.is_skip:
                if options.verbose:
                    print "Skip %s" % job.shell_command
                self.output_queue.put(job)
                continue

            #
            # Otherwise go about the business of running tests as normal.
            #
            else:
                if options.verbose:
                    print "Launch %s" % job.shell_command

                if job.is_example or job.is_pyexample:
                    #
                    # If we have an example, the shell command is all we need to
                    # know.  It will be something like "examples/udp-echo" or
                    # "examples/mixed-wireless.py"
                    #
                    (job.returncode, standard_out, standard_err, et) = run_job_synchronously(job.shell_command, job.cwd, options.valgrind, job.is_pyexample)
                else:
                    #
                    # If we're a test suite, we need to provide a little more
                    # info to the test runner, specifically the base directory
                    # and temp file name
                    #
                    (job.returncode, standard_out, standard_err, et) = run_job_synchronously(job.shell_command + " --basedir=%s --tempdir=%s --out=%s" % (job.basedir, job.tempdir, job.tmp_file_name), job.cwd, options.valgrind, False)

                job.set_elapsed_time(et)

                if options.verbose:
                    print "returncode = %d" % job.returncode
                    print "---------- begin standard out ----------"
                    print standard_out
                    print "---------- begin standard err ----------"
                    print standard_err
                    print "---------- end standard err ----------"

                self.output_queue.put(job)

#
# This is the main function that does the work of interacting with the
# test-runner itself.
#
def run_tests():
    #
    # Run waf to make sure that everything is built, configured and ready to go
    # unless we are explicitly told not to.  We want to be careful about causing
    # our users pain while waiting for extraneous stuff to compile and link, so
    # we allow users that know what they're doing to not invoke waf at all.
# if not options.nowaf: # # If the user is running the "kinds" or "list" options, there is an # implied dependency on the test-runner since we call that program # if those options are selected. We will exit after processing those # options, so if we see them, we can safely only build the test-runner. # # If the user has constrained us to running only a particular type of # file, we can only ask waf to build what we know will be necessary. # For example, if the user only wants to run BVT tests, we only have # to build the test-runner and can ignore all of the examples. # # If the user only wants to run a single example, then we can just build # that example. # # If there is no constraint, then we have to build everything since the # user wants to run everything. # if options.kinds or options.list or (len(options.constrain) and options.constrain in core_kinds): if sys.platform == "win32": waf_cmd = "waf --target=test-runner" else: waf_cmd = "./waf --target=test-runner" elif len(options.example): if sys.platform == "win32": waf_cmd = "waf --target=%s" % os.path.basename(options.example) else: waf_cmd = "./waf --target=%s" % os.path.basename(options.example) else: if sys.platform == "win32": waf_cmd = "waf" else: waf_cmd = "./waf" if options.verbose: print "Building: %s" % waf_cmd proc = subprocess.Popen(waf_cmd, shell = True) proc.communicate() if proc.returncode: print >> sys.stderr, "Waf died. Not running tests" return proc.returncode # # Pull some interesting configuration information out of waf, primarily # so we can know where executables can be found, but also to tell us what # pieces of the system have been built. This will tell us what examples # are runnable. # read_waf_active_variant() read_waf_config() make_paths() # # If lots of logging is enabled, we can crash Python when it tries to # save all of the text. We just don't allow logging to be turned on when # test.py runs. 
If you want to see logging output from your tests, you # have to run them using the test-runner directly. # os.environ["NS_LOG"] = "" # # There are a couple of options that imply we can to exit before starting # up a bunch of threads and running tests. Let's detect these cases and # handle them without doing all of the hard work. # if options.kinds: path_cmd = os.path.join("utils", "test-runner --kinds") (rc, standard_out, standard_err, et) = run_job_synchronously(path_cmd, os.getcwd(), False, False) print standard_out if options.list: path_cmd = os.path.join("utils", "test-runner --list") (rc, standard_out, standard_err, et) = run_job_synchronously(path_cmd, os.getcwd(), False, False) print standard_out if options.kinds or options.list: return # # We communicate results in two ways. First, a simple message relating # PASS, FAIL, CRASH or SKIP is always written to the standard output. It # is expected that this will be one of the main use cases. A developer can # just run test.py with no options and see that all of the tests still # pass. # # The second main use case is when detailed status is requested (with the # --text or --html options). Typicall this will be text if a developer # finds a problem, or HTML for nightly builds. In these cases, an # XML file is written containing the status messages from the test suites. # This file is then read and translated into text or HTML. It is expected # that nobody will really be interested in the XML, so we write it somewhere # with a unique name (time) to avoid collisions. In case an error happens, we # provide a runtime option to retain the temporary files. # # When we run examples as smoke tests, they are going to want to create # lots and lots of trace files. We aren't really interested in the contents # of the trace files, so we also just stash them off in the temporary dir. # The retain option also causes these unchecked trace files to be kept. 
# date_and_time = time.strftime("%Y-%m-%d-%H-%M-%S-CUT", time.gmtime()) if not os.path.exists(TMP_OUTPUT_DIR): os.makedirs(TMP_OUTPUT_DIR) testpy_output_dir = os.path.join(TMP_OUTPUT_DIR, date_and_time); if not os.path.exists(testpy_output_dir): os.makedirs(testpy_output_dir) # # Create the main output file and start filling it with XML. We need to # do this since the tests will just append individual results to this file. # xml_results_file = os.path.join(testpy_output_dir, "results.xml") f = open(xml_results_file, 'w') f.write('\n') f.write('\n') f.close() # # We need to figure out what test suites to execute. We are either given one # suite or example explicitly via the --suite or --example/--pyexample option, # or we need to call into the test runner and ask it to list all of the available # test suites. Further, we need to provide the constraint information if it # has been given to us. # # This translates into allowing the following options with respect to the # suites # # ./test,py: run all of the suites and examples # ./test.py --constrain=core: run all of the suites of all kinds # ./test.py --constrain=unit: run all unit suites # ./test,py --suite=some-test-suite: run a single suite # ./test,py --example=udp/udp-echo: run no test suites # ./test,py --pyexample=wireless/mixed-wireless.py: run no test suites # ./test,py --suite=some-suite --example=some-example: run the single suite # # We can also use the --constrain option to provide an ordering of test # execution quite easily. 
# if len(options.suite): suites = options.suite + "\n" elif len(options.example) == 0 and len(options.pyexample) == 0: if len(options.constrain): path_cmd = os.path.join("utils", "test-runner --list --constrain=%s" % options.constrain) (rc, suites, standard_err, et) = run_job_synchronously(path_cmd, os.getcwd(), False, False) else: path_cmd = os.path.join("utils", "test-runner --list") (rc, suites, standard_err, et) = run_job_synchronously(path_cmd, os.getcwd(), False, False) else: suites = "" # # suite_list will either a single test suite name that the user has # indicated she wants to run or a list of test suites provided by # the test-runner possibly according to user provided constraints. # We go through the trouble of setting up the parallel execution # even in the case of a single suite to avoid having two process the # results in two different places. # suite_list = suites.split('\n') # # We now have a possibly large number of test suites to run, so we want to # run them in parallel. We're going to spin up a number of worker threads # that will run our test jobs for us. # input_queue = Queue.Queue(0) output_queue = Queue.Queue(0) jobs = 0 threads=[] # # In Python 2.6 you can just use multiprocessing module, but we don't want # to introduce that dependency yet; so we jump through a few hoops. # processors = 1 if sys.platform != "win32": if 'SC_NPROCESSORS_ONLN'in os.sysconf_names: processors = os.sysconf('SC_NPROCESSORS_ONLN') else: proc = subprocess.Popen("sysctl -n hw.ncpu", shell = True, stdout=subprocess.PIPE, stderr=subprocess.PIPE) stdout_results, stderr_results = proc.communicate() if len(stderr_results) == 0: processors = int(stdout_results) # # Now, spin up one thread per processor which will eventually mean one test # per processor running concurrently. 
# for i in range(processors): thread = worker_thread(input_queue, output_queue) threads.append(thread) thread.start() # # Keep track of some summary statistics # total_tests = 0 skipped_tests = 0 # # We now have worker threads spun up, and a list of work to do. So, run # through the list of test suites and dispatch a job to run each one. # # Dispatching will run with unlimited speed and the worker threads will # execute as fast as possible from the queue. # # Note that we actually dispatch tests to be skipped, so all of the # PASS, FAIL, CRASH and SKIP processing is done in the same place. # for test in suite_list: test = test.strip() if len(test): job = Job() job.set_is_example(False) job.set_is_pyexample(False) job.set_display_name(test) job.set_tmp_file_name(os.path.join(testpy_output_dir, "%s.xml" % test)) job.set_cwd(os.getcwd()) job.set_basedir(os.getcwd()) job.set_tempdir(testpy_output_dir) if (options.multiple): multiple = " --multiple" else: multiple = "" path_cmd = os.path.join("utils", "test-runner --suite=%s%s" % (test, multiple)) job.set_shell_command(path_cmd) if options.valgrind and test in core_valgrind_skip_tests: job.set_is_skip(True) # Skip tests that will fail if NSC is missing. if not NSC_ENABLED and test in core_nsc_missing_skip_tests: job.set_is_skip(True) if options.verbose: print "Queue %s" % test input_queue.put(job) jobs = jobs + 1 total_tests = total_tests + 1 # # We've taken care of the discovered or specified test suites. Now we # have to deal with examples run as smoke tests. We have a list of all of # the example programs it makes sense to try and run. Each example will # have a condition associated with it that must evaluate to true for us # to try and execute it. This is used to determine if the example has # a dependency that is not satisfied. For example, if an example depends # on NSC being configured by waf, that example should have a condition # that evaluates to true if NSC is enabled. 
For example, # # ("tcp-nsc-zoo", "NSC_ENABLED == True"), # # In this case, the example "tcp-nsc-zoo" will only be run if we find the # waf configuration variable "NSC_ENABLED" to be True. # # We don't care at all how the trace files come out, so we just write them # to a single temporary directory. # # XXX As it stands, all of the trace files have unique names, and so file # collisions can only happen if two instances of an example are running in # two versions of the test.py process concurrently. We may want to create # uniquely named temporary traces directories to avoid this problem. # # We need to figure out what examples to execute. We are either given one # suite or example explicitly via the --suite or --example option, or we # need to walk the list of examples looking for available example # conditions. # # This translates into allowing the following options with respect to the # suites # # ./test,py: run all of the examples # ./test.py --constrain=unit run no examples # ./test.py --constrain=example run all of the examples # ./test.py --suite=some-test-suite: run no examples # ./test.py --example=some-example: run the single example # ./test.py --suite=some-suite --example=some-example: run the single example # # XXX could use constrain to separate out examples used for performance # testing # if len(options.suite) == 0 and len(options.example) == 0 and len(options.pyexample) == 0: if len(options.constrain) == 0 or options.constrain == "example": if ENABLE_EXAMPLES: for test, do_run, do_valgrind_run in example_tests: if eval(do_run): job = Job() job.set_is_example(True) job.set_is_pyexample(False) job.set_display_name(test) job.set_tmp_file_name("") job.set_cwd(testpy_output_dir) job.set_basedir(os.getcwd()) job.set_tempdir(testpy_output_dir) job.set_shell_command(test) if options.valgrind and not eval(do_valgrind_run): job.set_is_skip (True) if options.verbose: print "Queue %s" % test input_queue.put(job) jobs = jobs + 1 total_tests = total_tests + 1 elif 
len(options.example): # # If you tell me to run an example, I will try and run the example # irrespective of any condition. # job = Job() job.set_is_example(True) job.set_is_pyexample(False) job.set_display_name(options.example) job.set_tmp_file_name("") job.set_cwd(testpy_output_dir) job.set_basedir(os.getcwd()) job.set_tempdir(testpy_output_dir) job.set_shell_command("examples/%s" % options.example) if options.verbose: print "Queue %s" % options.example input_queue.put(job) jobs = jobs + 1 total_tests = total_tests + 1 # # Run some Python examples as smoke tests. We have a list of all of # the example programs it makes sense to try and run. Each example will # have a condition associated with it that must evaluate to true for us # to try and execute it. This is used to determine if the example has # a dependency that is not satisfied. # # We don't care at all how the trace files come out, so we just write them # to a single temporary directory. # # We need to figure out what python examples to execute. We are either # given one pyexample explicitly via the --pyexample option, or we # need to walk the list of python examples # # This translates into allowing the following options with respect to the # suites # # ./test.py --constrain=pyexample run all of the python examples # ./test.py --pyexample=some-example.py: run the single python example # if len(options.suite) == 0 and len(options.example) == 0 and len(options.pyexample) == 0: if len(options.constrain) == 0 or options.constrain == "pyexample": if ENABLE_EXAMPLES: for test, do_run in python_tests: if eval(do_run): job = Job() job.set_is_example(False) job.set_is_pyexample(True) job.set_display_name(test) job.set_tmp_file_name("") job.set_cwd(testpy_output_dir) job.set_basedir(os.getcwd()) job.set_tempdir(testpy_output_dir) job.set_shell_command(test) # # Python programs and valgrind do not work and play # well together, so we skip them under valgrind. 
# We go through the trouble of doing all of this # work to report the skipped tests in a consistent # way throught the output formatter. # if options.valgrind: job.set_is_skip (True) # # The user can disable python bindings, so we need # to pay attention to that and give some feedback # that we're not testing them # if not ENABLE_PYTHON_BINDINGS: job.set_is_skip (True) if options.verbose: print "Queue %s" % test input_queue.put(job) jobs = jobs + 1 total_tests = total_tests + 1 elif len(options.pyexample): # # If you tell me to run a python example, I will try and run the example # irrespective of any condition. # job = Job() job.set_is_pyexample(True) job.set_display_name(options.pyexample) job.set_tmp_file_name("") job.set_cwd(testpy_output_dir) job.set_basedir(os.getcwd()) job.set_tempdir(testpy_output_dir) job.set_shell_command("examples/%s" % options.pyexample) if options.verbose: print "Queue %s" % options.pyexample input_queue.put(job) jobs = jobs + 1 total_tests = total_tests + 1 # # Tell the worker threads to pack up and go home for the day. Each one # will exit when they see their is_break task. # for i in range(processors): job = Job() job.set_is_break(True) input_queue.put(job) # # Now all of the tests have been dispatched, so all we have to do here # in the main thread is to wait for them to complete. Keyboard interrupt # handling is broken as mentioned above. We use a signal handler to catch # sigint and set a global variable. When the worker threads sense this # they stop doing real work and will just start throwing jobs back at us # with is_break set to True. In this case, there are no real results so we # ignore them. If there are real results, we always print PASS or FAIL to # standard out as a quick indication of what happened. 
# passed_tests = 0 failed_tests = 0 crashed_tests = 0 valgrind_errors = 0 for i in range(jobs): job = output_queue.get() if job.is_break: continue if job.is_example or job.is_pyexample: kind = "Example" else: kind = "TestSuite" if job.is_skip: status = "SKIP" skipped_tests = skipped_tests + 1 else: if job.returncode == 0: status = "PASS" passed_tests = passed_tests + 1 elif job.returncode == 1: failed_tests = failed_tests + 1 status = "FAIL" elif job.returncode == 2: valgrind_errors = valgrind_errors + 1 status = "VALGR" else: crashed_tests = crashed_tests + 1 status = "CRASH" print "%s: %s %s" % (status, kind, job.display_name) if job.is_example or job.is_pyexample: # # Examples are the odd man out here. They are written without any # knowledge that they are going to be run as a test, so we need to # cook up some kind of output for them. We're writing an xml file, # so we do some simple XML that says we ran the example. # # XXX We could add some timing information to the examples, i.e. run # them through time and print the results here. # f = open(xml_results_file, 'a') f.write('\n') example_name = " %s\n" % job.display_name f.write(example_name) if status == "PASS": f.write(' PASS\n') elif status == "FAIL": f.write(' FAIL\n') elif status == "VALGR": f.write(' VALGR\n') elif status == "SKIP": f.write(' SKIP\n') else: f.write(' CRASH\n') f.write(' %.3f\n' % job.elapsed_time) f.write('\n') f.close() else: # # If we're not running an example, we're running a test suite. # These puppies are running concurrently and generating output # that was written to a temporary file to avoid collisions. # # Now that we are executing sequentially in the main thread, we can # concatenate the contents of the associated temp file to the main # results file and remove that temp file. # # One thing to consider is that a test suite can crash just as # well as any other program, so we need to deal with that # possibility as well. 
If it ran correctly it will return 0 # if it passed, or 1 if it failed. In this case, we can count # on the results file it saved being complete. If it crashed, it # will return some other code, and the file should be considered # corrupt and useless. If the suite didn't create any XML, then # we're going to have to do it ourselves. # # Another issue is how to deal with a valgrind error. If we run # a test suite under valgrind and it passes, we will get a return # code of 0 and there will be a valid xml results file since the code # ran to completion. If we get a return code of 1 under valgrind, # the test case failed, but valgrind did not find any problems so the # test case return code was passed through. We will have a valid xml # results file here as well since the test suite ran. If we see a # return code of 2, this means that valgrind found an error (we asked # it to return 2 if it found a problem in run_job_synchronously) but # the suite ran to completion so there is a valid xml results file. # If the suite crashes under valgrind we will see some other error # return code (like 139). If valgrind finds an illegal instruction or # some other strange problem, it will die with its own strange return # code (like 132). However, if the test crashes by itself, not under # valgrind we will also see some other return code. # # If the return code is 0, 1, or 2, we have a valid xml file. If we # get another return code, we have no xml and we can't really say what # happened -- maybe the TestSuite crashed, maybe valgrind crashed due # to an illegal instruction. If we get something beside 0-2, we assume # a crash and fake up an xml entry. After this is all done, we still # need to indicate a valgrind error somehow, so we fake up an xml entry # with a VALGR result. Thus, in the case of a working TestSuite that # fails valgrind, we'll see the PASS entry for the working TestSuite # followed by a VALGR failing test suite of the same name. 
# if job.is_skip: f = open(xml_results_file, 'a') f.write("\n") f.write(" %s\n" % job.display_name) f.write(' SKIP\n') f.write(' Execution times not available\n') f.write("\n") f.close() else: if job.returncode == 0 or job.returncode == 1 or job.returncode == 2: f_to = open(xml_results_file, 'a') f_from = open(job.tmp_file_name) f_to.write(f_from.read()) f_to.close() f_from.close() else: f = open(xml_results_file, 'a') f.write("\n") f.write(" %s\n" % job.display_name) f.write(' CRASH\n') f.write(' Execution times not available\n') f.write("\n") f.close() if job.returncode == 2: f = open(xml_results_file, 'a') f.write("\n") f.write(" %s\n" % job.display_name) f.write(' VALGR\n') f.write(' Execution times not available\n') f.write("\n") f.close() # # We have all of the tests run and the results written out. One final # bit of housekeeping is to wait for all of the threads to close down # so we can exit gracefully. # for thread in threads: thread.join() # # Back at the beginning of time, we started the body of an XML document # since the test suites and examples were going to just write their # individual pieces. So, we need to finish off and close out the XML # document # f = open(xml_results_file, 'a') f.write('\n') f.close() # # Print a quick summary of events # print "%d of %d tests passed (%d passed, %d skipped, %d failed, %d crashed, %d valgrind errors)" % (passed_tests, total_tests, passed_tests, skipped_tests, failed_tests, crashed_tests, valgrind_errors) # # The last things to do are to translate the XML results file to "human # readable form" if the user asked for it (or make an XML file somewhere) # if len(options.html): translate_to_html(xml_results_file, options.html) if len(options.text): translate_to_text(xml_results_file, options.text) if len(options.xml): shutil.copyfile(xml_results_file, options.xml) # # If we have been asked to retain all of the little temporary files, we # don't delete tm. 
If we do delete the temporary files, delete only the # directory we just created. We don't want to happily delete any retained # directories, which will probably surprise the user. # if not options.retain: shutil.rmtree(testpy_output_dir) if passed_tests + skipped_tests == total_tests: return 0 # success else: return 1 # catchall for general errors def main(argv): parser = optparse.OptionParser() parser.add_option("-c", "--constrain", action="store", type="string", dest="constrain", default="", metavar="KIND", help="constrain the test-runner by kind of test") parser.add_option("-e", "--example", action="store", type="string", dest="example", default="", metavar="EXAMPLE", help="specify a single example to run") parser.add_option("-g", "--grind", action="store_true", dest="valgrind", default=False, help="run the test suites and examples using valgrind") parser.add_option("-k", "--kinds", action="store_true", dest="kinds", default=False, help="print the kinds of tests available") parser.add_option("-l", "--list", action="store_true", dest="list", default=False, help="print the list of known tests") parser.add_option("-m", "--multiple", action="store_true", dest="multiple", default=False, help="report multiple failures from test suites and test cases") parser.add_option("-n", "--nowaf", action="store_true", dest="nowaf", default=False, help="do not run waf before starting testing") parser.add_option("-p", "--pyexample", action="store", type="string", dest="pyexample", default="", metavar="PYEXAMPLE", help="specify a single python example to run") parser.add_option("-r", "--retain", action="store_true", dest="retain", default=False, help="retain all temporary files (which are normally deleted)") parser.add_option("-s", "--suite", action="store", type="string", dest="suite", default="", metavar="TEST-SUITE", help="specify a single test suite to run") parser.add_option("-t", "--text", action="store", type="string", dest="text", default="", metavar="TEXT-FILE", 
help="write detailed test results into TEXT-FILE.txt") parser.add_option("-v", "--verbose", action="store_true", dest="verbose", default=False, help="print progress and informational messages") parser.add_option("-w", "--web", "--html", action="store", type="string", dest="html", default="", metavar="HTML-FILE", help="write detailed test results into HTML-FILE.html") parser.add_option("-x", "--xml", action="store", type="string", dest="xml", default="", metavar="XML-FILE", help="write detailed test results into XML-FILE.xml") global options options = parser.parse_args()[0] signal.signal(signal.SIGINT, sigint_hook) return run_tests() if __name__ == '__main__': sys.exit(main(sys.argv))