Diffstat (limited to 'testing/framework')
-rw-r--r--  testing/framework/README.txt             |   50
-rw-r--r--  testing/framework/SConscript             |   62
-rw-r--r--  testing/framework/TestCmd.py             | 1999
-rw-r--r--  testing/framework/TestCmdTests.py        | 3418
-rw-r--r--  testing/framework/TestCommon.py          |  749
-rw-r--r--  testing/framework/TestCommonTests.py     | 2392
-rw-r--r--  testing/framework/TestRuntest.py         |  173
-rw-r--r--  testing/framework/TestSCons.py           | 1745
-rw-r--r--  testing/framework/TestSConsMSVS.py       |  907
-rw-r--r--  testing/framework/TestSCons_time.py      |  361
-rw-r--r--  testing/framework/TestSConsign.py        |   90
-rw-r--r--  testing/framework/TestUnit/__init__.py   |    5
-rw-r--r--  testing/framework/TestUnit/cli.py        |   35
-rw-r--r--  testing/framework/TestUnit/taprunner.py  |  130
-rw-r--r--  testing/framework/test-framework.rst     |  523
15 files changed, 12639 insertions, 0 deletions
diff --git a/testing/framework/README.txt b/testing/framework/README.txt
new file mode 100644
index 0000000..817cb3f
--- /dev/null
+++ b/testing/framework/README.txt
@@ -0,0 +1,50 @@
+This directory contains testing infrastructure. Note that not all of
+the pieces here are local to SCons.
+
+ README.txt
+
+ What you're looking at right now.
+
+ SConscript
+
+ Configuration for our packaging build, to copy the necessary
+ parts of the infrastructure into a build directory.
+
+ TestCmd.py
+ TestCmdTests.py
+ TestCommon.py
+ TestCommonTests.py
+
+ The TestCmd infrastructure for testing external commands.
+ These are for generic command testing, are used by some
+ other projects, and are developed separately from SCons.
+ (They're developed by SK, but still...)
+
+ We've captured the unit tests (Test*Tests.py) for these files
+ along with the actual modules themselves to make it a little
+ easier to hack on them for our purposes. Note, however,
+ that any SCons-specific functionality should be implemented
+ in one of the SCons-specific modules described below.
+
+ TestRuntest.py
+
+ Test infrastructure for our runtest.py script.
+
+ TestSCons.py
+
+ Test infrastructure for SCons itself.
+
+ TestSConsMSVS.py
+
+ Test infrastructure for SCons' Visual Studio support.
+
+ TestSCons_time.py
+
+ Test infrastructure for the scons-time.py script.
+
+ TestSConsign.py
+
+ Test infrastructure for the sconsign.py script.
+
+Copyright (c) 2001 - 2019 The SCons Foundation
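For orientation, here is a minimal sketch (not part of this commit) of how an end-to-end SCons test typically uses the TestSCons layer described above. The SConstruct and source file contents are invented for illustration, and must_exist() and _exe are assumed to come from the TestCommon/TestSCons layers:

    import TestSCons

    _exe = TestSCons._exe               # platform executable suffix
    test = TestSCons.TestSCons()        # creates and enters a temp workdir
    test.write('SConstruct', "Program('hello.c')\n")
    test.write('hello.c', 'int main(void) { return 0; }\n')
    test.run(arguments='.')             # run scons over the workdir
    test.must_exist('hello' + _exe)     # verify the build product exists
    test.pass_test()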
diff --git a/testing/framework/SConscript b/testing/framework/SConscript
new file mode 100644
index 0000000..f79a254
--- /dev/null
+++ b/testing/framework/SConscript
@@ -0,0 +1,62 @@
+#
+# SConscript file for external packages we need.
+#
+
+#
+# Copyright (c) 2001 - 2019 The SCons Foundation
+#
+# Permission is hereby granted, free of charge, to any person obtaining
+# a copy of this software and associated documentation files (the
+# "Software"), to deal in the Software without restriction, including
+# without limitation the rights to use, copy, modify, merge, publish,
+# distribute, sublicense, and/or sell copies of the Software, and to
+# permit persons to whom the Software is furnished to do so, subject to
+# the following conditions:
+#
+# The above copyright notice and this permission notice shall be included
+# in all copies or substantial portions of the Software.
+#
+# THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY
+# KIND, EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE
+# WARRANTIES OF MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND
+# NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS BE
+# LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION
+# OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION
+# WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE.
+#
+
+import os.path
+
+Import('build_dir', 'env')
+
+files = [
+ 'TestCmd.py',
+ 'TestCommon.py',
+ 'TestRuntest.py',
+ 'TestSCons.py',
+ 'TestSConsign.py',
+ 'TestSCons_time.py',
+]
+
+def copy(target, source, env):
+ t = str(target[0])
+ s = str(source[0])
+ with open(s, 'r') as i, open(t, 'w') as o:
+ c = i.read()
+ # Note: We construct the __ VERSION __ substitution string at
+ # run-time so it doesn't get replaced when this file gets copied
+ # into the tree for packaging.
+ c = c.replace('__' + 'VERSION' + '__', env['VERSION'])
+ o.write(c)
+
+for file in files:
+ # Guarantee that real copies of these files always exist in
+ # build/testing/framework. If there's a symlink there, then this is an Aegis
+ # build and we blow them away now so that they'll get "built" later.
+ p = os.path.join(build_dir, 'testing','framework', file)
+ if os.path.islink(p):
+ os.unlink(p)
+ if not os.path.isabs(p):
+ p = '#' + p
+ env.Command(p, file, copy)
+ Local(p)
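The copy() action above splices the SCons release version into the copied framework modules. A standalone sketch of the same substitution, with invented file names and version string, shows why the token is assembled at run time: a literal marker in this script would otherwise be rewritten when the script itself is copied into the packaging tree.

    def substitute_version(src, dst, version):
        """Copy src to dst, replacing the version marker with 'version'."""
        with open(src, 'r') as infile, open(dst, 'w') as outfile:
            text = infile.read()
            # The token is built at run time ('__' + 'VERSION' + '__') so
            # this function's own source never contains the literal marker.
            outfile.write(text.replace('__' + 'VERSION' + '__', version))

    # Hypothetical usage:
    # substitute_version('TestSCons.py',
    #                    'build/testing/framework/TestSCons.py', '3.1.2')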
diff --git a/testing/framework/TestCmd.py b/testing/framework/TestCmd.py
new file mode 100644
index 0000000..81e03f3
--- /dev/null
+++ b/testing/framework/TestCmd.py
@@ -0,0 +1,1999 @@
+"""
+TestCmd.py: a testing framework for commands and scripts.
+
+The TestCmd module provides a framework for portable automated testing
+of executable commands and scripts (in any language, not just Python),
+especially commands and scripts that require file system interaction.
+
+In addition to running tests and evaluating conditions, the TestCmd
+module manages and cleans up one or more temporary workspace
+directories, and provides methods for creating files and directories in
+those workspace directories from in-line data (here-documents), allowing
+tests to be completely self-contained.
+
+A TestCmd environment object is created via the usual invocation:
+
+ import TestCmd
+ test = TestCmd.TestCmd()
+
+There are a bunch of keyword arguments available at instantiation:
+
+ test = TestCmd.TestCmd(description = 'string',
+ program = 'program_or_script_to_test',
+ interpreter = 'script_interpreter',
+ workdir = 'prefix',
+ subdir = 'subdir',
+ verbose = Boolean,
+ match = default_match_function,
+ match_stdout = default_match_stdout_function,
+ match_stderr = default_match_stderr_function,
+ diff = default_diff_function,
+ diff_stdout = default_diff_stdout_function,
+ diff_stderr = default_diff_stderr_function,
+ combine = Boolean)
+
+There are a bunch of methods that let you do different things:
+
+ test.verbose_set(1)
+
+ test.description_set('string')
+
+ test.program_set('program_or_script_to_test')
+
+ test.interpreter_set('script_interpreter')
+ test.interpreter_set(['script_interpreter', 'arg'])
+
+ test.workdir_set('prefix')
+ test.workdir_set('')
+
+ test.workpath('file')
+ test.workpath('subdir', 'file')
+
+ test.subdir('subdir', ...)
+
+ test.rmdir('subdir', ...)
+
+ test.write('file', "contents\n")
+ test.write(['subdir', 'file'], "contents\n")
+
+ test.read('file')
+ test.read(['subdir', 'file'])
+ test.read('file', mode)
+ test.read(['subdir', 'file'], mode)
+
+ test.writable('dir', 1)
+ test.writable('dir', None)
+
+ test.preserve(condition, ...)
+
+ test.cleanup(condition)
+
+ test.command_args(program = 'program_or_script_to_run',
+ interpreter = 'script_interpreter',
+ arguments = 'arguments to pass to program')
+
+ test.run(program = 'program_or_script_to_run',
+ interpreter = 'script_interpreter',
+ arguments = 'arguments to pass to program',
+ chdir = 'directory_to_chdir_to',
+ stdin = 'input to feed to the program\n',
+ universal_newlines = True)
+
+ p = test.start(program = 'program_or_script_to_run',
+ interpreter = 'script_interpreter',
+ arguments = 'arguments to pass to program',
+ universal_newlines = None)
+
+ test.finish(self, p)
+
+ test.pass_test()
+ test.pass_test(condition)
+ test.pass_test(condition, function)
+
+ test.fail_test()
+ test.fail_test(condition)
+ test.fail_test(condition, function)
+ test.fail_test(condition, function, skip)
+ test.fail_test(condition, function, skip, message)
+
+ test.no_result()
+ test.no_result(condition)
+ test.no_result(condition, function)
+ test.no_result(condition, function, skip)
+
+ test.stdout()
+ test.stdout(run)
+
+ test.stderr()
+ test.stderr(run)
+
+ test.symlink(target, link)
+
+ test.banner(string)
+ test.banner(string, width)
+
+ test.diff(actual, expected)
+
+ test.diff_stderr(actual, expected)
+
+ test.diff_stdout(actual, expected)
+
+ test.match(actual, expected)
+
+ test.match_stderr(actual, expected)
+
+ test.match_stdout(actual, expected)
+
+ test.set_match_function(match, stdout, stderr)
+
+ test.match_exact("actual 1\nactual 2\n", "expected 1\nexpected 2\n")
+ test.match_exact(["actual 1\n", "actual 2\n"],
+ ["expected 1\n", "expected 2\n"])
+ test.match_caseinsensitive("Actual 1\nACTUAL 2\n", "expected 1\nEXPECTED 2\n")
+
+ test.match_re("actual 1\nactual 2\n", regex_string)
+ test.match_re(["actual 1\n", "actual 2\n"], list_of_regexes)
+
+ test.match_re_dotall("actual 1\nactual 2\n", regex_string)
+ test.match_re_dotall(["actual 1\n", "actual 2\n"], list_of_regexes)
+
+ test.tempdir()
+ test.tempdir('temporary-directory')
+
+ test.sleep()
+ test.sleep(seconds)
+
+ test.where_is('foo')
+ test.where_is('foo', 'PATH1:PATH2')
+ test.where_is('foo', 'PATH1;PATH2', '.suffix3;.suffix4')
+
+ test.unlink('file')
+ test.unlink('subdir', 'file')
+
+The TestCmd module provides pass_test(), fail_test(), and no_result()
+unbound functions that report test results for use with the Aegis change
+management system. These methods terminate the test immediately,
+reporting PASSED, FAILED, or NO RESULT and exiting with status 0
+(success), 1, or 2, respectively. This allows for a distinction
+between an actual failed test and a test that could not be properly
+evaluated because of an external condition (such as a full file system
+or incorrect permissions).
+
+ import TestCmd
+
+ TestCmd.pass_test()
+ TestCmd.pass_test(condition)
+ TestCmd.pass_test(condition, function)
+
+ TestCmd.fail_test()
+ TestCmd.fail_test(condition)
+ TestCmd.fail_test(condition, function)
+ TestCmd.fail_test(condition, function, skip)
+ TestCmd.fail_test(condition, function, skip, message)
+
+ TestCmd.no_result()
+ TestCmd.no_result(condition)
+ TestCmd.no_result(condition, function)
+ TestCmd.no_result(condition, function, skip)
+
+The TestCmd module also provides unbound global functions that handle
+matching in the same way as the match_*() methods described above.
+
+ import TestCmd
+
+ test = TestCmd.TestCmd(match = TestCmd.match_exact)
+
+ test = TestCmd.TestCmd(match = TestCmd.match_caseinsensitive)
+
+ test = TestCmd.TestCmd(match = TestCmd.match_re)
+
+ test = TestCmd.TestCmd(match = TestCmd.match_re_dotall)
+
+These functions are also available as static methods:
+
+ import TestCmd
+
+ test = TestCmd.TestCmd(match = TestCmd.TestCmd.match_exact)
+
+ test = TestCmd.TestCmd(match = TestCmd.TestCmd.match_caseinsensitive)
+
+ test = TestCmd.TestCmd(match = TestCmd.TestCmd.match_re)
+
+ test = TestCmd.TestCmd(match = TestCmd.TestCmd.match_re_dotall)
+
+These static methods can be accessed by a string naming the method:
+
+ import TestCmd
+
+ test = TestCmd.TestCmd(match = 'match_exact')
+
+ test = TestCmd.TestCmd(match = 'match_caseinsensitive')
+
+ test = TestCmd.TestCmd(match = 'match_re')
+
+ test = TestCmd.TestCmd(match = 'match_re_dotall')
+
+The TestCmd module provides unbound global functions that can be used
+for the "diff" argument to TestCmd.TestCmd instantiation:
+
+ import TestCmd
+
+ test = TestCmd.TestCmd(match = TestCmd.match_re,
+ diff = TestCmd.diff_re)
+
+ test = TestCmd.TestCmd(diff = TestCmd.simple_diff)
+
+ test = TestCmd.TestCmd(diff = TestCmd.context_diff)
+
+ test = TestCmd.TestCmd(diff = TestCmd.unified_diff)
+
+These functions are also available as static methods:
+
+ import TestCmd
+
+ test = TestCmd.TestCmd(match = TestCmd.TestCmd.match_re,
+ diff = TestCmd.TestCmd.diff_re)
+
+ test = TestCmd.TestCmd(diff = TestCmd.TestCmd.simple_diff)
+
+ test = TestCmd.TestCmd(diff = TestCmd.TestCmd.context_diff)
+
+ test = TestCmd.TestCmd(diff = TestCmd.TestCmd.unified_diff)
+
+These static methods can be accessed by a string naming the method:
+
+ import TestCmd
+
+ test = TestCmd.TestCmd(match = 'match_re', diff = 'diff_re')
+
+ test = TestCmd.TestCmd(diff = 'simple_diff')
+
+ test = TestCmd.TestCmd(diff = 'context_diff')
+
+ test = TestCmd.TestCmd(diff = 'unified_diff')
+
+The "diff" argument can also be used with standard difflib functions:
+
+ import difflib
+
+ test = TestCmd.TestCmd(diff = difflib.context_diff)
+
+ test = TestCmd.TestCmd(diff = difflib.unified_diff)
+
+Lastly, the where_is() method also exists in an unbound function
+version.
+
+ import TestCmd
+
+ TestCmd.where_is('foo')
+ TestCmd.where_is('foo', 'PATH1:PATH2')
+ TestCmd.where_is('foo', 'PATH1;PATH2', '.suffix3;.suffix4')
+"""
+
+# Copyright 2000-2010 Steven Knight
+# This module is free software, and you may redistribute it and/or modify
+# it under the same terms as Python itself, so long as this copyright message
+# and disclaimer are retained in their original form.
+#
+# IN NO EVENT SHALL THE AUTHOR BE LIABLE TO ANY PARTY FOR DIRECT, INDIRECT,
+# SPECIAL, INCIDENTAL, OR CONSEQUENTIAL DAMAGES ARISING OUT OF THE USE OF
+# THIS CODE, EVEN IF THE AUTHOR HAS BEEN ADVISED OF THE POSSIBILITY OF SUCH
+# DAMAGE.
+#
+# THE AUTHOR SPECIFICALLY DISCLAIMS ANY WARRANTIES, INCLUDING, BUT NOT
+# LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A
+# PARTICULAR PURPOSE. THE CODE PROVIDED HEREUNDER IS ON AN "AS IS" BASIS,
+# AND THERE IS NO OBLIGATION WHATSOEVER TO PROVIDE MAINTENANCE,
+# SUPPORT, UPDATES, ENHANCEMENTS, OR MODIFICATIONS.
+from __future__ import division, print_function
+
+__author__ = "Steven Knight <knight at baldmt dot com>"
+__revision__ = "TestCmd.py 1.3.D001 2010/06/03 12:58:27 knight"
+__version__ = "1.3"
+
+import atexit
+import difflib
+import errno
+import os
+import re
+import shutil
+import signal
+import stat
+import sys
+import tempfile
+import threading
+import time
+import traceback
+import types
+
+
+IS_PY3 = sys.version_info[0] == 3
+IS_WINDOWS = sys.platform == 'win32'
+IS_64_BIT = sys.maxsize > 2**32
+
+class null(object):
+ pass
+
+
+_Null = null()
+
+try:
+ from collections import UserList, UserString
+except ImportError:
+ # no 'collections' module or no UserFoo in collections
+ exec('from UserList import UserList')
+ exec('from UserString import UserString')
+
+__all__ = [
+ 'diff_re',
+ 'fail_test',
+ 'no_result',
+ 'pass_test',
+ 'match_exact',
+ 'match_caseinsensitive',
+ 'match_re',
+ 'match_re_dotall',
+ 'python',
+ '_python_',
+ 'TestCmd',
+ 'to_bytes',
+ 'to_str',
+]
+
+
+def is_List(e):
+ return isinstance(e, (list, UserList))
+
+
+def to_bytes(s):
+ if isinstance(s, bytes) or bytes is str:
+ return s
+ return bytes(s, 'utf-8')
+
+
+def to_str(s):
+ if bytes is str or is_String(s):
+ return s
+ return str(s, 'utf-8')
+
+
+try:
+ eval('unicode')
+except NameError:
+ def is_String(e):
+ return isinstance(e, (str, UserString))
+else:
+ def is_String(e):
+ return isinstance(e, (str, unicode, UserString))
+
+testprefix = 'testcmd.'
+if os.name in ('posix', 'nt'):
+ testprefix += "%s." % str(os.getpid())
+
+re_space = re.compile(r'\s')
+
+
+def _caller(tblist, skip):
+ string = ""
+ arr = []
+ for file, line, name, text in tblist:
+ if file[-10:] == "TestCmd.py":
+ break
+ arr = [(file, line, name, text)] + arr
+ atfrom = "at"
+ for file, line, name, text in arr[skip:]:
+ if name in ("?", "<module>"):
+ name = ""
+ else:
+ name = " (" + name + ")"
+ string = string + ("%s line %d of %s%s\n" % (atfrom, line, file, name))
+ atfrom = "\tfrom"
+ return string
+
+
+def fail_test(self=None, condition=1, function=None, skip=0, message=None):
+ """Cause the test to fail.
+
+ By default, the fail_test() method reports that the test FAILED
+ and exits with a status of 1. If a condition argument is supplied,
+ the test fails only if the condition is true.
+ """
+ if not condition:
+ return
+ if not function is None:
+ function()
+ of = ""
+ desc = ""
+ sep = " "
+ if not self is None:
+ if self.program:
+ of = " of " + self.program
+ sep = "\n\t"
+ if self.description:
+ desc = " [" + self.description + "]"
+ sep = "\n\t"
+
+ at = _caller(traceback.extract_stack(), skip)
+ if message:
+ msg = "\t%s\n" % message
+ else:
+ msg = ""
+ sys.stderr.write("FAILED test" + of + desc + sep + at + msg)
+
+ sys.exit(1)
+
+
+def no_result(self=None, condition=1, function=None, skip=0):
+ """Causes a test to exit with no valid result.
+
+ By default, the no_result() method reports NO RESULT for the test
+ and exits with a status of 2. If a condition argument is supplied,
+ NO RESULT is reported only if the condition is true.
+ """
+ if not condition:
+ return
+ if not function is None:
+ function()
+ of = ""
+ desc = ""
+ sep = " "
+ if not self is None:
+ if self.program:
+ of = " of " + self.program
+ sep = "\n\t"
+ if self.description:
+ desc = " [" + self.description + "]"
+ sep = "\n\t"
+
+ at = _caller(traceback.extract_stack(), skip)
+ sys.stderr.write("NO RESULT for test" + of + desc + sep + at)
+
+ sys.exit(2)
+
+
+def pass_test(self=None, condition=1, function=None):
+ """Causes a test to pass.
+
+ By default, the pass_test() method reports PASSED for the test
+ and exits with a status of 0. If a condition argument is supplied,
+ the test passes only if the condition is true.
+ """
+ if not condition:
+ return
+ if not function is None:
+ function()
+ sys.stderr.write("PASSED\n")
+ sys.exit(0)
+
+
+def match_exact(lines=None, matches=None, newline=os.sep):
+ """
+ Match function using exact match.
+
+ :param lines: data lines
+ :type lines: str or list[str]
+ :param matches: expected lines to match
+ :type matches: str or list[str]
+ :param newline: line separator
+ :returns: an object (1) on match, else None, like re.match
+ """
+
+ if isinstance(lines, bytes) or bytes is str:
+ newline = to_bytes(newline)
+
+ if not is_List(lines):
+ lines = lines.split(newline)
+ if not is_List(matches):
+ matches = matches.split(newline)
+ if len(lines) != len(matches):
+ return None
+ for line, match in zip(lines, matches):
+ if line != match:
+ return None
+ return 1
+
+
+def match_caseinsensitive(lines=None, matches=None):
+ """
+ Match function using case-insensitive matching.
+
+ Only a simplistic comparison is done, based on lowercasing the
+ strings. This has plenty of holes for unicode data using
+ non-English languages.
+
+ TODO: casefold() is better than lower() if we don't need Py2 support.
+
+ :param lines: data lines
+ :type lines: str or list[str]
+ :param matches: expected lines to match
+ :type matches: str or list[str]
+ :returns: an object (1) on match, else None, like re.match
+ """
+ if not is_List(lines):
+ lines = lines.split("\n")
+ if not is_List(matches):
+ matches = matches.split("\n")
+ if len(lines) != len(matches):
+ return None
+ for line, match in zip(lines, matches):
+ if line.lower() != match.lower():
+ return None
+ return 1
+
+
+def match_re(lines=None, res=None):
+ """
+ Match function using line-by-line regular expression match.
+
+ :param lines: data lines
+ :type lines: str or list[str]
+ :param res: regular expression(s) for matching
+ :type res: str or list[str]
+ :returns: an object (1) on match, else None, like re.match
+ """
+ if not is_List(lines):
+ # CRs mess up matching (Windows) so split carefully
+ lines = re.split('\r?\n', lines)
+ if not is_List(res):
+ res = res.split("\n")
+ if len(lines) != len(res):
+ print("match_re: expected %d lines, found %d" % (len(res), len(lines)))
+ return None
+ for i, (line, regex) in enumerate(zip(lines, res)):
+ s = r"^{}$".format(regex)
+ try:
+ expr = re.compile(s)
+ except re.error as e:
+ msg = "Regular expression error in %s: %s"
+ raise re.error(msg % (repr(s), e.args[0]))
+ if not expr.search(line):
+ miss_tmpl = "match_re: mismatch at line {}:\n search re='{}'\n line='{}'"
+ print(miss_tmpl.format(i, s, line))
+ return None
+ return 1
+
+
+def match_re_dotall(lines=None, res=None):
+ """
+ Match function using regular expression match.
+
+ Unlike match_re, the arguments are converted to strings (if necessary)
+ and must match exactly.
+
+ :param lines: data lines
+ :type lines: str or list[str]
+ :param res: regular expression(s) for matching
+ :type res: str or list[str]
+ :returns: a match object, or None as for re.match
+ """
+ if not isinstance(lines, str):
+ lines = "\n".join(lines)
+ if not isinstance(res, str):
+ res = "\n".join(res)
+ s = r"^{}$".format(res)
+ try:
+ expr = re.compile(s, re.DOTALL)
+ except re.error as e:
+ msg = "Regular expression error in %s: %s"
+ raise re.error(msg % (repr(s), e.args[0]))
+ return expr.match(lines)
+
+
+def simple_diff(a, b, fromfile='', tofile='',
+ fromfiledate='', tofiledate='', n=0, lineterm=''):
+ r"""
+ Compare two sequences of lines; generate the delta as a simple diff.
+
+ Similar to difflib.context_diff and difflib.unified_diff but
+ output is like that of the 'diff' command without arguments. The
+ function keeps the same signature as the difflib ones so they are
+ interchangeable, but except for lineterm, the arguments beyond the
+ two sequences are ignored in this version. By default, the
+ diff is not created with trailing newlines; set the lineterm
+ argument to '\n' to do so.
+
+ Example:
+
+ >>> print(''.join(simple_diff('one\ntwo\nthree\nfour\n'.splitlines(True),
+ ... 'zero\none\ntree\nfour\n'.splitlines(True), lineterm='\n')))
+ 0a1
+ > zero
+ 2,3c3
+ < two
+ < three
+ ---
+ > tree
+
+ """
+ a = [to_str(q) for q in a]
+ b = [to_str(q) for q in b]
+ sm = difflib.SequenceMatcher(None, a, b)
+
+ def comma(x1, x2):
+ return x1 + 1 == x2 and str(x2) or '%s,%s' % (x1 + 1, x2)
+
+ for op, a1, a2, b1, b2 in sm.get_opcodes():
+ if op == 'delete':
+ yield "{}d{}{}".format(comma(a1, a2), b1, lineterm)
+ for l in a[a1:a2]:
+ yield '< ' + l
+ elif op == 'insert':
+ yield "{}a{}{}".format(a1, comma(b1, b2), lineterm)
+ for l in b[b1:b2]:
+ yield '> ' + l
+ elif op == 'replace':
+ yield "{}c{}{}".format(comma(a1, a2), comma(b1, b2), lineterm)
+ for l in a[a1:a2]:
+ yield '< ' + l
+ yield '---{}'.format(lineterm)
+ for l in b[b1:b2]:
+ yield '> ' + l
+
+
+def diff_re(a, b, fromfile='', tofile='',
+ fromfiledate='', tofiledate='', n=3, lineterm='\n'):
+ """
+ Compare a and b (lists of strings) where a are regexes.
+
+ A simple "diff" of two sets of lines when the expected lines
+ are regular expressions. This is a really dumb thing that
+ just compares each line in turn, so it doesn't look for
+ chunks of matching lines and the like--but at least it lets
+ you know exactly which line first didn't compare correctly.
+ """
+ result = []
+ diff = len(a) - len(b)
+ if diff < 0:
+ a = a + [''] * (-diff)
+ elif diff > 0:
+ b = b + [''] * diff
+ for i, (aline, bline) in enumerate(zip(a, b)):
+ s = r"^{}$".format(aline)
+ try:
+ expr = re.compile(s)
+ except re.error as e:
+ msg = "Regular expression error in %s: %s"
+ raise re.error(msg % (repr(s), e.args[0]))
+ if not expr.search(bline):
+ result.append("%sc%s" % (i + 1, i + 1))
+ result.append('< ' + repr(a[i]))
+ result.append('---')
+ result.append('> ' + repr(b[i]))
+ return result
+
+
+if os.name == 'posix':
+ def escape(arg):
+ "escape shell special characters"
+ slash = '\\'
+ special = '"$'
+ arg = arg.replace(slash, slash + slash)
+ for c in special:
+ arg = arg.replace(c, slash + c)
+ if re_space.search(arg):
+ arg = '"' + arg + '"'
+ return arg
+else:
+ # Windows does not allow special characters in file names
+ # anyway, so no need for an escape function, we will just quote
+ # the arg.
+ def escape(arg):
+ if re_space.search(arg):
+ arg = '"' + arg + '"'
+ return arg
+
+if os.name == 'java':
+ python = os.path.join(sys.prefix, 'jython')
+else:
+ python = os.environ.get('python_executable', sys.executable)
+_python_ = escape(python)
+
+if sys.platform == 'win32':
+
+ default_sleep_seconds = 2
+
+ def where_is(file, path=None, pathext=None):
+ if path is None:
+ path = os.environ['PATH']
+ if is_String(path):
+ path = path.split(os.pathsep)
+ if pathext is None:
+ pathext = os.environ['PATHEXT']
+ if is_String(pathext):
+ pathext = pathext.split(os.pathsep)
+ for ext in pathext:
+ if ext.lower() == file[-len(ext):].lower():
+ pathext = ['']
+ break
+ for dir in path:
+ f = os.path.join(dir, file)
+ for ext in pathext:
+ fext = f + ext
+ if os.path.isfile(fext):
+ return fext
+ return None
+
+else:
+
+ def where_is(file, path=None, pathext=None):
+ if path is None:
+ path = os.environ['PATH']
+ if is_String(path):
+ path = path.split(os.pathsep)
+ for dir in path:
+ f = os.path.join(dir, file)
+ if os.path.isfile(f):
+ try:
+ st = os.stat(f)
+ except OSError:
+ continue
+ if stat.S_IMODE(st[stat.ST_MODE]) & 0o111:
+ return f
+ return None
+
+ default_sleep_seconds = 1
+
+
+import subprocess
+
+try:
+ subprocess.Popen.terminate
+except AttributeError:
+ if sys.platform == 'win32':
+ import win32process
+
+ def terminate(self):
+ win32process.TerminateProcess(self._handle, 1)
+ else:
+ def terminate(self):
+ os.kill(self.pid, signal.SIGTERM)
+ method = types.MethodType(terminate, None, subprocess.Popen)
+ setattr(subprocess.Popen, 'terminate', method)
+
+
+# From Josiah Carlson,
+# ASPN : Python Cookbook : Module to allow Asynchronous subprocess use on Windows and Posix platforms
+# http://aspn.activestate.com/ASPN/Cookbook/Python/Recipe/440554
+
+PIPE = subprocess.PIPE
+
+if sys.platform == 'win32': # and subprocess.mswindows:
+ try:
+ from win32file import ReadFile, WriteFile
+ from win32pipe import PeekNamedPipe
+ except ImportError:
+ # If PyWin32 is not available, try ctypes instead
+ # XXX These replicate _just_enough_ PyWin32 behaviour for our purposes
+ import ctypes
+ from ctypes.wintypes import DWORD
+
+ def ReadFile(hFile, bufSize, ol=None):
+ assert ol is None
+ lpBuffer = ctypes.create_string_buffer(bufSize)
+ bytesRead = DWORD()
+ bErr = ctypes.windll.kernel32.ReadFile(
+ hFile, lpBuffer, bufSize, ctypes.byref(bytesRead), ol)
+ if not bErr:
+ raise ctypes.WinError()
+ return (0, ctypes.string_at(lpBuffer, bytesRead.value))
+
+ def WriteFile(hFile, data, ol=None):
+ assert ol is None
+ bytesWritten = DWORD()
+ bErr = ctypes.windll.kernel32.WriteFile(
+ hFile, data, len(data), ctypes.byref(bytesWritten), ol)
+ if not bErr:
+ raise ctypes.WinError()
+ return (0, bytesWritten.value)
+
+ def PeekNamedPipe(hPipe, size):
+ assert size == 0
+ bytesAvail = DWORD()
+ bErr = ctypes.windll.kernel32.PeekNamedPipe(
+ hPipe, None, size, None, ctypes.byref(bytesAvail), None)
+ if not bErr:
+ raise ctypes.WinError()
+ return ("", bytesAvail.value, None)
+ import msvcrt
+else:
+ import select
+ import fcntl
+
+ try:
+ fcntl.F_GETFL
+ except AttributeError:
+ fcntl.F_GETFL = 3
+
+ try:
+ fcntl.F_SETFL
+ except AttributeError:
+ fcntl.F_SETFL = 4
+
+
+class Popen(subprocess.Popen):
+ def recv(self, maxsize=None):
+ return self._recv('stdout', maxsize)
+
+ def recv_err(self, maxsize=None):
+ return self._recv('stderr', maxsize)
+
+ def send_recv(self, input='', maxsize=None):
+ return self.send(input), self.recv(maxsize), self.recv_err(maxsize)
+
+ def get_conn_maxsize(self, which, maxsize):
+ if maxsize is None:
+ maxsize = 1024
+ elif maxsize < 1:
+ maxsize = 1
+ return getattr(self, which), maxsize
+
+ def _close(self, which):
+ getattr(self, which).close()
+ setattr(self, which, None)
+
+ if sys.platform == 'win32': # and subprocess.mswindows:
+ def send(self, input):
+ input = to_bytes(input)
+ if not self.stdin:
+ return None
+
+ try:
+ x = msvcrt.get_osfhandle(self.stdin.fileno())
+ (errCode, written) = WriteFile(x, input)
+ except ValueError:
+ return self._close('stdin')
+ except (subprocess.pywintypes.error, Exception) as why:
+ if why.args[0] in (109, errno.ESHUTDOWN):
+ return self._close('stdin')
+ raise
+
+ return written
+
+ def _recv(self, which, maxsize):
+ conn, maxsize = self.get_conn_maxsize(which, maxsize)
+ if conn is None:
+ return None
+
+ try:
+ x = msvcrt.get_osfhandle(conn.fileno())
+ (read, nAvail, nMessage) = PeekNamedPipe(x, 0)
+ if maxsize < nAvail:
+ nAvail = maxsize
+ if nAvail > 0:
+ (errCode, read) = ReadFile(x, nAvail, None)
+ except ValueError:
+ return self._close(which)
+ except (subprocess.pywintypes.error, Exception) as why:
+ if why.args[0] in (109, errno.ESHUTDOWN):
+ return self._close(which)
+ raise
+
+ # if self.universal_newlines:
+ # read = self._translate_newlines(read)
+ return read
+
+ else:
+ def send(self, input):
+ if not self.stdin:
+ return None
+
+ if not select.select([], [self.stdin], [], 0)[1]:
+ return 0
+
+ try:
+ written = os.write(self.stdin.fileno(),
+ bytearray(input, 'utf-8'))
+ except OSError as why:
+ if why.args[0] == errno.EPIPE: # broken pipe
+ return self._close('stdin')
+ raise
+
+ return written
+
+ def _recv(self, which, maxsize):
+ conn, maxsize = self.get_conn_maxsize(which, maxsize)
+ if conn is None:
+ return None
+
+ try:
+ flags = fcntl.fcntl(conn, fcntl.F_GETFL)
+ except TypeError:
+ flags = None
+ else:
+ if not conn.closed:
+ fcntl.fcntl(conn, fcntl.F_SETFL, flags | os.O_NONBLOCK)
+
+ try:
+ if not select.select([conn], [], [], 0)[0]:
+ return ''
+
+ r = conn.read(maxsize)
+ if not r:
+ return self._close(which)
+
+ # if self.universal_newlines:
+ # r = self._translate_newlines(r)
+ return r
+ finally:
+ if not conn.closed and not flags is None:
+ fcntl.fcntl(conn, fcntl.F_SETFL, flags)
+
+
+disconnect_message = "Other end disconnected!"
+
+
+def recv_some(p, t=.1, e=1, tr=5, stderr=0):
+ if tr < 1:
+ tr = 1
+ x = time.time() + t
+ y = []
+ r = ''
+ pr = p.recv
+ if stderr:
+ pr = p.recv_err
+ while time.time() < x or r:
+ r = pr()
+ if r is None:
+ if e:
+ raise Exception(disconnect_message)
+ else:
+ break
+ elif r:
+ y.append(r)
+ else:
+ time.sleep(max((x - time.time()) / tr, 0))
+ return ''.join(y)
+
+
+def send_all(p, data):
+ while len(data):
+ sent = p.send(data)
+ if sent is None:
+ raise Exception(disconnect_message)
+ data = memoryview(data)[sent:]
+
+
+_Cleanup = []
+
+
+def _clean():
+ global _Cleanup
+ cleanlist = [c for c in _Cleanup if c]
+ del _Cleanup[:]
+ cleanlist.reverse()
+ for test in cleanlist:
+ test.cleanup()
+
+
+atexit.register(_clean)
+
+
+class TestCmd(object):
+ """Class TestCmd
+ """
+
+ def __init__(self, description=None,
+ program=None,
+ interpreter=None,
+ workdir=None,
+ subdir=None,
+ verbose=None,
+ match=None,
+ match_stdout=None,
+ match_stderr=None,
+ diff=None,
+ diff_stdout=None,
+ diff_stderr=None,
+ combine=0,
+ universal_newlines=True,
+ timeout=None):
+ self.external = os.environ.get('SCONS_EXTERNAL_TEST', 0)
+ self._cwd = os.getcwd()
+ self.description_set(description)
+ self.program_set(program)
+ self.interpreter_set(interpreter)
+ if verbose is None:
+ try:
+ verbose = max(0, int(os.environ.get('TESTCMD_VERBOSE', 0)))
+ except ValueError:
+ verbose = 0
+ self.verbose_set(verbose)
+ self.combine = combine
+ self.universal_newlines = universal_newlines
+ self.process = None
+ self.set_timeout(timeout)
+ self.set_match_function(match, match_stdout, match_stderr)
+ self.set_diff_function(diff, diff_stdout, diff_stderr)
+ self._dirlist = []
+ self._preserve = {'pass_test': 0, 'fail_test': 0, 'no_result': 0}
+ preserve_value = os.environ.get('PRESERVE', False)
+ if preserve_value not in [0, '0', 'False']:
+ self._preserve['pass_test'] = os.environ['PRESERVE']
+ self._preserve['fail_test'] = os.environ['PRESERVE']
+ self._preserve['no_result'] = os.environ['PRESERVE']
+ else:
+ try:
+ self._preserve['pass_test'] = os.environ['PRESERVE_PASS']
+ except KeyError:
+ pass
+ try:
+ self._preserve['fail_test'] = os.environ['PRESERVE_FAIL']
+ except KeyError:
+ pass
+ try:
+ self._preserve['no_result'] = os.environ['PRESERVE_NO_RESULT']
+ except KeyError:
+ pass
+ self._stdout = []
+ self._stderr = []
+ self.status = None
+ self.condition = 'no_result'
+ self.workdir_set(workdir)
+ self.subdir(subdir)
+ self.fixture_dirs = []
+
+ try:
+ self.fixture_dirs = (os.environ['FIXTURE_DIRS']).split(os.pathsep)
+ except KeyError:
+ pass
+
+
+ def __del__(self):
+ self.cleanup()
+
+ def __repr__(self):
+ return "%x" % id(self)
+
+ banner_char = '='
+ banner_width = 80
+
+ def banner(self, s, width=None):
+ if width is None:
+ width = self.banner_width
+ return s + self.banner_char * (width - len(s))
+
+ escape = staticmethod(escape)
+
+ def canonicalize(self, path):
+ if is_List(path):
+ path = os.path.join(*tuple(path))
+ if not os.path.isabs(path):
+ path = os.path.join(self.workdir, path)
+ return path
+
+ def chmod(self, path, mode):
+ """Changes permissions on the specified file or directory
+ path name."""
+ path = self.canonicalize(path)
+ os.chmod(path, mode)
+
+ def cleanup(self, condition=None):
+ """Removes any temporary working directories for the specified
+ TestCmd environment. If the environment variable PRESERVE was
+ set when the TestCmd environment was created, temporary working
+ directories are not removed. If any of the environment variables
+ PRESERVE_PASS, PRESERVE_FAIL, or PRESERVE_NO_RESULT were set
+ when the TestCmd environment was created, then temporary working
+ directories are not removed if the test passed, failed, or had
+ no result, respectively. Temporary working directories are also
+ preserved for conditions specified via the preserve method.
+
+ Typically, this method is not called directly, but is used when
+ the script exits to clean up temporary working directories as
+ appropriate for the exit status.
+ """
+ if not self._dirlist:
+ return
+ os.chdir(self._cwd)
+ self.workdir = None
+ if condition is None:
+ condition = self.condition
+ if self._preserve[condition]:
+ for dir in self._dirlist:
+ print(u"Preserved directory " + dir)
+ else:
+ list = self._dirlist[:]
+ list.reverse()
+ for dir in list:
+ self.writable(dir, 1)
+ shutil.rmtree(dir, ignore_errors=1)
+ self._dirlist = []
+
+ global _Cleanup
+ if self in _Cleanup:
+ _Cleanup.remove(self)
+
+ def command_args(self, program=None,
+ interpreter=None,
+ arguments=None):
+ if not self.external:
+ if program:
+ if isinstance(program, str) and not os.path.isabs(program):
+ program = os.path.join(self._cwd, program)
+ else:
+ program = self.program
+ if not interpreter:
+ interpreter = self.interpreter
+ else:
+ if not program:
+ program = self.program
+ if not interpreter:
+ interpreter = self.interpreter
+ if not isinstance(program, (list, tuple)):
+ program = [program]
+ cmd = list(program)
+ if interpreter:
+ if not isinstance(interpreter, (list, tuple)):
+ interpreter = [interpreter]
+ cmd = list(interpreter) + cmd
+ if arguments:
+ if isinstance(arguments, str):
+ arguments = arguments.split()
+ cmd.extend(arguments)
+ return cmd
+
+ def description_set(self, description):
+ """Set the description of the functionality being tested.
+ """
+ self.description = description
+
+ def set_diff_function(self, diff=_Null, stdout=_Null, stderr=_Null):
+ """Sets the specified diff functions.
+ """
+ if diff is not _Null:
+ self._diff_function = diff
+ if stdout is not _Null:
+ self._diff_stdout_function = stdout
+ if stderr is not _Null:
+ self._diff_stderr_function = stderr
+
+ def diff(self, a, b, name=None, diff_function=None, *args, **kw):
+ if diff_function is None:
+ try:
+ diff_function = getattr(self, self._diff_function)
+ except TypeError:
+ diff_function = self._diff_function
+ if diff_function is None:
+ diff_function = self.simple_diff
+ if name is not None:
+ print(self.banner(name))
+
+ if not is_List(a):
+ a=a.splitlines()
+ if not is_List(b):
+ b=b.splitlines()
+
+ args = (a, b) + args
+ for line in diff_function(*args, **kw):
+ print(line)
+
+ def diff_stderr(self, a, b, *args, **kw):
+ """Compare actual and expected file contents.
+ """
+ try:
+ diff_stderr_function = getattr(self, self._diff_stderr_function)
+ except TypeError:
+ diff_stderr_function = self._diff_stderr_function
+ return self.diff(a, b, diff_function=diff_stderr_function, *args, **kw)
+
+ def diff_stdout(self, a, b, *args, **kw):
+ """Compare actual and expected file contents.
+ """
+ try:
+ diff_stdout_function = getattr(self, self._diff_stdout_function)
+ except TypeError:
+ diff_stdout_function = self._diff_stdout_function
+ return self.diff(a, b, diff_function=diff_stdout_function, *args, **kw)
+
+ simple_diff = staticmethod(simple_diff)
+
+ diff_re = staticmethod(diff_re)
+
+ context_diff = staticmethod(difflib.context_diff)
+
+ unified_diff = staticmethod(difflib.unified_diff)
+
+ def fail_test(self, condition=1, function=None, skip=0, message=None):
+ """Cause the test to fail.
+ """
+ if not condition:
+ return
+ self.condition = 'fail_test'
+ fail_test(self=self,
+ condition=condition,
+ function=function,
+ skip=skip,
+ message=message)
+
+ def interpreter_set(self, interpreter):
+ """Set the program to be used to interpret the program
+ under test as a script.
+ """
+ self.interpreter = interpreter
+
+ def set_match_function(self, match=_Null, stdout=_Null, stderr=_Null):
+ """Sets the specified match functions.
+ """
+ if match is not _Null:
+ self._match_function = match
+ if stdout is not _Null:
+ self._match_stdout_function = stdout
+ if stderr is not _Null:
+ self._match_stderr_function = stderr
+
+ def match(self, lines, matches):
+ """Compare actual and expected file contents.
+ """
+ try:
+ match_function = getattr(self, self._match_function)
+ except TypeError:
+ match_function = self._match_function
+ if match_function is None:
+ # Default is regular expression matches.
+ match_function = self.match_re
+ return match_function(lines, matches)
+
+ def match_stderr(self, lines, matches):
+ """Compare actual and expected file contents.
+ """
+ try:
+ match_stderr_function = getattr(self, self._match_stderr_function)
+ except TypeError:
+ match_stderr_function = self._match_stderr_function
+ if match_stderr_function is None:
+ # Default is to use whatever match= is set to.
+ match_stderr_function = self.match
+ return match_stderr_function(lines, matches)
+
+ def match_stdout(self, lines, matches):
+ """Compare actual and expected file contents.
+ """
+ try:
+ match_stdout_function = getattr(self, self._match_stdout_function)
+ except TypeError:
+ match_stdout_function = self._match_stdout_function
+ if match_stdout_function is None:
+ # Default is to use whatever match= is set to.
+ match_stdout_function = self.match
+ return match_stdout_function(lines, matches)
+
+ match_exact = staticmethod(match_exact)
+
+ match_caseinsensitive = staticmethod(match_caseinsensitive)
+
+ match_re = staticmethod(match_re)
+
+ match_re_dotall = staticmethod(match_re_dotall)
+
+ def no_result(self, condition=1, function=None, skip=0):
+ """Report that the test could not be run.
+ """
+ if not condition:
+ return
+ self.condition = 'no_result'
+ no_result(self=self,
+ condition=condition,
+ function=function,
+ skip=skip)
+
+ def pass_test(self, condition=1, function=None):
+ """Cause the test to pass.
+ """
+ if not condition:
+ return
+ self.condition = 'pass_test'
+ pass_test(self=self, condition=condition, function=function)
+
+ def preserve(self, *conditions):
+ """Arrange for the temporary working directories for the
+ specified TestCmd environment to be preserved for one or more
+ conditions. If no conditions are specified, arranges for
+ the temporary working directories to be preserved for all
+ conditions.
+ """
+ if not conditions:
+ conditions = ('pass_test', 'fail_test', 'no_result')
+ for cond in conditions:
+ self._preserve[cond] = 1
+
+ def program_set(self, program):
+ """Set the executable program or script to be tested.
+ """
+ if not self.external:
+ if program and not os.path.isabs(program):
+ program = os.path.join(self._cwd, program)
+ self.program = program
+
+ def read(self, file, mode='rb', newline=None):
+ """Reads and returns the contents of the specified file name.
+
+ The file name may be a list, in which case the elements are
+ concatenated with the os.path.join() method. The file is
+ assumed to be under the temporary working directory unless it
+ is an absolute path name. The I/O mode for the file may
+ be specified; it must begin with an 'r'. The default is
+ 'rb' (binary read).
+ """
+ file = self.canonicalize(file)
+ if mode[0] != 'r':
+ raise ValueError("mode must begin with 'r'")
+ if IS_PY3 and 'b' not in mode:
+ with open(file, mode, newline=newline) as f:
+ return f.read()
+ else:
+ with open(file, mode) as f:
+ return f.read()
+
+ def rmdir(self, dir):
+ """Removes the specified dir name.
+
+ The dir name may be a list, in which case the elements are
+ concatenated with the os.path.join() method. The dir is
+ assumed to be under the temporary working directory unless it
+ is an absolute path name.
+ The dir must be empty.
+ """
+ dir = self.canonicalize(dir)
+ os.rmdir(dir)
+
+ def _timeout(self):
+ self.process.terminate()
+ self.timer.cancel()
+ self.timer = None
+
+ def set_timeout(self, timeout):
+ self.timeout = timeout
+ self.timer = None
+
+ def parse_path(self, path, suppress_current=False):
+ """Return a list with the single path components of path.
+ """
+ head, tail = os.path.split(path)
+ result = []
+ if not tail:
+ if head == path:
+ return [head]
+ else:
+ result.append(tail)
+ head, tail = os.path.split(head)
+ while head and tail:
+ result.append(tail)
+ head, tail = os.path.split(head)
+ result.append(head or tail)
+ result.reverse()
+
+ return result
+
+ def dir_fixture(self, srcdir, dstdir=None):
+ """Copies the contents of the specified folder srcdir from
+ the directory of the called script, to the current
+ working directory.
+
+ The srcdir name may be a list, in which case the elements are
+ concatenated with the os.path.join() method. The dstdir is
+ assumed to be under the temporary working directory, it gets
+ created automatically, if it does not already exist.
+ """
+
+ if srcdir and self.fixture_dirs and not os.path.isabs(srcdir):
+ for dir in self.fixture_dirs:
+ spath = os.path.join(dir, srcdir)
+ if os.path.isdir(spath):
+ break
+ else:
+ spath = srcdir
+
+ if dstdir:
+ dstdir = self.canonicalize(dstdir)
+ else:
+ dstdir = '.'
+
+ if dstdir != '.' and not os.path.exists(dstdir):
+ dstlist = self.parse_path(dstdir)
+ if len(dstlist) > 0 and dstlist[0] == ".":
+ dstlist = dstlist[1:]
+ for idx in range(len(dstlist)):
+ self.subdir(dstlist[:idx + 1])
+
+ if dstdir and self.workdir:
+ dstdir = os.path.join(self.workdir, dstdir)
+
+ for entry in os.listdir(spath):
+ epath = os.path.join(spath, entry)
+ dpath = os.path.join(dstdir, entry)
+ if os.path.isdir(epath):
+ # Copy the subfolder
+ shutil.copytree(epath, dpath)
+ else:
+ shutil.copy(epath, dpath)
+
+ def file_fixture(self, srcfile, dstfile=None):
+ """Copies the file srcfile from the directory of
+ the called script, to the current working directory.
+
+ The dstfile is assumed to be under the temporary working
+ directory unless it is an absolute path name.
+ If dstfile is specified its target directory gets created
+ automatically, if it does not already exist.
+ """
+ srcpath, srctail = os.path.split(srcfile)
+
+ if srcpath and (not self.fixture_dirs or os.path.isabs(srcpath)):
+ spath = srcfile
+ else:
+ for dir in self.fixture_dirs:
+ spath = os.path.join(dir, srcfile)
+ if os.path.isfile(spath):
+ break
+
+ if not dstfile:
+ if srctail:
+ dpath = os.path.join(self.workdir, srctail)
+ else:
+ return
+ else:
+ dstpath, dsttail = os.path.split(dstfile)
+ if dstpath:
+ if not os.path.exists(os.path.join(self.workdir, dstpath)):
+ dstlist = self.parse_path(dstpath)
+ if len(dstlist) > 0 and dstlist[0] == ".":
+ dstlist = dstlist[1:]
+ for idx in range(len(dstlist)):
+ self.subdir(dstlist[:idx + 1])
+
+ dpath = os.path.join(self.workdir, dstfile)
+ shutil.copy(spath, dpath)
+
+ def start(self, program=None,
+ interpreter=None,
+ arguments=None,
+ universal_newlines=None,
+ timeout=_Null,
+ **kw):
+ """
+ Starts a program or script for the test environment.
+
+ The specified program will have the original directory
+ prepended unless it is enclosed in a [list].
+ """
+ cmd = self.command_args(program, interpreter, arguments)
+ if self.verbose:
+ cmd_string = ' '.join([self.escape(c) for c in cmd])
+ sys.stderr.write(cmd_string + "\n")
+ if universal_newlines is None:
+ universal_newlines = self.universal_newlines
+
+ # On Windows, if we make stdin a pipe when we plan to send
+ # no input, and the test program exits before
+ # Popen calls msvcrt.open_osfhandle, that call will fail.
+ # So don't use a pipe for stdin if we don't need one.
+ stdin = kw.get('stdin', None)
+ if stdin is not None:
+ stdin = subprocess.PIPE
+
+ combine = kw.get('combine', self.combine)
+ if combine:
+ stderr_value = subprocess.STDOUT
+ else:
+ stderr_value = subprocess.PIPE
+
+ if timeout is _Null:
+ timeout = self.timeout
+ if timeout:
+ self.timer = threading.Timer(float(timeout), self._timeout)
+ self.timer.start()
+
+ if IS_PY3 and sys.platform == 'win32':
+ # Set this, otherwise the stdout/stderr pipes default to the
+ # Windows default locale (cp1252), which will throw an exception
+ # when non-ASCII characters are used.
+ # For example test/Install/non-ascii-name.py
+ os.environ['PYTHONIOENCODING'] = 'utf-8'
+
+ # It seems that all pythons up to py3.6 still set text mode if you set encoding.
+ # TODO: File enhancement request on python to propagate universal_newlines even
+ # if encoding is set.
+ p = Popen(cmd,
+ stdin=stdin,
+ stdout=subprocess.PIPE,
+ stderr=stderr_value,
+ env=os.environ,
+ universal_newlines=False)
+
+ self.process = p
+ return p
+
+ @staticmethod
+ def fix_binary_stream(stream):
+ """
+ Handle stdout/stderr from popen when we specify universal_newlines = False.
+
+ This will read from the pipes in binary mode, not decode the output,
+ and not convert line endings to \n.
+ We do this because in py3 (3.5) with universal_newlines=True, it will
+ choose the default system locale to decode the output, and this breaks unicode
+ output. Specifically breaking test/option--tree.py which outputs a unicode char.
+
+ py 3.6 allows us to pass an encoding param to popen thus not requiring the decode
+ nor end of line handling, because we propagate universal_newlines as specified.
+
+ TODO: Do we need to pass universal newlines into this function?
+ """
+
+ if not stream:
+ return stream
+ # TODO: Run full tests on both platforms and see if this fixes failures
+ # It seems that py3.6 still sets text mode if you set encoding.
+ elif sys.version_info[0] == 3: # TODO and sys.version_info[1] < 6:
+ stream = stream.decode('utf-8')
+ stream = stream.replace('\r\n', '\n')
+ elif sys.version_info[0] == 2:
+ stream = stream.replace('\r\n', '\n')
+
+ return stream
+
+
+ def finish(self, popen=None, **kw):
+ """
+ Finishes and waits for the process being run under control of
+ the specified popen argument, recording the exit status,
+ standard output and error output.
+ """
+ if popen is None:
+ popen = self.process
+ stdout, stderr = popen.communicate()
+
+ stdout = self.fix_binary_stream(stdout)
+ stderr = self.fix_binary_stream(stderr)
+
+ if self.timer:
+ self.timer.cancel()
+ self.timer = None
+ self.status = popen.returncode
+ self.process = None
+ self._stdout.append(stdout or '')
+ self._stderr.append(stderr or '')
+
+ def run(self, program=None,
+ interpreter=None,
+ arguments=None,
+ chdir=None,
+ stdin=None,
+ universal_newlines=None,
+ timeout=_Null):
+ """Runs a test of the program or script for the test
+ environment. Standard output and error output are saved for
+ future retrieval via the stdout() and stderr() methods.
+
+ The specified program will have the original directory
+ prepended unless it is enclosed in a [list].
+ """
+ if self.external:
+ if not program:
+ program = self.program
+ if not interpreter:
+ interpreter = self.interpreter
+
+ if universal_newlines is None:
+ universal_newlines = self.universal_newlines
+
+ if chdir:
+ oldcwd = os.getcwd()
+ if not os.path.isabs(chdir):
+ chdir = os.path.join(self.workpath(chdir))
+ if self.verbose:
+ sys.stderr.write("chdir(" + chdir + ")\n")
+ os.chdir(chdir)
+ p = self.start(program=program,
+ interpreter=interpreter,
+ arguments=arguments,
+ universal_newlines=universal_newlines,
+ timeout=timeout,
+ stdin=stdin)
+ if is_List(stdin):
+ stdin = ''.join(stdin)
+
+ if stdin and IS_PY3:# and sys.version_info[1] < 6:
+ stdin = to_bytes(stdin)
+
+ # TODO(sgk): figure out how to re-use the logic in the .finish()
+ # method above. Just calling it from here causes problems with
+ # subclasses that redefine .finish(). We could abstract this
+ # into Yet Another common method called both here and by .finish(),
+ # but that seems ill-thought-out.
+ stdout, stderr = p.communicate(input=stdin)
+ if self.timer:
+ self.timer.cancel()
+ self.timer = None
+ self.status = p.returncode
+ self.process = None
+
+ stdout = self.fix_binary_stream(stdout)
+ stderr = self.fix_binary_stream(stderr)
+
+
+ self._stdout.append(stdout or '')
+ self._stderr.append(stderr or '')
+
+ if chdir:
+ os.chdir(oldcwd)
+ if self.verbose >= 2:
+ write = sys.stdout.write
+ write('============ STATUS: %d\n' % self.status)
+ out = self.stdout()
+ if out or self.verbose >= 3:
+ write('============ BEGIN STDOUT (len=%d):\n' % len(out))
+ write(out)
+ write('============ END STDOUT\n')
+ err = self.stderr()
+ if err or self.verbose >= 3:
+ write('============ BEGIN STDERR (len=%d)\n' % len(err))
+ write(err)
+ write('============ END STDERR\n')
+
+ def sleep(self, seconds=default_sleep_seconds):
+ """Sleeps at least the specified number of seconds. If no
+ number is specified, sleeps at least the minimum number of
+ seconds necessary to advance file time stamps on the current
+ system. Sleeping more seconds is all right.
+ """
+ time.sleep(seconds)
+
+ def stderr(self, run=None):
+ """Returns the error output from the specified run number.
+ If there is no specified run number, then returns the error
+ output of the last run. If the run number is less than zero,
+ then returns the error output from that many runs back from the
+ current run.
+ """
+ if not run:
+ run = len(self._stderr)
+ elif run < 0:
+ run = len(self._stderr) + run
+ run = run - 1
+ return self._stderr[run]
+
+ def stdout(self, run=None):
+ """
+ Returns the stored standard output from a given run.
+
+ Args:
+ run: run number to select. If run number is omitted,
+ return the standard output of the most recent run.
+ If negative, use as a relative offset, so that -2
+ means the run two prior to the most recent.
+
+ Returns:
+ selected stdout string or None if there are no
+ stored runs.
+ """
+ if not run:
+ run = len(self._stdout)
+ elif run < 0:
+ run = len(self._stdout) + run
+ run = run - 1
+ try:
+ return self._stdout[run]
+ except IndexError:
+ return None
+
+ def subdir(self, *subdirs):
+ """Create new subdirectories under the temporary working
+ directory, one for each argument. An argument may be a list,
+ in which case the list elements are concatenated using the
+ os.path.join() method. Subdirectories multiple levels deep
+ must be created using a separate argument for each level:
+
+ test.subdir('sub', ['sub', 'dir'], ['sub', 'dir', 'ectory'])
+
+ Returns the number of subdirectories actually created.
+ """
+ count = 0
+ for sub in subdirs:
+ if sub is None:
+ continue
+ if is_List(sub):
+ sub = os.path.join(*tuple(sub))
+ new = os.path.join(self.workdir, sub)
+ try:
+ os.mkdir(new)
+ except OSError as e:
+ print("Got error creating dir: %s :%s" % (sub, e))
+ pass
+ else:
+ count = count + 1
+ return count
+
+ def symlink(self, target, link):
+ """Creates a symlink to the specified target.
+ The link name may be a list, in which case the elements are
+ concatenated with the os.path.join() method. The link is
+ assumed to be under the temporary working directory unless it
+ is an absolute path name. The target is *not* assumed to be
+ under the temporary working directory.
+ """
+ if sys.platform == 'win32':
+ # Skip this on windows as we're not enabling it due to
+ # it requiring user permissions which aren't always present
+ # and we don't have a good way to detect those permissions yet.
+ return
+ link = self.canonicalize(link)
+ try:
+ os.symlink(target, link)
+ except AttributeError:
+ pass # Windows has no symlink
+
+ def tempdir(self, path=None):
+ """Creates a temporary directory.
+ A unique directory name is generated if no path name is specified.
+ The directory is created, and will be removed when the TestCmd
+ object is destroyed.
+ """
+ if path is None:
+ try:
+ path = tempfile.mkdtemp(prefix=testprefix)
+ except TypeError:
+ path = tempfile.mkdtemp()
+ else:
+ os.mkdir(path)
+
+ # Symlinks in the path will report things
+ # differently from os.getcwd(), so chdir there
+ # and back to fetch the canonical path.
+ cwd = os.getcwd()
+ try:
+ os.chdir(path)
+ path = os.getcwd()
+ finally:
+ os.chdir(cwd)
+
+ # Uppercase the drive letter since the case of drive
+ # letters is pretty much random on win32:
+ drive, rest = os.path.splitdrive(path)
+ if drive:
+ path = drive.upper() + rest
+
+ #
+ self._dirlist.append(path)
+
+ global _Cleanup
+ if self not in _Cleanup:
+ _Cleanup.append(self)
+
+ return path
+
+ def touch(self, path, mtime=None):
+ """Updates the modification time on the specified file or
+ directory path name. The default is to update to the
+ current time if no explicit modification time is specified.
+ """
+ path = self.canonicalize(path)
+ atime = os.path.getatime(path)
+ if mtime is None:
+ mtime = time.time()
+ os.utime(path, (atime, mtime))
+
+ def unlink(self, file):
+ """Unlinks the specified file name.
+ The file name may be a list, in which case the elements are
+ concatenated with the os.path.join() method. The file is
+ assumed to be under the temporary working directory unless it
+ is an absolute path name.
+ """
+ file = self.canonicalize(file)
+ os.unlink(file)
+
+ def verbose_set(self, verbose):
+ """Set the verbose level.
+ """
+ self.verbose = verbose
+
+ def where_is(self, file, path=None, pathext=None):
+ """Find an executable file.
+ """
+ if is_List(file):
+ file = os.path.join(*tuple(file))
+ if not os.path.isabs(file):
+ file = where_is(file, path, pathext)
+ return file
+
+ def workdir_set(self, path):
+ """Creates a temporary working directory with the specified
+ path name. If the path is a null string (''), a unique
+ directory name is created.
+ """
+ if path is not None:
+ if path == '':
+ path = None
+ path = self.tempdir(path)
+ self.workdir = path
+
+ def workpath(self, *args):
+ """Returns the absolute path name to a subdirectory or file
+ within the current temporary working directory. Concatenates
+ the temporary working directory name with the specified
+ arguments using the os.path.join() method.
+ """
+ return os.path.join(self.workdir, *tuple(args))
+
+ def readable(self, top, read=1):
+ """Make the specified directory tree readable (read == 1)
+ or not (read == None).
+
+ This method has no effect on Windows systems, which use a
+ completely different mechanism to control file readability.
+ """
+
+ if sys.platform == 'win32':
+ return
+
+ if read:
+ def do_chmod(fname):
+ try:
+ st = os.stat(fname)
+ except OSError:
+ pass
+ else:
+ os.chmod(fname, stat.S_IMODE(
+ st[stat.ST_MODE] | stat.S_IREAD))
+ else:
+ def do_chmod(fname):
+ try:
+ st = os.stat(fname)
+ except OSError:
+ pass
+ else:
+ os.chmod(fname, stat.S_IMODE(
+ st[stat.ST_MODE] & ~stat.S_IREAD))
+
+ if os.path.isfile(top):
+ # If it's a file, that's easy, just chmod it.
+ do_chmod(top)
+ elif read:
+ # It's a directory and we're trying to turn on read
+ # permission, so it's also pretty easy, just chmod the
+ # directory and then chmod every entry on our walk down the
+ # tree.
+ do_chmod(top)
+ for dirpath, dirnames, filenames in os.walk(top):
+ for name in dirnames + filenames:
+ do_chmod(os.path.join(dirpath, name))
+ else:
+ # It's a directory and we're trying to turn off read
+ # permission, which means we have to chmod the directories
+ # in the tree bottom-up, lest disabling read permission from
+ # the top down get in the way of being able to get at lower
+ # parts of the tree.
+ for dirpath, dirnames, filenames in os.walk(top, topdown=0):
+ for name in dirnames + filenames:
+ do_chmod(os.path.join(dirpath, name))
+ do_chmod(top)
+
+ def writable(self, top, write=1):
+ """Make the specified directory tree writable (write == 1)
+ or not (write == None).
+ """
+
+ if sys.platform == 'win32':
+
+ if write:
+ def do_chmod(fname):
+ try:
+ os.chmod(fname, stat.S_IWRITE)
+ except OSError:
+ pass
+ else:
+ def do_chmod(fname):
+ try:
+ os.chmod(fname, stat.S_IREAD)
+ except OSError:
+ pass
+
+ else:
+
+ if write:
+ def do_chmod(fname):
+ try:
+ st = os.stat(fname)
+ except OSError:
+ pass
+ else:
+ os.chmod(fname, stat.S_IMODE(st[stat.ST_MODE] | 0o200))
+ else:
+ def do_chmod(fname):
+ try:
+ st = os.stat(fname)
+ except OSError:
+ pass
+ else:
+ os.chmod(fname, stat.S_IMODE(
+ st[stat.ST_MODE] & ~0o200))
+
+ if os.path.isfile(top):
+ do_chmod(top)
+ else:
+ do_chmod(top)
+ for dirpath, dirnames, filenames in os.walk(top, topdown=0):
+ for name in dirnames + filenames:
+ do_chmod(os.path.join(dirpath, name))
+
+ def executable(self, top, execute=1):
+ """Make the specified directory tree executable (execute == 1)
+ or not (execute == None).
+
+ This method has no effect on Windows systems, which use a
+ completely different mechanism to control file executability.
+ """
+
+ if sys.platform == 'win32':
+ return
+
+ if execute:
+ def do_chmod(fname):
+ try:
+ st = os.stat(fname)
+ except OSError:
+ pass
+ else:
+ os.chmod(fname, stat.S_IMODE(
+ st[stat.ST_MODE] | stat.S_IEXEC))
+ else:
+ def do_chmod(fname):
+ try:
+ st = os.stat(fname)
+ except OSError:
+ pass
+ else:
+ os.chmod(fname, stat.S_IMODE(
+ st[stat.ST_MODE] & ~stat.S_IEXEC))
+
+ if os.path.isfile(top):
+ # If it's a file, that's easy, just chmod it.
+ do_chmod(top)
+ elif execute:
+ # It's a directory and we're trying to turn on execute
+ # permission, so it's also pretty easy, just chmod the
+ # directory and then chmod every entry on our walk down the
+ # tree.
+ do_chmod(top)
+ for dirpath, dirnames, filenames in os.walk(top):
+ for name in dirnames + filenames:
+ do_chmod(os.path.join(dirpath, name))
+ else:
+ # It's a directory and we're trying to turn off execute
+ # permission, which means we have to chmod the directories
+ # in the tree bottom-up, lest disabling execute permission from
+ # the top down get in the way of being able to get at lower
+ # parts of the tree.
+ for dirpath, dirnames, filenames in os.walk(top, topdown=0):
+ for name in dirnames + filenames:
+ do_chmod(os.path.join(dirpath, name))
+ do_chmod(top)
+
+ def write(self, file, content, mode='wb'):
+ """Writes the specified content text (second argument) to the
+ specified file name (first argument). The file name may be
+ a list, in which case the elements are concatenated with the
+ os.path.join() method. The file is created under the temporary
+ working directory. Any subdirectories in the path must already
+ exist. The I/O mode for the file may be specified; it must
+ begin with a 'w'. The default is 'wb' (binary write).
+ """
+ file = self.canonicalize(file)
+ if mode[0] != 'w':
+ raise ValueError("mode must begin with 'w'")
+ with open(file, mode) as f:
+ try:
+ f.write(content)
+ except TypeError:
+ # Python 3 strings are unicode, not bytes; encode as UTF-8 for binary-mode writes.
+ f.write(bytes(content, 'utf-8'))
+
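+ # Illustrative sketch (hypothetical usage; 'script_text' is a placeholder and
+ # the 'sub dir' subdirectory is assumed to exist already):
+ #
+ #     test.write('file1', "Test file #1\n")            # str content, default 'wb'
+ #     test.write(['sub dir', 'script'], script_text)   # list form joins the path
+ #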
+# Local Variables:
+# tab-width:4
+# indent-tabs-mode:nil
+# End:
+# vim: set expandtab tabstop=4 shiftwidth=4:
diff --git a/testing/framework/TestCmdTests.py b/testing/framework/TestCmdTests.py
new file mode 100644
index 0000000..ef76228
--- /dev/null
+++ b/testing/framework/TestCmdTests.py
@@ -0,0 +1,3418 @@
+#!/usr/bin/env python
+"""
+TestCmdTests.py: Unit tests for the TestCmd.py module.
+
+Copyright 2000-2010 Steven Knight
+This module is free software, and you may redistribute it and/or modify
+it under the same terms as Python itself, so long as this copyright message
+and disclaimer are retained in their original form.
+
+IN NO EVENT SHALL THE AUTHOR BE LIABLE TO ANY PARTY FOR DIRECT, INDIRECT,
+SPECIAL, INCIDENTAL, OR CONSEQUENTIAL DAMAGES ARISING OUT OF THE USE OF
+THIS CODE, EVEN IF THE AUTHOR HAS BEEN ADVISED OF THE POSSIBILITY OF SUCH
+DAMAGE.
+
+THE AUTHOR SPECIFICALLY DISCLAIMS ANY WARRANTIES, INCLUDING, BUT NOT
+LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A
+PARTICULAR PURPOSE. THE CODE PROVIDED HEREUNDER IS ON AN "AS IS" BASIS,
+AND THERE IS NO OBLIGATION WHATSOEVER TO PROVIDE MAINTENANCE,
+SUPPORT, UPDATES, ENHANCEMENTS, OR MODIFICATIONS.
+"""
+
+__author__ = "Steven Knight <knight at baldmt dot com>"
+__revision__ = "TestCmdTests.py 1.3.D001 2010/06/03 12:58:27 knight"
+
+import os
+import shutil
+import signal
+import stat
+try:
+ from cStringIO import StringIO
+except ImportError:
+ from io import StringIO
+from contextlib import closing
+import sys
+import tempfile
+import time
+import types
+import unittest
+try:
+ from collections import UserList
+except ImportError:
+ from UserList import UserList
+
+from SCons.Util import to_bytes, to_str
+
+
+# Strip the current directory so we get the right TestCmd.py module.
+sys.path = sys.path[1:]
+
+import TestCmd
+
+def _is_readable(path):
+ # XXX this doesn't take into account UID, it assumes it's our file
+ return os.stat(path)[stat.ST_MODE] & stat.S_IREAD
+
+def _is_writable(path):
+ # XXX this doesn't take into account UID, it assumes it's our file
+ return os.stat(path)[stat.ST_MODE] & stat.S_IWRITE
+
+def _is_executable(path):
+ # XXX this doesn't take into account UID, it assumes it's our file
+ return os.stat(path)[stat.ST_MODE] & stat.S_IEXEC
+
+def _clear_dict(d, *keys):
+ for key in keys:
+ try:
+ del d[key]
+ except KeyError:
+ pass
+
+import subprocess
+
+try:
+ subprocess.Popen.terminate
+except AttributeError:
+ if sys.platform == 'win32':
+ import win32process
+ def terminate(self):
+ win32process.TerminateProcess(self._handle, 1)
+ else:
+ def terminate(self):
+ os.kill(self.pid, signal.SIGTERM)
+ method = types.MethodType(terminate, None, subprocess.Popen)
+ setattr(subprocess.Popen, 'terminate', method)
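+# The shim above backfills Popen.terminate() on interpreters that predate it,
+# so test code can call it uniformly. Illustrative sketch (hypothetical usage):
+#
+#     p = subprocess.Popen([sys.executable, '-c', 'import time; time.sleep(60)'])
+#     p.terminate()
+#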
+
+class ExitError(Exception):
+ pass
+
+class TestCmdTestCase(unittest.TestCase):
+ """Base class for TestCmd test cases, with fixture and utility methods."""
+
+ def setUp(self):
+ self.orig_cwd = os.getcwd()
+
+ def tearDown(self):
+ os.chdir(self.orig_cwd)
+
+ def setup_run_scripts(self):
+ class T:
+ pass
+
+ t = T()
+
+ t.script = 'script'
+ t.scriptx = 'scriptx.bat'
+ t.script1 = 'script_1.txt'
+ t.scriptout = 'scriptout'
+ t.scripterr = 'scripterr'
+ fmt = "import os, sys; cwd = os.getcwd(); " + \
+ "sys.stdout.write('%s: STDOUT: %%s: %%s\\n' %% (cwd, sys.argv[1:])); " + \
+ "sys.stderr.write('%s: STDERR: %%s: %%s\\n' %% (cwd, sys.argv[1:]))"
+ fmtout = "import os, sys; cwd = os.getcwd(); " + \
+ "sys.stdout.write('%s: STDOUT: %%s: %%s\\n' %% (cwd, sys.argv[1:]))"
+ fmterr = "import os, sys; cwd = os.getcwd(); " + \
+ "sys.stderr.write('%s: STDERR: %%s: %%s\\n' %% (cwd, sys.argv[1:]))"
+ text = fmt % (t.script, t.script)
+ textx = fmt % (t.scriptx, t.scriptx)
+ if sys.platform == 'win32':
+ textx = textx.replace('%', '%%')
+ textx = '@python -c "%s"' % textx + ' %1 %2 %3 %4 %5 %6 %7 %8 %9\n'
+ else:
+ textx = '#! /usr/bin/env python\n' + textx + '\n'
+ text1 = 'A first line to be ignored!\n' + fmt % (t.script1, t.script1)
+ textout = fmtout % (t.scriptout)
+ texterr = fmterr % (t.scripterr)
+
+ run_env = TestCmd.TestCmd(workdir = '')
+ run_env.subdir('sub dir')
+ t.run_env = run_env
+
+ t.sub_dir = run_env.workpath('sub dir')
+ t.script_path = run_env.workpath('sub dir', t.script)
+ t.scriptx_path = run_env.workpath('sub dir', t.scriptx)
+ t.script1_path = run_env.workpath('sub dir', t.script1)
+ t.scriptout_path = run_env.workpath('sub dir', t.scriptout)
+ t.scripterr_path = run_env.workpath('sub dir', t.scripterr)
+
+ run_env.write(t.script_path, text)
+ run_env.write(t.scriptx_path, textx)
+ run_env.write(t.script1_path, text1)
+ run_env.write(t.scriptout_path, textout)
+ run_env.write(t.scripterr_path, texterr)
+
+ os.chmod(t.script_path, 0o644) # XXX UNIX-specific
+ os.chmod(t.scriptx_path, 0o755) # XXX UNIX-specific
+ os.chmod(t.script1_path, 0o644) # XXX UNIX-specific
+ os.chmod(t.scriptout_path, 0o644) # XXX UNIX-specific
+ os.chmod(t.scripterr_path, 0o644) # XXX UNIX-specific
+
+ t.orig_cwd = os.getcwd()
+
+ t.workdir = run_env.workpath('sub dir')
+ os.chdir(t.workdir)
+
+ return t
+
+ def translate_newlines(self, data):
+ data = data.replace("\r\n", "\n")
+ return data
+
+ def call_python(self, input, python=None):
+ if python is None:
+ python = sys.executable
+ p = subprocess.Popen(python,
+ stdin=subprocess.PIPE,
+ stderr=subprocess.PIPE,
+ stdout=subprocess.PIPE)
+ stdout, stderr = p.communicate(to_bytes(input))
+ stdout = self.translate_newlines(to_str(stdout))
+ stderr = self.translate_newlines(to_str(stderr))
+ return stdout, stderr, p.returncode
+
+ def popen_python(self, input, status=0, stdout="", stderr="", python=None):
+ if python is None:
+ python = sys.executable
+ _stdout, _stderr, _status = self.call_python(input, python)
+ _stdout = self.translate_newlines(_stdout)
+ _stderr = self.translate_newlines(_stderr)
+ assert _status == status, \
+ "status = %s, expected %s\n" % (str(_status), str(status)) + \
+ "STDOUT ===================\n" + _stdout + \
+ "STDERR ===================\n" + _stderr
+ assert _stdout == stdout, \
+ "Expected STDOUT ==========\n" + stdout + \
+ "Actual STDOUT ============\n" + _stdout + \
+ "STDERR ===================\n" + _stderr
+ assert _stderr == stderr, \
+ "Expected STDERR ==========\n" + stderr + \
+ "Actual STDERR ============\n" + _stderr
+
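+ # Illustrative sketch (hypothetical usage): popen_python() feeds a script to a
+ # child interpreter on stdin and asserts on its exit status, stdout and stderr.
+ #
+ #     self.popen_python("import sys; sys.stdout.write('hi\\n')",
+ #                       status=0, stdout='hi\n')
+ #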
+ def run_match(self, content, *args):
+ expect = "%s: %s: %s: %s\n" % args
+ content = self.translate_newlines(to_str(content))
+ assert content == expect, \
+ "Expected %s ==========\n" % args[1] + expect + \
+ "Actual %s ============\n" % args[1] + content
+
+
+
+class __init__TestCase(TestCmdTestCase):
+ def test_init(self):
+ """Test init()"""
+ test = TestCmd.TestCmd()
+ test = TestCmd.TestCmd(description = 'test')
+ test = TestCmd.TestCmd(description = 'test', program = 'foo')
+ test = TestCmd.TestCmd(description = 'test',
+ program = 'foo',
+ universal_newlines=None)
+
+
+
+class basename_TestCase(TestCmdTestCase):
+ def test_basename(self):
+ """Test basename() [XXX TO BE WRITTEN]"""
+ assert 1 == 1
+
+
+
+class cleanup_TestCase(TestCmdTestCase):
+ def test_cleanup(self):
+ """Test cleanup()"""
+ test = TestCmd.TestCmd(workdir = '')
+ wdir = test.workdir
+ test.write('file1', "Test file #1\n")
+ test.cleanup()
+ assert not os.path.exists(wdir)
+
+ def test_writable(self):
+ """Test cleanup() when the directory isn't writable"""
+ test = TestCmd.TestCmd(workdir = '')
+ wdir = test.workdir
+ test.write('file2', "Test file #2\n")
+ os.chmod(test.workpath('file2'), 0o400)
+ os.chmod(wdir, 0o500)
+ test.cleanup()
+ assert not os.path.exists(wdir)
+
+ def test_shutil(self):
+ """Test cleanup() when used with shutil"""
+ test = TestCmd.TestCmd(workdir = '')
+ wdir = test.workdir
+ os.chdir(wdir)
+
+ save_rmtree = shutil.rmtree
+ def my_rmtree(dir, ignore_errors=0, wdir=wdir, _rmtree=save_rmtree):
+ assert os.getcwd() != wdir
+ return _rmtree(dir, ignore_errors=ignore_errors)
+ try:
+ shutil.rmtree = my_rmtree
+ test.cleanup()
+ finally:
+ shutil.rmtree = save_rmtree
+
+ def test_atexit(self):
+ """Test cleanup() when atexit is used"""
+ self.popen_python("""from __future__ import print_function
+import sys
+sys.path = ['%s'] + sys.path
+import atexit
+def my_exitfunc():
+ print("my_exitfunc()")
+atexit.register(my_exitfunc)
+import TestCmd
+result = TestCmd.TestCmd(workdir = '')
+sys.exit(0)
+""" % self.orig_cwd, stdout='my_exitfunc()\n')
+
+ @unittest.skipIf(TestCmd.IS_PY3, "No sys.exitfunc in Python 3")
+ def test_exitfunc(self):
+ """Test cleanup() when sys.exitfunc is set"""
+ self.popen_python("""from __future__ import print_function
+import sys
+sys.path = ['%s'] + sys.path
+def my_exitfunc():
+ print("my_exitfunc()")
+sys.exitfunc = my_exitfunc
+import TestCmd
+result = TestCmd.TestCmd(workdir = '')
+sys.exit(0)
+""" % self.orig_cwd, stdout='my_exitfunc()\n')
+
+
+class chmod_TestCase(TestCmdTestCase):
+ def test_chmod(self):
+ """Test chmod()"""
+ test = TestCmd.TestCmd(workdir = '', subdir = 'sub')
+
+ wdir_file1 = os.path.join(test.workdir, 'file1')
+ wdir_sub_file2 = os.path.join(test.workdir, 'sub', 'file2')
+
+ with open(wdir_file1, 'w') as f:
+ f.write("")
+ with open(wdir_sub_file2, 'w') as f:
+ f.write("")
+
+ if sys.platform == 'win32':
+
+ test.chmod(wdir_file1, stat.S_IREAD)
+ test.chmod(['sub', 'file2'], stat.S_IWRITE)
+
+ file1_mode = stat.S_IMODE(os.stat(wdir_file1)[stat.ST_MODE])
+ assert file1_mode == 0o444, '0%o' % file1_mode
+ file2_mode = stat.S_IMODE(os.stat(wdir_sub_file2)[stat.ST_MODE])
+ assert file2_mode == 0o666, '0%o' % file2_mode
+
+ test.chmod('file1', stat.S_IWRITE)
+ test.chmod(wdir_sub_file2, stat.S_IREAD)
+
+ file1_mode = stat.S_IMODE(os.stat(wdir_file1)[stat.ST_MODE])
+ assert file1_mode == 0o666, '0%o' % file1_mode
+ file2_mode = stat.S_IMODE(os.stat(wdir_sub_file2)[stat.ST_MODE])
+ assert file2_mode == 0o444, '0%o' % file2_mode
+
+ else:
+
+ test.chmod(wdir_file1, 0o700)
+ test.chmod(['sub', 'file2'], 0o760)
+
+ file1_mode = stat.S_IMODE(os.stat(wdir_file1)[stat.ST_MODE])
+ assert file1_mode == 0o700, '0%o' % file1_mode
+ file2_mode = stat.S_IMODE(os.stat(wdir_sub_file2)[stat.ST_MODE])
+ assert file2_mode == 0o760, '0%o' % file2_mode
+
+ test.chmod('file1', 0o765)
+ test.chmod(wdir_sub_file2, 0o567)
+
+ file1_mode = stat.S_IMODE(os.stat(wdir_file1)[stat.ST_MODE])
+ assert file1_mode == 0o765, '0%o' % file1_mode
+ file2_mode = stat.S_IMODE(os.stat(wdir_sub_file2)[stat.ST_MODE])
+ assert file2_mode == 0o567, '0%o' % file2_mode
+
+
+
+class combine_TestCase(TestCmdTestCase):
+ def test_combine(self):
+ """Test combining stdout and stderr"""
+ run_env = TestCmd.TestCmd(workdir = '')
+ run_env.write('run1', """import sys
+sys.stdout.write("run1 STDOUT %s\\n" % sys.argv[1:])
+sys.stdout.write("run1 STDOUT second line\\n")
+sys.stderr.write("run1 STDERR %s\\n" % sys.argv[1:])
+sys.stderr.write("run1 STDERR second line\\n")
+sys.stdout.write("run1 STDOUT third line\\n")
+sys.stderr.write("run1 STDERR third line\\n")
+""")
+ run_env.write('run2', """import sys
+sys.stdout.write("run2 STDOUT %s\\n" % sys.argv[1:])
+sys.stdout.write("run2 STDOUT second line\\n")
+sys.stderr.write("run2 STDERR %s\\n" % sys.argv[1:])
+sys.stderr.write("run2 STDERR second line\\n")
+sys.stdout.write("run2 STDOUT third line\\n")
+sys.stderr.write("run2 STDERR third line\\n")
+""")
+ cwd = os.getcwd()
+ os.chdir(run_env.workdir)
+ # Everything before this prepared our "source directory."
+ # Now do the real test.
+ try:
+ test = TestCmd.TestCmd(interpreter = 'python',
+ workdir = '',
+ combine = 1)
+ output = test.stdout()
+ if output is not None:
+ raise IndexError("got unexpected output:\n\t`%s'\n" % output)
+
+ # The underlying system subprocess implementations can combine
+ # stdout and stderr in different orders, so we accommodate both.
+
+ test.program_set('run1')
+ test.run(arguments = 'foo bar')
+ stdout_lines = """\
+run1 STDOUT ['foo', 'bar']
+run1 STDOUT second line
+run1 STDOUT third line
+"""
+ stderr_lines = """\
+run1 STDERR ['foo', 'bar']
+run1 STDERR second line
+run1 STDERR third line
+"""
+ foo_bar_expect = (stdout_lines + stderr_lines,
+ stderr_lines + stdout_lines)
+
+ test.program_set('run2')
+ test.run(arguments = 'snafu')
+ stdout_lines = """\
+run2 STDOUT ['snafu']
+run2 STDOUT second line
+run2 STDOUT third line
+"""
+ stderr_lines = """\
+run2 STDERR ['snafu']
+run2 STDERR second line
+run2 STDERR third line
+"""
+ snafu_expect = (stdout_lines + stderr_lines,
+ stderr_lines + stdout_lines)
+
+ # XXX SHOULD TEST ABSOLUTE NUMBER AS WELL
+ output = test.stdout()
+ output = self.translate_newlines(output)
+ assert output in snafu_expect, output
+ error = test.stderr()
+ assert error == '', error
+
+ output = test.stdout(run = -1)
+ output = self.translate_newlines(output)
+ assert output in foo_bar_expect, output
+ error = test.stderr(-1)
+ assert error == '', error
+ finally:
+ os.chdir(cwd)
+
+
+
+class description_TestCase(TestCmdTestCase):
+ def test_description(self):
+ """Test description()"""
+ test = TestCmd.TestCmd()
+ assert test.description is None, 'initialized description?'
+ test = TestCmd.TestCmd(description = 'test')
+ assert test.description == 'test', 'uninitialized description'
+ test.description_set('foo')
+ assert test.description == 'foo', 'did not set description'
+
+
+
+class diff_TestCase(TestCmdTestCase):
+ def test_diff_re(self):
+ """Test diff_re()"""
+ result = TestCmd.diff_re(["abcde"], ["abcde"])
+ result = list(result)
+ assert result == [], result
+ result = TestCmd.diff_re(["a.*e"], ["abcde"])
+ result = list(result)
+ assert result == [], result
+ result = TestCmd.diff_re(["a.*e"], ["xxx"])
+ result = list(result)
+ assert result == ['1c1', "< 'a.*e'", '---', "> 'xxx'"], result
+
+ def test_diff_custom_function(self):
+ """Test diff() using a custom function"""
+ self.popen_python("""import sys
+sys.path = ['%s'] + sys.path
+import TestCmd
+def my_diff(a, b):
+ return [
+ '*****',
+ a,
+ '*****',
+ b,
+ '*****',
+ ]
+test = TestCmd.TestCmd(diff = my_diff)
+test.diff("a\\nb1\\nc\\n", "a\\nb2\\nc\\n", "STDOUT")
+sys.exit(0)
+""" % self.orig_cwd,
+ stdout = """\
+STDOUT==========================================================================
+*****
+['a', 'b1', 'c']
+*****
+['a', 'b2', 'c']
+*****
+""")
+
+ def test_diff_string(self):
+ self.popen_python("""import sys
+sys.path = ['%s'] + sys.path
+import TestCmd
+test = TestCmd.TestCmd(diff = 'diff_re')
+test.diff("a\\nb1\\nc\\n", "a\\nb2\\nc\\n", 'STDOUT')
+sys.exit(0)
+""" % self.orig_cwd,
+ stdout = """\
+STDOUT==========================================================================
+2c2
+< 'b1'
+---
+> 'b2'
+""")
+
+ def test_error(self):
+ """Test handling a compilation error in TestCmd.diff_re()"""
+ script_input = """import sys
+sys.path = ['%s'] + sys.path
+import TestCmd
+assert TestCmd.diff_re([r"a.*(e"], ["abcde"])
+sys.exit(0)
+""" % self.orig_cwd
+ stdout, stderr, status = self.call_python(script_input)
+ assert status == 1, status
+ expect1 = "Regular expression error in '^a.*(e$': missing )"
+ expect2 = "Regular expression error in '^a.*(e$': unbalanced parenthesis"
+ assert (stderr.find(expect1) != -1 or
+ stderr.find(expect2) != -1), repr(stderr)
+
+ def test_simple_diff_static_method(self):
+ """Test calling the TestCmd.TestCmd.simple_diff() static method"""
+ self.popen_python("""import sys
+sys.path = ['%s'] + sys.path
+import TestCmd
+result = TestCmd.TestCmd.simple_diff(['a', 'b', 'c', 'e', 'f1'],
+ ['a', 'c', 'd', 'e', 'f2'])
+result = list(result)
+expect = ['2d1', '< b', '3a3', '> d', '5c5', '< f1', '---', '> f2']
+assert result == expect, result
+sys.exit(0)
+""" % self.orig_cwd)
+
+ def test_context_diff_static_method(self):
+ """Test calling the TestCmd.TestCmd.context_diff() static method"""
+ self.popen_python("""import sys
+sys.path = ['%s'] + sys.path
+import TestCmd
+result = TestCmd.TestCmd.context_diff(['a\\n', 'b\\n', 'c\\n', 'e\\n', 'f1\\n'],
+ ['a\\n', 'c\\n', 'd\\n', 'e\\n', 'f2\\n'])
+result = list(result)
+expect = [
+ '*** \\n',
+ '--- \\n',
+ '***************\\n',
+ '*** 1,5 ****\\n',
+ ' a\\n',
+ '- b\\n',
+ ' c\\n',
+ ' e\\n',
+ '! f1\\n',
+ '--- 1,5 ----\\n',
+ ' a\\n',
+ ' c\\n',
+ '+ d\\n',
+ ' e\\n',
+ '! f2\\n',
+]
+assert result == expect, result
+sys.exit(0)
+""" % self.orig_cwd)
+
+ def test_unified_diff_static_method(self):
+ """Test calling the TestCmd.TestCmd.unified_diff() static method"""
+ self.popen_python("""import sys
+sys.path = ['%s'] + sys.path
+import TestCmd
+result = TestCmd.TestCmd.unified_diff(['a\\n', 'b\\n', 'c\\n', 'e\\n', 'f1\\n'],
+ ['a\\n', 'c\\n', 'd\\n', 'e\\n', 'f2\\n'])
+result = list(result)
+expect = [
+ '--- \\n',
+ '+++ \\n',
+ '@@ -1,5 +1,5 @@\\n',
+ ' a\\n',
+ '-b\\n',
+ ' c\\n',
+ '+d\\n',
+ ' e\\n',
+ '-f1\\n',
+ '+f2\\n'
+]
+assert result == expect, result
+sys.exit(0)
+""" % self.orig_cwd)
+
+ def test_diff_re_static_method(self):
+ """Test calling the TestCmd.TestCmd.diff_re() static method"""
+ self.popen_python("""import sys
+sys.path = ['%s'] + sys.path
+import TestCmd
+result = TestCmd.TestCmd.diff_re(['a', 'b', 'c', '.', 'f1'],
+ ['a', 'c', 'd', 'e', 'f2'])
+result = list(result)
+expect = [
+ '2c2',
+ "< 'b'",
+ '---',
+ "> 'c'",
+ '3c3',
+ "< 'c'",
+ '---',
+ "> 'd'",
+ '5c5',
+ "< 'f1'",
+ '---',
+ "> 'f2'"
+]
+assert result == expect, result
+sys.exit(0)
+""" % self.orig_cwd)
+
+
+
+class diff_stderr_TestCase(TestCmdTestCase):
+ def test_diff_stderr_default(self):
+ """Test diff_stderr() default behavior"""
+ self.popen_python(r"""import sys
+sys.path = ['%s'] + sys.path
+import TestCmd
+test = TestCmd.TestCmd()
+test.diff_stderr('a\nb1\nc\n', 'a\nb2\nc\n')
+sys.exit(0)
+""" % self.orig_cwd,
+ stdout="""\
+2c2
+< b1
+---
+> b2
+""")
+
+ def test_diff_stderr_not_affecting_diff_stdout(self):
+ """Test diff_stderr() not affecting diff_stdout() behavior"""
+ self.popen_python(r"""from __future__ import print_function
+import sys
+sys.path = ['%s'] + sys.path
+import TestCmd
+test = TestCmd.TestCmd(diff_stderr='diff_re')
+print("diff_stderr:")
+test.diff_stderr('a\nb.\nc\n', 'a\nbb\nc\n')
+print("diff_stdout:")
+test.diff_stdout('a\nb.\nc\n', 'a\nbb\nc\n')
+sys.exit(0)
+""" % self.orig_cwd,
+ stdout="""\
+diff_stderr:
+diff_stdout:
+2c2
+< b.
+---
+> bb
+""")
+
+ def test_diff_stderr_custom_function(self):
+ """Test diff_stderr() using a custom function"""
+ self.popen_python(r"""import sys
+sys.path = ['%s'] + sys.path
+import TestCmd
+def my_diff(a, b):
+ return ["a:"] + a + ["b:"] + b
+test = TestCmd.TestCmd(diff_stderr=my_diff)
+test.diff_stderr('abc', 'def')
+sys.exit(0)
+""" % self.orig_cwd,
+ stdout="""\
+a:
+abc
+b:
+def
+""")
+
+ def test_diff_stderr_TestCmd_function(self):
+ """Test diff_stderr() using a TestCmd function"""
+ self.popen_python(r"""import sys
+sys.path = ['%s'] + sys.path
+import TestCmd
+test = TestCmd.TestCmd(diff_stderr = TestCmd.diff_re)
+test.diff_stderr('a\n.\n', 'b\nc\n')
+sys.exit(0)
+""" % self.orig_cwd,
+ stdout="""\
+1c1
+< 'a'
+---
+> 'b'
+""")
+
+ def test_diff_stderr_static_method(self):
+ """Test diff_stderr() using a static method"""
+ self.popen_python(r"""import sys
+sys.path = ['%s'] + sys.path
+import TestCmd
+test = TestCmd.TestCmd(diff_stderr=TestCmd.TestCmd.diff_re)
+test.diff_stderr('a\n.\n', 'b\nc\n')
+sys.exit(0)
+""" % self.orig_cwd,
+ stdout="""\
+1c1
+< 'a'
+---
+> 'b'
+""")
+
+ def test_diff_stderr_string(self):
+ """Test diff_stderr() using a string to fetch the diff method"""
+ self.popen_python(r"""import sys
+sys.path = ['%s'] + sys.path
+import TestCmd
+test = TestCmd.TestCmd(diff_stderr='diff_re')
+test.diff_stderr('a\n.\n', 'b\nc\n')
+sys.exit(0)
+""" % self.orig_cwd,
+ stdout="""\
+1c1
+< 'a'
+---
+> 'b'
+""")
+
+
+
+class diff_stdout_TestCase(TestCmdTestCase):
+ def test_diff_stdout_default(self):
+ """Test diff_stdout() default behavior"""
+ self.popen_python(r"""import sys
+sys.path = ['%s'] + sys.path
+import TestCmd
+test = TestCmd.TestCmd()
+test.diff_stdout('a\nb1\nc\n', 'a\nb2\nc\n')
+sys.exit(0)
+""" % self.orig_cwd,
+ stdout="""\
+2c2
+< b1
+---
+> b2
+""")
+
+ def test_diff_stdout_not_affecting_diff_stderr(self):
+ """Test diff_stdout() not affecting diff_stderr() behavior"""
+ self.popen_python(r"""from __future__ import print_function
+import sys
+sys.path = ['%s'] + sys.path
+import TestCmd
+test = TestCmd.TestCmd(diff_stdout='diff_re')
+print("diff_stdout:")
+test.diff_stdout('a\nb.\nc\n', 'a\nbb\nc\n')
+print("diff_stderr:")
+test.diff_stderr('a\nb.\nc\n', 'a\nbb\nc\n')
+sys.exit(0)
+""" % self.orig_cwd,
+ stdout="""\
+diff_stdout:
+diff_stderr:
+2c2
+< b.
+---
+> bb
+""")
+
+ def test_diff_stdout_custom_function(self):
+ """Test diff_stdout() using a custom function"""
+ self.popen_python(r"""import sys
+sys.path = ['%s'] + sys.path
+import TestCmd
+def my_diff(a, b):
+ return ["a:"] + a + ["b:"] + b
+test = TestCmd.TestCmd(diff_stdout=my_diff)
+test.diff_stdout('abc', 'def')
+sys.exit(0)
+""" % self.orig_cwd,
+ stdout="""\
+a:
+abc
+b:
+def
+""")
+
+ def test_diff_stdout_TestCmd_function(self):
+ """Test diff_stdout() using a TestCmd function"""
+ self.popen_python(r"""import sys
+sys.path = ['%s'] + sys.path
+import TestCmd
+test = TestCmd.TestCmd(diff_stdout = TestCmd.diff_re)
+test.diff_stdout('a\n.\n', 'b\nc\n')
+sys.exit(0)
+""" % self.orig_cwd,
+ stdout="""\
+1c1
+< 'a'
+---
+> 'b'
+""")
+
+ def test_diff_stdout_static_method(self):
+ """Test diff_stdout() using a static method"""
+ self.popen_python(r"""import sys
+sys.path = ['%s'] + sys.path
+import TestCmd
+test = TestCmd.TestCmd(diff_stdout=TestCmd.TestCmd.diff_re)
+test.diff_stdout('a\n.\n', 'b\nc\n')
+sys.exit(0)
+""" % self.orig_cwd,
+ stdout="""\
+1c1
+< 'a'
+---
+> 'b'
+""")
+
+ def test_diff_stdout_string(self):
+ """Test diff_stdout() using a string to fetch the diff method"""
+ self.popen_python(r"""import sys
+sys.path = ['%s'] + sys.path
+import TestCmd
+test = TestCmd.TestCmd(diff_stdout='diff_re')
+test.diff_stdout('a\n.\n', 'b\nc\n')
+sys.exit(0)
+""" % self.orig_cwd,
+ stdout="""\
+1c1
+< 'a'
+---
+> 'b'
+""")
+
+
+
+class exit_TestCase(TestCmdTestCase):
+ def test_exit(self):
+ """Test exit()"""
+ def _test_it(cwd, tempdir, condition, preserved):
+ close_true = {'pass_test': 1, 'fail_test': 0, 'no_result': 0}
+ exit_status = {'pass_test': 0, 'fail_test': 1, 'no_result': 2}
+ result_string = {'pass_test': "PASSED\n",
+ 'fail_test': "FAILED test at line 5 of <stdin>\n",
+ 'no_result': "NO RESULT for test at line 5 of <stdin>\n"}
+ global ExitError
+ input = """import sys
+sys.path = ['%s'] + sys.path
+import TestCmd
+test = TestCmd.TestCmd(workdir = '%s')
+test.%s()
+""" % (cwd, tempdir, condition)
+ stdout, stderr, status = self.call_python(input, python="python")
+ if close_true[condition]:
+ unexpected = (status != 0)
+ else:
+ unexpected = (status == 0)
+ if unexpected:
+ msg = "Unexpected exit status from python: %s\n"
+ raise ExitError(msg % status + stdout + stderr)
+ if status != exit_status[condition]:
+ msg = "Expected exit status %d, got %d\n"
+ raise ExitError(msg % (exit_status[condition], status))
+ if stderr != result_string[condition]:
+ msg = "Expected error output:\n%sGot error output:\n%s"
+ raise ExitError(msg % (result_string[condition], stderr))
+ if preserved:
+ if not os.path.exists(tempdir):
+ msg = "Working directory %s was mistakenly removed\n"
+ raise ExitError(msg % tempdir + stdout)
+ else:
+ if os.path.exists(tempdir):
+ msg = "Working directory %s was mistakenly preserved\n"
+ raise ExitError(msg % tempdir + stdout)
+
+ run_env = TestCmd.TestCmd(workdir = '')
+ os.chdir(run_env.workdir)
+ # Everything before this prepared our "source directory."
+ # Now do the real test.
+ try:
+ cwd = self.orig_cwd
+ _clear_dict(os.environ, 'PRESERVE', 'PRESERVE_PASS', 'PRESERVE_FAIL', 'PRESERVE_NO_RESULT')
+ _test_it(cwd, 'dir01', 'pass_test', 0)
+ _test_it(cwd, 'dir02', 'fail_test', 0)
+ _test_it(cwd, 'dir03', 'no_result', 0)
+ os.environ['PRESERVE'] = '1'
+ _test_it(cwd, 'dir04', 'pass_test', 1)
+ _test_it(cwd, 'dir05', 'fail_test', 1)
+ _test_it(cwd, 'dir06', 'no_result', 1)
+ del os.environ['PRESERVE']
+ os.environ['PRESERVE_PASS'] = '1'
+ _test_it(cwd, 'dir07', 'pass_test', 1)
+ _test_it(cwd, 'dir08', 'fail_test', 0)
+ _test_it(cwd, 'dir09', 'no_result', 0)
+ del os.environ['PRESERVE_PASS']
+ os.environ['PRESERVE_FAIL'] = '1'
+ _test_it(cwd, 'dir10', 'pass_test', 0)
+ _test_it(cwd, 'dir11', 'fail_test', 1)
+ _test_it(cwd, 'dir12', 'no_result', 0)
+ del os.environ['PRESERVE_FAIL']
+ os.environ['PRESERVE_NO_RESULT'] = '1'
+ _test_it(cwd, 'dir13', 'pass_test', 0)
+ _test_it(cwd, 'dir14', 'fail_test', 0)
+ _test_it(cwd, 'dir15', 'no_result', 1)
+ del os.environ['PRESERVE_NO_RESULT']
+ finally:
+ _clear_dict(os.environ, 'PRESERVE', 'PRESERVE_PASS', 'PRESERVE_FAIL', 'PRESERVE_NO_RESULT')
+
+
+
+class fail_test_TestCase(TestCmdTestCase):
+ def test_fail_test(self):
+ """Test fail_test()"""
+ run_env = TestCmd.TestCmd(workdir = '')
+ run_env.write('run', """import sys
+sys.stdout.write("run: STDOUT\\n")
+sys.stderr.write("run: STDERR\\n")
+""")
+ os.chdir(run_env.workdir)
+ # Everything before this prepared our "source directory."
+ # Now do the real test.
+ self.popen_python("""import sys
+sys.path = ['%s'] + sys.path
+import TestCmd
+TestCmd.fail_test(condition = 1)
+""" % self.orig_cwd, status = 1, stderr = "FAILED test at line 4 of <stdin>\n")
+
+ self.popen_python("""import sys
+sys.path = ['%s'] + sys.path
+import TestCmd
+test = TestCmd.TestCmd(program = 'run', interpreter = 'python', workdir = '')
+test.run()
+test.fail_test(condition = (test.status == 0))
+""" % self.orig_cwd, status = 1, stderr = "FAILED test of %s\n\tat line 6 of <stdin>\n" % run_env.workpath('run'))
+
+ self.popen_python("""import sys
+sys.path = ['%s'] + sys.path
+import TestCmd
+test = TestCmd.TestCmd(program = 'run', interpreter = 'python', description = 'xyzzy', workdir = '')
+test.run()
+test.fail_test(condition = (test.status == 0))
+""" % self.orig_cwd, status = 1, stderr = "FAILED test of %s [xyzzy]\n\tat line 6 of <stdin>\n" % run_env.workpath('run'))
+
+ self.popen_python("""import sys
+sys.path = ['%s'] + sys.path
+import TestCmd
+test = TestCmd.TestCmd(program = 'run', interpreter = 'python', workdir = '')
+test.run()
+def xxx():
+ sys.stderr.write("printed on failure\\n")
+test.fail_test(condition = (test.status == 0), function = xxx)
+""" % self.orig_cwd, status = 1, stderr = "printed on failure\nFAILED test of %s\n\tat line 8 of <stdin>\n" % run_env.workpath('run'))
+
+ self.popen_python("""import sys
+sys.path = ['%s'] + sys.path
+import TestCmd
+def test1(self):
+ self.run()
+ self.fail_test(condition = (self.status == 0))
+def test2(self):
+ test1(self)
+test2(TestCmd.TestCmd(program = 'run', interpreter = 'python', workdir = ''))
+""" % self.orig_cwd, status = 1, stderr = "FAILED test of %s\n\tat line 6 of <stdin> (test1)\n\tfrom line 8 of <stdin> (test2)\n\tfrom line 9 of <stdin>\n" % run_env.workpath('run'))
+
+ self.popen_python("""import sys
+sys.path = ['%s'] + sys.path
+import TestCmd
+def test1(self):
+ self.run()
+ self.fail_test(condition = (self.status == 0), skip = 1)
+def test2(self):
+ test1(self)
+test2(TestCmd.TestCmd(program = 'run', interpreter = 'python', workdir = ''))
+""" % self.orig_cwd, status = 1, stderr = "FAILED test of %s\n\tat line 8 of <stdin> (test2)\n\tfrom line 9 of <stdin>\n" % run_env.workpath('run'))
+
+
+
+class interpreter_TestCase(TestCmdTestCase):
+ def test_interpreter(self):
+ """Test interpreter()"""
+ run_env = TestCmd.TestCmd(workdir = '')
+ run_env.write('run', """import sys
+sys.stdout.write("run: STDOUT\\n")
+sys.stderr.write("run: STDERR\\n")
+""")
+ os.chdir(run_env.workdir)
+ # Everything before this prepared our "source directory."
+ # Now do the real test.
+ test = TestCmd.TestCmd(program = 'run', workdir = '')
+ test.interpreter_set('foo')
+ assert test.interpreter == 'foo', 'did not set interpreter'
+ test.interpreter_set('python')
+ assert test.interpreter == 'python', 'did not set interpreter'
+ test.run()
+
+
+
+class match_TestCase(TestCmdTestCase):
+ def test_match_default(self):
+ """Test match() default behavior"""
+ test = TestCmd.TestCmd()
+ assert test.match("abcde\n", "a.*e\n")
+ assert test.match("12345\nabcde\n", "1\\d+5\na.*e\n")
+ lines = ["vwxyz\n", "67890\n"]
+ regexes = ["v[^a-u]*z\n", "6[^ ]+0\n"]
+ assert test.match(lines, regexes)
+
+ def test_match_custom_function(self):
+ """Test match() using a custom function"""
+ def match_length(lines, matches):
+ return len(lines) == len(matches)
+ test = TestCmd.TestCmd(match=match_length)
+ assert not test.match("123\n", "1\n")
+ assert test.match("123\n", "111\n")
+ assert not test.match("123\n123\n", "1\n1\n")
+ assert test.match("123\n123\n", "111\n111\n")
+ lines = ["123\n", "123\n"]
+ regexes = ["1\n", "1\n"]
+ assert test.match(lines, regexes) # due to equal numbers of lines
+
+ def test_match_TestCmd_function(self):
+ """Test match() using a TestCmd function"""
+ test = TestCmd.TestCmd(match = TestCmd.match_exact)
+ assert not test.match("abcde\n", "a.*e\n")
+ assert test.match("abcde\n", "abcde\n")
+ assert not test.match("12345\nabcde\n", "1\\d+5\na.*e\n")
+ assert test.match("12345\nabcde\n", "12345\nabcde\n")
+ lines = ["vwxyz\n", "67890\n"]
+ regexes = [r"v[^a-u]*z\n", r"6[^ ]+0\n"]
+ assert not test.match(lines, regexes)
+ assert test.match(lines, lines)
+
+ def test_match_static_method(self):
+ """Test match() using a static method"""
+ test = TestCmd.TestCmd(match=TestCmd.TestCmd.match_exact)
+ assert not test.match("abcde\n", "a.*e\n")
+ assert test.match("abcde\n", "abcde\n")
+ assert not test.match("12345\nabcde\n", "1\\d+5\na.*e\n")
+ assert test.match("12345\nabcde\n", "12345\nabcde\n")
+ lines = ["vwxyz\n", "67890\n"]
+ regexes = [r"v[^a-u]*z\n", r"6[^ ]+0\n"]
+ assert not test.match(lines, regexes)
+ assert test.match(lines, lines)
+
+ def test_match_string(self):
+ """Test match() using a string to fetch the match method"""
+ test = TestCmd.TestCmd(match='match_exact')
+ assert not test.match("abcde\n", "a.*e\n")
+ assert test.match("abcde\n", "abcde\n")
+ assert not test.match("12345\nabcde\n", "1\\d+5\na.*e\n")
+ assert test.match("12345\nabcde\n", "12345\nabcde\n")
+ lines = ["vwxyz\n", "67890\n"]
+ regexes = [r"v[^a-u]*z\n", r"6[^ ]+0\n"]
+ assert not test.match(lines, regexes)
+ assert test.match(lines, lines)
+
+
+
+class match_exact_TestCase(TestCmdTestCase):
+ def test_match_exact_function(self):
+ """Test calling the TestCmd.match_exact() function"""
+ assert not TestCmd.match_exact("abcde\\n", "a.*e\\n")
+ assert TestCmd.match_exact("abcde\\n", "abcde\\n")
+
+ def test_match_exact_instance_method(self):
+ """Test calling the TestCmd.TestCmd().match_exact() instance method"""
+ test = TestCmd.TestCmd()
+ assert not test.match_exact("abcde\\n", "a.*e\\n")
+ assert test.match_exact("abcde\\n", "abcde\\n")
+
+ def test_match_exact_static_method(self):
+ """Test calling the TestCmd.TestCmd.match_exact() static method"""
+ assert not TestCmd.TestCmd.match_exact("abcde\\n", "a.*e\\n")
+ assert TestCmd.TestCmd.match_exact("abcde\\n", "abcde\\n")
+
+ def test_evaluation(self):
+ """Test match_exact() evaluation"""
+ test = TestCmd.TestCmd()
+ assert not test.match_exact("abcde\n", "a.*e\n")
+ assert test.match_exact("abcde\n", "abcde\n")
+ assert not test.match_exact(["12345\n", "abcde\n"], ["1[0-9]*5\n", "a.*e\n"])
+ assert test.match_exact(["12345\n", "abcde\n"], ["12345\n", "abcde\n"])
+ assert not test.match_exact(UserList(["12345\n", "abcde\n"]),
+ ["1[0-9]*5\n", "a.*e\n"])
+ assert test.match_exact(UserList(["12345\n", "abcde\n"]),
+ ["12345\n", "abcde\n"])
+ assert not test.match_exact(["12345\n", "abcde\n"],
+ UserList(["1[0-9]*5\n", "a.*e\n"]))
+ assert test.match_exact(["12345\n", "abcde\n"],
+ UserList(["12345\n", "abcde\n"]))
+ assert not test.match_exact("12345\nabcde\n", "1[0-9]*5\na.*e\n")
+ assert test.match_exact("12345\nabcde\n", "12345\nabcde\n")
+ lines = ["vwxyz\n", "67890\n"]
+ regexes = ["v[^a-u]*z\n", "6[^ ]+0\n"]
+ assert not test.match_exact(lines, regexes)
+ assert test.match_exact(lines, lines)
+
+
+
+class match_re_dotall_TestCase(TestCmdTestCase):
+ def test_match_re_dotall_function(self):
+ """Test calling the TestCmd.match_re_dotall() function"""
+ assert TestCmd.match_re_dotall("abcde\nfghij\n", r"a.*j\n")
+
+ def test_match_re_dotall_instance_method(self):
+ """Test calling the TestCmd.TestCmd().match_re_dotall() instance method"""
+ test = TestCmd.TestCmd()
+ test.match_re_dotall("abcde\\nfghij\\n", r"a.*j\\n")
+
+ def test_match_re_dotall_static_method(self):
+ """Test calling the TestCmd.TestCmd.match_re_dotall() static method"""
+ assert TestCmd.TestCmd.match_re_dotall("abcde\nfghij\n", r"a.*j\n")
+
+ def test_error(self):
+ """Test handling a compilation error in TestCmd.match_re_dotall()"""
+ run_env = TestCmd.TestCmd(workdir = '')
+ cwd = os.getcwd()
+ os.chdir(run_env.workdir)
+ # Everything before this prepared our "source directory."
+ # Now do the real test.
+ try:
+ script_input = """import sys
+sys.path = ['%s'] + sys.path
+import TestCmd
+assert TestCmd.match_re_dotall("abcde", r"a.*(e")
+sys.exit(0)
+""" % cwd
+ stdout, stderr, status = self.call_python(script_input)
+ assert status == 1, status
+ expect1 = "Regular expression error in '^a.*(e$': missing )"
+ expect2 = "Regular expression error in '^a.*(e$': unbalanced parenthesis"
+ assert (stderr.find(expect1) != -1 or
+ stderr.find(expect2) != -1), repr(stderr)
+ finally:
+ os.chdir(cwd)
+
+ def test_evaluation(self):
+ """Test match_re_dotall() evaluation"""
+ test = TestCmd.TestCmd()
+ assert test.match_re_dotall("abcde\nfghij\n", r"a.*e\nf.*j\n")
+ assert test.match_re_dotall("abcde\nfghij\n", r"a[^j]*j\n")
+ assert test.match_re_dotall("abcde\nfghij\n", r"abcde\nfghij\n")
+ assert test.match_re_dotall(["12345\n", "abcde\n", "fghij\n"],
+ [r"1[0-9]*5\n", r"a.*e\n", r"f.*j\n"])
+ assert test.match_re_dotall(["12345\n", "abcde\n", "fghij\n"],
+ [r"1.*j\n"])
+ assert test.match_re_dotall(["12345\n", "abcde\n", "fghij\n"],
+ [r"12345\n", r"abcde\n", r"fghij\n"])
+ assert test.match_re_dotall(UserList(["12345\n", "abcde\n", "fghij\n"]),
+ [r"1[0-9]*5\n", r"a.*e\n", r"f.*j\n"])
+ assert test.match_re_dotall(UserList(["12345\n", "abcde\n", "fghij\n"]),
+ [r"1.*j\n"])
+ assert test.match_re_dotall(UserList(["12345\n", "abcde\n", "fghij\n"]),
+ [r"12345\n", r"abcde\n", r"fghij\n"])
+ assert test.match_re_dotall(["12345\n", "abcde\n", "fghij\n"],
+ UserList([r"1[0-9]*5\n", r"a.*e\n", r"f.*j\n"]))
+ assert test.match_re_dotall(["12345\n", "abcde\n", "fghij\n"],
+ UserList([r"1.*j\n"]))
+ assert test.match_re_dotall(["12345\n", "abcde\n", "fghij\n"],
+ UserList([r"12345\n", r"abcde\n", r"fghij\n"]))
+ assert test.match_re_dotall("12345\nabcde\nfghij\n",
+ r"1[0-9]*5\na.*e\nf.*j\n")
+ assert test.match_re_dotall("12345\nabcde\nfghij\n", r"1.*j\n")
+ assert test.match_re_dotall("12345\nabcde\nfghij\n",
+ r"12345\nabcde\nfghij\n")
+ lines = ["vwxyz\n", "67890\n"]
+ regexes = [r"v[^a-u]*z\n", r"6[^ ]+0\n"]
+ assert test.match_re_dotall(lines, regexes)
+ assert test.match_re_dotall(lines, lines)
+
+
+
+class match_re_TestCase(TestCmdTestCase):
+ def test_match_re_function(self):
+ """Test calling the TestCmd.match_re() function"""
+ assert TestCmd.match_re("abcde\n", "a.*e\n")
+
+ def test_match_re_instance_method(self):
+ """Test calling the TestCmd.TestCmd().match_re() instance method"""
+ test = TestCmd.TestCmd()
+ assert test.match_re("abcde\n", "a.*e\n")
+
+ def test_match_re_static_method(self):
+ """Test calling the TestCmd.TestCmd.match_re() static method"""
+ assert TestCmd.TestCmd.match_re("abcde\n", "a.*e\n")
+
+ def test_error(self):
+ """Test handling a compilation error in TestCmd.match_re()"""
+ run_env = TestCmd.TestCmd(workdir = '')
+ cwd = os.getcwd()
+ os.chdir(run_env.workdir)
+ # Everything before this prepared our "source directory."
+ # Now do the real test.
+ try:
+ script_input = """import sys
+sys.path = ['%s'] + sys.path
+import TestCmd
+assert TestCmd.match_re("abcde\\n", "a.*(e\\n")
+sys.exit(0)
+""" % cwd
+ stdout, stderr, status = self.call_python(script_input)
+ assert status == 1, status
+ expect1 = "Regular expression error in '^a.*(e$': missing )"
+ expect2 = "Regular expression error in '^a.*(e$': unbalanced parenthesis"
+ assert (stderr.find(expect1) != -1 or
+ stderr.find(expect2) != -1), repr(stderr)
+ finally:
+ os.chdir(cwd)
+
+ def test_evaluation(self):
+ """Test match_re() evaluation"""
+ test = TestCmd.TestCmd()
+ assert test.match_re("abcde\n", "a.*e\n")
+ assert test.match_re("abcde\n", "abcde\n")
+ assert test.match_re(["12345\n", "abcde\n"], ["1[0-9]*5\n", "a.*e\n"])
+ assert test.match_re(["12345\n", "abcde\n"], ["12345\n", "abcde\n"])
+ assert test.match_re(UserList(["12345\n", "abcde\n"]),
+ ["1[0-9]*5\n", "a.*e\n"])
+ assert test.match_re(UserList(["12345\n", "abcde\n"]),
+ ["12345\n", "abcde\n"])
+ assert test.match_re(["12345\n", "abcde\n"],
+ UserList(["1[0-9]*5\n", "a.*e\n"]))
+ assert test.match_re(["12345\n", "abcde\n"],
+ UserList(["12345\n", "abcde\n"]))
+ assert test.match_re("12345\nabcde\n", "1[0-9]*5\na.*e\n")
+ assert test.match_re("12345\nabcde\n", "12345\nabcde\n")
+ lines = ["vwxyz\n", "67890\n"]
+ regexes = [r"v[^a-u]*z\n", r"6[^ ]+0\n"]
+ assert test.match_re(lines, regexes)
+ assert test.match_re(lines, lines)
+
+
+
+class match_stderr_TestCase(TestCmdTestCase):
+ def test_match_stderr_default(self):
+ """Test match_stderr() default behavior"""
+ test = TestCmd.TestCmd()
+ assert test.match_stderr("abcde\n", "a.*e\n")
+ assert test.match_stderr("12345\nabcde\n", "1\\d+5\na.*e\n")
+ lines = ["vwxyz\n", "67890\n"]
+ regexes = [r"v[^a-u]*z\n", r"6[^ ]+0\n"]
+ assert test.match_stderr(lines, regexes)
+
+ def test_match_stderr_not_affecting_match_stdout(self):
+ """Test match_stderr() not affecting match_stdout() behavior"""
+ test = TestCmd.TestCmd(match_stderr=TestCmd.TestCmd.match_exact)
+
+ assert not test.match_stderr("abcde\n", "a.*e\n")
+ assert test.match_stderr("abcde\n", "abcde\n")
+ assert not test.match_stderr("12345\nabcde\n", "1\\d+5\na.*e\n")
+ assert test.match_stderr("12345\nabcde\n", "12345\nabcde\n")
+ lines = ["vwxyz\n", "67890\n"]
+ regexes = [r"v[^a-u]*z\n", r"6[^ ]+0\n"]
+ assert not test.match_stderr(lines, regexes)
+ assert test.match_stderr(lines, lines)
+
+ assert test.match_stdout("abcde\n", "a.*e\n")
+ assert test.match_stdout("12345\nabcde\n", "1\\d+5\na.*e\n")
+ lines = ["vwxyz\n", "67890\n"]
+ regexes = [r"v[^a-u]*z\n", r"6[^ ]+0\n"]
+ assert test.match_stdout(lines, regexes)
+
+ def test_match_stderr_custom_function(self):
+ """Test match_stderr() using a custom function"""
+ def match_length(lines, matches):
+ return len(lines) == len(matches)
+ test = TestCmd.TestCmd(match_stderr=match_length)
+ assert not test.match_stderr("123\n", "1\n")
+ assert test.match_stderr("123\n", "111\n")
+ assert not test.match_stderr("123\n123\n", "1\n1\n")
+ assert test.match_stderr("123\n123\n", "111\n111\n")
+ lines = ["123\n", "123\n"]
+ regexes = [r"1\n", r"1\n"]
+ assert test.match_stderr(lines, regexes) # equal numbers of lines
+
+ def test_match_stderr_TestCmd_function(self):
+ """Test match_stderr() using a TestCmd function"""
+ test = TestCmd.TestCmd(match_stderr = TestCmd.match_exact)
+ assert not test.match_stderr("abcde\n", "a.*e\n")
+ assert test.match_stderr("abcde\n", "abcde\n")
+ assert not test.match_stderr("12345\nabcde\n", "1\\d+5\na.*e\n")
+ assert test.match_stderr("12345\nabcde\n", "12345\nabcde\n")
+ lines = ["vwxyz\n", "67890\n"]
+ regexes = [r"v[^a-u]*z\n", r"6[^ ]+0\n"]
+ assert not test.match_stderr(lines, regexes)
+ assert test.match_stderr(lines, lines)
+
+ def test_match_stderr_static_method(self):
+ """Test match_stderr() using a static method"""
+ test = TestCmd.TestCmd(match_stderr=TestCmd.TestCmd.match_exact)
+ assert not test.match_stderr("abcde\n", "a.*e\n")
+ assert test.match_stderr("abcde\n", "abcde\n")
+ assert not test.match_stderr("12345\nabcde\n", "1\\d+5\na.*e\n")
+ assert test.match_stderr("12345\nabcde\n", "12345\nabcde\n")
+ lines = ["vwxyz\n", "67890\n"]
+ regexes = [r"v[^a-u]*z\n", r"6[^ ]+0\n"]
+ assert not test.match_stderr(lines, regexes)
+ assert test.match_stderr(lines, lines)
+
+ def test_match_stderr_string(self):
+ """Test match_stderr() using a string to fetch the match method"""
+ test = TestCmd.TestCmd(match_stderr='match_exact')
+ assert not test.match_stderr("abcde\n", "a.*e\n")
+ assert test.match_stderr("abcde\n", "abcde\n")
+ assert not test.match_stderr("12345\nabcde\n", "1\\d+5\na.*e\n")
+ assert test.match_stderr("12345\nabcde\n", "12345\nabcde\n")
+ lines = ["vwxyz\n", "67890\n"]
+ regexes = [r"v[^a-u]*z\n", r"6[^ ]+0\n"]
+ assert not test.match_stderr(lines, regexes)
+ assert test.match_stderr(lines, lines)
+
+
+
+class match_stdout_TestCase(TestCmdTestCase):
+ def test_match_stdout_default(self):
+ """Test match_stdout() default behavior"""
+ test = TestCmd.TestCmd()
+ assert test.match_stdout("abcde\n", "a.*e\n")
+ assert test.match_stdout("12345\nabcde\n", "1\\d+5\na.*e\n")
+ lines = ["vwxyz\n", "67890\n"]
+ regexes = [r"v[^a-u]*z\n", r"6[^ ]+0\n"]
+ assert test.match_stdout(lines, regexes)
+
+ def test_match_stdout_not_affecting_match_stderr(self):
+ """Test match_stdout() not affecting match_stderr() behavior"""
+ test = TestCmd.TestCmd(match_stdout=TestCmd.TestCmd.match_exact)
+
+ assert not test.match_stdout("abcde\n", "a.*e\n")
+ assert test.match_stdout("abcde\n", "abcde\n")
+ assert not test.match_stdout("12345\nabcde\n", "1\\d+5\na.*e\n")
+ assert test.match_stdout("12345\nabcde\n", "12345\nabcde\n")
+ lines = ["vwxyz\n", "67890\n"]
+ regexes = [r"v[^a-u]*z\n", r"6[^ ]+0\n"]
+ assert not test.match_stdout(lines, regexes)
+ assert test.match_stdout(lines, lines)
+
+ assert test.match_stderr("abcde\n", "a.*e\n")
+ assert test.match_stderr("12345\nabcde\n", "1\\d+5\na.*e\n")
+ lines = ["vwxyz\n", "67890\n"]
+ regexes = [r"v[^a-u]*z\n", r"6[^ ]+0\n"]
+ assert test.match_stderr(lines, regexes)
+
+ def test_match_stdout_custom_function(self):
+ """Test match_stdout() using a custom function"""
+ def match_length(lines, matches):
+ return len(lines) == len(matches)
+ test = TestCmd.TestCmd(match_stdout=match_length)
+ assert not test.match_stdout("123\n", "1\n")
+ assert test.match_stdout("123\n", "111\n")
+ assert not test.match_stdout("123\n123\n", "1\n1\n")
+ assert test.match_stdout("123\n123\n", "111\n111\n")
+ lines = ["123\n", "123\n"]
+ regexes = [r"1\n", r"1\n"]
+ assert test.match_stdout(lines, regexes) # equal numbers of lines
+
+ def test_match_stdout_TestCmd_function(self):
+ """Test match_stdout() using a TestCmd function"""
+ test = TestCmd.TestCmd(match_stdout = TestCmd.match_exact)
+ assert not test.match_stdout("abcde\n", "a.*e\n")
+ assert test.match_stdout("abcde\n", "abcde\n")
+ assert not test.match_stdout("12345\nabcde\n", "1\\d+5\na.*e\n")
+ assert test.match_stdout("12345\nabcde\n", "12345\nabcde\n")
+ lines = ["vwxyz\n", "67890\n"]
+ regexes = [r"v[^a-u]*z\n", r"6[^ ]+0\n"]
+ assert not test.match_stdout(lines, regexes)
+ assert test.match_stdout(lines, lines)
+
+ def test_match_stdout_static_method(self):
+ """Test match_stdout() using a static method"""
+ test = TestCmd.TestCmd(match_stdout=TestCmd.TestCmd.match_exact)
+ assert not test.match_stdout("abcde\n", "a.*e\n")
+ assert test.match_stdout("abcde\n", "abcde\n")
+ assert not test.match_stdout("12345\nabcde\n", "1\\d+5\na.*e\n")
+ assert test.match_stdout("12345\nabcde\n", "12345\nabcde\n")
+ lines = ["vwxyz\n", "67890\n"]
+ regexes = [r"v[^a-u]*z\n", r"6[^ ]+0\n"]
+ assert not test.match_stdout(lines, regexes)
+ assert test.match_stdout(lines, lines)
+
+ def test_match_stdout_string(self):
+ """Test match_stdout() using a string to fetch the match method"""
+ test = TestCmd.TestCmd(match_stdout='match_exact')
+ assert not test.match_stdout("abcde\n", "a.*e\n")
+ assert test.match_stdout("abcde\n", "abcde\n")
+ assert not test.match_stdout("12345\nabcde\n", "1\\d+5\na.*e\n")
+ assert test.match_stdout("12345\nabcde\n", "12345\nabcde\n")
+ lines = ["vwxyz\n", "67890\n"]
+ regexes = [r"v[^a-u]*z\n", r"6[^ ]+0\n"]
+ assert not test.match_stdout(lines, regexes)
+ assert test.match_stdout(lines, lines)
+
+
+
+class no_result_TestCase(TestCmdTestCase):
+ def test_no_result(self):
+ """Test no_result()"""
+ run_env = TestCmd.TestCmd(workdir = '')
+ run_env.write('run', """import sys
+sys.stdout.write("run: STDOUT\\n")
+sys.stderr.write("run: STDERR\\n")
+""")
+ os.chdir(run_env.workdir)
+ # Everything before this prepared our "source directory."
+ # Now do the real test.
+ self.popen_python("""import sys
+sys.path = ['%s'] + sys.path
+import TestCmd
+TestCmd.no_result(condition = 1)
+""" % self.orig_cwd, status = 2, stderr = "NO RESULT for test at line 4 of <stdin>\n")
+
+ self.popen_python("""import sys
+sys.path = ['%s'] + sys.path
+import TestCmd
+test = TestCmd.TestCmd(program = 'run', interpreter = 'python', workdir = '')
+test.run()
+test.no_result(condition = (test.status == 0))
+""" % self.orig_cwd, status = 2, stderr = "NO RESULT for test of %s\n\tat line 6 of <stdin>\n" % run_env.workpath('run'))
+
+ self.popen_python("""import sys
+sys.path = ['%s'] + sys.path
+import TestCmd
+test = TestCmd.TestCmd(program = 'run', interpreter = 'python', description = 'xyzzy', workdir = '')
+test.run()
+test.no_result(condition = (test.status == 0))
+""" % self.orig_cwd, status = 2, stderr = "NO RESULT for test of %s [xyzzy]\n\tat line 6 of <stdin>\n" % run_env.workpath('run'))
+
+ self.popen_python("""import sys
+sys.path = ['%s'] + sys.path
+import TestCmd
+test = TestCmd.TestCmd(program = 'run', interpreter = 'python', workdir = '')
+test.run()
+def xxx():
+ sys.stderr.write("printed on no result\\n")
+test.no_result(condition = (test.status == 0), function = xxx)
+""" % self.orig_cwd, status = 2, stderr = "printed on no result\nNO RESULT for test of %s\n\tat line 8 of <stdin>\n" % run_env.workpath('run'))
+
+ self.popen_python("""import sys
+sys.path = ['%s'] + sys.path
+import TestCmd
+def test1(self):
+ self.run()
+ self.no_result(condition = (self.status == 0))
+def test2(self):
+ test1(self)
+test2(TestCmd.TestCmd(program = 'run', interpreter = 'python', workdir = ''))
+""" % self.orig_cwd, status = 2, stderr = "NO RESULT for test of %s\n\tat line 6 of <stdin> (test1)\n\tfrom line 8 of <stdin> (test2)\n\tfrom line 9 of <stdin>\n" % run_env.workpath('run'))
+
+ self.popen_python("""import sys
+sys.path = ['%s'] + sys.path
+import TestCmd
+def test1(self):
+ self.run()
+ self.no_result(condition = (self.status == 0), skip = 1)
+def test2(self):
+ test1(self)
+test2(TestCmd.TestCmd(program = 'run', interpreter = 'python', workdir = ''))
+""" % self.orig_cwd, status = 2, stderr = "NO RESULT for test of %s\n\tat line 8 of <stdin> (test2)\n\tfrom line 9 of <stdin>\n" % run_env.workpath('run'))
+
+
+
+class pass_test_TestCase(TestCmdTestCase):
+ def test_pass_test(self):
+ """Test pass_test()"""
+ run_env = TestCmd.TestCmd(workdir = '')
+ run_env.write('run', """import sys
+sys.stdout.write("run: STDOUT\\n")
+sys.stderr.write("run: STDERR\\n")
+""")
+ os.chdir(run_env.workdir)
+ # Everything before this prepared our "source directory."
+ # Now do the real test.
+ self.popen_python("""import sys
+sys.path = ['%s'] + sys.path
+import TestCmd
+TestCmd.pass_test(condition = 1)
+""" % self.orig_cwd, stderr = "PASSED\n")
+
+ self.popen_python("""import sys
+sys.path = ['%s'] + sys.path
+import TestCmd
+test = TestCmd.TestCmd(program = 'run', interpreter = 'python', workdir = '')
+test.run()
+test.pass_test(condition = (test.status == 0))
+""" % self.orig_cwd, stderr = "PASSED\n")
+
+ self.popen_python("""import sys
+sys.path = ['%s'] + sys.path
+import TestCmd
+test = TestCmd.TestCmd(program = 'run', interpreter = 'python', workdir = '')
+test.run()
+def brag():
+ sys.stderr.write("printed on success\\n")
+test.pass_test(condition = (test.status == 0), function = brag)
+""" % self.orig_cwd, stderr = "printed on success\nPASSED\n")
+
+ # TODO(sgk): SHOULD ALSO TEST FAILURE CONDITIONS
+
+
+
+class preserve_TestCase(TestCmdTestCase):
+ def test_preserve(self):
+ """Test preserve()"""
+ def cleanup_test(test, cond=None, stdout=""):
+ save = sys.stdout
+ with closing(StringIO()) as io:
+ sys.stdout = io
+ try:
+ if cond:
+ test.cleanup(cond)
+ else:
+ test.cleanup()
+ o = io.getvalue()
+ assert o == stdout, "o = `%s', stdout = `%s'" % (o, stdout)
+ finally:
+ sys.stdout = save
+
+ test = TestCmd.TestCmd(workdir='')
+ wdir = test.workdir
+ try:
+ test.write('file1', "Test file #1\n")
+ #test.cleanup()
+ cleanup_test(test)
+ assert not os.path.exists(wdir)
+ finally:
+ if os.path.exists(wdir):
+ shutil.rmtree(wdir, ignore_errors=1)
+ test._dirlist.remove(wdir)
+
+ test = TestCmd.TestCmd(workdir='')
+ wdir = test.workdir
+ try:
+ test.write('file2', "Test file #2\n")
+ test.preserve('pass_test')
+ cleanup_test(test, 'pass_test', "Preserved directory %s\n" % wdir)
+ assert os.path.isdir(wdir)
+ cleanup_test(test, 'fail_test')
+ assert not os.path.exists(wdir)
+ finally:
+ if os.path.exists(wdir):
+ shutil.rmtree(wdir, ignore_errors = 1)
+ test._dirlist.remove(wdir)
+
+ test = TestCmd.TestCmd(workdir = '')
+ wdir = test.workdir
+ try:
+ test.write('file3', "Test file #3\n")
+ test.preserve('fail_test')
+ cleanup_test(test, 'fail_test', "Preserved directory %s\n" % wdir)
+ assert os.path.isdir(wdir)
+ cleanup_test(test, 'pass_test')
+ assert not os.path.exists(wdir)
+ finally:
+ if os.path.exists(wdir):
+ shutil.rmtree(wdir, ignore_errors = 1)
+ test._dirlist.remove(wdir)
+
+ test = TestCmd.TestCmd(workdir = '')
+ wdir = test.workdir
+ try:
+ test.write('file4', "Test file #4\n")
+ test.preserve('fail_test', 'no_result')
+ cleanup_test(test, 'fail_test', "Preserved directory %s\n" % wdir)
+ assert os.path.isdir(wdir)
+ cleanup_test(test, 'no_result', "Preserved directory %s\n" % wdir)
+ assert os.path.isdir(wdir)
+ cleanup_test(test, 'pass_test')
+ assert not os.path.exists(wdir)
+ finally:
+ if os.path.exists(wdir):
+ shutil.rmtree(wdir, ignore_errors = 1)
+ test._dirlist.remove(wdir)
+
+ test = TestCmd.TestCmd(workdir = '')
+ wdir = test.workdir
+ try:
+ test.preserve()
+ cleanup_test(test, 'pass_test', "Preserved directory %s\n" % wdir)
+ assert os.path.isdir(wdir)
+ cleanup_test(test, 'fail_test', "Preserved directory %s\n" % wdir)
+ assert os.path.isdir(wdir)
+ cleanup_test(test, 'no_result', "Preserved directory %s\n" % wdir)
+ assert os.path.isdir(wdir)
+ finally:
+ if os.path.exists(wdir):
+ shutil.rmtree(wdir, ignore_errors = 1)
+ test._dirlist.remove(wdir)
+
+
+
+class program_TestCase(TestCmdTestCase):
+ def test_program(self):
+ """Test program()"""
+ test = TestCmd.TestCmd()
+ assert test.program is None, 'initialized program?'
+ test = TestCmd.TestCmd(program = 'test')
+ assert test.program == os.path.join(os.getcwd(), 'test'), 'uninitialized program'
+ test.program_set('foo')
+ assert test.program == os.path.join(os.getcwd(), 'foo'), 'did not set program'
+
+
+
+class read_TestCase(TestCmdTestCase):
+ def test_read(self):
+ """Test read()"""
+ test = TestCmd.TestCmd(workdir = '', subdir = 'foo')
+ wdir_file1 = os.path.join(test.workdir, 'file1')
+ wdir_file2 = os.path.join(test.workdir, 'file2')
+ wdir_foo_file3 = os.path.join(test.workdir, 'foo', 'file3')
+ wdir_file4 = os.path.join(test.workdir, 'file4')
+ wdir_file5 = os.path.join(test.workdir, 'file5')
+
+ with open(wdir_file1, 'wb') as f:
+ f.write(to_bytes(""))
+ with open(wdir_file2, 'wb') as f:
+ f.write(to_bytes("Test\nfile\n#2.\n"))
+ with open(wdir_foo_file3, 'wb') as f:
+ f.write(to_bytes("Test\nfile\n#3.\n"))
+ with open(wdir_file4, 'wb') as f:
+ f.write(to_bytes("Test\nfile\n#4.\n"))
+ with open(wdir_file5, 'wb') as f:
+ f.write(to_bytes("Test\r\nfile\r\n#5.\r\n"))
+
+ try:
+ contents = test.read('no_file')
+ except IOError: # expect "No such file or directory"
+ pass
+ except:
+ raise
+
+ try:
+ test.read(test.workpath('file_x'), mode = 'w')
+ except ValueError: # expect "mode must begin with 'r'"
+ pass
+ except:
+ raise
+
+ def _file_matches(file, contents, expected):
+ contents = to_str(contents)
+ assert contents == expected, \
+ "Expected contents of " + str(file) + "==========\n" + \
+ expected + \
+ "Actual contents of " + str(file) + "============\n" + \
+ contents
+
+ _file_matches(wdir_file1, test.read('file1'), "")
+ _file_matches(wdir_file2, test.read('file2'), "Test\nfile\n#2.\n")
+ _file_matches(wdir_foo_file3, test.read(['foo', 'file3']),
+ "Test\nfile\n#3.\n")
+ _file_matches(wdir_foo_file3,
+ test.read(UserList(['foo', 'file3'])),
+ "Test\nfile\n#3.\n")
+ _file_matches(wdir_file4, test.read('file4', mode = 'r'),
+ "Test\nfile\n#4.\n")
+ _file_matches(wdir_file5, test.read('file5', mode = 'rb'),
+ "Test\r\nfile\r\n#5.\r\n")
+
+
+
+class rmdir_TestCase(TestCmdTestCase):
+ def test_rmdir(self):
+ """Test rmdir()"""
+ test = TestCmd.TestCmd(workdir = '')
+
+ try:
+ test.rmdir(['no', 'such', 'dir'])
+ except EnvironmentError:
+ pass
+ else:
+ raise Exception("did not catch expected SConsEnvironmentError")
+
+ test.subdir(['sub'],
+ ['sub', 'dir'],
+ ['sub', 'dir', 'one'])
+
+ s = test.workpath('sub')
+ s_d = test.workpath('sub', 'dir')
+ s_d_o = test.workpath('sub', 'dir', 'one')
+
+ try:
+ test.rmdir(['sub'])
+ except EnvironmentError:
+ pass
+ else:
+ raise Exception("did not catch expected SConsEnvironmentError")
+
+ assert os.path.isdir(s_d_o), "%s is gone?" % s_d_o
+
+ try:
+ test.rmdir(['sub'])
+ except EnvironmentError:
+ pass
+ else:
+ raise Exception("did not catch expected SConsEnvironmentError")
+
+ assert os.path.isdir(s_d_o), "%s is gone?" % s_d_o
+
+ test.rmdir(['sub', 'dir', 'one'])
+
+ assert not os.path.exists(s_d_o), "%s exists?" % s_d_o
+ assert os.path.isdir(s_d), "%s is gone?" % s_d
+
+ test.rmdir(['sub', 'dir'])
+
+ assert not os.path.exists(s_d), "%s exists?" % s_d
+ assert os.path.isdir(s), "%s is gone?" % s
+
+ test.rmdir('sub')
+
+ assert not os.path.exists(s), "%s exists?" % s
+
+
+
+class run_TestCase(TestCmdTestCase):
+ def test_run(self):
+ """Test run()"""
+
+ t = self.setup_run_scripts()
+
+ # Everything before this prepared our "source directory."
+ # Now do the real test.
+ try:
+ test = TestCmd.TestCmd(program = t.script,
+ interpreter = 'python',
+ workdir = '',
+ subdir = 'script_subdir')
+
+ test.run()
+ self.run_match(test.stdout(), t.script, "STDOUT", t.workdir,
+ repr([]))
+ self.run_match(test.stderr(), t.script, "STDERR", t.workdir,
+ repr([]))
+
+ test.run(arguments = 'arg1 arg2 arg3')
+ self.run_match(test.stdout(), t.script, "STDOUT", t.workdir,
+ repr(['arg1', 'arg2', 'arg3']))
+ self.run_match(test.stderr(), t.script, "STDERR", t.workdir,
+ repr(['arg1', 'arg2', 'arg3']))
+
+ test.run(program = t.scriptx, arguments = 'foo')
+ self.run_match(test.stdout(), t.scriptx, "STDOUT", t.workdir,
+ repr(['foo']))
+ self.run_match(test.stderr(), t.scriptx, "STDERR", t.workdir,
+ repr(['foo']))
+
+ test.run(chdir = os.curdir, arguments = 'x y z')
+ self.run_match(test.stdout(), t.script, "STDOUT", test.workdir,
+ repr(['x', 'y', 'z']))
+ self.run_match(test.stderr(), t.script, "STDERR", test.workdir,
+ repr(['x', 'y', 'z']))
+
+ test.run(chdir = 'script_subdir')
+ script_subdir = test.workpath('script_subdir')
+ self.run_match(test.stdout(), t.script, "STDOUT", script_subdir,
+ repr([]))
+ self.run_match(test.stderr(), t.script, "STDERR", script_subdir,
+ repr([]))
+
+ test.run(program = t.script1, interpreter = ['python', '-x'])
+ self.run_match(test.stdout(), t.script1, "STDOUT", t.workdir,
+ repr([]))
+ self.run_match(test.stderr(), t.script1, "STDERR", t.workdir,
+ repr([]))
+
+ try:
+ test.run(chdir = 'no_subdir')
+ except OSError:
+ pass
+
+ test.run(program = 'no_script', interpreter = 'python')
+ assert test.status != None, test.status
+
+ try:
+ test.run(program = 'no_script', interpreter = 'no_interpreter')
+ except OSError:
+ # Python versions that use subprocess throw an OSError
+ # exception when they try to execute something that
+ # isn't there.
+ pass
+ else:
+ # Python versions that use os.popen3() or the Popen3
+ # class run things through the shell, which just returns
+ # a non-zero exit status.
+ assert test.status != None, test.status
+
+ testx = TestCmd.TestCmd(program = t.scriptx,
+ workdir = '',
+ subdir = 't.scriptx_subdir')
+
+ testx.run()
+ self.run_match(testx.stdout(), t.scriptx, "STDOUT", t.workdir,
+ repr([]))
+ self.run_match(testx.stderr(), t.scriptx, "STDERR", t.workdir,
+ repr([]))
+
+ testx.run(arguments = 'foo bar')
+ self.run_match(testx.stdout(), t.scriptx, "STDOUT", t.workdir,
+ repr(['foo', 'bar']))
+ self.run_match(testx.stderr(), t.scriptx, "STDERR", t.workdir,
+ repr(['foo', 'bar']))
+
+ testx.run(program = t.script, interpreter = 'python', arguments = 'bar')
+ self.run_match(testx.stdout(), t.script, "STDOUT", t.workdir,
+ repr(['bar']))
+ self.run_match(testx.stderr(), t.script, "STDERR", t.workdir,
+ repr(['bar']))
+
+ testx.run(chdir = os.curdir, arguments = 'baz')
+ self.run_match(testx.stdout(), t.scriptx, "STDOUT", testx.workdir,
+ repr(['baz']))
+ self.run_match(testx.stderr(), t.scriptx, "STDERR", testx.workdir,
+ repr(['baz']))
+
+ testx.run(chdir = 't.scriptx_subdir')
+ t.scriptx_subdir = testx.workpath('t.scriptx_subdir')
+ self.run_match(testx.stdout(), t.scriptx, "STDOUT", t.scriptx_subdir,
+ repr([]))
+ self.run_match(testx.stderr(), t.scriptx, "STDERR", t.scriptx_subdir,
+ repr([]))
+
+ testx.run(program = t.script1, interpreter = ('python', '-x'))
+ self.run_match(testx.stdout(), t.script1, "STDOUT", t.workdir,
+ repr([]))
+ self.run_match(testx.stderr(), t.script1, "STDERR", t.workdir,
+ repr([]))
+
+ s = os.path.join('.', t.scriptx)
+ testx.run(program = [s])
+ self.run_match(testx.stdout(), t.scriptx, "STDOUT", t.workdir,
+ repr([]))
+ self.run_match(testx.stderr(), t.scriptx, "STDERR", t.workdir,
+ repr([]))
+
+ try:
+ testx.run(chdir = 'no_subdir')
+ except OSError:
+ pass
+
+ try:
+ testx.run(program = 'no_program')
+ except OSError:
+ # Python versions that use subprocess throw an OSError
+ # exception when they try to execute something that
+ # isn't there.
+ pass
+ else:
+ # Python versions that use os.popen3() or the Popen3
+ # class run things through the shell, which just returns
+ # a non-zero exit status.
+ assert testx.status != None, testx.status
+
+ test1 = TestCmd.TestCmd(program = t.script1,
+ interpreter = ['python', '-x'],
+ workdir = '')
+
+ test1.run()
+ self.run_match(test1.stdout(), t.script1, "STDOUT", t.workdir,
+ repr([]))
+ self.run_match(test1.stderr(), t.script1, "STDERR", t.workdir,
+ repr([]))
+
+ finally:
+ os.chdir(t.orig_cwd)
+
+ def test_run_subclass(self):
+ """Test run() through a subclass with different signatures"""
+
+ t = self.setup_run_scripts()
+
+ # Everything before this prepared our "source directory."
+ # Now do the real test.
+
+ class MyTestCmdSubclass(TestCmd.TestCmd):
+ def start(self, additional_argument=None, **kw):
+ return TestCmd.TestCmd.start(self, **kw)
+
+ try:
+ test = MyTestCmdSubclass(program = t.script,
+ interpreter = 'python',
+ workdir = '',
+ subdir = 'script_subdir')
+
+ test.run()
+ self.run_match(test.stdout(), t.script, "STDOUT", t.workdir,
+ repr([]))
+ self.run_match(test.stderr(), t.script, "STDERR", t.workdir,
+ repr([]))
+ finally:
+ os.chdir(t.orig_cwd)
+
+
+class run_verbose_TestCase(TestCmdTestCase):
+ def test_run_verbose(self):
+ """Test the run() method's verbose attribute"""
+
+ # Prepare our "source directory."
+ t = self.setup_run_scripts()
+
+ save_stdout = sys.stdout
+ save_stderr = sys.stderr
+
+ try:
+ # Test calling TestCmd() with an explicit verbose = 1.
+
+ test = TestCmd.TestCmd(program = t.script,
+ interpreter = 'python',
+ workdir = '',
+ verbose = 1)
+
+ with closing(StringIO()) as sys.stdout, closing(StringIO()) as sys.stderr:
+ test.run(arguments = ['arg1 arg2'])
+ o = sys.stdout.getvalue()
+ assert o == '', o
+ e = sys.stderr.getvalue()
+ expect = 'python "%s" "arg1 arg2"\n' % t.script_path
+ assert expect == e, (expect, e)
+
+ testx = TestCmd.TestCmd(program = t.scriptx,
+ workdir = '',
+ verbose = 1)
+
+ with closing(StringIO()) as sys.stdout, closing(StringIO()) as sys.stderr:
+ testx.run(arguments = ['arg1 arg2'])
+ expect = '"%s" "arg1 arg2"\n' % t.scriptx_path
+ o = sys.stdout.getvalue()
+ assert o == '', o
+ e = sys.stderr.getvalue()
+ assert expect == e, (expect, e)
+
+ # Test calling TestCmd() with an explicit verbose = 2.
+
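+ # These templates mirror the banner blocks that TestCmd prints at
+ # verbose levels 2 and above (a status line plus delimited STDOUT and
+ # STDERR sections); out_fmt and err_fmt cover runs where only one of
+ # the two streams has output.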
+ outerr_fmt = """\
+============ STATUS: 0
+============ BEGIN STDOUT (len=%s):
+%s============ END STDOUT
+============ BEGIN STDERR (len=%s)
+%s============ END STDERR
+"""
+
+ out_fmt = """\
+============ STATUS: 0
+============ BEGIN STDOUT (len=%s):
+%s============ END STDOUT
+"""
+
+ err_fmt = """\
+============ STATUS: 0
+============ BEGIN STDERR (len=%s)
+%s============ END STDERR
+"""
+
+ test = TestCmd.TestCmd(program = t.script,
+ interpreter = 'python',
+ workdir = '',
+ verbose = 2)
+
+ with closing(StringIO()) as sys.stdout, closing(StringIO()) as sys.stderr:
+ test.run(arguments = ['arg1 arg2'])
+
+ line_fmt = "script: %s: %s: ['arg1 arg2']\n"
+ stdout_line = line_fmt % ('STDOUT', t.sub_dir)
+ stderr_line = line_fmt % ('STDERR', t.sub_dir)
+ expect = outerr_fmt % (len(stdout_line), stdout_line,
+ len(stderr_line), stderr_line)
+ o = sys.stdout.getvalue()
+ assert expect == o, (expect, o)
+
+ expect = 'python "%s" "arg1 arg2"\n' % t.script_path
+ e = sys.stderr.getvalue()
+ assert e == expect, (e, expect)
+
+ testx = TestCmd.TestCmd(program = t.scriptx,
+ workdir = '',
+ verbose = 2)
+
+ with closing(StringIO()) as sys.stdout, closing(StringIO()) as sys.stderr:
+ testx.run(arguments = ['arg1 arg2'])
+
+ line_fmt = "scriptx.bat: %s: %s: ['arg1 arg2']\n"
+ stdout_line = line_fmt % ('STDOUT', t.sub_dir)
+ stderr_line = line_fmt % ('STDERR', t.sub_dir)
+ expect = outerr_fmt % (len(stdout_line), stdout_line,
+ len(stderr_line), stderr_line)
+ o = sys.stdout.getvalue()
+ assert expect == o, (expect, o)
+
+ expect = '"%s" "arg1 arg2"\n' % t.scriptx_path
+ e = sys.stderr.getvalue()
+ assert e == expect, (e, expect)
+
+ # Test a script that writes only to stdout, first with verbose = 2
+ # and then with an explicit verbose = 3.
+
+ test = TestCmd.TestCmd(program = t.scriptout,
+ interpreter = 'python',
+ workdir = '',
+ verbose = 2)
+
+ with closing(StringIO()) as sys.stdout, closing(StringIO()) as sys.stderr:
+ test.run(arguments = ['arg1 arg2'])
+
+ line_fmt = "scriptout: %s: %s: ['arg1 arg2']\n"
+ stdout_line = line_fmt % ('STDOUT', t.sub_dir)
+ expect = out_fmt % (len(stdout_line), stdout_line)
+ o = sys.stdout.getvalue()
+ assert expect == o, (expect, o)
+
+ e = sys.stderr.getvalue()
+ expect = 'python "%s" "arg1 arg2"\n' % (t.scriptout_path)
+ assert e == expect, (e, expect)
+
+ test = TestCmd.TestCmd(program = t.scriptout,
+ interpreter = 'python',
+ workdir = '',
+ verbose = 3)
+
+ with closing(StringIO()) as sys.stdout, closing(StringIO()) as sys.stderr:
+ test.run(arguments = ['arg1 arg2'])
+
+ line_fmt = "scriptout: %s: %s: ['arg1 arg2']\n"
+ stdout_line = line_fmt % ('STDOUT', t.sub_dir)
+ expect = outerr_fmt % (len(stdout_line), stdout_line,
+ '0', '')
+ o = sys.stdout.getvalue()
+ assert expect == o, (expect, o)
+
+ e = sys.stderr.getvalue()
+ expect = 'python "%s" "arg1 arg2"\n' % (t.scriptout_path)
+ assert e == expect, (e, expect)
+
+ # Test letting TestCmd() pick up verbose = 2 from the environment.
+
+ os.environ['TESTCMD_VERBOSE'] = '2'
+
+ test = TestCmd.TestCmd(program = t.script,
+ interpreter = 'python',
+ workdir = '')
+
+ with closing(StringIO()) as sys.stdout, closing(StringIO()) as sys.stderr:
+ test.run(arguments = ['arg1 arg2'])
+
+ line_fmt = "script: %s: %s: ['arg1 arg2']\n"
+ stdout_line = line_fmt % ('STDOUT', t.sub_dir)
+ stderr_line = line_fmt % ('STDERR', t.sub_dir)
+ expect = outerr_fmt % (len(stdout_line), stdout_line,
+ len(stderr_line), stderr_line)
+ o = sys.stdout.getvalue()
+ assert expect == o, (expect, o)
+
+ expect = 'python "%s" "arg1 arg2"\n' % t.script_path
+ e = sys.stderr.getvalue()
+ assert e == expect, (e, expect)
+
+ testx = TestCmd.TestCmd(program = t.scriptx,
+ workdir = '')
+
+ with closing(StringIO()) as sys.stdout, closing(StringIO()) as sys.stderr:
+ testx.run(arguments = ['arg1 arg2'])
+
+ line_fmt = "scriptx.bat: %s: %s: ['arg1 arg2']\n"
+ stdout_line = line_fmt % ('STDOUT', t.sub_dir)
+ stderr_line = line_fmt % ('STDERR', t.sub_dir)
+ expect = outerr_fmt % (len(stdout_line), stdout_line,
+ len(stderr_line), stderr_line)
+ o = sys.stdout.getvalue()
+ assert expect == o, (expect, o)
+
+ expect = '"%s" "arg1 arg2"\n' % t.scriptx_path
+ e = sys.stderr.getvalue()
+ assert e == expect, (e, expect)
+
+ # Test letting TestCmd() pick up verbose = 1 from the environment.
+
+ os.environ['TESTCMD_VERBOSE'] = '1'
+
+ test = TestCmd.TestCmd(program = t.script,
+ interpreter = 'python',
+ workdir = '',
+ verbose = 1)
+
+ with closing(StringIO()) as sys.stdout, closing(StringIO()) as sys.stderr:
+ test.run(arguments = ['arg1 arg2'])
+ o = sys.stdout.getvalue()
+ assert o == '', o
+ e = sys.stderr.getvalue()
+ expect = 'python "%s" "arg1 arg2"\n' % t.script_path
+ assert expect == e, (expect, e)
+
+ testx = TestCmd.TestCmd(program = t.scriptx,
+ workdir = '',
+ verbose = 1)
+
+ with closing(StringIO()) as sys.stdout, closing(StringIO()) as sys.stderr:
+ testx.run(arguments = ['arg1 arg2'])
+ expect = '"%s" "arg1 arg2"\n' % t.scriptx_path
+ o = sys.stdout.getvalue()
+ assert o == '', o
+ e = sys.stderr.getvalue()
+ assert expect == e, (expect, e)
+
+ finally:
+ sys.stdout = save_stdout
+ sys.stderr = save_stderr
+ os.chdir(t.orig_cwd)
+ os.environ['TESTCMD_VERBOSE'] = ''
+
+
+
+class set_diff_function_TestCase(TestCmdTestCase):
+ def test_set_diff_function(self):
+ """Test set_diff_function()"""
+ self.popen_python(r"""import sys
+sys.path = ['%s'] + sys.path
+import TestCmd
+test = TestCmd.TestCmd()
+test.diff("a\n", "a\n")
+test.set_diff_function('diff_re')
+test.diff(".\n", "a\n")
+sys.exit(0)
+""" % self.orig_cwd)
+
+ def test_set_diff_function_stdout(self):
+ """Test set_diff_function(): stdout"""
+ self.popen_python("""from __future__ import print_function
+import sys
+sys.path = ['%s'] + sys.path
+import TestCmd
+test = TestCmd.TestCmd()
+print("diff:")
+test.diff("a\\n", "a\\n")
+print("diff_stdout:")
+test.diff_stdout("a\\n", "a\\n")
+test.set_diff_function(stdout='diff_re')
+print("diff:")
+test.diff(".\\n", "a\\n")
+print("diff_stdout:")
+test.diff_stdout(".\\n", "a\\n")
+sys.exit(0)
+""" % self.orig_cwd,
+ stdout="""\
+diff:
+diff_stdout:
+diff:
+1c1
+< .
+---
+> a
+diff_stdout:
+""")
+
+ def test_set_diff_function_stderr(self):
+ """Test set_diff_function(): stderr """
+ self.popen_python("""from __future__ import print_function
+import sys
+sys.path = ['%s'] + sys.path
+import TestCmd
+test = TestCmd.TestCmd()
+print("diff:")
+test.diff("a\\n", "a\\n")
+print("diff_stderr:")
+test.diff_stderr("a\\n", "a\\n")
+test.set_diff_function(stderr='diff_re')
+print("diff:")
+test.diff(".\\n", "a\\n")
+print("diff_stderr:")
+test.diff_stderr(".\\n", "a\\n")
+sys.exit(0)
+""" % self.orig_cwd,
+ stdout="""\
+diff:
+diff_stderr:
+diff:
+1c1
+< .
+---
+> a
+diff_stderr:
+""")
+
+
+
+class set_match_function_TestCase(TestCmdTestCase):
+ def test_set_match_function(self):
+ """Test set_match_function()"""
+ test = TestCmd.TestCmd()
+ assert test.match("abcde\n", "a.*e\n")
+ assert test.match("abcde\n", "abcde\n")
+
+ test.set_match_function('match_exact')
+
+ assert not test.match("abcde\n", "a.*e\n")
+ assert test.match("abcde\n", "abcde\n")
+
+ def test_set_match_function_stdout(self):
+ """Test set_match_function(): stdout """
+ test = TestCmd.TestCmd()
+ assert test.match("abcde\n", "a.*e\n")
+ assert test.match("abcde\n", "abcde\n")
+ assert test.match_stdout("abcde\n", "a.*e\n")
+ assert test.match_stdout("abcde\n", "abcde\n")
+
+ test.set_match_function(stdout='match_exact')
+
+ assert test.match("abcde\n", "a.*e\n")
+ assert test.match("abcde\n", "abcde\n")
+ assert not test.match_stdout("abcde\n", "a.*e\n")
+ assert test.match_stdout("abcde\n", "abcde\n")
+
+ def test_set_match_function_stderr(self):
+ """Test set_match_function(): stderr """
+ test = TestCmd.TestCmd()
+ assert test.match("abcde\n", "a.*e\n")
+ assert test.match("abcde\n", "abcde\n")
+ assert test.match_stderr("abcde\n", "a.*e\n")
+ assert test.match_stderr("abcde\n", "abcde\n")
+
+ test.set_match_function(stderr='match_exact')
+
+ assert test.match("abcde\n", "a.*e\n")
+ assert test.match("abcde\n", "abcde\n")
+ assert not test.match_stderr("abcde\n", "a.*e\n")
+ assert test.match_stderr("abcde\n", "abcde\n")
+
+
+
+class sleep_TestCase(TestCmdTestCase):
+ def test_sleep(self):
+ """Test sleep()"""
+ test = TestCmd.TestCmd()
+
+ start = time.time()
+ test.sleep()
+ end = time.time()
+ diff = end - start
+ assert diff > 0.9, "only slept %f seconds (start %f, end %f), not default" % (diff, start, end)
+
+ start = time.time()
+ test.sleep(3)
+ end = time.time()
+ diff = end - start
+ assert diff > 2.9, "only slept %f seconds (start %f, end %f), not 3" % (diff, start, end)
+
+
+
+class stderr_TestCase(TestCmdTestCase):
+ def test_stderr(self):
+ """Test stderr()"""
+ run_env = TestCmd.TestCmd(workdir = '')
+ run_env.write('run1', """import sys
+sys.stdout.write("run1 STDOUT %s\\n" % sys.argv[1:])
+sys.stdout.write("run1 STDOUT second line\\n")
+sys.stderr.write("run1 STDERR %s\\n" % sys.argv[1:])
+sys.stderr.write("run1 STDERR second line\\n")
+""")
+ run_env.write('run2', """import sys
+sys.stdout.write("run2 STDOUT %s\\n" % sys.argv[1:])
+sys.stdout.write("run2 STDOUT second line\\n")
+sys.stderr.write("run2 STDERR %s\\n" % sys.argv[1:])
+sys.stderr.write("run2 STDERR second line\\n")
+""")
+ os.chdir(run_env.workdir)
+ # Everything before this prepared our "source directory."
+ # Now do the real test.
+ test = TestCmd.TestCmd(interpreter = 'python', workdir = '')
+ try:
+ output = test.stderr()
+ except IndexError:
+ pass
+ else:
+ raise IndexError("got unexpected output:\n" + output)
+ test.program_set('run1')
+ test.run(arguments = 'foo bar')
+ test.program_set('run2')
+ test.run(arguments = 'snafu')
+ # XXX SHOULD TEST ABSOLUTE NUMBER AS WELL
+ output = test.stderr()
+ assert output == "run2 STDERR ['snafu']\nrun2 STDERR second line\n", output
+ output = test.stderr(run = -1)
+ assert output == "run1 STDERR ['foo', 'bar']\nrun1 STDERR second line\n", output
+
+
+
+class command_args_TestCase(TestCmdTestCase):
+ def test_command_args(self):
+ """Test command_args()"""
+ run_env = TestCmd.TestCmd(workdir = '')
+ os.chdir(run_env.workdir)
+ # Everything before this prepared our "source directory."
+ # Now do the real test.
+ test = TestCmd.TestCmd(workdir = '')
+
+ r = test.command_args('prog')
+ expect = [run_env.workpath('prog')]
+ assert r == expect, (expect, r)
+
+ r = test.command_args(test.workpath('new_prog'))
+ expect = [test.workpath('new_prog')]
+ assert r == expect, (expect, r)
+
+ r = test.command_args('prog', 'python')
+ expect = ['python', run_env.workpath('prog')]
+ assert r == expect, (expect, r)
+
+ r = test.command_args('prog', 'python', 'arg1 arg2')
+ expect = ['python', run_env.workpath('prog'), 'arg1', 'arg2']
+ assert r == expect, (expect, r)
+
+ test.program_set('default_prog')
+ default_prog = run_env.workpath('default_prog')
+
+ r = test.command_args()
+ expect = [default_prog]
+ assert r == expect, (expect, r)
+
+ r = test.command_args(interpreter='PYTHON')
+ expect = ['PYTHON', default_prog]
+ assert r == expect, (expect, r)
+
+ r = test.command_args(interpreter='PYTHON', arguments='arg3 arg4')
+ expect = ['PYTHON', default_prog, 'arg3', 'arg4']
+ assert r == expect, (expect, r)
+
+ test.interpreter_set('default_python')
+
+ r = test.command_args()
+ expect = ['default_python', default_prog]
+ assert r == expect, (expect, r)
+
+ r = test.command_args(arguments='arg5 arg6')
+ expect = ['default_python', default_prog, 'arg5', 'arg6']
+ assert r == expect, (expect, r)
+
+ r = test.command_args('new_prog_1')
+ expect = [run_env.workpath('new_prog_1')]
+ assert r == expect, (expect, r)
+
+ r = test.command_args(program='new_prog_2')
+ expect = [run_env.workpath('new_prog_2')]
+ assert r == expect, (expect, r)
+
+
+
+class start_TestCase(TestCmdTestCase):
+ def setup_run_scripts(self):
+ t = TestCmdTestCase.setup_run_scripts(self)
+ t.recv_script = 'script_recv'
+ t.recv_script_path = t.run_env.workpath(t.sub_dir, t.recv_script)
+ t.recv_out_path = t.run_env.workpath('script_recv.out')
+ text = """\
+import os
+import sys
+
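+# Rebind stdout and stderr to flush after every write(), so the parent
+# test process sees this script's output without buffering delays.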
+class Unbuffered:
+ def __init__(self, file):
+ self.file = file
+ def write(self, arg):
+ self.file.write(arg)
+ self.file.flush()
+ def __getattr__(self, attr):
+ return getattr(self.file, attr)
+
+sys.stdout = Unbuffered(sys.stdout)
+sys.stderr = Unbuffered(sys.stderr)
+
+sys.stdout.write('script_recv: STDOUT\\n')
+sys.stderr.write('script_recv: STDERR\\n')
+with open(r'%s', 'wb') as logfp:
+ while 1:
+ line = sys.stdin.readline()
+ if not line:
+ break
+ logfp.write('script_recv: ' + line)
+ sys.stdout.write('script_recv: STDOUT: ' + line)
+ sys.stderr.write('script_recv: STDERR: ' + line)
+""" % t.recv_out_path
+ t.run_env.write(t.recv_script_path, text)
+ os.chmod(t.recv_script_path, 0o644) # XXX UNIX-specific
+ return t
+
+ def test_start(self):
+ """Test start()"""
+
+ t = self.setup_run_scripts()
+
+ # Everything before this prepared our "source directory."
+ # Now do the real test.
+ try:
+ test = TestCmd.TestCmd(program = t.script,
+ interpreter = 'python',
+ workdir = '',
+ subdir = 'script_subdir')
+
+ p = test.start()
+ self.run_match(p.stdout.read(), t.script, "STDOUT", t.workdir,
+ repr([]))
+ self.run_match(p.stderr.read(), t.script, "STDERR", t.workdir,
+ repr([]))
+ p.wait()
+
+ p = test.start(arguments='arg1 arg2 arg3')
+ self.run_match(p.stdout.read(), t.script, "STDOUT", t.workdir,
+ repr(['arg1', 'arg2', 'arg3']))
+ self.run_match(p.stderr.read(), t.script, "STDERR", t.workdir,
+ repr(['arg1', 'arg2', 'arg3']))
+ p.wait()
+
+ p = test.start(program=t.scriptx, arguments='foo')
+ self.run_match(p.stdout.read(), t.scriptx, "STDOUT", t.workdir,
+ repr(['foo']))
+ self.run_match(p.stderr.read(), t.scriptx, "STDERR", t.workdir,
+ repr(['foo']))
+ p.wait()
+
+ p = test.start(program=t.script1, interpreter=['python', '-x'])
+ self.run_match(p.stdout.read(), t.script1, "STDOUT", t.workdir,
+ repr([]))
+ self.run_match(p.stderr.read(), t.script1, "STDERR", t.workdir,
+ repr([]))
+ p.wait()
+
+ p = test.start(program='no_script', interpreter='python')
+ status = p.wait()
+ assert status != None, status
+
+ try:
+ p = test.start(program='no_script', interpreter='no_interpreter')
+ except OSError:
+ # Python versions that use subprocess throw an OSError
+ # exception when they try to execute something that
+ # isn't there.
+ pass
+ else:
+ status = p.wait()
+ # Python versions that use os.popen3() or the Popen3
+ # class run things through the shell, which just returns
+ # a non-zero exit status.
+ assert status != None, status
+
+ testx = TestCmd.TestCmd(program = t.scriptx,
+ workdir = '',
+ subdir = 't.scriptx_subdir')
+
+ p = testx.start()
+ self.run_match(p.stdout.read(), t.scriptx, "STDOUT", t.workdir,
+ repr([]))
+ self.run_match(p.stderr.read(), t.scriptx, "STDERR", t.workdir,
+ repr([]))
+ p.wait()
+
+ p = testx.start(arguments='foo bar')
+ self.run_match(p.stdout.read(), t.scriptx, "STDOUT", t.workdir,
+ repr(['foo', 'bar']))
+ self.run_match(p.stderr.read(), t.scriptx, "STDERR", t.workdir,
+ repr(['foo', 'bar']))
+ p.wait()
+
+ p = testx.start(program=t.script, interpreter='python', arguments='bar')
+ self.run_match(p.stdout.read(), t.script, "STDOUT", t.workdir,
+ repr(['bar']))
+ self.run_match(p.stderr.read(), t.script, "STDERR", t.workdir,
+ repr(['bar']))
+ p.wait()
+
+ p = testx.start(program=t.script1, interpreter=('python', '-x'))
+ self.run_match(p.stdout.read(), t.script1, "STDOUT", t.workdir,
+ repr([]))
+ self.run_match(p.stderr.read(), t.script1, "STDERR", t.workdir,
+ repr([]))
+ p.wait()
+
+ s = os.path.join('.', t.scriptx)
+ p = testx.start(program=[s])
+ self.run_match(p.stdout.read(), t.scriptx, "STDOUT", t.workdir,
+ repr([]))
+ self.run_match(p.stderr.read(), t.scriptx, "STDERR", t.workdir,
+ repr([]))
+ p.wait()
+
+ try:
+ testx.start(program='no_program')
+ except OSError:
+ # Python versions that use subprocess throw an OSError
+ # exception when they try to execute something that
+ # isn't there.
+ pass
+ else:
+ # Python versions that use os.popen3() or the Popen3
+ # class run things through the shell, which just dies
+ # trying to execute the non-existent program before
+ # we can wait() for it.
+ try:
+ p = p.wait()
+ except OSError:
+ pass
+
+ test1 = TestCmd.TestCmd(program = t.script1,
+ interpreter = ['python', '-x'],
+ workdir = '')
+
+ p = test1.start()
+ self.run_match(p.stdout.read(), t.script1, "STDOUT", t.workdir,
+ repr([]))
+ self.run_match(p.stderr.read(), t.script1, "STDERR", t.workdir,
+ repr([]))
+ p.wait()
+
+ finally:
+ os.chdir(t.orig_cwd)
+
+ def test_finish(self):
+ """Test finish()"""
+
+ t = self.setup_run_scripts()
+
+ # Everything before this prepared our "source directory."
+ # Now do the real test.
+ try:
+
+ test = TestCmd.TestCmd(program = t.recv_script,
+ interpreter = 'python',
+ workdir = '',
+ subdir = 'script_subdir')
+
+ test.start(stdin=1)
+ test.finish()
+ expect_stdout = """\
+script_recv: STDOUT
+"""
+ expect_stderr = """\
+script_recv: STDERR
+"""
+ stdout = test.stdout()
+ assert stdout == expect_stdout, stdout
+ stderr = test.stderr()
+ assert stderr == expect_stderr, stderr
+
+ p = test.start(stdin=1)
+ p.send('input\n')
+ test.finish(p)
+ expect_stdout = """\
+script_recv: STDOUT
+script_recv: STDOUT: input
+"""
+ expect_stderr = """\
+script_recv: STDERR
+script_recv: STDERR: input
+"""
+ stdout = test.stdout()
+ assert stdout == expect_stdout, stdout
+ stderr = test.stderr()
+ assert stderr == expect_stderr, stderr
+
+ p = test.start(combine=1, stdin=1)
+ p.send('input\n')
+ test.finish(p)
+ expect_stdout = """\
+script_recv: STDOUT
+script_recv: STDERR
+script_recv: STDOUT: input
+script_recv: STDERR: input
+"""
+ expect_stderr = ""
+ stdout = test.stdout()
+ assert stdout == expect_stdout, stdout
+ stderr = test.stderr()
+ assert stderr == expect_stderr, stderr
+
+ finally:
+ os.chdir(t.orig_cwd)
+
+ def test_recv(self):
+ """Test the recv() method of objects returned by start()"""
+
+ t = self.setup_run_scripts()
+
+ # Everything before this prepared our "source directory."
+ # Now do the real test.
+ try:
+ test = TestCmd.TestCmd(program = t.script,
+ interpreter = 'python',
+ workdir = '',
+ subdir = 'script_subdir')
+
+ p = test.start()
+ stdout = p.recv()
+ while stdout == '':
+ import time
+ time.sleep(1)
+ stdout = p.recv()
+ self.run_match(stdout, t.script, "STDOUT", t.workdir,
+ repr([]))
+ p.wait()
+
+ finally:
+ os.chdir(t.orig_cwd)
+
+ def test_recv_err(self):
+ """Test the recv_err() method of objects returned by start()"""
+
+ t = self.setup_run_scripts()
+
+ # Everything before this prepared our "source directory."
+ # Now do the real test.
+ try:
+
+ test = TestCmd.TestCmd(program = t.script,
+ interpreter = 'python',
+ workdir = '',
+ subdir = 'script_subdir')
+
+ p = test.start()
+ stderr = p.recv_err()
+ while stderr == '':
+ import time
+ time.sleep(1)
+ stderr = p.recv_err()
+ self.run_match(stderr, t.script, "STDERR", t.workdir,
+ repr([]))
+ p.wait()
+
+
+ finally:
+ os.chdir(t.orig_cwd)
+
+ def test_send(self):
+ """Test the send() method of objects returned by start()"""
+
+ t = self.setup_run_scripts()
+
+ # Everything before this prepared our "source directory."
+ # Now do the real test.
+ try:
+
+ test = TestCmd.TestCmd(program = t.recv_script,
+ interpreter = 'python',
+ workdir = '',
+ subdir = 'script_subdir')
+
+ p = test.start(stdin=1)
+ input = 'stdin.write() input to the receive script\n'
+ p.stdin.write(to_bytes(input))
+ p.stdin.close()
+ p.wait()
+ with open(t.recv_out_path, 'rb') as f:
+ result = to_str(f.read())
+ expect = 'script_recv: ' + input
+ assert result == expect, repr(result)
+
+ p = test.start(stdin=1)
+ input = 'send() input to the receive script\n'
+ p.send(input)
+ p.stdin.close()
+ p.wait()
+ with open(t.recv_out_path, 'rb') as f:
+ result = to_str(f.read())
+ expect = 'script_recv: ' + input
+ assert result == expect, repr(result)
+
+ finally:
+ os.chdir(t.orig_cwd)
+
+ # TODO(sgk): figure out how to eliminate the race conditions here.
+ def __FLAKY__test_send_recv(self):
+ """Test the send_recv() method of objects returned by start()"""
+
+ t = self.setup_run_scripts()
+
+ # Everything before this prepared our "source directory."
+ # Now do the real test.
+ try:
+
+ test = TestCmd.TestCmd(program = t.recv_script,
+ interpreter = 'python',
+ workdir = '',
+ subdir = 'script_subdir')
+
+ def do_send_recv(p, input):
+ send, stdout, stderr = p.send_recv(input)
+ stdout = self.translate_newlines(stdout)
+ stderr = self.translate_newlines(stderr)
+ return send, stdout, stderr
+
+ p = test.start(stdin=1)
+ input = 'input to the receive script\n'
+ send, stdout, stderr = do_send_recv(p, input)
+ # Buffering issues and a race condition prevent this from
+ # being completely deterministic, so check for both null
+ # output and the first write() on each stream.
+ assert stdout in ("", "script_recv: STDOUT\n"), stdout
+ assert stderr in ("", "script_recv: STDERR\n"), stderr
+ send, stdout, stderr = do_send_recv(p, input)
+ assert stdout in ("", "script_recv: STDOUT\n"), stdout
+ assert stderr in ("", "script_recv: STDERR\n"), stderr
+ p.stdin.close()
+ stdout = self.translate_newlines(p.recv())
+ stderr = self.translate_newlines(p.recv_err())
+ assert stdout in ("", "script_recv: STDOUT\n"), stdout
+ assert stderr in ("", "script_recv: STDERR\n"), stderr
+ p.wait()
+ stdout = self.translate_newlines(p.recv())
+ stderr = self.translate_newlines(p.recv_err())
+ expect_stdout = """\
+script_recv: STDOUT
+script_recv: STDOUT: input to the receive script
+script_recv: STDOUT: input to the receive script
+"""
+ expect_stderr = """\
+script_recv: STDERR
+script_recv: STDERR: input to the receive script
+script_recv: STDERR: input to the receive script
+"""
+ assert stdout == expect_stdout, stdout
+ assert stderr == expect_stderr, stderr
+ with open(t.recv_out_path, 'rb') as f:
+ result = f.read()
+ expect = ('script_recv: ' + input) * 2
+ assert result == expect, (result, stdout, stderr)
+
+ finally:
+ os.chdir(t.orig_cwd)
+
+
+
+class stdin_TestCase(TestCmdTestCase):
+ def test_stdin(self):
+ """Test stdin()"""
+ run_env = TestCmd.TestCmd(workdir = '')
+ run_env.write('run', """from __future__ import print_function
+import fileinput
+for line in fileinput.input():
+ print('Y'.join(line[:-1].split('X')))
+""")
+ run_env.write('input', "X on X this X line X\n")
+ os.chdir(run_env.workdir)
+ # Everything before this prepared our "source directory."
+ # Now do the real test.
+ test = TestCmd.TestCmd(program = 'run', interpreter = 'python', workdir = '')
+ test.run(arguments = 'input')
+ assert test.stdout() == "Y on Y this Y line Y\n"
+ test.run(stdin = "X is X here X tooX\n")
+ assert test.stdout() == "Y is Y here Y tooY\n"
+ test.run(stdin = """X here X
+X there X
+""")
+ assert test.stdout() == "Y here Y\nY there Y\n"
+ test.run(stdin = ["X line X\n", "X another X\n"])
+ assert test.stdout() == "Y line Y\nY another Y\n"
+
+
+
+class stdout_TestCase(TestCmdTestCase):
+ def test_stdout(self):
+ """Test stdout()"""
+ run_env = TestCmd.TestCmd(workdir = '')
+ run_env.write('run1', """import sys
+sys.stdout.write("run1 STDOUT %s\\n" % sys.argv[1:])
+sys.stdout.write("run1 STDOUT second line\\n")
+sys.stderr.write("run1 STDERR %s\\n" % sys.argv[1:])
+sys.stderr.write("run1 STDERR second line\\n")
+""")
+ run_env.write('run2', """import sys
+sys.stdout.write("run2 STDOUT %s\\n" % sys.argv[1:])
+sys.stdout.write("run2 STDOUT second line\\n")
+sys.stderr.write("run2 STDERR %s\\n" % sys.argv[1:])
+sys.stderr.write("run2 STDERR second line\\n")
+""")
+ os.chdir(run_env.workdir)
+ # Everything before this prepared our "source directory."
+ # Now do the real test.
+ test = TestCmd.TestCmd(interpreter = 'python', workdir = '')
+ output = test.stdout()
+ if output is not None:
+ raise IndexError("got unexpected output:\n\t`%s'\n" % output)
+ test.program_set('run1')
+ test.run(arguments = 'foo bar')
+ test.program_set('run2')
+ test.run(arguments = 'snafu')
+ # XXX SHOULD TEST ABSOLUTE NUMBER AS WELL
+ output = test.stdout()
+ assert output == "run2 STDOUT ['snafu']\nrun2 STDOUT second line\n", output
+ output = test.stdout(run = -1)
+ assert output == "run1 STDOUT ['foo', 'bar']\nrun1 STDOUT second line\n", output
+
+
+
+class subdir_TestCase(TestCmdTestCase):
+ def test_subdir(self):
+ """Test subdir()"""
+ test = TestCmd.TestCmd(workdir = '', subdir = ['no', 'such', 'subdir'])
+ assert not os.path.exists(test.workpath('no'))
+
+ test = TestCmd.TestCmd(workdir = '', subdir = 'foo')
+ assert test.subdir('bar') == 1
+ assert test.subdir(['foo', 'succeed']) == 1
+ if os.name != "nt":
+ os.chmod(test.workpath('foo'), 0o500)
+ assert test.subdir(['foo', 'fail']) == 0
+ assert test.subdir(['sub', 'dir', 'ectory'], 'sub') == 1
+ assert test.subdir('one',
+ UserList(['one', 'two']),
+ ['one', 'two', 'three']) == 3
+ assert os.path.isdir(test.workpath('foo'))
+ assert os.path.isdir(test.workpath('bar'))
+ assert os.path.isdir(test.workpath('foo', 'succeed'))
+ if os.name != "nt":
+ assert not os.path.exists(test.workpath('foo', 'fail'))
+ assert os.path.isdir(test.workpath('sub'))
+ assert not os.path.exists(test.workpath('sub', 'dir'))
+ assert not os.path.exists(test.workpath('sub', 'dir', 'ectory'))
+ assert os.path.isdir(test.workpath('one', 'two', 'three'))
+
+
+
+class symlink_TestCase(TestCmdTestCase):
+ def test_symlink(self):
+ """Test symlink()"""
+ try: os.symlink
+ except AttributeError: return
+
+ test = TestCmd.TestCmd(workdir = '', subdir = 'foo')
+ wdir_file1 = os.path.join(test.workdir, 'file1')
+ wdir_target1 = os.path.join(test.workdir, 'target1')
+ wdir_foo_file2 = os.path.join(test.workdir, 'foo', 'file2')
+ wdir_target2 = os.path.join(test.workdir, 'target2')
+ wdir_foo_target2 = os.path.join(test.workdir, 'foo', 'target2')
+
+ test.symlink('target1', 'file1')
+ assert os.path.islink(wdir_file1)
+ assert not os.path.exists(wdir_file1)
+ with open(wdir_target1, 'w') as f:
+ f.write("")
+ assert os.path.exists(wdir_file1)
+
+ test.symlink('target2', ['foo', 'file2'])
+ assert os.path.islink(wdir_foo_file2)
+ assert not os.path.exists(wdir_foo_file2)
+ with open(wdir_target2, 'w') as f:
+ f.write("")
+ assert not os.path.exists(wdir_foo_file2)
+ with open(wdir_foo_target2, 'w') as f:
+ f.write("")
+ assert os.path.exists(wdir_foo_file2)
+
+
+
+class tempdir_TestCase(TestCmdTestCase):
+ def setUp(self):
+ TestCmdTestCase.setUp(self)
+ self._tempdir = tempfile.mktemp()
+ os.mkdir(self._tempdir)
+ os.chdir(self._tempdir)
+
+ def tearDown(self):
+ TestCmdTestCase.tearDown(self)
+ os.rmdir(self._tempdir)
+
+ def test_tempdir(self):
+ """Test tempdir()"""
+ test = TestCmd.TestCmd()
+ tdir1 = test.tempdir()
+ assert os.path.isdir(tdir1)
+ test.workdir_set(None)
+ test.cleanup()
+ assert not os.path.exists(tdir1)
+
+ test = TestCmd.TestCmd()
+ tdir2 = test.tempdir('temp')
+ assert os.path.isdir(tdir2)
+ tdir3 = test.tempdir()
+ assert os.path.isdir(tdir3)
+ test.workdir_set(None)
+ test.cleanup()
+ assert not os.path.exists(tdir2)
+ assert not os.path.exists(tdir3)
+
+
+timeout_script = """\
+import sys
+import time
+seconds = int(sys.argv[1])
+sys.stdout.write('sleeping %s\\n' % seconds)
+sys.stdout.flush()
+time.sleep(seconds)
+sys.stdout.write('slept %s\\n' % seconds)
+sys.stdout.flush()
+sys.exit(0)
+"""
+
+class timeout_TestCase(TestCmdTestCase):
+ def test_initialization(self):
+ """Test initialization timeout"""
+ test = TestCmd.TestCmd(workdir='', timeout=2)
+ test.write('sleep.py', timeout_script)
+
+ test.run([sys.executable, test.workpath('sleep.py'), '4'])
+ assert test.stderr() == '', test.stderr()
+ assert test.stdout() == 'sleeping 4\n', test.stdout()
+
+ test.run([sys.executable, test.workpath('sleep.py'), '4'])
+ assert test.stderr() == '', test.stderr()
+ assert test.stdout() == 'sleeping 4\n', test.stdout()
+
+ def test_cancellation(self):
+ """Test timer cancellation after firing"""
+ test = TestCmd.TestCmd(workdir='', timeout=4)
+ test.write('sleep.py', timeout_script)
+
+ test.run([sys.executable, test.workpath('sleep.py'), '6'])
+ assert test.stderr() == '', test.stderr()
+ assert test.stdout() == 'sleeping 6\n', test.stdout()
+
+ test.run([sys.executable, test.workpath('sleep.py'), '2'])
+ assert test.stderr() == '', test.stderr()
+ assert test.stdout() == 'sleeping 2\nslept 2\n', test.stdout()
+
+ test.run([sys.executable, test.workpath('sleep.py'), '6'])
+ assert test.stderr() == '', test.stderr()
+ assert test.stdout() == 'sleeping 6\n', test.stdout()
+
+ def test_run(self):
+ """Test run() timeout"""
+ test = TestCmd.TestCmd(workdir='', timeout=8)
+ test.write('sleep.py', timeout_script)
+
+ test.run([sys.executable, test.workpath('sleep.py'), '2'],
+ timeout=4)
+ assert test.stderr() == '', test.stderr()
+ assert test.stdout() == 'sleeping 2\nslept 2\n', test.stdout()
+
+ test.run([sys.executable, test.workpath('sleep.py'), '6'],
+ timeout=4)
+ assert test.stderr() == '', test.stderr()
+ assert test.stdout() == 'sleeping 6\n', test.stdout()
+
+ def test_set_timeout(self):
+ """Test set_timeout()"""
+ test = TestCmd.TestCmd(workdir='', timeout=2)
+ test.write('sleep.py', timeout_script)
+
+ test.run([sys.executable, test.workpath('sleep.py'), '4'])
+ assert test.stderr() == '', test.stderr()
+ assert test.stdout() == 'sleeping 4\n', test.stdout()
+
+ test.set_timeout(None)
+
+ test.run([sys.executable, test.workpath('sleep.py'), '4'])
+ assert test.stderr() == '', test.stderr()
+ assert test.stdout() == 'sleeping 4\nslept 4\n', test.stdout()
+
+ test.set_timeout(6)
+
+ test.run([sys.executable, test.workpath('sleep.py'), '4'])
+ assert test.stderr() == '', test.stderr()
+ assert test.stdout() == 'sleeping 4\nslept 4\n', test.stdout()
+
+ test.run([sys.executable, test.workpath('sleep.py'), '8'])
+ assert test.stderr() == '', test.stderr()
+ assert test.stdout() == 'sleeping 8\n', test.stdout()
+
+
+
+class unlink_TestCase(TestCmdTestCase):
+ def test_unlink(self):
+ """Test unlink()"""
+ test = TestCmd.TestCmd(workdir = '', subdir = 'foo')
+ wdir_file1 = os.path.join(test.workdir, 'file1')
+ wdir_file2 = os.path.join(test.workdir, 'file2')
+ wdir_foo_file3a = os.path.join(test.workdir, 'foo', 'file3a')
+ wdir_foo_file3b = os.path.join(test.workdir, 'foo', 'file3b')
+ wdir_foo_file4 = os.path.join(test.workdir, 'foo', 'file4')
+ wdir_file5 = os.path.join(test.workdir, 'file5')
+
+ with open(wdir_file1, 'w') as f:
+ f.write("")
+ with open(wdir_file2, 'w') as f:
+ f.write("")
+ with open(wdir_foo_file3a, 'w') as f:
+ f.write("")
+ with open(wdir_foo_file3b, 'w') as f:
+ f.write("")
+ with open(wdir_foo_file4, 'w') as f:
+ f.write("")
+ with open(wdir_file5, 'w') as f:
+ f.write("")
+
+ try:
+ test.unlink('no_file')
+ except OSError: # expect "No such file or directory"
+ pass
+ except:
+ raise
+
+ test.unlink("file1")
+ assert not os.path.exists(wdir_file1)
+
+ test.unlink(wdir_file2)
+ assert not os.path.exists(wdir_file2)
+
+ test.unlink(['foo', 'file3a'])
+ assert not os.path.exists(wdir_foo_file3a)
+
+ test.unlink(UserList(['foo', 'file3b']))
+ assert not os.path.exists(wdir_foo_file3b)
+
+ test.unlink([test.workdir, 'foo', 'file4'])
+ assert not os.path.exists(wdir_foo_file4)
+
+ # Make it so we can't unlink file5.
+ # For UNIX, remove write permission from the dir and the file.
+ # For Windows, open the file.
+ os.chmod(test.workdir, 0o500)
+ os.chmod(wdir_file5, 0o400)
+ with open(wdir_file5, 'r'):
+ try:
+ try:
+ test.unlink('file5')
+ except OSError: # expect "Permission denied"
+ pass
+ except:
+ raise
+ finally:
+ os.chmod(test.workdir, 0o700)
+ os.chmod(wdir_file5, 0o600)
+
+
+class touch_TestCase(TestCmdTestCase):
+ def test_touch(self):
+ """Test touch()"""
+ test = TestCmd.TestCmd(workdir = '', subdir = 'sub')
+
+ wdir_file1 = os.path.join(test.workdir, 'file1')
+ wdir_sub_file2 = os.path.join(test.workdir, 'sub', 'file2')
+
+ with open(wdir_file1, 'w') as f:
+ f.write("")
+ with open(wdir_sub_file2, 'w') as f:
+ f.write("")
+
+ file1_old_time = os.path.getmtime(wdir_file1)
+ file2_old_time = os.path.getmtime(wdir_sub_file2)
+
+ test.sleep()
+
+ test.touch(wdir_file1)
+
+ file1_new_time = os.path.getmtime(wdir_file1)
+ assert file1_new_time > file1_old_time
+
+ test.touch('file1', file1_old_time)
+
+ result = os.path.getmtime(wdir_file1)
+ # Sub-second granularity of file systems may still vary.
+ # On Windows, the two times may be off by a microsecond.
+ assert int(result) == int(file1_old_time), (result, file1_old_time)
+
+ test.touch(['sub', 'file2'])
+
+ file2_new_time = os.path.getmtime(wdir_sub_file2)
+ assert file2_new_time > file2_old_time
+
+
+
+class verbose_TestCase(TestCmdTestCase):
+ def test_verbose(self):
+ """Test verbose()"""
+ test = TestCmd.TestCmd()
+ assert test.verbose == 0, 'verbose already initialized?'
+ test = TestCmd.TestCmd(verbose = 1)
+ assert test.verbose == 1, 'did not initialize verbose'
+ test.verbose = 2
+ assert test.verbose == 2, 'did not set verbose'
+
+
+
+class workdir_TestCase(TestCmdTestCase):
+ def test_workdir(self):
+ """Test workdir()"""
+ run_env = TestCmd.TestCmd(workdir = '')
+ os.chdir(run_env.workdir)
+ # Everything before this prepared our "source directory."
+ # Now do the real test.
+ test = TestCmd.TestCmd()
+ assert test.workdir is None
+
+ test = TestCmd.TestCmd(workdir = None)
+ assert test.workdir is None
+
+ test = TestCmd.TestCmd(workdir = '')
+ assert test.workdir != None
+ assert os.path.isdir(test.workdir)
+
+ test = TestCmd.TestCmd(workdir = 'dir')
+ assert test.workdir != None
+ assert os.path.isdir(test.workdir)
+
+ no_such_subdir = os.path.join('no', 'such', 'subdir')
+ try:
+ test = TestCmd.TestCmd(workdir = no_such_subdir)
+ except OSError: # expect "No such file or directory"
+ pass
+ except:
+ raise
+
+ test = TestCmd.TestCmd(workdir = 'foo')
+ workdir_foo = test.workdir
+ assert workdir_foo != None
+
+ test.workdir_set('bar')
+ workdir_bar = test.workdir
+ assert workdir_bar != None
+
+ try:
+ test.workdir_set(no_such_subdir)
+ except OSError:
+ pass # expect "No such file or directory"
+ except:
+ raise
+ assert workdir_bar == test.workdir
+
+ assert os.path.isdir(workdir_foo)
+ assert os.path.isdir(workdir_bar)
+
+
+
+class workdirs_TestCase(TestCmdTestCase):
+ def test_workdirs(self):
+ """Test workdirs()"""
+ test = TestCmd.TestCmd()
+ assert test.workdir is None
+ test.workdir_set('')
+ wdir1 = test.workdir
+ test.workdir_set('')
+ wdir2 = test.workdir
+ assert os.path.isdir(wdir1)
+ assert os.path.isdir(wdir2)
+ test.cleanup()
+ assert not os.path.exists(wdir1)
+ assert not os.path.exists(wdir2)
+
+
+
+class workpath_TestCase(TestCmdTestCase):
+ def test_workpath(self):
+ """Test workpath()"""
+ test = TestCmd.TestCmd()
+ assert test.workdir is None
+
+ test = TestCmd.TestCmd(workdir = '')
+ wpath = test.workpath('foo', 'bar')
+ assert wpath == os.path.join(test.workdir, 'foo', 'bar')
+
+
+
+class readable_TestCase(TestCmdTestCase):
+ def test_readable(self):
+ """Test readable()"""
+ test = TestCmd.TestCmd(workdir = '', subdir = 'foo')
+ test.write('file1', "Test file #1\n")
+ test.write(['foo', 'file2'], "Test file #2\n")
+
+ try: symlink = os.symlink
+ except AttributeError: pass
+ else: symlink('no_such_file', test.workpath('dangling_symlink'))
+
+ test.readable(test.workdir, 0)
+ # XXX skip these tests if euid == 0?
+ assert not _is_readable(test.workdir)
+ assert not _is_readable(test.workpath('file1'))
+ assert not _is_readable(test.workpath('foo'))
+ assert not _is_readable(test.workpath('foo', 'file2'))
+
+ test.readable(test.workdir, 1)
+ assert _is_readable(test.workdir)
+ assert _is_readable(test.workpath('file1'))
+ assert _is_readable(test.workpath('foo'))
+ assert _is_readable(test.workpath('foo', 'file2'))
+
+ test.readable(test.workdir, 0)
+ # XXX skip these tests if euid == 0?
+ assert not _is_readable(test.workdir)
+ assert not _is_readable(test.workpath('file1'))
+ assert not _is_readable(test.workpath('foo'))
+ assert not _is_readable(test.workpath('foo', 'file2'))
+
+ test.readable(test.workpath('file1'), 1)
+ assert _is_readable(test.workpath('file1'))
+
+ test.readable(test.workpath('file1'), 0)
+ assert not _is_readable(test.workpath('file1'))
+
+ test.readable(test.workdir, 1)
+
+
+
+class writable_TestCase(TestCmdTestCase):
+ def test_writable(self):
+ """Test writable()"""
+ test = TestCmd.TestCmd(workdir = '', subdir = 'foo')
+ test.write('file1', "Test file #1\n")
+ test.write(['foo', 'file2'], "Test file #2\n")
+
+ try: symlink = os.symlink
+ except AttributeError: pass
+ else: symlink('no_such_file', test.workpath('dangling_symlink'))
+
+ test.writable(test.workdir, 0)
+ # XXX skip these tests if euid == 0?
+ assert not _is_writable(test.workdir)
+ assert not _is_writable(test.workpath('file1'))
+ assert not _is_writable(test.workpath('foo'))
+ assert not _is_writable(test.workpath('foo', 'file2'))
+
+ test.writable(test.workdir, 1)
+ assert _is_writable(test.workdir)
+ assert _is_writable(test.workpath('file1'))
+ assert _is_writable(test.workpath('foo'))
+ assert _is_writable(test.workpath('foo', 'file2'))
+
+ test.writable(test.workdir, 0)
+ # XXX skip these tests if euid == 0?
+ assert not _is_writable(test.workdir)
+ assert not _is_writable(test.workpath('file1'))
+ assert not _is_writable(test.workpath('foo'))
+ assert not _is_writable(test.workpath('foo', 'file2'))
+
+ test.writable(test.workpath('file1'), 1)
+ assert _is_writable(test.workpath('file1'))
+
+ test.writable(test.workpath('file1'), 0)
+ assert not _is_writable(test.workpath('file1'))
+
+
+
+class executable_TestCase(TestCmdTestCase):
+ def test_executable(self):
+ """Test executable()"""
+ test = TestCmd.TestCmd(workdir = '', subdir = 'foo')
+ test.write('file1', "Test file #1\n")
+ test.write(['foo', 'file2'], "Test file #2\n")
+
+ try: symlink = os.symlink
+ except AttributeError: pass
+ else: symlink('no_such_file', test.workpath('dangling_symlink'))
+
+ def make_executable(fname):
+ st = os.stat(fname)
+ os.chmod(fname, stat.S_IMODE(st[stat.ST_MODE]|0o100))
+
+ def make_non_executable(fname):
+ st = os.stat(fname)
+ os.chmod(fname, stat.S_IMODE(st[stat.ST_MODE]&~0o100))
+
+ test.executable(test.workdir, 0)
+ # XXX skip these tests if euid == 0?
+ assert not _is_executable(test.workdir)
+ make_executable(test.workdir)
+ assert not _is_executable(test.workpath('file1'))
+ assert not _is_executable(test.workpath('foo'))
+ make_executable(test.workpath('foo'))
+ assert not _is_executable(test.workpath('foo', 'file2'))
+ make_non_executable(test.workpath('foo'))
+ make_non_executable(test.workdir)
+
+ test.executable(test.workdir, 1)
+ assert _is_executable(test.workdir)
+ assert _is_executable(test.workpath('file1'))
+ assert _is_executable(test.workpath('foo'))
+ assert _is_executable(test.workpath('foo', 'file2'))
+
+ test.executable(test.workdir, 0)
+ # XXX skip these tests if euid == 0?
+ assert not _is_executable(test.workdir)
+ make_executable(test.workdir)
+ assert not _is_executable(test.workpath('file1'))
+ assert not _is_executable(test.workpath('foo'))
+ make_executable(test.workpath('foo'))
+ assert not _is_executable(test.workpath('foo', 'file2'))
+
+ test.executable(test.workpath('file1'), 1)
+ assert _is_executable(test.workpath('file1'))
+
+ test.executable(test.workpath('file1'), 0)
+ assert not _is_executable(test.workpath('file1'))
+
+ test.executable(test.workdir, 1)
+
+
+
+class write_TestCase(TestCmdTestCase):
+ def test_write(self):
+ """Test write()"""
+ test = TestCmd.TestCmd(workdir = '', subdir = 'foo')
+ test.write('file1', "Test file #1\n")
+ test.write(['foo', 'file2'], "Test file #2\n")
+ try:
+ test.write(['bar', 'file3'], "Test file #3 (should not get created)\n")
+ except IOError: # expect "No such file or directory"
+ pass
+ except:
+ raise
+ test.write(test.workpath('file4'), "Test file #4.\n")
+ test.write(test.workpath('foo', 'file5'), "Test file #5.\n")
+ try:
+ test.write(test.workpath('bar', 'file6'), "Test file #6 (should not get created)\n")
+ except IOError: # expect "No such file or directory"
+ pass
+ except:
+ raise
+
+ try:
+ test.write('file7', "Test file #8.\n", mode = 'r')
+ except ValueError: # expect "mode must begin with 'w'
+ pass
+ except:
+ raise
+
+ test.write('file8', "Test file #8.\n", mode = 'w')
+ test.write('file9', "Test file #9.\r\n", mode = 'wb')
+
+ if os.name != "nt":
+ os.chmod(test.workdir, 0o500)
+ try:
+ test.write('file10', "Test file #10 (should not get created).\n")
+ except IOError: # expect "Permission denied"
+ pass
+ except:
+ raise
+
+ assert os.path.isdir(test.workpath('foo'))
+ assert not os.path.exists(test.workpath('bar'))
+ assert os.path.isfile(test.workpath('file1'))
+ assert os.path.isfile(test.workpath('foo', 'file2'))
+ assert not os.path.exists(test.workpath('bar', 'file3'))
+ assert os.path.isfile(test.workpath('file4'))
+ assert os.path.isfile(test.workpath('foo', 'file5'))
+ assert not os.path.exists(test.workpath('bar', 'file6'))
+ assert not os.path.exists(test.workpath('file7'))
+ assert os.path.isfile(test.workpath('file8'))
+ assert os.path.isfile(test.workpath('file9'))
+ if os.name != "nt":
+ assert not os.path.exists(test.workpath('file10'))
+
+ with open(test.workpath('file8'), 'r') as f:
+ res = f.read()
+ assert res == "Test file #8.\n", res
+ with open(test.workpath('file9'), 'rb') as f:
+ res = to_str(f.read())
+ assert res == "Test file #9.\r\n", res
+
+
+class variables_TestCase(TestCmdTestCase):
+ def test_variables(self):
+ """Test global variables"""
+ run_env = TestCmd.TestCmd(workdir = '')
+
+ variables = [
+ 'fail_test',
+ 'no_result',
+ 'pass_test',
+ 'match_exact',
+ 'match_re',
+ 'match_re_dotall',
+ 'python',
+ '_python_',
+ 'TestCmd',
+ ]
+
+ script = "from __future__ import print_function\n" + \
+ "import TestCmd\n" + \
+ '\n'.join([ "print(TestCmd.%s\n)" % v for v in variables ])
+ run_env.run(program=sys.executable, stdin=script)
+ stderr = run_env.stderr()
+ assert stderr == "", stderr
+
+ script = "from __future__ import print_function\n" + \
+ "from TestCmd import *\n" + \
+ '\n'.join([ "print(%s)" % v for v in variables ])
+ run_env.run(program=sys.executable, stdin=script)
+ stderr = run_env.stderr()
+ assert stderr == "", stderr
+
+
+
+if __name__ == "__main__":
+ tclasses = [
+ __init__TestCase,
+ basename_TestCase,
+ cleanup_TestCase,
+ chmod_TestCase,
+ combine_TestCase,
+ command_args_TestCase,
+ description_TestCase,
+ diff_TestCase,
+ diff_stderr_TestCase,
+ diff_stdout_TestCase,
+ exit_TestCase,
+ fail_test_TestCase,
+ interpreter_TestCase,
+ match_TestCase,
+ match_exact_TestCase,
+ match_re_dotall_TestCase,
+ match_re_TestCase,
+ match_stderr_TestCase,
+ match_stdout_TestCase,
+ no_result_TestCase,
+ pass_test_TestCase,
+ preserve_TestCase,
+ program_TestCase,
+ read_TestCase,
+ rmdir_TestCase,
+ run_TestCase,
+ run_verbose_TestCase,
+ set_diff_function_TestCase,
+ set_match_function_TestCase,
+ sleep_TestCase,
+ start_TestCase,
+ stderr_TestCase,
+ stdin_TestCase,
+ stdout_TestCase,
+ subdir_TestCase,
+ symlink_TestCase,
+ tempdir_TestCase,
+ timeout_TestCase,
+ unlink_TestCase,
+ touch_TestCase,
+ verbose_TestCase,
+ workdir_TestCase,
+ workdirs_TestCase,
+ workpath_TestCase,
+ writable_TestCase,
+ write_TestCase,
+ variables_TestCase,
+ ]
+ if sys.platform != 'win32':
+ tclasses.extend([
+ executable_TestCase,
+ readable_TestCase,
+ ])
+ suite = unittest.TestSuite()
+ for tclass in tclasses:
+ names = unittest.getTestCaseNames(tclass, 'test_')
+ suite.addTests([ tclass(n) for n in names ])
+ if not unittest.TextTestRunner().run(suite).wasSuccessful():
+ sys.exit(1)
+
+# Local Variables:
+# tab-width:4
+# indent-tabs-mode:nil
+# End:
+# vim: set expandtab tabstop=4 shiftwidth=4:
diff --git a/testing/framework/TestCommon.py b/testing/framework/TestCommon.py
new file mode 100644
index 0000000..ca4a147
--- /dev/null
+++ b/testing/framework/TestCommon.py
@@ -0,0 +1,749 @@
+"""
+TestCommon.py: a testing framework for commands and scripts
+ with commonly useful error handling
+
+The TestCommon module provides a simple, high-level interface for writing
+tests of executable commands and scripts, especially commands and scripts
+that interact with the file system. All methods throw exceptions and
+exit on failure, with useful error messages. This makes a number of
+explicit checks unnecessary, making the test scripts themselves simpler
+to write and easier to read.
+
+The TestCommon class is a subclass of the TestCmd class. In essence,
+TestCommon is a wrapper that handles common TestCmd error conditions in
+useful ways. You can use TestCommon directly, or subclass it for your
+program, adding methods (or overriding existing ones) to tailor it to
+your program's specific needs. Alternatively, the TestCommon class serves
+as a useful example of how to define your own TestCmd subclass.
+
+As a subclass of TestCmd, TestCommon provides access to all of the
+variables and methods from the TestCmd module. Consequently, you can
+use any variable or method documented in the TestCmd module without
+having to explicitly import TestCmd.
+
+A TestCommon environment object is created via the usual invocation:
+
+ import TestCommon
+ test = TestCommon.TestCommon()
+
+You can use all of the TestCmd keyword arguments when instantiating a
+TestCommon object; see the TestCmd documentation for details.
+
+Here is an overview of the methods and keyword arguments that are
+provided by the TestCommon class (a brief usage sketch follows the
+list of module variables below):
+
+ test.must_be_writable('file1', ['file2', ...])
+
+ test.must_contain('file', 'required text\n')
+
+ test.must_contain_all(output, input, ['title', find])
+
+ test.must_contain_all_lines(output, lines, ['title', find])
+
+ test.must_contain_any_line(output, lines, ['title', find])
+
+ test.must_contain_exactly_lines(output, lines, ['title', find])
+
+ test.must_exist('file1', ['file2', ...])
+
+ test.must_match('file', "expected contents\n")
+
+ test.must_not_be_writable('file1', ['file2', ...])
+
+ test.must_not_contain('file', 'banned text\n')
+
+ test.must_not_contain_any_line(output, lines, ['title', find])
+
+ test.must_not_exist('file1', ['file2', ...])
+
+ test.run(options = "options to be prepended to arguments",
+ stdout = "expected standard output from the program",
+ stderr = "expected error output from the program",
+ status = expected_status,
+ match = match_function)
+
+The TestCommon module also provides the following variables:
+
+ TestCommon.python
+ TestCommon._python_
+ TestCommon.exe_suffix
+ TestCommon.obj_suffix
+ TestCommon.shobj_prefix
+ TestCommon.shobj_suffix
+ TestCommon.lib_prefix
+ TestCommon.lib_suffix
+ TestCommon.dll_prefix
+ TestCommon.dll_suffix
+
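+A minimal usage sketch (the program name 'myprog' is a placeholder and
+not part of this module; the sketch assumes 'myprog' copies the named
+input file to its standard output):
+
+    import TestCommon
+    # 'myprog' is a placeholder for the command under test
+    test = TestCommon.TestCommon(program='myprog', workdir='')
+    test.write('input.txt', "hello\n")
+    test.run(arguments='input.txt', stdout="hello\n")
+    test.must_contain('input.txt', 'hello')
+    test.pass_test()
+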
+"""
+
+# Copyright 2000-2010 Steven Knight
+# This module is free software, and you may redistribute it and/or modify
+# it under the same terms as Python itself, so long as this copyright message
+# and disclaimer are retained in their original form.
+#
+# IN NO EVENT SHALL THE AUTHOR BE LIABLE TO ANY PARTY FOR DIRECT, INDIRECT,
+# SPECIAL, INCIDENTAL, OR CONSEQUENTIAL DAMAGES ARISING OUT OF THE USE OF
+# THIS CODE, EVEN IF THE AUTHOR HAS BEEN ADVISED OF THE POSSIBILITY OF SUCH
+# DAMAGE.
+#
+# THE AUTHOR SPECIFICALLY DISCLAIMS ANY WARRANTIES, INCLUDING, BUT NOT
+# LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A
+# PARTICULAR PURPOSE. THE CODE PROVIDED HEREUNDER IS ON AN "AS IS" BASIS,
+# AND THERE IS NO OBLIGATION WHATSOEVER TO PROVIDE MAINTENANCE,
+# SUPPORT, UPDATES, ENHANCEMENTS, OR MODIFICATIONS.
+
+from __future__ import print_function
+
+__author__ = "Steven Knight <knight at baldmt dot com>"
+__revision__ = "TestCommon.py 1.3.D001 2010/06/03 12:58:27 knight"
+__version__ = "1.3"
+
+import copy
+import os
+import stat
+import sys
+import glob
+
+try:
+ from collections import UserList
+except ImportError:
+ # no 'collections' module or no UserList in collections
+ exec('from UserList import UserList')
+
+from TestCmd import *
+from TestCmd import __all__
+
+__all__.extend([ 'TestCommon',
+ 'exe_suffix',
+ 'obj_suffix',
+ 'shobj_prefix',
+ 'shobj_suffix',
+ 'lib_prefix',
+ 'lib_suffix',
+ 'dll_prefix',
+ 'dll_suffix',
+ ])
+
+# Variables that describe the prefixes and suffixes on this system.
+if sys.platform == 'win32':
+ exe_suffix = '.exe'
+ obj_suffix = '.obj'
+ shobj_suffix = '.obj'
+ shobj_prefix = ''
+ lib_prefix = ''
+ lib_suffix = '.lib'
+ dll_prefix = ''
+ dll_suffix = '.dll'
+elif sys.platform == 'cygwin':
+ exe_suffix = '.exe'
+ obj_suffix = '.o'
+ shobj_suffix = '.os'
+ shobj_prefix = ''
+ lib_prefix = 'lib'
+ lib_suffix = '.a'
+ dll_prefix = 'cyg'
+ dll_suffix = '.dll'
+elif sys.platform.find('irix') != -1:
+ exe_suffix = ''
+ obj_suffix = '.o'
+ shobj_suffix = '.o'
+ shobj_prefix = ''
+ lib_prefix = 'lib'
+ lib_suffix = '.a'
+ dll_prefix = 'lib'
+ dll_suffix = '.so'
+elif sys.platform.find('darwin') != -1:
+ exe_suffix = ''
+ obj_suffix = '.o'
+ shobj_suffix = '.os'
+ shobj_prefix = ''
+ lib_prefix = 'lib'
+ lib_suffix = '.a'
+ dll_prefix = 'lib'
+ dll_suffix = '.dylib'
+elif sys.platform.find('sunos') != -1:
+ exe_suffix = ''
+ obj_suffix = '.o'
+ shobj_suffix = '.o'
+ shobj_prefix = 'so_'
+ lib_prefix = 'lib'
+ lib_suffix = '.a'
+ dll_prefix = 'lib'
+ dll_suffix = '.so'
+else:
+ exe_suffix = ''
+ obj_suffix = '.o'
+ shobj_suffix = '.os'
+ shobj_prefix = ''
+ lib_prefix = 'lib'
+ lib_suffix = '.a'
+ dll_prefix = 'lib'
+ dll_suffix = '.so'
+
+def is_List(e):
+ return isinstance(e, (list, UserList))
+
+def is_Tuple(e):
+ return isinstance(e, tuple)
+
+def is_Sequence(e):
+ return (not hasattr(e, "strip") and
+ hasattr(e, "__getitem__") or
+ hasattr(e, "__iter__"))
+
+def is_writable(f):
+ mode = os.stat(f)[stat.ST_MODE]
+ return mode & stat.S_IWUSR
+
+def separate_files(flist):
+ existing = []
+ missing = []
+ for f in flist:
+ if os.path.exists(f):
+ existing.append(f)
+ else:
+ missing.append(f)
+ return existing, missing
+
+def contains(seq, subseq, find):
+ # Returns True or False.
+ if find is None:
+ return subseq in seq
+ else:
+ f = find(seq, subseq)
+ return f not in (None, -1) and f is not False
+
+def find_index(seq, subseq, find):
+ # Returns either an index of the subseq within the seq, or None.
+ # Accepts a function find(seq, subseq), which returns an integer on success
+ # and either: None, False, or -1, on failure.
+ if find is None:
+ try:
+ return seq.index(subseq)
+ except ValueError:
+ return None
+ else:
+ i = find(seq, subseq)
+ return None if (i in (None, -1) or i is False) else i
+
+
+if os.name == 'posix':
+ def _failed(self, status = 0):
+ if self.status is None or status is None:
+ return None
+ return _status(self) != status
+ def _status(self):
+ return self.status
+elif os.name == 'nt':
+ def _failed(self, status = 0):
+ return not (self.status is None or status is None) and \
+ self.status != status
+ def _status(self):
+ return self.status
+
+class TestCommon(TestCmd):
+
+ # Additional methods from the Perl Test::Cmd::Common module
+ # that we may wish to add in the future:
+ #
+ # $test->subdir('subdir', ...);
+ #
+ # $test->copy('src_file', 'dst_file');
+
+ def __init__(self, **kw):
+ """Initialize a new TestCommon instance. This involves just
+ calling the base class initialization, and then changing directory
+ to the workdir.
+ """
+ TestCmd.__init__(self, **kw)
+ os.chdir(self.workdir)
+
+ def options_arguments(self, options, arguments):
+ """Merges the "options" keyword argument with the arguments."""
+ if options:
+ if arguments is None:
+ return options
+
+ # If either value is a string, split it on whitespace so the
+ # two can be concatenated as argument lists.
+ if isinstance(options, str):
+ options = options.split()
+ if isinstance(arguments, str):
+ arguments = arguments.split()
+ arguments = options + arguments
+
+ return arguments
+
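+ # Illustrative sketch (hypothetical values): options are prepended to
+ # arguments, and plain strings are split on whitespace first, so
+ #   self.options_arguments('-j 2', 'target')   -> ['-j', '2', 'target']
+ #   self.options_arguments(['-n'], ['target']) -> ['-n', 'target']
+ #   self.options_arguments(None, 'target')     -> 'target' (unchanged)
+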
+ def must_be_writable(self, *files):
+ """Ensures that the specified file(s) exist and are writable.
+ An individual file can be specified as a list of directory names,
+ in which case the pathname will be constructed by concatenating
+ them. Exits FAILED if any of the files does not exist or is
+ not writable.
+ """
+ files = [is_List(x) and os.path.join(*x) or x for x in files]
+ existing, missing = separate_files(files)
+ unwritable = [x for x in existing if not is_writable(x)]
+ if missing:
+ print("Missing files: `%s'" % "', `".join(missing))
+ if unwritable:
+ print("Unwritable files: `%s'" % "', `".join(unwritable))
+ self.fail_test(missing + unwritable)
+
+ def must_contain(self, file, required, mode='rb', find=None):
+ """Ensures specified file contains the required text.
+
+ Args:
+ file (string): name of file to search in.
+ required (string): text to search for. For the default
+ find function, type must match the return type from
+ reading the file; current implementation will convert.
+ mode (string): file open mode.
+ find (func): optional custom search routine. Must take the
+ form "find(output, line)", returning a non-negative integer
+ on success and None, False, or -1 on failure.
+
+ The calling test exits FAILED if the search fails.
+ """
+ if 'b' in mode:
+ # Python 3: reading a file in binary mode returns a
+ # bytes object. We cannot find the index of a different
+ # (str) type in that, so convert.
+ required = to_bytes(required)
+ file_contents = self.read(file, mode)
+
+ if not contains(file_contents, required, find):
+ print("File `%s' does not contain required string." % file)
+ print(self.banner('Required string '))
+ print(required)
+ print(self.banner('%s contents ' % file))
+ print(file_contents)
+ self.fail_test()
+
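+ # Illustrative sketch (hypothetical file name and text):
+ #   test.write('build.log', 'scons: done building targets.\n')
+ #   test.must_contain('build.log', 'done building')
+ # With the default binary mode the required string is converted to bytes
+ # before searching the file contents.
+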
+ def must_contain_all(self, output, input, title=None, find=None):
+ """Ensures that the specified output string (first argument)
+ contains all of the specified input as a block (second argument).
+
+ An optional third argument can be used to describe the type
+ of output being searched, and only shows up in failure output.
+
+ An optional fourth argument can be used to supply a different
+ function, of the form "find(output, line)", to use when searching
+ for lines in the output.
+ """
+ if is_List(output):
+ output = '\n'.join(output)
+
+ if not contains(output, input, find):
+ if title is None:
+ title = 'output'
+ print('Missing expected input from {}:'.format(title))
+ print(input)
+ print(self.banner(title + ' '))
+ print(output)
+ self.fail_test()
+
+ def must_contain_all_lines(self, output, lines, title=None, find=None):
+ """Ensures that the specified output string (first argument)
+ contains all of the specified lines (second argument).
+
+ An optional third argument can be used to describe the type
+ of output being searched, and only shows up in failure output.
+
+ An optional fourth argument can be used to supply a different
+ function, of the form "find(output, line)", to use when searching
+ for lines in the output.
+ """
+ missing = []
+ if is_List(output):
+ output = '\n'.join(output)
+
+ for line in lines:
+ if not contains(output, line, find):
+ missing.append(line)
+
+ if missing:
+ if title is None:
+ title = 'output'
+ sys.stdout.write("Missing expected lines from %s:\n" % title)
+ for line in missing:
+ sys.stdout.write(' ' + repr(line) + '\n')
+ sys.stdout.write(self.banner(title + ' ') + '\n')
+ sys.stdout.write(output)
+ self.fail_test()
+
+ def must_contain_any_line(self, output, lines, title=None, find=None):
+ """Ensures that the specified output string (first argument)
+ contains at least one of the specified lines (second argument).
+
+ An optional third argument can be used to describe the type
+ of output being searched, and only shows up in failure output.
+
+ An optional fourth argument can be used to supply a different
+ function, of the form "find(output, line)", to use when searching
+ for lines in the output.
+ """
+ for line in lines:
+ if contains(output, line, find):
+ return
+
+ if title is None:
+ title = 'output'
+ sys.stdout.write("Missing any expected line from %s:\n" % title)
+ for line in lines:
+ sys.stdout.write(' ' + repr(line) + '\n')
+ sys.stdout.write(self.banner(title + ' ') + '\n')
+ sys.stdout.write(output)
+ self.fail_test()
+
+ def must_contain_exactly_lines(self, output, expect, title=None, find=None):
+ """Ensures that the specified output string (first argument)
+ contains all of the lines in the expected string (second argument)
+ with none left over.
+
+ An optional third argument can be used to describe the type
+ of output being searched, and only shows up in failure output.
+
+ An optional fourth argument can be used to supply a different
+ function, of the form "find(output, line)", to use when searching
+ for lines in the output. The function must return the index
+ of the found line in the output, or None if the line is not found.
+ """
+ out = output.splitlines()
+ if is_List(expect):
+ exp = [ e.rstrip('\n') for e in expect ]
+ else:
+ exp = expect.splitlines()
+ if sorted(out) == sorted(exp):
+ # early out for exact match
+ return
+ missing = []
+ for line in exp:
+ i = find_index(out, line, find)
+ if i is None:
+ missing.append(line)
+ else:
+ out.pop(i)
+
+ if not missing and not out:
+ # all lines were matched
+ return
+
+ if title is None:
+ title = 'output'
+ if missing:
+ sys.stdout.write("Missing expected lines from %s:\n" % title)
+ for line in missing:
+ sys.stdout.write(' ' + repr(line) + '\n')
+ sys.stdout.write(self.banner('Missing %s ' % title) + '\n')
+ if out:
+ sys.stdout.write("Extra unexpected lines from %s:\n" % title)
+ for line in out:
+ sys.stdout.write(' ' + repr(line) + '\n')
+ sys.stdout.write(self.banner('Extra %s ' % title) + '\n')
+ sys.stdout.flush()
+ self.fail_test()
+
+ def must_contain_lines(self, lines, output, title=None, find = None):
+ # Deprecated; retain for backwards compatibility.
+ return self.must_contain_all_lines(output, lines, title, find)
+
+ def must_exist(self, *files):
+ """Ensures that the specified file(s) must exist. An individual
+ file be specified as a list of directory names, in which case the
+ pathname will be constructed by concatenating them. Exits FAILED
+ if any of the files does not exist.
+ """
+ files = [is_List(x) and os.path.join(*x) or x for x in files]
+ missing = [x for x in files if not os.path.exists(x) and not os.path.islink(x) ]
+ if missing:
+ print("Missing files: `%s'" % "', `".join(missing))
+ self.fail_test(missing)
+
+ def must_exist_one_of(self, files):
+ """Ensures that at least one of the specified file(s) exists.
+ The filenames can be given as a list, where each entry may be
+ a single path string, or a list/tuple of directory names plus the
+ final filename, which are joined into a single path.
+ Supports wildcard names like 'foo-1.2.3-*.rpm'.
+ Exits FAILED if none of the files exists.
+ """
+ missing = []
+ for x in files:
+ if is_List(x) or is_Tuple(x):
+ xpath = os.path.join(*x)
+ else:
+ xpath = is_Sequence(x) and os.path.join(x) or x
+ if glob.glob(xpath):
+ return
+ missing.append(xpath)
+ print("Missing one of: `%s'" % "', `".join(missing))
+ self.fail_test(missing)
+
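+ # Illustrative sketch (hypothetical package names): entries may mix plain
+ # paths, directory sequences, and glob patterns, e.g.
+ #   test.must_exist_one_of(['foo-1.2.3-*.rpm', ('dist', 'foo-1.2.3.tar.gz')])
+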
+ def must_match(self, file, expect, mode = 'rb', match=None, message=None, newline=None):
+ """Matches the contents of the specified file (first argument)
+ against the expected contents (second argument). The expected
+ contents are a list of lines or a string which will be split
+ on newlines.
+ """
+ file_contents = self.read(file, mode, newline)
+ if not match:
+ match = self.match
+ try:
+ self.fail_test(not match(to_str(file_contents), to_str(expect)), message=message)
+ except KeyboardInterrupt:
+ raise
+ except:
+ print("Unexpected contents of `%s'" % file)
+ self.diff(expect, file_contents, 'contents ')
+ raise
+
+ def must_not_contain(self, file, banned, mode = 'rb', find = None):
+ """Ensures that the specified file doesn't contain the banned text.
+ """
+ file_contents = self.read(file, mode)
+
+ if contains(file_contents, banned, find):
+ print("File `%s' contains banned string." % file)
+ print(self.banner('Banned string '))
+ print(banned)
+ print(self.banner('%s contents ' % file))
+ print(file_contents)
+ self.fail_test()
+
+ def must_not_contain_any_line(self, output, lines, title=None, find=None):
+ """Ensures that the specified output string (first argument)
+ does not contain any of the specified lines (second argument).
+
+ An optional third argument can be used to describe the type
+ of output being searched, and only shows up in failure output.
+
+ An optional fourth argument can be used to supply a different
+ function, of the form "find(output, line)", to use when searching
+ for lines in the output.
+ """
+ unexpected = []
+ for line in lines:
+ if contains(output, line, find):
+ unexpected.append(line)
+
+ if unexpected:
+ if title is None:
+ title = 'output'
+ sys.stdout.write("Unexpected lines in %s:\n" % title)
+ for line in unexpected:
+ sys.stdout.write(' ' + repr(line) + '\n')
+ sys.stdout.write(self.banner(title + ' ') + '\n')
+ sys.stdout.write(output)
+ self.fail_test()
+
+ def must_not_contain_lines(self, lines, output, title=None, find=None):
+ return self.must_not_contain_any_line(output, lines, title, find)
+
+ def must_not_exist(self, *files):
+ """Ensures that the specified file(s) must not exist.
+ An individual file be specified as a list of directory names, in
+ which case the pathname will be constructed by concatenating them.
+ Exits FAILED if any of the files exists.
+ """
+ files = [is_List(x) and os.path.join(*x) or x for x in files]
+ existing = [x for x in files if os.path.exists(x) or os.path.islink(x)]
+ if existing:
+ print("Unexpected files exist: `%s'" % "', `".join(existing))
+ self.fail_test(existing)
+
+ def must_not_exist_any_of(self, files):
+ """Ensures that none of the specified file(s) exists.
+ The filenames can be given as a list, where each entry may be
+ a single path string, or a list/tuple of directory names plus the
+ final filename, which are joined into a single path.
+ Supports wildcard names like 'foo-1.2.3-*.rpm'.
+ Exits FAILED if any of the files exists.
+ """
+ existing = []
+ for x in files:
+ if is_List(x) or is_Tuple(x):
+ xpath = os.path.join(*x)
+ else:
+ xpath = is_Sequence(x) and os.path.join(x) or x
+ if glob.glob(xpath):
+ existing.append(xpath)
+ if existing:
+ print("Unexpected files exist: `%s'" % "', `".join(existing))
+ self.fail_test(existing)
+
+ def must_not_be_writable(self, *files):
+ """Ensures that the specified file(s) exist and are not writable.
+ An individual file can be specified as a list of directory names,
+ in which case the pathname will be constructed by concatenating
+ them. Exits FAILED if any of the files does not exist or is
+ writable.
+ """
+ files = [is_List(x) and os.path.join(*x) or x for x in files]
+ existing, missing = separate_files(files)
+ writable = [file for file in existing if is_writable(file)]
+ if missing:
+ print("Missing files: `%s'" % "', `".join(missing))
+ if writable:
+ print("Writable files: `%s'" % "', `".join(writable))
+ self.fail_test(missing + writable)
+
+ def _complete(self, actual_stdout, expected_stdout,
+ actual_stderr, expected_stderr, status, match):
+ """
+ Post-processes running a subcommand, checking for failure
+ status and displaying output appropriately.
+ """
+ if _failed(self, status):
+ expect = ''
+ if status != 0:
+ expect = " (expected %s)" % str(status)
+ print("%s returned %s%s" % (self.program, _status(self), expect))
+ print(self.banner('STDOUT '))
+ print(actual_stdout)
+ print(self.banner('STDERR '))
+ print(actual_stderr)
+ self.fail_test()
+ if (expected_stdout is not None
+ and not match(actual_stdout, expected_stdout)):
+ self.diff(expected_stdout, actual_stdout, 'STDOUT ')
+ if actual_stderr:
+ print(self.banner('STDERR '))
+ print(actual_stderr)
+ self.fail_test()
+ if (expected_stderr is not None
+ and not match(actual_stderr, expected_stderr)):
+ print(self.banner('STDOUT '))
+ print(actual_stdout)
+ self.diff(expected_stderr, actual_stderr, 'STDERR ')
+ self.fail_test()
+
+ def start(self, program = None,
+ interpreter = None,
+ options = None,
+ arguments = None,
+ universal_newlines = None,
+ **kw):
+ """
+ Starts a program or script for the test environment, handling
+ any exceptions.
+ """
+ arguments = self.options_arguments(options, arguments)
+ try:
+ return TestCmd.start(self, program, interpreter, arguments,
+ universal_newlines, **kw)
+ except KeyboardInterrupt:
+ raise
+ except Exception as e:
+ print(self.banner('STDOUT '))
+ try:
+ print(self.stdout())
+ except IndexError:
+ pass
+ print(self.banner('STDERR '))
+ try:
+ print(self.stderr())
+ except IndexError:
+ pass
+ cmd_args = self.command_args(program, interpreter, arguments)
+ sys.stderr.write('Exception trying to execute: %s\n' % cmd_args)
+ raise e
+
+ def finish(self, popen, stdout = None, stderr = '', status = 0, **kw):
+ """
+ Finishes and waits for the process being run under control of
+ the specified popen argument. Additional arguments are similar
+ to those of the run() method:
+
+ stdout The expected standard output from
+ the command. A value of None means
+ don't test standard output.
+
+ stderr The expected error output from
+ the command. A value of None means
+ don't test error output.
+
+ status The expected exit status from the
+ command. A value of None means don't
+ test exit status.
+ """
+ TestCmd.finish(self, popen, **kw)
+ match = kw.get('match', self.match)
+ self._complete(self.stdout(), stdout,
+ self.stderr(), stderr, status, match)
+
+ def run(self, options = None, arguments = None,
+ stdout = None, stderr = '', status = 0, **kw):
+ """Runs the program under test, checking that the test succeeded.
+
+ The parameters are the same as the base TestCmd.run() method,
+ with the addition of:
+
+ options Extra options that get prepended to the beginning
+ of the arguments.
+
+ stdout The expected standard output from
+ the command. A value of None means
+ don't test standard output.
+
+ stderr The expected error output from
+ the command. A value of None means
+ don't test error output.
+
+ status The expected exit status from the
+ command. A value of None means don't
+ test exit status.
+
+ By default, this expects a successful exit (status = 0), does
+ not test standard output (stdout = None), and expects that error
+ output is empty (stderr = "").
+ """
+ kw['arguments'] = self.options_arguments(options, arguments)
+ try:
+ match = kw['match']
+ del kw['match']
+ except KeyError:
+ match = self.match
+ TestCmd.run(self, **kw)
+ self._complete(self.stdout(), stdout,
+ self.stderr(), stderr, status, match)
+
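+ # Illustrative sketch (hypothetical arguments): by default run() expects
+ # exit status 0 and empty stderr, and does not test stdout, so
+ #   test.run(arguments='-Q .')
+ # passes as long as the command exits 0 with no error output, while
+ #   test.run(arguments='badtarget', status=2, stderr=None)
+ # expects a failing exit status and ignores whatever appears on stderr.
+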
+ def skip_test(self, message="Skipping test.\n"):
+ """Skips a test.
+
+ Proper test-skipping behavior is dependent on the external
+ TESTCOMMON_PASS_SKIPS environment variable. If set, we treat
+ the skip as a PASS (exit 0), and otherwise treat it as NO RESULT.
+ In either case, we print the specified message as an indication
+ that the substance of the test was skipped.
+
+ (This was originally added to support development under Aegis.
+ Technically, skipping a test is a NO RESULT, but Aegis would
+ treat that as a test failure and prevent the change from going to
+ the next step. Since we didn't want to force anyone using Aegis
+ to have to install absolutely every tool used by the tests, we
+ would actually report to Aegis that a skipped test has PASSED
+ so that the workflow isn't held up.)
+ """
+ if message:
+ sys.stdout.write(message)
+ sys.stdout.flush()
+ pass_skips = os.environ.get('TESTCOMMON_PASS_SKIPS')
+ if pass_skips in [None, 0, '0']:
+ # skip=1 means skip this function when showing where this
+ # result came from. They only care about the line where the
+ # script called test.skip_test(), not the line number where
+ # we call test.no_result().
+ self.no_result(skip=1)
+ else:
+ # We're under the development directory for this change,
+ # so this is an Aegis invocation; pass the test (exit 0).
+ self.pass_test()
+
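+ # Illustrative sketch (assuming TestCmd's where_is() helper): a test that
+ # depends on an optional external tool typically bails out early, e.g.
+ #   if not test.where_is('swig'):
+ #       test.skip_test("swig not found; skipping test.\n")
+ # With TESTCOMMON_PASS_SKIPS set in the environment this reports PASSED;
+ # otherwise it reports NO RESULT.
+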
+# Local Variables:
+# tab-width:4
+# indent-tabs-mode:nil
+# End:
+# vim: set expandtab tabstop=4 shiftwidth=4:
diff --git a/testing/framework/TestCommonTests.py b/testing/framework/TestCommonTests.py
new file mode 100644
index 0000000..c54f33f
--- /dev/null
+++ b/testing/framework/TestCommonTests.py
@@ -0,0 +1,2392 @@
+#!/usr/bin/env python
+"""
+TestCommonTests.py: Unit tests for the TestCommon.py module.
+
+Copyright 2000-2010 Steven Knight
+This module is free software, and you may redistribute it and/or modify
+it under the same terms as Python itself, so long as this copyright message
+and disclaimer are retained in their original form.
+
+IN NO EVENT SHALL THE AUTHOR BE LIABLE TO ANY PARTY FOR DIRECT, INDIRECT,
+SPECIAL, INCIDENTAL, OR CONSEQUENTIAL DAMAGES ARISING OUT OF THE USE OF
+THIS CODE, EVEN IF THE AUTHOR HAS BEEN ADVISED OF THE POSSIBILITY OF SUCH
+DAMAGE.
+
+THE AUTHOR SPECIFICALLY DISCLAIMS ANY WARRANTIES, INCLUDING, BUT NOT
+LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A
+PARTICULAR PURPOSE. THE CODE PROVIDED HEREUNDER IS ON AN "AS IS" BASIS,
+AND THERE IS NO OBLIGATION WHATSOEVER TO PROVIDE MAINTENANCE,
+SUPPORT, UPDATES, ENHANCEMENTS, OR MODIFICATIONS.
+"""
+
+__author__ = "Steven Knight <knight at baldmt dot com>"
+__revision__ = "TestCommonTests.py 1.3.D001 2010/06/03 12:58:27 knight"
+
+import difflib
+import os
+import re
+import signal
+import stat
+import sys
+import unittest
+
+# Strip the current directory so we get the right TestCommon.py module.
+sys.path = sys.path[1:]
+
+import TestCmd
+import TestCommon
+
+def lstrip(s):
+ lines = [ _.expandtabs() for _ in s.split('\n') ]
+ if lines[0] == '':
+ lines = lines[1:]
+ spaces = len(re.match('^( *).*', lines[0]).group(1))
+ if spaces:
+ lines = [ l[spaces:] for l in lines ]
+ return '\n'.join(lines)
+
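+# Illustrative note: lstrip() removes the leading indentation (measured from
+# its first line) from an embedded triple-quoted script so it can be fed to a
+# Python subprocess, e.g.
+#   lstrip("    import sys\n    sys.exit(0)\n") == "import sys\nsys.exit(0)\n"
+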
+if sys.version[:3] == '1.5':
+ expected_newline = '\\012'
+else:
+ expected_newline = '\\n'
+
+def assert_display(expect, result, error=None):
+ try:
+ expect = expect.pattern
+ except AttributeError:
+ pass
+ result = [
+ '\n',
+ ('*'*80) + '\n',
+ expect,
+ ('*'*80) + '\n',
+ result,
+ ('*'*80) + '\n',
+ ]
+ if error:
+ result.append(error)
+ return ''.join(result)
+
+
+class TestCommonTestCase(unittest.TestCase):
+ """Base class for TestCommon test cases, fixture and utility methods."""
+ create_run_env = True
+
+ def setUp(self):
+ self.orig_cwd = os.getcwd()
+ if self.create_run_env:
+ self.run_env = TestCmd.TestCmd(workdir = '')
+
+ def tearDown(self):
+ os.chdir(self.orig_cwd)
+
+ def set_up_execution_scripts(self):
+ run_env = self.run_env
+
+ run_env.subdir('sub dir')
+
+ self.python = sys.executable
+
+ self.pass_script = run_env.workpath('sub dir', 'pass')
+ self.fail_script = run_env.workpath('sub dir', 'fail')
+ self.stdout_script = run_env.workpath('sub dir', 'stdout')
+ self.stderr_script = run_env.workpath('sub dir', 'stderr')
+ self.signal_script = run_env.workpath('sub dir', 'signal')
+ self.stdin_script = run_env.workpath('sub dir', 'stdin')
+
+ preamble = "import sys"
+ stdout = "; sys.stdout.write(r'%s: STDOUT: ' + repr(sys.argv[1:]) + '\\n')"
+ stderr = "; sys.stderr.write(r'%s: STDERR: ' + repr(sys.argv[1:]) + '\\n')"
+ exit0 = "; sys.exit(0)"
+ exit1 = "; sys.exit(1)"
+ if sys.platform == 'win32':
+ wrapper = '@python -c "%s" %%1 %%2 %%3 %%4 %%5 %%6 %%7 %%8 %%9\n'
+ else:
+ wrapper = '#! /usr/bin/env python\n%s\n'
+
+ pass_body = preamble + stdout % self.pass_script + exit0
+ fail_body = preamble + stdout % self.fail_script + exit1
+ stderr_body = preamble + stderr % self.stderr_script + exit0
+
+ run_env.write(self.pass_script, wrapper % pass_body)
+ run_env.write(self.fail_script, wrapper % fail_body)
+ run_env.write(self.stderr_script, wrapper % stderr_body)
+
+ signal_body = lstrip("""\
+ import os
+ import signal
+ os.kill(os.getpid(), signal.SIGTERM)
+ """)
+
+ run_env.write(self.signal_script, wrapper % signal_body)
+
+ stdin_body = lstrip("""\
+ import sys
+ input = sys.stdin.read()[:-1]
+ sys.stdout.write(r'%s: STDOUT: ' + repr(input) + '\\n')
+ sys.stderr.write(r'%s: STDERR: ' + repr(input) + '\\n')
+ """ % (self.stdin_script, self.stdin_script))
+
+ run_env.write(self.stdin_script, wrapper % stdin_body)
+
+ def run_execution_test(self, script, expect_stdout, expect_stderr):
+ self.set_up_execution_scripts()
+
+ run_env = self.run_env
+
+ os.chdir(run_env.workpath('sub dir'))
+
+ # Everything before this prepared our "source directory."
+ # Now do the real test.
+ script = script % self.__dict__
+ run_env.run(program=sys.executable, stdin=script)
+
+ stdout = run_env.stdout()
+ stderr = run_env.stderr()
+
+ expect_stdout = expect_stdout % self.__dict__
+ assert stdout == expect_stdout, assert_display(expect_stdout,
+ stdout,
+ stderr)
+
+ try:
+ match = expect_stderr.match
+ except AttributeError:
+ expect_stderr = expect_stderr % self.__dict__
+ assert stderr == expect_stderr, assert_display(expect_stderr,
+ stderr)
+ else:
+ assert expect_stderr.match(stderr), assert_display(expect_stderr,
+ stderr)
+
+
+class __init__TestCase(TestCommonTestCase):
+ def test___init__(self):
+ """Test initialization"""
+ run_env = self.run_env
+
+ os.chdir(run_env.workdir)
+ script = lstrip("""\
+ from __future__ import print_function
+ from TestCommon import TestCommon
+ tc = TestCommon(workdir='')
+ import os
+ print(os.getcwd())
+ """)
+ run_env.run(program=sys.executable, stdin=script)
+ stdout = run_env.stdout()[:-1]
+ assert stdout != run_env.workdir, stdout
+ stderr = run_env.stderr()
+ assert stderr == "", stderr
+
+
+class banner_TestCase(TestCommonTestCase):
+ create_run_env = False
+ def test_banner(self):
+ """Test banner()"""
+ tc = TestCommon.TestCommon(workdir='')
+
+ b = tc.banner('xyzzy ')
+ assert b == "xyzzy ==========================================================================", b
+
+ tc.banner_width = 10
+
+ b = tc.banner('xyzzy ')
+ assert b == "xyzzy ====", b
+
+ b = tc.banner('xyzzy ', 20)
+ assert b == "xyzzy ==============", b
+
+ tc.banner_char = '-'
+
+ b = tc.banner('xyzzy ')
+ assert b == "xyzzy ----", b
+
+class must_be_writable_TestCase(TestCommonTestCase):
+ def test_file_does_not_exists(self):
+ """Test must_be_writable(): file does not exist"""
+ run_env = self.run_env
+
+ script = lstrip("""\
+ from TestCommon import TestCommon
+ tc = TestCommon(workdir='')
+ tc.must_be_writable('file1')
+ tc.pass_test()
+ """)
+ run_env.run(program=sys.executable, stdin=script)
+ stdout = run_env.stdout()
+ assert stdout == "Missing files: `file1'\n", stdout
+ stderr = run_env.stderr()
+ assert stderr.find("FAILED") != -1, stderr
+
+ def test_writable_file_exists(self):
+ """Test must_be_writable(): writable file exists"""
+ run_env = self.run_env
+
+ script = lstrip("""\
+ import os
+ import stat
+ from TestCommon import TestCommon
+ tc = TestCommon(workdir='')
+ tc.write('file1', "file1\\n")
+ f1 = tc.workpath('file1')
+ mode = os.stat(f1)[stat.ST_MODE]
+ os.chmod(f1, mode | stat.S_IWUSR)
+ tc.must_be_writable('file1')
+ tc.pass_test()
+ """)
+ run_env.run(program=sys.executable, stdin=script)
+ stdout = run_env.stdout()
+ assert stdout == "", stdout
+ stderr = run_env.stderr()
+ assert stderr == "PASSED\n", stderr
+
+ def test_non_writable_file_exists(self):
+ """Test must_be_writable(): non-writable file exists"""
+ run_env = self.run_env
+
+ script = lstrip("""\
+ import os
+ import stat
+ from TestCommon import TestCommon
+ tc = TestCommon(workdir='')
+ tc.write('file1', "file1\\n")
+ f1 = tc.workpath('file1')
+ mode = os.stat(f1)[stat.ST_MODE]
+ os.chmod(f1, mode & ~stat.S_IWUSR)
+ tc.must_be_writable('file1')
+ tc.pass_test()
+ """)
+ run_env.run(program=sys.executable, stdin=script)
+ stdout = run_env.stdout()
+ assert stdout == "Unwritable files: `file1'\n", stdout
+ stderr = run_env.stderr()
+ assert stderr.find("FAILED") != -1, stderr
+
+ def test_file_specified_as_list(self):
+ """Test must_be_writable(): file specified as list"""
+ run_env = self.run_env
+
+ script = lstrip("""\
+ import os
+ import stat
+ from TestCommon import TestCommon
+ tc = TestCommon(workdir='')
+ tc.subdir('sub')
+ tc.write(['sub', 'file1'], "sub/file1\\n")
+ f1 = tc.workpath('sub', 'file1')
+ mode = os.stat(f1)[stat.ST_MODE]
+ os.chmod(f1, mode | stat.S_IWUSR)
+ tc.must_be_writable(['sub', 'file1'])
+ tc.pass_test()
+ """)
+ run_env.run(program=sys.executable, stdin=script)
+ stdout = run_env.stdout()
+ assert stdout == "", stdout
+ stderr = run_env.stderr()
+ assert stderr == "PASSED\n", stderr
+
+
+class must_contain_TestCase(TestCommonTestCase):
+ def test_success(self):
+ """Test must_contain(): success"""
+ run_env = self.run_env
+
+ script = lstrip("""\
+ from TestCommon import TestCommon
+ tc = TestCommon(workdir='')
+ tc.write('file1', "file1 contents\\n")
+ tc.must_contain('file1', "1 c")
+ tc.pass_test()
+ """)
+ run_env.run(program=sys.executable, stdin=script)
+ stdout = run_env.stdout()
+ assert stdout == "", stdout
+ stderr = run_env.stderr()
+ assert stderr == "PASSED\n", stderr
+
+ def test_success_index_0(self):
+ """Test must_contain(): success at index 0"""
+ run_env = self.run_env
+
+ script = lstrip("""\
+ from TestCommon import TestCommon
+ tc = TestCommon(workdir='')
+ tc.write('file1', "file1 contents\\n")
+ tc.must_contain('file1', "file1 c")
+ tc.pass_test()
+ """)
+ run_env.run(program=sys.executable, stdin=script)
+ stdout = run_env.stdout()
+ assert stdout == "", stdout
+ stderr = run_env.stderr()
+ assert stderr == "PASSED\n", stderr
+
+ def test_file_missing(self):
+ """Test must_contain(): file missing"""
+ run_env = self.run_env
+
+ script = lstrip("""\
+ from TestCommon import TestCommon
+ tc = TestCommon(workdir='')
+ tc.must_contain('file1', "1 c\\n")
+ tc.pass_test()
+ """)
+ run_env.run(program=sys.executable, stdin=script)
+ stdout = run_env.stdout()
+ assert stdout == "", stdout
+ stderr = run_env.stderr()
+ assert stderr.find("No such file or directory:") != -1, stderr
+
+ def test_failure(self):
+ """Test must_contain(): failure"""
+ run_env = self.run_env
+
+ script = lstrip("""\
+ from TestCommon import TestCommon
+ tc = TestCommon(workdir='')
+ tc.write('file1', "file1 does not match\\n")
+ tc.must_contain('file1', "1 c")
+ tc.run()
+ """)
+ expect = lstrip("""\
+ File `file1' does not contain required string.
+ Required string ================================================================
+ 1 c
+ file1 contents =================================================================
+ file1 does not match
+
+ """)
+ run_env.run(program=sys.executable, stdin=script)
+ stdout = run_env.stdout()
+ assert stdout == expect, repr(stdout)
+ stderr = run_env.stderr()
+ assert stderr.find("FAILED") != -1, stderr
+
+ def test_mode(self):
+ """Test must_contain(): mode"""
+ run_env = self.run_env
+
+ script = lstrip("""\
+ from TestCommon import TestCommon
+ tc = TestCommon(workdir='')
+ tc.write('file1', "file1 contents\\n", mode='w')
+ tc.must_contain('file1', "1 c", mode='r')
+ tc.write('file2', "file2 contents\\n", mode='wb')
+ tc.must_contain('file2', "2 c", mode='rb')
+ tc.pass_test()
+ """)
+ run_env.run(program=sys.executable, stdin=script)
+ stdout = run_env.stdout()
+ assert stdout == "", stdout
+ stderr = run_env.stderr()
+ assert stderr == "PASSED\n", stderr
+
+
+
+class must_contain_all_lines_TestCase(TestCommonTestCase):
+ def test_success(self):
+ """Test must_contain_all_lines(): success"""
+ run_env = self.run_env
+
+ script = lstrip("""
+ import TestCommon
+ test = TestCommon.TestCommon(workdir='')
+
+ lines = [
+ 'xxx\\n',
+ 'yyy\\n',
+ ]
+
+ output = '''\\
+ www
+ xxx
+ yyy
+ zzz
+ '''
+
+ test.must_contain_all_lines(output, lines)
+
+ test.must_contain_all_lines(output, ['www\\n'])
+
+ test.pass_test()
+ """)
+
+ run_env.run(program=sys.executable, stdin=script)
+ stdout = run_env.stdout()
+ assert stdout == "", stdout
+ stderr = run_env.stderr()
+ assert stderr == "PASSED\n", stderr
+
+ def test_failure(self):
+ """Test must_contain_all_lines(): failure"""
+ run_env = self.run_env
+
+ script = lstrip("""
+ import TestCommon
+ test = TestCommon.TestCommon(workdir='')
+
+ lines = [
+ 'xxx\\n',
+ 'yyy\\n',
+ ]
+
+ output = '''\\
+ www
+ zzz
+ '''
+
+ test.must_contain_all_lines(output, lines)
+
+ test.pass_test()
+ """)
+
+ expect = lstrip("""\
+ Missing expected lines from output:
+ 'xxx%(expected_newline)s'
+ 'yyy%(expected_newline)s'
+ output =========================================================================
+ www
+ zzz
+ """ % globals())
+
+ run_env.run(program=sys.executable, stdin=script)
+ stdout = run_env.stdout()
+ stderr = run_env.stderr()
+ assert stdout == expect, assert_display(expect, stdout, stderr)
+ assert stderr.find("FAILED") != -1, stderr
+
+ def test_find(self):
+ """Test must_contain_all_lines(): find"""
+ run_env = self.run_env
+
+ script = lstrip("""
+ import re
+ import TestCommon
+ test = TestCommon.TestCommon(workdir='')
+
+ lines = [
+ 'x.*',
+ '.*y',
+ ]
+
+ output = '''\\
+ www
+ xxx
+ yyy
+ zzz
+ '''
+
+ def re_search(output, line):
+ return re.compile(line, re.S).search(output)
+ test.must_contain_all_lines(output, lines, find=re_search)
+
+ test.pass_test()
+ """)
+
+ run_env.run(program=sys.executable, stdin=script)
+ stdout = run_env.stdout()
+ assert stdout == "", stdout
+ stderr = run_env.stderr()
+ assert stderr == "PASSED\n", stderr
+
+ def test_title(self):
+ """Test must_contain_all_lines(): title"""
+ run_env = self.run_env
+
+ script = lstrip("""
+ import TestCommon
+ test = TestCommon.TestCommon(workdir='')
+
+ lines = [
+ 'xxx\\n',
+ 'yyy\\n',
+ ]
+
+ output = '''\\
+ www
+ zzz
+ '''
+
+ test.must_contain_all_lines(output, lines, title='STDERR')
+
+ test.pass_test()
+ """)
+
+ expect = lstrip("""\
+ Missing expected lines from STDERR:
+ 'xxx%(expected_newline)s'
+ 'yyy%(expected_newline)s'
+ STDERR =========================================================================
+ www
+ zzz
+ """ % globals())
+
+ run_env.run(program=sys.executable, stdin=script)
+ stdout = run_env.stdout()
+ stderr = run_env.stderr()
+ assert stdout == expect, assert_display(expect, stdout, stderr)
+ assert stderr.find("FAILED") != -1, stderr
+
+
+
+class must_contain_any_line_TestCase(TestCommonTestCase):
+ def test_success(self):
+ """Test must_contain_any_line(): success"""
+ run_env = self.run_env
+
+ script = lstrip("""
+ import TestCommon
+ test = TestCommon.TestCommon(workdir='')
+
+ lines = [
+ 'aaa\\n',
+ 'yyy\\n',
+ ]
+
+ output = '''\\
+ www
+ xxx
+ yyy
+ zzz
+ '''
+
+ test.must_contain_any_line(output, lines)
+
+ test.must_contain_any_line(output, ['www\\n'])
+
+ test.pass_test()
+ """)
+
+ run_env.run(program=sys.executable, stdin=script)
+ stdout = run_env.stdout()
+ assert stdout == "", stdout
+ stderr = run_env.stderr()
+ assert stderr == "PASSED\n", stderr
+
+ def test_failure(self):
+ """Test must_contain_any_line(): failure"""
+ run_env = self.run_env
+
+ script = lstrip("""
+ import TestCommon
+ test = TestCommon.TestCommon(workdir='')
+
+ lines = [
+ 'xxx\\n',
+ 'yyy\\n',
+ ]
+
+ output = '''\\
+ www
+ zzz
+ '''
+
+ test.must_contain_any_line(output, lines)
+
+ test.pass_test()
+ """)
+
+ expect = lstrip("""\
+ Missing any expected line from output:
+ 'xxx%(expected_newline)s'
+ 'yyy%(expected_newline)s'
+ output =========================================================================
+ www
+ zzz
+ """ % globals())
+
+ run_env.run(program=sys.executable, stdin=script)
+ stdout = run_env.stdout()
+ stderr = run_env.stderr()
+ assert stdout == expect, assert_display(expect, stdout, stderr)
+ assert stderr.find("FAILED") != -1, stderr
+
+ def test_find(self):
+ """Test must_contain_any_line(): find"""
+ run_env = self.run_env
+
+ script = lstrip("""
+ import re
+ import TestCommon
+ test = TestCommon.TestCommon(workdir='')
+
+ lines = [
+ 'aaa',
+ '.*y',
+ ]
+
+ output = '''\\
+ www
+ xxx
+ yyy
+ zzz
+ '''
+
+ def re_search(output, line):
+ return re.compile(line, re.S).search(output)
+ test.must_contain_any_line(output, lines, find=re_search)
+
+ test.pass_test()
+ """)
+
+ run_env.run(program=sys.executable, stdin=script)
+ stdout = run_env.stdout()
+ assert stdout == "", stdout
+ stderr = run_env.stderr()
+ assert stderr == "PASSED\n", stderr
+
+ def test_title(self):
+ """Test must_contain_any_line(): title"""
+ run_env = self.run_env
+
+ script = lstrip("""
+ import TestCommon
+ test = TestCommon.TestCommon(workdir='')
+
+ lines = [
+ 'xxx\\n',
+ 'yyy\\n',
+ ]
+
+ output = '''\\
+ www
+ zzz
+ '''
+
+ test.must_contain_any_line(output, lines, title='STDOUT')
+
+ test.pass_test()
+ """)
+
+ expect = lstrip("""\
+ Missing any expected line from STDOUT:
+ 'xxx%(expected_newline)s'
+ 'yyy%(expected_newline)s'
+ STDOUT =========================================================================
+ www
+ zzz
+ """ % globals())
+
+ run_env.run(program=sys.executable, stdin=script)
+ stdout = run_env.stdout()
+ stderr = run_env.stderr()
+ assert stdout == expect, assert_display(expect, stdout, stderr)
+ assert stderr.find("FAILED") != -1, stderr
+
+
+
+class must_contain_exactly_lines_TestCase(TestCommonTestCase):
+ def test_success_list(self):
+ """Test must_contain_exactly_lines(): success (input list)"""
+ run_env = self.run_env
+
+ script = lstrip("""
+ import TestCommon
+ test = TestCommon.TestCommon(workdir='')
+
+ lines = [
+ 'yyy\\n',
+ 'xxx\\n',
+ 'zzz',
+ 'www\\n',
+ ]
+
+ output = '''\\
+ www
+ xxx
+ yyy
+ zzz
+ '''
+
+ test.must_contain_exactly_lines(output, lines)
+
+ test.pass_test()
+ """)
+
+ run_env.run(program=sys.executable, stdin=script)
+ stdout = run_env.stdout()
+ assert stdout == "", stdout
+ stderr = run_env.stderr()
+ assert stderr == "PASSED\n", stderr
+
+ def test_success_string(self):
+ """Test must_contain_exactly_lines(): success (input string)"""
+ run_env = self.run_env
+
+ script = lstrip("""
+ import TestCommon
+ test = TestCommon.TestCommon(workdir='')
+
+ lines = '''\\
+ yyy
+ xxx
+ zzz
+ www
+ '''
+
+ output = '''\\
+ www
+ xxx
+ yyy
+ zzz
+ '''
+
+ test.must_contain_exactly_lines(output, lines)
+
+ test.pass_test()
+ """)
+
+ run_env.run(program=sys.executable, stdin=script)
+ stdout = run_env.stdout()
+ assert stdout == "", stdout
+ stderr = run_env.stderr()
+ assert stderr == "PASSED\n", stderr
+
+ def test_failure(self):
+ """Test must_contain_exactly_lines(): failure"""
+ run_env = self.run_env
+
+ script = lstrip("""
+ import TestCommon
+ test = TestCommon.TestCommon(workdir='')
+
+ lines = [
+ 'xxx\\n',
+ 'yyy\\n',
+ ]
+
+ output = '''\\
+ www
+ zzz
+ '''
+
+ test.must_contain_exactly_lines(output, lines)
+
+ test.pass_test()
+ """)
+
+ expect = lstrip("""\
+ Missing expected lines from output:
+ 'xxx'
+ 'yyy'
+ Missing output =================================================================
+ Extra unexpected lines from output:
+ 'www'
+ 'zzz'
+ Extra output ===================================================================
+ """ % globals())
+
+ run_env.run(program=sys.executable, stdin=script)
+ stdout = run_env.stdout()
+ stderr = run_env.stderr()
+ assert stdout == expect, assert_display(expect, stdout, stderr)
+ assert stderr.find("FAILED") != -1, stderr
+
+ def test_find(self):
+ """Test must_contain_exactly_lines(): find"""
+ run_env = self.run_env
+
+ script = lstrip("""
+ import re
+ import TestCommon
+ test = TestCommon.TestCommon(workdir='')
+
+ lines = [
+ 'zzz',
+ '.*y',
+ 'xxx',
+ 'www',
+ ]
+
+ output = '''\\
+ www
+ xxx
+ yyy
+ zzz
+ '''
+
+ def re_search(output, line):
+ pattern = re.compile(line, re.S)
+ index = 0
+ for o in output:
+ if pattern.search(o):
+ return index
+ index += 1
+ return None
+ test.must_contain_exactly_lines(output, lines, find=re_search)
+
+ test.pass_test()
+ """)
+
+ run_env.run(program=sys.executable, stdin=script)
+ stdout = run_env.stdout()
+ assert stdout == "", stdout
+ stderr = run_env.stderr()
+ assert stderr == "PASSED\n", stderr
+
+ def test_title(self):
+ """Test must_contain_exactly_lines(): title"""
+ run_env = self.run_env
+
+ script = lstrip("""
+ import TestCommon
+ test = TestCommon.TestCommon(workdir='')
+
+ lines = [
+ 'xxx\\n',
+ 'yyy\\n',
+ ]
+
+ output = '''\\
+ www
+ zzz
+ '''
+
+ test.must_contain_exactly_lines(output, lines, title='STDOUT')
+
+ test.pass_test()
+ """)
+
+ expect = lstrip("""\
+ Missing expected lines from STDOUT:
+ 'xxx'
+ 'yyy'
+ Missing STDOUT =================================================================
+ Extra unexpected lines from STDOUT:
+ 'www'
+ 'zzz'
+ Extra STDOUT ===================================================================
+ """ % globals())
+
+ run_env.run(program=sys.executable, stdin=script)
+ stdout = run_env.stdout()
+ stderr = run_env.stderr()
+ assert stdout == expect, assert_display(expect, stdout, stderr)
+ assert stderr.find("FAILED") != -1, stderr
+
+
+
+class must_contain_lines_TestCase(TestCommonTestCase):
+ def test_success(self):
+ """Test must_contain_lines(): success"""
+ run_env = self.run_env
+
+ script = lstrip("""
+ import TestCommon
+ test = TestCommon.TestCommon(workdir='')
+
+ lines = [
+ 'xxx\\n',
+ 'yyy\\n',
+ ]
+
+ output = '''\\
+ www
+ xxx
+ yyy
+ zzz
+ '''
+
+ test.must_contain_lines(lines, output)
+
+ test.pass_test()
+ """)
+
+ run_env.run(program=sys.executable, stdin=script)
+ stdout = run_env.stdout()
+ assert stdout == "", stdout
+ stderr = run_env.stderr()
+ assert stderr == "PASSED\n", stderr
+
+ def test_failure(self):
+ """Test must_contain_lines(): failure"""
+ run_env = self.run_env
+
+ script = lstrip("""
+ import TestCommon
+ test = TestCommon.TestCommon(workdir='')
+
+ lines = [
+ 'xxx\\n',
+ 'yyy\\n',
+ ]
+
+ output = '''\\
+ www
+ zzz
+ '''
+
+ test.must_contain_lines(lines, output)
+
+ test.pass_test()
+ """)
+
+ expect = lstrip("""\
+ Missing expected lines from output:
+ 'xxx%(expected_newline)s'
+ 'yyy%(expected_newline)s'
+ output =========================================================================
+ www
+ zzz
+ """ % globals())
+
+ run_env.run(program=sys.executable, stdin=script)
+ stdout = run_env.stdout()
+ stderr = run_env.stderr()
+ assert stdout == expect, assert_display(expect, stdout, stderr)
+ assert stderr.find("FAILED") != -1, stderr
+
+
+
+class must_exist_TestCase(TestCommonTestCase):
+ def test_success(self):
+ """Test must_exist(): success"""
+ run_env = self.run_env
+
+ script = lstrip("""\
+ from TestCommon import TestCommon
+ tc = TestCommon(workdir='')
+ tc.write('file1', "file1\\n")
+ tc.must_exist('file1')
+ tc.pass_test()
+ """)
+ run_env.run(program=sys.executable, stdin=script)
+ stdout = run_env.stdout()
+ assert stdout == "", stdout
+ stderr = run_env.stderr()
+ assert stderr == "PASSED\n", stderr
+
+ def test_failure(self):
+ """Test must_exist(): failure"""
+ run_env = self.run_env
+
+ script = lstrip("""\
+ from TestCommon import TestCommon
+ tc = TestCommon(workdir='')
+ tc.must_exist('file1')
+ tc.pass_test()
+ """)
+ run_env.run(program=sys.executable, stdin=script)
+ stdout = run_env.stdout()
+ assert stdout == "Missing files: `file1'\n", stdout
+ stderr = run_env.stderr()
+ assert stderr.find("FAILED") != -1, stderr
+
+ def test_file_specified_as_list(self):
+ """Test must_exist(): file specified as list"""
+ run_env = self.run_env
+
+ script = lstrip("""\
+ from TestCommon import TestCommon
+ tc = TestCommon(workdir='')
+ tc.subdir('sub')
+ tc.write(['sub', 'file1'], "sub/file1\\n")
+ tc.must_exist(['sub', 'file1'])
+ tc.pass_test()
+ """)
+ run_env.run(program=sys.executable, stdin=script)
+ stdout = run_env.stdout()
+ assert stdout == "", stdout
+ stderr = run_env.stderr()
+ assert stderr == "PASSED\n", stderr
+
+ def test_broken_link(self):
+ """Test must_exist(): exists but it is a broken link"""
+ run_env = self.run_env
+
+ script = lstrip("""\
+ from TestCommon import TestCommon
+ tc = TestCommon(workdir='')
+ tc.symlink('badtarget', "brokenlink")
+ tc.must_exist('brokenlink')
+ tc.pass_test()
+ """)
+ run_env.run(program=sys.executable, stdin=script)
+ stdout = run_env.stdout()
+ assert stdout == "", stdout
+ stderr = run_env.stderr()
+ assert stderr == "PASSED\n", stderr
+
+class must_exist_one_of_TestCase(TestCommonTestCase):
+ def test_success(self):
+ """Test must_exist_one_of(): success"""
+ run_env = self.run_env
+
+ script = lstrip("""\
+ from TestCommon import TestCommon
+ tc = TestCommon(workdir='')
+ tc.write('file1', "file1\\n")
+ tc.must_exist_one_of(['file1'])
+ tc.pass_test()
+ """)
+ run_env.run(program=sys.executable, stdin=script)
+ stdout = run_env.stdout()
+ assert stdout == "", stdout
+ stderr = run_env.stderr()
+ assert stderr == "PASSED\n", stderr
+
+ def test_failure(self):
+ """Test must_exist_one_of(): failure"""
+ run_env = self.run_env
+
+ script = lstrip("""\
+ from TestCommon import TestCommon
+ tc = TestCommon(workdir='')
+ tc.must_exist_one_of(['file1'])
+ tc.pass_test()
+ """)
+ run_env.run(program=sys.executable, stdin=script)
+ stdout = run_env.stdout()
+ assert stdout == "Missing one of: `file1'\n", stdout
+ stderr = run_env.stderr()
+ assert stderr.find("FAILED") != -1, stderr
+
+ def test_files_specified_as_list(self):
+ """Test must_exist_one_of(): files specified as list"""
+ run_env = self.run_env
+
+ script = lstrip("""\
+ from TestCommon import TestCommon
+ tc = TestCommon(workdir='')
+ tc.write('file1', "file1\\n")
+ tc.must_exist_one_of(['file2', 'file1'])
+ tc.pass_test()
+ """)
+ run_env.run(program=sys.executable, stdin=script)
+ stdout = run_env.stdout()
+ assert stdout == "", stdout
+ stderr = run_env.stderr()
+ assert stderr == "PASSED\n", stderr
+
+ def test_files_specified_with_wildcards(self):
+ """Test must_exist_one_of(): files specified with wildcards"""
+ run_env = self.run_env
+
+ script = lstrip("""\
+ from TestCommon import TestCommon
+ tc = TestCommon(workdir='')
+ tc.write('file7', "file7\\n")
+ tc.must_exist_one_of(['file?'])
+ tc.pass_test()
+ """)
+ run_env.run(program=sys.executable, stdin=script)
+ stdout = run_env.stdout()
+ assert stdout == "", stdout
+ stderr = run_env.stderr()
+ assert stderr == "PASSED\n", stderr
+
+ def test_file_given_as_list(self):
+ """Test must_exist_one_of(): file given as list"""
+ run_env = self.run_env
+
+ script = lstrip("""\
+ from TestCommon import TestCommon
+ tc = TestCommon(workdir='')
+ tc.subdir('sub')
+ tc.write(['sub', 'file1'], "sub/file1\\n")
+ tc.must_exist_one_of(['file2',
+ ['sub', 'file1']])
+ tc.pass_test()
+ """)
+ run_env.run(program=sys.executable, stdin=script)
+ stdout = run_env.stdout()
+ assert stdout == "", stdout
+ stderr = run_env.stderr()
+ assert stderr == "PASSED\n", stderr
+
+ def test_file_given_as_sequence(self):
+ """Test must_exist_one_of(): file given as sequence"""
+ run_env = self.run_env
+
+ script = lstrip("""\
+ from TestCommon import TestCommon
+ tc = TestCommon(workdir='')
+ tc.subdir('sub')
+ tc.write(['sub', 'file1'], "sub/file1\\n")
+ tc.must_exist_one_of(['file2',
+ ('sub', 'file1')])
+ tc.pass_test()
+ """)
+ run_env.run(program=sys.executable, stdin=script)
+ stdout = run_env.stdout()
+ assert stdout == "", stdout
+ stderr = run_env.stderr()
+ assert stderr == "PASSED\n", stderr
+
+class must_match_TestCase(TestCommonTestCase):
+ def test_success(self):
+ """Test must_match(): success"""
+ run_env = self.run_env
+
+ script = lstrip("""\
+ from TestCommon import TestCommon
+ tc = TestCommon(workdir='')
+ tc.write('file1', "file1\\n")
+ tc.must_match('file1', "file1\\n")
+ tc.pass_test()
+ """)
+ run_env.run(program=sys.executable, stdin=script)
+ stdout = run_env.stdout()
+ assert stdout == "", stdout
+ stderr = run_env.stderr()
+ assert stderr == "PASSED\n", stderr
+
+ def test_file_does_not_exists(self):
+ """Test must_match(): file does not exist"""
+ run_env = self.run_env
+
+ script = lstrip("""\
+ from TestCommon import TestCommon
+ tc = TestCommon(workdir='')
+ tc.must_match('file1', "file1\\n")
+ tc.pass_test()
+ """)
+ run_env.run(program=sys.executable, stdin=script)
+ stdout = run_env.stdout()
+ assert stdout == "", stdout
+ stderr = run_env.stderr()
+ assert stderr.find("No such file or directory:") != -1, stderr
+
+ def test_failure(self):
+ """Test must_match(): failure"""
+ run_env = self.run_env
+
+ script = lstrip("""\
+ from TestCommon import TestCommon
+ tc = TestCommon(workdir='')
+ tc.write('file1', "file1 does not match\\n")
+ tc.must_match('file1', "file1\\n")
+ tc.run()
+ """)
+
+ expect = lstrip("""\
+ match_re: mismatch at line 0:
+ search re='^file1$'
+ line='file1 does not match'
+ Unexpected contents of `file1'
+ contents =======================================================================
+ 1c1
+ < file1
+ ---
+ > file1 does not match
+ """)
+ run_env.run(program=sys.executable, stdin=script)
+ stdout = run_env.stdout()
+ assert stdout == expect, stdout
+ stderr = run_env.stderr()
+ assert stderr.find("FAILED") != -1, stderr
+
+ def test_mode(self):
+ """Test must_match(): mode"""
+ run_env = self.run_env
+
+ script = lstrip("""\
+ from TestCommon import TestCommon
+ tc = TestCommon(workdir='')
+ tc.write('file1', "file1\\n", mode='w')
+ tc.must_match('file1', "file1\\n", mode='r')
+ tc.write('file2', "file2\\n", mode='wb')
+ tc.must_match('file2', "file2\\n", mode='rb')
+ tc.pass_test()
+ """)
+ run_env.run(program=sys.executable, stdin=script)
+ stdout = run_env.stdout()
+ assert stdout == "", stdout
+ stderr = run_env.stderr()
+ assert stderr == "PASSED\n", stderr
+
+
+
+class must_not_be_writable_TestCase(TestCommonTestCase):
+ def test_file_does_not_exists(self):
+ """Test must_not_be_writable(): file does not exist"""
+ run_env = self.run_env
+
+ script = lstrip("""\
+ from TestCommon import TestCommon
+ tc = TestCommon(workdir='')
+ tc.must_not_be_writable('file1')
+ tc.pass_test()
+ """)
+ run_env.run(program=sys.executable, stdin=script)
+ stdout = run_env.stdout()
+ assert stdout == "Missing files: `file1'\n", stdout
+ stderr = run_env.stderr()
+ assert stderr.find("FAILED") != -1, stderr
+
+ def test_writable_file_exists(self):
+ """Test must_not_be_writable(): writable file exists"""
+ run_env = self.run_env
+
+ script = lstrip("""\
+ import os
+ import stat
+ from TestCommon import TestCommon
+ tc = TestCommon(workdir='')
+ tc.write('file1', "file1\\n")
+ f1 = tc.workpath('file1')
+ mode = os.stat(f1)[stat.ST_MODE]
+ os.chmod(f1, mode | stat.S_IWUSR)
+ tc.must_not_be_writable('file1')
+ tc.pass_test()
+ """)
+ run_env.run(program=sys.executable, stdin=script)
+ stdout = run_env.stdout()
+ assert stdout == "Writable files: `file1'\n", stdout
+ stderr = run_env.stderr()
+ assert stderr.find("FAILED") != -1, stderr
+
+ def test_non_writable_file_exists(self):
+ """Test must_not_be_writable(): non-writable file exists"""
+ run_env = self.run_env
+
+ script = lstrip("""\
+ import os
+ import stat
+ from TestCommon import TestCommon
+ tc = TestCommon(workdir='')
+ tc.write('file1', "file1\\n")
+ f1 = tc.workpath('file1')
+ mode = os.stat(f1)[stat.ST_MODE]
+ os.chmod(f1, mode & ~stat.S_IWUSR)
+ tc.must_not_be_writable('file1')
+ tc.pass_test()
+ """)
+ run_env.run(program=sys.executable, stdin=script)
+ stdout = run_env.stdout()
+ assert stdout == "", stdout
+ stderr = run_env.stderr()
+ assert stderr == "PASSED\n", stderr
+
+ def test_file_specified_as_list(self):
+ """Test must_not_be_writable(): file specified as list"""
+ run_env = self.run_env
+
+ script = lstrip("""\
+ import os
+ import stat
+ from TestCommon import TestCommon
+ tc = TestCommon(workdir='')
+ tc.subdir('sub')
+ tc.write(['sub', 'file1'], "sub/file1\\n")
+ f1 = tc.workpath('sub', 'file1')
+ mode = os.stat(f1)[stat.ST_MODE]
+ os.chmod(f1, mode & ~stat.S_IWUSR)
+ tc.must_not_be_writable(['sub', 'file1'])
+ tc.pass_test()
+ """)
+ run_env.run(program=sys.executable, stdin=script)
+ stdout = run_env.stdout()
+ assert stdout == "", stdout
+ stderr = run_env.stderr()
+ assert stderr == "PASSED\n", stderr
+
+
+
+class must_not_contain_TestCase(TestCommonTestCase):
+ def test_success(self):
+ """Test must_not_contain(): success"""
+ run_env = self.run_env
+
+ script = lstrip("""\
+ from TestCommon import TestCommon
+ tc = TestCommon(workdir='')
+ tc.write('file1', "file1 contents\\n")
+ tc.must_not_contain('file1', "1 does not contain c")
+ tc.pass_test()
+ """)
+ run_env.run(program=sys.executable, stdin=script)
+ stdout = run_env.stdout()
+ assert stdout == "", stdout
+ stderr = run_env.stderr()
+ assert stderr == "PASSED\n", stderr
+
+ def test_file_does_not_exist(self):
+ """Test must_not_contain(): file does not exist"""
+ run_env = self.run_env
+
+ script = lstrip("""\
+ from TestCommon import TestCommon
+ tc = TestCommon(workdir='')
+ tc.must_not_contain('file1', "1 c\\n")
+ tc.pass_test()
+ """)
+ run_env.run(program=sys.executable, stdin=script)
+ stdout = run_env.stdout()
+ assert stdout == "", stdout
+ stderr = run_env.stderr()
+ assert stderr.find("No such file or directory:") != -1, stderr
+
+ def test_failure(self):
+ """Test must_not_contain(): failure"""
+ run_env = self.run_env
+
+ script = lstrip("""\
+ from TestCommon import TestCommon
+ tc = TestCommon(workdir='')
+ tc.write('file1', "file1 does contain contents\\n")
+ tc.must_not_contain('file1', "1 does contain c")
+ tc.run()
+ """)
+ expect = lstrip("""\
+ File `file1' contains banned string.
+ Banned string ==================================================================
+ 1 does contain c
+ file1 contents =================================================================
+ file1 does contain contents
+
+ """)
+ run_env.run(program=sys.executable, stdin=script)
+ stdout = run_env.stdout()
+ assert stdout == expect, repr(stdout)
+ stderr = run_env.stderr()
+ assert stderr.find("FAILED") != -1, stderr
+
+ def test_failure_index_0(self):
+ """Test must_not_contain(): failure at index 0"""
+ run_env = self.run_env
+
+ script = lstrip("""\
+ from TestCommon import TestCommon
+ tc = TestCommon(workdir='')
+ tc.write('file1', "file1 does contain contents\\n")
+ tc.must_not_contain('file1', "file1 does")
+ tc.run()
+ """)
+ expect = lstrip("""\
+ File `file1' contains banned string.
+ Banned string ==================================================================
+ file1 does
+ file1 contents =================================================================
+ file1 does contain contents
+
+ """)
+ run_env.run(program=sys.executable, stdin=script)
+ stdout = run_env.stdout()
+ assert stdout == expect, repr(stdout)
+ stderr = run_env.stderr()
+ assert stderr.find("FAILED") != -1, stderr
+
+ def test_mode(self):
+ """Test must_not_contain(): mode"""
+ run_env = self.run_env
+
+ script = lstrip("""\
+ from TestCommon import TestCommon
+ tc = TestCommon(workdir='')
+ tc.write('file1', "file1 contents\\n", mode='w')
+ tc.must_not_contain('file1', "1 does not contain c", mode='r')
+ tc.write('file2', "file2 contents\\n", mode='wb')
+ tc.must_not_contain('file2', "2 does not contain c", mode='rb')
+ tc.pass_test()
+ """)
+ run_env.run(program=sys.executable, stdin=script)
+ stdout = run_env.stdout()
+ assert stdout == "", stdout
+ stderr = run_env.stderr()
+ assert stderr == "PASSED\n", stderr
+
+
+
+class must_not_contain_any_line_TestCase(TestCommonTestCase):
+ def test_failure(self):
+ """Test must_not_contain_any_line(): failure"""
+ run_env = self.run_env
+
+ script = lstrip("""
+ import TestCommon
+ test = TestCommon.TestCommon(workdir='')
+
+ lines = [
+ 'xxx\\n',
+ 'yyy\\n',
+ 'www\\n',
+ ]
+
+ output = '''\\
+ www
+ xxx
+ yyy
+ zzz
+ '''
+
+ test.must_not_contain_any_line(output, lines)
+
+ test.pass_test()
+ """)
+
+ expect = lstrip("""\
+ Unexpected lines in output:
+ 'xxx%(expected_newline)s'
+ 'yyy%(expected_newline)s'
+ 'www%(expected_newline)s'
+ output =========================================================================
+ www
+ xxx
+ yyy
+ zzz
+ """ % globals())
+
+ run_env.run(program=sys.executable, stdin=script)
+ stdout = run_env.stdout()
+ stderr = run_env.stderr()
+ assert stdout == expect, assert_display(expect, stdout, stderr)
+ assert stderr.find("FAILED") != -1, stderr
+
+ def test_find(self):
+ """Test must_not_contain_any_line(): find"""
+ run_env = self.run_env
+
+ script = lstrip("""
+ import re
+ import TestCommon
+ test = TestCommon.TestCommon(workdir='')
+
+ lines = [
+ 'x.*',
+ '.*y',
+ ]
+
+ output = '''\\
+ www
+ zzz
+ '''
+
+ def re_search(output, line):
+ return re.compile(line, re.S).search(output)
+ test.must_not_contain_any_line(output, lines, find=re_search)
+
+ test.pass_test()
+ """)
+
+ run_env.run(program=sys.executable, stdin=script)
+ stdout = run_env.stdout()
+ assert stdout == "", stdout
+ stderr = run_env.stderr()
+ assert stderr == "PASSED\n", stderr
+
+ def test_success(self):
+ """Test must_not_contain_any_line(): success"""
+ run_env = self.run_env
+
+ script = lstrip("""
+ import TestCommon
+ test = TestCommon.TestCommon(workdir='')
+
+ lines = [
+ 'xxx\\n',
+ 'yyy\\n',
+ ]
+
+ output = '''\\
+ www
+ zzz
+ '''
+
+ test.must_not_contain_any_line(output, lines)
+
+ test.pass_test()
+ """)
+
+ run_env.run(program=sys.executable, stdin=script)
+ stdout = run_env.stdout()
+ assert stdout == "", stdout
+ stderr = run_env.stderr()
+ assert stderr == "PASSED\n", stderr
+
+ def test_title(self):
+ """Test must_not_contain_any_line(): title"""
+ run_env = self.run_env
+
+ script = lstrip("""
+ import TestCommon
+ test = TestCommon.TestCommon(workdir='')
+
+ lines = [
+ 'xxx\\n',
+ 'yyy\\n',
+ ]
+
+ output = '''\\
+ www
+ xxx
+ yyy
+ zzz
+ '''
+
+ test.must_not_contain_any_line(output, lines, title='XYZZY')
+
+ test.pass_test()
+ """)
+
+ expect = lstrip("""\
+ Unexpected lines in XYZZY:
+ 'xxx%(expected_newline)s'
+ 'yyy%(expected_newline)s'
+ XYZZY ==========================================================================
+ www
+ xxx
+ yyy
+ zzz
+ """ % globals())
+
+ run_env.run(program=sys.executable, stdin=script)
+ stdout = run_env.stdout()
+ stderr = run_env.stderr()
+ assert stdout == expect, assert_display(expect, stdout, stderr)
+ assert stderr.find("FAILED") != -1, stderr
+
+
+
+class must_not_contain_lines_TestCase(TestCommonTestCase):
+ def test_failure(self):
+ """Test must_not_contain_lines(): failure"""
+ run_env = self.run_env
+
+ script = lstrip("""
+ import TestCommon
+ test = TestCommon.TestCommon(workdir='')
+
+ lines = [
+ 'xxx\\n',
+ 'yyy\\n',
+ ]
+
+ output = '''\\
+ www
+ xxx
+ yyy
+ zzz
+ '''
+
+ test.must_not_contain_lines(lines, output)
+
+ test.pass_test()
+ """)
+
+ expect = lstrip("""\
+ Unexpected lines in output:
+ 'xxx%(expected_newline)s'
+ 'yyy%(expected_newline)s'
+ output =========================================================================
+ www
+ xxx
+ yyy
+ zzz
+ """ % globals())
+
+ run_env.run(program=sys.executable, stdin=script)
+ stdout = run_env.stdout()
+ stderr = run_env.stderr()
+ assert stdout == expect, assert_display(expect, stdout, stderr)
+ assert stderr.find("FAILED") != -1, stderr
+
+ def test_success(self):
+ """Test must_not_contain_lines(): success"""
+ run_env = self.run_env
+
+ script = lstrip("""
+ import TestCommon
+ test = TestCommon.TestCommon(workdir='')
+
+ lines = [
+ 'xxx\\n',
+ 'yyy\\n',
+ ]
+
+ output = '''\\
+ www
+ zzz
+ '''
+
+ test.must_not_contain_lines(lines, output)
+
+ test.pass_test()
+ """)
+
+ run_env.run(program=sys.executable, stdin=script)
+ stdout = run_env.stdout()
+ assert stdout == "", stdout
+ stderr = run_env.stderr()
+ assert stderr == "PASSED\n", stderr
+
+
+
+class must_not_exist_TestCase(TestCommonTestCase):
+ def test_failure(self):
+ """Test must_not_exist(): failure"""
+ run_env = self.run_env
+
+ script = lstrip("""\
+ from TestCommon import TestCommon
+ tc = TestCommon(workdir='')
+ tc.write('file1', "file1\\n")
+ tc.must_not_exist('file1')
+ tc.pass_test()
+ """)
+ run_env.run(program=sys.executable, stdin=script)
+ stdout = run_env.stdout()
+ assert stdout == "Unexpected files exist: `file1'\n", stdout
+ stderr = run_env.stderr()
+ assert stderr.find("FAILED") != -1, stderr
+
+ def test_success(self):
+ """Test must_not_exist(): success"""
+ run_env = self.run_env
+
+ script = lstrip("""\
+ from TestCommon import TestCommon
+ tc = TestCommon(workdir='')
+ tc.must_not_exist('file1')
+ tc.pass_test()
+ """)
+ run_env.run(program=sys.executable, stdin=script)
+ stdout = run_env.stdout()
+ assert stdout == "", stdout
+ stderr = run_env.stderr()
+ assert stderr == "PASSED\n", stderr
+
+ def test_file_specified_as_list(self):
+ """Test must_not_exist(): file specified as list"""
+ run_env = self.run_env
+
+ script = lstrip("""\
+ from TestCommon import TestCommon
+ tc = TestCommon(workdir='')
+ tc.subdir('sub')
+ tc.must_not_exist(['sub', 'file1'])
+ tc.pass_test()
+ """)
+ run_env.run(program=sys.executable, stdin=script)
+ stdout = run_env.stdout()
+ assert stdout == "", stdout
+ stderr = run_env.stderr()
+ assert stderr == "PASSED\n", stderr
+
+ def test_existing_broken_link(self):
+ """Test must_not_exist(): exists but it is a broken link"""
+ run_env = self.run_env
+
+ script = lstrip("""\
+ from TestCommon import TestCommon
+ tc = TestCommon(workdir='')
+ tc.symlink('badtarget', 'brokenlink')
+ tc.must_not_exist('brokenlink')
+ tc.pass_test()
+ """)
+ run_env.run(program=sys.executable, stdin=script)
+ stdout = run_env.stdout()
+ assert stdout == "Unexpected files exist: `brokenlink'\n", stdout
+ stderr = run_env.stderr()
+ assert stderr.find("FAILED") != -1, stderr
+
+class must_not_exist_any_of_TestCase(TestCommonTestCase):
+ def test_success(self):
+ """Test must_not_exist_any_of(): success"""
+ run_env = self.run_env
+
+ script = lstrip("""\
+ from TestCommon import TestCommon
+ tc = TestCommon(workdir='')
+ tc.must_not_exist_any_of(['file1'])
+ tc.pass_test()
+ """)
+ run_env.run(program=sys.executable, stdin=script)
+ stdout = run_env.stdout()
+ assert stdout == "", stdout
+ stderr = run_env.stderr()
+ assert stderr == "PASSED\n", stderr
+
+ def test_failure(self):
+ """Test must_not_exist_any_of(): failure"""
+ run_env = self.run_env
+
+ script = lstrip("""\
+ from TestCommon import TestCommon
+ tc = TestCommon(workdir='')
+ tc.write('file1', "file1\\n")
+ tc.must_not_exist_any_of(['file1'])
+ tc.pass_test()
+ """)
+ run_env.run(program=sys.executable, stdin=script)
+ stdout = run_env.stdout()
+ assert stdout == "Unexpected files exist: `file1'\n", stdout
+ stderr = run_env.stderr()
+ assert stderr.find("FAILED") != -1, stderr
+
+ def test_files_specified_as_list(self):
+ """Test must_not_exist_any_of(): files specified as list"""
+ run_env = self.run_env
+
+ script = lstrip("""\
+ from TestCommon import TestCommon
+ tc = TestCommon(workdir='')
+ tc.must_not_exist_any_of(['file2', 'file1'])
+ tc.pass_test()
+ """)
+ run_env.run(program=sys.executable, stdin=script)
+ stdout = run_env.stdout()
+ assert stdout == "", stdout
+ stderr = run_env.stderr()
+ assert stderr == "PASSED\n", stderr
+
+ def test_files_specified_with_wildcards(self):
+ """Test must_not_exist_any_of(): files specified with wildcards"""
+ run_env = self.run_env
+
+ script = lstrip("""\
+ from TestCommon import TestCommon
+ tc = TestCommon(workdir='')
+ tc.write('file7', "file7\\n")
+ tc.must_not_exist_any_of(['files?'])
+ tc.pass_test()
+ """)
+ run_env.run(program=sys.executable, stdin=script)
+ stdout = run_env.stdout()
+ assert stdout == "", stdout
+ stderr = run_env.stderr()
+ assert stderr == "PASSED\n", stderr
+
+ def test_file_given_as_list(self):
+ """Test must_not_exist_any_of(): file given as list"""
+ run_env = self.run_env
+
+ script = lstrip("""\
+ from TestCommon import TestCommon
+ tc = TestCommon(workdir='')
+ tc.subdir('sub')
+ tc.write(['sub', 'file1'], "sub/file1\\n")
+ tc.must_not_exist_any_of(['file2',
+ ['sub', 'files*']])
+ tc.pass_test()
+ """)
+ run_env.run(program=sys.executable, stdin=script)
+ stdout = run_env.stdout()
+ assert stdout == "", stdout
+ stderr = run_env.stderr()
+ assert stderr == "PASSED\n", stderr
+
+ def test_file_given_as_sequence(self):
+ """Test must_not_exist_any_of(): file given as sequence"""
+ run_env = self.run_env
+
+ script = lstrip("""\
+ from TestCommon import TestCommon
+ tc = TestCommon(workdir='')
+ tc.subdir('sub')
+ tc.write(['sub', 'file1'], "sub/file1\\n")
+ tc.must_not_exist_any_of(['file2',
+ ('sub', 'files?')])
+ tc.pass_test()
+ """)
+ run_env.run(program=sys.executable, stdin=script)
+ stdout = run_env.stdout()
+ assert stdout == "", stdout
+ stderr = run_env.stderr()
+ assert stderr == "PASSED\n", stderr
+
+class run_TestCase(TestCommonTestCase):
+ def test_argument_handling(self):
+ """Test run(): argument handling"""
+
+ script = lstrip("""\
+ from TestCommon import TestCommon, match_exact
+ tc = TestCommon(program=r'%(pass_script)s',
+ interpreter='%(python)s',
+ workdir="",
+ match=match_exact)
+ tc.run(arguments = "arg1 arg2 arg3",
+ stdout = r"%(pass_script)s: STDOUT: ['arg1', 'arg2', 'arg3']" + "\\n")
+ """)
+
+ self.run_execution_test(script, "", "")
+
+ def test_default_pass(self):
+ """Test run(): default arguments, script passes"""
+
+ script = lstrip("""\
+ from TestCommon import TestCommon
+ tc = TestCommon(program=r'%(pass_script)s',
+ interpreter=r'%(python)s',
+ workdir='')
+ tc.run()
+ """)
+
+ self.run_execution_test(script, "", "")
+
+ def test_default_fail(self):
+ """Test run(): default arguments, script fails"""
+
+ script = lstrip("""\
+ from TestCommon import TestCommon
+ tc = TestCommon(program=r'%(fail_script)s',
+ interpreter='%(python)s',
+ workdir='')
+ tc.run()
+ """)
+
+ expect_stdout = lstrip("""\
+ %(fail_script)s returned 1
+ STDOUT =========================================================================
+ %(fail_script)s: STDOUT: []
+
+ STDERR =========================================================================
+
+ """)
+
+ expect_stderr = lstrip("""\
+ FAILED test of .*fail
+ \\tat line \\d+ of .*TestCommon\\.py \\(_complete\\)
+ \\tfrom line \\d+ of .*TestCommon\\.py \\(run\\)
+ \\tfrom line \\d+ of <stdin>( \(<module>\))?
+ """)
+ expect_stderr = re.compile(expect_stderr, re.M)
+
+ self.run_execution_test(script, expect_stdout, expect_stderr)
+
+ def test_default_stderr(self):
+ """Test run(): default arguments, error output"""
+ script = lstrip("""\
+ from TestCommon import TestCommon
+ tc = TestCommon(program=r'%(stderr_script)s',
+ interpreter='%(python)s',
+ workdir='')
+ tc.run()
+ """)
+
+ expect_stdout = lstrip("""\
+ match_re: expected 1 lines, found 2
+ STDOUT =========================================================================
+
+ STDERR =========================================================================
+ 0a1
+ > %(stderr_script)s: STDERR: []
+ """)
+
+ expect_stderr = lstrip("""\
+ FAILED test of .*stderr
+ \\tat line \\d+ of .*TestCommon\\.py \\(_complete\\)
+ \\tfrom line \\d+ of .*TestCommon\\.py \\(run\\)
+ \\tfrom line \\d+ of <stdin>
+ """)
+ expect_stderr = re.compile(expect_stderr, re.M)
+
+ self.run_execution_test(script, expect_stdout, expect_stderr)
+
+ def test_exception_handling(self):
+ """Test run(): exception handling"""
+ script = lstrip("""\
+ import TestCmd
+ from TestCommon import TestCommon
+ def raise_exception(*args, **kw):
+ raise TypeError("forced TypeError")
+ TestCmd.TestCmd.start = raise_exception
+ tc = TestCommon(program='%(pass_script)s',
+ interpreter='%(python)s',
+ workdir='')
+ tc.run()
+ """)
+
+ expect_stdout = lstrip("""\
+ STDOUT =========================================================================
+ STDERR =========================================================================
+ """)
+
+ expect_stderr = lstrip("""\
+ Exception trying to execute: \\[%s, '[^']*pass'\\]
+ Traceback \\((innermost|most recent call) last\\):
+ File "<stdin>", line \\d+, in (\\?|<module>)
+ File "[^"]+TestCommon.py", line \\d+, in run
+ TestCmd.run\\(self, \\*\\*kw\\)
+ File "[^"]+TestCmd.py", line \\d+, in run
+ .*
+ File "[^"]+TestCommon.py", line \\d+, in start
+ raise e
+ TypeError: forced TypeError
+ """ % re.escape(repr(sys.executable)))
+ expect_stderr = re.compile(expect_stderr, re.M)
+
+ self.run_execution_test(script, expect_stdout, expect_stderr)
+
+ def test_ignore_stderr(self):
+ """Test run(): ignore stderr"""
+
+ script = lstrip("""\
+ from TestCommon import TestCommon
+ tc = TestCommon(program=r'%(stderr_script)s',
+ interpreter='%(python)s',
+ workdir='')
+ tc.run(stderr = None)
+ """)
+
+ self.run_execution_test(script, "", "")
+
+ def test_match_function_stdout(self):
+ """Test run(): explicit match function, stdout"""
+
+ script = lstrip("""\
+ def my_match_exact(actual, expect): return actual == expect
+ from TestCommon import TestCommon, match_re_dotall
+ tc = TestCommon(program=r'%(pass_script)s',
+ interpreter='%(python)s',
+ workdir="",
+ match=match_re_dotall)
+ tc.run(arguments = "arg1 arg2 arg3",
+ stdout = r"%(pass_script)s: STDOUT: ['arg1', 'arg2', 'arg3']" + "\\n",
+ match = my_match_exact)
+ """)
+
+ self.run_execution_test(script, "", "")
+
+ def test_match_function_stderr(self):
+ """Test run(): explicit match function, stderr"""
+
+ script = lstrip("""\
+ def my_match_exact(actual, expect): return actual == expect
+ from TestCommon import TestCommon, match_re_dotall
+ tc = TestCommon(program=r'%(stderr_script)s',
+ interpreter='%(python)s',
+ workdir="",
+ match=match_re_dotall)
+ tc.run(arguments = "arg1 arg2 arg3",
+ stderr = r"%(stderr_script)s: STDERR: ['arg1', 'arg2', 'arg3']" + "\\n",
+ match = my_match_exact)
+ """)
+
+ self.run_execution_test(script, "", "")
+
+ def test_matched_status_fails(self):
+ """Test run(): matched status, script fails"""
+
+ script = lstrip("""\
+ from TestCommon import TestCommon
+ tc = TestCommon(program=r'%(fail_script)s',
+ interpreter='%(python)s',
+ workdir='')
+ tc.run(status = 1)
+ """)
+
+ self.run_execution_test(script, "", "")
+
+ def test_matched_stdout(self):
+ """Test run(): matched stdout"""
+
+ script = lstrip("""\
+ from TestCommon import TestCommon, match_exact
+ tc = TestCommon(program=r'%(pass_script)s',
+ interpreter='%(python)s',
+ workdir="",
+ match=match_exact)
+ tc.run(stdout = r"%(pass_script)s: STDOUT: []" + "\\n")
+ """)
+
+ self.run_execution_test(script, "", "")
+
+ def test_matched_stderr(self):
+ """Test run(): matched stderr"""
+
+ script = lstrip("""\
+ from TestCommon import TestCommon, match_exact
+ tc = TestCommon(program=r'%(stderr_script)s',
+ interpreter='%(python)s',
+ workdir="",
+ match=match_exact)
+ tc.run(stderr = r"%(stderr_script)s: STDERR: []" + "\\n")
+ """)
+
+ self.run_execution_test(script, "", "")
+
+ def test_mismatched_status_pass(self):
+ """Test run(): mismatched status, script passes"""
+
+ script = lstrip("""\
+ from TestCommon import TestCommon
+ tc = TestCommon(program=r'%(pass_script)s',
+ interpreter='%(python)s',
+ workdir='')
+ tc.run(status = 1)
+ """)
+
+ expect_stdout = lstrip("""\
+ %(pass_script)s returned 0 (expected 1)
+ STDOUT =========================================================================
+ %(pass_script)s: STDOUT: []
+
+ STDERR =========================================================================
+
+ """)
+
+ expect_stderr = lstrip("""\
+ FAILED test of .*pass
+ \\tat line \\d+ of .*TestCommon\\.py \\(_complete\\)
+ \\tfrom line \\d+ of .*TestCommon\\.py \\(run\\)
+ \\tfrom line \\d+ of <stdin>( \(<module>\))?
+ """)
+ expect_stderr = re.compile(expect_stderr, re.M)
+
+ self.run_execution_test(script, expect_stdout, expect_stderr)
+
+ def test_mismatched_status_fail(self):
+ """Test run(): mismatched status, script fails"""
+
+ script = lstrip("""\
+ from TestCommon import TestCommon
+ tc = TestCommon(program=r'%(fail_script)s',
+ interpreter='%(python)s',
+ workdir='')
+ tc.run(status = 2)
+ """)
+
+ expect_stdout = lstrip("""\
+ %(fail_script)s returned 1 (expected 2)
+ STDOUT =========================================================================
+ %(fail_script)s: STDOUT: []
+
+ STDERR =========================================================================
+
+ """)
+
+ expect_stderr = lstrip("""\
+ FAILED test of .*fail
+ \\tat line \\d+ of .*TestCommon\\.py \\(_complete\\)
+ \\tfrom line \\d+ of .*TestCommon\\.py \\(run\\)
+ \\tfrom line \\d+ of <stdin>( \(<module>\))?
+ """)
+ expect_stderr = re.compile(expect_stderr, re.M)
+
+ self.run_execution_test(script, expect_stdout, expect_stderr)
+
+ def test_mismatched_stdout(self):
+ """Test run(): mismatched stdout"""
+
+ script = lstrip("""\
+ from TestCommon import TestCommon
+ tc = TestCommon(program=r'%(pass_script)s',
+ interpreter='%(python)s',
+ workdir='')
+ tc.run(stdout = "Not found\\n")
+ """)
+
+ expect_stdout = lstrip("""\
+ match_re: mismatch at line 0:
+ search re='^Not found$'
+ line='%(pass_script)s: STDOUT: []'
+ STDOUT =========================================================================
+ 1c1
+ < Not found
+ ---
+ > %(pass_script)s: STDOUT: []
+ """)
+
+ expect_stderr = lstrip("""\
+ FAILED test of .*pass
+ \\tat line \\d+ of .*TestCommon\\.py \\(_complete\\)
+ \\tfrom line \\d+ of .*TestCommon\\.py \\(run\\)
+ \\tfrom line \\d+ of <stdin>( \(<module>\))?
+ """)
+ expect_stderr = re.compile(expect_stderr, re.M)
+
+ self.run_execution_test(script, expect_stdout, expect_stderr)
+
+ def test_mismatched_stderr(self):
+ """Test run(): mismatched stderr"""
+
+ script = lstrip("""\
+ from TestCommon import TestCommon
+ tc = TestCommon(program=r'%(stderr_script)s',
+ interpreter='%(python)s',
+ workdir='')
+ tc.run(stderr = "Not found\\n")
+ """)
+
+ expect_stdout = lstrip("""\
+ match_re: mismatch at line 0:
+ search re='^Not found$'
+ line='%(stderr_script)s: STDERR: []'
+ STDOUT =========================================================================
+
+ STDERR =========================================================================
+ 1c1
+ < Not found
+ ---
+ > %(stderr_script)s: STDERR: []
+ """)
+
+ expect_stderr = lstrip("""\
+ FAILED test of .*stderr
+ \\tat line \\d+ of .*TestCommon\\.py \\(_complete\\)
+ \\tfrom line \\d+ of .*TestCommon\\.py \\(run\\)
+ \\tfrom line \\d+ of <stdin>( \(<module>\))?
+ """)
+ expect_stderr = re.compile(expect_stderr, re.M)
+
+ self.run_execution_test(script, expect_stdout, expect_stderr)
+
+ def test_option_handling(self):
+ """Test run(): option handling"""
+
+ script = lstrip("""\
+ from TestCommon import TestCommon, match_exact
+ tc = TestCommon(program=r'%(pass_script)s',
+ interpreter='%(python)s',
+ workdir="",
+ match=match_exact)
+ tc.run(options = "opt1 opt2 opt3",
+ stdout = r"%(pass_script)s: STDOUT: ['opt1', 'opt2', 'opt3']" + "\\n")
+ """)
+
+ self.run_execution_test(script, "", "")
+
+ def test_options_plus_arguments(self):
+ """Test run(): option handling with arguments"""
+
+ script = lstrip("""\
+ from TestCommon import TestCommon, match_exact
+ tc = TestCommon(program=r'%(pass_script)s',
+ interpreter='%(python)s',
+ workdir="",
+ match=match_exact)
+ tc.run(options = "opt1 opt2 opt3",
+ arguments = "arg1 arg2 arg3",
+ stdout = r"%(pass_script)s: STDOUT: ['opt1', 'opt2', 'opt3', 'arg1', 'arg2', 'arg3']" + "\\n")
+ """)
+
+ self.run_execution_test(script, "", "")
+
+ def test_signal_handling(self):
+ """Test run(): signal handling"""
+
+ try:
+ os.kill
+ except AttributeError:
+ sys.stderr.write('can not test, no os.kill ... ')
+ return
+
+ script = lstrip("""\
+ from TestCommon import TestCommon
+ tc = TestCommon(program=r'%(signal_script)s',
+ interpreter='%(python)s',
+ workdir='')
+ tc.run()
+ """)
+
+ self.SIGTERM = signal.SIGTERM
+
+ # Script returns the signal value as a negative number.
+ expect_stdout = lstrip("""\
+ %(signal_script)s returned -%(SIGTERM)s
+ STDOUT =========================================================================
+
+ STDERR =========================================================================
+
+ """)
+
+ expect_stderr = lstrip("""\
+ FAILED test of .*signal
+ \\tat line \\d+ of .*TestCommon\\.py \\(_complete\\)
+ \\tfrom line \\d+ of .*TestCommon\\.py \\(run\\)
+ \\tfrom line \\d+ of <stdin>
+ """)
+ expect_stderr = re.compile(expect_stderr, re.M)
+
+ self.run_execution_test(script, expect_stdout, expect_stderr)
+
+ def test_stdin(self):
+ """Test run(): stdin handling"""
+
+ script = lstrip("""\
+ from TestCommon import TestCommon, match_exact
+ tc = TestCommon(program=r'%(stdin_script)s',
+ interpreter='%(python)s',
+ workdir='',
+ match=match_exact)
+ expect_stdout = r"%(stdin_script)s: STDOUT: 'input'" + "\\n"
+ expect_stderr = r"%(stdin_script)s: STDERR: 'input'" + "\\n"
+ tc.run(stdin="input\\n", stdout = expect_stdout, stderr = expect_stderr)
+ """)
+
+ expect_stdout = lstrip("""\
+ %(pass_script)s returned 0 (expected 1)
+ STDOUT =========================================================================
+ %(pass_script)s: STDOUT: []
+
+ STDERR =========================================================================
+
+ """)
+
+ self.run_execution_test(script, "", "")
+
+
+
+class start_TestCase(TestCommonTestCase):
+ def test_option_handling(self):
+ """Test start(): option handling"""
+
+ script = lstrip("""\
+ from TestCommon import TestCommon, match_exact
+ tc = TestCommon(program=r'%(pass_script)s',
+ interpreter='%(python)s',
+ workdir="",
+ match=match_exact)
+ p = tc.start(options = "opt1 opt2 opt3")
+ expect = r"%(pass_script)s: STDOUT: ['opt1', 'opt2', 'opt3']" + "\\n"
+ tc.finish(p, stdout = expect)
+ """)
+
+ self.run_execution_test(script, "", "")
+
+ def test_options_plus_arguments(self):
+ """Test start(): option handling with arguments"""
+
+ script = lstrip("""\
+ from TestCommon import TestCommon, match_exact
+ tc = TestCommon(program=r'%(pass_script)s',
+ interpreter='%(python)s',
+ workdir="",
+ match=match_exact)
+ p = tc.start(options = "opt1 opt2 opt3",
+ arguments = "arg1 arg2 arg3")
+ expect = r"%(pass_script)s: STDOUT: ['opt1', 'opt2', 'opt3', 'arg1', 'arg2', 'arg3']" + "\\n"
+ tc.finish(p, stdout = expect)
+ """)
+
+ self.run_execution_test(script, "", "")
+
+
+
+class skip_test_TestCase(TestCommonTestCase):
+ def test_skip_test(self):
+ """Test skip_test()"""
+ run_env = self.run_env
+
+ script = lstrip("""\
+ import TestCommon
+ test = TestCommon.TestCommon(workdir='')
+ test.skip_test()
+ """)
+ run_env.run(program=sys.executable, stdin=script)
+ stdout = run_env.stdout()
+ assert stdout == "Skipping test.\n", stdout
+ stderr = run_env.stderr()
+ expect = [
+ "NO RESULT for test at line 3 of <stdin>\n",
+ "NO RESULT for test at line 3 of <stdin> (<module>)\n",
+ ]
+ assert stderr in expect, repr(stderr)
+
+ script = lstrip("""\
+ import TestCommon
+ test = TestCommon.TestCommon(workdir='')
+ test.skip_test("skipping test because I said so\\n")
+ """)
+ run_env.run(program=sys.executable, stdin=script)
+ stdout = run_env.stdout()
+ assert stdout == "skipping test because I said so\n", stdout
+ stderr = run_env.stderr()
+ expect = [
+ "NO RESULT for test at line 3 of <stdin>\n",
+ "NO RESULT for test at line 3 of <stdin> (<module>)\n",
+ ]
+ assert stderr in expect, repr(stderr)
+
+ import os
+ os.environ['TESTCOMMON_PASS_SKIPS'] = '1'
+
+ try:
+ script = lstrip("""\
+ import TestCommon
+ test = TestCommon.TestCommon(workdir='')
+ test.skip_test()
+ """)
+ run_env.run(program=sys.executable, stdin=script)
+ stdout = run_env.stdout()
+ assert stdout == "Skipping test.\n", stdout
+ stderr = run_env.stderr()
+ assert stderr == "PASSED\n", stderr
+
+ finally:
+ del os.environ['TESTCOMMON_PASS_SKIPS']
+
+
+
+class variables_TestCase(TestCommonTestCase):
+ def test_variables(self):
+ """Test global variables"""
+ run_env = self.run_env
+
+ variables = [
+ 'fail_test',
+ 'no_result',
+ 'pass_test',
+ 'match_exact',
+ 'match_re',
+ 'match_re_dotall',
+ 'python',
+ '_python_',
+ 'TestCmd',
+
+ 'TestCommon',
+ 'exe_suffix',
+ 'obj_suffix',
+ 'shobj_prefix',
+ 'shobj_suffix',
+ 'lib_prefix',
+ 'lib_suffix',
+ 'dll_prefix',
+ 'dll_suffix',
+ ]
+
+ script = "from __future__ import print_function\n" + \
+ "import TestCommon\n" + \
+ '\n'.join([ "print(TestCommon.%s)\n" % v for v in variables ])
+ run_env.run(program=sys.executable, stdin=script)
+ stderr = run_env.stderr()
+ assert stderr == "", stderr
+
+ script = "from __future__ import print_function\n" + \
+ "from TestCommon import *\n" + \
+ '\n'.join([ "print(%s)" % v for v in variables ])
+ run_env.run(program=sys.executable, stdin=script)
+ stderr = run_env.stderr()
+ assert stderr == "", stderr
+
+
+
+if __name__ == "__main__":
+ tclasses = [
+ __init__TestCase,
+ banner_TestCase,
+ must_be_writable_TestCase,
+ must_contain_TestCase,
+ must_contain_all_lines_TestCase,
+ must_contain_any_line_TestCase,
+ must_contain_exactly_lines_TestCase,
+ must_contain_lines_TestCase,
+ must_exist_TestCase,
+ must_exist_one_of_TestCase,
+ must_match_TestCase,
+ must_not_be_writable_TestCase,
+ must_not_contain_TestCase,
+ must_not_contain_any_line_TestCase,
+ must_not_contain_lines_TestCase,
+ must_not_exist_TestCase,
+ must_not_exist_any_of_TestCase,
+ run_TestCase,
+ start_TestCase,
+ skip_test_TestCase,
+ variables_TestCase,
+ ]
+ suite = unittest.TestSuite()
+ for tclass in tclasses:
+ names = unittest.getTestCaseNames(tclass, 'test_')
+ suite.addTests([ tclass(n) for n in names ])
+ if not unittest.TextTestRunner().run(suite).wasSuccessful():
+ sys.exit(1)
+
+# Local Variables:
+# tab-width:4
+# indent-tabs-mode:nil
+# End:
+# vim: set expandtab tabstop=4 shiftwidth=4:
diff --git a/testing/framework/TestRuntest.py b/testing/framework/TestRuntest.py
new file mode 100644
index 0000000..e9ca524
--- /dev/null
+++ b/testing/framework/TestRuntest.py
@@ -0,0 +1,173 @@
+"""
+TestRuntest.py: a testing framework for the runtest.py command used to
+invoke SCons tests.
+
+A TestRuntest environment object is created via the usual invocation:
+
+ test = TestRuntest()
+
+TestRuntest is a subclass of TestCommon, which in turn is a subclass
+of TestCmd, and hence has available all of the methods and attributes
+from those classes, as well as any overridden or additional methods or
+attributes defined in this subclass.
+"""
+
+# Copyright (c) 2001 - 2019 The SCons Foundation
+
+__revision__ = "testing/framework/TestRuntest.py e724ae812eb96f4858a132f5b8c769724744faf6 2019-07-21 00:04:47 bdeegan"
+
+import os
+import os.path
+import re
+import shutil
+import sys
+
+from TestCommon import *
+from TestCommon import __all__
+
+__all__.extend([ 'TestRuntest',
+ 'pythonstring',
+ 'pythonflags',
+ ])
+
+if re.search(r'\s', python):
+ pythonstring = _python_
+else:
+ pythonstring = python
+pythonstring = pythonstring.replace('\\', '\\\\')
+pythonflags = ''
+if sys.version_info[0] < 3:
+ pythonflags = ' -tt'
+
+failing_test_template = """\
+import sys
+sys.stdout.write('FAILING TEST STDOUT\\n')
+sys.stderr.write('FAILING TEST STDERR\\n')
+sys.exit(1)
+"""
+
+no_result_test_template = """\
+import sys
+sys.stdout.write('NO RESULT TEST STDOUT\\n')
+sys.stderr.write('NO RESULT TEST STDERR\\n')
+sys.exit(2)
+"""
+
+passing_test_template = """\
+import sys
+sys.stdout.write('PASSING TEST STDOUT\\n')
+sys.stderr.write('PASSING TEST STDERR\\n')
+sys.exit(0)
+"""
+
+fake_scons_py = """
+__version__ = '1.2.3'
+__build__ = 'D123'
+__buildsys__ = 'fake_system'
+__date__ = 'Jan 1 1970'
+__developer__ = 'Anonymous'
+"""
+
+fake___init___py = """
+__version__ = '4.5.6'
+__build__ = 'D456'
+__buildsys__ = 'another_fake_system'
+__date__ = 'Dec 31 1999'
+__developer__ = 'John Doe'
+"""
+
+class TestRuntest(TestCommon):
+ """Class for testing the runtest.py script.
+
+ This provides a common place for initializing Runtest tests,
+ eliminating the need to begin every test with the same repeated
+ initializations.
+ """
+
+ def __init__(self, **kw):
+ """Initialize a Runtest testing object.
+
+ If they're not overridden by keyword arguments, this
+ initializes the object with the following default values:
+
+ program = 'runtest.py'
+            interpreter = ['python'] ('-tt' is appended under Python 2)
+ match = match_exact
+ workdir = ''
+
+ The workdir value means that, by default, a temporary
+ workspace directory is created for a TestRuntest environment.
+ The superclass TestCommon.__init__() will change directory (chdir)
+ to the workspace directory, so an explicit "chdir = '.'" on all
+ of the run() method calls is not necessary. This initialization
+        also copies the runtest.py script and the testing/framework/
+        subdirectory tree to the temporary directory, duplicating how
+        this test infrastructure
+ appears in a normal workspace.
+ """
+ if 'program' not in kw:
+ kw['program'] = 'runtest.py'
+ if 'interpreter' not in kw:
+ kw['interpreter'] = [python,]
+ if sys.version_info[0] < 3:
+ kw['interpreter'].append('-tt')
+
+ if 'match' not in kw:
+ kw['match'] = match_exact
+ if 'workdir' not in kw:
+ kw['workdir'] = ''
+
+ try:
+ things_to_copy = kw['things_to_copy']
+ except KeyError:
+ things_to_copy = [
+ 'runtest.py',
+ 'testing/framework',
+ ]
+ else:
+ del kw['things_to_copy']
+
+ orig_cwd = os.getcwd()
+ TestCommon.__init__(self, **kw)
+
+ dirs = [os.environ.get('SCONS_RUNTEST_DIR', orig_cwd)]
+
+ for thing in things_to_copy:
+ for dir in dirs:
+ t = os.path.join(dir, thing)
+ if os.path.exists(t):
+ if os.path.isdir(t):
+ copy_func = shutil.copytree
+ else:
+ copy_func = shutil.copyfile
+ copy_func(t, self.workpath(thing))
+ break
+
+ self.program_set(self.workpath(kw['program']))
+
+ os.environ['PYTHONPATH'] = ''
+
+ def write_fake_scons_source_tree(self):
+ os.mkdir('src')
+ os.mkdir('src/script')
+ self.write('src/script/scons.py', fake_scons_py)
+
+ os.mkdir('src/engine')
+ os.mkdir('src/engine/SCons')
+ self.write('src/engine/SCons/__init__.py', fake___init___py)
+ os.mkdir('src/engine/SCons/Script')
+ self.write('src/engine/SCons/Script/__init__.py', fake___init___py)
+
+ def write_failing_test(self, name):
+ self.write(name, failing_test_template)
+
+ def write_no_result_test(self, name):
+ self.write(name, no_result_test_template)
+
+ def write_passing_test(self, name):
+ self.write(name, passing_test_template)
+
+# Local Variables:
+# tab-width:4
+# indent-tabs-mode:nil
+# End:
+# vim: set expandtab tabstop=4 shiftwidth=4:
diff --git a/testing/framework/TestSCons.py b/testing/framework/TestSCons.py
new file mode 100644
index 0000000..2228423
--- /dev/null
+++ b/testing/framework/TestSCons.py
@@ -0,0 +1,1745 @@
+"""
+TestSCons.py: a testing framework for the SCons software construction
+tool.
+
+A TestSCons environment object is created via the usual invocation:
+
+ test = TestSCons()
+
+TestSCons is a subclass of TestCommon, which in turn is a subclass
+of TestCmd, and hence has available all of the methods and attributes
+from those classes, as well as any overridden or additional methods or
+attributes defined in this subclass.
+"""
+
+# Copyright (c) 2001 - 2019 The SCons Foundation
+from __future__ import division, print_function
+
+__revision__ = "testing/framework/TestSCons.py e724ae812eb96f4858a132f5b8c769724744faf6 2019-07-21 00:04:47 bdeegan"
+
+import os
+import re
+import shutil
+import sys
+import time
+import subprocess
+
+from TestCommon import *
+from TestCommon import __all__
+
+from TestCmd import Popen
+from TestCmd import PIPE
+
+# Some tests which verify that SCons has been packaged properly need to
+# look for specific version file names. Replicating the version number
+# here provides some independent verification that what we packaged
+# conforms to what we expect.
+
+default_version = '3.1.0'
+
+python_version_unsupported = (2, 6, 0)
+python_version_deprecated = (2, 7, 0)
+
+# In the checked-in source, the value of SConsVersion in the following
+# line must remain "__ VERSION __" (without the spaces) so the built
+# version in build/testing/framework/TestSCons.py contains the actual version
+# string of the packages that have been built.
+SConsVersion = '3.1.0'
+if SConsVersion == '__' + 'VERSION' + '__':
+ SConsVersion = default_version
+
+__all__.extend([
+ 'TestSCons',
+ 'machine',
+ 'python',
+ '_exe',
+ '_obj',
+ '_shobj',
+ 'shobj_',
+ 'lib_',
+ '_lib',
+ 'dll_',
+ '_dll'
+ ])
+
+machine_map = {
+ 'i686' : 'i386',
+ 'i586' : 'i386',
+ 'i486' : 'i386',
+}
+
+try:
+ uname = os.uname
+except AttributeError:
+ # Windows doesn't have a uname() function. We could use something like
+ # sys.platform as a fallback, but that's not really a "machine," so
+ # just leave it as None.
+ machine = None
+else:
+ machine = uname()[4]
+ machine = machine_map.get(machine, machine)
+
+_exe = exe_suffix
+_obj = obj_suffix
+_shobj = shobj_suffix
+shobj_ = shobj_prefix
+_lib = lib_suffix
+lib_ = lib_prefix
+_dll = dll_suffix
+dll_ = dll_prefix
+
+
+if sys.platform == 'cygwin':
+ # On Cygwin, os.path.normcase() lies, so just report back the
+ # fact that the underlying Win32 OS is case-insensitive.
+ def case_sensitive_suffixes(s1, s2):
+ return 0
+else:
+ def case_sensitive_suffixes(s1, s2):
+ return (os.path.normcase(s1) != os.path.normcase(s2))
+
+
+file_expr = r"""File "[^"]*", line \d+, in [^\n]+
+"""
+
+# re.escape escapes too much.
+def re_escape(str):
+ for c in '\\.[]()*+?': # Not an exhaustive list.
+ str = str.replace(c, '\\' + c)
+ return str
+
+#
+# Helper functions that we use as a replacement to the default re.match
+# when searching for special strings in stdout/stderr.
+#
+def search_re(out, l):
+ """ Search the regular expression 'l' in the output 'out'
+ and return the start index when successful.
+ """
+ m = re.search(l, out)
+ if m:
+ return m.start()
+
+ return None
+
+def search_re_in_list(out, l):
+ """ Search the regular expression 'l' in each line of
+ the given string list 'out' and return the line's index
+ when successful.
+ """
+ for idx, o in enumerate(out):
+ m = re.search(l, o)
+ if m:
+ return idx
+
+ return None
+
+#
+# Helpers for handling Python version numbers
+#
+def python_version_string():
+ return sys.version.split()[0]
+
+def python_minor_version_string():
+ return sys.version[:3]
+
+def unsupported_python_version(version=sys.version_info):
+ return version < python_version_unsupported
+
+def deprecated_python_version(version=sys.version_info):
+ return version < python_version_deprecated
+
+if deprecated_python_version():
+ msg = r"""
+scons: warning: Support for pre-2.7.0 Python version (%s) is deprecated.
+ If this will cause hardship, contact scons-dev@scons.org
+"""
+
+ deprecated_python_expr = re_escape(msg % python_version_string()) + file_expr
+ del msg
+else:
+ deprecated_python_expr = ""
+
+
+def initialize_sconsflags(ignore_python_version):
+ """
+ Add the --warn=no-python-version option to SCONSFLAGS for every
+ command so test scripts don't have to filter out Python version
+ deprecation warnings.
+ Same for --warn=no-visual-c-missing.
+ """
+ save_sconsflags = os.environ.get('SCONSFLAGS')
+ if save_sconsflags:
+ sconsflags = [save_sconsflags]
+ else:
+ sconsflags = []
+ if ignore_python_version and deprecated_python_version():
+ sconsflags.append('--warn=no-python-version')
+ # Provide a way to suppress or provide alternate flags for
+ # TestSCons purposes by setting TESTSCONS_SCONSFLAGS.
+ # (The intended use case is to set it to null when running
+ # timing tests of earlier versions of SCons which don't
+ # support the --warn=no-visual-c-missing warning.)
+ visual_c = os.environ.get('TESTSCONS_SCONSFLAGS',
+ '--warn=no-visual-c-missing')
+ if visual_c:
+ sconsflags.append(visual_c)
+ os.environ['SCONSFLAGS'] = ' '.join(sconsflags)
+ return save_sconsflags
+
+def restore_sconsflags(sconsflags):
+ if sconsflags is None:
+ del os.environ['SCONSFLAGS']
+ else:
+ os.environ['SCONSFLAGS'] = sconsflags
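+
+# initialize_sconsflags() and restore_sconsflags() are meant to be used as a
+# pair around a single invocation, as TestSCons.run() below does.  A minimal
+# sketch of that pattern (assumes the caller wants the flags restored even
+# when the command fails):
+#
+#     saved = initialize_sconsflags(ignore_python_version=True)
+#     try:
+#         ...  # run the command
+#     finally:
+#         restore_sconsflags(saved)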
+
+
+class TestSCons(TestCommon):
+ """Class for testing SCons.
+
+ This provides a common place for initializing SCons tests,
+ eliminating the need to begin every test with the same repeated
+ initializations.
+ """
+
+ scons_version = SConsVersion
+ javac_is_gcj = False
+
+ def __init__(self, **kw):
+ """Initialize an SCons testing object.
+
+ If they're not overridden by keyword arguments, this
+ initializes the object with the following default values:
+
+ program = 'scons' if it exists,
+ else 'scons.py'
+ interpreter = 'python'
+ match = match_exact
+ workdir = ''
+
+ The workdir value means that, by default, a temporary workspace
+ directory is created for a TestSCons environment. In addition,
+ this method changes directory (chdir) to the workspace directory,
+ so an explicit "chdir = '.'" on all of the run() method calls
+ is not necessary.
+ """
+ self.orig_cwd = os.getcwd()
+ self.external = os.environ.get('SCONS_EXTERNAL_TEST', 0)
+
+ if not self.external:
+ try:
+ script_dir = os.environ['SCONS_SCRIPT_DIR']
+ except KeyError:
+ pass
+ else:
+ os.chdir(script_dir)
+ if 'program' not in kw:
+ kw['program'] = os.environ.get('SCONS')
+ if not kw['program']:
+ if not self.external:
+ if os.path.exists('scons'):
+ kw['program'] = 'scons'
+ else:
+ kw['program'] = 'scons.py'
+ else:
+ kw['program'] = 'scons'
+ kw['interpreter'] = ''
+ elif not self.external and not os.path.isabs(kw['program']):
+ kw['program'] = os.path.join(self.orig_cwd, kw['program'])
+ if 'interpreter' not in kw and not os.environ.get('SCONS_EXEC'):
+ kw['interpreter'] = [python,]
+ if sys.version_info[0] < 3:
+ kw['interpreter'].append('-tt')
+ if 'match' not in kw:
+ kw['match'] = match_exact
+ if 'workdir' not in kw:
+ kw['workdir'] = ''
+
+        # TERM can cause test failures due to control characters in
+        # prompts etc. (e.g., bogus readline init output on FC8), so
+        # force a dumb terminal.
+ os.environ['TERM'] = 'dumb'
+
+ self.ignore_python_version = kw.get('ignore_python_version', 1)
+ if kw.get('ignore_python_version', -1) != -1:
+ del kw['ignore_python_version']
+
+ TestCommon.__init__(self, **kw)
+
+ if not self.external:
+ import SCons.Node.FS
+ if SCons.Node.FS.default_fs is None:
+ SCons.Node.FS.default_fs = SCons.Node.FS.FS()
+
+ try:
+ self.fixture_dirs = (os.environ['FIXTURE_DIRS']).split(os.pathsep)
+ except KeyError:
+ pass
+
+ def Environment(self, ENV=None, *args, **kw):
+ """
+ Return a construction Environment that optionally overrides
+ the default external environment with the specified ENV.
+ """
+ if not self.external:
+ import SCons.Environment
+ import SCons.Errors
+ if not ENV is None:
+ kw['ENV'] = ENV
+ try:
+ return SCons.Environment.Environment(*args, **kw)
+ except (SCons.Errors.UserError, SCons.Errors.InternalError):
+ return None
+
+ return None
+
+ def detect(self, var, prog=None, ENV=None, norm=None):
+ """
+ Return the detected path to a tool program.
+
+ Searches first the named construction variable, then
+ the SCons path.
+
+ Args:
+ var: name of construction variable to check for tool name.
+ prog: tool program to check for.
+ ENV: if present, kwargs to initialize an environment that
+ will be created to perform the lookup.
+ norm: if true, normalize any returned path looked up in
+ the environment to use UNIX-style path separators.
+
+ Returns: full path to the tool, or None.
+
+ """
+ env = self.Environment(ENV)
+ if env:
+ v = env.subst('$' + var)
+ if not v:
+ return None
+ if prog is None:
+ prog = v
+ if v != prog:
+ return None
+ result = env.WhereIs(prog)
+ if result and norm and os.sep != '/':
+ result = result.replace(os.sep, '/')
+ return result
+
+ return self.where_is(prog)
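+
+    # Illustrative (hypothetical) call: test.detect('CC', 'gcc', norm=True)
+    # returns a full path such as '/usr/bin/gcc' when found, else None.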
+
+ def detect_tool(self, tool, prog=None, ENV=None):
+ """
+ Given a tool (i.e., tool specification that would be passed
+ to the "tools=" parameter of Environment()) and a program that
+ corresponds to that tool, return true if and only if we can find
+ that tool using Environment.Detect().
+
+ By default, prog is set to the value passed into the tools parameter.
+ """
+
+ if not prog:
+ prog = tool
+ env = self.Environment(ENV, tools=[tool])
+ if env is None:
+ return None
+ return env.Detect([prog])
+
+ def where_is(self, prog, path=None, pathext=None):
+ """
+ Given a program, search for it in the specified external PATH,
+ or in the actual external PATH if none is specified.
+ """
+ if path is None:
+ path = os.environ['PATH']
+ if self.external:
+ if isinstance(prog, str):
+ prog = [prog]
+ for p in prog:
+ result = TestCmd.where_is(self, p, path, pathext)
+ if result:
+ return os.path.normpath(result)
+ else:
+ import SCons.Environment
+ env = SCons.Environment.Environment()
+ return env.WhereIs(prog, path, pathext)
+
+ return None
+
+ def wrap_stdout(self, build_str = "", read_str = "", error = 0, cleaning = 0):
+ """Wraps standard output string(s) in the normal
+ "Reading ... done" and "Building ... done" strings
+ """
+ cap,lc = [ ('Build','build'),
+ ('Clean','clean') ][cleaning]
+ if error:
+ term = "scons: %sing terminated because of errors.\n" % lc
+ else:
+ term = "scons: done %sing targets.\n" % lc
+ return "scons: Reading SConscript files ...\n" + \
+ read_str + \
+ "scons: done reading SConscript files.\n" + \
+ "scons: %sing targets ...\n" % cap + \
+ build_str + \
+ term
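+
+    # For example (illustrative, not part of the original source),
+    # wrap_stdout(build_str="cc -o foo foo.c\n") produces:
+    #
+    #     scons: Reading SConscript files ...
+    #     scons: done reading SConscript files.
+    #     scons: Building targets ...
+    #     cc -o foo foo.c
+    #     scons: done building targets.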
+
+ def run(self, *args, **kw):
+ """
+ Set up SCONSFLAGS for every command so test scripts don't need
+ to worry about unexpected warnings in their output.
+ """
+ sconsflags = initialize_sconsflags(self.ignore_python_version)
+ try:
+ TestCommon.run(self, *args, **kw)
+ finally:
+ restore_sconsflags(sconsflags)
+
+# Modifying the options should work and ought to be simpler, but this
+# class is used for more than just running 'scons' itself. If there's
+# an automated way of determining whether it's running 'scons' or
+# something else, this code should be resurrected.
+# options = kw.get('options')
+# if options:
+# options = [options]
+# else:
+# options = []
+# if self.ignore_python_version and deprecated_python_version():
+# options.append('--warn=no-python-version')
+# # Provide a way to suppress or provide alternate flags for
+# # TestSCons purposes by setting TESTSCONS_SCONSFLAGS.
+# # (The intended use case is to set it to null when running
+# # timing tests of earlier versions of SCons which don't
+# # support the --warn=no-visual-c-missing warning.)
+# visual_c = os.environ.get('TESTSCONS_SCONSFLAGS',
+# '--warn=no-visual-c-missing')
+# if visual_c:
+# options.append(visual_c)
+# kw['options'] = ' '.join(options)
+# TestCommon.run(self, *args, **kw)
+
+ def up_to_date(self, arguments = '.', read_str = "", **kw):
+ """Asserts that all of the targets listed in arguments is
+ up to date, but does not make any assumptions on other targets.
+ This function is most useful in conjunction with the -n option.
+ """
+ s = ""
+ for arg in arguments.split():
+ s = s + "scons: `%s' is up to date.\n" % arg
+ kw['arguments'] = arguments
+ stdout = self.wrap_stdout(read_str = read_str, build_str = s)
+ # Append '.*' so that timing output that comes after the
+ # up-to-date output is okay.
+ kw['stdout'] = re.escape(stdout) + '.*'
+ kw['match'] = self.match_re_dotall
+ self.run(**kw)
+
+ def not_up_to_date(self, arguments = '.', **kw):
+ """Asserts that none of the targets listed in arguments is
+ up to date, but does not make any assumptions on other targets.
+ This function is most useful in conjunction with the -n option.
+ """
+ s = ""
+ for arg in arguments.split():
+ s = s + "(?!scons: `%s' is up to date.)" % re.escape(arg)
+ s = '('+s+'[^\n]*\n)*'
+ kw['arguments'] = arguments
+ stdout = re.escape(self.wrap_stdout(build_str='ARGUMENTSGOHERE'))
+ kw['stdout'] = stdout.replace('ARGUMENTSGOHERE', s)
+ kw['match'] = self.match_re_dotall
+ self.run(**kw)
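+
+    # Typical use from a test script (illustrative; the target name is
+    # hypothetical).  As the docstrings note, both checks pair well with -n:
+    #
+    #     test.run(arguments='.')
+    #     test.up_to_date(arguments='.')
+    #     test.not_up_to_date(options='-n', arguments='foo.o')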
+
+ def option_not_yet_implemented(self, option, arguments=None, **kw):
+ """
+ Verifies expected behavior for options that are not yet implemented:
+ a warning message, and exit status 1.
+ """
+ msg = "Warning: the %s option is not yet implemented\n" % option
+ kw['stderr'] = msg
+ if arguments:
+ # If it's a long option and the argument string begins with '=',
+ # it's of the form --foo=bar and needs no separating space.
+ if option[:2] == '--' and arguments[0] == '=':
+ kw['arguments'] = option + arguments
+ else:
+ kw['arguments'] = option + ' ' + arguments
+ return self.run(**kw)
+
+ def deprecated_wrap(self, msg):
+ """
+ Calculate the pattern that matches a deprecation warning.
+ """
+ return '\nscons: warning: ' + re_escape(msg) + '\n' + file_expr
+
+ def deprecated_fatal(self, warn, msg):
+ """
+ Determines if the warning has turned into a fatal error. If so,
+ passes the test, as any remaining runs are now moot.
+
+        This method expects an SConscript to be present that will cause
+        the warning. The method writes a SConstruct that calls the
+        SConscript and looks to see what type of result occurs.
+
+ The pattern that matches the warning is returned.
+
+ TODO: Actually detect that it's now an error. We don't have any
+ cases yet, so there's no way to test it.
+ """
+ self.write('SConstruct', """if True:
+ WARN = ARGUMENTS.get('WARN')
+ if WARN: SetOption('warn', WARN)
+ SConscript('SConscript')
+ """)
+
+ def err_out():
+ # TODO calculate stderr for fatal error
+ return re_escape('put something here')
+
+ # no option, should get one of nothing, warning, or error
+ warning = self.deprecated_wrap(msg)
+ self.run(arguments = '.', stderr = None)
+ stderr = self.stderr()
+ if stderr:
+ # most common case done first
+ if match_re_dotall(stderr, warning):
+ # expected output
+ pass
+ elif match_re_dotall(stderr, err_out()):
+ # now a fatal error; skip the rest of the tests
+ self.pass_test()
+ else:
+ # test failed; have to do this by hand...
+ print(self.banner('STDOUT '))
+ print(self.stdout())
+ print(self.diff(warning, stderr, 'STDERR '))
+ self.fail_test()
+
+ return warning
+
+ def deprecated_warning(self, warn, msg):
+ """
+ Verifies the expected behavior occurs for deprecation warnings.
+        This method expects an SConscript to be present that will cause
+ the warning. The method writes a SConstruct and exercises various
+ combinations of command-line options and SetOption parameters to
+ validate that it performs correctly.
+
+ The pattern that matches the warning is returned.
+ """
+ warning = self.deprecated_fatal(warn, msg)
+
+ def RunPair(option, expected):
+ # run the same test with the option on the command line and
+ # then with the option passed via SetOption().
+ self.run(options = '--warn=' + option,
+ arguments = '.',
+ stderr = expected,
+ match = match_re_dotall)
+ self.run(options = 'WARN=' + option,
+ arguments = '.',
+ stderr = expected,
+ match = match_re_dotall)
+
+ # all warnings off, should get no output
+ RunPair('no-deprecated', '')
+
+ # warning enabled, should get expected output
+ RunPair(warn, warning)
+
+ # warning disabled, should get either nothing or mandatory message
+ expect = """()|(Can not disable mandataory warning: 'no-%s'\n\n%s)""" % (warn, warning)
+ RunPair('no-' + warn, expect)
+
+ return warning
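+
+    # Sketch of how a test might drive this (illustrative; the warning name
+    # and message are hypothetical and must match an SConscript that
+    # actually triggers them):
+    #
+    #     test.write('SConscript', "...code that triggers the warning...")
+    #     test.deprecated_warning('deprecated-something',
+    #                             "The something feature is deprecated.")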
+
+ def diff_substr(self, expect, actual, prelen=20, postlen=40):
+ i = 0
+ for x, y in zip(expect, actual):
+ if x != y:
+ return "Actual did not match expect at char %d:\n" \
+ " Expect: %s\n" \
+ " Actual: %s\n" \
+ % (i, repr(expect[i-prelen:i+postlen]),
+ repr(actual[i-prelen:i+postlen]))
+ i = i + 1
+ return "Actual matched the expected output???"
+
+ def python_file_line(self, file, line):
+ """
+ Returns a Python error line for output comparisons.
+
+        The hard-coded string below matches the traceback format used by
+        the supported Python versions:
+
+ File "<string>", line 1, <module>
+
+ We stick the requested file name and line number in the right
+ places, abstracting out the version difference.
+ """
+ # This routine used to use traceback to get the proper format
+ # that doesn't work well with py3. And the format of the
+ # traceback seems to be stable, so let's just format
+ # an appropriate string
+ #
+ #exec('import traceback; x = traceback.format_stack()[-1]')
+ # import traceback
+ # x = traceback.format_stack()
+ # x = # XXX: .lstrip()
+ # x = x.replace('<string>', file)
+ # x = x.replace('line 1,', 'line %s,' % line)
+ # x="\n".join(x)
+ x='File "%s", line %s, in <module>\n'%(file,line)
+ return x
+
+ def normalize_ps(self, s):
+ s = re.sub(r'(Creation|Mod)Date: .*',
+ r'\1Date XXXX', s)
+ s = re.sub(r'%DVIPSSource:\s+TeX output\s.*',
+ r'%DVIPSSource: TeX output XXXX', s)
+ s = re.sub(r'/(BaseFont|FontName) /[A-Z0-9]{6}',
+ r'/\1 /XXXXXX', s)
+ s = re.sub(r'BeginFont: [A-Z0-9]{6}',
+ r'BeginFont: XXXXXX', s)
+
+ return s
+
+ @staticmethod
+ def to_bytes_re_sub(pattern, repl, str, count=0, flags=0):
+ """
+ Wrapper around re.sub to change pattern and repl to bytes to work with
+ both python 2 & 3
+ """
+ pattern = to_bytes(pattern)
+ repl = to_bytes(repl)
+ return re.sub(pattern, repl, str, count, flags)
+
+ def normalize_pdf(self, s):
+ s = self.to_bytes_re_sub(r'/(Creation|Mod)Date \(D:[^)]*\)',
+ r'/\1Date (D:XXXX)', s)
+ s = self.to_bytes_re_sub(r'/ID \[<[0-9a-fA-F]*> <[0-9a-fA-F]*>\]',
+ r'/ID [<XXXX> <XXXX>]', s)
+ s = self.to_bytes_re_sub(r'/(BaseFont|FontName) /[A-Z]{6}',
+ r'/\1 /XXXXXX', s)
+ s = self.to_bytes_re_sub(r'/Length \d+ *\n/Filter /FlateDecode\n',
+ r'/Length XXXX\n/Filter /FlateDecode\n', s)
+
+ try:
+ import zlib
+ except ImportError:
+ pass
+ else:
+ begin_marker = to_bytes('/FlateDecode\n>>\nstream\n')
+ end_marker = to_bytes('endstream\nendobj')
+
+ encoded = []
+ b = s.find(begin_marker, 0)
+ while b != -1:
+ b = b + len(begin_marker)
+ e = s.find(end_marker, b)
+ encoded.append((b, e))
+ b = s.find(begin_marker, e + len(end_marker))
+
+ x = 0
+ r = []
+ for b, e in encoded:
+ r.append(s[x:b])
+ d = zlib.decompress(s[b:e])
+ d = self.to_bytes_re_sub(r'%%CreationDate: [^\n]*\n',
+ r'%%CreationDate: 1970 Jan 01 00:00:00\n', d)
+ d = self.to_bytes_re_sub(r'%DVIPSSource: TeX output \d\d\d\d\.\d\d\.\d\d:\d\d\d\d',
+ r'%DVIPSSource: TeX output 1970.01.01:0000', d)
+ d = self.to_bytes_re_sub(r'/(BaseFont|FontName) /[A-Z]{6}',
+ r'/\1 /XXXXXX', d)
+ r.append(d)
+ x = e
+ r.append(s[x:])
+ s = to_bytes('').join(r)
+
+ return s
+
+ def paths(self,patterns):
+ import glob
+ result = []
+ for p in patterns:
+ result.extend(sorted(glob.glob(p)))
+ return result
+
+ def unlink_sconsignfile(self,name='.sconsign.dblite'):
+ """
+ Delete sconsign file.
+        Note: on Python 3 the file name gets a '.p3' suffix appended,
+        so that is handled here.
+ Parameters
+ ----------
+ name - expected name of sconsign file
+
+ Returns
+ -------
+ None
+ """
+ if sys.version_info[0] == 3:
+ name += '.p3'
+ self.unlink(name)
+
+ def java_ENV(self, version=None):
+ """
+ Initialize with a default external environment that uses a local
+ Java SDK in preference to whatever's found in the default PATH.
+
+ :param version: if set, match only that version
+ :return: the new env.
+ """
+ if not self.external:
+ try:
+ return self._java_env[version]['ENV']
+ except AttributeError:
+ self._java_env = {}
+ except KeyError:
+ pass
+
+ import SCons.Environment
+ env = SCons.Environment.Environment()
+ self._java_env[version] = env
+
+ if version:
+ if sys.platform == 'win32':
+ patterns = [
+ 'C:/Program Files*/Java/jdk*%s*/bin' % version,
+ ]
+ else:
+ patterns = [
+ '/usr/java/jdk%s*/bin' % version,
+ '/usr/lib/jvm/*-%s*/bin' % version,
+ '/usr/local/j2sdk%s*/bin' % version,
+ ]
+ java_path = self.paths(patterns) + [env['ENV']['PATH']]
+ else:
+ if sys.platform == 'win32':
+ patterns = [
+ 'C:/Program Files*/Java/jdk*/bin',
+ ]
+ else:
+ patterns = [
+ '/usr/java/latest/bin',
+ '/usr/lib/jvm/*/bin',
+ '/usr/local/j2sdk*/bin',
+ ]
+ java_path = self.paths(patterns) + [env['ENV']['PATH']]
+
+ env['ENV']['PATH'] = os.pathsep.join(java_path)
+ return env['ENV']
+
+ return None
+
+ def java_where_includes(self,version=None):
+ """
+ Find include path needed for compiling java jni code.
+
+ :param version: if set, match only that version
+ :return: path to java headers
+ """
+ import sys
+
+ result = []
+ if sys.platform[:6] == 'darwin':
+ java_home = self.java_where_java_home(version)
+ jni_path = os.path.join(java_home,'include','jni.h')
+ if os.path.exists(jni_path):
+ result.append(os.path.dirname(jni_path))
+
+ if not version:
+ version=''
+ jni_dirs = ['/System/Library/Frameworks/JavaVM.framework/Headers/jni.h',
+ '/usr/lib/jvm/default-java/include/jni.h',
+ '/usr/lib/jvm/java-*-oracle/include/jni.h']
+ else:
+ jni_dirs = ['/System/Library/Frameworks/JavaVM.framework/Versions/%s*/Headers/jni.h'%version]
+ jni_dirs.extend(['/usr/lib/jvm/java-*-sun-%s*/include/jni.h'%version,
+ '/usr/lib/jvm/java-%s*-openjdk*/include/jni.h'%version,
+ '/usr/java/jdk%s*/include/jni.h'%version])
+ dirs = self.paths(jni_dirs)
+ if not dirs:
+ return None
+ d=os.path.dirname(self.paths(jni_dirs)[0])
+ result.append(d)
+
+ if sys.platform == 'win32':
+ result.append(os.path.join(d,'win32'))
+ elif sys.platform.startswith('linux'):
+ result.append(os.path.join(d,'linux'))
+ return result
+
+ def java_where_java_home(self, version=None):
+ """
+ Find path to what would be JAVA_HOME.
+
+ SCons does not read JAVA_HOME from the environment, so deduce it.
+
+ :param version: if set, match only that version
+ :return: path where JDK components live
+ """
+ if sys.platform[:6] == 'darwin':
+ # osx 10.11, 10.12
+ home_tool = '/usr/libexec/java_home'
+ java_home = False
+ if os.path.exists(home_tool):
+ java_home = subprocess.check_output(home_tool).strip()
+ java_home = java_home.decode()
+
+ if version is None:
+ if java_home:
+ return java_home
+ else:
+ homes = ['/System/Library/Frameworks/JavaVM.framework/Home',
+ # osx 10.10
+ '/System/Library/Frameworks/JavaVM.framework/Versions/Current/Home']
+ for home in homes:
+ if os.path.exists(home):
+ return home
+
+ else:
+ if java_home.find('jdk%s'%version) != -1:
+ return java_home
+ else:
+ home = '/System/Library/Frameworks/JavaVM.framework/Versions/%s/Home' % version
+ if not os.path.exists(home):
+ # This works on OSX 10.10
+ home = '/System/Library/Frameworks/JavaVM.framework/Versions/Current/'
+ else:
+ jar = self.java_where_jar(version)
+ home = os.path.normpath('%s/..'%jar)
+ if os.path.isdir(home):
+ return home
+ print("Could not determine JAVA_HOME: %s is not a directory" % home)
+ self.fail_test()
+
+ def java_mac_check(self, where_java_bin, java_bin_name):
+        # On Mac there is a placeholder java installed to start the java
+        # install process, so we need to check the output in this case;
+        # more info here:
+ # http://anas.pk/2015/09/02/solution-no-java-runtime-present-mac-yosemite/
+ sp = subprocess.Popen([where_java_bin, "-version"], stdout=subprocess.PIPE, stderr=subprocess.PIPE)
+ stdout, stderr = sp.communicate()
+ sp.wait()
+ if("No Java runtime" in str(stderr)):
+ self.skip_test("Could not find Java " + java_bin_name + ", skipping test(s).\n")
+
+ def java_where_jar(self, version=None):
+ """
+ Find java archiver jar.
+
+ :param version: if set, match only that version
+ :return: path to jar
+ """
+ ENV = self.java_ENV(version)
+ if self.detect_tool('jar', ENV=ENV):
+ where_jar = self.detect('JAR', 'jar', ENV=ENV)
+ else:
+ where_jar = self.where_is('jar', ENV['PATH'])
+ if not where_jar:
+ self.skip_test("Could not find Java jar, skipping test(s).\n")
+ elif sys.platform == "darwin":
+ self.java_mac_check(where_jar, 'jar')
+
+ return where_jar
+
+ def java_where_java(self, version=None):
+ """
+ Find java executable.
+
+ :param version: if set, match only that version
+        :return: path to the java runtime
+ """
+ ENV = self.java_ENV(version)
+ where_java = self.where_is('java', ENV['PATH'])
+
+ if not where_java:
+ self.skip_test("Could not find Java java, skipping test(s).\n")
+ elif sys.platform == "darwin":
+ self.java_mac_check(where_java, 'java')
+
+ return where_java
+
+ def java_where_javac(self, version=None):
+ """
+ Find java compiler.
+
+ :param version: if set, match only that version
+ :return: path to javac
+ """
+ ENV = self.java_ENV(version)
+ if self.detect_tool('javac'):
+ where_javac = self.detect('JAVAC', 'javac', ENV=ENV)
+ else:
+ where_javac = self.where_is('javac', ENV['PATH'])
+ if not where_javac:
+ self.skip_test("Could not find Java javac, skipping test(s).\n")
+ elif sys.platform == "darwin":
+ self.java_mac_check(where_javac, 'javac')
+
+ self.run(program = where_javac,
+ arguments = '-version',
+ stderr=None,
+ status=None)
+ # Note recent versions output version info to stdout instead of stderr
+ if version:
+ verf = 'javac %s' % version
+ if self.stderr().find(verf) == -1 and self.stdout().find(verf) == -1:
+ fmt = "Could not find javac for Java version %s, skipping test(s).\n"
+ self.skip_test(fmt % version)
+ else:
+ version_re = r'javac (\d*\.*\d)'
+ m = re.search(version_re, self.stderr())
+ if not m:
+ m = re.search(version_re, self.stdout())
+
+ if m:
+ version = m.group(1)
+ self.javac_is_gcj = False
+ elif self.stderr().find('gcj') != -1:
+ version='1.2'
+ self.javac_is_gcj = True
+ else:
+ version = None
+ self.javac_is_gcj = False
+ return where_javac, version
+
+ def java_where_javah(self, version=None):
+ """
+ Find java header generation tool.
+
+ TODO issue #3347 since JDK10, there is no separate javah command,
+ 'javac -h' is used. We should not return a javah from a different
+ installed JDK - how to detect and what to return in this case?
+
+ :param version: if set, match only that version
+ :return: path to javah
+ """
+ ENV = self.java_ENV(version)
+ if self.detect_tool('javah'):
+ where_javah = self.detect('JAVAH', 'javah', ENV=ENV)
+ else:
+ where_javah = self.where_is('javah', ENV['PATH'])
+ if not where_javah:
+ self.skip_test("Could not find Java javah, skipping test(s).\n")
+ return where_javah
+
+ def java_where_rmic(self, version=None):
+ """
+ Find java rmic tool.
+
+ :param version: if set, match only that version
+ :return: path to rmic
+ """
+ ENV = self.java_ENV(version)
+ if self.detect_tool('rmic'):
+ where_rmic = self.detect('RMIC', 'rmic', ENV=ENV)
+ else:
+ where_rmic = self.where_is('rmic', ENV['PATH'])
+ if not where_rmic:
+ self.skip_test("Could not find Java rmic, skipping non-simulated test(s).\n")
+ return where_rmic
+
+
+ def java_get_class_files(self, dir):
+ result = []
+ for dirpath, dirnames, filenames in os.walk(dir):
+ for fname in filenames:
+ if fname.endswith('.class'):
+ result.append(os.path.join(dirpath, fname))
+ return sorted(result)
+
+
+ def Qt_dummy_installation(self, dir='qt'):
+ # create a dummy qt installation
+
+ self.subdir(dir, [dir, 'bin'], [dir, 'include'], [dir, 'lib'])
+
+ self.write([dir, 'bin', 'mymoc.py'], """\
+import getopt
+import sys
+import re
+# -w and -z are fake options used in test/QT/QTFLAGS.py
+cmd_opts, args = getopt.getopt(sys.argv[1:], 'io:wz', [])
+impl = 0
+opt_string = ''
+for opt, arg in cmd_opts:
+ if opt == '-o': outfile = arg
+ elif opt == '-i': impl = 1
+ else: opt_string = opt_string + ' ' + opt
+
+with open(outfile, 'w') as ofp:
+ ofp.write("/* mymoc.py%s */\\n" % opt_string)
+ for a in args:
+ with open(a, 'r') as ifp:
+ contents = ifp.read()
+ a = a.replace('\\\\', '\\\\\\\\')
+ subst = r'{ my_qt_symbol( "' + a + '\\\\n" ); }'
+ if impl:
+ contents = re.sub(r'#include.*', '', contents)
+ ofp.write(contents.replace('Q_OBJECT', subst))
+sys.exit(0)
+""")
+
+ self.write([dir, 'bin', 'myuic.py'], """\
+import os.path
+import re
+import sys
+output_arg = 0
+impl_arg = 0
+impl = None
+source = None
+opt_string = ''
+for arg in sys.argv[1:]:
+ if output_arg:
+ outfile = arg
+ output_arg = 0
+ elif impl_arg:
+ impl = arg
+ impl_arg = 0
+ elif arg == "-o":
+ output_arg = 1
+ elif arg == "-impl":
+ impl_arg = 1
+ elif arg[0:1] == "-":
+ opt_string = opt_string + ' ' + arg
+ else:
+ if source:
+ sys.exit(1)
+ source = sourceFile = arg
+
+with open(outfile, 'w') as ofp, open(source, 'r') as ifp:
+ ofp.write("/* myuic.py%s */\\n" % opt_string)
+ if impl:
+ ofp.write('#include "' + impl + '"\\n')
+ includes = re.findall('<include.*?>(.*?)</include>', ifp.read())
+ for incFile in includes:
+ # this is valid for ui.h files, at least
+ if os.path.exists(incFile):
+ ofp.write('#include "' + incFile + '"\\n')
+ else:
+ ofp.write('#include "my_qobject.h"\\n' + ifp.read() + " Q_OBJECT \\n")
+sys.exit(0)
+""" )
+
+ self.write([dir, 'include', 'my_qobject.h'], r"""
+#define Q_OBJECT ;
+void my_qt_symbol(const char *arg);
+""")
+
+ self.write([dir, 'lib', 'my_qobject.cpp'], r"""
+#include "../include/my_qobject.h"
+#include <stdio.h>
+void my_qt_symbol(const char *arg) {
+ fputs(arg, stdout);
+}
+""")
+
+ self.write([dir, 'lib', 'SConstruct'], r"""
+env = Environment()
+import sys
+if sys.platform == 'win32':
+ env.StaticLibrary('myqt', 'my_qobject.cpp')
+else:
+ env.SharedLibrary('myqt', 'my_qobject.cpp')
+""")
+
+ self.run(chdir = self.workpath(dir, 'lib'),
+ arguments = '.',
+ stderr = noisy_ar,
+ match = self.match_re_dotall)
+
+ self.QT = self.workpath(dir)
+ self.QT_LIB = 'myqt'
+ self.QT_MOC = '%s %s' % (_python_, self.workpath(dir, 'bin', 'mymoc.py'))
+ self.QT_UIC = '%s %s' % (_python_, self.workpath(dir, 'bin', 'myuic.py'))
+ self.QT_LIB_DIR = self.workpath(dir, 'lib')
+
+ def Qt_create_SConstruct(self, place):
+ if isinstance(place, list):
+            place = self.workpath(*place)
+ self.write(place, """\
+if ARGUMENTS.get('noqtdir', 0): QTDIR=None
+else: QTDIR=r'%s'
+env = Environment(QTDIR = QTDIR,
+ QT_LIB = r'%s',
+ QT_MOC = r'%s',
+ QT_UIC = r'%s',
+ tools=['default','qt'])
+dup = 1
+if ARGUMENTS.get('variant_dir', 0):
+ if ARGUMENTS.get('chdir', 0):
+ SConscriptChdir(1)
+ else:
+ SConscriptChdir(0)
+ dup=int(ARGUMENTS.get('dup', 1))
+ if dup == 0:
+ builddir = 'build_dup0'
+ env['QT_DEBUG'] = 1
+ else:
+ builddir = 'build'
+ VariantDir(builddir, '.', duplicate=dup)
+ print(builddir, dup)
+ sconscript = Dir(builddir).File('SConscript')
+else:
+ sconscript = File('SConscript')
+Export("env dup")
+SConscript(sconscript)
+""" % (self.QT, self.QT_LIB, self.QT_MOC, self.QT_UIC))
+
+
+ NCR = 0 # non-cached rebuild
+ CR = 1 # cached rebuild (up to date)
+ NCF = 2 # non-cached build failure
+ CF = 3 # cached build failure
+
+ if sys.platform == 'win32':
+ Configure_lib = 'msvcrt'
+ else:
+ Configure_lib = 'm'
+
+ # to use cygwin compilers on cmd.exe -> uncomment following line
+ #Configure_lib = 'm'
+
+ def coverage_run(self):
+        """ Check whether the tests are being run under coverage.
+        """
+ return 'COVERAGE_PROCESS_START' in os.environ or 'COVERAGE_FILE' in os.environ
+
+ def skip_if_not_msvc(self, check_platform=True):
+        """ Check whether we are on a Windows platform and skip the
+        test if not.  This check can be omitted by setting
+        check_platform to False.  Then, for a win32 platform,
+        additionally check whether an MSVC toolchain is installed,
+        and skip the test if none can be found (i.e. MinGW is the
+        only compiler available).
+        """
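+        # Usage sketch (hypothetical test script, not part of the framework):
+        #
+        #   test = TestSCons()
+        #   test.skip_if_not_msvc()   # bail out early unless MSVC is usable
+        #   test.run(arguments='.')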
+ if check_platform:
+ if sys.platform != 'win32':
+ msg = "Skipping Visual C/C++ test on non-Windows platform '%s'\n" % sys.platform
+ self.skip_test(msg)
+ return
+
+ try:
+ import SCons.Tool.MSCommon as msc
+ if not msc.msvc_exists():
+ msg = "No MSVC toolchain found...skipping test\n"
+ self.skip_test(msg)
+ except:
+ pass
+
+ def checkLogAndStdout(self, checks, results, cached,
+ logfile, sconf_dir, sconstruct,
+ doCheckLog=True, doCheckStdout=True):
+ """
+        Verify the expected output from using Configure(), by checking
+        the contents of stdout and/or the config.log file.
+        The checks, results and cached parameters are zipped together
+        when comparing results.
+
+ TODO: Perhaps a better API makes sense?
+
+ Parameters
+ ----------
+ checks : The Configure checks being run
+
+ results : The expected results for each check
+
+ cached : If the corresponding check is expected to be cached
+
+ logfile : Name of the config log
+
+ sconf_dir : Name of the sconf dir
+
+ sconstruct : SConstruct file name
+
+ doCheckLog : check specified log file, defaults to true
+
+ doCheckStdout : Check stdout, defaults to true
+
+ Returns
+ -------
+
+ """
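+        # Usage sketch, loosely modelled on how the Configure tests call
+        # this (all values are illustrative; NCR/CR/NCF/CF are the class
+        # attributes defined above):
+        #
+        #   test.checkLogAndStdout(
+        #       checks=["Checking for C library m... "],
+        #       results=["yes"],
+        #       cached=[[[('.c', test.NCR), ('', test.NCR)]]],
+        #       logfile='config.log',
+        #       sconf_dir='.sconf_temp',
+        #       sconstruct='SConstruct')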
+
+ class NoMatch(Exception):
+ def __init__(self, p):
+ self.pos = p
+
+ def matchPart(log, logfile, lastEnd, NoMatch=NoMatch):
+ """
+ Match part of the logfile
+ """
+ m = re.match(log, logfile[lastEnd:])
+ if not m:
+ raise NoMatch(lastEnd)
+ return m.end() + lastEnd
+
+ try:
+
+ # Build regexp for a character which is not
+ # a linesep, and in the case of CR/LF
+ # build it with both CR and CR/LF
+            # TODO: Not sure why this is a good idea.  A static string
+            # could do the same, since we only have two variations
+            # to deal with.
+ # ls = os.linesep
+ # nols = "("
+ # for i in range(len(ls)):
+ # nols = nols + "("
+ # for j in range(i):
+ # nols = nols + ls[j]
+ # nols = nols + "[^" + ls[i] + "])"
+ # if i < len(ls)-1:
+ # nols = nols + "|"
+ # nols = nols + ")"
+ #
+            # Replaced the above logic with \n since we read the file
+            # in text mode, so Python translates \r\n -> \n for us.
+ ls = '\n'
+ nols = '([^\n])'
+ lastEnd = 0
+
+ # Read the whole logfile
+ logfile = self.read(self.workpath(logfile), mode='r')
+
+ # Some debug code to keep around..
+ # sys.stderr.write("LOGFILE[%s]:%s"%(type(logfile),logfile))
+
+ if (doCheckLog and
+ logfile.find("scons: warning: The stored build information has an unexpected class.") >= 0):
+ self.fail_test()
+
+
+ log = r'file\ \S*%s\,line \d+:' % re.escape(sconstruct) + ls
+ if doCheckLog:
+ lastEnd = matchPart(log, logfile, lastEnd)
+
+ log = "\t" + re.escape("Configure(confdir = %s)" % sconf_dir) + ls
+ if doCheckLog:
+ lastEnd = matchPart(log, logfile, lastEnd)
+
+ rdstr = ""
+ cnt = 0
+ for check,result,cache_desc in zip(checks, results, cached):
+ log = re.escape("scons: Configure: " + check) + ls
+
+ if doCheckLog:
+ lastEnd = matchPart(log, logfile, lastEnd)
+
+ log = ""
+ result_cached = 1
+ for bld_desc in cache_desc: # each TryXXX
+ for ext, flag in bld_desc: # each file in TryBuild
+ file = os.path.join(sconf_dir,"conftest_%d%s" % (cnt, ext))
+ if flag == self.NCR:
+ # NCR = Non Cached Rebuild
+ # rebuild will pass
+ if ext in ['.c', '.cpp']:
+ log=log + re.escape(file + " <-") + ls
+ log=log + r"( \|" + nols + "*" + ls + ")+?"
+ else:
+ log=log + "(" + nols + "*" + ls +")*?"
+ result_cached = 0
+ if flag == self.CR:
+                            # CR = cached rebuild (up to date)
+ # up to date
+ log=log + \
+ re.escape("scons: Configure: \"%s\" is up to date."
+ % file) + ls
+ log=log+re.escape("scons: Configure: The original builder "
+ "output was:") + ls
+ log=log+r"( \|.*"+ls+")+"
+ if flag == self.NCF:
+ # non-cached rebuild failure
+ log=log + "(" + nols + "*" + ls + ")*?"
+ result_cached = 0
+ if flag == self.CF:
+ # cached rebuild failure
+ log=log + \
+ re.escape("scons: Configure: Building \"%s\" failed "
+ "in a previous run and all its sources are"
+ " up to date." % file) + ls
+ log=log+re.escape("scons: Configure: The original builder "
+ "output was:") + ls
+ log=log+r"( \|.*"+ls+")+"
+ cnt = cnt + 1
+ if result_cached:
+ result = "(cached) " + result
+ rdstr = rdstr + re.escape(check) + re.escape(result) + "\n"
+ log=log + re.escape("scons: Configure: " + result) + ls + ls
+
+ if doCheckLog:
+ lastEnd = matchPart(log, logfile, lastEnd)
+
+ log = ""
+ if doCheckLog: lastEnd = matchPart(ls, logfile, lastEnd)
+ if doCheckLog and lastEnd != len(logfile):
+ raise NoMatch(lastEnd)
+
+ except NoMatch as m:
+ print("Cannot match log file against log regexp.")
+ print("log file: ")
+ print("------------------------------------------------------")
+ print(logfile[m.pos:])
+ print("------------------------------------------------------")
+ print("log regexp: ")
+ print("------------------------------------------------------")
+ print(log)
+ print("------------------------------------------------------")
+ self.fail_test()
+
+ if doCheckStdout:
+ exp_stdout = self.wrap_stdout(".*", rdstr)
+ if not self.match_re_dotall(self.stdout(), exp_stdout):
+ print("Unexpected stdout: ")
+ print("-----------------------------------------------------")
+ print(repr(self.stdout()))
+ print("-----------------------------------------------------")
+ print(repr(exp_stdout))
+ print("-----------------------------------------------------")
+ self.fail_test()
+
+ def get_python_version(self):
+ """
+ Returns the Python version (just so everyone doesn't have to
+ hand-code slicing the right number of characters).
+ """
+ # see also sys.prefix documentation
+ return python_minor_version_string()
+
+ def get_platform_python_info(self, python_h_required=False):
+ """
+ Returns a path to a Python executable suitable for testing on
+ this platform and its associated include path, library path and
+ library name.
+
+ If the Python executable or Python header (if required)
+ is not found, the test is skipped.
+
+ Returns a tuple:
+ (path to python, include path, library path, library name)
+ """
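+        # Usage sketch:
+        #
+        #   python, incpath, libpath, libname = \
+        #       test.get_platform_python_info(python_h_required=True)
+        #   # the test is skipped automatically if python or Python.h is missing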
+ python = os.environ.get('python_executable', self.where_is('python'))
+ if not python:
+ self.skip_test('Can not find installed "python", skipping test.\n')
+
+ # construct a program to run in the intended environment
+ # in order to fetch the characteristics of that Python.
+ # Windows Python doesn't store all the info in config vars.
+ if sys.platform == 'win32':
+ self.run(program=python, stdin="""\
+import sysconfig, sys, os.path
+py_ver = 'python%d%d' % sys.version_info[:2]
+# use distutils to help find include and lib path
+# TODO: PY3 fine to use sysconfig.get_config_var("INCLUDEPY")
+try:
+ import distutils.sysconfig
+ exec_prefix = distutils.sysconfig.EXEC_PREFIX
+ include = distutils.sysconfig.get_python_inc()
+ print(include)
+ lib_path = os.path.join(exec_prefix, 'libs')
+ if not os.path.exists(lib_path):
+ # check for virtualenv path.
+ # this might not build anything different than first try.
+ def venv_path():
+ if hasattr(sys, 'real_prefix'):
+ return sys.real_prefix
+ if hasattr(sys, 'base_prefix'):
+ return sys.base_prefix
+ lib_path = os.path.join(venv_path(), 'libs')
+ if not os.path.exists(lib_path):
+ # not clear this is useful: 'lib' does not contain linkable libs
+ lib_path = os.path.join(exec_prefix, 'lib')
+ print(lib_path)
+except:
+ include = os.path.join(sys.prefix, 'include', py_ver)
+ print(include)
+ lib_path = os.path.join(sys.prefix, 'lib', py_ver, 'config')
+ print(lib_path)
+print(py_ver)
+Python_h = os.path.join(include, "Python.h")
+if os.path.exists(Python_h):
+ print(Python_h)
+else:
+ print("False")
+""")
+ else:
+ self.run(program=python, stdin="""\
+import sys, sysconfig, os.path
+include = sysconfig.get_config_var("INCLUDEPY")
+print(include)
+print(sysconfig.get_config_var("LIBDIR"))
+py_library_ver = sysconfig.get_config_var("LDVERSION")
+if not py_library_ver:
+ py_library_ver = '%d.%d' % sys.version_info[:2]
+print("python"+py_library_ver)
+Python_h = os.path.join(include, "Python.h")
+if os.path.exists(Python_h):
+ print(Python_h)
+else:
+ print("False")
+""")
+ incpath, libpath, libname, python_h = self.stdout().strip().split('\n')
+ if python_h == "False" and python_h_required:
+ self.skip_test('Can not find required "Python.h", skipping test.\n')
+
+ return (python, incpath, libpath, libname)
+
+ def start(self, *args, **kw):
+ """
+ Starts SCons in the test environment.
+
+ This method exists to tell Test{Cmd,Common} that we're going to
+ use standard input without forcing every .start() call in the
+ individual tests to do so explicitly.
+ """
+ if 'stdin' not in kw:
+ kw['stdin'] = True
+ sconsflags = initialize_sconsflags(self.ignore_python_version)
+ try:
+ p = TestCommon.start(self, *args, **kw)
+ finally:
+ restore_sconsflags(sconsflags)
+ return p
+
+ def wait_for(self, fname, timeout=20.0, popen=None):
+ """
+ Waits for the specified file name to exist.
+ """
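+        # Usage sketch (the marker file name is hypothetical):
+        #
+        #   p = test.start(arguments='--interactive')
+        #   test.wait_for(test.workpath('marker.out'), timeout=30.0, popen=p)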
+ waited = 0.0
+ while not os.path.exists(fname):
+ if timeout and waited >= timeout:
+ sys.stderr.write('timed out waiting for %s to exist\n' % fname)
+ if popen:
+ popen.stdin.close()
+ popen.stdin = None
+ self.status = 1
+ self.finish(popen)
+ stdout = self.stdout()
+ if stdout:
+ sys.stdout.write(self.banner('STDOUT ') + '\n')
+ sys.stdout.write(stdout)
+ stderr = self.stderr()
+ if stderr:
+ sys.stderr.write(self.banner('STDERR ') + '\n')
+ sys.stderr.write(stderr)
+ self.fail_test()
+ time.sleep(1.0)
+ waited = waited + 1.0
+
+ def get_alt_cpp_suffix(self):
+ """
+        Many CXX tests share this logic: they need to determine whether
+        the current OS treats .C and .c as different file suffixes.
+        If it does not, they are instructed to use .cpp instead of .C.
+ """
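+        # Usage sketch:
+        #
+        #   suffix = test.get_alt_cpp_suffix()
+        #   test.write('prog' + suffix, '...C++ source...')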
+ if not case_sensitive_suffixes('.c','.C'):
+ alt_cpp_suffix = '.cpp'
+ else:
+ alt_cpp_suffix = '.C'
+ return alt_cpp_suffix
+
+ def platform_has_symlink(self):
+ if not hasattr(os, 'symlink') or sys.platform == 'win32':
+ return False
+ else:
+ return True
+
+
+class Stat:
+ def __init__(self, name, units, expression, convert=None):
+ if convert is None:
+ convert = lambda x: x
+ self.name = name
+ self.units = units
+ self.expression = re.compile(expression)
+ self.convert = convert
+
+StatList = [
+ Stat('memory-initial', 'kbytes',
+ r'Memory before reading SConscript files:\s+(\d+)',
+ convert=lambda s: int(s) // 1024),
+ Stat('memory-prebuild', 'kbytes',
+ r'Memory before building targets:\s+(\d+)',
+ convert=lambda s: int(s) // 1024),
+ Stat('memory-final', 'kbytes',
+ r'Memory after building targets:\s+(\d+)',
+ convert=lambda s: int(s) // 1024),
+
+ Stat('time-sconscript', 'seconds',
+ r'Total SConscript file execution time:\s+([\d.]+) seconds'),
+ Stat('time-scons', 'seconds',
+ r'Total SCons execution time:\s+([\d.]+) seconds'),
+ Stat('time-commands', 'seconds',
+ r'Total command execution time:\s+([\d.]+) seconds'),
+ Stat('time-total', 'seconds',
+ r'Total build time:\s+([\d.]+) seconds'),
+]
+
+
+class TimeSCons(TestSCons):
+ """Class for timing SCons."""
+ def __init__(self, *args, **kw):
+ """
+        In addition to normal TestSCons.TestSCons initialization,
+ this enables verbose mode (which causes the command lines to
+ be displayed in the output) and copies the contents of the
+ directory containing the executing script to the temporary
+ working directory.
+ """
+ self.variables = kw.get('variables')
+ default_calibrate_variables = []
+ if self.variables is not None:
+ for variable, value in self.variables.items():
+ value = os.environ.get(variable, value)
+ try:
+ value = int(value)
+ except ValueError:
+ try:
+ value = float(value)
+ except ValueError:
+ pass
+ else:
+ default_calibrate_variables.append(variable)
+ else:
+ default_calibrate_variables.append(variable)
+ self.variables[variable] = value
+ del kw['variables']
+ calibrate_keyword_arg = kw.get('calibrate')
+ if calibrate_keyword_arg is None:
+ self.calibrate_variables = default_calibrate_variables
+ else:
+ self.calibrate_variables = calibrate_keyword_arg
+ del kw['calibrate']
+
+ self.calibrate = os.environ.get('TIMESCONS_CALIBRATE', '0') != '0'
+
+ if 'verbose' not in kw and not self.calibrate:
+ kw['verbose'] = True
+
+ TestSCons.__init__(self, *args, **kw)
+
+ # TODO(sgk): better way to get the script dir than sys.argv[0]
+ self.test_dir = os.path.dirname(sys.argv[0])
+ test_name = os.path.basename(self.test_dir)
+
+ if not os.path.isabs(self.test_dir):
+ self.test_dir = os.path.join(self.orig_cwd, self.test_dir)
+ self.copy_timing_configuration(self.test_dir, self.workpath())
+
+ def main(self, *args, **kw):
+ """
+ The main entry point for standard execution of timings.
+
+        This method runs SCons three times:
+
+ Once with the --help option, to have it exit after just reading
+ the configuration.
+
+ Once as a full build of all targets.
+
+ Once again as a (presumably) null or up-to-date build of
+ all targets.
+
+ The elapsed time to execute each build is printed after
+ it has finished.
+ """
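+        # A typical timing script is just (the variable name is hypothetical):
+        #
+        #   test = TimeSCons(variables={'TARGET_COUNT': 50})
+        #   test.main()
+        #   test.pass_test()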
+ if 'options' not in kw and self.variables:
+ options = []
+ for variable, value in self.variables.items():
+ options.append('%s=%s' % (variable, value))
+ kw['options'] = ' '.join(options)
+ if self.calibrate:
+ self.calibration(*args, **kw)
+ else:
+ self.uptime()
+ self.startup(*args, **kw)
+ self.full(*args, **kw)
+ self.null(*args, **kw)
+
+ def trace(self, graph, name, value, units, sort=None):
+ fmt = "TRACE: graph=%s name=%s value=%s units=%s"
+ line = fmt % (graph, name, value, units)
+ if sort is not None:
+ line = line + (' sort=%s' % sort)
+ line = line + '\n'
+ sys.stdout.write(line)
+ sys.stdout.flush()
+
+ def report_traces(self, trace, stats):
+ self.trace('TimeSCons-elapsed',
+ trace,
+ self.elapsed_time(),
+ "seconds",
+ sort=0)
+ for name, args in stats.items():
+ self.trace(name, trace, **args)
+
+ def uptime(self):
+ try:
+ fp = open('/proc/loadavg')
+ except EnvironmentError:
+ pass
+ else:
+ avg1, avg5, avg15 = fp.readline().split(" ")[:3]
+ fp.close()
+ self.trace('load-average', 'average1', avg1, 'processes')
+ self.trace('load-average', 'average5', avg5, 'processes')
+ self.trace('load-average', 'average15', avg15, 'processes')
+
+ def collect_stats(self, input):
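+        # Returns a dict keyed by Stat name, for example (numbers invented):
+        #   {'time-scons':   {'value': 1.23, 'units': 'seconds'},
+        #    'memory-final': {'value': 4567, 'units': 'kbytes'}}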
+ result = {}
+ for stat in StatList:
+ m = stat.expression.search(input)
+ if m:
+ value = stat.convert(m.group(1))
+ # The dict keys match the keyword= arguments
+ # of the trace() method above so they can be
+ # applied directly to that call.
+ result[stat.name] = {'value':value, 'units':stat.units}
+ return result
+
+ def add_timing_options(self, kw, additional=None):
+ """
+ Add the necessary timings options to the kw['options'] value.
+ """
+ options = kw.get('options', '')
+ if additional is not None:
+ options += additional
+ kw['options'] = options + ' --debug=memory,time'
+
+ def startup(self, *args, **kw):
+ """
+ Runs scons with the --help option.
+
+ This serves as a way to isolate just the amount of startup time
+ spent reading up the configuration, since --help exits before any
+ "real work" is done.
+ """
+ self.add_timing_options(kw, ' --help')
+ # Ignore the exit status. If the --help run dies, we just
+ # won't report any statistics for it, but we can still execute
+ # the full and null builds.
+ kw['status'] = None
+ self.run(*args, **kw)
+ sys.stdout.write(self.stdout())
+ stats = self.collect_stats(self.stdout())
+ # Delete the time-commands, since no commands are ever
+ # executed on the help run and it is (or should be) always 0.0.
+ del stats['time-commands']
+ self.report_traces('startup', stats)
+
+ def full(self, *args, **kw):
+ """
+ Runs a full build of SCons.
+ """
+ self.add_timing_options(kw)
+ self.run(*args, **kw)
+ sys.stdout.write(self.stdout())
+ stats = self.collect_stats(self.stdout())
+ self.report_traces('full', stats)
+ self.trace('full-memory', 'initial', **stats['memory-initial'])
+ self.trace('full-memory', 'prebuild', **stats['memory-prebuild'])
+ self.trace('full-memory', 'final', **stats['memory-final'])
+
+ def calibration(self, *args, **kw):
+ """
+ Runs a full build of SCons, but only reports calibration
+ information (the variable(s) that were set for this configuration,
+        and the elapsed time to run).
+ """
+ self.add_timing_options(kw)
+ self.run(*args, **kw)
+ for variable in self.calibrate_variables:
+ value = self.variables[variable]
+ sys.stdout.write('VARIABLE: %s=%s\n' % (variable, value))
+ sys.stdout.write('ELAPSED: %s\n' % self.elapsed_time())
+
+ def null(self, *args, **kw):
+ """
+ Runs an up-to-date null build of SCons.
+ """
+ # TODO(sgk): allow the caller to specify the target (argument)
+ # that must be up-to-date.
+ self.add_timing_options(kw)
+ self.up_to_date(arguments='.', **kw)
+ sys.stdout.write(self.stdout())
+ stats = self.collect_stats(self.stdout())
+ # time-commands should always be 0.0 on a null build, because
+ # no commands should be executed. Remove it from the stats
+ # so we don't trace it, but only if it *is* 0 so that we'll
+ # get some indication if a supposedly-null build actually does
+ # build something.
+ if float(stats['time-commands']['value']) == 0.0:
+ del stats['time-commands']
+ self.report_traces('null', stats)
+ self.trace('null-memory', 'initial', **stats['memory-initial'])
+ self.trace('null-memory', 'prebuild', **stats['memory-prebuild'])
+ self.trace('null-memory', 'final', **stats['memory-final'])
+
+ def elapsed_time(self):
+ """
+ Returns the elapsed time of the most recent command execution.
+ """
+ return self.endTime - self.startTime
+
+ def run(self, *args, **kw):
+ """
+ Runs a single build command, capturing output in the specified file.
+
+ Because this class is about timing SCons, we record the start
+ and end times of the elapsed execution, and also add the
+ --debug=memory and --debug=time options to have SCons report
+ its own memory and timing statistics.
+ """
+ self.startTime = time.time()
+ try:
+ result = TestSCons.run(self, *args, **kw)
+ finally:
+ self.endTime = time.time()
+ return result
+
+ def copy_timing_configuration(self, source_dir, dest_dir):
+ """
+ Copies the timing configuration from the specified source_dir (the
+ directory in which the controlling script lives) to the specified
+ dest_dir (a temporary working directory).
+
+ This ignores all files and directories that begin with the string
+ 'TimeSCons-', and all '.svn' subdirectories.
+ """
+ for root, dirs, files in os.walk(source_dir):
+ if '.svn' in dirs:
+ dirs.remove('.svn')
+ dirs = [ d for d in dirs if not d.startswith('TimeSCons-') ]
+ files = [ f for f in files if not f.startswith('TimeSCons-') ]
+ for dirname in dirs:
+ source = os.path.join(root, dirname)
+ destination = source.replace(source_dir, dest_dir)
+ os.mkdir(destination)
+ if sys.platform != 'win32':
+ shutil.copystat(source, destination)
+ for filename in files:
+ source = os.path.join(root, filename)
+ destination = source.replace(source_dir, dest_dir)
+ shutil.copy2(source, destination)
+
+
+# In some environments, $AR will generate a warning message to stderr
+# if the library doesn't previously exist and is being created. One
+# way to fix this is to tell AR to be quiet (sometimes the 'c' flag),
+# but this is difficult to do in a platform-/implementation-specific
+# method. Instead, we will use the following as a stderr match for
+# tests that use AR so that we will view zero or more "ar: creating
+# <file>" messages to be successful executions of the test (see
+# test/AR.py for sample usage).
+
+noisy_ar=r'(ar: creating( archive)? \S+\n?)*'
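+# Rough usage sketch in a test script:
+#
+#   test.run(arguments='.', stderr=TestSCons.noisy_ar,
+#            match=TestSCons.match_re_dotall)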
+
+# Local Variables:
+# tab-width:4
+# indent-tabs-mode:nil
+# End:
+# vim: set expandtab tabstop=4 shiftwidth=4:
diff --git a/testing/framework/TestSConsMSVS.py b/testing/framework/TestSConsMSVS.py
new file mode 100644
index 0000000..86935fd
--- /dev/null
+++ b/testing/framework/TestSConsMSVS.py
@@ -0,0 +1,907 @@
+"""
+TestSConsMSVS.py: a testing framework for the SCons software construction
+tool.
+
+A TestSConsMSVS environment object is created via the usual invocation:
+
+ test = TestSConsMSVS()
+
+TestSConsMSVS is a subclass of TestSCons, which is in turn a subclass
+of TestCommon, which is in turn a subclass of TestCmd, and hence
+has available all of the methods and attributes from those classes,
+as well as any overridden or additional methods or attributes defined
+in this subclass.
+"""
+
+# Copyright (c) 2001 - 2019 The SCons Foundation
+
+__revision__ = "testing/framework/TestSConsMSVS.py e724ae812eb96f4858a132f5b8c769724744faf6 2019-07-21 00:04:47 bdeegan"
+
+import os
+import sys
+import platform
+import traceback
+from xml.etree import ElementTree
+
+import SCons.Errors
+from TestSCons import *
+from TestSCons import __all__
+
+
+
+expected_dspfile_6_0 = '''\
+# Microsoft Developer Studio Project File - Name="Test" - Package Owner=<4>
+# Microsoft Developer Studio Generated Build File, Format Version 6.00
+# ** DO NOT EDIT **
+
+# TARGTYPE "Win32 (x86) External Target" 0x0106
+
+CFG=Test - Win32 Release
+!MESSAGE This is not a valid makefile. To build this project using NMAKE,
+!MESSAGE use the Export Makefile command and run
+!MESSAGE
+!MESSAGE NMAKE /f "Test.mak".
+!MESSAGE
+!MESSAGE You can specify a configuration when running NMAKE
+!MESSAGE by defining the macro CFG on the command line. For example:
+!MESSAGE
+!MESSAGE NMAKE /f "Test.mak" CFG="Test - Win32 Release"
+!MESSAGE
+!MESSAGE Possible choices for configuration are:
+!MESSAGE
+!MESSAGE "Test - Win32 Release" (based on "Win32 (x86) External Target")
+!MESSAGE
+
+# Begin Project
+# PROP AllowPerConfigDependencies 0
+# PROP Scc_ProjName ""
+# PROP Scc_LocalPath ""
+
+!IF "$(CFG)" == "Test - Win32 Release"
+
+# PROP BASE Use_MFC 0
+# PROP BASE Use_Debug_Libraries 0
+# PROP BASE Output_Dir ""
+# PROP BASE Intermediate_Dir ""
+# PROP BASE Cmd_Line "echo Starting SCons && "<PYTHON>" -c "<SCONS_SCRIPT_MAIN>" -C "<WORKPATH>" -f SConstruct "Test.exe""
+# PROP BASE Rebuild_Opt "-c && echo Starting SCons && "<PYTHON>" -c "<SCONS_SCRIPT_MAIN>" -C "<WORKPATH>" -f SConstruct "Test.exe""
+# PROP BASE Target_File "Test.exe"
+# PROP BASE Bsc_Name ""
+# PROP BASE Target_Dir ""
+# PROP Use_MFC 0
+# PROP Use_Debug_Libraries 0
+# PROP Output_Dir ""
+# PROP Intermediate_Dir ""
+# PROP Cmd_Line "echo Starting SCons && "<PYTHON>" -c "<SCONS_SCRIPT_MAIN>" -C "<WORKPATH>" -f SConstruct "Test.exe""
+# PROP Rebuild_Opt "-c && echo Starting SCons && "<PYTHON>" -c "<SCONS_SCRIPT_MAIN>" -C "<WORKPATH>" -f SConstruct "Test.exe""
+# PROP Target_File "Test.exe"
+# PROP Bsc_Name ""
+# PROP Target_Dir ""
+
+!ENDIF
+
+# Begin Target
+
+# Name "Test - Win32 Release"
+
+!IF "$(CFG)" == "Test - Win32 Release"
+
+!ENDIF
+
+# Begin Group "Header Files"
+
+# PROP Default_Filter "h;hpp;hxx;hm;inl"
+# Begin Source File
+
+SOURCE="sdk.h"
+# End Source File
+# End Group
+# Begin Group "Local Headers"
+
+# PROP Default_Filter "h;hpp;hxx;hm;inl"
+# Begin Source File
+
+SOURCE="test.h"
+# End Source File
+# End Group
+# Begin Group "Other Files"
+
+# PROP Default_Filter ""
+# Begin Source File
+
+SOURCE="readme.txt"
+# End Source File
+# End Group
+# Begin Group "Resource Files"
+
+# PROP Default_Filter "r;rc;ico;cur;bmp;dlg;rc2;rct;bin;cnt;rtf;gif;jpg;jpeg;jpe"
+# Begin Source File
+
+SOURCE="test.rc"
+# End Source File
+# End Group
+# Begin Group "Source Files"
+
+# PROP Default_Filter "cpp;c;cxx;l;y;def;odl;idl;hpj;bat"
+# Begin Source File
+
+SOURCE="test.c"
+# End Source File
+# End Group
+# Begin Source File
+
+SOURCE="<SCONSCRIPT>"
+# End Source File
+# End Target
+# End Project
+'''
+
+expected_dswfile_6_0 = '''\
+Microsoft Developer Studio Workspace File, Format Version 6.00
+# WARNING: DO NOT EDIT OR DELETE THIS WORKSPACE FILE!
+
+###############################################################################
+
+Project: "Test"="Test.dsp" - Package Owner=<4>
+
+Package=<5>
+{{{
+}}}
+
+Package=<4>
+{{{
+}}}
+
+###############################################################################
+
+Global:
+
+Package=<5>
+{{{
+}}}
+
+Package=<3>
+{{{
+}}}
+
+###############################################################################
+'''
+
+SConscript_contents_6_0 = """\
+env=Environment(platform='win32', tools=['msvs'],
+ MSVS_VERSION='6.0',HOST_ARCH='%(HOST_ARCH)s')
+
+testsrc = ['test.c']
+testincs = ['sdk.h']
+testlocalincs = ['test.h']
+testresources = ['test.rc']
+testmisc = ['readme.txt']
+
+env.MSVSProject(target = 'Test.dsp',
+ srcs = testsrc,
+ incs = testincs,
+ localincs = testlocalincs,
+ resources = testresources,
+ misc = testmisc,
+ buildtarget = 'Test.exe',
+ variant = 'Release')
+"""
+
+
+
+expected_slnfile_7_0 = """\
+Microsoft Visual Studio Solution File, Format Version 7.00
+Project("{8BC9CEB8-8B4A-11D0-8D11-00A0C91BC942}") = "Test", "Test.vcproj", "<PROJECT_GUID>"
+EndProject
+Global
+<SCC_SLN_INFO>
+\tGlobalSection(SolutionConfiguration) = preSolution
+\t\tConfigName.0 = Release
+\tEndGlobalSection
+\tGlobalSection(ProjectDependencies) = postSolution
+\tEndGlobalSection
+\tGlobalSection(ProjectConfiguration) = postSolution
+\t\t<PROJECT_GUID>.Release.ActiveCfg = Release|Win32
+\t\t<PROJECT_GUID>.Release.Build.0 = Release|Win32
+\tEndGlobalSection
+\tGlobalSection(ExtensibilityGlobals) = postSolution
+\tEndGlobalSection
+\tGlobalSection(ExtensibilityAddIns) = postSolution
+\tEndGlobalSection
+EndGlobal
+"""
+
+expected_vcprojfile_7_0 = """\
+<?xml version="1.0" encoding="Windows-1252"?>
+<VisualStudioProject
+\tProjectType="Visual C++"
+\tVersion="7.00"
+\tName="Test"
+\tProjectGUID="<PROJECT_GUID>"
+<SCC_VCPROJ_INFO>
+\tKeyword="MakeFileProj">
+\t<Platforms>
+\t\t<Platform
+\t\t\tName="Win32"/>
+\t</Platforms>
+\t<Configurations>
+\t\t<Configuration
+\t\t\tName="Release|Win32"
+\t\t\tOutputDirectory=""
+\t\t\tIntermediateDirectory=""
+\t\t\tConfigurationType="0"
+\t\t\tUseOfMFC="0"
+\t\t\tATLMinimizesCRunTimeLibraryUsage="FALSE">
+\t\t\t<Tool
+\t\t\t\tName="VCNMakeTool"
+\t\t\t\tBuildCommandLine="echo Starting SCons &amp;&amp; &quot;<PYTHON>&quot; -c &quot;<SCONS_SCRIPT_MAIN_XML>&quot; -C &quot;<WORKPATH>&quot; -f SConstruct &quot;Test.exe&quot;"
+\t\t\t\tReBuildCommandLine="echo Starting SCons &amp;&amp; &quot;<PYTHON>&quot; -c &quot;<SCONS_SCRIPT_MAIN_XML>&quot; -C &quot;<WORKPATH>&quot; -f SConstruct &quot;Test.exe&quot;"
+\t\t\t\tCleanCommandLine="echo Starting SCons &amp;&amp; &quot;<PYTHON>&quot; -c &quot;<SCONS_SCRIPT_MAIN_XML>&quot; -C &quot;<WORKPATH>&quot; -f SConstruct -c &quot;Test.exe&quot;"
+\t\t\t\tOutput="Test.exe"/>
+\t\t</Configuration>
+\t</Configurations>
+\t<Files>
+\t\t<Filter
+\t\t\tName="Header Files"
+\t\t\tFilter="h;hpp;hxx;hm;inl">
+\t\t\t<File
+\t\t\t\tRelativePath="sdk.h">
+\t\t\t</File>
+\t\t</Filter>
+\t\t<Filter
+\t\t\tName="Local Headers"
+\t\t\tFilter="h;hpp;hxx;hm;inl">
+\t\t\t<File
+\t\t\t\tRelativePath="test.h">
+\t\t\t</File>
+\t\t</Filter>
+\t\t<Filter
+\t\t\tName="Other Files"
+        includes = re.findall('<include.*?>(.*?)</include>', ifp.read())
+        for incFile in includes:
+            # this is valid for ui.h files, at least
+            if os.path.exists(incFile):
+                ofp.write('#include "' + incFile + '"\\n')
+\t\t<Filter
+\t\t\tName="Resource Files"
+\t\t\tFilter="r;rc;ico;cur;bmp;dlg;rc2;rct;bin;cnt;rtf;gif;jpg;jpeg;jpe">
+\t\t\t<File
+\t\t\t\tRelativePath="test.rc">
+\t\t\t</File>
+\t\t</Filter>
+\t\t<Filter
+\t\t\tName="Source Files"
+\t\t\tFilter="cpp;c;cxx;l;y;def;odl;idl;hpj;bat">
+\t\t\t<File
+\t\t\t\tRelativePath="test1.cpp">
+\t\t\t</File>
+\t\t\t<File
+\t\t\t\tRelativePath="test2.cpp">
+\t\t\t</File>
+\t\t</Filter>
+\t\t<File
+\t\t\tRelativePath="<SCONSCRIPT>">
+\t\t</File>
+\t</Files>
+\t<Globals>
+\t</Globals>
+</VisualStudioProject>
+"""
+
+SConscript_contents_7_0 = """\
+env=Environment(platform='win32', tools=['msvs'],
+ MSVS_VERSION='7.0',HOST_ARCH='%(HOST_ARCH)s')
+
+testsrc = ['test1.cpp', 'test2.cpp']
+testincs = ['sdk.h']
+testlocalincs = ['test.h']
+testresources = ['test.rc']
+testmisc = ['readme.txt']
+
+env.MSVSProject(target = 'Test.vcproj',
+ slnguid = '{SLNGUID}',
+ srcs = testsrc,
+ incs = testincs,
+            place = self.workpath(*place)
+ resources = testresources,
+ misc = testmisc,
+ buildtarget = 'Test.exe',
+ variant = 'Release')
+"""
+
+
+
+expected_slnfile_7_1 = """\
+Microsoft Visual Studio Solution File, Format Version 8.00
+Project("{8BC9CEB8-8B4A-11D0-8D11-00A0C91BC942}") = "Test", "Test.vcproj", "<PROJECT_GUID>"
+\tProjectSection(ProjectDependencies) = postProject
+\tEndProjectSection
+EndProject
+Global
+<SCC_SLN_INFO>
+\tGlobalSection(SolutionConfiguration) = preSolution
+\t\tConfigName.0 = Release
+\tEndGlobalSection
+\tGlobalSection(ProjectDependencies) = postSolution
+\tEndGlobalSection
+\tGlobalSection(ProjectConfiguration) = postSolution
+\t\t<PROJECT_GUID>.Release.ActiveCfg = Release|Win32
+\t\t<PROJECT_GUID>.Release.Build.0 = Release|Win32
+\tEndGlobalSection
+\tGlobalSection(ExtensibilityGlobals) = postSolution
+\tEndGlobalSection
+\tGlobalSection(ExtensibilityAddIns) = postSolution
+\tEndGlobalSection
+EndGlobal
+"""
+
+expected_vcprojfile_7_1 = """\
+<?xml version="1.0" encoding="Windows-1252"?>
+<VisualStudioProject
+\tProjectType="Visual C++"
+\tVersion="7.10"
+\tName="Test"
+\tProjectGUID="<PROJECT_GUID>"
+<SCC_VCPROJ_INFO>
+\tKeyword="MakeFileProj">
+\t<Platforms>
+\t\t<Platform
+\t\t\tName="Win32"/>
+\t</Platforms>
+\t<Configurations>
+\t\t<Configuration
+\t\t\tName="Release|Win32"
+\t\t\tOutputDirectory=""
+\t\t\tIntermediateDirectory=""
+\t\t\tConfigurationType="0"
+\t\t\tUseOfMFC="0"
+\t\t\tATLMinimizesCRunTimeLibraryUsage="FALSE">
+\t\t\t<Tool
+\t\t\t\tName="VCNMakeTool"
+\t\t\t\tBuildCommandLine="echo Starting SCons &amp;&amp; &quot;<PYTHON>&quot; -c &quot;<SCONS_SCRIPT_MAIN_XML>&quot; -C &quot;<WORKPATH>&quot; -f SConstruct &quot;Test.exe&quot;"
+\t\t\t\tReBuildCommandLine="echo Starting SCons &amp;&amp; &quot;<PYTHON>&quot; -c &quot;<SCONS_SCRIPT_MAIN_XML>&quot; -C &quot;<WORKPATH>&quot; -f SConstruct &quot;Test.exe&quot;"
+\t\t\t\tCleanCommandLine="echo Starting SCons &amp;&amp; &quot;<PYTHON>&quot; -c &quot;<SCONS_SCRIPT_MAIN_XML>&quot; -C &quot;<WORKPATH>&quot; -f SConstruct -c &quot;Test.exe&quot;"
+\t\t\t\tOutput="Test.exe"/>
+\t\t</Configuration>
+\t</Configurations>
+\t<References>
+\t</References>
+\t<Files>
+\t\t<Filter
+\t\t\tName="Header Files"
+\t\t\tFilter="h;hpp;hxx;hm;inl">
+\t\t\t<File
+\t\t\t\tRelativePath="sdk.h">
+\t\t\t</File>
+\t\t</Filter>
+\t\t<Filter
+\t\t\tName="Local Headers"
+\t\t\tFilter="h;hpp;hxx;hm;inl">
+\t\t\t<File
+\t\t\t\tRelativePath="test.h">
+\t\t\t</File>
+\t\t</Filter>
+\t\t<Filter
+\t\t\tName="Other Files"
+\t\t\tFilter="">
+\t\t\t<File
+\t\t\t\tRelativePath="readme.txt">
+\t\t\t</File>
+\t\t</Filter>
+\t\t<Filter
+\t\t\tName="Resource Files"
+\t\t\tFilter="r;rc;ico;cur;bmp;dlg;rc2;rct;bin;cnt;rtf;gif;jpg;jpeg;jpe">
+\t\t\t<File
+\t\t\t\tRelativePath="test.rc">
+\t\t\t</File>
+\t\t</Filter>
+\t\t<Filter
+\t\t\tName="Source Files"
+\t\t\tFilter="cpp;c;cxx;l;y;def;odl;idl;hpj;bat">
+\t\t\t<File
+\t\t\t\tRelativePath="test1.cpp">
+\t\t\t</File>
+\t\t\t<File
+\t\t\t\tRelativePath="test2.cpp">
+\t\t\t</File>
+\t\t</Filter>
+\t\t<File
+\t\t\tRelativePath="<SCONSCRIPT>">
+\t\t</File>
+\t</Files>
+\t<Globals>
+\t</Globals>
+</VisualStudioProject>
+"""
+
+SConscript_contents_7_1 = """\
+env=Environment(platform='win32', tools=['msvs'],
+ MSVS_VERSION='7.1',HOST_ARCH='%(HOST_ARCH)s')
+
+testsrc = ['test1.cpp', 'test2.cpp']
+testincs = ['sdk.h']
+testlocalincs = ['test.h']
+testresources = ['test.rc']
+testmisc = ['readme.txt']
+
+env.MSVSProject(target = 'Test.vcproj',
+ slnguid = '{SLNGUID}',
+ srcs = testsrc,
+ incs = testincs,
+ localincs = testlocalincs,
+ resources = testresources,
+ misc = testmisc,
+ buildtarget = 'Test.exe',
+ variant = 'Release')
+"""
+
+
+expected_slnfile_fmt = """\
+Microsoft Visual Studio Solution File, Format Version %(FORMAT_VERSION)s
+# Visual Studio %(VS_NUMBER)s
+Project("{8BC9CEB8-8B4A-11D0-8D11-00A0C91BC942}") = "%(PROJECT_NAME)s", "%(PROJECT_FILE)s", "<PROJECT_GUID>"
+EndProject
+Global
+<SCC_SLN_INFO>
+\tGlobalSection(SolutionConfigurationPlatforms) = preSolution
+\t\tRelease|Win32 = Release|Win32
+\tEndGlobalSection
+\tGlobalSection(ProjectConfigurationPlatforms) = postSolution
+\t\t<PROJECT_GUID>.Release|Win32.ActiveCfg = Release|Win32
+\t\t<PROJECT_GUID>.Release|Win32.Build.0 = Release|Win32
+\tEndGlobalSection
+\tGlobalSection(SolutionProperties) = preSolution
+\t\tHideSolutionNode = FALSE
+\tEndGlobalSection
+EndGlobal
+"""
+
+expected_vcprojfile_fmt = """\
+<?xml version="1.0" encoding="Windows-1252"?>
+<VisualStudioProject
+\tProjectType="Visual C++"
+\tVersion="%(TOOLS_VERSION)s"
+\tName="Test"
+\tProjectGUID="<PROJECT_GUID>"
+\tRootNamespace="Test"
+<SCC_VCPROJ_INFO>
+\tKeyword="MakeFileProj">
+\t<Platforms>
+\t\t<Platform
+\t\t\tName="Win32"/>
+\t</Platforms>
+\t<ToolFiles>
+\t</ToolFiles>
+\t<Configurations>
+\t\t<Configuration
+\t\t\tName="Release|Win32"
+\t\t\tConfigurationType="0"
+\t\t\tUseOfMFC="0"
+\t\t\tATLMinimizesCRunTimeLibraryUsage="false"
+\t\t\t>
+\t\t\t<Tool
+\t\t\t\tName="VCNMakeTool"
+\t\t\t\tBuildCommandLine="echo Starting SCons &amp;&amp; &quot;<PYTHON>&quot; -c &quot;<SCONS_SCRIPT_MAIN_XML>&quot; -C &quot;<WORKPATH>&quot; -f SConstruct &quot;Test.exe&quot;"
+\t\t\t\tReBuildCommandLine="echo Starting SCons &amp;&amp; &quot;<PYTHON>&quot; -c &quot;<SCONS_SCRIPT_MAIN_XML>&quot; -C &quot;<WORKPATH>&quot; -f SConstruct &quot;Test.exe&quot;"
+\t\t\t\tCleanCommandLine="echo Starting SCons &amp;&amp; &quot;<PYTHON>&quot; -c &quot;<SCONS_SCRIPT_MAIN_XML>&quot; -C &quot;<WORKPATH>&quot; -f SConstruct -c &quot;Test.exe&quot;"
+\t\t\t\tOutput="Test.exe"
+\t\t\t\tPreprocessorDefinitions="DEF1;DEF2;DEF3=1234"
+\t\t\t\tIncludeSearchPath="%(INCLUDE_DIRS)s"
+\t\t\t\tForcedIncludes=""
+\t\t\t\tAssemblySearchPath=""
+\t\t\t\tForcedUsingAssemblies=""
+\t\t\t\tCompileAsManaged=""
+\t\t\t/>
+\t\t</Configuration>
+\t</Configurations>
+\t<References>
+\t</References>
+\t<Files>
+\t\t<Filter
+\t\t\tName="Header Files"
+\t\t\tFilter="h;hpp;hxx;hm;inl">
+\t\t\t<File
+\t\t\t\tRelativePath="sdk_dir\\sdk.h">
+\t\t\t</File>
+\t\t</Filter>
+\t\t<Filter
+\t\t\tName="Local Headers"
+\t\t\tFilter="h;hpp;hxx;hm;inl">
+\t\t\t<File
+\t\t\t\tRelativePath="test.h">
+\t\t\t</File>
+\t\t</Filter>
+\t\t<Filter
+\t\t\tName="Other Files"
+\t\t\tFilter="">
+\t\t\t<File
+\t\t\t\tRelativePath="readme.txt">
+\t\t\t</File>
+\t\t</Filter>
+\t\t<Filter
+\t\t\tName="Resource Files"
+\t\t\tFilter="r;rc;ico;cur;bmp;dlg;rc2;rct;bin;cnt;rtf;gif;jpg;jpeg;jpe">
+\t\t\t<File
+\t\t\t\tRelativePath="test.rc">
+\t\t\t</File>
+\t\t</Filter>
+\t\t<Filter
+\t\t\tName="Source Files"
+\t\t\tFilter="cpp;c;cxx;l;y;def;odl;idl;hpj;bat">
+\t\t\t<File
+\t\t\t\tRelativePath="test1.cpp">
+\t\t\t</File>
+\t\t\t<File
+\t\t\t\tRelativePath="test2.cpp">
+\t\t\t</File>
+\t\t</Filter>
+\t\t<File
+\t\t\tRelativePath="<SCONSCRIPT>">
+\t\t</File>
+\t</Files>
+\t<Globals>
+\t</Globals>
+</VisualStudioProject>
+"""
+
+expected_vcxprojfile_fmt = """\
+<?xml version="1.0" encoding="utf-8"?>
+<Project DefaultTargets="Build" ToolsVersion="%(TOOLS_VERSION)s" xmlns="http://schemas.microsoft.com/developer/msbuild/2003">
+\t<ItemGroup Label="ProjectConfigurations">
+\t\t<ProjectConfiguration Include="Release|Win32">
+\t\t\t<Configuration>Release</Configuration>
+\t\t\t<Platform>Win32</Platform>
+\t\t</ProjectConfiguration>
+\t</ItemGroup>
+\t<PropertyGroup Label="Globals">
+\t\t<ProjectGuid>{39A97E1F-1A52-8954-A0B1-A10A8487545E}</ProjectGuid>
+<SCC_VCPROJ_INFO>
+\t\t<RootNamespace>Test</RootNamespace>
+\t\t<Keyword>MakeFileProj</Keyword>
+\t\t<VCProjectUpgraderObjectName>NoUpgrade</VCProjectUpgraderObjectName>
+\t</PropertyGroup>
+\t<Import Project="$(VCTargetsPath)\\Microsoft.Cpp.Default.props" />
+\t<PropertyGroup Condition="'$(Configuration)|$(Platform)'=='Release|Win32'" Label="Configuration">
+\t\t<ConfigurationType>Makefile</ConfigurationType>
+\t\t<UseOfMfc>false</UseOfMfc>
+\t\t<PlatformToolset>v100</PlatformToolset>
+\t</PropertyGroup>
+\t<Import Project="$(VCTargetsPath)\\Microsoft.Cpp.props" />
+\t<ImportGroup Label="ExtensionSettings">
+\t</ImportGroup>
+\t<ImportGroup Condition="'$(Configuration)|$(Platform)'=='Release|Win32'" Label="PropertySheets">
+\t\t<Import Project="$(UserRootDir)\\Microsoft.Cpp.$(Platform).user.props" Condition="exists('$(UserRootDir)\\Microsoft.Cpp.$(Platform).user.props')" Label="LocalAppDataPlatform" />
+\t</ImportGroup>
+\t<PropertyGroup Label="UserMacros" />
+\t<PropertyGroup>
+\t<_ProjectFileVersion>10.0.30319.1</_ProjectFileVersion>
+\t\t<NMakeBuildCommandLine Condition="'$(Configuration)|$(Platform)'=='Release|Win32'">echo Starting SCons &amp;&amp; &quot;<PYTHON>&quot; -c &quot;<SCONS_SCRIPT_MAIN_XML>&quot; -C &quot;<WORKPATH>&quot; -f SConstruct &quot;Test.exe&quot;</NMakeBuildCommandLine>
+\t\t<NMakeReBuildCommandLine Condition="'$(Configuration)|$(Platform)'=='Release|Win32'">echo Starting SCons &amp;&amp; &quot;<PYTHON>&quot; -c &quot;<SCONS_SCRIPT_MAIN_XML>&quot; -C &quot;<WORKPATH>&quot; -f SConstruct &quot;Test.exe&quot;</NMakeReBuildCommandLine>
+\t\t<NMakeCleanCommandLine Condition="'$(Configuration)|$(Platform)'=='Release|Win32'">echo Starting SCons &amp;&amp; &quot;<PYTHON>&quot; -c &quot;<SCONS_SCRIPT_MAIN_XML>&quot; -C &quot;<WORKPATH>&quot; -f SConstruct -c &quot;Test.exe&quot;</NMakeCleanCommandLine>
+\t\t<NMakeOutput Condition="'$(Configuration)|$(Platform)'=='Release|Win32'">Test.exe</NMakeOutput>
+\t\t<NMakePreprocessorDefinitions Condition="'$(Configuration)|$(Platform)'=='Release|Win32'">DEF1;DEF2;DEF3=1234</NMakePreprocessorDefinitions>
+\t\t<NMakeIncludeSearchPath Condition="'$(Configuration)|$(Platform)'=='Release|Win32'">%(INCLUDE_DIRS)s</NMakeIncludeSearchPath>
+\t\t<NMakeForcedIncludes Condition="'$(Configuration)|$(Platform)'=='Release|Win32'">$(NMakeForcedIncludes)</NMakeForcedIncludes>
+\t\t<NMakeAssemblySearchPath Condition="'$(Configuration)|$(Platform)'=='Release|Win32'">$(NMakeAssemblySearchPath)</NMakeAssemblySearchPath>
+\t\t<NMakeForcedUsingAssemblies Condition="'$(Configuration)|$(Platform)'=='Release|Win32'">$(NMakeForcedUsingAssemblies)</NMakeForcedUsingAssemblies>
+\t</PropertyGroup>
+\t<ItemGroup>
+\t\t<ClInclude Include="sdk_dir\\sdk.h" />
+\t</ItemGroup>
+\t<ItemGroup>
+\t\t<ClInclude Include="test.h" />
+\t</ItemGroup>
+\t<ItemGroup>
+\t\t<None Include="readme.txt" />
+\t</ItemGroup>
+\t<ItemGroup>
+\t\t<None Include="test.rc" />
+\t</ItemGroup>
+\t<ItemGroup>
+\t\t<ClCompile Include="test1.cpp" />
+\t\t<ClCompile Include="test2.cpp" />
+\t</ItemGroup>
+\t<ItemGroup>
+\t\t<None Include="SConstruct" />
+\t</ItemGroup>
+\t<Import Project="$(VCTargetsPath)\\Microsoft.Cpp.targets" />
+\t<ImportGroup Label="ExtensionTargets">
+\t</ImportGroup>
+</Project>
+"""
+
+SConscript_contents_fmt = """\
+env=Environment(platform='win32', tools=['msvs'], MSVS_VERSION='%(MSVS_VERSION)s',
+ CPPDEFINES=['DEF1', 'DEF2',('DEF3','1234')],
+ CPPPATH=['inc1', 'inc2'],
+ HOST_ARCH='%(HOST_ARCH)s')
+
+testsrc = ['test1.cpp', 'test2.cpp']
+testincs = [r'sdk_dir\\sdk.h']
+testlocalincs = ['test.h']
+testresources = ['test.rc']
+testmisc = ['readme.txt']
+
+env.MSVSProject(target = '%(PROJECT_FILE)s',
+ slnguid = '{SLNGUID}',
+ srcs = testsrc,
+ incs = testincs,
+ localincs = testlocalincs,
+ resources = testresources,
+ misc = testmisc,
+ buildtarget = 'Test.exe',
+ variant = 'Release')
+"""
+
+
+def get_tested_proj_file_vc_versions():
+ """
+ Returns all MSVC versions that we want to test project file creation for.
+ """
+ return ['8.0', '9.0', '10.0', '11.0', '12.0', '14.0', '14.1', '14.2']
+
+
+class TestSConsMSVS(TestSCons):
+ """Subclass for testing MSVS-specific portions of SCons."""
+
+ def msvs_versions(self):
+ if not hasattr(self, '_msvs_versions'):
+
+ # Determine the SCons version and the versions of the MSVS
+ # environments installed on the test machine.
+ #
+ # We do this by executing SCons with an SConstruct file
+ # (piped on stdin) that spits out Python assignments that
+ # we can just exec(). We construct the SCons.__"version"__
+ # string in the input here so that the SCons build itself
+ # doesn't fill it in when packaging SCons.
+ input = """\
+import SCons
+import SCons.Tool.MSCommon
+print("self.scons_version =%%s"%%repr(SCons.__%s__))
+print("self._msvs_versions =%%s"%%str(SCons.Tool.MSCommon.query_versions()))
+""" % 'version'
+
+ self.run(arguments = '-n -q -Q -f -', stdin = input)
+ exec(self.stdout())
+
+ return self._msvs_versions
+
+ def vcproj_sys_path(self, fname):
+        """
+        Insert the path to the local SCons engine directory at the front
+        of the sys.path list embedded in the generated project file fname.
+        """
+ orig = 'sys.path = [ join(sys'
+
+ enginepath = repr(os.path.join(self._cwd, '..', 'engine'))
+ replace = 'sys.path = [ %s, join(sys' % enginepath
+
+ contents = self.read(fname, mode='r')
+ contents = contents.replace(orig, replace)
+ self.write(fname, contents)
+
+ def msvs_substitute(self, input, msvs_ver,
+ subdir=None, sconscript=None,
+ python=None,
+ project_guid=None,
+ vcproj_sccinfo='', sln_sccinfo=''):
+ if not hasattr(self, '_msvs_versions'):
+ self.msvs_versions()
+
+ if subdir:
+ workpath = self.workpath(subdir)
+ else:
+ workpath = self.workpath()
+
+ if sconscript is None:
+ sconscript = self.workpath('SConstruct')
+
+ if python is None:
+ python = sys.executable
+
+ if project_guid is None:
+ project_guid = "{E5466E26-0003-F18B-8F8A-BCD76C86388D}"
+
+ if 'SCONS_LIB_DIR' in os.environ:
+ exec_script_main = "from os.path import join; import sys; sys.path = [ r'%s' ] + sys.path; import SCons.Script; SCons.Script.main()" % os.environ['SCONS_LIB_DIR']
+ else:
+ exec_script_main = "from os.path import join; import sys; sys.path = [ join(sys.prefix, 'Lib', 'site-packages', 'scons-%s'), join(sys.prefix, 'scons-%s'), join(sys.prefix, 'Lib', 'site-packages', 'scons'), join(sys.prefix, 'scons') ] + sys.path; import SCons.Script; SCons.Script.main()" % (self.scons_version, self.scons_version)
+ exec_script_main_xml = exec_script_main.replace("'", "&apos;")
+
+ result = input.replace(r'<WORKPATH>', workpath)
+ result = result.replace(r'<PYTHON>', python)
+ result = result.replace(r'<SCONSCRIPT>', sconscript)
+ result = result.replace(r'<SCONS_SCRIPT_MAIN>', exec_script_main)
+ result = result.replace(r'<SCONS_SCRIPT_MAIN_XML>', exec_script_main_xml)
+ result = result.replace(r'<PROJECT_GUID>', project_guid)
+ result = result.replace('<SCC_VCPROJ_INFO>\n', vcproj_sccinfo)
+ result = result.replace('<SCC_SLN_INFO>\n', sln_sccinfo)
+ return result
+
+ def get_msvs_executable(self, version):
+ """Returns a full path to the executable (MSDEV or devenv)
+ for the specified version of Visual Studio.
+ """
+ from SCons.Tool.MSCommon import get_vs_by_version
+
+ msvs = get_vs_by_version(version)
+ if not msvs:
+ return None
+ return msvs.get_executable()
+
+ def run(self, *args, **kw):
+ """
+ Suppress MSVS deprecation warnings.
+ """
+ save_sconsflags = os.environ.get('SCONSFLAGS')
+ if save_sconsflags:
+ sconsflags = [save_sconsflags]
+ else:
+ sconsflags = []
+ sconsflags = sconsflags + ['--warn=no-deprecated']
+ os.environ['SCONSFLAGS'] = ' '.join(sconsflags)
+ try:
+ result = TestSCons.run(self, *args, **kw)
+ finally:
+ os.environ['SCONSFLAGS'] = save_sconsflags or ''
+ return result
+
+ def get_vs_host_arch(self):
+        """ Get a platform architecture acceptable to MSVS and/or the SDK.
+ """
+
+        # Dict to canonicalize the arch
+ _ARCH_TO_CANONICAL = {
+ "x86": "x86",
+ "amd64": "amd64",
+ "i386": "x86",
+ "emt64": "amd64",
+ "x86_64": "amd64",
+ "itanium": "ia64",
+ "ia64": "ia64",
+ }
+
+ host_platform = platform.machine()
+ # TODO(2.5): the native Python platform.machine() function returns
+ # '' on all Python versions before 2.6, after which it also uses
+ # PROCESSOR_ARCHITECTURE.
+ if not host_platform:
+ host_platform = os.environ.get('PROCESSOR_ARCHITECTURE', '')
+
+ try:
+ host = _ARCH_TO_CANONICAL[host_platform]
+ except KeyError as e:
+ # Default to x86 for all other platforms
+ host = 'x86'
+
+ return host
+
+ def validate_msvs_file(self, file):
+ try:
+ x = ElementTree.parse(file)
+ except:
+ print("--------------------------------------------------------------")
+ print("--------------------------------------------------------------")
+ print(traceback.format_exc())
+ print("Failed to validate xml in MSVS file: ")
+ print(file)
+ print("--------------------------------------------------------------")
+ print("--------------------------------------------------------------")
+ self.fail_test()
+
+ def parse_vc_version(self, vc_version):
+ """
+ Parses the string vc_version to determine the major and minor version
+ included.
+ """
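+        # e.g. parse_vc_version('14.1') -> (14, 1); parse_vc_version('9.0') -> (9, 0)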
+ components = vc_version.split('.')
+ major = int(components[0])
+ minor = 0 if len(components) < 2 else int(components[1])
+ return major, minor
+
+ def _get_solution_file_format_version(self, vc_version):
+ """
+ Returns the Visual Studio format version expected in the .sln file.
+ """
+ major, _ = self.parse_vc_version(vc_version)
+ if major == 8:
+ return '9.00'
+ elif major == 9:
+ return '10.00'
+ elif major == 10:
+ return '11.00'
+ elif major > 10:
+ return '12.00'
+ else:
+ raise SCons.Errors.UserError('Received unexpected VC version %s' % vc_version)
+
+ def _get_solution_file_vs_number(self, vc_version):
+ """
+ Returns the Visual Studio number expected in the .sln file.
+ """
+ major, minor = self.parse_vc_version(vc_version)
+ if major == 8:
+ return '2005'
+ elif major == 9:
+ return '2008'
+ if major == 10:
+ return '2010'
+ elif major == 11:
+ return '11'
+ elif major == 12:
+ return '14'
+ elif major == 14 and (minor == 0 or minor == 1):
+ # Visual Studio 2015 and 2017 both use 15 in this entry.
+ return '15'
+ elif major == 14 and minor == 2:
+ return '16'
+ else:
+ raise SCons.Errors.UserError('Received unexpected VC version %s' % vc_version)
+
+ def _get_vcxproj_file_tools_version(self, vc_version):
+ """
+ Returns the version entry expected in the project file.
+        For .vcxproj files, this goes in ToolsVersion.
+ For .vcproj files, this goes in Version.
+ """
+ major, minor = self.parse_vc_version(vc_version)
+ if major == 8:
+ # Version="8.00"
+ return '8.00'
+ elif major == 9:
+ # Version="9.00"
+ return '9.00'
+ elif major < 14:
+ # ToolsVersion='4.0'
+ return '4.0'
+ elif major == 14 and minor == 0:
+ # ToolsVersion='14.0'
+ return '14.0'
+ elif major == 14 and minor == 1:
+ # ToolsVersion='15.0'
+ return '15.0'
+ elif vc_version == '14.2':
+ # ToolsVersion='16'
+ return '16.0'
+ else:
+ raise SCons.Errors.UserError('Received unexpected VC version %s' % vc_version)
+
+ def _get_vcxproj_file_cpp_path(self, dirs):
+ """Returns the include paths expected in the .vcxproj file"""
+ return ';'.join([self.workpath(dir) for dir in dirs])
+
+ def get_expected_sln_file_contents(self, vc_version, project_file):
+ """
+ Returns the expected .sln file contents.
+ Currently this function only supports the newer VC versions that use
+ the .vcxproj file format.
+ """
+ return expected_slnfile_fmt % {
+ 'FORMAT_VERSION': self._get_solution_file_format_version(vc_version),
+ 'VS_NUMBER': self._get_solution_file_vs_number(vc_version),
+ 'PROJECT_NAME': project_file.split('.')[0],
+ 'PROJECT_FILE': project_file,
+ }
+
+ def get_expected_proj_file_contents(self, vc_version, dirs, project_file):
+ """Returns the expected .vcxproj file contents"""
+ if project_file.endswith('.vcxproj'):
+ fmt = expected_vcxprojfile_fmt
+ else:
+ fmt = expected_vcprojfile_fmt
+ return fmt % {
+ 'TOOLS_VERSION': self._get_vcxproj_file_tools_version(vc_version),
+ 'INCLUDE_DIRS': self._get_vcxproj_file_cpp_path(dirs),
+ }
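+
+    # Rough sketch of how a project-generation test consumes the helpers
+    # above (the file and directory names are illustrative):
+    #
+    #   expected = test.get_expected_proj_file_contents(
+    #       vc_version, ['inc1', 'inc2'], 'Test.vcxproj')
+    #   expected = test.msvs_substitute(expected, vc_version)
+    #   test.must_match('Test.vcxproj', expected)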
+
+ def get_expected_sconscript_file_contents(self, vc_version, project_file):
+ return SConscript_contents_fmt % {
+ 'HOST_ARCH': self.get_vs_host_arch(),
+ 'MSVS_VERSION': vc_version,
+ 'PROJECT_FILE': project_file,
+ }
+
+# Local Variables:
+# tab-width:4
+# indent-tabs-mode:nil
+# End:
+# vim: set expandtab tabstop=4 shiftwidth=4:
diff --git a/testing/framework/TestSCons_time.py b/testing/framework/TestSCons_time.py
new file mode 100644
index 0000000..547e264
--- /dev/null
+++ b/testing/framework/TestSCons_time.py
@@ -0,0 +1,361 @@
+"""
+TestSCons_time.py: a testing framework for the scons-time.py script
+
+A TestSCons_time environment object is created via the usual invocation:
+
+ test = TestSCons_time()
+
+TestSCons_time is a subclass of TestCommon, which is in turn a subclass
+of TestCmd, and hence has available all of the methods and attributes
+from those classes, as well as any overridden or additional methods or
+attributes defined in this subclass.
+"""
+
+# Copyright (c) 2001 - 2019 The SCons Foundation
+
+__revision__ = "testing/framework/TestSCons_time.py e724ae812eb96f4858a132f5b8c769724744faf6 2019-07-21 00:04:47 bdeegan"
+
+import os
+import os.path
+import sys
+
+from TestCommon import *
+from TestCommon import __all__
+# some of the scons_time tests may need regex-based matching:
+from TestSCons import search_re, search_re_in_list
+
+__all__.extend(['TestSCons_time',])
+
+SConstruct = """\
+from __future__ import print_function
+import os
+print("SConstruct file directory:", os.getcwd())
+"""
+
+scons_py = """\
+#!/usr/bin/env python
+import os
+import sys
+
+def write_args(fp, args):
+ fp.write(args[0] + '\\n')
+ for arg in args[1:]:
+ fp.write(' ' + arg + '\\n')
+
+write_args(sys.stdout, sys.argv)
+for arg in sys.argv[1:]:
+ if arg[:10] == '--profile=':
+ with open(arg[10:], 'w') as profile:
+ profile.write('--profile\\n')
+ write_args(profile, sys.argv)
+ break
+sys.stdout.write('SCONS_LIB_DIR = ' + os.environ['SCONS_LIB_DIR'] + '\\n')
+with open('SConstruct', 'r') as f:
+ script = f.read()
+exec(script)
+"""
+
+aegis_py = """\
+#!/usr/bin/env python
+import os
+import sys
+
+script_dir = 'src/script'
+if not os.path.exists(script_dir):
+ os.makedirs(script_dir)
+with open(script_dir + '/scons.py', 'w') as f:
+ f.write(r'''%s''')
+""" % scons_py
+
+
+svn_py = """\
+#!/usr/bin/env python
+import os
+import sys
+
+dir = sys.argv[-1]
+script_dir = dir + '/src/script'
+os.makedirs(script_dir)
+with open(script_dir + '/scons.py', 'w') as f:
+ f.write(r'''%s''')
+""" % scons_py
+
+
+git_py = """\
+#!/usr/bin/env python
+import os
+import sys
+
+dir = sys.argv[-1]
+script_dir = dir + '/src/script'
+os.makedirs(script_dir)
+with open(script_dir + '/scons.py', 'w') as f:
+ f.write(r'''%s''')
+""" % scons_py
+
+
+logfile_contents = """\
+Memory before reading SConscript files: 100%(index)s
+Memory after reading SConscript files: 200%(index)s
+Memory before building targets: 300%(index)s
+Memory after building targets: 400%(index)s
+Object counts:
+ pre- post- pre- post-
+ read read build build Class
+ 101%(index)s 102%(index)s 103%(index)s 104%(index)s Action.CommandAction
+ 201%(index)s 202%(index)s 203%(index)s 204%(index)s Action.CommandGeneratorAction
+ 301%(index)s 302%(index)s 303%(index)s 304%(index)s Action.FunctionAction
+ 401%(index)s 402%(index)s 403%(index)s 404%(index)s Action.LazyAction
+ 501%(index)s 502%(index)s 503%(index)s 504%(index)s Action.ListAction
+ 601%(index)s 602%(index)s 603%(index)s 604%(index)s Builder.BuilderBase
+ 701%(index)s 702%(index)s 703%(index)s 704%(index)s Builder.CompositeBuilder
+ 801%(index)s 802%(index)s 803%(index)s 804%(index)s Builder.ListBuilder
+ 901%(index)s 902%(index)s 903%(index)s 904%(index)s Builder.MultiStepBuilder
+ 1001%(index)s 1002%(index)s 1003%(index)s 1004%(index)s Builder.OverrideWarner
+ 1101%(index)s 1102%(index)s 1103%(index)s 1104%(index)s Environment.Base
+ 1201%(index)s 1202%(index)s 1203%(index)s 1204%(index)s Environment.EnvironmentClone
+ 1301%(index)s 1302%(index)s 1303%(index)s 1304%(index)s Environment.OverrideEnvironment
+ 1401%(index)s 1402%(index)s 1403%(index)s 1404%(index)s Executor.Executor
+ 1501%(index)s 1502%(index)s 1503%(index)s 1504%(index)s Node.FS
+ 1601%(index)s 1602%(index)s 1603%(index)s 1604%(index)s Node.FS.Base
+ 1701%(index)s 1702%(index)s 1703%(index)s 1704%(index)s Node.FS.Dir
+ 1801%(index)s 1802%(index)s 1803%(index)s 1804%(index)s Node.FS.File
+ 1901%(index)s 1902%(index)s 1904%(index)s 1904%(index)s Node.FS.RootDir
+ 2001%(index)s 2002%(index)s 2003%(index)s 2004%(index)s Node.Node
+Total build time: 11.123456 seconds
+Total SConscript file execution time: 22.234567 seconds
+Total SCons execution time: 33.345678 seconds
+Total command execution time: 44.456789 seconds
+"""
+
+
+profile_py = """\
+%(body)s
+
+import profile
+
+try: dispatch = profile.Profile.dispatch
+except AttributeError: pass
+else: dispatch['c_exception'] = profile.Profile.trace_dispatch_return
+
+prof = profile.Profile()
+prof.runcall(%(call)s)
+prof.dump_stats(r'%(profile_name)s')
+"""
+
+
+class TestSCons_time(TestCommon):
+ """Class for testing the scons-time script.
+
+ This provides a common place for initializing scons-time tests,
+ eliminating the need to begin every test with the same repeated
+ initializations.
+ """
+
+ def __init__(self, **kw):
+ """Initialize an SCons_time testing object.
+
+ If they're not overridden by keyword arguments, this
+ initializes the object with the following default values:
+
+ program = 'scons-time'
+ interpreter = ['python', '-tt']
+ match = match_exact
+ workdir = ''
+
+ The workdir value means that, by default, a temporary workspace
+ directory is created for a TestSCons_time environment.
+ In addition, this method changes directory (chdir) to the
+ workspace directory, so an explicit "chdir = '.'" on all of the
+ run() method calls is not necessary.
+ """
+
+ self.orig_cwd = os.getcwd()
+ try:
+ script_dir = os.environ['SCONS_SCRIPT_DIR']
+ except KeyError:
+ pass
+ else:
+ os.chdir(script_dir)
+ if 'program' not in kw:
+ p = os.environ.get('SCONS_TIME')
+ if not p:
+ p = 'scons-time'
+ if not os.path.exists(p):
+ p = 'scons-time.py'
+ kw['program'] = p
+
+ if 'interpreter' not in kw:
+ kw['interpreter'] = [python,]
+ if sys.version_info[0] < 3:
+ kw['interpreter'].append('-tt')
+
+ if 'match' not in kw:
+ kw['match'] = match_exact
+
+ if 'workdir' not in kw:
+ kw['workdir'] = ''
+
+ TestCommon.__init__(self, **kw)
+
+ def archive_split(self, path):
+ if path[-7:] == '.tar.gz':
+ return path[:-7], path[-7:]
+ else:
+ return os.path.splitext(path)
+
+ def fake_logfile(self, logfile_name, index=0):
+ self.write(self.workpath(logfile_name), logfile_contents % locals())
+
+ def profile_data(self, profile_name, python_name, call, body):
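+        # Writes a small driver program (profile_py above) that profiles the
+        # given call and dumps the stats to profile_name, then runs it.
+        # Hypothetical example:
+        #   test.profile_data('prof0', 'prof0.py', 'func',
+        #                     'def func():\n    pass\n')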
+ profile_name = self.workpath(profile_name)
+ python_name = self.workpath(python_name)
+ d = {
+ 'profile_name' : profile_name,
+ 'python_name' : python_name,
+ 'call' : call,
+ 'body' : body,
+ }
+ self.write(python_name, profile_py % d)
+ self.run(program = python_name, interpreter = sys.executable)
+
+ def tempdir_re(self, *args):
+ """
+ Returns a regular expression to match a scons-time
+ temporary directory.
+ """
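+        # e.g. tempdir_re('profile.1') yields a pattern roughly equivalent to
+        #   <tempdir>/scons-time-[^<sep>]*/profile.1   (with re escaping applied)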
+ import re
+ import tempfile
+
+ sep = re.escape(os.sep)
+ tempdir = tempfile.gettempdir()
+
+ try:
+ realpath = os.path.realpath
+ except AttributeError:
+ pass
+ else:
+ tempdir = realpath(tempdir)
+
+ args = (tempdir, 'scons-time-',) + args
+ x = os.path.join(*args)
+ x = re.escape(x)
+ x = x.replace('time\\-', 'time\\-[^%s]*' % sep)
+ return x
+
+ def write_fake_aegis_py(self, name):
+ name = self.workpath(name)
+ self.write(name, aegis_py)
+ os.chmod(name, 0o755)
+ return name
+
+ def write_fake_scons_py(self):
+ self.subdir('src', ['src', 'script'])
+ self.write('src/script/scons.py', scons_py)
+
+ def write_fake_svn_py(self, name):
+ name = self.workpath(name)
+ self.write(name, svn_py)
+ os.chmod(name, 0o755)
+ return name
+
+ def write_fake_git_py(self, name):
+ name = self.workpath(name)
+ self.write(name, git_py)
+ os.chmod(name, 0o755)
+ return name
+
+ def write_sample_directory(self, archive, dir, files):
+ dir = self.workpath(dir)
+ for name, content in files:
+ path = os.path.join(dir, name)
+ d, f = os.path.split(path)
+ if not os.path.isdir(d):
+ os.makedirs(d)
+ with open(path, 'w') as f:
+ f.write(content)
+ return dir
+
+ def write_sample_tarfile(self, archive, dir, files):
+ import shutil
+ try:
+ import tarfile
+
+ except ImportError:
+
+ self.skip_test('no tarfile module\n')
+
+ else:
+
+ base, suffix = self.archive_split(archive)
+
+ mode = {
+ '.tar' : 'w',
+ '.tar.gz' : 'w:gz',
+ '.tgz' : 'w:gz',
+ }
+
+ tar = tarfile.open(archive, mode[suffix])
+ for name, content in files:
+ path = os.path.join(dir, name)
+ with open(path, 'wb') as f:
+ f.write(bytearray(content,'utf-8'))
+ tarinfo = tar.gettarinfo(path, path)
+ tarinfo.uid = 111
+ tarinfo.gid = 111
+ tarinfo.uname = 'fake_user'
+ tarinfo.gname = 'fake_group'
+ with open(path, 'rb') as f:
+ tar.addfile(tarinfo, f)
+ tar.close()
+ shutil.rmtree(dir)
+ return self.workpath(archive)
+
+ def write_sample_zipfile(self, archive, dir, files):
+ import shutil
+ try:
+ import zipfile
+ except ImportError:
+
+ sys.stderr.write('no zipfile module\n')
+ self.no_result()
+
+ else:
+
+ zip = zipfile.ZipFile(archive, 'w')
+ for name, content in files:
+ path = os.path.join(dir, name)
+ with open(path, 'w') as f:
+ f.write(content)
+ zip.write(path)
+ zip.close()
+ shutil.rmtree(dir)
+ return self.workpath(archive)
+
+ sample_project_files = [
+ ('SConstruct', SConstruct),
+ ]
+
+ def write_sample_project(self, archive, dir=None):
+ base, suffix = self.archive_split(archive)
+
+ write_sample = {
+ '.tar' : self.write_sample_tarfile,
+ '.tar.gz' : self.write_sample_tarfile,
+ '.tgz' : self.write_sample_tarfile,
+ '.zip' : self.write_sample_zipfile,
+ }.get(suffix, self.write_sample_directory)
+
+ if not dir:
+ dir = base
+
+ os.mkdir(dir)
+ path = write_sample(archive, dir, self.sample_project_files)
+
+ return path
+
+# Local Variables:
+# tab-width:4
+# indent-tabs-mode:nil
+# End:
+# vim: set expandtab tabstop=4 shiftwidth=4:
diff --git a/testing/framework/TestSConsign.py b/testing/framework/TestSConsign.py
new file mode 100644
index 0000000..8f992ef
--- /dev/null
+++ b/testing/framework/TestSConsign.py
@@ -0,0 +1,90 @@
+# Copyright (c) 2001 - 2019 The SCons Foundation
+from __future__ import print_function
+
+__revision__ = "testing/framework/TestSConsign.py e724ae812eb96f4858a132f5b8c769724744faf6 2019-07-21 00:04:47 bdeegan"
+
+__doc__ = """
+TestSConsign.py: a testing framework for the "sconsign" script
+tool.
+
+A TestSConsign environment object is created via the usual invocation:
+
+ test = TestSConsign()
+
+TestSConsign is a subclass of TestSCons, which is a subclass of
+TestCommon, which is in turn a subclass of TestCmd, and hence
+has available all of the methods and attributes from those classes,
+as well as any overridden or additional methods or attributes defined
+in this subclass.
+"""
+
+import os
+import os.path
+import sys
+
+from TestSCons import *
+from TestSCons import __all__
+
+__all__.extend([ 'TestSConsign', ])
+
+class TestSConsign(TestSCons):
+ """Class for testing the sconsign.py script.
+
+ This provides a common place for initializing sconsign tests,
+ eliminating the need to begin every test with the same repeated
+ initializations.
+
+ This adds additional methods for running the sconsign script
+ without changing the basic ability of the run() method to run
+ "scons" itself, since we need to run scons to generate the
+ .sconsign files that we want the sconsign script to read.
+ """
+ def __init__(self, *args, **kw):
+ try:
+ script_dir = os.environ['SCONS_SCRIPT_DIR']
+ except KeyError:
+ pass
+ else:
+ os.chdir(script_dir)
+ self.script_dir = os.getcwd()
+
+ TestSCons.__init__(self, *args, **kw)
+
+ self.my_kw = {
+ 'interpreter' : python, # imported from TestSCons
+ }
+
+ if 'program' not in kw:
+ kw['program'] = os.environ.get('SCONS')
+ if not kw['program']:
+ if os.path.exists('scons'):
+ kw['program'] = 'scons'
+ else:
+ kw['program'] = 'scons.py'
+
+ sconsign = os.environ.get('SCONSIGN')
+ if not sconsign:
+ if os.path.exists(self.script_path('sconsign.py')):
+ sconsign = 'sconsign.py'
+ elif os.path.exists(self.script_path('sconsign')):
+ sconsign = 'sconsign'
+ else:
+                print("Cannot find either the 'sconsign.py' or 'sconsign' script.")
+ self.no_result()
+ self.set_sconsign(sconsign)
+
+ def script_path(self, script):
+ return os.path.join(self.script_dir, script)
+
+ def set_sconsign(self, sconsign):
+ self.my_kw['program'] = sconsign
+
+ def run_sconsign(self, *args, **kw):
+ kw.update(self.my_kw)
+ return self.run(*args, **kw)
+
+# Local Variables:
+# tab-width:4
+# indent-tabs-mode:nil
+# End:
+# vim: set expandtab tabstop=4 shiftwidth=4:
diff --git a/testing/framework/TestUnit/__init__.py b/testing/framework/TestUnit/__init__.py
new file mode 100644
index 0000000..51cf972
--- /dev/null
+++ b/testing/framework/TestUnit/__init__.py
@@ -0,0 +1,5 @@
+
+__all__ = ['TAPTestRunner', 'TAPTestResult', 'run']
+
+from .taprunner import TAPTestRunner, TAPTestResult
+from .cli import run
diff --git a/testing/framework/TestUnit/cli.py b/testing/framework/TestUnit/cli.py
new file mode 100644
index 0000000..6aec735
--- /dev/null
+++ b/testing/framework/TestUnit/cli.py
@@ -0,0 +1,35 @@
+"""
+Choose test runner class from --runner command line option
+and execute test cases.
+"""
+
+import unittest
+import optparse
+import sys
+
+
+def get_runner():
+ parser = optparse.OptionParser()
+ parser.add_option('--runner', default='unittest.TextTestRunner',
+ help='name of test runner class to use')
+ opts, args = parser.parse_args()
+
+    fromsplit = opts.runner.rsplit('.', 1)
+    if len(fromsplit) < 2:
+        raise ValueError("Can't use a bare module as a runner; "
+                         "specify 'module.ClassName'")
+    # fromlist makes __import__ return the submodule itself for dotted
+    # module paths, instead of just the top-level package
+    runnermod = __import__(fromsplit[0], fromlist=[fromsplit[1]])
+    return getattr(runnermod, fromsplit[1])
+
+
+def run(suite=None):
+ runner = get_runner()
+ if suite:
+ if not runner().run(suite).wasSuccessful():
+ sys.exit(1)
+ else:
+ unittest.main(argv=sys.argv[:1], testRunner=runner)
+
+
+if __name__ == '__main__':
+ run()
diff --git a/testing/framework/TestUnit/taprunner.py b/testing/framework/TestUnit/taprunner.py
new file mode 100644
index 0000000..5c2e87c
--- /dev/null
+++ b/testing/framework/TestUnit/taprunner.py
@@ -0,0 +1,130 @@
+"""
+Format unittest results in Test Anything Protocol (TAP).
+http://testanything.org/tap-version-13-specification.html
+
+Public domain work by:
+ anatoly techtonik <techtonik@gmail.com>
+
+Changes:
+ 0.3 - fixed used imports that failed on Python 2.6
+ 0.2 - removed unused import that failed on Python 2.6
+ 0.1 - initial release
+"""
+
+__version__ = "0.3"
+
+
+from unittest import TextTestRunner
+try:
+ from unittest import TextTestResult
+except ImportError:
+ # Python 2.6
+ from unittest import _TextTestResult as TextTestResult
+
+
+class TAPTestResult(TextTestResult):
+
+ def _process(self, test, msg, failtype = None, directive = None):
+ """ increase the counter, format and output TAP info """
+ # counterhack: increase test counter
+ test.suite.tap_counter += 1
+ msg = "%s %d" % (msg, test.suite.tap_counter)
+ if "not" not in msg:
+ msg += " " # justify
+ self.stream.write("%s - " % msg)
+ if failtype:
+ self.stream.write("%s - " % failtype)
+ self.stream.write("%s" % test.__class__.__name__)
+ self.stream.write(".%s" % test._testMethodName)
+ if directive:
+ self.stream.write(directive)
+ self.stream.write("\n")
+ # [ ] write test __doc__ (if exists) in comment
+ self.stream.flush()
+
+ def addSuccess(self, test):
+ super(TextTestResult, self).addSuccess(test)
+ self._process(test, "ok")
+
+ def addFailure(self, test, err):
+ super(TextTestResult, self).addFailure(test, err)
+ self._process(test, "not ok", "FAIL")
+ # [ ] add structured data about assertion
+
+ def addError(self, test, err):
+ super(TextTestResult, self).addError(test, err)
+ self._process(test, "not ok", "ERROR")
+ # [ ] add structured data about exception
+
+ def addSkip(self, test, reason):
+ super(TextTestResult, self).addSkip(test, reason)
+ self._process(test, "ok", directive=(" # SKIP %s" % reason))
+
+ def addExpectedFailure(self, test, err):
+ super(TextTestResult, self).addExpectedFailure(test, err)
+ self._process(test, "not ok", directive=(" # TODO"))
+
+ def addUnexpectedSuccess(self, test):
+ super(TextTestResult, self).addUnexpectedSuccess(test)
+ self._process(test, "not ok", "FAIL (unexpected success)")
+
+ """
+ def printErrors(self):
+ def printErrorList(self, flavour, errors):
+ """
+
+
+class TAPTestRunner(TextTestRunner):
+ resultclass = TAPTestResult
+
+ def run(self, test):
+ self.stream.write("TAP version 13\n")
+ # [ ] add commented block with test suite __doc__
+ # [ ] check call with a single test
+ # if isinstance(test, suite.TestSuite):
+ self.stream.write("1..%s\n" % len(list(test)))
+
+ # counterhack: inject test counter into test suite
+ test.tap_counter = 0
+ # counterhack: inject reference to suite into each test case
+ for case in test:
+ case.suite = test
+
+ return super(TAPTestRunner, self).run(test)
+
+
+if __name__ == "__main__":
+ import sys
+ import unittest
+
+ class Test(unittest.TestCase):
+ def test_ok(self):
+ pass
+ def test_fail(self):
+ self.assertTrue(False)
+ def test_error(self):
+ bad_symbol
+ @unittest.skip("skipin'")
+ def test_skip(self):
+ pass
+ @unittest.expectedFailure
+ def test_not_ready(self):
+ self.fail()
+ @unittest.expectedFailure
+ def test_invalid_fail_mark(self):
+ pass
+ def test_another_ok(self):
+ pass
+
+
+ suite = unittest.TestSuite([
+ Test('test_ok'),
+ Test('test_fail'),
+ Test('test_error'),
+ Test('test_skip'),
+ Test('test_not_ready'),
+ Test('test_invalid_fail_mark'),
+ Test('test_another_ok')
+ ])
+ if not TAPTestRunner().run(suite).wasSuccessful():
+ sys.exit(1)
diff --git a/testing/framework/test-framework.rst b/testing/framework/test-framework.rst
new file mode 100644
index 0000000..cb6b8e1
--- /dev/null
+++ b/testing/framework/test-framework.rst
@@ -0,0 +1,523 @@
+=======================
+SCons Testing Framework
+=======================
+
+SCons uses extensive automated tests to ensure quality. The primary goal
+is that users be able to upgrade from version to version without
+any surprise changes in behavior.
+
+In general, no change goes into SCons unless it has one or more new
+or modified tests that demonstrably exercise the bug being fixed or
+the feature being added. There are exceptions to this guideline, but
+they should be just that: *exceptions*. When in doubt, make sure
+it's tested.
+
+Test Organization
+=================
+
+There are three types of SCons tests:
+
+*End-to-End Tests*
+ End-to-end tests of SCons are Python scripts (``*.py``) underneath the
+ ``test/`` subdirectory. They use the test infrastructure modules in
+  the ``testing/framework`` subdirectory. They set up complete
+ projects and call scons to execute them, checking that the behavior is
+ as expected.
+
+*Unit Tests*
+ Unit tests for individual SCons modules live underneath the
+  ``src/engine/`` subdirectory and have the same base name as the module
+  to be tested, with ``Tests`` appended before the ``.py``. For example,
+ the unit tests for the ``Builder.py`` module are in the
+ ``BuilderTests.py`` script. Unit tests tend to be based on assertions.
+
+*External Tests*
+ For the support of external Tools (in the form of packages, preferably),
+ the testing framework is extended so it can run in standalone mode.
+ You can start it from the top-level folder of your Tool's source tree,
+ where it then finds all Python scripts (``*.py``) underneath the local
+ ``test/`` directory. This implies that Tool tests have to be kept in
+ a folder named ``test``, like for the SCons core.
+
+
+Contrasting End-to-End and Unit Tests
+#####################################
+
+In general, functionality with end-to-end tests
+should be considered a hardened part of the public interface (that is,
+something that a user might do) and should not be broken. Unit tests
+are now considered more malleable, more for testing internal interfaces
+that can change so long as we don't break users' ``SConscript`` files.
+(This wasn't always the case, and there's a lot of meaty code in many
+of the unit test scripts that does, in fact, capture external interface
+behavior. In general, we should try to move those things to end-to-end
+scripts as we find them.)
+
+End-to-end tests are by their nature harder to debug.
+You can drop straight into the Python debugger on the unit test
+scripts by using the ``runtest.py --pdb`` option, but the end-to-end
+tests treat an SCons invocation as a "black box" and just look for
+external effects; simple methods like inserting ``print`` statements
+in the SCons code itself can disrupt those external effects.
+See `Debugging End-to-End Tests`_ for some more thoughts.
+
+Naming Conventions
+##################
+
+The end-to-end tests, more or less, stick to the following naming
+conventions:
+
+#. All tests end with a ``.py`` suffix.
+
+#. In the *General* form we use
+
+ ``Feature.py``
+ for the test of a specified feature; try to keep this description
+ reasonably short
+
+ ``Feature-x.py``
+ for the test of a specified feature using option ``x``
+
+#. The *command line option* tests take the form
+
+ ``option-x.py``
+ for a lower-case single-letter option
+
+ ``option--X.py``
+ upper-case single-letter option (with an extra hyphen, so the
+ file names will be unique on case-insensitive systems)
+
+ ``option--lo.py``
+ long option; abbreviate the long option name to a few characters
+
+
+Running Tests
+=============
+
+The standard set of SCons tests are run from the top-level source
+directory by the ``runtest.py`` script.
+
+Help is available through the ``-h`` option::
+
+ $ python runtest.py -h
+
+To simply run all the tests, use the ``-a`` option::
+
+ $ python runtest.py -a
+
+By default, ``runtest.py`` prints a count and percentage message for each
+test case, along with the name of the test file. If you need quieter
+output, have a look at the ``-q``, ``-s`` and ``-k`` options.
+
+You may specifically list one or more tests to be run::
+
+ $ python runtest.py src/engine/SCons/BuilderTests.py
+ $ python runtest.py test/option-j.py test/Program.py
+
+Folder names are allowed arguments as well, so you can do::
+
+ $ python runtest.py test/SWIG
+
+to run all SWIG tests only.
+
+You can also use the ``-f`` option to execute just the tests listed in
+a specified text file::
+
+ $ cat testlist.txt
+ test/option-j.py
+ test/Program.py
+ $ python runtest.py -f testlist.txt
+
+One test must be listed per line, and any lines that begin with ``#``
+will be ignored (the intent being to allow you, for example, to comment
+out tests that are currently passing and then uncomment all of the tests
+in the file for a final validation run).
+
+If more than one test is run, the ``runtest.py`` script prints a summary
+of how many tests passed, failed, or yielded no result, and lists any
+unsuccessful tests.
+
+The above invocations all directly test the files underneath the ``src/``
+subdirectory, and do not require that a packaging build be performed
+first. The ``runtest.py`` script supports additional options to run
+tests against unpacked packages in the ``build/test-*/`` subdirectories.
+
+If you are testing a separate Tool outside of the SCons source tree, you
+have to call the ``runtest.py`` script in *external* (stand-alone) mode::
+
+ $ python ~/scons/runtest.py -e -a
+
+This ensures that the testing framework doesn't try to access SCons
+classes needed for some of the *internal* test cases.
+
+Note that each test is carried out in its own temporary folder, which
+gets deleted afterwards. This ensures that your source directories
+don't get clobbered with temporary files from the test runs. It also
+means that you can't simply change into a folder to "debug things" after
+a test has gone wrong. For a way around this, check out the ``PRESERVE``
+environment variable. It can be seen in action in
+`How to Convert Old Tests to Use Fixtures`_ below.
+
+Not Running Tests
+=================
+
+If you simply want to check which tests would get executed, you can call
+the ``runtest.py`` script with the ``-l`` option::
+
+ $ python runtest.py -l
+
+Then there is also the ``-n`` option, which prints the command line for
+each single test, but doesn't actually execute them::
+
+ $ python runtest.py -n
+
+Finding Tests
+=============
+
+When started in *standard* mode::
+
+ $ python runtest.py -a
+
+``runtest.py`` assumes that it is run from the SCons top-level source
+directory. It then dives into the ``src`` and ``test`` folders, where
+it tries to find filenames
+
+``*Tests.py``
+ for the ``src`` directory
+
+``*.py``
+ for the ``test`` folder
+
+When using fixtures, you may quickly end up in a position where you have
+supporting Python script files in a subfolder, but they shouldn't get
+picked up as test scripts. In this case you have two options:
+
+#. Add a file with the name ``sconstest.skip`` to your subfolder. This
+ lets ``runtest.py`` skip the contents of the directory completely.
+#. Create a file named ``.exclude_tests`` in each folder in question,
+   and in it list, one per line, the files to be excluded from testing.
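+
+For illustration, a hypothetical ``.exclude_tests`` file might look like
+this (the helper script names below are invented)::
+
+    build_helper.py
+    generate_sources.py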
+
+The same rules apply when testing external Tools by using the ``-e``
+option.
+
+
+Example End-to-End Test Script
+==============================
+
+To illustrate how the end-to-end test scripts work, let's walk through
+a simple "Hello, world!" example::
+
+ #!python
+ import TestSCons
+
+ test = TestSCons.TestSCons()
+
+ test.write('SConstruct', """\
+ Program('hello.c')
+ """)
+
+    test.write('hello.c', """\
+    #include <stdio.h>
+    #include <stdlib.h>
+
+    int
+    main(int argc, char *argv[])
+    {
+        printf("Hello, world!\\n");
+        exit(0);
+    }
+    """)
+
+ test.run()
+
+ test.run(program='./hello', stdout="Hello, world!\n")
+
+ test.pass_test()
+
+
+``import TestSCons``
+ Imports the main infrastructure for writing SCons tests. This is
+ normally the only part of the infrastructure that needs importing.
+ Sometimes other Python modules are necessary or helpful, and get
+ imported before this line.
+
+``test = TestSCons.TestSCons()``
+ This initializes an object for testing. A fair amount happens under
+ the covers when the object is created, including:
+
+ * A temporary directory is created for all the in-line files that will
+ get created.
+
+ * The temporary directory's removal is arranged for when
+ the test is finished.
+
+ * The test does ``os.chdir()`` to the temporary directory.
+
+``test.write('SConstruct', ...)``
+ This line creates an ``SConstruct`` file in the temporary directory,
+ to be used as input to the ``scons`` run(s) that we're testing.
+ Note the use of the Python triple-quote syntax for the contents
+ of the ``SConstruct`` file. Because input files for tests are all
+ created from in-line data like this, the tests can sometimes get
+    a little confusing to read, because some of the Python code you see
+    is actually data: the contents of the input files being written.
+
+``test.write('hello.c', ...)``
+    This line creates a ``hello.c`` file in the temporary directory.
+    Note that we have to escape the ``\\n`` in the
+    ``"Hello, world!\\n"`` string so that it ends up as a single
+    backslash followed by ``n`` in the ``hello.c`` file on disk.
+
+``test.run()``
+ This actually runs SCons. Like the object initialization, things
+ happen under the covers:
+
+ * The exit status is verified; the test exits with a failure if
+ the exit status is not zero.
+ * The error output is examined, and the test exits with a failure
+ if there is any.
+
+``test.run(program='./hello', stdout="Hello, world!\n")``
+ This shows use of the ``TestSCons.run()`` method to execute a program
+ other than ``scons``, in this case the ``hello`` program we just
+ presumably built. The ``stdout=`` keyword argument also tells the
+ ``TestSCons.run()`` method to fail if the program output does not
+ match the expected string ``"Hello, world!\n"``. Like the previous
+ ``test.run()`` line, it will also fail the test if the exit status is
+ non-zero, or there is any error output.
+
+``test.pass_test()``
+ This is always the last line in a test script. It prints ``PASSED``
+ on the screen and makes sure we exit with a ``0`` status to indicate
+ the test passed. As a side effect of destroying the ``test`` object,
+ the created temporary directory will be removed.
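+
+Real tests usually add a few more checks between the build and the final
+``test.pass_test()``. A minimal sketch, assuming the ``hello`` example
+above (``must_exist`` and ``up_to_date`` are methods provided by the
+framework classes, and ``TestSCons._exe`` holds the platform's executable
+suffix)::
+
+    _exe = TestSCons._exe            # executable suffix for this platform
+    test.must_exist('hello' + _exe)  # fail unless the program was built
+    test.up_to_date(arguments='.')   # a rebuild should do no work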
+
+Working with Fixtures
+=====================
+
+In the simple example above, the files to set up the test are created
+on the fly by the test program. We give a filename to the ``TestSCons.write()``
+method, and a string holding its contents, and it gets written to the test
+folder right before the test starts.
+
+This technique can still be seen throughout most of the end-to-end tests,
+but there is a better way. Writing a test this way means first creating
+the input files, then, once they work reasonably, pasting their contents
+into the script as strings; every later maintenance change repeats that
+round trip. Once a test gets more complex and/or grows many steps, the
+test script gets harder to read. Why not keep the files as files?
+
+In testing parlance, a fixture is a repeatable test setup. The scons
+test harness allows saved files or directories to be used
+in that sense: "the fixture for this test is foo", instead of writing
+a whole bunch of strings to create files. Since these setups can be
+reusable across multiple tests, the *fixture* terminology applies well.
+
+Directory Fixtures
+##################
+
+The function ``dir_fixture(self, srcdir, dstdir=None)`` in the ``TestCmd``
+class copies the contents of the specified folder ``srcdir`` from
+the directory of the called test script to the current temporary test
+directory. The ``srcdir`` name may be a list, in which case the elements
+are concatenated with the ``os.path.join()`` method. The ``dstdir``
+is assumed to be under the temporary working directory; it gets created
+automatically if it does not already exist.
+
+A short syntax example::
+
+ test = TestSCons.TestSCons()
+ test.dir_fixture('image')
+ test.run()
+
+would copy all files and subfolders from the local ``image`` folder,
+to the temporary directory for the current test.
+
+To see a real example for this in action, refer to the test named
+``test/packaging/convenience-functions/convenience-functions.py``.
+
+File Fixtures
+#############
+
+As with directory fixtures, ``file_fixture(self, srcfile, dstfile=None)``
+copies the file ``srcfile`` from the directory of the called script
+to the temporary test directory. The ``dstfile`` is assumed to be
+under the temporary working directory, unless it is an absolute path
+name. If ``dstfile`` is specified, its target directory gets created
+automatically if it doesn't already exist.
+
+With the following code::
+
+ test = TestSCons.TestSCons()
+ test.file_fixture('SConstruct')
+ test.file_fixture(['src','main.cpp'],['src','main.cpp'])
+ test.run()
+
+The files ``SConstruct`` and ``src/main.cpp`` are copied to the
+temporary test directory. Notice that the second ``file_fixture`` call
+preserves the path of the original; otherwise ``main.cpp``
+would have landed in the top level of the test directory.
+
+Again, a reference example can be found in the current revision
+of SCons; see ``test/packaging/sandbox-test/sandbox-test.py``.
+
+For even more examples you should check out
+one of the external Tools, e.g. the *Qt4* Tool at
+https://bitbucket.org/dirkbaechle/scons_qt4. Also visit the SCons Tools
+Index at https://github.com/SCons/scons/wiki/ToolsIndex for a complete
+list of available Tools, though not all may have tests yet.
+
+How to Convert Old Tests to Use Fixtures
+########################################
+
+Tests using the inline ``TestSCons.write()`` method can easily be
+converted to the fixture-based approach. For this, we need to get at the
+files as they are written to each temporary test folder.
+
+``runtest.py`` checks for the existence of an environment
+variable named ``PRESERVE``. If it is set to a non-zero value, the testing
+framework preserves the test folder instead of deleting it, and prints
+its name to the screen.
+
+So, you should be able to give a command like::
+
+ $ PRESERVE=1 python runtest.py test/packaging/sandbox-test.py
+
+assuming Linux and a bash-like shell. For a Windows ``cmd`` shell, use
+``set PRESERVE=1`` (that will leave it set for the duration of the
+``cmd`` session, unless manually deleted).
+
+The output should then look something like this::
+
+ 1/1 (100.00%) /usr/bin/python -tt test/packaging/sandbox-test.py
+ PASSED
+ Preserved directory /tmp/testcmd.4060.twlYNI
+
+You can now copy the files from that folder to your new
+*fixture* folder. Then, in the test script you simply remove all the
+tedious ``TestSCons.write()`` statements and replace them by a single
+``TestSCons.dir_fixture()``.
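+
+A converted test might then reduce to something like this minimal sketch
+(assuming the preserved files were copied into a local folder named
+``fixture`` next to the test script)::
+
+    import TestSCons
+
+    test = TestSCons.TestSCons()
+    # everything that used to be written inline now lives in ./fixture
+    test.dir_fixture('fixture')
+    test.run()
+    test.pass_test()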
+
+Finally, don't forget to clean up and remove the temporary test
+directory. ``;)``
+
+When Not to Use a Fixture
+#########################
+
+Note that some files are not appropriate for use in a fixture as-is:
+fixture files should be static. If the creation of the file involves
+interpolating data discovered during the run of the test script,
+that process should stay in the script. Here is an example of this
+kind of usage that does not lend itself to a fixture::
+
+ import TestSCons
+ _python_ = TestSCons._python_
+
+ test.write('SConstruct', """
+ cc = Environment().Dictionary('CC')
+ env = Environment(LINK = r'%(_python_)s mylink.py',
+ LINKFLAGS = [],
+ CC = r'%(_python_)s mycc.py',
+ CXX = cc,
+ CXXFLAGS = [])
+ env.Program(target = 'test1', source = 'test1.c')
+ """ % locals())
+
+Here the value of ``_python_`` is picked out of the script's
+``locals`` dictionary and interpolated into the string that
+will be written to ``SConstruct``.
+
+The other files created in this test may still be candidates for
+use in a fixture, however.
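+
+A hedged sketch of that mixed style, keeping the static files in a
+fixture folder (called ``fixture`` here purely for illustration) while
+still writing the interpolated ``SConstruct`` inline::
+
+    import TestSCons
+    _python_ = TestSCons._python_
+
+    test = TestSCons.TestSCons()
+    test.dir_fixture('fixture')      # mylink.py, mycc.py, test1.c, ...
+    test.write('SConstruct', """
+    env = Environment(LINK = r'%(_python_)s mylink.py',
+                      CC = r'%(_python_)s mycc.py')
+    env.Program(target = 'test1', source = 'test1.c')
+    """ % locals())
+    test.run()
+    test.pass_test()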
+
+Debugging End-to-End Tests
+==========================
+
+Most of the end-to-end tests have expectations for the standard output
+and error streams of the test runs. The expectation could be either
+that there is nothing on that stream, or that it will contain
+very specific text which the test matches against. So adding
+``print()`` calls, ``sys.stderr.write()`` calls or similar will
+emit data that the tests do not expect, and cause further failures.
+Say you have three different tests in a script, and the third
+one is unexpectedly failing. You add some debug prints to the
+part of scons that is involved, and now the first test of the
+three starts failing, aborting the test run before it gets
+to the third test you were trying to debug.
+
+Still, there are some techniques to help debugging.
+
+Probably the most effective technique is to use the internal
+``SCons.Debug.Trace()`` function, which prints output to
+``/dev/tty`` on Linux/UNIX systems and ``con`` on Windows systems,
+so you can see what's going on.
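+
+A minimal sketch of that technique, dropped temporarily into whatever
+SCons method you are investigating (the message text is arbitrary)::
+
+    from SCons.Debug import Trace
+    Trace('reached the suspicious code path\n')
+
+Because the output goes to the terminal device rather than to stdout
+or stderr, the test expectations are not disturbed.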
+
+If you do need to add informational messages in scons code
+to debug a problem, you can use logging and send the messages
+to a file instead, so they don't interrupt the test expectations.
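+
+For example (a sketch only; the log file path is arbitrary)::
+
+    import logging
+    logging.basicConfig(filename='/tmp/scons-debug.log',
+                        level=logging.DEBUG)
+    logging.debug('reached the suspicious code path')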
+
+Part of the technique discussed in the section
+`How to Convert Old Tests to Use Fixtures`_ can also be helpful
+for debugging purposes. If you have a failing test, try::
+
+ $ PRESERVE=1 python runtest.py test/failing-test.py
+
+You can now go to the preserved directory reported by this run and
+invoke the test manually to see what it is doing, without
+the presence of the test infrastructure which would otherwise
+"swallow" output you may be interested in. In this situation,
+adding debug prints becomes more useful.
+
+
+Test Infrastructure
+===================
+
+The main test API is the ``TestSCons`` class. ``TestSCons``
+is a subclass of ``TestCommon``, which is a subclass of ``TestCmd``.
+All those classes are defined in Python files of the same name
+in ``testing/framework``. Start in
+``testing/framework/TestCmd.py`` for the base API definitions, like how
+to create files (``test.write()``) and run commands (``test.run()``).
+
+Use ``TestSCons`` for the end-to-end tests in ``test``, but use
+``TestCmd`` for the unit tests in the ``src`` folder.
+
+The match functions work like this:
+
+``TestSCons.match_re``
+    match each line against its own regular expression (RE)
+
+ * Splits the lines into a list (unless they already are)
+    * splits the REs at newlines (unless already a list) and puts ``^..$`` around each
+    * then each RE must match the corresponding line. This means there
+      must be as many REs as lines.
+
+``TestSCons.match_re_dotall``
+ match all the lines against a single RE
+
+ * Joins the lines with newline (unless already a string)
+ * joins the REs with newline (unless it's a string) and puts ``^..$``
+ around the whole thing
+    * then the whole thing must match using Python's ``re.DOTALL`` flag.
+
+Use them in a test like this::
+
+ test.run(..., match=TestSCons.match_re, ...)
+
+or::
+
+ test.must_match(..., match=TestSCons.match_re, ...)
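+
+As a concrete sketch, this accepts any surrounding output as long as the
+greeting from the earlier ``hello`` example appears somewhere in it::
+
+    test.run(program='./hello', stdout=r".*Hello, world!.*",
+             match=TestSCons.match_re_dotall)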
+
+Avoiding Tests Based on Tool Existence
+======================================
+
+Some tests need an external tool that may not be installed everywhere;
+such tests should detect the tool and skip gracefully when it is
+missing. Here's a simple example::
+
+ #!python
+ intelc = test.detect_tool('intelc', prog='icpc')
+ if not intelc:
+ test.skip_test("Could not load 'intelc' Tool; skipping test(s).\n")
+
+See ``testing/framework/TestSCons.py`` for the ``detect_tool`` method.
+It calls the tool's ``generate()`` method, and then looks for the given
+program (tool name by default) in ``env['ENV']['PATH']``.
+
+The ``where_is`` method can be used to look for programs that
+do not have tool specifications. The existing test code
+has many samples of using either or both of these to detect
+whether it is worth even proceeding with a test.
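+
+A minimal sketch of the ``where_is`` variant (the program name here is
+chosen just for illustration)::
+
+    swig = test.where_is('swig')
+    if not swig:
+        test.skip_test("Could not find 'swig'; skipping test(s).\n")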