author     Luca Falavigna <dktrkranz@debian.org>  2010-01-02 20:56:27 +0100
committer  Luca Falavigna <dktrkranz@debian.org>  2010-01-02 20:56:27 +0100
commit     72c578fd4b0b4a5a43e18594339ac4ff26c376dc (patch)
tree       cadaf3abe37a1066ceae933bc8fe7b75c85f56d2 /src/engine/SCons/compat
parent     548ed1064f327bccc6e538806740d41ea2d928a1 (diff)
Imported Upstream version 1.2.0.d20091224 (upstream/1.2.0.d20091224)
Diffstat (limited to 'src/engine/SCons/compat')
-rw-r--r--  src/engine/SCons/compat/__init__.py            302
-rw-r--r--  src/engine/SCons/compat/_scons_UserString.py    98
-rw-r--r--  src/engine/SCons/compat/_scons_hashlib.py       91
-rw-r--r--  src/engine/SCons/compat/_scons_itertools.py    124
-rw-r--r--  src/engine/SCons/compat/_scons_optparse.py    1725
-rw-r--r--  src/engine/SCons/compat/_scons_platform.py     237
-rw-r--r--  src/engine/SCons/compat/_scons_sets.py         583
-rw-r--r--  src/engine/SCons/compat/_scons_sets15.py       176
-rw-r--r--  src/engine/SCons/compat/_scons_shlex.py        325
-rw-r--r--  src/engine/SCons/compat/_scons_subprocess.py  1296
-rw-r--r--  src/engine/SCons/compat/_scons_textwrap.py     382
-rw-r--r--  src/engine/SCons/compat/builtins.py            187
12 files changed, 5526 insertions, 0 deletions
diff --git a/src/engine/SCons/compat/__init__.py b/src/engine/SCons/compat/__init__.py
new file mode 100644
index 0000000..6eb9340
--- /dev/null
+++ b/src/engine/SCons/compat/__init__.py
@@ -0,0 +1,302 @@
+#
+# Copyright (c) 2001, 2002, 2003, 2004, 2005, 2006, 2007, 2008, 2009 The SCons Foundation
+#
+# Permission is hereby granted, free of charge, to any person obtaining
+# a copy of this software and associated documentation files (the
+# "Software"), to deal in the Software without restriction, including
+# without limitation the rights to use, copy, modify, merge, publish,
+# distribute, sublicense, and/or sell copies of the Software, and to
+# permit persons to whom the Software is furnished to do so, subject to
+# the following conditions:
+#
+# The above copyright notice and this permission notice shall be included
+# in all copies or substantial portions of the Software.
+#
+# THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY
+# KIND, EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE
+# WARRANTIES OF MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND
+# NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS BE
+# LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION
+# OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION
+# WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE.
+#
+
+__doc__ = """
+SCons compatibility package for old Python versions
+
+This subpackage holds modules that provide backwards-compatible
+implementations of various things that we'd like to use in SCons but which
+only show up in later versions of Python than the early, old version(s)
+we still support.
+
+Other code will not generally reference things in this package through
+the SCons.compat namespace. The modules included here add things to
+the __builtin__ namespace or the global module list so that the rest
+of our code can use the objects and names imported here regardless of
+Python version.
+
+Simply enough, things that go in the __builtin__ name space come from
+our builtins module.
+
+The rest of the things here will be in individual compatibility modules
+that are either: 1) suitably modified copies of the future modules that
+we want to use; or 2) backwards compatible re-implementations of the
+specific portions of a future module's API that we want to use.
+
+GENERAL WARNINGS: Implementations of functions in the SCons.compat
+modules are *NOT* guaranteed to be fully compliant with these functions in
+later versions of Python. We are only concerned with adding functionality
+that we actually use in SCons, so be wary if you lift this code for
+other uses. (That said, making these more nearly the same as later,
+official versions is still a desirable goal, we just don't need to be
+obsessive about it.)
+
+We name the compatibility modules with an initial '_scons_' (for example,
+_scons_subprocess.py is our compatibility module for subprocess) so
+that we can still try to import the real module name and fall back to
+our compatibility module if we get an ImportError. The import_as()
+function defined below loads the module as the "real" name (without the
+'_scons'), after which all of the "import {module}" statements in the
+rest of our code will find our pre-loaded compatibility module.
+"""
+
+__revision__ = "src/engine/SCons/compat/__init__.py 4577 2009/12/27 19:44:43 scons"
+
+def import_as(module, name):
+ """
+ Imports the specified module (from our local directory) as the
+ specified name.
+ """
+ import imp
+ import os.path
+ dir = os.path.split(__file__)[0]
+ file, filename, suffix_mode_type = imp.find_module(module, [dir])
+ imp.load_module(name, file, filename, suffix_mode_type)
+
+import builtins
+
+try:
+ import hashlib
+except ImportError:
+ # Pre-2.5 Python has no hashlib module.
+ try:
+ import_as('_scons_hashlib', 'hashlib')
+ except ImportError:
+ # If we failed importing our compatibility module, it probably
+ # means this version of Python has no md5 module. Don't do
+ # anything and let the higher layer discover this fact, so it
+ # can fall back to using timestamp.
+ pass
+
+try:
+ set
+except NameError:
+ # Pre-2.4 Python has no native set type
+ try:
+ # Python 2.2 and 2.3 can use the copy of the 2.[45] sets module
+ # that we grabbed.
+ import_as('_scons_sets', 'sets')
+ except (ImportError, SyntaxError):
+        # Python 1.5 (ImportError, no __future__ module) and 2.1
+ # (SyntaxError, no generators in __future__) will blow up
+ # trying to import the 2.[45] sets module, so back off to a
+ # custom sets module that can be discarded easily when we
+ # stop supporting those versions.
+ import_as('_scons_sets15', 'sets')
+ import __builtin__
+ import sets
+ __builtin__.set = sets.Set
+
+import fnmatch
+try:
+ fnmatch.filter
+except AttributeError:
+ # Pre-2.2 Python has no fnmatch.filter() function.
+ def filter(names, pat):
+ """Return the subset of the list NAMES that match PAT"""
+ import os,posixpath
+ result=[]
+ pat = os.path.normcase(pat)
+ if not fnmatch._cache.has_key(pat):
+ import re
+ res = fnmatch.translate(pat)
+ fnmatch._cache[pat] = re.compile(res)
+ match = fnmatch._cache[pat].match
+ if os.path is posixpath:
+ # normcase on posix is NOP. Optimize it away from the loop.
+ for name in names:
+ if match(name):
+ result.append(name)
+ else:
+ for name in names:
+ if match(os.path.normcase(name)):
+ result.append(name)
+ return result
+ fnmatch.filter = filter
+ del filter
+
+try:
+ import itertools
+except ImportError:
+ # Pre-2.3 Python has no itertools module.
+ import_as('_scons_itertools', 'itertools')
+
+# If we need the compatibility version of textwrap, it must be imported
+# before optparse, which uses it.
+try:
+ import textwrap
+except ImportError:
+ # Pre-2.3 Python has no textwrap module.
+ import_as('_scons_textwrap', 'textwrap')
+
+try:
+ import optparse
+except ImportError:
+ # Pre-2.3 Python has no optparse module.
+ import_as('_scons_optparse', 'optparse')
+
+import os
+try:
+ os.devnull
+except AttributeError:
+ # Pre-2.4 Python has no os.devnull attribute
+ import sys
+ _names = sys.builtin_module_names
+ if 'posix' in _names:
+ os.devnull = '/dev/null'
+ elif 'nt' in _names:
+ os.devnull = 'nul'
+ os.path.devnull = os.devnull
+try:
+ os.path.lexists
+except AttributeError:
+ # Pre-2.4 Python has no os.path.lexists function
+ def lexists(path):
+ return os.path.exists(path) or os.path.islink(path)
+ os.path.lexists = lexists
+
+
+try:
+ import platform
+except ImportError:
+ # Pre-2.3 Python has no platform module.
+ import_as('_scons_platform', 'platform')
+
+
+import shlex
+try:
+ shlex.split
+except AttributeError:
+ # Pre-2.3 Python has no shlex.split() function.
+ #
+ # The full white-space splitting semantics of shlex.split() are
+ # complicated to reproduce by hand, so just use a compatibility
+ # version of the shlex module cribbed from Python 2.5 with some
+ # minor modifications for older Python versions.
+ del shlex
+ import_as('_scons_shlex', 'shlex')
+
+
+import shutil
+try:
+ shutil.move
+except AttributeError:
+ # Pre-2.3 Python has no shutil.move() function.
+ #
+ # Cribbed from Python 2.5.
+ import os
+
+ def move(src, dst):
+ """Recursively move a file or directory to another location.
+
+ If the destination is on our current filesystem, then simply use
+ rename. Otherwise, copy src to the dst and then remove src.
+ A lot more could be done here... A look at a mv.c shows a lot of
+ the issues this implementation glosses over.
+
+ """
+ try:
+ os.rename(src, dst)
+ except OSError:
+ if os.path.isdir(src):
+ if shutil.destinsrc(src, dst):
+ raise Error, "Cannot move a directory '%s' into itself '%s'." % (src, dst)
+ shutil.copytree(src, dst, symlinks=True)
+ shutil.rmtree(src)
+ else:
+ shutil.copy2(src,dst)
+ os.unlink(src)
+ shutil.move = move
+ del move
+
+ def destinsrc(src, dst):
+ src = os.path.abspath(src)
+ return os.path.abspath(dst)[:len(src)] == src
+ shutil.destinsrc = destinsrc
+ del destinsrc
+
+
+try:
+ import subprocess
+except ImportError:
+ # Pre-2.4 Python has no subprocess module.
+ import_as('_scons_subprocess', 'subprocess')
+
+import sys
+try:
+ sys.version_info
+except AttributeError:
+ # Pre-1.6 Python has no sys.version_info
+ import string
+ version_string = string.split(sys.version)[0]
+ version_ints = map(int, string.split(version_string, '.'))
+ sys.version_info = tuple(version_ints + ['final', 0])
+
+try:
+ import UserString
+except ImportError:
+ # Pre-1.6 Python has no UserString module.
+ import_as('_scons_UserString', 'UserString')
+
+import tempfile
+try:
+ tempfile.mkstemp
+except AttributeError:
+ # Pre-2.3 Python has no tempfile.mkstemp function, so try to simulate it.
+ # adapted from the mkstemp implementation in python 3.
+ import os
+ import errno
+ def mkstemp(*args, **kw):
+ text = False
+ # TODO (1.5)
+ #if 'text' in kw :
+ if 'text' in kw.keys() :
+ text = kw['text']
+ del kw['text']
+ elif len( args ) == 4 :
+ text = args[3]
+ args = args[:3]
+ flags = os.O_RDWR | os.O_CREAT | os.O_EXCL
+ if not text and hasattr( os, 'O_BINARY' ) :
+ flags = flags | os.O_BINARY
+ while True:
+ try :
+ name = apply(tempfile.mktemp, args, kw)
+ fd = os.open( name, flags, 0600 )
+ return (fd, os.path.abspath(name))
+ except OSError, e:
+ if e.errno == errno.EEXIST:
+ continue
+ raise
+
+ tempfile.mkstemp = mkstemp
+ del mkstemp
+
+
+
+
+# Local Variables:
+# tab-width:4
+# indent-tabs-mode:nil
+# End:
+# vim: set expandtab tabstop=4 shiftwidth=4:
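The try/except-ImportError idiom that __init__.py applies over and over -- attempt the standard-library import and, on failure, load the bundled '_scons_*' copy under the standard name so that every later "import module" statement silently finds the substitute -- is easier to see stripped of the surrounding compatibility noise. Below is a minimal sketch of the same idiom written for present-day Python, using importlib in place of the imp module used above (imp has since been removed); the helper name load_compat is illustrative and is not part of SCons.

    import importlib.util
    import os
    import sys

    def load_compat(compat_name, real_name):
        # Load the local compat module, but register it in sys.modules
        # under the standard-library name so "import real_name" finds it.
        here = os.path.dirname(os.path.abspath(__file__))
        path = os.path.join(here, compat_name + '.py')
        spec = importlib.util.spec_from_file_location(real_name, path)
        module = importlib.util.module_from_spec(spec)
        sys.modules[real_name] = module
        spec.loader.exec_module(module)

    try:
        import hashlib
    except ImportError:
        # Fall back to the bundled implementation, exactly as the
        # import_as() calls above do for pre-2.5 interpreters.
        load_compat('_scons_hashlib', 'hashlib')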
diff --git a/src/engine/SCons/compat/_scons_UserString.py b/src/engine/SCons/compat/_scons_UserString.py
new file mode 100644
index 0000000..932c216
--- /dev/null
+++ b/src/engine/SCons/compat/_scons_UserString.py
@@ -0,0 +1,98 @@
+#
+# Copyright (c) 2001, 2002, 2003, 2004, 2005, 2006, 2007, 2008, 2009 The SCons Foundation
+#
+# Permission is hereby granted, free of charge, to any person obtaining
+# a copy of this software and associated documentation files (the
+# "Software"), to deal in the Software without restriction, including
+# without limitation the rights to use, copy, modify, merge, publish,
+# distribute, sublicense, and/or sell copies of the Software, and to
+# permit persons to whom the Software is furnished to do so, subject to
+# the following conditions:
+#
+# The above copyright notice and this permission notice shall be included
+# in all copies or substantial portions of the Software.
+#
+# THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY
+# KIND, EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE
+# WARRANTIES OF MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND
+# NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS BE
+# LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION
+# OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION
+# WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE.
+#
+
+__revision__ = "src/engine/SCons/compat/_scons_UserString.py 4577 2009/12/27 19:44:43 scons"
+
+__doc__ = """
+A user-defined wrapper around string objects
+
+This class is "borrowed" from the Python 2.2 UserString and modified
+slightly for use with SCons. It is *NOT* guaranteed to be fully compliant
+with the standard UserString class from all later versions of Python.
+In particular, it does not necessarily contain all of the methods found
+in later versions.
+"""
+
+import types
+
+StringType = types.StringType
+
+if hasattr(types, 'UnicodeType'):
+ UnicodeType = types.UnicodeType
+ def is_String(obj):
+ return type(obj) in (StringType, UnicodeType)
+else:
+ def is_String(obj):
+ return type(obj) is StringType
+
+class UserString:
+ def __init__(self, seq):
+ if is_String(seq):
+ self.data = seq
+ elif isinstance(seq, UserString):
+ self.data = seq.data[:]
+ else:
+ self.data = str(seq)
+ def __str__(self): return str(self.data)
+ def __repr__(self): return repr(self.data)
+ def __int__(self): return int(self.data)
+ def __long__(self): return long(self.data)
+ def __float__(self): return float(self.data)
+ def __complex__(self): return complex(self.data)
+ def __hash__(self): return hash(self.data)
+
+ def __cmp__(self, string):
+ if isinstance(string, UserString):
+ return cmp(self.data, string.data)
+ else:
+ return cmp(self.data, string)
+ def __contains__(self, char):
+ return char in self.data
+
+ def __len__(self): return len(self.data)
+ def __getitem__(self, index): return self.__class__(self.data[index])
+ def __getslice__(self, start, end):
+ start = max(start, 0); end = max(end, 0)
+ return self.__class__(self.data[start:end])
+
+ def __add__(self, other):
+ if isinstance(other, UserString):
+ return self.__class__(self.data + other.data)
+ elif is_String(other):
+ return self.__class__(self.data + other)
+ else:
+ return self.__class__(self.data + str(other))
+ def __radd__(self, other):
+ if is_String(other):
+ return self.__class__(other + self.data)
+ else:
+ return self.__class__(str(other) + self.data)
+ def __mul__(self, n):
+ return self.__class__(self.data*n)
+ __rmul__ = __mul__
+
+# Local Variables:
+# tab-width:4
+# indent-tabs-mode:nil
+# End:
+# vim: set expandtab tabstop=4 shiftwidth=4:
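The value of a wrapper like this is that every operation routes through self.data and self.__class__, so a subclass gets full string-like behaviour almost for free. A short usage sketch against the standard collections.UserString, which this compat class mirrors for the methods it implements; the subclass name TaggedString is hypothetical:

    from collections import UserString

    class TaggedString(UserString):
        # A string-like value that carries one extra bit of behaviour.
        def shouted(self):
            return self.data.upper()

    s = TaggedString("-Wall")
    print(s + " -O2")      # concatenation returns another TaggedString
    print(len(s), s[1:])   # len() and slicing delegate to the wrapped data
    print(s.shouted())     # -WALL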
diff --git a/src/engine/SCons/compat/_scons_hashlib.py b/src/engine/SCons/compat/_scons_hashlib.py
new file mode 100644
index 0000000..bb4c69c
--- /dev/null
+++ b/src/engine/SCons/compat/_scons_hashlib.py
@@ -0,0 +1,91 @@
+#
+# Copyright (c) 2001, 2002, 2003, 2004, 2005, 2006, 2007, 2008, 2009 The SCons Foundation
+#
+# Permission is hereby granted, free of charge, to any person obtaining
+# a copy of this software and associated documentation files (the
+# "Software"), to deal in the Software without restriction, including
+# without limitation the rights to use, copy, modify, merge, publish,
+# distribute, sublicense, and/or sell copies of the Software, and to
+# permit persons to whom the Software is furnished to do so, subject to
+# the following conditions:
+#
+# The above copyright notice and this permission notice shall be included
+# in all copies or substantial portions of the Software.
+#
+# THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY
+# KIND, EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE
+# WARRANTIES OF MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND
+# NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS BE
+# LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION
+# OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION
+# WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE.
+#
+
+__doc__ = """
+hashlib backwards-compatibility module for older (pre-2.5) Python versions
+
+This does not not NOT (repeat, *NOT*) provide complete hashlib
+functionality. It only wraps the portions of MD5 functionality used
+by SCons, in an interface that looks like hashlib (or enough for our
+purposes, anyway). In fact, this module will raise an ImportError if
+the underlying md5 module isn't available.
+"""
+
+__revision__ = "src/engine/SCons/compat/_scons_hashlib.py 4577 2009/12/27 19:44:43 scons"
+
+import md5
+import string
+
+class md5obj:
+
+ md5_module = md5
+
+ def __init__(self, name, string=''):
+ if not name in ('MD5', 'md5'):
+ raise ValueError, "unsupported hash type"
+ self.name = 'md5'
+ self.m = self.md5_module.md5()
+
+ def __repr__(self):
+ return '<%s HASH object @ %#x>' % (self.name, id(self))
+
+ def copy(self):
+ import copy
+ result = copy.copy(self)
+ result.m = self.m.copy()
+ return result
+
+ def digest(self):
+ return self.m.digest()
+
+ def update(self, arg):
+ return self.m.update(arg)
+
+ if hasattr(md5.md5(), 'hexdigest'):
+
+ def hexdigest(self):
+ return self.m.hexdigest()
+
+ else:
+
+ # Objects created by the underlying md5 module have no native
+ # hexdigest() method (*cough* 1.5.2 *cough*), so provide an
+ # equivalent lifted from elsewhere.
+ def hexdigest(self):
+ h = string.hexdigits
+ r = ''
+ for c in self.digest():
+ i = ord(c)
+ r = r + h[(i >> 4) & 0xF] + h[i & 0xF]
+ return r
+
+new = md5obj
+
+def md5(string=''):
+ return md5obj('md5', string)
+
+# Local Variables:
+# tab-width:4
+# indent-tabs-mode:nil
+# End:
+# vim: set expandtab tabstop=4 shiftwidth=4:
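The wrapper only has to reproduce the slice of the hashlib surface that SCons actually touches: construction, update(), digest()/hexdigest(), and copy(). The call pattern it imitates looks like this against the real hashlib (the input bytes are arbitrary examples):

    import hashlib

    h = hashlib.md5()
    h.update(b"contents of a source file")
    snapshot = h.copy()            # independent copy, as md5obj.copy() provides
    h.update(b"some more content")
    print(snapshot.hexdigest())    # digest of the first chunk only
    print(h.hexdigest())           # digest of both chunks together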
diff --git a/src/engine/SCons/compat/_scons_itertools.py b/src/engine/SCons/compat/_scons_itertools.py
new file mode 100644
index 0000000..f2e93c6
--- /dev/null
+++ b/src/engine/SCons/compat/_scons_itertools.py
@@ -0,0 +1,124 @@
+#
+# Copyright (c) 2001, 2002, 2003, 2004, 2005, 2006, 2007, 2008, 2009 The SCons Foundation
+#
+# Permission is hereby granted, free of charge, to any person obtaining
+# a copy of this software and associated documentation files (the
+# "Software"), to deal in the Software without restriction, including
+# without limitation the rights to use, copy, modify, merge, publish,
+# distribute, sublicense, and/or sell copies of the Software, and to
+# permit persons to whom the Software is furnished to do so, subject to
+# the following conditions:
+#
+# The above copyright notice and this permission notice shall be included
+# in all copies or substantial portions of the Software.
+#
+# THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY
+# KIND, EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE
+# WARRANTIES OF MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND
+# NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS BE
+# LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION
+# OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION
+# WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE.
+#
+
+__revision__ = "src/engine/SCons/compat/_scons_itertools.py 4577 2009/12/27 19:44:43 scons"
+
+__doc__ = """
+Implementations of itertools functions for Python versions that don't
+have iterators.
+
+These implement the functions by creating the entire list, not returning
+it element-by-element as the real itertools functions do. This means
+that early Python versions won't get the performance benefit of using
+the itertools, but we can still use them so the later Python versions
+do get the advantages of using iterators.
+
+Because we return the entire list, we intentionally do not implement the
+itertools functions that "return" infinitely-long lists: the count(),
+cycle() and repeat() functions. Other functions below have remained
+unimplemented simply because they aren't being used (yet) and it wasn't
+obvious how to do it. Or, conversely, we only implemented those functions
+that *were* easy to implement (mostly because the Python documentation
+contained examples of equivalent code).
+
+Note that these do not have independent unit tests, so it's possible
+that there are bugs.
+"""
+
+def chain(*iterables):
+ result = []
+ for x in iterables:
+ result.extend(list(x))
+ return result
+
+def count(n=0):
+ # returns infinite length, should not be supported
+ raise NotImplementedError
+
+def cycle(iterable):
+ # returns infinite length, should not be supported
+ raise NotImplementedError
+
+def dropwhile(predicate, iterable):
+ result = []
+ for x in iterable:
+ if not predicate(x):
+ result.append(x)
+ break
+ result.extend(iterable)
+ return result
+
+def groupby(iterable, *args):
+ raise NotImplementedError
+
+def ifilter(predicate, iterable):
+ result = []
+ if predicate is None:
+ predicate = bool
+ for x in iterable:
+ if predicate(x):
+ result.append(x)
+ return result
+
+def ifilterfalse(predicate, iterable):
+ result = []
+ if predicate is None:
+ predicate = bool
+ for x in iterable:
+ if not predicate(x):
+ result.append(x)
+ return result
+
+def imap(function, *iterables):
+ return apply(map, (function,) + tuple(iterables))
+
+def islice(*args, **kw):
+ raise NotImplementedError
+
+def izip(*iterables):
+ return apply(zip, iterables)
+
+def repeat(*args, **kw):
+ # returns infinite length, should not be supported
+ raise NotImplementedError
+
+def starmap(*args, **kw):
+ raise NotImplementedError
+
+def takewhile(predicate, iterable):
+ result = []
+ for x in iterable:
+ if predicate(x):
+ result.append(x)
+ else:
+ break
+ return result
+
+def tee(*args, **kw):
+ raise NotImplementedError
+
+# Local Variables:
+# tab-width:4
+# indent-tabs-mode:nil
+# End:
+# vim: set expandtab tabstop=4 shiftwidth=4:
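The docstring's distinction between returning a whole list and returning an iterator is easy to demonstrate: the real itertools.takewhile yields lazily, while the fallback above builds and returns the complete result up front. A small comparison sketch on current Python, for illustration only:

    import itertools

    data = [1, 2, 3, 99, 4]

    lazy = itertools.takewhile(lambda x: x < 10, data)
    print(lazy)         # a takewhile iterator; nothing has been consumed yet
    print(list(lazy))   # [1, 2, 3]

    # The compat takewhile() in the diff above would return [1, 2, 3]
    # directly, so code that simply iterates over the result behaves the
    # same either way; only laziness (and memory use) differs.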
diff --git a/src/engine/SCons/compat/_scons_optparse.py b/src/engine/SCons/compat/_scons_optparse.py
new file mode 100644
index 0000000..219adba
--- /dev/null
+++ b/src/engine/SCons/compat/_scons_optparse.py
@@ -0,0 +1,1725 @@
+"""optparse - a powerful, extensible, and easy-to-use option parser.
+
+By Greg Ward <gward@python.net>
+
+Originally distributed as Optik; see http://optik.sourceforge.net/ .
+
+If you have problems with this module, please do not file bugs,
+patches, or feature requests with Python; instead, use Optik's
+SourceForge project page:
+ http://sourceforge.net/projects/optik
+
+For support, use the optik-users@lists.sourceforge.net mailing list
+(http://lists.sourceforge.net/lists/listinfo/optik-users).
+"""
+
+# Python developers: please do not make changes to this file, since
+# it is automatically generated from the Optik source code.
+
+__version__ = "1.5.3"
+
+__all__ = ['Option',
+ 'SUPPRESS_HELP',
+ 'SUPPRESS_USAGE',
+ 'Values',
+ 'OptionContainer',
+ 'OptionGroup',
+ 'OptionParser',
+ 'HelpFormatter',
+ 'IndentedHelpFormatter',
+ 'TitledHelpFormatter',
+ 'OptParseError',
+ 'OptionError',
+ 'OptionConflictError',
+ 'OptionValueError',
+ 'BadOptionError']
+
+__copyright__ = """
+Copyright (c) 2001-2006 Gregory P. Ward. All rights reserved.
+Copyright (c) 2002-2006 Python Software Foundation. All rights reserved.
+
+Redistribution and use in source and binary forms, with or without
+modification, are permitted provided that the following conditions are
+met:
+
+ * Redistributions of source code must retain the above copyright
+ notice, this list of conditions and the following disclaimer.
+
+ * Redistributions in binary form must reproduce the above copyright
+ notice, this list of conditions and the following disclaimer in the
+ documentation and/or other materials provided with the distribution.
+
+ * Neither the name of the author nor the names of its
+ contributors may be used to endorse or promote products derived from
+ this software without specific prior written permission.
+
+THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS
+IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED
+TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A
+PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE AUTHOR OR
+CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL,
+EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO,
+PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR
+PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF
+LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING
+NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS
+SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+"""
+
+import string
+import sys, os
+import types
+import textwrap
+
+def _repr(self):
+ return "<%s at 0x%x: %s>" % (self.__class__.__name__, id(self), self)
+
+
+try:
+ sys.getdefaultencoding
+except AttributeError:
+ def fake_getdefaultencoding():
+ return None
+ sys.getdefaultencoding = fake_getdefaultencoding
+
+try:
+ ''.encode
+except AttributeError:
+ def encode_wrapper(s, encoding, replacement):
+ return s
+else:
+ def encode_wrapper(s, encoding, replacement):
+ return s.encode(encoding, replacement)
+
+
+# This file was generated from:
+# Id: option_parser.py 527 2006-07-23 15:21:30Z greg
+# Id: option.py 522 2006-06-11 16:22:03Z gward
+# Id: help.py 527 2006-07-23 15:21:30Z greg
+# Id: errors.py 509 2006-04-20 00:58:24Z gward
+
+try:
+ from gettext import gettext
+except ImportError:
+ def gettext(message):
+ return message
+_ = gettext
+
+
+class OptParseError (Exception):
+ def __init__(self, msg):
+ self.msg = msg
+
+ def __str__(self):
+ return self.msg
+
+
+class OptionError (OptParseError):
+ """
+ Raised if an Option instance is created with invalid or
+ inconsistent arguments.
+ """
+
+ def __init__(self, msg, option):
+ self.msg = msg
+ self.option_id = str(option)
+
+ def __str__(self):
+ if self.option_id:
+ return "option %s: %s" % (self.option_id, self.msg)
+ else:
+ return self.msg
+
+class OptionConflictError (OptionError):
+ """
+ Raised if conflicting options are added to an OptionParser.
+ """
+
+class OptionValueError (OptParseError):
+ """
+ Raised if an invalid option value is encountered on the command
+ line.
+ """
+
+class BadOptionError (OptParseError):
+ """
+ Raised if an invalid option is seen on the command line.
+ """
+ def __init__(self, opt_str):
+ self.opt_str = opt_str
+
+ def __str__(self):
+ return _("no such option: %s") % self.opt_str
+
+class AmbiguousOptionError (BadOptionError):
+ """
+ Raised if an ambiguous option is seen on the command line.
+ """
+ def __init__(self, opt_str, possibilities):
+ BadOptionError.__init__(self, opt_str)
+ self.possibilities = possibilities
+
+ def __str__(self):
+ return (_("ambiguous option: %s (%s?)")
+ % (self.opt_str, string.join(self.possibilities, ", ")))
+
+
+class HelpFormatter:
+
+ """
+ Abstract base class for formatting option help. OptionParser
+ instances should use one of the HelpFormatter subclasses for
+ formatting help; by default IndentedHelpFormatter is used.
+
+ Instance attributes:
+ parser : OptionParser
+ the controlling OptionParser instance
+ indent_increment : int
+ the number of columns to indent per nesting level
+ max_help_position : int
+ the maximum starting column for option help text
+ help_position : int
+ the calculated starting column for option help text;
+ initially the same as the maximum
+ width : int
+ total number of columns for output (pass None to constructor for
+ this value to be taken from the $COLUMNS environment variable)
+ level : int
+ current indentation level
+ current_indent : int
+ current indentation level (in columns)
+ help_width : int
+ number of columns available for option help text (calculated)
+ default_tag : str
+ text to replace with each option's default value, "%default"
+ by default. Set to false value to disable default value expansion.
+ option_strings : { Option : str }
+ maps Option instances to the snippet of help text explaining
+ the syntax of that option, e.g. "-h, --help" or
+ "-fFILE, --file=FILE"
+ _short_opt_fmt : str
+ format string controlling how short options with values are
+ printed in help text. Must be either "%s%s" ("-fFILE") or
+ "%s %s" ("-f FILE"), because those are the two syntaxes that
+ Optik supports.
+ _long_opt_fmt : str
+ similar but for long options; must be either "%s %s" ("--file FILE")
+ or "%s=%s" ("--file=FILE").
+ """
+
+ NO_DEFAULT_VALUE = "none"
+
+ def __init__(self,
+ indent_increment,
+ max_help_position,
+ width,
+ short_first):
+ self.parser = None
+ self.indent_increment = indent_increment
+ self.help_position = self.max_help_position = max_help_position
+ if width is None:
+ try:
+ width = int(os.environ['COLUMNS'])
+ except (KeyError, ValueError):
+ width = 80
+ width = width - 2
+ self.width = width
+ self.current_indent = 0
+ self.level = 0
+ self.help_width = None # computed later
+ self.short_first = short_first
+ self.default_tag = "%default"
+ self.option_strings = {}
+ self._short_opt_fmt = "%s %s"
+ self._long_opt_fmt = "%s=%s"
+
+ def set_parser(self, parser):
+ self.parser = parser
+
+ def set_short_opt_delimiter(self, delim):
+ if delim not in ("", " "):
+ raise ValueError(
+ "invalid metavar delimiter for short options: %r" % delim)
+ self._short_opt_fmt = "%s" + delim + "%s"
+
+ def set_long_opt_delimiter(self, delim):
+ if delim not in ("=", " "):
+ raise ValueError(
+ "invalid metavar delimiter for long options: %r" % delim)
+ self._long_opt_fmt = "%s" + delim + "%s"
+
+ def indent(self):
+ self.current_indent = self.current_indent + self.indent_increment
+ self.level = self.level + 1
+
+ def dedent(self):
+ self.current_indent = self.current_indent - self.indent_increment
+ assert self.current_indent >= 0, "Indent decreased below 0."
+ self.level = self.level - 1
+
+ def format_usage(self, usage):
+ raise NotImplementedError, "subclasses must implement"
+
+ def format_heading(self, heading):
+ raise NotImplementedError, "subclasses must implement"
+
+ def _format_text(self, text):
+ """
+ Format a paragraph of free-form text for inclusion in the
+ help output at the current indentation level.
+ """
+ text_width = self.width - self.current_indent
+ indent = " "*self.current_indent
+ return textwrap.fill(text,
+ text_width,
+ initial_indent=indent,
+ subsequent_indent=indent)
+
+ def format_description(self, description):
+ if description:
+ return self._format_text(description) + "\n"
+ else:
+ return ""
+
+ def format_epilog(self, epilog):
+ if epilog:
+ return "\n" + self._format_text(epilog) + "\n"
+ else:
+ return ""
+
+
+ def expand_default(self, option):
+ if self.parser is None or not self.default_tag:
+ return option.help
+
+ default_value = self.parser.defaults.get(option.dest)
+ if default_value is NO_DEFAULT or default_value is None:
+ default_value = self.NO_DEFAULT_VALUE
+
+ return string.replace(option.help, self.default_tag, str(default_value))
+
+ def format_option(self, option):
+ # The help for each option consists of two parts:
+ # * the opt strings and metavars
+ # eg. ("-x", or "-fFILENAME, --file=FILENAME")
+ # * the user-supplied help string
+ # eg. ("turn on expert mode", "read data from FILENAME")
+ #
+ # If possible, we write both of these on the same line:
+ # -x turn on expert mode
+ #
+ # But if the opt string list is too long, we put the help
+ # string on a second line, indented to the same column it would
+ # start in if it fit on the first line.
+ # -fFILENAME, --file=FILENAME
+ # read data from FILENAME
+ result = []
+ opts = self.option_strings[option]
+ opt_width = self.help_position - self.current_indent - 2
+ if len(opts) > opt_width:
+ opts = "%*s%s\n" % (self.current_indent, "", opts)
+ indent_first = self.help_position
+ else: # start help on same line as opts
+ opts = "%*s%-*s " % (self.current_indent, "", opt_width, opts)
+ indent_first = 0
+ result.append(opts)
+ if option.help:
+ help_text = self.expand_default(option)
+ help_lines = textwrap.wrap(help_text, self.help_width)
+ result.append("%*s%s\n" % (indent_first, "", help_lines[0]))
+ for line in help_lines[1:]:
+ result.append("%*s%s\n" % (self.help_position, "", line))
+ elif opts[-1] != "\n":
+ result.append("\n")
+ return string.join(result, "")
+
+ def store_option_strings(self, parser):
+ self.indent()
+ max_len = 0
+ for opt in parser.option_list:
+ strings = self.format_option_strings(opt)
+ self.option_strings[opt] = strings
+ max_len = max(max_len, len(strings) + self.current_indent)
+ self.indent()
+ for group in parser.option_groups:
+ for opt in group.option_list:
+ strings = self.format_option_strings(opt)
+ self.option_strings[opt] = strings
+ max_len = max(max_len, len(strings) + self.current_indent)
+ self.dedent()
+ self.dedent()
+ self.help_position = min(max_len + 2, self.max_help_position)
+ self.help_width = self.width - self.help_position
+
+ def format_option_strings(self, option):
+ """Return a comma-separated list of option strings & metavariables."""
+ if option.takes_value():
+ metavar = option.metavar or string.upper(option.dest)
+ short_opts = []
+ for sopt in option._short_opts:
+ short_opts.append(self._short_opt_fmt % (sopt, metavar))
+ long_opts = []
+ for lopt in option._long_opts:
+ long_opts.append(self._long_opt_fmt % (lopt, metavar))
+ else:
+ short_opts = option._short_opts
+ long_opts = option._long_opts
+
+ if self.short_first:
+ opts = short_opts + long_opts
+ else:
+ opts = long_opts + short_opts
+
+ return string.join(opts, ", ")
+
+class IndentedHelpFormatter (HelpFormatter):
+ """Format help with indented section bodies.
+ """
+
+ def __init__(self,
+ indent_increment=2,
+ max_help_position=24,
+ width=None,
+ short_first=1):
+ HelpFormatter.__init__(
+ self, indent_increment, max_help_position, width, short_first)
+
+ def format_usage(self, usage):
+ return _("Usage: %s\n") % usage
+
+ def format_heading(self, heading):
+ return "%*s%s:\n" % (self.current_indent, "", heading)
+
+
+class TitledHelpFormatter (HelpFormatter):
+ """Format help with underlined section headers.
+ """
+
+ def __init__(self,
+ indent_increment=0,
+ max_help_position=24,
+ width=None,
+ short_first=0):
+ HelpFormatter.__init__ (
+ self, indent_increment, max_help_position, width, short_first)
+
+ def format_usage(self, usage):
+ return "%s %s\n" % (self.format_heading(_("Usage")), usage)
+
+ def format_heading(self, heading):
+ return "%s\n%s\n" % (heading, "=-"[self.level] * len(heading))
+
+
+def _parse_num(val, type):
+ if string.lower(val[:2]) == "0x": # hexadecimal
+ radix = 16
+ elif string.lower(val[:2]) == "0b": # binary
+ radix = 2
+ val = val[2:] or "0" # have to remove "0b" prefix
+ elif val[:1] == "0": # octal
+ radix = 8
+ else: # decimal
+ radix = 10
+
+ return type(val, radix)
+
+def _parse_int(val):
+ return _parse_num(val, int)
+
+def _parse_long(val):
+ return _parse_num(val, long)
+
+try:
+ int('0', 10)
+except TypeError:
+ # Python 1.5.2 doesn't allow a radix value to be passed to int().
+ _parse_int = int
+
+try:
+ long('0', 10)
+except TypeError:
+ # Python 1.5.2 doesn't allow a radix value to be passed to long().
+ _parse_long = long
+
+_builtin_cvt = { "int" : (_parse_int, _("integer")),
+ "long" : (_parse_long, _("long integer")),
+ "float" : (float, _("floating-point")),
+ "complex" : (complex, _("complex")) }
+
+def check_builtin(option, opt, value):
+ (cvt, what) = _builtin_cvt[option.type]
+ try:
+ return cvt(value)
+ except ValueError:
+ raise OptionValueError(
+ _("option %s: invalid %s value: %r") % (opt, what, value))
+
+def check_choice(option, opt, value):
+ if value in option.choices:
+ return value
+ else:
+ choices = string.join(map(repr, option.choices), ", ")
+ raise OptionValueError(
+ _("option %s: invalid choice: %r (choose from %s)")
+ % (opt, value, choices))
+
+# Not supplying a default is different from a default of None,
+# so we need an explicit "not supplied" value.
+NO_DEFAULT = ("NO", "DEFAULT")
+
+
+class Option:
+ """
+ Instance attributes:
+ _short_opts : [string]
+ _long_opts : [string]
+
+ action : string
+ type : string
+ dest : string
+ default : any
+ nargs : int
+ const : any
+ choices : [string]
+ callback : function
+ callback_args : (any*)
+ callback_kwargs : { string : any }
+ help : string
+ metavar : string
+ """
+
+ # The list of instance attributes that may be set through
+ # keyword args to the constructor.
+ ATTRS = ['action',
+ 'type',
+ 'dest',
+ 'default',
+ 'nargs',
+ 'const',
+ 'choices',
+ 'callback',
+ 'callback_args',
+ 'callback_kwargs',
+ 'help',
+ 'metavar']
+
+ # The set of actions allowed by option parsers. Explicitly listed
+ # here so the constructor can validate its arguments.
+ ACTIONS = ("store",
+ "store_const",
+ "store_true",
+ "store_false",
+ "append",
+ "append_const",
+ "count",
+ "callback",
+ "help",
+ "version")
+
+ # The set of actions that involve storing a value somewhere;
+ # also listed just for constructor argument validation. (If
+ # the action is one of these, there must be a destination.)
+ STORE_ACTIONS = ("store",
+ "store_const",
+ "store_true",
+ "store_false",
+ "append",
+ "append_const",
+ "count")
+
+ # The set of actions for which it makes sense to supply a value
+ # type, ie. which may consume an argument from the command line.
+ TYPED_ACTIONS = ("store",
+ "append",
+ "callback")
+
+ # The set of actions which *require* a value type, ie. that
+ # always consume an argument from the command line.
+ ALWAYS_TYPED_ACTIONS = ("store",
+ "append")
+
+ # The set of actions which take a 'const' attribute.
+ CONST_ACTIONS = ("store_const",
+ "append_const")
+
+ # The set of known types for option parsers. Again, listed here for
+ # constructor argument validation.
+ TYPES = ("string", "int", "long", "float", "complex", "choice")
+
+ # Dictionary of argument checking functions, which convert and
+ # validate option arguments according to the option type.
+ #
+ # Signature of checking functions is:
+ # check(option : Option, opt : string, value : string) -> any
+ # where
+ # option is the Option instance calling the checker
+ # opt is the actual option seen on the command-line
+ # (eg. "-a", "--file")
+ # value is the option argument seen on the command-line
+ #
+ # The return value should be in the appropriate Python type
+ # for option.type -- eg. an integer if option.type == "int".
+ #
+ # If no checker is defined for a type, arguments will be
+ # unchecked and remain strings.
+ TYPE_CHECKER = { "int" : check_builtin,
+ "long" : check_builtin,
+ "float" : check_builtin,
+ "complex": check_builtin,
+ "choice" : check_choice,
+ }
+
+
+ # CHECK_METHODS is a list of unbound method objects; they are called
+ # by the constructor, in order, after all attributes are
+ # initialized. The list is created and filled in later, after all
+ # the methods are actually defined. (I just put it here because I
+ # like to define and document all class attributes in the same
+ # place.) Subclasses that add another _check_*() method should
+ # define their own CHECK_METHODS list that adds their check method
+ # to those from this class.
+ CHECK_METHODS = None
+
+
+ # -- Constructor/initialization methods ----------------------------
+
+ def __init__(self, *opts, **attrs):
+ # Set _short_opts, _long_opts attrs from 'opts' tuple.
+ # Have to be set now, in case no option strings are supplied.
+ self._short_opts = []
+ self._long_opts = []
+ opts = self._check_opt_strings(opts)
+ self._set_opt_strings(opts)
+
+ # Set all other attrs (action, type, etc.) from 'attrs' dict
+ self._set_attrs(attrs)
+
+ # Check all the attributes we just set. There are lots of
+ # complicated interdependencies, but luckily they can be farmed
+ # out to the _check_*() methods listed in CHECK_METHODS -- which
+ # could be handy for subclasses! The one thing these all share
+ # is that they raise OptionError if they discover a problem.
+ for checker in self.CHECK_METHODS:
+ checker(self)
+
+ def _check_opt_strings(self, opts):
+ # Filter out None because early versions of Optik had exactly
+ # one short option and one long option, either of which
+ # could be None.
+ opts = filter(None, opts)
+ if not opts:
+ raise TypeError("at least one option string must be supplied")
+ return opts
+
+ def _set_opt_strings(self, opts):
+ for opt in opts:
+ if len(opt) < 2:
+ raise OptionError(
+ "invalid option string %r: "
+ "must be at least two characters long" % opt, self)
+ elif len(opt) == 2:
+ if not (opt[0] == "-" and opt[1] != "-"):
+ raise OptionError(
+ "invalid short option string %r: "
+ "must be of the form -x, (x any non-dash char)" % opt,
+ self)
+ self._short_opts.append(opt)
+ else:
+ if not (opt[0:2] == "--" and opt[2] != "-"):
+ raise OptionError(
+ "invalid long option string %r: "
+ "must start with --, followed by non-dash" % opt,
+ self)
+ self._long_opts.append(opt)
+
+ def _set_attrs(self, attrs):
+ for attr in self.ATTRS:
+ if attrs.has_key(attr):
+ setattr(self, attr, attrs[attr])
+ del attrs[attr]
+ else:
+ if attr == 'default':
+ setattr(self, attr, NO_DEFAULT)
+ else:
+ setattr(self, attr, None)
+ if attrs:
+ attrs = attrs.keys()
+ attrs.sort()
+ raise OptionError(
+ "invalid keyword arguments: %s" % string.join(attrs, ", "),
+ self)
+
+
+ # -- Constructor validation methods --------------------------------
+
+ def _check_action(self):
+ if self.action is None:
+ self.action = "store"
+ elif self.action not in self.ACTIONS:
+ raise OptionError("invalid action: %r" % self.action, self)
+
+ def _check_type(self):
+ if self.type is None:
+ if self.action in self.ALWAYS_TYPED_ACTIONS:
+ if self.choices is not None:
+ # The "choices" attribute implies "choice" type.
+ self.type = "choice"
+ else:
+ # No type given? "string" is the most sensible default.
+ self.type = "string"
+ else:
+ # Allow type objects or builtin type conversion functions
+ # (int, str, etc.) as an alternative to their names. (The
+ # complicated check of __builtin__ is only necessary for
+ # Python 2.1 and earlier, and is short-circuited by the
+ # first check on modern Pythons.)
+ import __builtin__
+ if ( type(self.type) is types.TypeType or
+ (hasattr(self.type, "__name__") and
+ getattr(__builtin__, self.type.__name__, None) is self.type) ):
+ self.type = self.type.__name__
+
+ if self.type == "str":
+ self.type = "string"
+
+ if self.type not in self.TYPES:
+ raise OptionError("invalid option type: %r" % self.type, self)
+ if self.action not in self.TYPED_ACTIONS:
+ raise OptionError(
+ "must not supply a type for action %r" % self.action, self)
+
+ def _check_choice(self):
+ if self.type == "choice":
+ if self.choices is None:
+ raise OptionError(
+ "must supply a list of choices for type 'choice'", self)
+ elif type(self.choices) not in (types.TupleType, types.ListType):
+ raise OptionError(
+ "choices must be a list of strings ('%s' supplied)"
+ % string.split(str(type(self.choices)), "'")[1], self)
+ elif self.choices is not None:
+ raise OptionError(
+ "must not supply choices for type %r" % self.type, self)
+
+ def _check_dest(self):
+ # No destination given, and we need one for this action. The
+ # self.type check is for callbacks that take a value.
+ takes_value = (self.action in self.STORE_ACTIONS or
+ self.type is not None)
+ if self.dest is None and takes_value:
+
+ # Glean a destination from the first long option string,
+ # or from the first short option string if no long options.
+ if self._long_opts:
+ # eg. "--foo-bar" -> "foo_bar"
+ self.dest = string.replace(self._long_opts[0][2:], '-', '_')
+ else:
+ self.dest = self._short_opts[0][1]
+
+ def _check_const(self):
+ if self.action not in self.CONST_ACTIONS and self.const is not None:
+ raise OptionError(
+ "'const' must not be supplied for action %r" % self.action,
+ self)
+
+ def _check_nargs(self):
+ if self.action in self.TYPED_ACTIONS:
+ if self.nargs is None:
+ self.nargs = 1
+ elif self.nargs is not None:
+ raise OptionError(
+ "'nargs' must not be supplied for action %r" % self.action,
+ self)
+
+ def _check_callback(self):
+ if self.action == "callback":
+ if not callable(self.callback):
+ raise OptionError(
+ "callback not callable: %r" % self.callback, self)
+ if (self.callback_args is not None and
+ type(self.callback_args) is not types.TupleType):
+ raise OptionError(
+ "callback_args, if supplied, must be a tuple: not %r"
+ % self.callback_args, self)
+ if (self.callback_kwargs is not None and
+ type(self.callback_kwargs) is not types.DictType):
+ raise OptionError(
+ "callback_kwargs, if supplied, must be a dict: not %r"
+ % self.callback_kwargs, self)
+ else:
+ if self.callback is not None:
+ raise OptionError(
+ "callback supplied (%r) for non-callback option"
+ % self.callback, self)
+ if self.callback_args is not None:
+ raise OptionError(
+ "callback_args supplied for non-callback option", self)
+ if self.callback_kwargs is not None:
+ raise OptionError(
+ "callback_kwargs supplied for non-callback option", self)
+
+
+ CHECK_METHODS = [_check_action,
+ _check_type,
+ _check_choice,
+ _check_dest,
+ _check_const,
+ _check_nargs,
+ _check_callback]
+
+
+ # -- Miscellaneous methods -----------------------------------------
+
+ def __str__(self):
+ return string.join(self._short_opts + self._long_opts, "/")
+
+ __repr__ = _repr
+
+ def takes_value(self):
+ return self.type is not None
+
+ def get_opt_string(self):
+ if self._long_opts:
+ return self._long_opts[0]
+ else:
+ return self._short_opts[0]
+
+
+ # -- Processing methods --------------------------------------------
+
+ def check_value(self, opt, value):
+ checker = self.TYPE_CHECKER.get(self.type)
+ if checker is None:
+ return value
+ else:
+ return checker(self, opt, value)
+
+ def convert_value(self, opt, value):
+ if value is not None:
+ if self.nargs == 1:
+ return self.check_value(opt, value)
+ else:
+ return tuple(map(lambda v, o=opt, s=self: s.check_value(o, v), value))
+
+ def process(self, opt, value, values, parser):
+
+ # First, convert the value(s) to the right type. Howl if any
+ # value(s) are bogus.
+ value = self.convert_value(opt, value)
+
+ # And then take whatever action is expected of us.
+ # This is a separate method to make life easier for
+ # subclasses to add new actions.
+ return self.take_action(
+ self.action, self.dest, opt, value, values, parser)
+
+ def take_action(self, action, dest, opt, value, values, parser):
+ if action == "store":
+ setattr(values, dest, value)
+ elif action == "store_const":
+ setattr(values, dest, self.const)
+ elif action == "store_true":
+ setattr(values, dest, True)
+ elif action == "store_false":
+ setattr(values, dest, False)
+ elif action == "append":
+ values.ensure_value(dest, []).append(value)
+ elif action == "append_const":
+ values.ensure_value(dest, []).append(self.const)
+ elif action == "count":
+ setattr(values, dest, values.ensure_value(dest, 0) + 1)
+ elif action == "callback":
+ args = self.callback_args or ()
+ kwargs = self.callback_kwargs or {}
+ apply(self.callback, (self, opt, value, parser,) + args, kwargs)
+ elif action == "help":
+ parser.print_help()
+ parser.exit()
+ elif action == "version":
+ parser.print_version()
+ parser.exit()
+ else:
+ raise RuntimeError, "unknown action %r" % self.action
+
+ return 1
+
+# class Option
+
+
+SUPPRESS_HELP = "SUPPRESS"+"HELP"
+SUPPRESS_USAGE = "SUPPRESS"+"USAGE"
+
+# For compatibility with Python 2.2
+try:
+ True, False
+except NameError:
+ (True, False) = (1, 0)
+
+try:
+ types.UnicodeType
+except AttributeError:
+ def isbasestring(x):
+ return isinstance(x, types.StringType)
+else:
+ def isbasestring(x):
+ return isinstance(x, types.StringType) or isinstance(x, types.UnicodeType)
+
+class Values:
+
+ def __init__(self, defaults=None):
+ if defaults:
+ for (attr, val) in defaults.items():
+ setattr(self, attr, val)
+
+ def __str__(self):
+ return str(self.__dict__)
+
+ __repr__ = _repr
+
+ def __cmp__(self, other):
+ if isinstance(other, Values):
+ return cmp(self.__dict__, other.__dict__)
+ elif isinstance(other, types.DictType):
+ return cmp(self.__dict__, other)
+ else:
+ return -1
+
+ def _update_careful(self, dict):
+ """
+ Update the option values from an arbitrary dictionary, but only
+ use keys from dict that already have a corresponding attribute
+ in self. Any keys in dict without a corresponding attribute
+ are silently ignored.
+ """
+ for attr in dir(self):
+ if dict.has_key(attr):
+ dval = dict[attr]
+ if dval is not None:
+ setattr(self, attr, dval)
+
+ def _update_loose(self, dict):
+ """
+ Update the option values from an arbitrary dictionary,
+ using all keys from the dictionary regardless of whether
+ they have a corresponding attribute in self or not.
+ """
+ self.__dict__.update(dict)
+
+ def _update(self, dict, mode):
+ if mode == "careful":
+ self._update_careful(dict)
+ elif mode == "loose":
+ self._update_loose(dict)
+ else:
+ raise ValueError, "invalid update mode: %r" % mode
+
+ def read_module(self, modname, mode="careful"):
+ __import__(modname)
+ mod = sys.modules[modname]
+ self._update(vars(mod), mode)
+
+ def read_file(self, filename, mode="careful"):
+ vars = {}
+ exec open(filename, 'rU').read() in vars
+ self._update(vars, mode)
+
+ def ensure_value(self, attr, value):
+ if not hasattr(self, attr) or getattr(self, attr) is None:
+ setattr(self, attr, value)
+ return getattr(self, attr)
+
+
+class OptionContainer:
+
+ """
+ Abstract base class.
+
+ Class attributes:
+ standard_option_list : [Option]
+ list of standard options that will be accepted by all instances
+ of this parser class (intended to be overridden by subclasses).
+
+ Instance attributes:
+ option_list : [Option]
+ the list of Option objects contained by this OptionContainer
+ _short_opt : { string : Option }
+ dictionary mapping short option strings, eg. "-f" or "-X",
+ to the Option instances that implement them. If an Option
+ has multiple short option strings, it will appears in this
+ dictionary multiple times. [1]
+ _long_opt : { string : Option }
+ dictionary mapping long option strings, eg. "--file" or
+ "--exclude", to the Option instances that implement them.
+ Again, a given Option can occur multiple times in this
+ dictionary. [1]
+ defaults : { string : any }
+ dictionary mapping option destination names to default
+ values for each destination [1]
+
+ [1] These mappings are common to (shared by) all components of the
+ controlling OptionParser, where they are initially created.
+
+ """
+
+ def __init__(self, option_class, conflict_handler, description):
+ # Initialize the option list and related data structures.
+ # This method must be provided by subclasses, and it must
+ # initialize at least the following instance attributes:
+ # option_list, _short_opt, _long_opt, defaults.
+ self._create_option_list()
+
+ self.option_class = option_class
+ self.set_conflict_handler(conflict_handler)
+ self.set_description(description)
+
+ def _create_option_mappings(self):
+ # For use by OptionParser constructor -- create the master
+ # option mappings used by this OptionParser and all
+ # OptionGroups that it owns.
+ self._short_opt = {} # single letter -> Option instance
+ self._long_opt = {} # long option -> Option instance
+ self.defaults = {} # maps option dest -> default value
+
+
+ def _share_option_mappings(self, parser):
+ # For use by OptionGroup constructor -- use shared option
+ # mappings from the OptionParser that owns this OptionGroup.
+ self._short_opt = parser._short_opt
+ self._long_opt = parser._long_opt
+ self.defaults = parser.defaults
+
+ def set_conflict_handler(self, handler):
+ if handler not in ("error", "resolve"):
+ raise ValueError, "invalid conflict_resolution value %r" % handler
+ self.conflict_handler = handler
+
+ def set_description(self, description):
+ self.description = description
+
+ def get_description(self):
+ return self.description
+
+
+ def destroy(self):
+ """see OptionParser.destroy()."""
+ del self._short_opt
+ del self._long_opt
+ del self.defaults
+
+
+ # -- Option-adding methods -----------------------------------------
+
+ def _check_conflict(self, option):
+ conflict_opts = []
+ for opt in option._short_opts:
+ if self._short_opt.has_key(opt):
+ conflict_opts.append((opt, self._short_opt[opt]))
+ for opt in option._long_opts:
+ if self._long_opt.has_key(opt):
+ conflict_opts.append((opt, self._long_opt[opt]))
+
+ if conflict_opts:
+ handler = self.conflict_handler
+ if handler == "error":
+ raise OptionConflictError(
+ "conflicting option string(s): %s"
+ % string.join(map(lambda co: co[0], conflict_opts), ", "),
+ option)
+ elif handler == "resolve":
+ for (opt, c_option) in conflict_opts:
+ if opt[:2] == "--":
+ c_option._long_opts.remove(opt)
+ del self._long_opt[opt]
+ else:
+ c_option._short_opts.remove(opt)
+ del self._short_opt[opt]
+ if not (c_option._short_opts or c_option._long_opts):
+ c_option.container.option_list.remove(c_option)
+
+ def add_option(self, *args, **kwargs):
+ """add_option(Option)
+ add_option(opt_str, ..., kwarg=val, ...)
+ """
+ if type(args[0]) is types.StringType:
+ option = apply(self.option_class, args, kwargs)
+ elif len(args) == 1 and not kwargs:
+ option = args[0]
+ if not isinstance(option, Option):
+ raise TypeError, "not an Option instance: %r" % option
+ else:
+ raise TypeError, "invalid arguments"
+
+ self._check_conflict(option)
+
+ self.option_list.append(option)
+ option.container = self
+ for opt in option._short_opts:
+ self._short_opt[opt] = option
+ for opt in option._long_opts:
+ self._long_opt[opt] = option
+
+ if option.dest is not None: # option has a dest, we need a default
+ if option.default is not NO_DEFAULT:
+ self.defaults[option.dest] = option.default
+ elif not self.defaults.has_key(option.dest):
+ self.defaults[option.dest] = None
+
+ return option
+
+ def add_options(self, option_list):
+ for option in option_list:
+ self.add_option(option)
+
+ # -- Option query/removal methods ----------------------------------
+
+ def get_option(self, opt_str):
+ return (self._short_opt.get(opt_str) or
+ self._long_opt.get(opt_str))
+
+ def has_option(self, opt_str):
+ return (self._short_opt.has_key(opt_str) or
+ self._long_opt.has_key(opt_str))
+
+ def remove_option(self, opt_str):
+ option = self._short_opt.get(opt_str)
+ if option is None:
+ option = self._long_opt.get(opt_str)
+ if option is None:
+ raise ValueError("no such option %r" % opt_str)
+
+ for opt in option._short_opts:
+ del self._short_opt[opt]
+ for opt in option._long_opts:
+ del self._long_opt[opt]
+ option.container.option_list.remove(option)
+
+
+ # -- Help-formatting methods ---------------------------------------
+
+ def format_option_help(self, formatter):
+ if not self.option_list:
+ return ""
+ result = []
+ for option in self.option_list:
+ if not option.help is SUPPRESS_HELP:
+ result.append(formatter.format_option(option))
+ return string.join(result, "")
+
+ def format_description(self, formatter):
+ return formatter.format_description(self.get_description())
+
+ def format_help(self, formatter):
+ result = []
+ if self.description:
+ result.append(self.format_description(formatter))
+ if self.option_list:
+ result.append(self.format_option_help(formatter))
+ return string.join(result, "\n")
+
+
+class OptionGroup (OptionContainer):
+
+ def __init__(self, parser, title, description=None):
+ self.parser = parser
+ OptionContainer.__init__(
+ self, parser.option_class, parser.conflict_handler, description)
+ self.title = title
+
+ def _create_option_list(self):
+ self.option_list = []
+ self._share_option_mappings(self.parser)
+
+ def set_title(self, title):
+ self.title = title
+
+ def destroy(self):
+ """see OptionParser.destroy()."""
+ OptionContainer.destroy(self)
+ del self.option_list
+
+ # -- Help-formatting methods ---------------------------------------
+
+ def format_help(self, formatter):
+ result = formatter.format_heading(self.title)
+ formatter.indent()
+ result = result + OptionContainer.format_help(self, formatter)
+ formatter.dedent()
+ return result
+
+
+class OptionParser (OptionContainer):
+
+ """
+ Class attributes:
+ standard_option_list : [Option]
+ list of standard options that will be accepted by all instances
+ of this parser class (intended to be overridden by subclasses).
+
+ Instance attributes:
+ usage : string
+ a usage string for your program. Before it is displayed
+ to the user, "%prog" will be expanded to the name of
+ your program (self.prog or os.path.basename(sys.argv[0])).
+ prog : string
+ the name of the current program (to override
+ os.path.basename(sys.argv[0])).
+ epilog : string
+ paragraph of help text to print after option help
+
+ option_groups : [OptionGroup]
+ list of option groups in this parser (option groups are
+ irrelevant for parsing the command-line, but very useful
+ for generating help)
+
+ allow_interspersed_args : bool = true
+ if true, positional arguments may be interspersed with options.
+ Assuming -a and -b each take a single argument, the command-line
+ -ablah foo bar -bboo baz
+ will be interpreted the same as
+ -ablah -bboo -- foo bar baz
+ If this flag were false, that command line would be interpreted as
+ -ablah -- foo bar -bboo baz
+ -- ie. we stop processing options as soon as we see the first
+ non-option argument. (This is the tradition followed by
+ Python's getopt module, Perl's Getopt::Std, and other argument-
+ parsing libraries, but it is generally annoying to users.)
+
+ process_default_values : bool = true
+ if true, option default values are processed similarly to option
+ values from the command line: that is, they are passed to the
+ type-checking function for the option's type (as long as the
+ default value is a string). (This really only matters if you
+ have defined custom types; see SF bug #955889.) Set it to false
+ to restore the behaviour of Optik 1.4.1 and earlier.
+
+ rargs : [string]
+ the argument list currently being parsed. Only set when
+ parse_args() is active, and continually trimmed down as
+ we consume arguments. Mainly there for the benefit of
+ callback options.
+ largs : [string]
+ the list of leftover arguments that we have skipped while
+ parsing options. If allow_interspersed_args is false, this
+ list is always empty.
+ values : Values
+ the set of option values currently being accumulated. Only
+ set when parse_args() is active. Also mainly for callbacks.
+
+ Because of the 'rargs', 'largs', and 'values' attributes,
+ OptionParser is not thread-safe. If, for some perverse reason, you
+ need to parse command-line arguments simultaneously in different
+ threads, use different OptionParser instances.
+
+ """
+
+ standard_option_list = []
+
+ def __init__(self,
+ usage=None,
+ option_list=None,
+ option_class=Option,
+ version=None,
+ conflict_handler="error",
+ description=None,
+ formatter=None,
+ add_help_option=True,
+ prog=None,
+ epilog=None):
+ OptionContainer.__init__(
+ self, option_class, conflict_handler, description)
+ self.set_usage(usage)
+ self.prog = prog
+ self.version = version
+ self.allow_interspersed_args = True
+ self.process_default_values = True
+ if formatter is None:
+ formatter = IndentedHelpFormatter()
+ self.formatter = formatter
+ self.formatter.set_parser(self)
+ self.epilog = epilog
+
+ # Populate the option list; initial sources are the
+ # standard_option_list class attribute, the 'option_list'
+ # argument, and (if applicable) the _add_version_option() and
+ # _add_help_option() methods.
+ self._populate_option_list(option_list,
+ add_help=add_help_option)
+
+ self._init_parsing_state()
+
+
+ def destroy(self):
+ """
+ Declare that you are done with this OptionParser. This cleans up
+ reference cycles so the OptionParser (and all objects referenced by
+ it) can be garbage-collected promptly. After calling destroy(), the
+ OptionParser is unusable.
+ """
+ OptionContainer.destroy(self)
+ for group in self.option_groups:
+ group.destroy()
+ del self.option_list
+ del self.option_groups
+ del self.formatter
+
+
+ # -- Private methods -----------------------------------------------
+ # (used by our or OptionContainer's constructor)
+
+ def _create_option_list(self):
+ self.option_list = []
+ self.option_groups = []
+ self._create_option_mappings()
+
+ def _add_help_option(self):
+ self.add_option("-h", "--help",
+ action="help",
+ help=_("show this help message and exit"))
+
+ def _add_version_option(self):
+ self.add_option("--version",
+ action="version",
+ help=_("show program's version number and exit"))
+
+ def _populate_option_list(self, option_list, add_help=True):
+ if self.standard_option_list:
+ self.add_options(self.standard_option_list)
+ if option_list:
+ self.add_options(option_list)
+ if self.version:
+ self._add_version_option()
+ if add_help:
+ self._add_help_option()
+
+ def _init_parsing_state(self):
+ # These are set in parse_args() for the convenience of callbacks.
+ self.rargs = None
+ self.largs = None
+ self.values = None
+
+
+ # -- Simple modifier methods ---------------------------------------
+
+ def set_usage(self, usage):
+ if usage is None:
+ self.usage = _("%prog [options]")
+ elif usage is SUPPRESS_USAGE:
+ self.usage = None
+ # For backwards compatibility with Optik 1.3 and earlier.
+ elif string.lower(usage)[:7] == "usage: ":
+ self.usage = usage[7:]
+ else:
+ self.usage = usage
+
+ def enable_interspersed_args(self):
+ self.allow_interspersed_args = True
+
+ def disable_interspersed_args(self):
+ self.allow_interspersed_args = False
+
+ def set_process_default_values(self, process):
+ self.process_default_values = process
+
+ def set_default(self, dest, value):
+ self.defaults[dest] = value
+
+ def set_defaults(self, **kwargs):
+ self.defaults.update(kwargs)
+
+ def _get_all_options(self):
+ options = self.option_list[:]
+ for group in self.option_groups:
+ options.extend(group.option_list)
+ return options
+
+ def get_default_values(self):
+ if not self.process_default_values:
+ # Old, pre-Optik 1.5 behaviour.
+ return Values(self.defaults)
+
+ defaults = self.defaults.copy()
+ for option in self._get_all_options():
+ default = defaults.get(option.dest)
+ if isbasestring(default):
+ opt_str = option.get_opt_string()
+ defaults[option.dest] = option.check_value(opt_str, default)
+
+ return Values(defaults)
+
+
+ # -- OptionGroup methods -------------------------------------------
+
+ def add_option_group(self, *args, **kwargs):
+ # XXX lots of overlap with OptionContainer.add_option()
+ if type(args[0]) is types.StringType:
+ group = apply(OptionGroup, (self,) + args, kwargs)
+ elif len(args) == 1 and not kwargs:
+ group = args[0]
+ if not isinstance(group, OptionGroup):
+ raise TypeError, "not an OptionGroup instance: %r" % group
+ if group.parser is not self:
+ raise ValueError, "invalid OptionGroup (wrong parser)"
+ else:
+ raise TypeError, "invalid arguments"
+
+ self.option_groups.append(group)
+ return group
+
+ def get_option_group(self, opt_str):
+ option = (self._short_opt.get(opt_str) or
+ self._long_opt.get(opt_str))
+ if option and option.container is not self:
+ return option.container
+ return None
+
+
+ # -- Option-parsing methods ----------------------------------------
+
+ def _get_args(self, args):
+ if args is None:
+ return sys.argv[1:]
+ else:
+ return args[:] # don't modify caller's list
+
+ def parse_args(self, args=None, values=None):
+ """
+ parse_args(args : [string] = sys.argv[1:],
+ values : Values = None)
+ -> (values : Values, args : [string])
+
+ Parse the command-line options found in 'args' (default:
+ sys.argv[1:]). Any errors result in a call to 'error()', which
+ by default prints the usage message to stderr and calls
+ sys.exit() with an error message. On success returns a pair
+ (values, args) where 'values' is a Values instance (with all
+ your option values) and 'args' is the list of arguments left
+ over after parsing options.
+ """
+ rargs = self._get_args(args)
+ if values is None:
+ values = self.get_default_values()
+
+ # Store the halves of the argument list as attributes for the
+ # convenience of callbacks:
+ # rargs
+ # the rest of the command-line (the "r" stands for
+ # "remaining" or "right-hand")
+ # largs
+ # the leftover arguments -- ie. what's left after removing
+ # options and their arguments (the "l" stands for "leftover"
+ # or "left-hand")
+ self.rargs = rargs
+ self.largs = largs = []
+ self.values = values
+
+ try:
+ stop = self._process_args(largs, rargs, values)
+ except (BadOptionError, OptionValueError), err:
+ self.error(str(err))
+
+ args = largs + rargs
+ return self.check_values(values, args)
+
+ def check_values(self, values, args):
+ """
+ check_values(values : Values, args : [string])
+ -> (values : Values, args : [string])
+
+ Check that the supplied option values and leftover arguments are
+ valid. Returns the option values and leftover arguments
+ (possibly adjusted, possibly completely new -- whatever you
+ like). Default implementation just returns the passed-in
+ values; subclasses may override as desired.
+ """
+ return (values, args)
+
+ def _process_args(self, largs, rargs, values):
+ """_process_args(largs : [string],
+ rargs : [string],
+ values : Values)
+
+ Process command-line arguments and populate 'values', consuming
+ options and arguments from 'rargs'. If 'allow_interspersed_args' is
+ false, stop at the first non-option argument. If true, accumulate any
+ interspersed non-option arguments in 'largs'.
+ """
+ while rargs:
+ arg = rargs[0]
+ # We handle bare "--" explicitly, and bare "-" is handled by the
+ # standard arg handler since the short arg case ensures that the
+ # len of the opt string is greater than 1.
+ if arg == "--":
+ del rargs[0]
+ return
+ elif arg[0:2] == "--":
+ # process a single long option (possibly with value(s))
+ self._process_long_opt(rargs, values)
+ elif arg[:1] == "-" and len(arg) > 1:
+ # process a cluster of short options (possibly with
+ # value(s) for the last one only)
+ self._process_short_opts(rargs, values)
+ elif self.allow_interspersed_args:
+ largs.append(arg)
+ del rargs[0]
+ else:
+ return # stop now, leave this arg in rargs
+
+ # Say this is the original argument list:
+ # [arg0, arg1, ..., arg(i-1), arg(i), arg(i+1), ..., arg(N-1)]
+ # ^
+ # (we are about to process arg(i)).
+ #
+ # Then rargs is [arg(i), ..., arg(N-1)] and largs is a *subset* of
+ # [arg0, ..., arg(i-1)] (any options and their arguments will have
+ # been removed from largs).
+ #
+ # The while loop will usually consume 1 or more arguments per pass.
+ # If it consumes 1 (eg. arg is an option that takes no arguments),
+ # then after _process_arg() is done the situation is:
+ #
+ # largs = subset of [arg0, ..., arg(i)]
+ # rargs = [arg(i+1), ..., arg(N-1)]
+ #
+ # If allow_interspersed_args is false, largs will always be
+ # *empty* -- still a subset of [arg0, ..., arg(i-1)], but
+ # not a very interesting subset!
+
+ def _match_long_opt(self, opt):
+ """_match_long_opt(opt : string) -> string
+
+ Determine which long option string 'opt' matches, ie. which one
+ it is an unambiguous abbreviation for. Raises BadOptionError if
+ 'opt' doesn't unambiguously match any long option string.
+ """
+ return _match_abbrev(opt, self._long_opt)
+
+ def _process_long_opt(self, rargs, values):
+ arg = rargs.pop(0)
+
+ # Value explicitly attached to arg? Pretend it's the next
+ # argument.
+ if "=" in arg:
+ (opt, next_arg) = string.split(arg, "=", 1)
+ rargs.insert(0, next_arg)
+ had_explicit_value = True
+ else:
+ opt = arg
+ had_explicit_value = False
+
+ opt = self._match_long_opt(opt)
+ option = self._long_opt[opt]
+ if option.takes_value():
+ nargs = option.nargs
+ if len(rargs) < nargs:
+ if nargs == 1:
+ self.error(_("%s option requires an argument") % opt)
+ else:
+ self.error(_("%s option requires %d arguments")
+ % (opt, nargs))
+ elif nargs == 1:
+ value = rargs.pop(0)
+ else:
+ value = tuple(rargs[0:nargs])
+ del rargs[0:nargs]
+
+ elif had_explicit_value:
+ self.error(_("%s option does not take a value") % opt)
+
+ else:
+ value = None
+
+ option.process(opt, value, values, self)
+
+ def _process_short_opts(self, rargs, values):
+ arg = rargs.pop(0)
+ stop = False
+ i = 1
+ for ch in arg[1:]:
+ opt = "-" + ch
+ option = self._short_opt.get(opt)
+ i = i + 1 # we have consumed a character
+
+ if not option:
+ raise BadOptionError(opt)
+ if option.takes_value():
+ # Any characters left in arg? Pretend they're the
+ # next arg, and stop consuming characters of arg.
+ if i < len(arg):
+ rargs.insert(0, arg[i:])
+ stop = True
+
+ nargs = option.nargs
+ if len(rargs) < nargs:
+ if nargs == 1:
+ self.error(_("%s option requires an argument") % opt)
+ else:
+ self.error(_("%s option requires %d arguments")
+ % (opt, nargs))
+ elif nargs == 1:
+ value = rargs.pop(0)
+ else:
+ value = tuple(rargs[0:nargs])
+ del rargs[0:nargs]
+
+ else: # option doesn't take a value
+ value = None
+
+ option.process(opt, value, values, self)
+
+ if stop:
+ break
+
+
+ # -- Feedback methods ----------------------------------------------
+
+ def get_prog_name(self):
+ if self.prog is None:
+ return os.path.basename(sys.argv[0])
+ else:
+ return self.prog
+
+ def expand_prog_name(self, s):
+ return string.replace(s, "%prog", self.get_prog_name())
+
+ def get_description(self):
+ return self.expand_prog_name(self.description)
+
+ def exit(self, status=0, msg=None):
+ if msg:
+ sys.stderr.write(msg)
+ sys.exit(status)
+
+ def error(self, msg):
+ """error(msg : string)
+
+ Print a usage message incorporating 'msg' to stderr and exit.
+ If you override this in a subclass, it should not return -- it
+ should either exit or raise an exception.
+ """
+ self.print_usage(sys.stderr)
+ self.exit(2, "%s: error: %s\n" % (self.get_prog_name(), msg))
+
+ def get_usage(self):
+ if self.usage:
+ return self.formatter.format_usage(
+ self.expand_prog_name(self.usage))
+ else:
+ return ""
+
+ def print_usage(self, file=None):
+ """print_usage(file : file = stdout)
+
+ Print the usage message for the current program (self.usage) to
+ 'file' (default stdout). Any occurrence of the string "%prog" in
+ self.usage is replaced with the name of the current program
+ (basename of sys.argv[0]). Does nothing if self.usage is empty
+ or not defined.
+ """
+ if file is None:
+ file = sys.stdout
+ if self.usage:
+ file.write(self.get_usage() + '\n')
+
+ def get_version(self):
+ if self.version:
+ return self.expand_prog_name(self.version)
+ else:
+ return ""
+
+ def print_version(self, file=None):
+ """print_version(file : file = stdout)
+
+ Print the version message for this program (self.version) to
+ 'file' (default stdout). As with print_usage(), any occurrence
+ of "%prog" in self.version is replaced by the current program's
+ name. Does nothing if self.version is empty or undefined.
+ """
+ if file is None:
+ file = sys.stdout
+ if self.version:
+ file.write(self.get_version() + '\n')
+
+ def format_option_help(self, formatter=None):
+ if formatter is None:
+ formatter = self.formatter
+ formatter.store_option_strings(self)
+ result = []
+ result.append(formatter.format_heading(_("Options")))
+ formatter.indent()
+ if self.option_list:
+ result.append(OptionContainer.format_option_help(self, formatter))
+ result.append("\n")
+ for group in self.option_groups:
+ result.append(group.format_help(formatter))
+ result.append("\n")
+ formatter.dedent()
+ # Drop the last "\n", or the header if no options or option groups:
+ return string.join(result[:-1], "")
+
+ def format_epilog(self, formatter):
+ return formatter.format_epilog(self.epilog)
+
+ def format_help(self, formatter=None):
+ if formatter is None:
+ formatter = self.formatter
+ result = []
+ if self.usage:
+ result.append(self.get_usage() + "\n")
+ if self.description:
+ result.append(self.format_description(formatter) + "\n")
+ result.append(self.format_option_help(formatter))
+ result.append(self.format_epilog(formatter))
+ return string.join(result, "")
+
+ # used by test suite
+ def _get_encoding(self, file):
+ encoding = getattr(file, "encoding", None)
+ if not encoding:
+ encoding = sys.getdefaultencoding()
+ return encoding
+
+ def print_help(self, file=None):
+ """print_help(file : file = stdout)
+
+ Print an extended help message, listing all options and any
+ help text provided with them, to 'file' (default stdout).
+ """
+ if file is None:
+ file = sys.stdout
+ encoding = self._get_encoding(file)
+ file.write(encode_wrapper(self.format_help(), encoding, "replace"))
+
+# class OptionParser
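A minimal usage sketch of this compatibility OptionParser (illustrative only; the import path is an assumption, since at runtime SCons's compat machinery would normally make the module reachable by its standard-library name):

    from SCons.compat import _scons_optparse as optparse   # assumed import path

    parser = optparse.OptionParser(usage="%prog [options] target")
    parser.add_option("-j", "--jobs", type="int", dest="jobs", default=1,
                      help="number of parallel jobs")
    parser.add_option("-q", "--quiet", action="store_true", dest="quiet",
                      help="suppress progress messages")
    options, args = parser.parse_args(["-j", "4", "build"])
    # options.jobs == 4, options.quiet is None, args == ["build"]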
+
+
+def _match_abbrev(s, wordmap):
+ """_match_abbrev(s : string, wordmap : {string : Option}) -> string
+
+ Return the string key in 'wordmap' for which 's' is an unambiguous
+ abbreviation. If 's' is found to be ambiguous or doesn't match any of
+ 'wordmap', raise BadOptionError.
+ """
+ # Is there an exact match?
+ if wordmap.has_key(s):
+ return s
+ else:
+ # Isolate all words with s as a prefix.
+ possibilities = filter(lambda w, s=s: w[:len(s)] == s, wordmap.keys())
+ # No exact match, so there had better be just one possibility.
+ if len(possibilities) == 1:
+ return possibilities[0]
+ elif not possibilities:
+ raise BadOptionError(s)
+ else:
+ # More than one possible completion: ambiguous prefix.
+ possibilities.sort()
+ raise AmbiguousOptionError(s, possibilities)
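To make the matching rule concrete, a small illustration (hypothetical wordmap; not taken from the code above):

    # wordmap = {"--verbose": opt1, "--version": opt2, "--quiet": opt3}
    # _match_abbrev("--q", wordmap)     -> "--quiet"  (unambiguous prefix)
    # _match_abbrev("--ver", wordmap)   -> raises AmbiguousOptionError (matches both --verbose and --version)
    # _match_abbrev("--bogus", wordmap) -> raises BadOptionError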
+
+
+# Some day, there might be many Option classes. As of Optik 1.3, the
+# preferred way to instantiate Options is indirectly, via make_option(),
+# which will become a factory function when there are many Option
+# classes.
+make_option = Option
+
+# Local Variables:
+# tab-width:4
+# indent-tabs-mode:nil
+# End:
+# vim: set expandtab tabstop=4 shiftwidth=4:
diff --git a/src/engine/SCons/compat/_scons_platform.py b/src/engine/SCons/compat/_scons_platform.py
new file mode 100644
index 0000000..8b675ea
--- /dev/null
+++ b/src/engine/SCons/compat/_scons_platform.py
@@ -0,0 +1,237 @@
+#
+# Copyright (c) 2001, 2002, 2003, 2004, 2005, 2006, 2007, 2008, 2009 The SCons Foundation
+#
+# Permission is hereby granted, free of charge, to any person obtaining
+# a copy of this software and associated documentation files (the
+# "Software"), to deal in the Software without restriction, including
+# without limitation the rights to use, copy, modify, merge, publish,
+# distribute, sublicense, and/or sell copies of the Software, and to
+# permit persons to whom the Software is furnished to do so, subject to
+# the following conditions:
+#
+# The above copyright notice and this permission notice shall be included
+# in all copies or substantial portions of the Software.
+#
+# THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY
+# KIND, EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE
+# WARRANTIES OF MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND
+# NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS BE
+# LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION
+# OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION
+# WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE.
+#
+
+__doc__ = """
+platform backwards-compatibility module for older (pre-2.3) Python versions
+
+This does not not NOT (repeat, *NOT*) provide complete platform
+functionality. It only wraps the portions of platform functionality used
+by SCons.
+"""
+
+__revision__ = "src/engine/SCons/compat/_scons_platform.py 4577 2009/12/27 19:44:43 scons"
+
+### Portable uname() interface
+
+_uname_cache = None
+
+def uname():
+
+ """ Fairly portable uname interface. Returns a tuple
+ of strings (system,node,release,version,machine,processor)
+ identifying the underlying platform.
+
+ Note that unlike the os.uname function this also returns
+ possible processor information as an additional tuple entry.
+
+ Entries which cannot be determined are set to ''.
+
+ """
+ global _uname_cache
+ no_os_uname = 0
+
+ if _uname_cache is not None:
+ return _uname_cache
+
+ processor = ''
+
+ # Get some info from the builtin os.uname API...
+ try:
+ system,node,release,version,machine = os.uname()
+ except AttributeError:
+ no_os_uname = 1
+
+ if no_os_uname or not filter(None, (system, node, release, version, machine)):
+ # Hmm, so there is either no uname or uname has returned
+ # 'unknowns'... we'll have to poke around the system then.
+ if no_os_uname:
+ system = sys.platform
+ release = ''
+ version = ''
+ node = _node()
+ machine = ''
+
+ use_syscmd_ver = 01
+
+ # Try win32_ver() on win32 platforms
+ if system == 'win32':
+ release,version,csd,ptype = win32_ver()
+ if release and version:
+ use_syscmd_ver = 0
+ # Try to use the PROCESSOR_* environment variables
+ # available on Win XP and later; see
+ # http://support.microsoft.com/kb/888731 and
+ # http://www.geocities.com/rick_lively/MANUALS/ENV/MSWIN/PROCESSI.HTM
+ if not machine:
+ machine = os.environ.get('PROCESSOR_ARCHITECTURE', '')
+ if not processor:
+ processor = os.environ.get('PROCESSOR_IDENTIFIER', machine)
+
+ # Try the 'ver' system command available on some
+ # platforms
+ if use_syscmd_ver:
+ system,release,version = _syscmd_ver(system)
+ # Normalize system to what win32_ver() normally returns
+ # (_syscmd_ver() tends to return the vendor name as well)
+ if system == 'Microsoft Windows':
+ system = 'Windows'
+ elif system == 'Microsoft' and release == 'Windows':
+ # Under Windows Vista and Windows Server 2008,
+ # Microsoft changed the output of the ver command. The
+ # release is no longer printed. This causes the
+ # system and release to be misidentified.
+ system = 'Windows'
+ if '6.0' == version[:3]:
+ release = 'Vista'
+ else:
+ release = ''
+
+ # In case we still don't know anything useful, we'll try to
+ # help ourselves
+ if system in ('win32','win16'):
+ if not version:
+ if system == 'win32':
+ version = '32bit'
+ else:
+ version = '16bit'
+ system = 'Windows'
+
+ elif system[:4] == 'java':
+ release,vendor,vminfo,osinfo = java_ver()
+ system = 'Java'
+ version = string.join(vminfo,', ')
+ if not version:
+ version = vendor
+
+ elif os.name == 'mac':
+ release,(version,stage,nonrel),machine = mac_ver()
+ system = 'MacOS'
+
+ # System specific extensions
+ if system == 'OpenVMS':
+ # OpenVMS seems to have release and version mixed up
+ if not release or release == '0':
+ release = version
+ version = ''
+ # Get processor information
+ try:
+ import vms_lib
+ except ImportError:
+ pass
+ else:
+ csid, cpu_number = vms_lib.getsyi('SYI$_CPU',0)
+ if (cpu_number >= 128):
+ processor = 'Alpha'
+ else:
+ processor = 'VAX'
+ if not processor:
+ # Get processor information from the uname system command
+ processor = _syscmd_uname('-p','')
+
+ #If any unknowns still exist, replace them with ''s, which are more portable
+ if system == 'unknown':
+ system = ''
+ if node == 'unknown':
+ node = ''
+ if release == 'unknown':
+ release = ''
+ if version == 'unknown':
+ version = ''
+ if machine == 'unknown':
+ machine = ''
+ if processor == 'unknown':
+ processor = ''
+
+ # normalize name
+ if system == 'Microsoft' and release == 'Windows':
+ system = 'Windows'
+ release = 'Vista'
+
+ _uname_cache = system,node,release,version,machine,processor
+ return _uname_cache
+
+### Direct interfaces to some of the uname() return values
+
+def system():
+
+ """ Returns the system/OS name, e.g. 'Linux', 'Windows' or 'Java'.
+
+ An empty string is returned if the value cannot be determined.
+
+ """
+ return uname()[0]
+
+def node():
+
+ """ Returns the computer's network name (which may not be fully
+ qualified)
+
+ An empty string is returned if the value cannot be determined.
+
+ """
+ return uname()[1]
+
+def release():
+
+ """ Returns the system's release, e.g. '2.2.0' or 'NT'
+
+ An empty string is returned if the value cannot be determined.
+
+ """
+ return uname()[2]
+
+def version():
+
+ """ Returns the system's release version, e.g. '#3 on degas'
+
+ An empty string is returned if the value cannot be determined.
+
+ """
+ return uname()[3]
+
+def machine():
+
+ """ Returns the machine type, e.g. 'i386'
+
+ An empty string is returned if the value cannot be determined.
+
+ """
+ return uname()[4]
+
+def processor():
+
+ """ Returns the (true) processor name, e.g. 'amdk6'
+
+ An empty string is returned if the value cannot be
+ determined. Note that many platforms do not provide this
+ information or simply return the same value as for machine(),
+ e.g. NetBSD does this.
+
+ """
+ return uname()[5]
+
+# Local Variables:
+# tab-width:4
+# indent-tabs-mode:nil
+# End:
+# vim: set expandtab tabstop=4 shiftwidth=4:
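A quick sketch of the wrapper functions above (illustrative; the printed values are examples, and the import is shown by its standard-library name, which this module stands in for on old Pythons):

    import platform                # this compat module fills in for it on pre-2.3 Pythons
    print platform.system()        # e.g. 'Linux', 'Windows' or 'Java'; '' if unknown
    print platform.machine()       # e.g. 'i386'
    print platform.uname()         # (system, node, release, version, machine, processor)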
diff --git a/src/engine/SCons/compat/_scons_sets.py b/src/engine/SCons/compat/_scons_sets.py
new file mode 100644
index 0000000..12dbead
--- /dev/null
+++ b/src/engine/SCons/compat/_scons_sets.py
@@ -0,0 +1,583 @@
+"""Classes to represent arbitrary sets (including sets of sets).
+
+This module implements sets using dictionaries whose values are
+ignored. The usual operations (union, intersection, deletion, etc.)
+are provided as both methods and operators.
+
+Important: sets are not sequences! While they support 'x in s',
+'len(s)', and 'for x in s', none of those operations are unique for
+sequences; for example, mappings support all three as well. The
+characteristic operation for sequences is subscripting with small
+integers: s[i], for i in range(len(s)). Sets don't support
+subscripting at all. Also, sequences allow multiple occurrences and
+their elements have a definite order; sets on the other hand don't
+record multiple occurrences and don't remember the order of element
+insertion (which is why they don't support s[i]).
+
+The following classes are provided:
+
+BaseSet -- All the operations common to both mutable and immutable
+ sets. This is an abstract class, not meant to be directly
+ instantiated.
+
+Set -- Mutable sets, subclass of BaseSet; not hashable.
+
+ImmutableSet -- Immutable sets, subclass of BaseSet; hashable.
+ An iterable argument is mandatory to create an ImmutableSet.
+
+_TemporarilyImmutableSet -- A wrapper around a Set, hashable,
+ giving the same hash value as the immutable set equivalent
+ would have. Do not use this class directly.
+
+Only hashable objects can be added to a Set. In particular, you cannot
+really add a Set as an element to another Set; if you try, what is
+actually added is an ImmutableSet built from it (it compares equal to
+the one you tried adding).
+
+When you ask if `x in y' where x is a Set and y is a Set or
+ImmutableSet, x is wrapped into a _TemporarilyImmutableSet z, and
+what's tested is actually `z in y'.
+
+"""
+
+# Code history:
+#
+# - Greg V. Wilson wrote the first version, using a different approach
+# to the mutable/immutable problem, and inheriting from dict.
+#
+# - Alex Martelli modified Greg's version to implement the current
+# Set/ImmutableSet approach, and make the data an attribute.
+#
+# - Guido van Rossum rewrote much of the code, made some API changes,
+# and cleaned up the docstrings.
+#
+# - Raymond Hettinger added a number of speedups and other
+# improvements.
+
+from __future__ import generators
+try:
+ from itertools import ifilter, ifilterfalse
+except ImportError:
+ # Code to make the module run under Py2.2
+ def ifilter(predicate, iterable):
+ if predicate is None:
+ def predicate(x):
+ return x
+ for x in iterable:
+ if predicate(x):
+ yield x
+ def ifilterfalse(predicate, iterable):
+ if predicate is None:
+ def predicate(x):
+ return x
+ for x in iterable:
+ if not predicate(x):
+ yield x
+ try:
+ True, False
+ except NameError:
+ True, False = (0==0, 0!=0)
+
+__all__ = ['BaseSet', 'Set', 'ImmutableSet']
+
+class BaseSet(object):
+ """Common base class for mutable and immutable sets."""
+
+ __slots__ = ['_data']
+
+ # Constructor
+
+ def __init__(self):
+ """This is an abstract class."""
+ # Don't call this from a concrete subclass!
+ if self.__class__ is BaseSet:
+ raise TypeError, ("BaseSet is an abstract class. "
+ "Use Set or ImmutableSet.")
+
+ # Standard protocols: __len__, __repr__, __str__, __iter__
+
+ def __len__(self):
+ """Return the number of elements of a set."""
+ return len(self._data)
+
+ def __repr__(self):
+ """Return string representation of a set.
+
+ This looks like 'Set([<list of elements>])'.
+ """
+ return self._repr()
+
+ # __str__ is the same as __repr__
+ __str__ = __repr__
+
+ def _repr(self, sorted=False):
+ elements = self._data.keys()
+ if sorted:
+ elements.sort()
+ return '%s(%r)' % (self.__class__.__name__, elements)
+
+ def __iter__(self):
+ """Return an iterator over the elements or a set.
+
+ This is the keys iterator for the underlying dict.
+ """
+ return self._data.iterkeys()
+
+ # Three-way comparison is not supported. However, because __eq__ is
+ # tried before __cmp__, if Set x == Set y, x.__eq__(y) returns True and
+ # then cmp(x, y) returns 0 (Python doesn't actually call __cmp__ in this
+ # case).
+
+ def __cmp__(self, other):
+ raise TypeError, "can't compare sets using cmp()"
+
+ # Equality comparisons using the underlying dicts. Mixed-type comparisons
+ # are allowed here, where Set == z for non-Set z always returns False,
+ # and Set != z always True. This allows expressions like "x in y" to
+ # give the expected result when y is a sequence of mixed types, not
+ # raising a pointless TypeError just because y contains a Set, or x is
+ # a Set and y contains a non-set ("in" invokes only __eq__).
+ # Subtle: it would be nicer if __eq__ and __ne__ could return
+ # NotImplemented instead of True or False. Then the other comparand
+ # would get a chance to determine the result, and if the other comparand
+ # also returned NotImplemented then it would fall back to object address
+ # comparison (which would always return False for __eq__ and always
+ # True for __ne__). However, that doesn't work, because this type
+ # *also* implements __cmp__: if, e.g., __eq__ returns NotImplemented,
+ # Python tries __cmp__ next, and the __cmp__ here then raises TypeError.
+
+ def __eq__(self, other):
+ if isinstance(other, BaseSet):
+ return self._data == other._data
+ else:
+ return False
+
+ def __ne__(self, other):
+ if isinstance(other, BaseSet):
+ return self._data != other._data
+ else:
+ return True
+
+ # Copying operations
+
+ def copy(self):
+ """Return a shallow copy of a set."""
+ result = self.__class__()
+ result._data.update(self._data)
+ return result
+
+ __copy__ = copy # For the copy module
+
+ def __deepcopy__(self, memo):
+ """Return a deep copy of a set; used by copy module."""
+ # This pre-creates the result and inserts it in the memo
+ # early, in case the deep copy recurses into another reference
+ # to this same set. A set can't be an element of itself, but
+ # it can certainly contain an object that has a reference to
+ # itself.
+ from copy import deepcopy
+ result = self.__class__()
+ memo[id(self)] = result
+ data = result._data
+ value = True
+ for elt in self:
+ data[deepcopy(elt, memo)] = value
+ return result
+
+ # Standard set operations: union, intersection, both differences.
+ # Each has an operator version (e.g. __or__, invoked with |) and a
+ # method version (e.g. union).
+ # Subtle: Each pair requires distinct code so that the outcome is
+ # correct when the type of other isn't suitable. For example, if
+ # we did "union = __or__" instead, then Set().union(3) would return
+ # NotImplemented instead of raising TypeError (albeit that *why* it
+ # raises TypeError as-is is also a bit subtle).
+
+ def __or__(self, other):
+ """Return the union of two sets as a new set.
+
+ (I.e. all elements that are in either set.)
+ """
+ if not isinstance(other, BaseSet):
+ return NotImplemented
+ return self.union(other)
+
+ def union(self, other):
+ """Return the union of two sets as a new set.
+
+ (I.e. all elements that are in either set.)
+ """
+ result = self.__class__(self)
+ result._update(other)
+ return result
+
+ def __and__(self, other):
+ """Return the intersection of two sets as a new set.
+
+ (I.e. all elements that are in both sets.)
+ """
+ if not isinstance(other, BaseSet):
+ return NotImplemented
+ return self.intersection(other)
+
+ def intersection(self, other):
+ """Return the intersection of two sets as a new set.
+
+ (I.e. all elements that are in both sets.)
+ """
+ if not isinstance(other, BaseSet):
+ other = Set(other)
+ if len(self) <= len(other):
+ little, big = self, other
+ else:
+ little, big = other, self
+ common = ifilter(big._data.has_key, little)
+ return self.__class__(common)
+
+ def __xor__(self, other):
+ """Return the symmetric difference of two sets as a new set.
+
+ (I.e. all elements that are in exactly one of the sets.)
+ """
+ if not isinstance(other, BaseSet):
+ return NotImplemented
+ return self.symmetric_difference(other)
+
+ def symmetric_difference(self, other):
+ """Return the symmetric difference of two sets as a new set.
+
+ (I.e. all elements that are in exactly one of the sets.)
+ """
+ result = self.__class__()
+ data = result._data
+ value = True
+ selfdata = self._data
+ try:
+ otherdata = other._data
+ except AttributeError:
+ otherdata = Set(other)._data
+ for elt in ifilterfalse(otherdata.has_key, selfdata):
+ data[elt] = value
+ for elt in ifilterfalse(selfdata.has_key, otherdata):
+ data[elt] = value
+ return result
+
+ def __sub__(self, other):
+ """Return the difference of two sets as a new Set.
+
+ (I.e. all elements that are in this set and not in the other.)
+ """
+ if not isinstance(other, BaseSet):
+ return NotImplemented
+ return self.difference(other)
+
+ def difference(self, other):
+ """Return the difference of two sets as a new Set.
+
+ (I.e. all elements that are in this set and not in the other.)
+ """
+ result = self.__class__()
+ data = result._data
+ try:
+ otherdata = other._data
+ except AttributeError:
+ otherdata = Set(other)._data
+ value = True
+ for elt in ifilterfalse(otherdata.has_key, self):
+ data[elt] = value
+ return result
+
+ # Membership test
+
+ def __contains__(self, element):
+ """Report whether an element is a member of a set.
+
+ (Called in response to the expression `element in self'.)
+ """
+ try:
+ return element in self._data
+ except TypeError:
+ transform = getattr(element, "__as_temporarily_immutable__", None)
+ if transform is None:
+ raise # re-raise the TypeError exception we caught
+ return transform() in self._data
+
+ # Subset and superset test
+
+ def issubset(self, other):
+ """Report whether another set contains this set."""
+ self._binary_sanity_check(other)
+ if len(self) > len(other): # Fast check for obvious cases
+ return False
+ for elt in ifilterfalse(other._data.has_key, self):
+ return False
+ return True
+
+ def issuperset(self, other):
+ """Report whether this set contains another set."""
+ self._binary_sanity_check(other)
+ if len(self) < len(other): # Fast check for obvious cases
+ return False
+ for elt in ifilterfalse(self._data.has_key, other):
+ return False
+ return True
+
+ # Inequality comparisons using the is-subset relation.
+ __le__ = issubset
+ __ge__ = issuperset
+
+ def __lt__(self, other):
+ self._binary_sanity_check(other)
+ return len(self) < len(other) and self.issubset(other)
+
+ def __gt__(self, other):
+ self._binary_sanity_check(other)
+ return len(self) > len(other) and self.issuperset(other)
+
+ # Assorted helpers
+
+ def _binary_sanity_check(self, other):
+ # Check that the other argument to a binary operation is also
+ # a set, raising a TypeError otherwise.
+ if not isinstance(other, BaseSet):
+ raise TypeError, "Binary operation only permitted between sets"
+
+ def _compute_hash(self):
+ # Calculate hash code for a set by xor'ing the hash codes of
+ # the elements. This ensures that the hash code does not depend
+ # on the order in which elements are added to the set. This is
+ # not called __hash__ because a BaseSet should not be hashable;
+ # only an ImmutableSet is hashable.
+ result = 0
+ for elt in self:
+ result ^= hash(elt)
+ return result
+
+ def _update(self, iterable):
+ # The main loop for update() and the subclass __init__() methods.
+ data = self._data
+
+ # Use the fast update() method when a dictionary is available.
+ if isinstance(iterable, BaseSet):
+ data.update(iterable._data)
+ return
+
+ value = True
+
+ if type(iterable) in (list, tuple, xrange):
+ # Optimized: we know that __iter__() and next() can't
+ # raise TypeError, so we can move 'try:' out of the loop.
+ it = iter(iterable)
+ while True:
+ try:
+ for element in it:
+ data[element] = value
+ return
+ except TypeError:
+ transform = getattr(element, "__as_immutable__", None)
+ if transform is None:
+ raise # re-raise the TypeError exception we caught
+ data[transform()] = value
+ else:
+ # Safe: only catch TypeError where intended
+ for element in iterable:
+ try:
+ data[element] = value
+ except TypeError:
+ transform = getattr(element, "__as_immutable__", None)
+ if transform is None:
+ raise # re-raise the TypeError exception we caught
+ data[transform()] = value
+
+
+class ImmutableSet(BaseSet):
+ """Immutable set class."""
+
+ __slots__ = ['_hashcode']
+
+ # BaseSet + hashing
+
+ def __init__(self, iterable=None):
+ """Construct an immutable set from an optional iterable."""
+ self._hashcode = None
+ self._data = {}
+ if iterable is not None:
+ self._update(iterable)
+
+ def __hash__(self):
+ if self._hashcode is None:
+ self._hashcode = self._compute_hash()
+ return self._hashcode
+
+ def __getstate__(self):
+ return self._data, self._hashcode
+
+ def __setstate__(self, state):
+ self._data, self._hashcode = state
+
+class Set(BaseSet):
+ """ Mutable set class."""
+
+ __slots__ = []
+
+ # BaseSet + operations requiring mutability; no hashing
+
+ def __init__(self, iterable=None):
+ """Construct a set from an optional iterable."""
+ self._data = {}
+ if iterable is not None:
+ self._update(iterable)
+
+ def __getstate__(self):
+ # getstate's results are ignored if it is not
+ return self._data,
+
+ def __setstate__(self, data):
+ self._data, = data
+
+ def __hash__(self):
+ """A Set cannot be hashed."""
+ # We inherit object.__hash__, so we must deny this explicitly
+ raise TypeError, "Can't hash a Set, only an ImmutableSet."
+
+ # In-place union, intersection, differences.
+ # Subtle: The xyz_update() functions deliberately return None,
+ # as do all mutating operations on built-in container types.
+ # The __xyz__ spellings have to return self, though.
+
+ def __ior__(self, other):
+ """Update a set with the union of itself and another."""
+ self._binary_sanity_check(other)
+ self._data.update(other._data)
+ return self
+
+ def union_update(self, other):
+ """Update a set with the union of itself and another."""
+ self._update(other)
+
+ def __iand__(self, other):
+ """Update a set with the intersection of itself and another."""
+ self._binary_sanity_check(other)
+ self._data = (self & other)._data
+ return self
+
+ def intersection_update(self, other):
+ """Update a set with the intersection of itself and another."""
+ if isinstance(other, BaseSet):
+ self &= other
+ else:
+ self._data = (self.intersection(other))._data
+
+ def __ixor__(self, other):
+ """Update a set with the symmetric difference of itself and another."""
+ self._binary_sanity_check(other)
+ self.symmetric_difference_update(other)
+ return self
+
+ def symmetric_difference_update(self, other):
+ """Update a set with the symmetric difference of itself and another."""
+ data = self._data
+ value = True
+ if not isinstance(other, BaseSet):
+ other = Set(other)
+ if self is other:
+ self.clear()
+ for elt in other:
+ if elt in data:
+ del data[elt]
+ else:
+ data[elt] = value
+
+ def __isub__(self, other):
+ """Remove all elements of another set from this set."""
+ self._binary_sanity_check(other)
+ self.difference_update(other)
+ return self
+
+ def difference_update(self, other):
+ """Remove all elements of another set from this set."""
+ data = self._data
+ if not isinstance(other, BaseSet):
+ other = Set(other)
+ if self is other:
+ self.clear()
+ for elt in ifilter(data.has_key, other):
+ del data[elt]
+
+ # Python dict-like mass mutations: update, clear
+
+ def update(self, iterable):
+ """Add all values from an iterable (such as a list or file)."""
+ self._update(iterable)
+
+ def clear(self):
+ """Remove all elements from this set."""
+ self._data.clear()
+
+ # Single-element mutations: add, remove, discard
+
+ def add(self, element):
+ """Add an element to a set.
+
+ This has no effect if the element is already present.
+ """
+ try:
+ self._data[element] = True
+ except TypeError:
+ transform = getattr(element, "__as_immutable__", None)
+ if transform is None:
+ raise # re-raise the TypeError exception we caught
+ self._data[transform()] = True
+
+ def remove(self, element):
+ """Remove an element from a set; it must be a member.
+
+ If the element is not a member, raise a KeyError.
+ """
+ try:
+ del self._data[element]
+ except TypeError:
+ transform = getattr(element, "__as_temporarily_immutable__", None)
+ if transform is None:
+ raise # re-raise the TypeError exception we caught
+ del self._data[transform()]
+
+ def discard(self, element):
+ """Remove an element from a set if it is a member.
+
+ If the element is not a member, do nothing.
+ """
+ try:
+ self.remove(element)
+ except KeyError:
+ pass
+
+ def pop(self):
+ """Remove and return an arbitrary set element."""
+ return self._data.popitem()[0]
+
+ def __as_immutable__(self):
+ # Return a copy of self as an immutable set
+ return ImmutableSet(self)
+
+ def __as_temporarily_immutable__(self):
+ # Return self wrapped in a temporarily immutable set
+ return _TemporarilyImmutableSet(self)
+
+
+class _TemporarilyImmutableSet(BaseSet):
+ # Wrap a mutable set as if it was temporarily immutable.
+ # This only supplies hashing and equality comparisons.
+
+ def __init__(self, set):
+ self._set = set
+ self._data = set._data # Needed by ImmutableSet.__eq__()
+
+ def __hash__(self):
+ return self._set._compute_hash()
+
+# Local Variables:
+# tab-width:4
+# indent-tabs-mode:nil
+# End:
+# vim: set expandtab tabstop=4 shiftwidth=4:
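The operator/method distinction noted in the comments above can be seen in a small sketch (illustrative only; repr element order is arbitrary):

    a = Set([1, 2, 3])
    b = Set([2, 3, 4])
    print a | b                   # Set([1, 2, 3, 4])
    print a & b                   # Set([2, 3])
    print a.union([4, 5])         # the method forms accept any iterable
    # a | [4, 5] raises TypeError: the operator forms require both operands to be sets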
diff --git a/src/engine/SCons/compat/_scons_sets15.py b/src/engine/SCons/compat/_scons_sets15.py
new file mode 100644
index 0000000..bafa009
--- /dev/null
+++ b/src/engine/SCons/compat/_scons_sets15.py
@@ -0,0 +1,176 @@
+#
+# A Set class that works all the way back to Python 1.5. From:
+#
+# Python Cookbook: Yet another Set class for Python
+# http://aspn.activestate.com/ASPN/Cookbook/Python/Recipe/106469
+# Goncalo Rodriques
+#
+# This is a pure Pythonic implementation of a set class. The syntax
+# and methods implemented are, for the most part, borrowed from
+# PEP 218 by Greg Wilson.
+#
+# Note that this class violates the formal definition of a set() by adding
+# a __getitem__() method so we can iterate over a set's elements under
+# Python 1.5 and 2.1, which don't support __iter__() and iterator types.
+#
+
+import string
+
+class Set:
+ """The set class. It can contain mutable objects."""
+
+ def __init__(self, seq = None):
+ """The constructor. It can take any object giving an iterator as an optional
+ argument to populate the new set."""
+ self.elems = []
+ if seq:
+ for elem in seq:
+ if elem not in self.elems:
+ hash(elem)
+ self.elems.append(elem)
+
+ def __str__(self):
+ return "set([%s])" % string.join(map(str, self.elems), ", ")
+
+
+ def copy(self):
+ """Shallow copy of a set object."""
+ return Set(self.elems)
+
+ def __contains__(self, elem):
+ return elem in self.elems
+
+ def __len__(self):
+ return len(self.elems)
+
+ def __getitem__(self, index):
+ # Added so that Python 1.5 can iterate over the elements.
+ # The cookbook recipe's author didn't like this because there
+ # really isn't any order in a set object, but this is necessary
+ # to make the class work well enough for our purposes.
+ return self.elems[index]
+
+ def items(self):
+ """Returns a list of the elements in the set."""
+ return self.elems
+
+ def add(self, elem):
+ """Add one element to the set."""
+ if elem not in self.elems:
+ hash(elem)
+ self.elems.append(elem)
+
+ def remove(self, elem):
+ """Remove an element from the set. Return an error if elem is not in the set."""
+ try:
+ self.elems.remove(elem)
+ except ValueError:
+ raise LookupError, "Object %s is not a member of the set." % str(elem)
+
+ def discard(self, elem):
+ """Remove an element from the set. Do nothing if elem is not in the set."""
+ try:
+ self.elems.remove(elem)
+ except ValueError:
+ pass
+
+ def sort(self, func=cmp):
+ self.elems.sort(func)
+
+ #Define an iterator for a set.
+ def __iter__(self):
+ return iter(self.elems)
+
+ #The basic binary operations with sets.
+ def __or__(self, other):
+ """Union of two sets."""
+ ret = self.copy()
+ for elem in other.elems:
+ if elem not in ret:
+ ret.elems.append(elem)
+ return ret
+
+ def __sub__(self, other):
+ """Difference of two sets."""
+ ret = self.copy()
+ for elem in other.elems:
+ ret.discard(elem)
+ return ret
+
+ def __and__(self, other):
+ """Intersection of two sets."""
+ ret = Set()
+ for elem in self.elems:
+ if elem in other.elems:
+ ret.elems.append(elem)
+ return ret
+
+ def __add__(self, other):
+ """Symmetric difference of two sets."""
+ ret = Set()
+ temp = other.copy()
+ for elem in self.elems:
+ if elem in temp.elems:
+ temp.elems.remove(elem)
+ else:
+ ret.elems.append(elem)
+ #Add remaining elements.
+ for elem in temp.elems:
+ ret.elems.append(elem)
+ return ret
+
+ def __mul__(self, other):
+ """Cartesian product of two sets."""
+ ret = Set()
+ for elemself in self.elems:
+ x = map(lambda other, s=elemself: (s, other), other.elems)
+ ret.elems.extend(x)
+ return ret
+
+ #Some of the binary comparisons.
+ def __lt__(self, other):
+ """Returns 1 if the lhs set is contained but not equal to the rhs set."""
+ if len(self.elems) < len(other.elems):
+ temp = other.copy()
+ for elem in self.elems:
+ if elem in temp.elems:
+ temp.remove(elem)
+ else:
+ return 0
+ return len(temp.elems) == 0
+ else:
+ return 0
+
+ def __le__(self, other):
+ """Returns 1 if the lhs set is contained in the rhs set."""
+ if len(self.elems) <= len(other.elems):
+ ret = 1
+ for elem in self.elems:
+ if elem not in other.elems:
+ ret = 0
+ break
+ return ret
+ else:
+ return 0
+
+ def __eq__(self, other):
+ """Returns 1 if the sets are equal."""
+ if len(self.elems) != len(other.elems):
+ return 0
+ else:
+ return len(self - other) == 0
+
+ def __cmp__(self, other):
+ """Returns 1 if the sets are equal."""
+ if self.__lt__(other):
+ return -1
+ elif other.__lt__(self):
+ return 1
+ else:
+ return 0
+
+# Local Variables:
+# tab-width:4
+# indent-tabs-mode:nil
+# End:
+# vim: set expandtab tabstop=4 shiftwidth=4:
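Because the class adds __getitem__(), even a Python 1.5-era for loop can walk a set. A small sketch (illustrative only):

    s = Set([1, 2, 3])
    t = Set([2, 3, 4])
    for elem in s:                # works via __getitem__ on 1.5/2.1, __iter__ on later Pythons
        print elem
    print s & t                   # set([2, 3])
    print s | t                   # set([1, 2, 3, 4])
    print len(s - t)              # 1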
diff --git a/src/engine/SCons/compat/_scons_shlex.py b/src/engine/SCons/compat/_scons_shlex.py
new file mode 100644
index 0000000..9e30a01
--- /dev/null
+++ b/src/engine/SCons/compat/_scons_shlex.py
@@ -0,0 +1,325 @@
+# -*- coding: iso-8859-1 -*-
+"""A lexical analyzer class for simple shell-like syntaxes."""
+
+# Module and documentation by Eric S. Raymond, 21 Dec 1998
+# Input stacking and error message cleanup added by ESR, March 2000
+# push_source() and pop_source() made explicit by ESR, January 2001.
+# Posix compliance, split(), string arguments, and
+# iterator interface by Gustavo Niemeyer, April 2003.
+
+import os.path
+import sys
+#from collections import deque
+
+class deque:
+ def __init__(self):
+ self.data = []
+ def __len__(self):
+ return len(self.data)
+ def appendleft(self, item):
+ self.data.insert(0, item)
+ def popleft(self):
+ return self.data.pop(0)
+
+try:
+ basestring
+except NameError:
+ import types
+ def is_basestring(s):
+ return type(s) is types.StringType
+else:
+ def is_basestring(s):
+ return isinstance(s, basestring)
+
+try:
+ from cStringIO import StringIO
+except ImportError:
+ from StringIO import StringIO
+
+__all__ = ["shlex", "split"]
+
+class shlex:
+ "A lexical analyzer class for simple shell-like syntaxes."
+ def __init__(self, instream=None, infile=None, posix=False):
+ if is_basestring(instream):
+ instream = StringIO(instream)
+ if instream is not None:
+ self.instream = instream
+ self.infile = infile
+ else:
+ self.instream = sys.stdin
+ self.infile = None
+ self.posix = posix
+ if posix:
+ self.eof = None
+ else:
+ self.eof = ''
+ self.commenters = '#'
+ self.wordchars = ('abcdfeghijklmnopqrstuvwxyz'
+ 'ABCDEFGHIJKLMNOPQRSTUVWXYZ0123456789_')
+ if self.posix:
+ self.wordchars = self.wordchars + ('ßàáâãäåæçèéêëìíîïðñòóôõöøùúûüýþÿ'
+ 'ÀÁÂÃÄÅÆÇÈÉÊËÌÍÎÏÐÑÒÓÔÕÖØÙÚÛÜÝÞ')
+ self.whitespace = ' \t\r\n'
+ self.whitespace_split = False
+ self.quotes = '\'"'
+ self.escape = '\\'
+ self.escapedquotes = '"'
+ self.state = ' '
+ self.pushback = deque()
+ self.lineno = 1
+ self.debug = 0
+ self.token = ''
+ self.filestack = deque()
+ self.source = None
+ if self.debug:
+ print 'shlex: reading from %s, line %d' \
+ % (self.instream, self.lineno)
+
+ def push_token(self, tok):
+ "Push a token onto the stack popped by the get_token method"
+ if self.debug >= 1:
+ print "shlex: pushing token " + repr(tok)
+ self.pushback.appendleft(tok)
+
+ def push_source(self, newstream, newfile=None):
+ "Push an input source onto the lexer's input source stack."
+ if is_basestring(newstream):
+ newstream = StringIO(newstream)
+ self.filestack.appendleft((self.infile, self.instream, self.lineno))
+ self.infile = newfile
+ self.instream = newstream
+ self.lineno = 1
+ if self.debug:
+ if newfile is not None:
+ print 'shlex: pushing to file %s' % (self.infile,)
+ else:
+ print 'shlex: pushing to stream %s' % (self.instream,)
+
+ def pop_source(self):
+ "Pop the input source stack."
+ self.instream.close()
+ (self.infile, self.instream, self.lineno) = self.filestack.popleft()
+ if self.debug:
+ print 'shlex: popping to %s, line %d' \
+ % (self.instream, self.lineno)
+ self.state = ' '
+
+ def get_token(self):
+ "Get a token from the input stream (or from stack if it's nonempty)"
+ if self.pushback:
+ tok = self.pushback.popleft()
+ if self.debug >= 1:
+ print "shlex: popping token " + repr(tok)
+ return tok
+ # No pushback. Get a token.
+ raw = self.read_token()
+ # Handle inclusions
+ if self.source is not None:
+ while raw == self.source:
+ spec = self.sourcehook(self.read_token())
+ if spec:
+ (newfile, newstream) = spec
+ self.push_source(newstream, newfile)
+ raw = self.get_token()
+ # Maybe we got EOF instead?
+ while raw == self.eof:
+ if not self.filestack:
+ return self.eof
+ else:
+ self.pop_source()
+ raw = self.get_token()
+ # Neither inclusion nor EOF
+ if self.debug >= 1:
+ if raw != self.eof:
+ print "shlex: token=" + repr(raw)
+ else:
+ print "shlex: token=EOF"
+ return raw
+
+ def read_token(self):
+ quoted = False
+ escapedstate = ' '
+ while True:
+ nextchar = self.instream.read(1)
+ if nextchar == '\n':
+ self.lineno = self.lineno + 1
+ if self.debug >= 3:
+ print "shlex: in state", repr(self.state), \
+ "I see character:", repr(nextchar)
+ if self.state is None:
+ self.token = '' # past end of file
+ break
+ elif self.state == ' ':
+ if not nextchar:
+ self.state = None # end of file
+ break
+ elif nextchar in self.whitespace:
+ if self.debug >= 2:
+ print "shlex: I see whitespace in whitespace state"
+ if self.token or (self.posix and quoted):
+ break # emit current token
+ else:
+ continue
+ elif nextchar in self.commenters:
+ self.instream.readline()
+ self.lineno = self.lineno + 1
+ elif self.posix and nextchar in self.escape:
+ escapedstate = 'a'
+ self.state = nextchar
+ elif nextchar in self.wordchars:
+ self.token = nextchar
+ self.state = 'a'
+ elif nextchar in self.quotes:
+ if not self.posix:
+ self.token = nextchar
+ self.state = nextchar
+ elif self.whitespace_split:
+ self.token = nextchar
+ self.state = 'a'
+ else:
+ self.token = nextchar
+ if self.token or (self.posix and quoted):
+ break # emit current token
+ else:
+ continue
+ elif self.state in self.quotes:
+ quoted = True
+ if not nextchar: # end of file
+ if self.debug >= 2:
+ print "shlex: I see EOF in quotes state"
+ # XXX what error should be raised here?
+ raise ValueError, "No closing quotation"
+ if nextchar == self.state:
+ if not self.posix:
+ self.token = self.token + nextchar
+ self.state = ' '
+ break
+ else:
+ self.state = 'a'
+ elif self.posix and nextchar in self.escape and \
+ self.state in self.escapedquotes:
+ escapedstate = self.state
+ self.state = nextchar
+ else:
+ self.token = self.token + nextchar
+ elif self.state in self.escape:
+ if not nextchar: # end of file
+ if self.debug >= 2:
+ print "shlex: I see EOF in escape state"
+ # XXX what error should be raised here?
+ raise ValueError, "No escaped character"
+ # In posix shells, only the quote itself or the escape
+ # character may be escaped within quotes.
+ if escapedstate in self.quotes and \
+ nextchar != self.state and nextchar != escapedstate:
+ self.token = self.token + self.state
+ self.token = self.token + nextchar
+ self.state = escapedstate
+ elif self.state == 'a':
+ if not nextchar:
+ self.state = None # end of file
+ break
+ elif nextchar in self.whitespace:
+ if self.debug >= 2:
+ print "shlex: I see whitespace in word state"
+ self.state = ' '
+ if self.token or (self.posix and quoted):
+ break # emit current token
+ else:
+ continue
+ elif nextchar in self.commenters:
+ self.instream.readline()
+ self.lineno = self.lineno + 1
+ if self.posix:
+ self.state = ' '
+ if self.token or (self.posix and quoted):
+ break # emit current token
+ else:
+ continue
+ elif self.posix and nextchar in self.quotes:
+ self.state = nextchar
+ elif self.posix and nextchar in self.escape:
+ escapedstate = 'a'
+ self.state = nextchar
+ elif nextchar in self.wordchars or nextchar in self.quotes \
+ or self.whitespace_split:
+ self.token = self.token + nextchar
+ else:
+ self.pushback.appendleft(nextchar)
+ if self.debug >= 2:
+ print "shlex: I see punctuation in word state"
+ self.state = ' '
+ if self.token:
+ break # emit current token
+ else:
+ continue
+ result = self.token
+ self.token = ''
+ if self.posix and not quoted and result == '':
+ result = None
+ if self.debug > 1:
+ if result:
+ print "shlex: raw token=" + repr(result)
+ else:
+ print "shlex: raw token=EOF"
+ return result
+
+ def sourcehook(self, newfile):
+ "Hook called on a filename to be sourced."
+ if newfile[0] == '"':
+ newfile = newfile[1:-1]
+ # This implements cpp-like semantics for relative-path inclusion.
+ if is_basestring(self.infile) and not os.path.isabs(newfile):
+ newfile = os.path.join(os.path.dirname(self.infile), newfile)
+ return (newfile, open(newfile, "r"))
+
+ def error_leader(self, infile=None, lineno=None):
+ "Emit a C-compiler-like, Emacs-friendly error-message leader."
+ if infile is None:
+ infile = self.infile
+ if lineno is None:
+ lineno = self.lineno
+ return "\"%s\", line %d: " % (infile, lineno)
+
+ def __iter__(self):
+ return self
+
+ def next(self):
+ token = self.get_token()
+ if token == self.eof:
+ raise StopIteration
+ return token
+
+def split(s, comments=False):
+ lex = shlex(s, posix=True)
+ lex.whitespace_split = True
+ if not comments:
+ lex.commenters = ''
+ #return list(lex)
+ result = []
+ while True:
+ token = lex.get_token()
+ if token == lex.eof:
+ break
+ result.append(token)
+ return result
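A quick sketch of split() in action (illustrative only; the command string is made up):

    print split('gcc -DFOO="a b" -I/usr/include main.c')
    # -> ['gcc', '-DFOO=a b', '-I/usr/include', 'main.c']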
+
+if __name__ == '__main__':
+ if len(sys.argv) == 1:
+ lexer = shlex()
+ else:
+ file = sys.argv[1]
+ lexer = shlex(open(file), file)
+ while 1:
+ tt = lexer.get_token()
+ if tt:
+ print "Token: " + repr(tt)
+ else:
+ break
+
+# Local Variables:
+# tab-width:4
+# indent-tabs-mode:nil
+# End:
+# vim: set expandtab tabstop=4 shiftwidth=4:
diff --git a/src/engine/SCons/compat/_scons_subprocess.py b/src/engine/SCons/compat/_scons_subprocess.py
new file mode 100644
index 0000000..4968825
--- /dev/null
+++ b/src/engine/SCons/compat/_scons_subprocess.py
@@ -0,0 +1,1296 @@
+# subprocess - Subprocesses with accessible I/O streams
+#
+# For more information about this module, see PEP 324.
+#
+# This module should remain compatible with Python 2.2, see PEP 291.
+#
+# Copyright (c) 2003-2005 by Peter Astrand <astrand@lysator.liu.se>
+#
+# Licensed to PSF under a Contributor Agreement.
+# See http://www.python.org/2.4/license for licensing details.
+
+r"""subprocess - Subprocesses with accessible I/O streams
+
+This module allows you to spawn processes, connect to their
+input/output/error pipes, and obtain their return codes. This module
+intends to replace several other, older modules and functions, like:
+
+os.system
+os.spawn*
+os.popen*
+popen2.*
+commands.*
+
+Information about how the subprocess module can be used to replace these
+modules and functions can be found below.
+
+
+
+Using the subprocess module
+===========================
+This module defines one class called Popen:
+
+class Popen(args, bufsize=0, executable=None,
+ stdin=None, stdout=None, stderr=None,
+ preexec_fn=None, close_fds=False, shell=False,
+ cwd=None, env=None, universal_newlines=False,
+ startupinfo=None, creationflags=0):
+
+
+Arguments are:
+
+args should be a string, or a sequence of program arguments. The
+program to execute is normally the first item in the args sequence or
+string, but can be explicitly set by using the executable argument.
+
+On UNIX, with shell=False (default): In this case, the Popen class
+uses os.execvp() to execute the child program. args should normally
+be a sequence. A string will be treated as a sequence with the string
+as the only item (the program to execute).
+
+On UNIX, with shell=True: If args is a string, it specifies the
+command string to execute through the shell. If args is a sequence,
+the first item specifies the command string, and any additional items
+will be treated as additional shell arguments.
+
+On Windows: the Popen class uses CreateProcess() to execute the child
+program, which operates on strings. If args is a sequence, it will be
+converted to a string using the list2cmdline method. Please note that
+not all MS Windows applications interpret the command line the same
+way: The list2cmdline is designed for applications using the same
+rules as the MS C runtime.
+
+bufsize, if given, has the same meaning as the corresponding argument
+to the built-in open() function: 0 means unbuffered, 1 means line
+buffered, any other positive value means use a buffer of
+(approximately) that size. A negative bufsize means to use the system
+default, which usually means fully buffered. The default value for
+bufsize is 0 (unbuffered).
+
+stdin, stdout and stderr specify the executed program's standard
+input, standard output and standard error file handles, respectively.
+Valid values are PIPE, an existing file descriptor (a positive
+integer), an existing file object, and None. PIPE indicates that a
+new pipe to the child should be created. With None, no redirection
+will occur; the child's file handles will be inherited from the
+parent. Additionally, stderr can be STDOUT, which indicates that the
+stderr data from the application should be captured into the same
+file handle as for stdout.
+
+If preexec_fn is set to a callable object, this object will be called
+in the child process just before the child is executed.
+
+If close_fds is true, all file descriptors except 0, 1 and 2 will be
+closed before the child process is executed.
+
+If shell is true, the specified command will be executed through the
+shell.
+
+If cwd is not None, the current directory will be changed to cwd
+before the child is executed.
+
+If env is not None, it defines the environment variables for the new
+process.
+
+If universal_newlines is true, the file objects stdout and stderr are
+opened as text files, but lines may be terminated by any of '\n',
+the Unix end-of-line convention, '\r', the Macintosh convention, or
+'\r\n', the Windows convention. All of these external representations
+are seen as '\n' by the Python program. Note: This feature is only
+available if Python is built with universal newline support (the
+default). Also, the newlines attribute of the file objects stdout,
+stdin and stderr is not updated by the communicate() method.
+
+The startupinfo and creationflags, if given, will be passed to the
+underlying CreateProcess() function. They can specify things such as
+appearance of the main window and priority for the new process.
+(Windows only)
+
+
+This module also defines two shortcut functions:
+
+call(*popenargs, **kwargs):
+ Run command with arguments. Wait for command to complete, then
+ return the returncode attribute.
+
+ The arguments are the same as for the Popen constructor. Example:
+
+ retcode = call(["ls", "-l"])
+
+check_call(*popenargs, **kwargs):
+ Run command with arguments. Wait for command to complete. If the
+ exit code was zero then return, otherwise raise
+ CalledProcessError. The CalledProcessError object will have the
+ return code in the returncode attribute.
+
+ The arguments are the same as for the Popen constructor. Example:
+
+ check_call(["ls", "-l"])
+
+Exceptions
+----------
+Exceptions raised in the child process, before the new program has
+started to execute, will be re-raised in the parent. Additionally,
+the exception object will have one extra attribute called
+'child_traceback', which is a string containing traceback information
+from the child's point of view.
+
+The most common exception raised is OSError. This occurs, for
+example, when trying to execute a non-existent file. Applications
+should prepare for OSErrors.
+
+A ValueError will be raised if Popen is called with invalid arguments.
+
+check_call() will raise CalledProcessError, if the called process
+returns a non-zero return code.
+
+
+Security
+--------
+Unlike some other popen functions, this implementation will never call
+/bin/sh implicitly. This means that all characters, including shell
+metacharacters, can safely be passed to child processes.
+
+
+Popen objects
+=============
+Instances of the Popen class have the following methods:
+
+poll()
+ Check if child process has terminated. Returns returncode
+ attribute.
+
+wait()
+ Wait for child process to terminate. Returns returncode attribute.
+
+communicate(input=None)
+ Interact with process: Send data to stdin. Read data from stdout
+ and stderr, until end-of-file is reached. Wait for process to
+    terminate. The optional input argument should be a string to be
+ sent to the child process, or None, if no data should be sent to
+ the child.
+
+ communicate() returns a tuple (stdout, stderr).
+
+ Note: The data read is buffered in memory, so do not use this
+ method if the data size is large or unlimited.
+
+The following attributes are also available:
+
+stdin
+ If the stdin argument is PIPE, this attribute is a file object
+ that provides input to the child process. Otherwise, it is None.
+
+stdout
+ If the stdout argument is PIPE, this attribute is a file object
+ that provides output from the child process. Otherwise, it is
+ None.
+
+stderr
+ If the stderr argument is PIPE, this attribute is file object that
+ provides error output from the child process. Otherwise, it is
+ None.
+
+pid
+ The process ID of the child process.
+
+returncode
+ The child return code. A None value indicates that the process
+ hasn't terminated yet. A negative value -N indicates that the
+ child was terminated by signal N (UNIX only).
+
+
+Replacing older functions with the subprocess module
+====================================================
+In this section, "a ==> b" means that b can be used as a replacement
+for a.
+
+Note: All functions in this section fail (more or less) silently if
+the executed program cannot be found; this module, by contrast, raises
+an OSError exception in that case.
+
+In the following examples, we assume that the subprocess module is
+imported with "from subprocess import *".
+
+
+Replacing /bin/sh shell backquote
+---------------------------------
+output=`mycmd myarg`
+==>
+output = Popen(["mycmd", "myarg"], stdout=PIPE).communicate()[0]
+
+
+Replacing shell pipe line
+-------------------------
+output=`dmesg | grep hda`
+==>
+p1 = Popen(["dmesg"], stdout=PIPE)
+p2 = Popen(["grep", "hda"], stdin=p1.stdout, stdout=PIPE)
+output = p2.communicate()[0]
+
+
+Replacing os.system()
+---------------------
+sts = os.system("mycmd" + " myarg")
+==>
+p = Popen("mycmd" + " myarg", shell=True)
+pid, sts = os.waitpid(p.pid, 0)
+
+Note:
+
+* Calling the program through the shell is usually not required.
+
+* It's easier to look at the returncode attribute than the
+ exitstatus.
+
+A more real-world example would look like this:
+
+try:
+ retcode = call("mycmd" + " myarg", shell=True)
+ if retcode < 0:
+ print >>sys.stderr, "Child was terminated by signal", -retcode
+ else:
+ print >>sys.stderr, "Child returned", retcode
+except OSError, e:
+ print >>sys.stderr, "Execution failed:", e
+
+
+Replacing os.spawn*
+-------------------
+P_NOWAIT example:
+
+pid = os.spawnlp(os.P_NOWAIT, "/bin/mycmd", "mycmd", "myarg")
+==>
+pid = Popen(["/bin/mycmd", "myarg"]).pid
+
+
+P_WAIT example:
+
+retcode = os.spawnlp(os.P_WAIT, "/bin/mycmd", "mycmd", "myarg")
+==>
+retcode = call(["/bin/mycmd", "myarg"])
+
+
+Vector example:
+
+os.spawnvp(os.P_NOWAIT, path, args)
+==>
+Popen([path] + args[1:])
+
+
+Environment example:
+
+os.spawnlpe(os.P_NOWAIT, "/bin/mycmd", "mycmd", "myarg", env)
+==>
+Popen(["/bin/mycmd", "myarg"], env={"PATH": "/usr/bin"})
+
+
+Replacing os.popen*
+-------------------
+pipe = os.popen(cmd, mode='r', bufsize)
+==>
+pipe = Popen(cmd, shell=True, bufsize=bufsize, stdout=PIPE).stdout
+
+pipe = os.popen(cmd, mode='w', bufsize)
+==>
+pipe = Popen(cmd, shell=True, bufsize=bufsize, stdin=PIPE).stdin
+
+
+(child_stdin, child_stdout) = os.popen2(cmd, mode, bufsize)
+==>
+p = Popen(cmd, shell=True, bufsize=bufsize,
+ stdin=PIPE, stdout=PIPE, close_fds=True)
+(child_stdin, child_stdout) = (p.stdin, p.stdout)
+
+
+(child_stdin,
+ child_stdout,
+ child_stderr) = os.popen3(cmd, mode, bufsize)
+==>
+p = Popen(cmd, shell=True, bufsize=bufsize,
+ stdin=PIPE, stdout=PIPE, stderr=PIPE, close_fds=True)
+(child_stdin,
+ child_stdout,
+ child_stderr) = (p.stdin, p.stdout, p.stderr)
+
+
+(child_stdin, child_stdout_and_stderr) = os.popen4(cmd, mode, bufsize)
+==>
+p = Popen(cmd, shell=True, bufsize=bufsize,
+ stdin=PIPE, stdout=PIPE, stderr=STDOUT, close_fds=True)
+(child_stdin, child_stdout_and_stderr) = (p.stdin, p.stdout)
+
+
+Replacing popen2.*
+------------------
+Note: If the cmd argument to popen2 functions is a string, the command
+is executed through /bin/sh. If it is a list, the command is directly
+executed.
+
+(child_stdout, child_stdin) = popen2.popen2("somestring", bufsize, mode)
+==>
+p = Popen(["somestring"], shell=True, bufsize=bufsize,
+ stdin=PIPE, stdout=PIPE, close_fds=True)
+(child_stdout, child_stdin) = (p.stdout, p.stdin)
+
+
+(child_stdout, child_stdin) = popen2.popen2(["mycmd", "myarg"], bufsize, mode)
+==>
+p = Popen(["mycmd", "myarg"], bufsize=bufsize,
+ stdin=PIPE, stdout=PIPE, close_fds=True)
+(child_stdout, child_stdin) = (p.stdout, p.stdin)
+
+The popen2.Popen3 and popen2.Popen4 classes basically work like
+subprocess.Popen, except that:
+
+* subprocess.Popen raises an exception if the execution fails
+* the capturestderr argument is replaced with the stderr argument.
+* stdin=PIPE and stdout=PIPE must be specified.
+* popen2 closes all file descriptors by default, but you have to specify
+ close_fds=True with subprocess.Popen.
+
+
+"""
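+
+# A minimal usage sketch (added for illustration, not part of the original
+# module): assuming SCons' compat machinery installs this file in place of
+# the standard subprocess module, capturing a command's output looks like:
+#
+#     p = Popen(["echo", "hello"], stdout=PIPE)
+#     output = p.communicate()[0]
+#     status = p.returncode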
+
+import sys
+mswindows = (sys.platform == "win32")
+
+import os
+import string
+import types
+import traceback
+
+# Exception classes used by this module.
+class CalledProcessError(Exception):
+ """This exception is raised when a process run by check_call() returns
+ a non-zero exit status. The exit status will be stored in the
+ returncode attribute."""
+ def __init__(self, returncode, cmd):
+ self.returncode = returncode
+ self.cmd = cmd
+ def __str__(self):
+ return "Command '%s' returned non-zero exit status %d" % (self.cmd, self.returncode)
+
+
+if mswindows:
+ try:
+ import threading
+ except ImportError:
+ # SCons: the threading module is only used by the communicate()
+ # method, which we don't actually use, so don't worry if we
+ # can't import it.
+ pass
+ import msvcrt
+ if 0: # <-- change this to use pywin32 instead of the _subprocess driver
+ import pywintypes
+ from win32api import GetStdHandle, STD_INPUT_HANDLE, \
+ STD_OUTPUT_HANDLE, STD_ERROR_HANDLE
+ from win32api import GetCurrentProcess, DuplicateHandle, \
+ GetModuleFileName, GetVersion
+ from win32con import DUPLICATE_SAME_ACCESS, SW_HIDE
+ from win32pipe import CreatePipe
+ from win32process import CreateProcess, STARTUPINFO, \
+ GetExitCodeProcess, STARTF_USESTDHANDLES, \
+ STARTF_USESHOWWINDOW, CREATE_NEW_CONSOLE
+ from win32event import WaitForSingleObject, INFINITE, WAIT_OBJECT_0
+ else:
+ # SCons: don't die on Python versions that don't have _subprocess.
+ try:
+ from _subprocess import *
+ except ImportError:
+ pass
+ class STARTUPINFO:
+ dwFlags = 0
+ hStdInput = None
+ hStdOutput = None
+ hStdError = None
+ wShowWindow = 0
+ class pywintypes:
+ error = IOError
+else:
+ import select
+ import errno
+ import fcntl
+ import pickle
+
+ try:
+ fcntl.F_GETFD
+ except AttributeError:
+ fcntl.F_GETFD = 1
+
+ try:
+ fcntl.F_SETFD
+ except AttributeError:
+ fcntl.F_SETFD = 2
+
+__all__ = ["Popen", "PIPE", "STDOUT", "call", "check_call", "CalledProcessError"]
+
+try:
+ MAXFD = os.sysconf("SC_OPEN_MAX")
+except KeyboardInterrupt:
+ raise # SCons: don't swallow keyboard interrupts
+except:
+ MAXFD = 256
+
+# True/False does not exist on 2.2.0
+try:
+ False
+except NameError:
+ False = 0
+ True = 1
+
+try:
+ isinstance(1, int)
+except TypeError:
+ def is_int(obj):
+ return type(obj) == type(1)
+ def is_int_or_long(obj):
+ return type(obj) in (type(1), type(1L))
+else:
+ def is_int(obj):
+ return isinstance(obj, int)
+ def is_int_or_long(obj):
+ return isinstance(obj, (int, long))
+
+try:
+ types.StringTypes
+except AttributeError:
+ try:
+ types.StringTypes = (types.StringType, types.UnicodeType)
+ except AttributeError:
+ types.StringTypes = (types.StringType,)
+ def is_string(obj):
+ return type(obj) in types.StringTypes
+else:
+ def is_string(obj):
+ return isinstance(obj, types.StringTypes)
+
+_active = []
+
+def _cleanup():
+ for inst in _active[:]:
+ if inst.poll(_deadstate=sys.maxint) >= 0:
+ try:
+ _active.remove(inst)
+ except ValueError:
+ # This can happen if two threads create a new Popen instance.
+ # It's harmless that it was already removed, so ignore.
+ pass
+
+PIPE = -1
+STDOUT = -2
+
+
+def call(*popenargs, **kwargs):
+ """Run command with arguments. Wait for command to complete, then
+ return the returncode attribute.
+
+ The arguments are the same as for the Popen constructor. Example:
+
+ retcode = call(["ls", "-l"])
+ """
+ return apply(Popen, popenargs, kwargs).wait()
+
+
+def check_call(*popenargs, **kwargs):
+ """Run command with arguments. Wait for command to complete. If
+ the exit code was zero then return, otherwise raise
+ CalledProcessError. The CalledProcessError object will have the
+ return code in the returncode attribute.
+
+ The arguments are the same as for the Popen constructor. Example:
+
+ check_call(["ls", "-l"])
+ """
+ retcode = apply(call, popenargs, kwargs)
+ cmd = kwargs.get("args")
+ if cmd is None:
+ cmd = popenargs[0]
+ if retcode:
+ raise CalledProcessError(retcode, cmd)
+ return retcode
+
+
+def list2cmdline(seq):
+ """
+ Translate a sequence of arguments into a command line
+ string, using the same rules as the MS C runtime:
+
+ 1) Arguments are delimited by white space, which is either a
+ space or a tab.
+
+ 2) A string surrounded by double quotation marks is
+ interpreted as a single argument, regardless of white space
+ contained within. A quoted string can be embedded in an
+ argument.
+
+ 3) A double quotation mark preceded by a backslash is
+ interpreted as a literal double quotation mark.
+
+ 4) Backslashes are interpreted literally, unless they
+ immediately precede a double quotation mark.
+
+ 5) If backslashes immediately precede a double quotation mark,
+ every pair of backslashes is interpreted as a literal
+ backslash. If the number of backslashes is odd, the last
+ backslash escapes the next double quotation mark as
+ described in rule 3.
+ """
+
+ # See
+ # http://msdn.microsoft.com/library/en-us/vccelng/htm/progs_12.asp
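+    #
+    # Illustrative results of the rules above (worked examples added for
+    # clarity, not from the original source):
+    #
+    #     list2cmdline(['a b c', 'd', 'e'])  ->  '"a b c" d e'
+    #     list2cmdline(['ab"c', '\\', 'd'])  ->  'ab\\"c \\ d'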
+ result = []
+ needquote = False
+ for arg in seq:
+ bs_buf = []
+
+ # Add a space to separate this argument from the others
+ if result:
+ result.append(' ')
+
+ needquote = (" " in arg) or ("\t" in arg)
+ if needquote:
+ result.append('"')
+
+ for c in arg:
+ if c == '\\':
+ # Don't know if we need to double yet.
+ bs_buf.append(c)
+ elif c == '"':
+                # Double backslashes.
+ result.append('\\' * len(bs_buf)*2)
+ bs_buf = []
+ result.append('\\"')
+ else:
+ # Normal char
+ if bs_buf:
+ result.extend(bs_buf)
+ bs_buf = []
+ result.append(c)
+
+        # Add remaining backslashes, if any.
+ if bs_buf:
+ result.extend(bs_buf)
+
+ if needquote:
+ result.extend(bs_buf)
+ result.append('"')
+
+ return string.join(result, '')
+
+
+try:
+ object
+except NameError:
+ class object:
+ pass
+
+class Popen(object):
+ def __init__(self, args, bufsize=0, executable=None,
+ stdin=None, stdout=None, stderr=None,
+ preexec_fn=None, close_fds=False, shell=False,
+ cwd=None, env=None, universal_newlines=False,
+ startupinfo=None, creationflags=0):
+ """Create new Popen instance."""
+ _cleanup()
+
+ self._child_created = False
+ if not is_int_or_long(bufsize):
+ raise TypeError("bufsize must be an integer")
+
+ if mswindows:
+ if preexec_fn is not None:
+ raise ValueError("preexec_fn is not supported on Windows "
+ "platforms")
+ if close_fds:
+ raise ValueError("close_fds is not supported on Windows "
+ "platforms")
+ else:
+ # POSIX
+ if startupinfo is not None:
+ raise ValueError("startupinfo is only supported on Windows "
+ "platforms")
+ if creationflags != 0:
+ raise ValueError("creationflags is only supported on Windows "
+ "platforms")
+
+ self.stdin = None
+ self.stdout = None
+ self.stderr = None
+ self.pid = None
+ self.returncode = None
+ self.universal_newlines = universal_newlines
+
+ # Input and output objects. The general principle is like
+ # this:
+ #
+ # Parent Child
+ # ------ -----
+ # p2cwrite ---stdin---> p2cread
+ # c2pread <--stdout--- c2pwrite
+ # errread <--stderr--- errwrite
+ #
+ # On POSIX, the child objects are file descriptors. On
+ # Windows, these are Windows file handles. The parent objects
+ # are file descriptors on both platforms. The parent objects
+ # are None when not using PIPEs. The child objects are None
+ # when not redirecting.
+
+ (p2cread, p2cwrite,
+ c2pread, c2pwrite,
+ errread, errwrite) = self._get_handles(stdin, stdout, stderr)
+
+ self._execute_child(args, executable, preexec_fn, close_fds,
+ cwd, env, universal_newlines,
+ startupinfo, creationflags, shell,
+ p2cread, p2cwrite,
+ c2pread, c2pwrite,
+ errread, errwrite)
+
+ if p2cwrite:
+ self.stdin = os.fdopen(p2cwrite, 'wb', bufsize)
+ if c2pread:
+ if universal_newlines:
+ self.stdout = os.fdopen(c2pread, 'rU', bufsize)
+ else:
+ self.stdout = os.fdopen(c2pread, 'rb', bufsize)
+ if errread:
+ if universal_newlines:
+ self.stderr = os.fdopen(errread, 'rU', bufsize)
+ else:
+ self.stderr = os.fdopen(errread, 'rb', bufsize)
+
+
+ def _translate_newlines(self, data):
+ data = data.replace("\r\n", "\n")
+ data = data.replace("\r", "\n")
+ return data
+
+
+ def __del__(self):
+ if not self._child_created:
+ # We didn't get to successfully create a child process.
+ return
+ # In case the child hasn't been waited on, check if it's done.
+ self.poll(_deadstate=sys.maxint)
+ if self.returncode is None and _active is not None:
+ # Child is still running, keep us alive until we can wait on it.
+ _active.append(self)
+
+
+ def communicate(self, input=None):
+ """Interact with process: Send data to stdin. Read data from
+ stdout and stderr, until end-of-file is reached. Wait for
+ process to terminate. The optional input argument should be a
+ string to be sent to the child process, or None, if no data
+ should be sent to the child.
+
+ communicate() returns a tuple (stdout, stderr)."""
+
+ # Optimization: If we are only using one pipe, or no pipe at
+ # all, using select() or threads is unnecessary.
+ if [self.stdin, self.stdout, self.stderr].count(None) >= 2:
+ stdout = None
+ stderr = None
+ if self.stdin:
+ if input:
+ self.stdin.write(input)
+ self.stdin.close()
+ elif self.stdout:
+ stdout = self.stdout.read()
+ elif self.stderr:
+ stderr = self.stderr.read()
+ self.wait()
+ return (stdout, stderr)
+
+ return self._communicate(input)
+
+
+ if mswindows:
+ #
+ # Windows methods
+ #
+        def _get_handles(self, stdin, stdout, stderr):
+            """Construct and return a tuple with IO objects:
+ p2cread, p2cwrite, c2pread, c2pwrite, errread, errwrite
+ """
+ if stdin is None and stdout is None and stderr is None:
+ return (None, None, None, None, None, None)
+
+ p2cread, p2cwrite = None, None
+ c2pread, c2pwrite = None, None
+ errread, errwrite = None, None
+
+ if stdin is None:
+ p2cread = GetStdHandle(STD_INPUT_HANDLE)
+ elif stdin == PIPE:
+ p2cread, p2cwrite = CreatePipe(None, 0)
+ # Detach and turn into fd
+ p2cwrite = p2cwrite.Detach()
+ p2cwrite = msvcrt.open_osfhandle(p2cwrite, 0)
+ elif is_int(stdin):
+ p2cread = msvcrt.get_osfhandle(stdin)
+ else:
+ # Assuming file-like object
+ p2cread = msvcrt.get_osfhandle(stdin.fileno())
+ p2cread = self._make_inheritable(p2cread)
+
+ if stdout is None:
+ c2pwrite = GetStdHandle(STD_OUTPUT_HANDLE)
+ elif stdout == PIPE:
+ c2pread, c2pwrite = CreatePipe(None, 0)
+ # Detach and turn into fd
+ c2pread = c2pread.Detach()
+ c2pread = msvcrt.open_osfhandle(c2pread, 0)
+ elif is_int(stdout):
+ c2pwrite = msvcrt.get_osfhandle(stdout)
+ else:
+ # Assuming file-like object
+ c2pwrite = msvcrt.get_osfhandle(stdout.fileno())
+ c2pwrite = self._make_inheritable(c2pwrite)
+
+ if stderr is None:
+ errwrite = GetStdHandle(STD_ERROR_HANDLE)
+ elif stderr == PIPE:
+ errread, errwrite = CreatePipe(None, 0)
+ # Detach and turn into fd
+ errread = errread.Detach()
+ errread = msvcrt.open_osfhandle(errread, 0)
+ elif stderr == STDOUT:
+ errwrite = c2pwrite
+ elif is_int(stderr):
+ errwrite = msvcrt.get_osfhandle(stderr)
+ else:
+ # Assuming file-like object
+ errwrite = msvcrt.get_osfhandle(stderr.fileno())
+ errwrite = self._make_inheritable(errwrite)
+
+ return (p2cread, p2cwrite,
+ c2pread, c2pwrite,
+ errread, errwrite)
+
+
+ def _make_inheritable(self, handle):
+ """Return a duplicate of handle, which is inheritable"""
+ return DuplicateHandle(GetCurrentProcess(), handle,
+ GetCurrentProcess(), 0, 1,
+ DUPLICATE_SAME_ACCESS)
+
+
+        def _find_w9xpopen(self):
+            """Find and return the absolute path to w9xpopen.exe"""
+ w9xpopen = os.path.join(os.path.dirname(GetModuleFileName(0)),
+ "w9xpopen.exe")
+ if not os.path.exists(w9xpopen):
+ # Eeek - file-not-found - possibly an embedding
+ # situation - see if we can locate it in sys.exec_prefix
+ w9xpopen = os.path.join(os.path.dirname(sys.exec_prefix),
+ "w9xpopen.exe")
+ if not os.path.exists(w9xpopen):
+ raise RuntimeError("Cannot locate w9xpopen.exe, which is "
+ "needed for Popen to work with your "
+ "shell or platform.")
+ return w9xpopen
+
+
+ def _execute_child(self, args, executable, preexec_fn, close_fds,
+ cwd, env, universal_newlines,
+ startupinfo, creationflags, shell,
+ p2cread, p2cwrite,
+ c2pread, c2pwrite,
+ errread, errwrite):
+ """Execute program (MS Windows version)"""
+
+ if not isinstance(args, types.StringTypes):
+ args = list2cmdline(args)
+
+ # Process startup details
+ if startupinfo is None:
+ startupinfo = STARTUPINFO()
+ if None not in (p2cread, c2pwrite, errwrite):
+ startupinfo.dwFlags = startupinfo.dwFlags | STARTF_USESTDHANDLES
+ startupinfo.hStdInput = p2cread
+ startupinfo.hStdOutput = c2pwrite
+ startupinfo.hStdError = errwrite
+
+ if shell:
+ startupinfo.dwFlags = startupinfo.dwFlags | STARTF_USESHOWWINDOW
+ startupinfo.wShowWindow = SW_HIDE
+ comspec = os.environ.get("COMSPEC", "cmd.exe")
+ args = comspec + " /c " + args
+ if (GetVersion() >= 0x80000000L or
+ os.path.basename(comspec).lower() == "command.com"):
+ # Win9x, or using command.com on NT. We need to
+ # use the w9xpopen intermediate program. For more
+ # information, see KB Q150956
+ # (http://web.archive.org/web/20011105084002/http://support.microsoft.com/support/kb/articles/Q150/9/56.asp)
+ w9xpopen = self._find_w9xpopen()
+ args = '"%s" %s' % (w9xpopen, args)
+ # Not passing CREATE_NEW_CONSOLE has been known to
+ # cause random failures on win9x. Specifically a
+ # dialog: "Your program accessed mem currently in
+ # use at xxx" and a hopeful warning about the
+                    # stability of your system.  The cost is that Ctrl+C
+                    # won't kill children.
+ creationflags = creationflags | CREATE_NEW_CONSOLE
+
+ # Start the process
+ try:
+ hp, ht, pid, tid = CreateProcess(executable, args,
+ # no special security
+ None, None,
+ # must inherit handles to pass std
+ # handles
+ 1,
+ creationflags,
+ env,
+ cwd,
+ startupinfo)
+ except pywintypes.error, e:
+ # Translate pywintypes.error to WindowsError, which is
+ # a subclass of OSError. FIXME: We should really
+                # translate errno using _sys_errlist (or similar), but
+ # how can this be done from Python?
+ raise apply(WindowsError, e.args)
+
+ # Retain the process handle, but close the thread handle
+ self._child_created = True
+ self._handle = hp
+ self.pid = pid
+ ht.Close()
+
+ # Child is launched. Close the parent's copy of those pipe
+ # handles that only the child should have open. You need
+ # to make sure that no handles to the write end of the
+ # output pipe are maintained in this process or else the
+ # pipe will not close when the child process exits and the
+ # ReadFile will hang.
+ if p2cread is not None:
+ p2cread.Close()
+ if c2pwrite is not None:
+ c2pwrite.Close()
+ if errwrite is not None:
+ errwrite.Close()
+
+
+ def poll(self, _deadstate=None):
+ """Check if child process has terminated. Returns returncode
+ attribute."""
+ if self.returncode is None:
+ if WaitForSingleObject(self._handle, 0) == WAIT_OBJECT_0:
+ self.returncode = GetExitCodeProcess(self._handle)
+ return self.returncode
+
+
+ def wait(self):
+ """Wait for child process to terminate. Returns returncode
+ attribute."""
+ if self.returncode is None:
+ obj = WaitForSingleObject(self._handle, INFINITE)
+ self.returncode = GetExitCodeProcess(self._handle)
+ return self.returncode
+
+
+ def _readerthread(self, fh, buffer):
+ buffer.append(fh.read())
+
+
+ def _communicate(self, input):
+ stdout = None # Return
+ stderr = None # Return
+
+ if self.stdout:
+ stdout = []
+ stdout_thread = threading.Thread(target=self._readerthread,
+ args=(self.stdout, stdout))
+ stdout_thread.setDaemon(True)
+ stdout_thread.start()
+ if self.stderr:
+ stderr = []
+ stderr_thread = threading.Thread(target=self._readerthread,
+ args=(self.stderr, stderr))
+ stderr_thread.setDaemon(True)
+ stderr_thread.start()
+
+ if self.stdin:
+ if input is not None:
+ self.stdin.write(input)
+ self.stdin.close()
+
+ if self.stdout:
+ stdout_thread.join()
+ if self.stderr:
+ stderr_thread.join()
+
+ # All data exchanged. Translate lists into strings.
+ if stdout is not None:
+ stdout = stdout[0]
+ if stderr is not None:
+ stderr = stderr[0]
+
+ # Translate newlines, if requested. We cannot let the file
+ # object do the translation: It is based on stdio, which is
+ # impossible to combine with select (unless forcing no
+ # buffering).
+ if self.universal_newlines and hasattr(file, 'newlines'):
+ if stdout:
+ stdout = self._translate_newlines(stdout)
+ if stderr:
+ stderr = self._translate_newlines(stderr)
+
+ self.wait()
+ return (stdout, stderr)
+
+ else:
+ #
+ # POSIX methods
+ #
+        def _get_handles(self, stdin, stdout, stderr):
+            """Construct and return a tuple with IO objects:
+ p2cread, p2cwrite, c2pread, c2pwrite, errread, errwrite
+ """
+ p2cread, p2cwrite = None, None
+ c2pread, c2pwrite = None, None
+ errread, errwrite = None, None
+
+ if stdin is None:
+ pass
+ elif stdin == PIPE:
+ p2cread, p2cwrite = os.pipe()
+ elif is_int(stdin):
+ p2cread = stdin
+ else:
+ # Assuming file-like object
+ p2cread = stdin.fileno()
+
+ if stdout is None:
+ pass
+ elif stdout == PIPE:
+ c2pread, c2pwrite = os.pipe()
+ elif is_int(stdout):
+ c2pwrite = stdout
+ else:
+ # Assuming file-like object
+ c2pwrite = stdout.fileno()
+
+ if stderr is None:
+ pass
+ elif stderr == PIPE:
+ errread, errwrite = os.pipe()
+ elif stderr == STDOUT:
+ errwrite = c2pwrite
+ elif is_int(stderr):
+ errwrite = stderr
+ else:
+ # Assuming file-like object
+ errwrite = stderr.fileno()
+
+ return (p2cread, p2cwrite,
+ c2pread, c2pwrite,
+ errread, errwrite)
+
+
+ def _set_cloexec_flag(self, fd):
+ try:
+ cloexec_flag = fcntl.FD_CLOEXEC
+ except AttributeError:
+ cloexec_flag = 1
+
+ old = fcntl.fcntl(fd, fcntl.F_GETFD)
+ fcntl.fcntl(fd, fcntl.F_SETFD, old | cloexec_flag)
+
+
+ def _close_fds(self, but):
+ for i in xrange(3, MAXFD):
+ if i == but:
+ continue
+ try:
+ os.close(i)
+ except KeyboardInterrupt:
+ raise # SCons: don't swallow keyboard interrupts
+ except:
+ pass
+
+
+ def _execute_child(self, args, executable, preexec_fn, close_fds,
+ cwd, env, universal_newlines,
+ startupinfo, creationflags, shell,
+ p2cread, p2cwrite,
+ c2pread, c2pwrite,
+ errread, errwrite):
+ """Execute program (POSIX version)"""
+
+ if is_string(args):
+ args = [args]
+
+ if shell:
+ args = ["/bin/sh", "-c"] + args
+
+ if executable is None:
+ executable = args[0]
+
+ # For transferring possible exec failure from child to parent
+ # The first char specifies the exception type: 0 means
+ # OSError, 1 means some other error.
+ errpipe_read, errpipe_write = os.pipe()
+ self._set_cloexec_flag(errpipe_write)
+
+ self.pid = os.fork()
+ self._child_created = True
+ if self.pid == 0:
+ # Child
+ try:
+ # Close parent's pipe ends
+ if p2cwrite:
+ os.close(p2cwrite)
+ if c2pread:
+ os.close(c2pread)
+ if errread:
+ os.close(errread)
+ os.close(errpipe_read)
+
+ # Dup fds for child
+ if p2cread:
+ os.dup2(p2cread, 0)
+ if c2pwrite:
+ os.dup2(c2pwrite, 1)
+ if errwrite:
+ os.dup2(errwrite, 2)
+
+ # Close pipe fds. Make sure we don't close the same
+ # fd more than once, or standard fds.
+ try:
+ set
+ except NameError:
+ # Fall-back for earlier Python versions, so epydoc
+ # can use this module directly to execute things.
+ if p2cread:
+ os.close(p2cread)
+ if c2pwrite and c2pwrite not in (p2cread,):
+ os.close(c2pwrite)
+ if errwrite and errwrite not in (p2cread, c2pwrite):
+ os.close(errwrite)
+ else:
+ for fd in set((p2cread, c2pwrite, errwrite))-set((0,1,2)):
+ if fd: os.close(fd)
+
+ # Close all other fds, if asked for
+ if close_fds:
+ self._close_fds(but=errpipe_write)
+
+ if cwd is not None:
+ os.chdir(cwd)
+
+ if preexec_fn:
+ apply(preexec_fn)
+
+ if env is None:
+ os.execvp(executable, args)
+ else:
+ os.execvpe(executable, args, env)
+
+ except KeyboardInterrupt:
+ raise # SCons: don't swallow keyboard interrupts
+
+ except:
+ exc_type, exc_value, tb = sys.exc_info()
+ # Save the traceback and attach it to the exception object
+ exc_lines = traceback.format_exception(exc_type,
+ exc_value,
+ tb)
+ exc_value.child_traceback = string.join(exc_lines, '')
+ os.write(errpipe_write, pickle.dumps(exc_value))
+
+ # This exitcode won't be reported to applications, so it
+ # really doesn't matter what we return.
+ os._exit(255)
+
+ # Parent
+ os.close(errpipe_write)
+ if p2cread and p2cwrite:
+ os.close(p2cread)
+ if c2pwrite and c2pread:
+ os.close(c2pwrite)
+ if errwrite and errread:
+ os.close(errwrite)
+
+ # Wait for exec to fail or succeed; possibly raising exception
+ data = os.read(errpipe_read, 1048576) # Exceptions limited to 1 MB
+ os.close(errpipe_read)
+ if data != "":
+ os.waitpid(self.pid, 0)
+ child_exception = pickle.loads(data)
+ raise child_exception
+
+
+ def _handle_exitstatus(self, sts):
+ if os.WIFSIGNALED(sts):
+ self.returncode = -os.WTERMSIG(sts)
+ elif os.WIFEXITED(sts):
+ self.returncode = os.WEXITSTATUS(sts)
+ else:
+ # Should never happen
+ raise RuntimeError("Unknown child exit status!")
+
+
+ def poll(self, _deadstate=None):
+ """Check if child process has terminated. Returns returncode
+ attribute."""
+ if self.returncode is None:
+ try:
+ pid, sts = os.waitpid(self.pid, os.WNOHANG)
+ if pid == self.pid:
+ self._handle_exitstatus(sts)
+ except os.error:
+ if _deadstate is not None:
+ self.returncode = _deadstate
+ return self.returncode
+
+
+ def wait(self):
+ """Wait for child process to terminate. Returns returncode
+ attribute."""
+ if self.returncode is None:
+ pid, sts = os.waitpid(self.pid, 0)
+ self._handle_exitstatus(sts)
+ return self.returncode
+
+
+ def _communicate(self, input):
+ read_set = []
+ write_set = []
+ stdout = None # Return
+ stderr = None # Return
+
+ if self.stdin:
+ # Flush stdio buffer. This might block, if the user has
+ # been writing to .stdin in an uncontrolled fashion.
+ self.stdin.flush()
+ if input:
+ write_set.append(self.stdin)
+ else:
+ self.stdin.close()
+ if self.stdout:
+ read_set.append(self.stdout)
+ stdout = []
+ if self.stderr:
+ read_set.append(self.stderr)
+ stderr = []
+
+ input_offset = 0
+ while read_set or write_set:
+ rlist, wlist, xlist = select.select(read_set, write_set, [])
+
+ if self.stdin in wlist:
+ # When select has indicated that the file is writable,
+                    # we can write up to PIPE_BUF bytes without risk of
+                    # blocking.  POSIX defines PIPE_BUF >= 512.
+ bytes_written = os.write(self.stdin.fileno(), buffer(input, input_offset, 512))
+ input_offset = input_offset + bytes_written
+ if input_offset >= len(input):
+ self.stdin.close()
+ write_set.remove(self.stdin)
+
+ if self.stdout in rlist:
+ data = os.read(self.stdout.fileno(), 1024)
+ if data == "":
+ self.stdout.close()
+ read_set.remove(self.stdout)
+ stdout.append(data)
+
+ if self.stderr in rlist:
+ data = os.read(self.stderr.fileno(), 1024)
+ if data == "":
+ self.stderr.close()
+ read_set.remove(self.stderr)
+ stderr.append(data)
+
+ # All data exchanged. Translate lists into strings.
+ if stdout is not None:
+ stdout = string.join(stdout, '')
+ if stderr is not None:
+ stderr = string.join(stderr, '')
+
+ # Translate newlines, if requested. We cannot let the file
+ # object do the translation: It is based on stdio, which is
+ # impossible to combine with select (unless forcing no
+ # buffering).
+ if self.universal_newlines and hasattr(file, 'newlines'):
+ if stdout:
+ stdout = self._translate_newlines(stdout)
+ if stderr:
+ stderr = self._translate_newlines(stderr)
+
+ self.wait()
+ return (stdout, stderr)
+
+
+def _demo_posix():
+ #
+ # Example 1: Simple redirection: Get process list
+ #
+ plist = Popen(["ps"], stdout=PIPE).communicate()[0]
+ print "Process list:"
+ print plist
+
+ #
+ # Example 2: Change uid before executing child
+ #
+ if os.getuid() == 0:
+ p = Popen(["id"], preexec_fn=lambda: os.setuid(100))
+ p.wait()
+
+ #
+ # Example 3: Connecting several subprocesses
+ #
+ print "Looking for 'hda'..."
+ p1 = Popen(["dmesg"], stdout=PIPE)
+ p2 = Popen(["grep", "hda"], stdin=p1.stdout, stdout=PIPE)
+ print repr(p2.communicate()[0])
+
+ #
+ # Example 4: Catch execution error
+ #
+ print
+ print "Trying a weird file..."
+ try:
+ print Popen(["/this/path/does/not/exist"]).communicate()
+ except OSError, e:
+ if e.errno == errno.ENOENT:
+ print "The file didn't exist. I thought so..."
+ print "Child traceback:"
+ print e.child_traceback
+ else:
+ print "Error", e.errno
+ else:
+ sys.stderr.write( "Gosh. No error.\n" )
+
+
+def _demo_windows():
+ #
+ # Example 1: Connecting several subprocesses
+ #
+ print "Looking for 'PROMPT' in set output..."
+ p1 = Popen("set", stdout=PIPE, shell=True)
+ p2 = Popen('find "PROMPT"', stdin=p1.stdout, stdout=PIPE)
+ print repr(p2.communicate()[0])
+
+ #
+ # Example 2: Simple execution of program
+ #
+ print "Executing calc..."
+ p = Popen("calc")
+ p.wait()
+
+
+if __name__ == "__main__":
+ if mswindows:
+ _demo_windows()
+ else:
+ _demo_posix()
+
+# Local Variables:
+# tab-width:4
+# indent-tabs-mode:nil
+# End:
+# vim: set expandtab tabstop=4 shiftwidth=4:
diff --git a/src/engine/SCons/compat/_scons_textwrap.py b/src/engine/SCons/compat/_scons_textwrap.py
new file mode 100644
index 0000000..81781af
--- /dev/null
+++ b/src/engine/SCons/compat/_scons_textwrap.py
@@ -0,0 +1,382 @@
+"""Text wrapping and filling.
+"""
+
+# Copyright (C) 1999-2001 Gregory P. Ward.
+# Copyright (C) 2002, 2003 Python Software Foundation.
+# Written by Greg Ward <gward@python.net>
+
+__revision__ = "$Id: textwrap.py,v 1.32.8.2 2004/05/13 01:48:15 gward Exp $"
+
+import string, re
+
+try:
+ unicode
+except NameError:
+ class unicode:
+ pass
+
+# Do the right thing with boolean values for all known Python versions
+# (so this module can be copied to projects that don't depend on Python
+# 2.3, e.g. Optik and Docutils).
+try:
+ True, False
+except NameError:
+ (True, False) = (1, 0)
+
+__all__ = ['TextWrapper', 'wrap', 'fill']
+
+# Hardcode the recognized whitespace characters to the US-ASCII
+# whitespace characters. The main reason for doing this is that in
+# ISO-8859-1, 0xa0 is non-breaking whitespace, so in certain locales
+# that character winds up in string.whitespace. Respecting
+# string.whitespace in those cases would 1) make textwrap treat 0xa0 the
+# same as any other whitespace char, which is clearly wrong (it's a
+# *non-breaking* space), 2) possibly cause problems with Unicode,
+# since 0xa0 is not in range(128).
+_whitespace = '\t\n\x0b\x0c\r '
+
+class TextWrapper:
+ """
+ Object for wrapping/filling text. The public interface consists of
+ the wrap() and fill() methods; the other methods are just there for
+ subclasses to override in order to tweak the default behaviour.
+ If you want to completely replace the main wrapping algorithm,
+ you'll probably have to override _wrap_chunks().
+
+ Several instance attributes control various aspects of wrapping:
+ width (default: 70)
+ the maximum width of wrapped lines (unless break_long_words
+ is false)
+ initial_indent (default: "")
+ string that will be prepended to the first line of wrapped
+ output. Counts towards the line's width.
+ subsequent_indent (default: "")
+ string that will be prepended to all lines save the first
+ of wrapped output; also counts towards each line's width.
+ expand_tabs (default: true)
+ Expand tabs in input text to spaces before further processing.
+ Each tab will become 1 .. 8 spaces, depending on its position in
+ its line. If false, each tab is treated as a single character.
+ replace_whitespace (default: true)
+ Replace all whitespace characters in the input text by spaces
+ after tab expansion. Note that if expand_tabs is false and
+ replace_whitespace is true, every tab will be converted to a
+ single space!
+ fix_sentence_endings (default: false)
+ Ensure that sentence-ending punctuation is always followed
+ by two spaces. Off by default because the algorithm is
+ (unavoidably) imperfect.
+ break_long_words (default: true)
+ Break words longer than 'width'. If false, those words will not
+ be broken, and some lines might be longer than 'width'.
+ """
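+
+    # Usage sketch (hypothetical values, added for illustration):
+    #
+    #     w = TextWrapper(width=30, initial_indent='  * ',
+    #                     subsequent_indent='    ')
+    #     lines = w.wrap(long_help_text)   # -> list of wrapped lines
+    #     text  = w.fill(long_help_text)   # -> the same content as one string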
+
+ whitespace_trans = string.maketrans(_whitespace, ' ' * len(_whitespace))
+
+ unicode_whitespace_trans = {}
+ try:
+ uspace = eval("ord(u' ')")
+ except SyntaxError:
+        # Python 1.5 doesn't understand u'' syntax, in which case we
+ # won't actually use the unicode translation below, so it
+ # doesn't matter what value we put in the table.
+ uspace = ord(' ')
+ for x in map(ord, _whitespace):
+ unicode_whitespace_trans[x] = uspace
+
+ # This funky little regex is just the trick for splitting
+ # text up into word-wrappable chunks. E.g.
+ # "Hello there -- you goof-ball, use the -b option!"
+ # splits into
+ # Hello/ /there/ /--/ /you/ /goof-/ball,/ /use/ /the/ /-b/ /option!
+ # (after stripping out empty strings).
+ try:
+ wordsep_re = re.compile(r'(\s+|' # any whitespace
+ r'[^\s\w]*\w{2,}-(?=\w{2,})|' # hyphenated words
+ r'(?<=[\w\!\"\'\&\.\,\?])-{2,}(?=\w))') # em-dash
+ except re.error:
+ # Pre-2.0 Python versions don't have the (?<= negative look-behind
+ # assertion. It mostly doesn't matter for the simple input
+ # SCons is going to give it, so just leave it out.
+ wordsep_re = re.compile(r'(\s+|' # any whitespace
+ r'-*\w{2,}-(?=\w{2,}))') # hyphenated words
+
+ # XXX will there be a locale-or-charset-aware version of
+ # string.lowercase in 2.3?
+ sentence_end_re = re.compile(r'[%s]' # lowercase letter
+ r'[\.\!\?]' # sentence-ending punct.
+ r'[\"\']?' # optional end-of-quote
+ % string.lowercase)
+
+
+ def __init__(self,
+ width=70,
+ initial_indent="",
+ subsequent_indent="",
+ expand_tabs=True,
+ replace_whitespace=True,
+ fix_sentence_endings=False,
+ break_long_words=True):
+ self.width = width
+ self.initial_indent = initial_indent
+ self.subsequent_indent = subsequent_indent
+ self.expand_tabs = expand_tabs
+ self.replace_whitespace = replace_whitespace
+ self.fix_sentence_endings = fix_sentence_endings
+ self.break_long_words = break_long_words
+
+
+ # -- Private methods -----------------------------------------------
+ # (possibly useful for subclasses to override)
+
+ def _munge_whitespace(self, text):
+ """_munge_whitespace(text : string) -> string
+
+ Munge whitespace in text: expand tabs and convert all other
+ whitespace characters to spaces. Eg. " foo\tbar\n\nbaz"
+ becomes " foo bar baz".
+ """
+ if self.expand_tabs:
+ text = string.expandtabs(text)
+ if self.replace_whitespace:
+ if type(text) == type(''):
+ text = string.translate(text, self.whitespace_trans)
+ elif isinstance(text, unicode):
+ text = string.translate(text, self.unicode_whitespace_trans)
+ return text
+
+
+ def _split(self, text):
+ """_split(text : string) -> [string]
+
+ Split the text to wrap into indivisible chunks. Chunks are
+        not quite the same as words; see _wrap_chunks() for full
+ details. As an example, the text
+ Look, goof-ball -- use the -b option!
+ breaks into the following chunks:
+ 'Look,', ' ', 'goof-', 'ball', ' ', '--', ' ',
+ 'use', ' ', 'the', ' ', '-b', ' ', 'option!'
+ """
+ chunks = self.wordsep_re.split(text)
+ chunks = filter(None, chunks)
+ return chunks
+
+ def _fix_sentence_endings(self, chunks):
+ """_fix_sentence_endings(chunks : [string])
+
+ Correct for sentence endings buried in 'chunks'. Eg. when the
+ original text contains "... foo.\nBar ...", munge_whitespace()
+ and split() will convert that to [..., "foo.", " ", "Bar", ...]
+ which has one too few spaces; this method simply changes the one
+ space to two.
+ """
+ i = 0
+ pat = self.sentence_end_re
+ while i < len(chunks)-1:
+ if chunks[i+1] == " " and pat.search(chunks[i]):
+ chunks[i+1] = " "
+ i = i + 2
+ else:
+ i = i + 1
+
+ def _handle_long_word(self, chunks, cur_line, cur_len, width):
+ """_handle_long_word(chunks : [string],
+ cur_line : [string],
+ cur_len : int, width : int)
+
+ Handle a chunk of text (most likely a word, not whitespace) that
+ is too long to fit in any line.
+ """
+ space_left = max(width - cur_len, 1)
+
+ # If we're allowed to break long words, then do so: put as much
+ # of the next chunk onto the current line as will fit.
+ if self.break_long_words:
+ cur_line.append(chunks[0][0:space_left])
+ chunks[0] = chunks[0][space_left:]
+
+ # Otherwise, we have to preserve the long word intact. Only add
+ # it to the current line if there's nothing already there --
+ # that minimizes how much we violate the width constraint.
+ elif not cur_line:
+ cur_line.append(chunks.pop(0))
+
+ # If we're not allowed to break long words, and there's already
+ # text on the current line, do nothing. Next time through the
+ # main loop of _wrap_chunks(), we'll wind up here again, but
+ # cur_len will be zero, so the next line will be entirely
+ # devoted to the long word that we can't handle right now.
+
+ def _wrap_chunks(self, chunks):
+ """_wrap_chunks(chunks : [string]) -> [string]
+
+ Wrap a sequence of text chunks and return a list of lines of
+ length 'self.width' or less. (If 'break_long_words' is false,
+ some lines may be longer than this.) Chunks correspond roughly
+ to words and the whitespace between them: each chunk is
+ indivisible (modulo 'break_long_words'), but a line break can
+ come between any two chunks. Chunks should not have internal
+ whitespace; ie. a chunk is either all whitespace or a "word".
+ Whitespace chunks will be removed from the beginning and end of
+ lines, but apart from that whitespace is preserved.
+ """
+ lines = []
+ if self.width <= 0:
+ raise ValueError("invalid width %r (must be > 0)" % self.width)
+
+ while chunks:
+
+ # Start the list of chunks that will make up the current line.
+ # cur_len is just the length of all the chunks in cur_line.
+ cur_line = []
+ cur_len = 0
+
+ # Figure out which static string will prefix this line.
+ if lines:
+ indent = self.subsequent_indent
+ else:
+ indent = self.initial_indent
+
+ # Maximum width for this line.
+ width = self.width - len(indent)
+
+ # First chunk on line is whitespace -- drop it, unless this
+ # is the very beginning of the text (ie. no lines started yet).
+ if string.strip(chunks[0]) == '' and lines:
+ del chunks[0]
+
+ while chunks:
+ l = len(chunks[0])
+
+ # Can at least squeeze this chunk onto the current line.
+ if cur_len + l <= width:
+ cur_line.append(chunks.pop(0))
+ cur_len = cur_len + l
+
+ # Nope, this line is full.
+ else:
+ break
+
+ # The current line is full, and the next chunk is too big to
+ # fit on *any* line (not just this one).
+ if chunks and len(chunks[0]) > width:
+ self._handle_long_word(chunks, cur_line, cur_len, width)
+
+ # If the last chunk on this line is all whitespace, drop it.
+ if cur_line and string.strip(cur_line[-1]) == '':
+ del cur_line[-1]
+
+ # Convert current line back to a string and store it in list
+ # of all lines (return value).
+ if cur_line:
+ lines.append(indent + string.join(cur_line, ''))
+
+ return lines
+
+
+ # -- Public interface ----------------------------------------------
+
+ def wrap(self, text):
+ """wrap(text : string) -> [string]
+
+ Reformat the single paragraph in 'text' so it fits in lines of
+ no more than 'self.width' columns, and return a list of wrapped
+ lines. Tabs in 'text' are expanded with string.expandtabs(),
+ and all other whitespace characters (including newline) are
+ converted to space.
+ """
+ text = self._munge_whitespace(text)
+ indent = self.initial_indent
+ chunks = self._split(text)
+ if self.fix_sentence_endings:
+ self._fix_sentence_endings(chunks)
+ return self._wrap_chunks(chunks)
+
+ def fill(self, text):
+ """fill(text : string) -> string
+
+ Reformat the single paragraph in 'text' to fit in lines of no
+ more than 'self.width' columns, and return a new string
+ containing the entire wrapped paragraph.
+ """
+ return string.join(self.wrap(text), "\n")
+
+
+# -- Convenience interface ---------------------------------------------
+
+def wrap(text, width=70, **kwargs):
+ """Wrap a single paragraph of text, returning a list of wrapped lines.
+
+ Reformat the single paragraph in 'text' so it fits in lines of no
+ more than 'width' columns, and return a list of wrapped lines. By
+ default, tabs in 'text' are expanded with string.expandtabs(), and
+ all other whitespace characters (including newline) are converted to
+ space. See TextWrapper class for available keyword args to customize
+ wrapping behaviour.
+ """
+ kw = kwargs.copy()
+ kw['width'] = width
+ w = apply(TextWrapper, (), kw)
+ return w.wrap(text)
+
+def fill(text, width=70, **kwargs):
+ """Fill a single paragraph of text, returning a new string.
+
+ Reformat the single paragraph in 'text' to fit in lines of no more
+ than 'width' columns, and return a new string containing the entire
+ wrapped paragraph. As with wrap(), tabs are expanded and other
+ whitespace characters converted to space. See TextWrapper class for
+ available keyword args to customize wrapping behaviour.
+ """
+ kw = kwargs.copy()
+ kw['width'] = width
+ w = apply(TextWrapper, (), kw)
+ return w.fill(text)
+
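+# Expected behaviour sketch for the convenience functions above (mirrors the
+# standard-library textwrap module; the sample text is only an example):
+#
+#     wrap("the quick brown fox jumped over the lazy dog", width=10)
+#       -> ['the quick', 'brown fox', 'jumped', 'over the', 'lazy dog']
+#     fill("the quick brown fox jumped over the lazy dog", width=10)
+#       -> 'the quick\nbrown fox\njumped\nover the\nlazy dog'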
+
+# -- Loosely related functionality -------------------------------------
+
+def dedent(text):
+ """dedent(text : string) -> string
+
+    Remove any whitespace that can be uniformly removed from the left
+ of every line in `text`.
+
+ This can be used e.g. to make triple-quoted strings line up with
+ the left edge of screen/whatever, while still presenting it in the
+ source code in indented form.
+
+ For example:
+
+ def test():
+ # end first line with \ to avoid the empty line!
+ s = '''\
+ hello
+ world
+ '''
+ print repr(s) # prints ' hello\n world\n '
+ print repr(dedent(s)) # prints 'hello\n world\n'
+ """
+ lines = text.expandtabs().split('\n')
+ margin = None
+ for line in lines:
+ content = line.lstrip()
+ if not content:
+ continue
+ indent = len(line) - len(content)
+ if margin is None:
+ margin = indent
+ else:
+ margin = min(margin, indent)
+
+ if margin is not None and margin > 0:
+ for i in range(len(lines)):
+ lines[i] = lines[i][margin:]
+
+ return string.join(lines, '\n')
+
+# Local Variables:
+# tab-width:4
+# indent-tabs-mode:nil
+# End:
+# vim: set expandtab tabstop=4 shiftwidth=4:
diff --git a/src/engine/SCons/compat/builtins.py b/src/engine/SCons/compat/builtins.py
new file mode 100644
index 0000000..a79b2ad
--- /dev/null
+++ b/src/engine/SCons/compat/builtins.py
@@ -0,0 +1,187 @@
+#
+# Copyright (c) 2001, 2002, 2003, 2004, 2005, 2006, 2007, 2008, 2009 The SCons Foundation
+#
+# Permission is hereby granted, free of charge, to any person obtaining
+# a copy of this software and associated documentation files (the
+# "Software"), to deal in the Software without restriction, including
+# without limitation the rights to use, copy, modify, merge, publish,
+# distribute, sublicense, and/or sell copies of the Software, and to
+# permit persons to whom the Software is furnished to do so, subject to
+# the following conditions:
+#
+# The above copyright notice and this permission notice shall be included
+# in all copies or substantial portions of the Software.
+#
+# THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY
+# KIND, EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE
+# WARRANTIES OF MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND
+# NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS BE
+# LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION
+# OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION
+# WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE.
+#
+
+# Portions of the following are derived from the compat.py file in
+# Twisted, under the following copyright:
+#
+# Copyright (c) 2001-2004 Twisted Matrix Laboratories
+
+__doc__ = """
+Compatibility idioms for __builtin__ names
+
+This module adds names to the __builtin__ module for things that we want
+to use in SCons but which don't show up until later Python versions than
+the earliest ones we support.
+
+This module checks for the following __builtin__ names:
+
+ all()
+ any()
+ bool()
+ dict()
+ True
+ False
+ zip()
+
+Implementations of functions are *NOT* guaranteed to be fully compliant
+with these functions in later versions of Python. We are only concerned
+with adding functionality that we actually use in SCons, so be wary
+if you lift this code for other uses. (That said, making these more
+nearly the same as later, official versions is still a desirable goal;
+we just don't need to be obsessive about it.)
+
+If you're looking at this with pydoc and various names don't show up in
+the FUNCTIONS or DATA output, that means those names are already built in
+to this version of Python and we don't need to add them from this module.
+"""
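+# Usage sketch (an assumption about how callers consume this shim): importing
+# the module once is enough, after which the backfilled names behave like the
+# real built-ins on newer Pythons:
+#
+#     import SCons.compat.builtins
+#     assert all([1, 2, 3]) and not any([0, 0, 0])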
+
+__revision__ = "src/engine/SCons/compat/builtins.py 4577 2009/12/27 19:44:43 scons"
+
+import __builtin__
+
+try:
+ all
+except NameError:
+ # Pre-2.5 Python has no all() function.
+ def all(iterable):
+ """
+ Returns True if all elements of the iterable are true.
+ """
+ for element in iterable:
+ if not element:
+ return False
+ return True
+ __builtin__.all = all
+ all = all
+
+try:
+ any
+except NameError:
+ # Pre-2.5 Python has no any() function.
+ def any(iterable):
+ """
+ Returns True if any element of the iterable is true.
+ """
+ for element in iterable:
+ if element:
+ return True
+ return False
+ __builtin__.any = any
+ any = any
+
+try:
+ bool
+except NameError:
+ # Pre-2.2 Python has no bool() function.
+ def bool(value):
+ """Demote a value to 0 or 1, depending on its truth value.
+
+ This is not to be confused with types.BooleanType, which is
+ way too hard to duplicate in early Python versions to be
+ worth the trouble.
+ """
+ return not not value
+ __builtin__.bool = bool
+ bool = bool
+
+try:
+ dict
+except NameError:
+ # Pre-2.2 Python has no dict() keyword.
+ def dict(seq=[], **kwargs):
+ """
+ New dictionary initialization.
+ """
+ d = {}
+ for k, v in seq:
+ d[k] = v
+ d.update(kwargs)
+ return d
+ __builtin__.dict = dict
+
+try:
+ False
+except NameError:
+ # Pre-2.2 Python has no False keyword.
+ __builtin__.False = not 1
+ # Assign to False in this module namespace so it shows up in pydoc output.
+ False = False
+
+try:
+ True
+except NameError:
+ # Pre-2.2 Python has no True keyword.
+ __builtin__.True = not 0
+ # Assign to True in this module namespace so it shows up in pydoc output.
+ True = True
+
+try:
+ file
+except NameError:
+ # Pre-2.2 Python has no file() function.
+ __builtin__.file = open
+
+#
+try:
+ zip
+except NameError:
+ # Pre-2.2 Python has no zip() function.
+ def zip(*lists):
+ """
+ Emulates the behavior we need from the built-in zip() function
+ added in Python 2.2.
+
+ Returns a list of tuples, where each tuple contains the i-th
+        element from each of the argument sequences. The returned
+ list is truncated in length to the length of the shortest
+ argument sequence.
+ """
+ result = []
+ for i in xrange(min(map(len, lists))):
+ result.append(tuple(map(lambda l, i=i: l[i], lists)))
+ return result
+ __builtin__.zip = zip
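+    # Behaviour sketch: zip([1, 2, 3], ['a', 'b'])  ->  [(1, 'a'), (2, 'b')]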
+
+
+
+#if sys.version_info[:3] in ((2, 2, 0), (2, 2, 1)):
+# def lstrip(s, c=string.whitespace):
+# while s and s[0] in c:
+# s = s[1:]
+# return s
+# def rstrip(s, c=string.whitespace):
+# while s and s[-1] in c:
+# s = s[:-1]
+# return s
+# def strip(s, c=string.whitespace, l=lstrip, r=rstrip):
+# return l(r(s, c), c)
+#
+# object.__setattr__(str, 'lstrip', lstrip)
+# object.__setattr__(str, 'rstrip', rstrip)
+# object.__setattr__(str, 'strip', strip)
+
+# Local Variables:
+# tab-width:4
+# indent-tabs-mode:nil
+# End:
+# vim: set expandtab tabstop=4 shiftwidth=4: