Pārlūkot izejas kodu

Use local copy of ctypesgen

git-svn-id: https://svn.osgeo.org/grass/grass/trunk@42345 15284696-431f-4ddb-bdfa-cd5b030d7da7
Glynn Clements 15 gadi atpakaļ
vecāks
revīzija
1ce8b87936
36 mainītis faili ar 8718 papildinājumiem un 60 dzēšanām
  1. 6 48
      configure
  2. 0 9
      configure.in
  3. 0 1
      include/Make/Platform.make.in
  4. 1 1
      lib/Makefile
  5. 1 1
      lib/python/ctypes/Makefile
  6. 150 0
      lib/python/ctypes/ctypesgen.py
  7. 26 0
      lib/python/ctypes/ctypesgencore/LICENSE
  8. 62 0
      lib/python/ctypes/ctypesgencore/__init__.py
  9. 314 0
      lib/python/ctypes/ctypesgencore/ctypedescs.py
  10. 189 0
      lib/python/ctypes/ctypesgencore/descriptions.py
  11. 305 0
      lib/python/ctypes/ctypesgencore/expressions.py
  12. 248 0
      lib/python/ctypes/ctypesgencore/libraryloader.py
  13. 34 0
      lib/python/ctypes/ctypesgencore/messages.py
  14. 312 0
      lib/python/ctypes/ctypesgencore/old libraryloader.py
  15. 37 0
      lib/python/ctypes/ctypesgencore/options.py
  16. 24 0
      lib/python/ctypes/ctypesgencore/parser/__init__.py
  17. 174 0
      lib/python/ctypes/ctypesgencore/parser/cdeclarations.py
  18. 1093 0
      lib/python/ctypes/ctypesgencore/parser/cgrammar.py
  19. 208 0
      lib/python/ctypes/ctypesgencore/parser/cparser.py
  20. 198 0
      lib/python/ctypes/ctypesgencore/parser/ctypesparser.py
  21. 326 0
      lib/python/ctypes/ctypesgencore/parser/datacollectingparser.py
  22. 879 0
      lib/python/ctypes/ctypesgencore/parser/lex.py
  23. 8 0
      lib/python/ctypes/ctypesgencore/parser/lextab.py
  24. 282 0
      lib/python/ctypes/ctypesgencore/parser/parsetab.py
  25. 287 0
      lib/python/ctypes/ctypesgencore/parser/pplexer.py
  26. 197 0
      lib/python/ctypes/ctypesgencore/parser/preprocessor.py
  27. 2261 0
      lib/python/ctypes/ctypesgencore/parser/yacc.py
  28. 10 0
      lib/python/ctypes/ctypesgencore/printer/__init__.py
  29. 9 0
      lib/python/ctypes/ctypesgencore/printer/defaultheader.py
  30. 290 0
      lib/python/ctypes/ctypesgencore/printer/preamble.py
  31. 298 0
      lib/python/ctypes/ctypesgencore/printer/printer.py
  32. 6 0
      lib/python/ctypes/ctypesgencore/printer/test.py
  33. 12 0
      lib/python/ctypes/ctypesgencore/processor/__init__.py
  34. 137 0
      lib/python/ctypes/ctypesgencore/processor/dependencies.py
  35. 200 0
      lib/python/ctypes/ctypesgencore/processor/operations.py
  36. 134 0
      lib/python/ctypes/ctypesgencore/processor/pipeline.py

+ 6 - 48
configure

@@ -14205,46 +14205,6 @@ fi
   #   AC_MSG_ERROR([*** couldn't find swig])
   # fi
 
-  # look for the ctypesgen.py script
-  # Extract the first word of "ctypesgen.py", so it can be a program name with args.
-set dummy ctypesgen.py; ac_word=$2
-echo $ac_n "checking for $ac_word""... $ac_c" 1>&6
-echo "configure:14213: checking for $ac_word" >&5
-
-case "$CTYPESGEN" in
-  /*)
-  ac_cv_path_CTYPESGEN="$CTYPESGEN" # Let the user override the test with a path.
-  ;;
-  ?:/*)			 
-  ac_cv_path_CTYPESGEN="$CTYPESGEN" # Let the user override the test with a dos path.
-  ;;
-  *)
-  IFS="${IFS= 	}"; ac_save_ifs="$IFS"; IFS=":"
-  ac_dummy="$PATH"
-  for ac_dir in $ac_dummy; do 
-    test -z "$ac_dir" && ac_dir=.
-    if test -f $ac_dir/$ac_word; then
-      ac_cv_path_CTYPESGEN="$ac_dir/$ac_word"
-      break
-    fi
-  done
-  IFS="$ac_save_ifs"
-  test -z "$ac_cv_path_CTYPESGEN" && ac_cv_path_CTYPESGEN="no"
-  ;;
-esac
-CTYPESGEN="$ac_cv_path_CTYPESGEN"
-if test -n "$CTYPESGEN"; then
-  echo "$ac_t""$CTYPESGEN" 1>&6
-else
-  echo "$ac_t""no" 1>&6
-fi
-
-
-  if test "$CTYPESGEN" = "no" ; then
-    echo "configure: warning: *** couldn't find ctypesgen" 1>&2
-    CTYPESGEN=
-  fi
-
   # check for available OSX archs in Python, assume framework
   if test -n "$MACOSX_ARCHS" ; then
     pylib=`"$PY_CONFIG" --prefix`/Python
@@ -14269,11 +14229,10 @@ fi # Done checking Python
 
 
 
-
 # Enable wxWidgets support (for wxGUI)
 
 echo $ac_n "checking whether to use wxWidgets""... $ac_c" 1>&6
-echo "configure:14277: checking whether to use wxWidgets" >&5
+echo "configure:14236: checking whether to use wxWidgets" >&5
 
 WXVERSION=
 WXWIDGETSCXXFLAGS= 
@@ -14294,7 +14253,7 @@ else
   # Extract the first word of "wx-config", so it can be a program name with args.
 set dummy wx-config; ac_word=$2
 echo $ac_n "checking for $ac_word""... $ac_c" 1>&6
-echo "configure:14298: checking for $ac_word" >&5
+echo "configure:14257: checking for $ac_word" >&5
 
 case "$WX_CONFIG" in
   /*)
@@ -14342,7 +14301,7 @@ fi
  REQWX="2.8.1"
 
  echo $ac_n "checking wxWidgets version""... $ac_c" 1>&6
-echo "configure:14346: checking wxWidgets version" >&5
+echo "configure:14305: checking wxWidgets version" >&5
  if WXVERSION=`"$WX_CONFIG" --version`; then
    echo "$ac_t""$WXVERSION" 1>&6
  else
@@ -14365,15 +14324,15 @@ for ac_hdr in wx/wxprec.h
 do
 ac_safe=`echo "$ac_hdr" | sed 'y%./+-%__p_%'`
 echo $ac_n "checking for $ac_hdr""... $ac_c" 1>&6
-echo "configure:14369: checking for $ac_hdr" >&5
+echo "configure:14328: checking for $ac_hdr" >&5
 
 cat > conftest.$ac_ext <<EOF
-#line 14372 "configure"
+#line 14331 "configure"
 #include "confdefs.h"
 #include <$ac_hdr>
 EOF
 ac_try="$ac_cpp conftest.$ac_ext >/dev/null 2>conftest.out"
-{ (eval echo configure:14377: \"$ac_try\") 1>&5; (eval $ac_try) 2>&5; }
+{ (eval echo configure:14336: \"$ac_try\") 1>&5; (eval $ac_try) 2>&5; }
 ac_err=`grep -v '^ *+' conftest.out | grep -v "^conftest.${ac_ext}\$"`
 if test -z "$ac_err"; then
   rm -rf conftest*
@@ -14704,7 +14663,6 @@ s%@USE_PTHREAD@%$USE_PTHREAD%g
 s%@USE_LARGEFILES@%$USE_LARGEFILES%g
 s%@PY_CONFIG@%$PY_CONFIG%g
 s%@SWIG@%$SWIG%g
-s%@CTYPESGEN@%$CTYPESGEN%g
 s%@PYTHONINC@%$PYTHONINC%g
 s%@PYTHONCFLAGS@%$PYTHONCFLAGS%g
 s%@PYTHONLDFLAGS@%$PYTHONLDFLAGS%g

+ 0 - 9
configure.in

@@ -1789,14 +1789,6 @@ else
   #   AC_MSG_ERROR([*** couldn't find swig])
   # fi
 
-  # look for the ctypesgen.py script
-  AC_PATH_PROG(CTYPESGEN, ctypesgen.py, no)
-
-  if test "$CTYPESGEN" = "no" ; then
-    AC_MSG_WARN([*** couldn't find ctypesgen])
-    CTYPESGEN=
-  fi
-
   # check for available OSX archs in Python, assume framework
   if test -n "$MACOSX_ARCHS" ; then
     pylib=`"$PY_CONFIG" --prefix`/Python
@@ -1818,7 +1810,6 @@ AC_SUBST(PYTHONINC)
 AC_SUBST(PYTHONCFLAGS)
 AC_SUBST(PYTHONLDFLAGS)
 AC_SUBST(SWIG)
-AC_SUBST(CTYPESGEN)
 AC_SUBST(USE_PYTHON)
 AC_SUBST(MACOSX_ARCHS_PYTHON)
 

+ 0 - 1
include/Make/Platform.make.in

@@ -220,7 +220,6 @@ PYTHONINC           = @PYTHONINC@
 PYTHONCFLAGS        = @PYTHONCFLAGS@
 PYTHONLDFLAGS       = @PYTHONLDFLAGS@
 SWIG                = @SWIG@
-CTYPESGEN           = @CTYPESGEN@
 USE_PYTHON          = @USE_PYTHON@
 MACOSX_ARCHS_PYTHON = @MACOSX_ARCHS_PYTHON@
 

+ 1 - 1
lib/Makefile

@@ -40,9 +40,9 @@ SUBDIRS = \
 	cdhc \
 	stats \
 	arraystats \
-	python \
 	ogsf \
 	nviz \
+	python \
 	iostream \
 	manage
 

+ 1 - 1
lib/python/ctypes/Makefile

@@ -49,7 +49,7 @@ ogsf_INC	= ogsf_proto.h gstypes.h gsurf.h kftypes.h keyframe.h
 nviz_INC        = nviz.h
 
 SED = sed
-
+CTYPESGEN = ./ctypesgen.py
 CTYPESFLAGS = $(INC)
 EXTRA_CLEAN_FILES := $(foreach M,$(MODULES),$(M).py)
 

+ 150 - 0
lib/python/ctypes/ctypesgen.py

@@ -0,0 +1,150 @@
+#!/usr/bin/env python
+
def find_names_in_modules(modules):
    """Return the set of all names defined by the given Python modules.

    modules -- an iterable of module names (strings).

    Modules that cannot be imported are skipped silently; the caller only
    needs a best-effort list of already-known names.
    """
    names = set()
    for module in modules:
        try:
            mod = __import__(module)
        except Exception:
            # Best-effort: an unimportable module simply contributes no
            # names.  (Kept broad because importing can raise arbitrary
            # errors from module-level code.)
            pass
        else:
            # Bug fix: the original did names.union(dir(module)), which
            # (a) discarded the result -- union() returns a new set -- and
            # (b) ran dir() on the module *name* string, not the imported
            # module.  The function therefore always returned an empty set.
            names.update(dir(mod))
    return names
+
+import optparse, sys
+
def option_callback_W(option, opt, value, parser):
    """optparse callback for -W: accept only the '-Wl,<opt>' spelling.

    Options preceded by "-Wl," are treated as though the "-Wl," were not
    there: the wrapped linker option is pushed back onto the remaining
    argument list for normal option parsing.
    """
    # Must look like "l,-<something>" (i.e. the user typed "-Wl,-...").
    if not (len(value) >= 4 and value.startswith('l,-')):
        raise optparse.BadOptionError("not in '-Wl,<opt>' form: %s%s"
                                      % (opt, value))
    linker_opt = value[2:]
    if linker_opt not in ('-L', '-R', '--rpath'):
        raise optparse.BadOptionError("-Wl option must be -L, -R"
                                      " or --rpath, not " + value[2:])
    # Push the linker option onto the list for further parsing.
    parser.rargs.insert(0, value)
+
def option_callback_libdir(option, opt, value, parser):
    """optparse callback shared by -L / -R / --rpath / --libdir.

    There are two sets of linker search paths: those used at compile time
    and those used at run time.  A path given through any of these options
    is appended to both sets.
    """
    for path_list in (parser.values.compile_libdirs,
                      parser.values.runtime_libdirs):
        path_list.append(value)
+
+import ctypesgencore
+import ctypesgencore.messages as msgs
+
if __name__=="__main__":
    # Command-line driver: collect options, then run ctypesgencore's
    # three-stage pipeline (parse -> process -> print).
    usage = 'usage: %prog [options] /path/to/header.h ...'
    op = optparse.OptionParser(usage=usage)

    # Parameters
    op.add_option('-o', '--output', dest='output', metavar='FILE',
        help='write wrapper to FILE')
    op.add_option('-l', '--library', dest='libraries', action='append',
        default=[], metavar='LIBRARY', help='link to LIBRARY')
    op.add_option('', '--include', dest='other_headers', action='append',
        default=[], metavar='HEADER',
        help='include system header HEADER (e.g. stdio.h or stdlib.h)')
    op.add_option('-m', '--module', '--link-module', action='append',
        dest='modules', metavar='MODULE', default=[],
        help='use symbols from Python module MODULE')
    op.add_option('-I', '--includedir', dest='include_search_paths',
        action='append', default=[], metavar='INCLUDEDIR',
        help='add INCLUDEDIR as a directory to search for headers')
    # -W delegates to option_callback_W, which re-queues the wrapped
    # linker option (-Wl,-L etc.) for normal parsing.
    op.add_option('-W', action="callback", callback=option_callback_W,
        metavar="l,OPTION", type="str",
        help="where OPTION is -L, -R, or --rpath")
    # These four spellings all feed both compile-time and run-time
    # search-path lists via option_callback_libdir.
    op.add_option("-L", "-R", "--rpath", "--libdir", action="callback",
        callback=option_callback_libdir, metavar="LIBDIR", type="str",
        help="Add LIBDIR to the search path (both compile-time and run-time)")
    op.add_option('', "--compile-libdir", action="append",
        dest="compile_libdirs", metavar="LIBDIR", default=[],
        help="Add LIBDIR to the compile-time library search path.")
    op.add_option('', "--runtime-libdir", action="append",
        dest="runtime_libdirs", metavar="LIBDIR", default=[],
        help="Add LIBDIR to the run-time library search path.")

    # Parser options
    op.add_option('', '--cpp', dest='cpp', default='gcc -E',
        help='The command to invoke the c preprocessor, including any ' \
             'necessary options (default: gcc -E)')
    op.add_option('', '--save-preprocessed-headers', metavar='FILENAME',
        dest='save_preprocessed_headers', default=None,
        help='Save the preprocessed headers to the specified FILENAME')

    # Processor options
    op.add_option('-a', '--all-headers', action='store_true',
        dest='all_headers', default=False,
        help='include symbols from all headers, including system headers')
    op.add_option('', '--builtin-symbols', action='store_true',
        dest='builtin_symbols', default=False,
        help='include symbols automatically generated by the preprocessor')
    op.add_option('', '--no-macros', action='store_false', dest='include_macros',
        default=True, help="Don't output macros.")
    op.add_option('-i', '--include-symbols', dest='include_symbols',
        default=None, help='regular expression for symbols to always include')
    op.add_option('-x', '--exclude-symbols', dest='exclude_symbols',
        default=None, help='regular expression for symbols to exclude')

    # Printer options
    op.add_option('', '--header-template', dest='header_template', default=None,
        metavar='TEMPLATE',
        help='Use TEMPLATE as the header template in the output file.')
    op.add_option('', '--strip-build-path', dest='strip_build_path',
        default=None, metavar='BUILD_PATH',
        help='Strip build path from header paths in the wrapper file.')
    op.add_option('', '--insert-file', dest='inserted_files', default=[],
        action='append', metavar='FILENAME',
        help='Add the contents of FILENAME to the end of the wrapper file.')

    # Error options
    op.add_option('', "--all-errors", action="store_true", default=False,
        dest="show_all_errors", help="Display all warnings and errors even " \
             "if they would not affect output.")
    op.add_option('', "--show-long-errors", action="store_true", default=False,
        dest="show_long_errors", help="Display long error messages " \
            "instead of abbreviating error messages.")
    op.add_option('', "--no-macro-warnings", action="store_false", default=True,
        dest="show_macro_warnings", help="Do not print macro warnings.")

    # Fill in any defaults the library declares that were not set above.
    op.set_defaults(**ctypesgencore.options.default_values)

    (options, args) = op.parse_args(list(sys.argv[1:]))
    # Positional arguments are the header files to wrap.
    options.headers = args

    # Figure out what names will be defined by imported Python modules
    options.other_known_names = find_names_in_modules(options.modules)

    # Required parameters: at least one header and an output file.
    if len(args) < 1:
        msgs.error_message('No header files specified', cls='usage')
        sys.exit(1)

    if options.output is None:
        msgs.error_message('No output file specified', cls='usage')
        sys.exit(1)

    if len(options.libraries) == 0:
        msgs.warning_message('No libraries specified', cls='usage')

    # Step 1: Parse
    descriptions=ctypesgencore.parser.parse(options.headers,options)

    # Step 2: Process
    ctypesgencore.processor.process(descriptions,options)

    # Step 3: Print
    ctypesgencore.printer.WrapperPrinter(options.output,options,descriptions)

    msgs.status_message("Wrapping complete.")

    # Correct what may be a common mistake
    if descriptions.all == []:
        if not options.all_headers:
            msgs.warning_message("There wasn't anything of use in the " \
                "specified header file(s). Perhaps you meant to run with " \
                "--all-headers to include objects from included sub-headers? ",
                cls = 'usage')

+ 26 - 0
lib/python/ctypes/ctypesgencore/LICENSE

@@ -0,0 +1,26 @@
+Copyright (c) 2007-2008, Ctypesgen Developers
+All rights reserved.
+
+Redistribution and use in source and binary forms, with or without
+modification, are permitted provided that the following conditions are met:
+
+1. Redistributions of source code must retain the above copyright notice,
+   this list of conditions and the following disclaimer.
+2. Redistributions in binary form must reproduce the above copyright
+   notice, this list of conditions and the following disclaimer in the
+   documentation and/or other materials provided with the distribution.
+3. Neither the name of the <ORGANIZATION> nor the names of its
+   contributors may be used to endorse or promote products derived from
+   this software without specific prior written permission.
+
+THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
+AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
+IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
+ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR CONTRIBUTORS BE
+LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
+CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
+SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
+INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
+CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
+ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
+POSSIBILITY OF SUCH DAMAGE.

+ 62 - 0
lib/python/ctypes/ctypesgencore/__init__.py

@@ -0,0 +1,62 @@
+#!/usr/bin/env python
+
+"""
+Ctypesgencore is the module that contains the main body of ctypesgen - in fact,
+it contains everything but the command-line interface.
+
+ctypesgen's job is divided into three steps:
+
+Step 1: Parse
+
+Ctypesgen reads the input header files and parses them. It generates a list of
+function, variable, struct, union, enum, constant, typedef, and macro
+descriptions from the input files. These descriptions are encapsulated as
+ctypesgen.descriptions.Description objects.
+
+The package ctypesgen.parser is responsible for the parsing stage.
+
+Step 2: Process
+
+Ctypesgen processes the list of descriptions from the parsing stage. This is
+the stage where ctypesgen resolves name conflicts and filters descriptions using
+the regexes specified on the command line. Other processing steps take place
+at this stage, too. When processing is done, ctypesgen finalizes which
+descriptions will be included in the output file.
+
+The package ctypesgen.processor is responsible for the processing stage.
+
+Step 3: Print
+
+Ctypesgen writes the descriptions to the output file, along with a header.
+
+The package ctypesgen.printer is responsible for the printing stage.
+
+There are three modules in ctypesgencore that describe the format that the
+parser, processor, and printer modules use to pass information. They are:
+
+* descriptions: Classes to represent the descriptions.
+
+* ctypedescs: Classes to represent C types.
+
+* expressions: Classes to represent an expression in a language-independent
+format.
+"""
+
+
+__all__ = ["parser","processor","printer",
+           "descriptions","ctypedescs","expressions",
+           "messages","options"]
+
+# Workhorse modules
+import parser
+import processor
+import printer
+
+# Modules describing internal format
+import descriptions
+import ctypedescs
+import expressions
+
+# Helper modules
+import messages
+import options

+ 314 - 0
lib/python/ctypes/ctypesgencore/ctypedescs.py

@@ -0,0 +1,314 @@
+#!/usr/bin/env python
+
+'''
+ctypesgencore.ctypedescs contains classes to represent a C type. All of
+these classes are subclasses of CtypesType.
+
+Unlike in previous versions of ctypesgen, CtypesType and its subclasses are
+completely independent of the parser module.
+
+The most important method of CtypesType and its subclasses is the py_string
+method. ctype.py_string() returns a string which, when evaluated in the
+wrapper at runtime, results in a ctypes type object.
+
+For example, a CtypesType
+representing an array of four integers could be created using:
+
+>>> ctype = CtypesArray(CtypesSimple("int",True,0),4)
+
+ctype.py_string() would evaluate to "c_int * 4".
+'''
+
+import warnings
+
+__docformat__ = 'restructuredtext'
+
# Maps a (typename, signed, number-of-longs) triple to the name of the
# corresponding ctypes type, as emitted into the generated wrapper.
# NOTE(review): the uint*_t rows are keyed with signed=True — presumably the
# parser reports these typedef'd names as signed by default; confirm against
# the parser module.
ctypes_type_map = {
   # typename   signed  longs
    ('void',    True,   0): 'None',
    ('int',     True,   0): 'c_int',
    ('int',     False,  0): 'c_uint',
    ('int',     True,   1): 'c_long',
    ('int',     False,  1): 'c_ulong',
    ('int',     True,   2): 'c_longlong',
    ('int',     False,  2): 'c_ulonglong',
    ('char',    True,   0): 'c_char',
    ('char',    False,  0): 'c_ubyte',
    ('short',   True,   0): 'c_short',
    ('short',   False,  0): 'c_ushort',
    ('float',   True,   0): 'c_float',
    ('double',  True,   0): 'c_double',
    ('size_t',  True,   0): 'c_size_t',
    ('int8_t',  True,   0): 'c_int8',
    ('int16_t', True,   0): 'c_int16',
    ('int32_t', True,   0): 'c_int32',
    ('int64_t', True,   0): 'c_int64',
    ('apr_int64_t',True,0): 'c_int64',
    ('off64_t', True,   0): 'c_int64',
    ('uint8_t', True,   0): 'c_uint8',
    ('uint16_t',True,   0): 'c_uint16',
    ('uint32_t',True,   0): 'c_uint32',
    ('uint64_t',True,   0): 'c_uint64',
    ('apr_uint64_t',True,0): 'c_uint64',
    ('wchar_t', True,   0): 'c_wchar',
    ('ptrdiff_t',True,  0): 'c_ptrdiff_t',  # Requires definition in preamble
    ('ssize_t', True,   0): 'c_ptrdiff_t',  # Requires definition in preamble
    ('va_list', True,   0): 'c_void_p',
}
+
# This protocol is used for walking type trees.
class CtypesTypeVisitor(object):
    """Visitor protocol for walking CtypesType trees.

    Subclass and override only the hooks you care about; every default
    implementation is a no-op.
    """
    def visit_struct(self, struct):
        pass

    def visit_enum(self, enum):
        pass

    def visit_typedef(self, name):
        pass

    def visit_error(self, error, cls):
        pass

    def visit_identifier(self, identifier):
        # This one comes from inside ExpressionNodes. There may be
        # ExpressionNode objects in array count expressions.
        pass
+
def visit_type_and_collect_info(ctype):
    """Walk ``ctype`` and gather everything its visit() reports.

    Returns a 5-tuple of lists:
    (structs, enums, typedef names, (error, cls) pairs, identifiers).
    """
    structs = []
    enums = []
    typedefs = []
    errors = []
    identifiers = []

    # Local visitor subclass: each hook appends into the enclosing lists.
    class _Collector(CtypesTypeVisitor):
        def visit_struct(self, struct):
            structs.append(struct)

        def visit_enum(self, enum):
            enums.append(enum)

        def visit_typedef(self, typedef):
            typedefs.append(typedef)

        def visit_error(self, error, cls):
            errors.append((error, cls))

        def visit_identifier(self, identifier):
            identifiers.append(identifier)

    ctype.visit(_Collector())
    return structs, enums, typedefs, errors, identifiers
+
# Remove one level of indirection from a function pointer; needed for
# typedefs and function parameters.
def remove_function_pointer(t):
    """Collapse pointer-to-function into the function type itself,
    recursing through nested pointers."""
    if type(t) != CtypesPointer:
        # Not a pointer at all: nothing to strip.
        return t
    if type(t.destination) == CtypesFunction:
        # Pointer directly to a function: drop the pointer level.
        return t.destination
    # Pointer to something else: recurse so deeper function pointers are
    # also collapsed by one level.
    t.destination = remove_function_pointer(t.destination)
    return t
+
class CtypesType(object):
    """Abstract base class for all C type descriptions.

    Subclasses provide py_string(), the Python/ctypes expression for the
    type.  error() records problems discovered while the type was built;
    visit() replays them to a CtypesTypeVisitor.
    """

    def __init__(self):
        self.errors = []

    def __repr__(self):
        return "<Ctype \"%s\">" % self.py_string()

    def error(self, message, cls=None):
        self.errors.append((message, cls))

    def visit(self, visitor):
        # Report every recorded (message, cls) pair to the visitor.
        for recorded in self.errors:
            visitor.visit_error(*recorded)
+
class CtypesSimple(CtypesType):
    """Represents a builtin type, like "char" or "int"."""
    def __init__(self, name, signed, longs):
        # name: base C type name; signed: bool; longs: count of "long"
        # qualifiers (0, 1, or 2), matching the ctypes_type_map keys.
        CtypesType.__init__(self)
        self.name = name
        self.signed = signed
        self.longs = longs

    def py_string(self):
        # Direct lookup; raises KeyError for a combination that has no
        # ctypes equivalent in the table.
        return ctypes_type_map[(self.name,self.signed,self.longs)]
+
class CtypesSpecial(CtypesType):
    """A type emitted verbatim by name (e.g. 'c_void', 'ReturnString')."""
    def __init__(self,name):
        CtypesType.__init__(self)
        self.name = name

    def py_string(self):
        return self.name
+
class CtypesTypedef(CtypesType):
    """Represents a type defined by a typedef."""
    def __init__(self, name):
        CtypesType.__init__(self)
        self.name = name

    def visit(self,visitor):
        # The typedef event is suppressed once any error has been
        # recorded on this node; errors are still reported via the base.
        if not self.errors:
            visitor.visit_typedef(self.name)
        CtypesType.visit(self,visitor)

    def py_string(self):
        return self.name
+
class CtypesBitfield(CtypesType):
    """A struct member with a bit width (e.g. ``int flag : 1``)."""
    def __init__(self, base, bitfield):
        # base: the underlying CtypesType; bitfield: the width expression.
        CtypesType.__init__(self)
        self.base = base
        self.bitfield = bitfield

    def visit(self,visitor):
        self.base.visit(visitor)
        CtypesType.visit(self,visitor)

    def py_string(self):
        # Only the base type appears here; the width is handled by the
        # code that emits the struct's _fields_ entry.
        return self.base.py_string()
+
class CtypesPointer(CtypesType):
    """A pointer to another CtypesType."""
    def __init__(self, destination, qualifiers):
        # destination: pointee type (may be None, guarded in visit);
        # qualifiers: C qualifiers such as const/volatile.
        CtypesType.__init__(self)
        self.destination = destination
        self.qualifiers = qualifiers

    def visit(self,visitor):
        if self.destination:
            self.destination.visit(visitor)
        CtypesType.visit(self,visitor)

    def py_string(self):
        # NOTE(review): assumes destination is not None here — confirm
        # callers never emit a pointer with a missing pointee.
        return 'POINTER(%s)' % self.destination.py_string()
+
class CtypesArray(CtypesType):
    """A C array type; ``count`` is the element-count expression."""
    def __init__(self, base, count):
        # base: element type; count: an object with py_string() (an
        # expression node), or None for an incomplete array ("int x[]").
        CtypesType.__init__(self)
        self.base = base
        self.count = count

    def visit(self,visitor):
        self.base.visit(visitor)
        if self.count:
            self.count.visit(visitor)
        CtypesType.visit(self,visitor)

    def py_string(self):
        # An incomplete array decays to a pointer in the wrapper.
        if self.count is None:
            return 'POINTER(%s)' % self.base.py_string()
        # Parenthesize nested arrays so "(inner * n) * m" groups correctly.
        if type(self.base) == CtypesArray:
            return '(%s) * %s' % (self.base.py_string(),
                                  self.count.py_string(False))
        else:
            return '%s * %s' % (self.base.py_string(),
                                self.count.py_string(False))
+
class CtypesFunction(CtypesType):
    """A C function type: return type, argument types, variadic flag."""
    def __init__(self, restype, parameters, variadic=False):
        CtypesType.__init__(self)
        self.restype = restype

        # Don't allow POINTER(None) (c_void_p) as a restype... causes errors
        # when ctypes automagically returns it as an int.
        # Instead, convert to POINTER(c_void).  c_void is not a ctypes type,
        # you can make it any arbitrary type.
        if type(self.restype) == CtypesPointer and \
           type(self.restype.destination) == CtypesSimple and \
           self.restype.destination.name == 'None':
            self.restype = CtypesPointer(CtypesSpecial('c_void'), ())

        # Return 'ReturnString' instead of simply 'String'
        if self.restype.py_string() == 'POINTER(c_char)':
            self.restype = CtypesSpecial('ReturnString')

        # Function pointers passed as parameters lose one indirection level.
        self.argtypes = [remove_function_pointer(p) for p in parameters]
        self.variadic = variadic

    def visit(self,visitor):
        self.restype.visit(visitor)
        for a in self.argtypes:
            a.visit(visitor)
        CtypesType.visit(self,visitor)

    def py_string(self):
        # UNCHECKED and ReturnString are presumably defined in the
        # generated wrapper's preamble — confirm against printer/preamble.py.
        return 'CFUNCTYPE(UNCHECKED(%s), %s)' % (self.restype.py_string(),
            ', '.join([a.py_string() for a in self.argtypes]))
+
# Module-level counter for anonymous tags; shared with anonymous_enum_tag
# below (both declare it global), so generated tags never collide.
last_tagnum = 0
def anonymous_struct_tag():
    """Return a fresh unique tag ('anon_N') for an unnamed struct/union."""
    global last_tagnum
    last_tagnum += 1
    return 'anon_%d' % last_tagnum
+
class CtypesStruct(CtypesType):
    """A struct or union type; ``members`` is a list of (name, ctype)."""
    def __init__(self, tag, variety, members, src=None):
        CtypesType.__init__(self)
        self.tag = tag
        self.variety = variety # "struct" or "union"
        self.members = members

        # Unnamed structs get a synthesized unique tag.
        if not self.tag:
            self.tag = anonymous_struct_tag()
            self.anonymous = True
        else:
            self.anonymous = False

        # members is None when the body was never declared (forward
        # declaration only) -> opaque type.
        if self.members==None:
            self.opaque = True
        else:
            self.opaque = False

        self.src = src

    def get_required_types(self):
        # NOTE(review): CtypesType defines no get_required_types in this
        # module, so this call would raise AttributeError if invoked —
        # looks like dead code carried over from an older version; confirm.
        types = CtypesType.get_required_types(self)
        types.add((self.variety,self.tag))
        return types

    def visit(self,visitor):
        visitor.visit_struct(self)
        if not self.opaque:
            for name,ctype in self.members:
                ctype.visit(visitor)
        CtypesType.visit(self,visitor)

    def get_subtypes(self):
        if self.opaque:
            return set()
        else:
            return set([m[1] for m in self.members])

    def py_string(self):
        return "%s_%s" % (self.variety,self.tag)
+
# NOTE: re-initializes the same module-level counter declared above for
# anonymous_struct_tag; harmless at import time (no tags issued yet), and
# thereafter both functions draw from one shared counter.
last_tagnum = 0
def anonymous_enum_tag():
    """Return a fresh unique tag ('anon_N') for an unnamed enum."""
    global last_tagnum
    last_tagnum += 1
    return 'anon_%d' % last_tagnum
+
class CtypesEnum(CtypesType):
    """An enum type; ``enumerators`` is a list of (name, value) pairs."""
    def __init__(self, tag, enumerators, src=None):
        CtypesType.__init__(self)
        self.tag = tag
        self.enumerators = enumerators

        # Unnamed enums get a synthesized unique tag.
        if not self.tag:
            self.tag = anonymous_enum_tag()
            self.anonymous = True
        else:
            self.anonymous = False

        # enumerators is None for a forward declaration -> opaque.
        if self.enumerators==None:
            self.opaque = True
        else:
            self.opaque = False

        self.src = src

    def visit(self,visitor):
        visitor.visit_enum(self)
        CtypesType.visit(self,visitor)

    def py_string(self):
        return 'enum_%s' % self.tag

+ 189 - 0
lib/python/ctypes/ctypesgencore/descriptions.py

@@ -0,0 +1,189 @@
+#!/usr/bin/env python
+
+"""
+ctypesgencore.descriptions contains classes to represent a description of a
+struct, union, enum, function, constant, variable, or macro. All the
+description classes are subclassed from an abstract base class, Description.
+The descriptions module also contains a class, DescriptionCollection, to hold
+lists of Description objects.
+"""
+
class DescriptionCollection(object):
    """Represents a collection of Descriptions.

    Holds one attribute per description category, plus the combined list
    (``all``) and the order in which items should be printed
    (``output_order``).
    """

    def __init__(self, constants, typedefs, structs, enums, functions,
                 variables, macros, all, output_order):
        attribute_names = ("constants", "typedefs", "structs", "enums",
                           "functions", "variables", "macros", "all",
                           "output_order")
        values = (constants, typedefs, structs, enums, functions,
                  variables, macros, all, output_order)
        for attribute, value in zip(attribute_names, values):
            setattr(self, attribute, value)
+
class Description(object):
    """Represents a constant, typedef, struct, function, variable, enum,
    or macro description. Description is an abstract base class."""

    def __init__(self, src=None):
        # Where the entity was declared: a (filename, lineno) tuple or None.
        self.src = src

        # Whether this object will appear in the output file; one of
        # "yes", "never", or "if_needed".
        self.include_rule = "yes"

        # Dependency bookkeeping: if X requires Y, then Y is in
        # X.requirements and X is in Y.dependents.
        self.requirements = set()
        self.dependents = set()

        # Fatal problems found by the processor go in 'errors' (such a
        # description is not output); nonfatal ones go in 'warnings'.
        # Both are printed if the description would have been output.
        self.errors = []
        self.warnings = []

    def add_requirements(self, reqs):
        """Record that this description requires every member of reqs,
        and register the reverse (dependent) links."""
        self.requirements.update(reqs)
        for requirement in reqs:
            requirement.dependents.add(self)

    def error(self, msg, cls=None):
        self.errors.append((msg, cls))

    def warning(self, msg, cls=None):
        self.warnings.append((msg, cls))

    def __repr__(self):
        return "<Description: %s>" % self.casual_name()

    def casual_name(self):
        """Return a name to show the user."""

    def py_name(self):
        """Return the name associated with this description in Python code."""

    def c_name(self):
        """Return the name associated with this description in C code."""
+
class ConstantDescription(Description):
    """Simple class to contain information about a constant."""
    def __init__(self,name,value,src=None):
        Description.__init__(self,src)
        # Name of constant, a string
        self.name=name
        # Value of constant, as an ExpressionNode object
        self.value=value
    def casual_name(self):
        return "Constant \"%s\""%self.name
    def py_name(self):
        # Constants keep the same name in Python as in C.
        return self.name
    def c_name(self):
        return self.name
+
class TypedefDescription(Description):
    """Simple container class for a type definition."""
    def __init__(self,name,ctype,src=None):
        Description.__init__(self,src)
        self.name=name # Name, a string
        self.ctype=ctype # The base type as a ctypedescs.CtypeType object
    def casual_name(self):
        return "Typedef \"%s\""%self.name
    def py_name(self):
        # Typedefs keep the same name in Python as in C.
        return self.name
    def c_name(self):
        return self.name
+
class StructDescription(Description):
    """Simple container class for a structure or union definition."""
    def __init__(self,tag,variety,members,opaque,ctype,src=None):
        Description.__init__(self,src)
        # The name of the structure minus the "struct" or "union"
        self.tag=tag
        # A string "struct" or "union"
        self.variety=variety
        # A list of pairs of (name,ctype)
        self.members=members
        # True if struct body was not specified in header file
        self.opaque=opaque
        # The original CtypeStruct that created the struct
        self.ctype=ctype
    def casual_name(self):
        return "%s \"%s\""%(self.variety.capitalize(),self.tag)
    def py_name(self):
        # Python name is "struct_<tag>" or "union_<tag>" (no space allowed).
        return "%s_%s"%(self.variety,self.tag)
    def c_name(self):
        return "%s %s"%(self.variety,self.tag)
+
class EnumDescription(Description):
    """Simple container class for an enum definition."""
    def __init__(self,tag,members,ctype,src=None):
        Description.__init__(self,src)
        # The name of the enum, minus the "enum"
        self.tag=tag
        # A list of (name,value) pairs where value is a number
        self.members=members
        # The original CtypeEnum that created the enum
        self.ctype=ctype
    def casual_name(self):
        return "Enum \"%s\""%self.tag
    def py_name(self):
        # Python name is "enum_<tag>" (no space allowed).
        return "enum_%s"%self.tag
    def c_name(self):
        return "enum %s"%self.tag
+
class FunctionDescription(Description):
    """Simple container class for a C function."""
    def __init__(self,name,restype,argtypes,variadic=False,src=None):
        Description.__init__(self,src)
        # Name, a string
        self.name=name
        # Name according to C - stored in case description is renamed
        self.cname=name
        # A ctype representing return type
        self.restype=restype
        # A list of ctypes representing the argument types
        self.argtypes=argtypes
        # Does this function accept a variable number of arguments?
        self.variadic=variadic
    def casual_name(self):
        return "Function \"%s\""%self.name
    def py_name(self):
        return self.name
    def c_name(self):
        # cname is preserved even if the Python-side name was changed.
        return self.cname
+
class VariableDescription(Description):
    """Simple container class for a C variable declaration."""
    def __init__(self,name,ctype,src=None):
        Description.__init__(self,src)
        # Name, a string
        self.name=name
        # Name according to C - stored in case description is renamed
        self.cname=name
        # The type of the variable
        self.ctype=ctype
    def casual_name(self):
        return "Variable \"%s\""%self.name
    def py_name(self):
        return self.name
    def c_name(self):
        # cname is preserved even if the Python-side name was changed.
        return self.cname
+
class MacroDescription(Description):
    """Simple container class for a C macro."""
    def __init__(self,name,params,expr,src=None):
        Description.__init__(self,src)
        self.name = name
        # Parameter name list for function-like macros; presumably None
        # for object-like macros — confirm against the parser module.
        self.params = params
        self.expr = expr # ExpressionNode for the macro's body
    def casual_name(self):
        return "Macro \"%s\""%self.name
    def py_name(self):
        return self.name
    def c_name(self):
        return self.name

+ 305 - 0
lib/python/ctypes/ctypesgencore/expressions.py

@@ -0,0 +1,305 @@
+#!/usr/bin/env python
+
+'''
+The expressions module contains classes to represent an expression. The main
+class is ExpressionNode. ExpressionNode's most useful method is py_string(),
+which returns a Python string representing that expression.
+'''
+
+from ctypedescs import *
+import keyword
+
+# Right now, the objects in this module are all oriented toward evaluation.
+# However, they don't have to be, since ctypes objects are mutable. For example,
+# shouldn't it be possible to translate the macro:
+#
+#   #define INCREMENT(x) ++x
+#
+# into Python? The resulting code should be:
+#
+#   def INCREMENT(x):
+#       x.value+=1
+#       return x.value
+#
+# On the other hand, this would be a challenge to write.
+
class EvaluationContext(object):
    '''Interface for evaluating expression nodes.

    Subclasses may override these hooks to supply real values. The default
    implementations emit a warning and return 0 so evaluation can proceed.
    '''

    def evaluate_identifier(self, name):
        """Value of the C identifier `name` (default: warn and return 0)."""
        warnings.warn('Attempt to evaluate identifier "%s" failed' % name)
        return 0

    def evaluate_sizeof(self, type):
        """sizeof() of a C type description (default: warn and return 0)."""
        warnings.warn('Attempt to evaluate sizeof "%s" failed' % str(type))
        return 0

    def evaluate_sizeof_object(self, object):
        """sizeof() of a C object/expression (default: warn and return 0).

        Bug fix: this was a second method also named evaluate_sizeof(),
        which silently shadowed the type variant above; it is renamed to
        evaluate_sizeof_object, the hook SizeOfExpressionNode.evaluate()
        actually calls.
        """
        warnings.warn('Attempt to evaluate sizeof object "%s" failed' % str(object))
        return 0

    def evaluate_parameter(self, name):
        """Value of the macro parameter `name` (default: warn and return 0)."""
        warnings.warn('Attempt to evaluate parameter "%s" failed' % name)
        return 0
+
class ExpressionNode(object):
    """Base class for all C expression nodes.

    Each node accumulates translation errors; visitors are informed of
    them through visit_error().
    """

    def __init__(self):
        # List of (message, error-class) pairs collected for this node.
        self.errors = []

    def error(self, message, cls=None):
        self.errors.append((message, cls))

    def __repr__(self):
        try:
            text = repr(self.py_string(True))
        except ValueError:
            text = "<error in expression node>"
        return "<ExpressionNode: %s>" % text

    def visit(self, visitor):
        # Report every recorded error to the visitor.
        for message, cls in self.errors:
            visitor.visit_error(message, cls)
+
class ConstantExpressionNode(ExpressionNode):
    """Expression node wrapping a literal constant value."""

    def __init__(self, value):
        ExpressionNode.__init__(self)
        self.value = value

    def evaluate(self, context):
        # A constant evaluates to itself, regardless of the context.
        return self.value

    def py_string(self, can_be_ctype):
        # repr() of an infinite float is not a valid Python literal, so
        # the two infinities are special-cased.
        if self.value == float('inf'):
            return "float('inf')"
        if self.value == float('-inf'):
            return "float('-inf')"
        return repr(self.value)
+
class IdentifierExpressionNode(ExpressionNode):
    """Expression node referring to a named C identifier."""

    def __init__(self, name):
        ExpressionNode.__init__(self)
        self.name = name

    def evaluate(self, context):
        # Delegate lookup of the identifier's value to the context.
        return context.evaluate_identifier(self.name)

    def visit(self, visitor):
        visitor.visit_identifier(self.name)
        ExpressionNode.visit(self, visitor)

    def py_string(self, can_be_ctype):
        # Errors will be thrown in generated code if the identifier
        # evaluates to a ctypes object and can_be_ctype is False.
        return self.name
+
class ParameterExpressionNode(ExpressionNode):
    """Expression node referring to a macro parameter by name."""

    def __init__(self, name):
        ExpressionNode.__init__(self)
        self.name = name

    def evaluate(self, context):
        # Delegate parameter lookup to the context.
        return context.evaluate_parameter(self.name)

    def visit(self, visitor):
        ExpressionNode.visit(self, visitor)

    def py_string(self, can_be_ctype):
        # Errors will be thrown in generated code if the parameter is
        # a ctypes object and can_be_ctype is False.
        return self.name
+
class UnaryExpressionNode(ExpressionNode):
    """Expression node for a unary C operator applied to one child."""

    def __init__(self, name, op, format, child_can_be_ctype, child):
        ExpressionNode.__init__(self)
        self.name = name      # human-readable operator name
        self.op = op          # Python callable implementing it, or None
        self.format = format  # "%s"-style template used by py_string()
        self.child_can_be_ctype = child_can_be_ctype
        self.child = child

    def visit(self, visitor):
        self.child.visit(visitor)
        ExpressionNode.visit(self, visitor)

    def evaluate(self, context):
        if not self.op:
            # No Python equivalent is available for this operator.
            raise ValueError("The C operator \"%s\" can't be evaluated right "
                             "now" % self.name)
        return self.op(self.child.evaluate(context))

    def py_string(self, can_be_ctype):
        child_flag = self.child_can_be_ctype and can_be_ctype
        return self.format % self.child.py_string(child_flag)
+
class SizeOfExpressionNode(ExpressionNode):
    """Expression node for sizeof(), over either a type or an expression."""

    def __init__(self, child):
        ExpressionNode.__init__(self)
        # Either a CtypesType or another ExpressionNode.
        self.child = child

    def visit(self, visitor):
        self.child.visit(visitor)
        ExpressionNode.visit(self, visitor)

    def evaluate(self, context):
        # Types and objects are measured through different context hooks.
        if isinstance(self.child, CtypesType):
            return context.evaluate_sizeof(self.child)
        return context.evaluate_sizeof_object(self.child)

    def py_string(self, can_be_ctype):
        # CtypesType.py_string() takes no flag; expression nodes take one.
        if isinstance(self.child, CtypesType):
            return 'sizeof(%s)' % self.child.py_string()
        return 'sizeof(%s)' % self.child.py_string(True)
+
class BinaryExpressionNode(ExpressionNode):
    """Expression node for a binary C operator.

    `can_be_ctype` is a pair of flags, one per operand, stating whether
    that operand is allowed to be a ctypes object in generated code.
    """

    def __init__(self, name, op, format, can_be_ctype, left, right):
        ExpressionNode.__init__(self)
        self.name = name      # human-readable operator name
        self.op = op          # Python callable implementing it, or None
        self.format = format  # "%s"-style template with two slots
        self.can_be_ctype = can_be_ctype  # (left_flag, right_flag)
        self.left = left
        self.right = right

    def visit(self, visitor):
        self.left.visit(visitor)
        self.right.visit(visitor)
        ExpressionNode.visit(self, visitor)

    def evaluate(self, context):
        if self.op:
            return self.op(self.left.evaluate(context),
                           self.right.evaluate(context))
        else:
            # No Python equivalent is available for this operator.
            raise ValueError("The C operator \"%s\" can't be evaluated right "
                             "now" % self.name)

    def py_string(self, can_be_ctype):
        # Bug fix: the right operand must use its own flag,
        # can_be_ctype[1]; the original reused can_be_ctype[0] for both.
        return self.format % \
            (self.left.py_string(self.can_be_ctype[0] and can_be_ctype),
             self.right.py_string(self.can_be_ctype[1] and can_be_ctype))
+
class ConditionalExpressionNode(ExpressionNode):
    """Expression node for the C ternary operator (cond ? yes : no)."""

    def __init__(self, cond, yes, no):
        ExpressionNode.__init__(self)
        self.cond = cond
        self.yes = yes
        self.no = no

    def visit(self, visitor):
        self.cond.visit(visitor)
        self.yes.visit(visitor)
        self.no.visit(visitor)
        ExpressionNode.visit(self, visitor)

    def evaluate(self, context):
        if self.cond.evaluate(context):
            return self.yes.evaluate(context)
        else:
            return self.no.evaluate(context)

    def py_string(self, can_be_ctype):
        # Bug fix: emit a real conditional expression (PEP 308) instead of
        # the "cond and yes or no" idiom, which silently selects `no`
        # whenever `yes` evaluates to a false value such as 0.
        return "(%s) if (%s) else (%s)" % \
            (self.yes.py_string(can_be_ctype),
             self.cond.py_string(True),
             self.no.py_string(can_be_ctype))
+
class AttributeExpressionNode(ExpressionNode):
    """Expression node for C member access, e.g. "s.field" or "p->field"."""

    def __init__(self, op, format, base, attribute):
        ExpressionNode.__init__(self)
        self.op = op          # getattr-style callable used by evaluate()
        self.format = format  # "%s"-style template with (base, attribute) slots
        self.base = base
        self.attribute = attribute

        # Attribute access will raise parse errors if you don't do this.
        # Fortunately, the processor module does the same thing to
        # the struct member name.
        if self.attribute in keyword.kwlist:
            self.attribute = "_" + self.attribute

    def visit(self, visitor):
        self.base.visit(visitor)
        ExpressionNode.visit(self, visitor)

    def evaluate(self, context):
        # Bug fix: was "self.base.evalute(context)" (typo), which raised
        # AttributeError whenever an attribute expression was evaluated.
        return self.op(self.base.evaluate(context), self.attribute)

    def py_string(self, can_be_ctype):
        if can_be_ctype:
            return self.format % (self.base.py_string(can_be_ctype),
                                  self.attribute)
        else:
            return "(%s.value)" % (self.format % \
                    (self.base.py_string(can_be_ctype), self.attribute))
+
class CallExpressionNode(ExpressionNode):
    """Expression node for a C function call."""

    def __init__(self, function, arguments):
        ExpressionNode.__init__(self)
        self.function = function
        self.arguments = arguments

    def visit(self, visitor):
        self.function.visit(visitor)
        for argument in self.arguments:
            argument.visit(visitor)
        ExpressionNode.visit(self, visitor)

    def evaluate(self, context):
        evaluated = [argument.evaluate(context)
                     for argument in self.arguments]
        return self.function.evaluate(context)(*evaluated)

    def py_string(self, can_be_ctype):
        func_text = self.function.py_string(can_be_ctype)
        arg_texts = [argument.py_string(can_be_ctype)
                     for argument in self.arguments]
        call_text = "%s (%s)" % (func_text, ", ".join(arg_texts))
        if can_be_ctype:
            return '(%s)' % call_text
        # When a plain Python value is required, unwrap the ctypes result.
        return '((%s).value)' % call_text
+
+# There seems not to be any reasonable way to translate C typecasts
+# into Python. Ctypesgen doesn't try, except for the special case of NULL.
class TypeCastExpressionNode(ExpressionNode):
    """Expression node for a C typecast.

    Casts cannot in general be translated into Python; only the special
    case of casting the constant 0 to a pointer (NULL) is recognized.
    """

    def __init__(self, base, ctype):
        ExpressionNode.__init__(self)
        self.base = base
        self.ctype = ctype
        # "(T *)0" denotes NULL; everything else passes through unchanged.
        self.isnull = (isinstance(ctype, CtypesPointer) and
                       isinstance(base, ConstantExpressionNode) and
                       base.value == 0)

    def visit(self, visitor):
        # No need to visit self.ctype, because it isn't actually used.
        self.base.visit(visitor)
        ExpressionNode.visit(self, visitor)

    def evaluate(self, context):
        if self.isnull:
            return None
        return self.base.evaluate(context)

    def py_string(self, can_be_ctype):
        if self.isnull:
            return "None"
        return self.base.py_string(can_be_ctype)
+
class UnsupportedExpressionNode(ExpressionNode):
    """Placeholder node for expressions ctypesgen cannot translate."""

    def __init__(self, message):
        ExpressionNode.__init__(self)
        self.message = message
        # Record the failure so visitors report it as an error.
        self.error(message, 'unsupported-type')

    def evaluate(self, context):
        raise ValueError("Tried to evaluate an unsupported expression "
                         "node: %s" % self.message)

    def __repr__(self):
        return "<UnsupportedExpressionNode>"

    def py_string(self, can_be_ctype):
        raise ValueError("Called py_string() an unsupported expression "
                         "node: %s" % self.message)

+ 248 - 0
lib/python/ctypes/ctypesgencore/libraryloader.py

@@ -0,0 +1,248 @@
+# ----------------------------------------------------------------------------
+# Copyright (c) 2008 David James
+# Copyright (c) 2006-2008 Alex Holkner
+# All rights reserved.
+# 
+# Redistribution and use in source and binary forms, with or without
+# modification, are permitted provided that the following conditions 
+# are met:
+#
+#  * Redistributions of source code must retain the above copyright
+#    notice, this list of conditions and the following disclaimer.
+#  * Redistributions in binary form must reproduce the above copyright 
+#    notice, this list of conditions and the following disclaimer in
+#    the documentation and/or other materials provided with the
+#    distribution.
+#  * Neither the name of pyglet nor the names of its
+#    contributors may be used to endorse or promote products
+#    derived from this software without specific prior written
+#    permission.
+#
+# THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
+# "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
+# LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS
+# FOR A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE
+# COPYRIGHT OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT,
+# INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING,
+# BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES;
+# LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER
+# CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
+# LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN
+# ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
+# POSSIBILITY OF SUCH DAMAGE.
+# ----------------------------------------------------------------------------
+
+import os.path, re, sys, glob
+import ctypes
+import ctypes.util
+
def _environ_path(name):
    """Return the environment variable `name` split on ":" into a list of
    directories, or an empty list if the variable is not set."""
    value = os.environ.get(name)
    if value is None:
        return []
    return value.split(":")
+
class LibraryLoader(object):
    """Generic loader: locates a shared library by name and loads it with
    ctypes. Platform-specific behavior lives in subclasses, which override
    load() and/or getplatformpaths().
    """

    def __init__(self):
        # Extra directories to search; set via add_library_search_dirs().
        self.other_dirs = []

    def load_library(self, libname):
        """Given the name of a library, load it."""
        for candidate in self.getpaths(libname):
            if os.path.exists(candidate):
                return self.load(candidate)
        raise ImportError("%s not found." % libname)

    def load(self, path):
        """Given a path to a library, load it."""
        try:
            # Darwin requires dlopen to be called with mode RTLD_GLOBAL
            # instead of the default RTLD_LOCAL. Without this, libraries
            # may not be loadable, resulting in "Symbol not found" errors.
            if sys.platform == 'darwin':
                return ctypes.CDLL(path, ctypes.RTLD_GLOBAL)
            return ctypes.cdll.LoadLibrary(path)
        except OSError as e:
            raise ImportError(e)

    def getpaths(self, libname):
        """Yield the paths where the library might be found."""
        if os.path.isabs(libname):
            # Absolute paths are used verbatim.
            yield libname
        else:
            for candidate in self.getplatformpaths(libname):
                yield candidate
            located = ctypes.util.find_library(libname)
            if located:
                yield located

    def getplatformpaths(self, libname):
        # Overridden by platform-specific subclasses.
        return []
+
+# Darwin (Mac OS X)
+
class DarwinLibraryLoader(LibraryLoader):
    """Loader implementing the Mac OS X dylib search semantics."""

    # Filename patterns tried, in order, for a bare library name.
    name_formats = ["lib%s.dylib", "lib%s.so", "lib%s.bundle", "%s.dylib",
                "%s.so", "%s.bundle", "%s"]

    def getplatformpaths(self, libname):
        if os.path.pathsep in libname:
            candidates = [libname]
        else:
            candidates = [fmt % libname for fmt in self.name_formats]

        for directory in self.getdirs(libname):
            for candidate in candidates:
                yield os.path.join(directory, candidate)

    def getdirs(self, libname):
        '''Implements the dylib search as specified in Apple documentation:

        http://developer.apple.com/documentation/DeveloperTools/Conceptual/
            DynamicLibraries/Articles/DynamicLibraryUsageGuidelines.html

        Before commencing the standard search, the method first checks
        the bundle's ``Frameworks`` directory if the application is running
        within a bundle (OS X .app).
        '''
        fallback_dirs = _environ_path("DYLD_FALLBACK_LIBRARY_PATH")
        if not fallback_dirs:
            # Apple's documented default fallback path.
            fallback_dirs = [os.path.expanduser('~/lib'),
                             '/usr/local/lib', '/usr/lib']

        dirs = []

        # Names containing a slash skip the LD_LIBRARY_PATH stage.
        if '/' in libname:
            dirs.extend(_environ_path("DYLD_LIBRARY_PATH"))
        else:
            dirs.extend(_environ_path("LD_LIBRARY_PATH"))
            dirs.extend(_environ_path("DYLD_LIBRARY_PATH"))

        dirs.extend(self.other_dirs)
        dirs.append(".")

        # Inside a py2app bundle, also look in the bundle's Frameworks dir.
        if hasattr(sys, 'frozen') and sys.frozen == 'macosx_app':
            dirs.append(os.path.join(
                os.environ['RESOURCEPATH'],
                '..',
                'Frameworks'))

        dirs.extend(fallback_dirs)

        return dirs
+
+# Posix
+
class PosixLibraryLoader(LibraryLoader):
    """Loader approximating the ld.so search path on POSIX systems."""

    # Lazily-built mapping of file/library names to full paths.
    _ld_so_cache = None

    def _create_ld_so_cache(self):
        # Recreate search path followed by ld.so.  This is going to be
        # slow to build, and incorrect (ld.so uses ld.so.cache, which may
        # not be up-to-date).  Used only as fallback for distros without
        # /sbin/ldconfig.
        #
        # We assume the DT_RPATH and DT_RUNPATH binary sections are omitted.

        directories = []
        for var in ("LD_LIBRARY_PATH",
                    "SHLIB_PATH",    # HP-UX
                    "LIBPATH",       # OS/2, AIX
                    "LIBRARY_PATH",  # BeOS
                   ):
            if var in os.environ:
                directories.extend(os.environ[var].split(os.pathsep))
        directories.extend(self.other_dirs)
        directories.append(".")

        try:
            directories.extend(line.strip() for line in open('/etc/ld.so.conf'))
        except IOError:
            pass

        directories.extend(['/lib', '/usr/lib', '/lib64', '/usr/lib64'])

        cache = {}
        lib_re = re.compile(r'lib(.*)\.s[ol]')
        for directory in directories:
            try:
                for fullpath in glob.glob("%s/*.s[ol]*" % directory):
                    basename = os.path.basename(fullpath)

                    # Index by filename; the first hit wins.
                    if basename not in cache:
                        cache[basename] = fullpath

                    # Also index by bare library name ("c" for "libc.so").
                    match = lib_re.match(basename)
                    if match:
                        shortname = match.group(1)
                        if shortname not in cache:
                            cache[shortname] = fullpath
            except OSError:
                pass

        self._ld_so_cache = cache

    def getplatformpaths(self, libname):
        if self._ld_so_cache is None:
            self._create_ld_so_cache()

        cached = self._ld_so_cache.get(libname)
        if cached:
            yield cached

        located = ctypes.util.find_library(libname)
        if located:
            yield os.path.join("/lib", located)
+
+# Windows
+
class _WindowsLibrary(object):
    """Wraps a DLL loaded through both the cdecl and stdcall conventions,
    resolving each symbol from whichever convention exposes it."""

    def __init__(self, path):
        self.cdll = ctypes.cdll.LoadLibrary(path)
        self.windll = ctypes.windll.LoadLibrary(path)

    def __getattr__(self, name):
        # Prefer the cdecl binding; fall back to stdcall, letting the
        # second AttributeError propagate if neither has the symbol.
        try:
            return getattr(self.cdll, name)
        except AttributeError:
            return getattr(self.windll, name)
+
class WindowsLibraryLoader(LibraryLoader):
    """Loader for Windows/Cygwin, trying "%s.dll" then "lib%s.dll"."""

    name_formats = ["%s.dll", "lib%s.dll"]

    def load(self, path):
        # Wrap so both calling conventions are available to callers.
        return _WindowsLibrary(path)

    def getplatformpaths(self, libname):
        # Names containing a path separator are not searched for.
        if os.path.sep in libname:
            return
        for fmt in self.name_formats:
            found = ctypes.util.find_library(fmt % libname)
            if found:
                yield found
+
+# Platform switching
+
+# If your value of sys.platform does not appear in this dict, please contact
+# the Ctypesgen maintainers.
+
if sys.platform == "darwin":
    loader = DarwinLibraryLoader()
elif sys.platform in ("cygwin", "win32"):
    loader = WindowsLibraryLoader()
else:
    # Any other platform falls back to the generic POSIX search.
    loader = PosixLibraryLoader()

def add_library_search_dirs(other_dirs):
    """Register extra directories for the module-level loader to search."""
    loader.other_dirs = other_dirs

# Convenience alias bound to the chosen loader.
load_library = loader.load_library

+ 34 - 0
lib/python/ctypes/ctypesgencore/messages.py

@@ -0,0 +1,34 @@
+#!/usr/bin/env python
+
+"""
+ctypesgencore.messages contains functions to display status, error, or warning
+messages to the user. Warning and error messages are also associated
+with a "message class", which is a string, which currently has no effect.
+
+Error classes are:
+'usage' - there was something funny about the command-line parameters
+'cparser' - there was a syntax error in the header file
+'missing-library' - a library could not be loaded
+'macro' - a macro could not be translated to Python
+'unsupported-type' - there was a type in the header that ctypes cannot use, like
+    "long double".
+'other' - catchall.
+
+Warning classes are:
+'usage' - there was something funny about the command-line parameters
+'rename' - a description has been renamed to avoid a name conflict
+'other' - catchall.
+"""
+
+import sys
+
+__all__ = ["error_message","warning_message","status_message"]
+
def error_message(msg, cls=None):
    """Print an error message; `cls` is the error class (currently unused)."""
    print("Error: %s" % msg)
+
def warning_message(msg, cls=None):
    """Print a warning message; `cls` is the warning class (currently unused)."""
    print("Warning: %s" % msg)
+
def status_message(msg):
    """Print a status/progress message."""
    print("Status: %s" % msg)

+ 312 - 0
lib/python/ctypes/ctypesgencore/old libraryloader.py

@@ -0,0 +1,312 @@
+# ----------------------------------------------------------------------------
+# Copyright (c) 2008 David James
+# Copyright (c) 2006-2008 Alex Holkner
+# All rights reserved.
+# 
+# Redistribution and use in source and binary forms, with or without
+# modification, are permitted provided that the following conditions 
+# are met:
+#
+#  * Redistributions of source code must retain the above copyright
+#    notice, this list of conditions and the following disclaimer.
+#  * Redistributions in binary form must reproduce the above copyright 
+#    notice, this list of conditions and the following disclaimer in
+#    the documentation and/or other materials provided with the
+#    distribution.
+#  * Neither the name of pyglet nor the names of its
+#    contributors may be used to endorse or promote products
+#    derived from this software without specific prior written
+#    permission.
+#
+# THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
+# "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
+# LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS
+# FOR A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE
+# COPYRIGHT OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT,
+# INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING,
+# BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES;
+# LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER
+# CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
+# LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN
+# ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
+# POSSIBILITY OF SUCH DAMAGE.
+# ----------------------------------------------------------------------------
+
+import os
+import re
+import sys
+
+import ctypes
+import ctypes.util
+
+
+_debug_lib = False
+_debug_trace = False
+
class _TraceFunction(object):
    """Transparent proxy around a foreign function, used when tracing.

    str() reports the wrapped function's name; calls and attribute
    reads/writes are forwarded to the wrapped function.
    """

    def __init__(self, func):
        # Store directly in __dict__ to bypass our own __setattr__.
        self.__dict__['_func'] = func

    def __str__(self):
        return self._func.__name__

    def __call__(self, *args, **kwargs):
        return self._func(*args, **kwargs)

    def __getattr__(self, name):
        return getattr(self._func, name)

    def __setattr__(self, name, value):
        # Attribute writes (e.g. restype/argtypes) go to the real function.
        setattr(self._func, name, value)
+
class _TraceLibrary(object):
    """Wraps a loaded library so each resolved function comes back as a
    _TraceFunction proxy; announces the library when created."""

    def __init__(self, library):
        self._library = library
        print(library)

    def __getattr__(self, name):
        # Resolve the symbol, then wrap it for tracing.
        return _TraceFunction(getattr(self._library, name))
+
class _WindowsLibrary(object):
    """Loads a DLL through both the cdecl and stdcall conventions and
    resolves symbols from whichever convention provides them."""

    def __init__(self, path):
        self._libraries = [
            ctypes.cdll.LoadLibrary(path),
            ctypes.windll.LoadLibrary(path),
        ]

    def __getattr__(self, name):
        for index, library in enumerate(self._libraries):
            try:
                return _TraceFunction(getattr(library, name))
            except AttributeError:
                # Swallow the miss for all but the last library; if the
                # final one also lacks the symbol, let the error escape.
                if index > 0:
                    raise
+
+
+
class LibraryLoader(object):
    # NOTE(review): legacy loader kept in "old libraryloader.py"; the
    # active implementation lives in libraryloader.py. Python 2 syntax
    # (unicode, "except OSError,e", print statement) is preserved here.
    def load_library(self, *names, **kwargs):
        '''Find and load a library.  
        
        More than one name can be specified, they will be tried in order.
        Platform-specific library names (given as kwargs) are tried first.

        Raises ImportError if library is not found.
        '''
                
        # On Darwin a framework, if requested, takes priority.
        if 'framework' in kwargs and self.platform == 'darwin':
            return self.load_framework(kwargs['framework'])
        
        # Normalize the platform-specific override to a list of names.
        platform_names = kwargs.get(self.platform, [])
        if type(platform_names) in (str, unicode):
            platform_names = [platform_names]
        elif type(platform_names) is tuple:
            platform_names = list(platform_names)

        # Add conventional per-platform filename patterns for each name.
        if self.platform == 'linux2':
            platform_names.extend(['lib%s.so' % n for n in names])
        elif self.platform == 'win32':
            platform_names.extend(['%s.dll' % n for n in names])
            platform_names.extend(['lib%s.dll' % n for n in names])
        elif self.platform == 'darwin':
            platform_names.extend(['%s.dylib' % n for n in names])
            platform_names.extend(['lib%s.dylib' % n for n in names])
        
        # Finally try the bare names as given.
        platform_names.extend(names)
        for name in platform_names:
            path = self.find_library(name)
            if path:
                try:
                    if self.platform == 'win32':
                        # Windows DLLs may use either calling convention.
                        lib = _WindowsLibrary(path)
                    else:
                        lib = ctypes.cdll.LoadLibrary(path)
                    if _debug_lib:
                        print path
                    if _debug_trace:
                        lib = _TraceLibrary(lib)
                    return lib
                except OSError,e:
                    # Loading failed; keep trying the remaining candidates.
                    pass
        raise ImportError('Library "%s" not found.' % names[0])

    # Default lookup delegates to ctypes; subclasses override this.
    find_library = lambda self, name: ctypes.util.find_library(name)

    # Cygwin is treated like native win32 throughout.
    platform = sys.platform
    if platform == 'cygwin':
        platform = 'win32'

    def load_framework(self, path):
        # Only meaningful on Darwin; MachOLibraryLoader overrides it.
        raise RuntimeError("Can't load framework on this platform.")
+
class MachOLibraryLoader(LibraryLoader):
    # NOTE(review): legacy Mac OS X loader (see "old libraryloader.py");
    # replicates the dyld search-path rules.
    def __init__(self):
        # Capture the three dyld-relevant environment variables once,
        # at construction time.
        if 'LD_LIBRARY_PATH' in os.environ:
            self.ld_library_path = os.environ['LD_LIBRARY_PATH'].split(':')
        else:
            self.ld_library_path = []

        if 'DYLD_LIBRARY_PATH' in os.environ:
            self.dyld_library_path = os.environ['DYLD_LIBRARY_PATH'].split(':')
        else:
            self.dyld_library_path = []

        if 'DYLD_FALLBACK_LIBRARY_PATH' in os.environ:
            self.dyld_fallback_library_path = \
                os.environ['DYLD_FALLBACK_LIBRARY_PATH'].split(':')
        else:
            # Apple's documented default fallback path.
            self.dyld_fallback_library_path = [
                os.path.expanduser('~/lib'),
                '/usr/local/lib',
                '/usr/lib']
        
    def find_library(self, path):
        '''Implements the dylib search as specified in Apple documentation:
        
        http://developer.apple.com/documentation/DeveloperTools/Conceptual/DynamicLibraries/Articles/DynamicLibraryUsageGuidelines.html

        Before commencing the standard search, the method first checks
        the bundle's ``Frameworks`` directory if the application is running
        within a bundle (OS X .app).
        '''

        libname = os.path.basename(path)
        search_path = []

        # Inside a py2app bundle, check the bundle's Frameworks dir first.
        if hasattr(sys, 'frozen') and sys.frozen == 'macosx_app':
            search_path.append(os.path.join(
                os.environ['RESOURCEPATH'],
                '..',
                'Frameworks',
                libname))
                
        # Names containing a slash skip the LD_LIBRARY_PATH stage.
        if '/' in path:
            search_path.extend(
                [os.path.join(p, libname) \
                    for p in self.dyld_library_path])
            search_path.append(path)
            search_path.extend(
                [os.path.join(p, libname) \
                    for p in self.dyld_fallback_library_path])
        else:
            search_path.extend(
                [os.path.join(p, libname) \
                    for p in self.ld_library_path])
            search_path.extend(
                [os.path.join(p, libname) \
                    for p in self.dyld_library_path])
            search_path.append(path)
            search_path.extend(
                [os.path.join(p, libname) \
                    for p in self.dyld_fallback_library_path])
                
        # First existing candidate wins.
        for path in search_path:
            if os.path.exists(path):
                return path

        return None

    def find_framework(self, path):
        '''Implement runtime framework search as described by:

        http://developer.apple.com/documentation/MacOSX/Conceptual/BPFrameworks/Concepts/FrameworkBinding.html
        '''

        # e.g. path == '/System/Library/Frameworks/OpenGL.framework'
        #      name == 'OpenGL'
        # return '/System/Library/Frameworks/OpenGL.framework/OpenGL'
        name = os.path.splitext(os.path.split(path)[1])[0]

        # Try the path as given before the standard framework locations.
        realpath = os.path.join(path, name) 
        if os.path.exists(realpath):
            return realpath

        for dir in ('/Library/Frameworks',
                    '/System/Library/Frameworks'):
            realpath = os.path.join(dir, '%s.framework' % name, name)
            if os.path.exists(realpath):
                return realpath

        return None

    def load_framework(self, path):
        realpath = self.find_framework(path)
        if realpath:
            lib = ctypes.cdll.LoadLibrary(realpath)
            if _debug_lib:
                print realpath
            if _debug_trace:
                lib = _TraceLibrary(lib)
            return lib

        raise ImportError("Can't find framework %s." % path)
+
class LinuxLibraryLoader(LibraryLoader):
    # NOTE(review): legacy Linux loader (see "old libraryloader.py").
    # Lazily-built mapping of file/library names to full paths.
    _ld_so_cache = None

    def _create_ld_so_cache(self):
        # Recreate search path followed by ld.so.  This is going to be
        # slow to build, and incorrect (ld.so uses ld.so.cache, which may
        # not be up-to-date).  Used only as fallback for distros without
        # /sbin/ldconfig.
        #
        # We assume the DT_RPATH and DT_RUNPATH binary sections are omitted.

        directories = []
        try:
            directories.extend(os.environ['LD_LIBRARY_PATH'].split(':'))
        except KeyError:
            pass

        try:
            directories.extend([dir.strip() for dir in open('/etc/ld.so.conf')])
        except IOError:
            pass

        directories.extend(['/lib', '/usr/lib'])

        cache = {}
        lib_re = re.compile(r'lib(.*)\.so$')
        for dir in directories:
            try:
                for file in os.listdir(dir):
                    if '.so' not in file:
                        continue

                    # Index by filename
                    path = os.path.join(dir, file)
                    if file not in cache:
                        cache[file] = path

                    # Index by library name
                    match = lib_re.match(file)
                    if match:
                        library = match.group(1)
                        if library not in cache:
                            cache[library] = path
            except OSError:
                # Unreadable/missing directory; skip it.
                pass

        self._ld_so_cache = cache

    def find_library(self, path):
        # Implement the ld-linux.so search path as described in
        # the man page.

        if self._ld_so_cache is None:
            self._create_ld_so_cache()

        return self._ld_so_cache.get(path)
+
# Pick the loader matching the current platform; generic fallback otherwise.
if sys.platform == 'darwin':
    loader = MachOLibraryLoader()
elif sys.platform == 'linux2':
    loader = LinuxLibraryLoader()
else:
    loader = LibraryLoader()
# Convenience alias bound to the chosen loader.
load_library = loader.load_library
+

+ 37 - 0
lib/python/ctypes/ctypesgencore/options.py

@@ -0,0 +1,37 @@
+#!/usr/bin/env python
+
+"""
+All of the components of ctypesgencore require an argument called "options".
+In command-line usage, this would be an optparse.Values object. However, if
+ctypesgencore is used as a standard Python module, constructing this object
+would be a pain. So this module exists to provide a "default" options object
+for convenience.
+"""
+
+import optparse, copy
+
+# Baseline settings for a ctypesgen run; each key mirrors one of the
+# command-line options of the ctypesgen front end.
+default_values={
+    "other_headers": [],
+    "modules": [],
+    "include_search_paths": [],
+    "compile_libdirs": [],
+    "runtime_libdirs": [],
+    "cpp": "gcc -E",
+    "save_preprocessed_headers": None,
+    "all_headers": False,
+    "builtin_symbols": False,
+    "include_symbols": None,
+    "exclude_symbols": None,
+    "show_all_errors": False,
+    "show_long_errors": False,
+    "show_macro_warnings": True,
+    "header_template": None,
+    "inserted_files": [],
+    "other_known_names": [],
+    "include_macros": True,
+    "libraries": [],
+    "strip_build_path": None
+}
+
+def get_default_options():
+    """Return a fresh optparse.Values with the default settings.
+
+    Deep-copied so callers may mutate the list values without
+    corrupting the shared defaults above.
+    """
+    return optparse.Values(copy.deepcopy(default_values))

+ 24 - 0
lib/python/ctypes/ctypesgencore/parser/__init__.py

@@ -0,0 +1,24 @@
+#!/usr/bin/env python
+
+"""
+This package parses C header files and generates lists of functions, typedefs,
+variables, structs, unions, enums, macros, and constants. This package knows
+nothing about the libraries themselves.
+
+The public interface for this package is the function "parse". Use as follows:
+>>> descriptions = parse(["inputfile1.h","inputfile2.h"], options)
+where "options" is an optparse.Values object.
+
+parse() returns a DescriptionCollection object. See ctypesgencore.descriptions
+for more information.
+
+"""
+
+from datacollectingparser import DataCollectingParser
+
+def parse(headers, options):
+    """Parse the given header files and return a DescriptionCollection."""
+    parser=DataCollectingParser(headers, options)
+    parser.parse()
+    return parser.data()
+
+__all__ = ["parse"]

+ 174 - 0
lib/python/ctypes/ctypesgencore/parser/cdeclarations.py

@@ -0,0 +1,174 @@
+#!/usr/bin/env python
+
+'''
+This file contains classes that represent C declarations. cparser produces
+declarations in this format, and ctypesparser reformats them into a format that
+is not C-specific. The other modules don't need to touch these.
+'''
+
+__docformat__ = 'restructuredtext'
+
+# --------------------------------------------------------------------------
+# C Object Model
+# --------------------------------------------------------------------------
+
+class Declaration(object):
+    '''One C declaration: a declarator plus its type and an optional
+    storage class (e.g. "typedef", "static").'''
+    def __init__(self):
+        self.declarator = None
+        self.type = Type()
+        self.storage = None
+
+    def __repr__(self):
+        d = {
+            'declarator': self.declarator,
+            'type': self.type,
+        }
+        if self.storage:
+            d['storage'] = self.storage
+        l = ['%s=%r' % (k, v) for k, v in d.items()]
+        return 'Declaration(%s)' % ', '.join(l)
+
+class Declarator(object):
+    pointer = None
+    def __init__(self):
+        self.identifier = None
+        self.initializer = None
+        self.array = None
+        self.parameters = None
+        self.bitfield = None
+
+    # make pointer read-only to catch mistakes early
+    pointer = property(lambda self: None)
+
+    def __repr__(self):
+        s = self.identifier or ''
+        if self.bitfield:
+            s += ":%d" % self.bitfield
+        if self.array:
+            s += repr(self.array)
+        if self.initializer:
+            s += ' = %r' % self.initializer
+        if self.parameters is not None:
+            s += '(' + ', '.join([repr(p) for p in self.parameters]) + ')'
+        return s
+
+class Pointer(Declarator):
+    '''Declarator for a pointer level; the pointed-to declarator is
+    stored in the inherited "pointer" attribute.'''
+    pointer = None
+    def __init__(self):
+        super(Pointer, self).__init__()
+        self.qualifiers = []  # e.g. ['const', 'volatile']
+
+    def __repr__(self):
+        q = ''
+        if self.qualifiers:
+            q = '<%s>' % ' '.join(self.qualifiers)
+        return 'POINTER%s(%r)' % (q, self.pointer) + \
+            super(Pointer, self).__repr__()
+
+class Array(object):
+    '''One array dimension; multi-dimensional arrays chain through the
+    "array" attribute.  size is None for an unsized dimension ("[]").'''
+    def __init__(self):
+        self.size = None
+        self.array = None
+
+    def __repr__(self):
+        if self.size:
+            a =  '[%r]' % self.size
+        else:
+            a = '[]'
+        if self.array:
+            return repr(self.array) + a
+        else:
+            return a
+
+class Parameter(object):
+    '''A function parameter: like Declaration, but the declarator may be
+    absent (abstract declarator in a prototype).'''
+    def __init__(self):
+        self.type = Type()
+        self.storage = None
+        self.declarator = None
+
+    def __repr__(self):
+        d = {
+            'type': self.type,
+        }
+        if self.declarator:
+            d['declarator'] = self.declarator
+        if self.storage:
+            d['storage'] = self.storage
+        l = ['%s=%r' % (k, v) for k, v in d.items()]
+        return 'Parameter(%s)' % ', '.join(l)
+
+
+class Type(object):
+    '''A C type: a list of qualifiers (e.g. "const") and a list of
+    specifiers (keywords, struct/enum specifiers, typedef names).'''
+    def __init__(self):
+        self.qualifiers = []
+        self.specifiers = []
+
+    def __repr__(self):
+        return ' '.join(self.qualifiers + [str(s) for s in self.specifiers])
+
+# These are used only internally.
+
+class StorageClassSpecifier(str):
+    '''Storage-class keyword, e.g. "typedef", "static", "extern".'''
+    pass
+
+class TypeSpecifier(str):
+    '''Simple type keyword or typedef name, e.g. "int", "size_t".'''
+    pass
+
+class StructTypeSpecifier(object):
+    '''Specifier for a struct or union type.  declarations is falsy for
+    an opaque (body-less) struct; tag may be None for an anonymous one.'''
+    def __init__(self, is_union, tag, declarations):
+        self.is_union = is_union
+        self.tag = tag
+        self.declarations = declarations
+
+    def __repr__(self):
+        if self.is_union:
+            s = 'union'
+        else:
+            s = 'struct'
+        if self.tag:
+            s += ' %s' % self.tag
+        if self.declarations:
+            s += ' {%s}' % '; '.join([repr(d) for d in self.declarations])
+        return s
+
+class EnumSpecifier(object):
+    '''Specifier for an enum type.  src, when given, is a
+    (filename, lineno) pair recording where the enum was declared.'''
+    def __init__(self, tag, enumerators, src=None):
+        self.tag = tag
+        self.enumerators = enumerators
+        self.src=src
+
+    def __repr__(self):
+        s = 'enum'
+        if self.tag:
+            s += ' %s' % self.tag
+        if self.enumerators:
+            s += ' {%s}' % ', '.join([repr(e) for e in self.enumerators])
+        return s
+
+class Enumerator(object):
+    '''One name inside an enum; expression is None when no explicit
+    value was given.'''
+    def __init__(self, name, expression):
+        self.name = name
+        self.expression = expression
+
+    def __repr__(self):
+        s = self.name
+        if self.expression:
+            s += ' = %r' % self.expression
+        return s
+
+class TypeQualifier(str):
+    '''Type-qualifier keyword, e.g. "const", "volatile".'''
+    pass
+
+def apply_specifiers(specifiers, declaration):
+    '''Apply specifiers to the declaration (declaration may be
+    a Parameter instead).'''
+    for s in specifiers:
+        if type(s) == StorageClassSpecifier:
+            if declaration.storage:
+                # Multiple storage classes, technically an error... ignore it
+                # (the assignment below means the last one seen wins).
+                pass
+            declaration.storage = s
+        elif type(s) in (TypeSpecifier, StructTypeSpecifier, EnumSpecifier):
+            declaration.type.specifiers.append(s)
+        elif type(s) == TypeQualifier:
+            declaration.type.qualifiers.append(s)

Failā izmaiņas netiks attēlotas, jo tās ir par lielu
+ 1093 - 0
lib/python/ctypes/ctypesgencore/parser/cgrammar.py


+ 208 - 0
lib/python/ctypes/ctypesgencore/parser/cparser.py

@@ -0,0 +1,208 @@
+#!/usr/bin/env python
+
+'''
+Parse a C source file.
+
+To use, subclass CParser and override its handle_* methods.  Then instantiate
+the class with a string to parse.
+'''
+
+__docformat__ = 'restructuredtext'
+
+import operator
+import os.path
+import re
+import sys
+import time
+import warnings
+
+import preprocessor
+import yacc
+import cgrammar
+import cdeclarations
+
+# --------------------------------------------------------------------------
+# Lexer
+# --------------------------------------------------------------------------
+
+class CLexer(object):
+    '''Adapter presenting the preprocessor's token list through the
+    lex-style input()/token() interface expected by yacc, translating
+    preprocessor token types into C grammar token types on the fly.'''
+    def __init__(self, cparser):
+        self.cparser = cparser
+        self.type_names = set()   # typedef names seen so far; grows during parse
+        self.in_define = False
+
+    def input(self, tokens):
+        self.tokens = tokens
+        self.pos = 0
+
+    def token(self):
+        # Return the next significant token, or None at end of input.
+        while self.pos < len(self.tokens):
+            t = self.tokens[self.pos]
+            
+            self.pos += 1
+
+            if not t:
+                break
+            
+            if t.type == 'PP_DEFINE':
+                self.in_define = True
+            elif t.type == 'PP_END_DEFINE':
+                self.in_define = False
+            
+            # Transform PP tokens into C tokens
+            elif t.type == 'LPAREN':
+                t.type = '('
+            elif t.type == 'PP_NUMBER':
+                t.type = 'CONSTANT'
+            elif t.type == 'IDENTIFIER' and t.value in cgrammar.keywords:
+                t.type = t.value.upper()
+            elif t.type == 'IDENTIFIER' and t.value in self.type_names:
+                # Only treat as TYPE_NAME when not directly preceded by
+                # a struct/union/enum keyword (there it is a tag name).
+                if (self.pos < 2 or self.tokens[self.pos-2].type not in
+                    ('ENUM', 'STRUCT', 'UNION')):
+                    t.type = 'TYPE_NAME'
+            
+            t.lexer = self
+            t.clexpos = self.pos - 1
+            
+            return t
+        return None
+        
+# --------------------------------------------------------------------------
+# Parser
+# --------------------------------------------------------------------------
+
+class CParser(object):
+    '''Parse a C source file.
+
+    Subclass and override the handle_* methods.  Call `parse` with a string
+    to parse.
+    '''
+    def __init__(self, options, stddef_types=True, gnu_types=True):
+        self.preprocessor_parser = preprocessor.PreprocessorParser(options,self)
+        self.parser = yacc.Parser()
+        # Build (or load pre-generated) LALR tables for the C grammar.
+        prototype = yacc.yacc(method        = 'LALR',
+                              debug         = False,
+                              module        = cgrammar,
+                              write_tables  = True,
+                              outputdir     = os.path.dirname(__file__),
+                              optimize      = True)
+        
+        # If yacc is reading tables from a file, then it won't find the error
+        # function... need to set it manually
+        prototype.errorfunc = cgrammar.p_error
+        prototype.init_parser(self.parser)
+        self.parser.cparser = self
+
+        # Pre-seed the lexer with type names that headers assume exist.
+        self.lexer = CLexer(self)
+        if stddef_types:
+            self.lexer.type_names.add('wchar_t')
+            self.lexer.type_names.add('ptrdiff_t')
+            self.lexer.type_names.add('size_t')
+        if gnu_types:
+            self.lexer.type_names.add('__builtin_va_list')
+        if sys.platform == 'win32':
+            self.lexer.type_names.add('__int64')
+        
+    def parse(self, filename, debug=False):
+        '''Parse a file.
+
+        If `debug` is True, parsing state is dumped to stdout.
+        '''
+
+        self.handle_status('Preprocessing %s' % filename)
+        self.preprocessor_parser.parse(filename)
+        self.lexer.input(self.preprocessor_parser.output)
+        self.handle_status('Parsing %s' % filename)
+        self.parser.parse(lexer=self.lexer, debug=debug)
+
+    # ----------------------------------------------------------------------
+    # Parser interface.  Override these methods in your subclass.
+    # ----------------------------------------------------------------------
+
+    def handle_error(self, message, filename, lineno):
+        '''A parse error occurred.
+        
+        The default implementation prints `lineno` and `message` to stderr.
+        The parser will try to recover from errors by synchronising at the
+        next semicolon.
+        '''
+        print >> sys.stderr, '%s:%s %s' % (filename, lineno, message)
+    
+    def handle_pp_error(self, message):
+        '''The C preprocessor emitted an error.
+        
+        The default implementation prints the error to stderr. If processing
+        can continue, it will.
+        '''
+        print >> sys.stderr, 'Preprocessor:', message
+    
+    def handle_status(self, message):
+        '''Progress information.
+
+        The default implementation prints message to stderr.
+        '''
+        print >> sys.stderr, message
+
+    def handle_define(self, name, params, value, filename, lineno):
+        '''#define `name` `value` 
+        or #define `name`(`params`) `value`
+
+        name is a string
+        params is None or a list of strings
+        value is a ...?
+        '''
+
+    def handle_define_constant(self, name, value, filename, lineno):
+        '''#define `name` `value`
+        
+        name is a string
+        value is an ExpressionNode or None
+        '''
+    
+    def handle_define_macro(self, name, params, value, filename, lineno):
+        '''#define `name`(`params`) `value`
+        
+        name is a string
+        params is a list of strings
+        value is an ExpressionNode or None
+        '''
+    
+    def impl_handle_declaration(self, declaration, filename, lineno):
+        '''Internal method that calls `handle_declaration`.  This method
+        also adds any new type definitions to the lexer's list of valid type
+        names, which affects the parsing of subsequent declarations.
+        '''
+        if declaration.storage == 'typedef':
+            declarator = declaration.declarator
+            if not declarator:
+                # XXX TEMPORARY while struct etc not filled
+                return
+            # Walk to the innermost declarator to find the identifier.
+            while declarator.pointer:
+                declarator = declarator.pointer
+            self.lexer.type_names.add(declarator.identifier)
+        self.handle_declaration(declaration, filename, lineno)
+
+    def handle_declaration(self, declaration, filename, lineno):
+        '''A declaration was encountered.  
+        
+        `declaration` is an instance of Declaration.  Where a declaration has
+        multiple initialisers, each is returned as a separate declaration.
+        '''
+        pass
+
+class DebugCParser(CParser):
+    '''A convenience class that prints each invocation of a handle_* method to
+    stdout.
+    '''
+
+    def handle_define(self, name, value, filename, lineno):
+        print '#define name=%r, value=%r' % (name, value)
+
+    def handle_define_constant(self, name, value, filename, lineno):
+        print '#define constant name=%r, value=%r' % (name, value)
+
+    def handle_declaration(self, declaration, filename, lineno):
+        print declaration
+        
+# Ad-hoc debugging entry point: parse the file named on the command line.
+# NOTE(review): CParser.__init__ requires an "options" argument which is
+# not supplied here -- confirm this script path is still usable.
+if __name__ == '__main__':
+    DebugCParser().parse(sys.argv[1], debug=True)
+ 198 - 0
lib/python/ctypes/ctypesgencore/parser/ctypesparser.py

@@ -0,0 +1,198 @@
+#!/usr/bin/env python
+
+'''
+ctypesgencore.parser.ctypesparser contains a class, CtypesParser, which is a
+subclass of ctypesgencore.parser.cparser.CParser. CtypesParser overrides the
+handle_declaration() method of CParser. It turns the low-level type declarations
+produced by CParser into CtypesType instances and breaks the parser's general
+declarations into function, variable, typedef, constant, and type descriptions.
+'''
+
+__docformat__ = 'restructuredtext'
+
+__all__ = ["CtypesParser"]
+
+from cparser import *
+from ctypesgencore.ctypedescs import *
+from cdeclarations import *
+from ctypesgencore.expressions import *
+
+def get_ctypes_type(typ, declarator, check_qualifiers=False):       
+    '''Convert a cdeclarations Type/Declarator pair into a CtypesType
+    tree, recursing into parameters of function declarators.
+
+    NOTE(review): check_qualifiers is accepted but never used in this
+    body -- confirm whether qualifier checking was intended here.
+    '''
+    signed = True
+    typename = 'int'
+    longs = 0
+    t = None
+    
+    # Fold the specifier list into either a struct/enum type or a
+    # (typename, signed, longs) triple describing a numeric type.
+    for specifier in typ.specifiers:
+        if isinstance(specifier, StructTypeSpecifier):
+            t = make_struct_from_specifier(specifier)
+        elif isinstance(specifier, EnumSpecifier):
+            t = make_enum_from_specifier(specifier)
+        elif specifier == 'signed':
+            signed = True
+        elif specifier == 'unsigned':
+            signed = False
+        elif specifier == 'long':
+            longs += 1
+        else:
+            typename = str(specifier)
+    
+    if not t:
+        # It is a numeric type of some sort
+        if (typename,signed,longs) in ctypes_type_map:
+            t = CtypesSimple(typename,signed,longs)
+        
+        elif signed and not longs:
+            t = CtypesTypedef(typename)
+        
+        else:
+            name = " ".join(typ.specifiers)
+            if typename in [x[0] for x in ctypes_type_map.keys()]:
+                # It's an unsupported variant of a builtin type
+                error = "Ctypes does not support the type \"%s\"." % name
+            else:
+                error = "Ctypes does not support adding additional " \
+                    "specifiers to typedefs, such as \"%s\"" % name
+            t = CtypesTypedef(name)
+            t.error(error,cls='unsupported-type')
+        
+        if declarator and declarator.bitfield:
+            t = CtypesBitfield(t,declarator.bitfield)
+
+    # Wrap the base type for each pointer level, applying any function
+    # parameters and array dimensions attached at that level first.
+    qualifiers = []
+    qualifiers.extend(typ.qualifiers)
+    while declarator and declarator.pointer:
+        if declarator.parameters is not None:
+            variadic = "..." in declarator.parameters
+
+            params = []
+            for param in declarator.parameters:
+                if param=="...":
+                    break
+                params.append(get_ctypes_type(param.type, param.declarator))
+            t = CtypesFunction(t, params, variadic)
+        
+        a = declarator.array
+        while a:
+            t = CtypesArray(t, a.size)
+            a = a.array
+
+        qualifiers.extend(declarator.qualifiers)
+        
+        t = CtypesPointer(t, declarator.qualifiers)
+        
+        declarator = declarator.pointer
+    
+    # Innermost declarator: remaining parameters/arrays apply directly.
+    if declarator and declarator.parameters is not None:
+        variadic = "..." in declarator.parameters
+
+        params = []
+        for param in declarator.parameters:
+            if param=="...":
+                break
+            params.append(get_ctypes_type(param.type, param.declarator))
+        t = CtypesFunction(t, params, variadic)
+    
+    if declarator:
+        a = declarator.array
+        while a:
+            t = CtypesArray(t, a.size)
+            a = a.array
+    
+    # Special case: map "signed char *" to the String helper type.
+    if isinstance(t, CtypesPointer) and \
+       isinstance(t.destination, CtypesSimple) and \
+       t.destination.name=="char" and \
+       t.destination.signed:
+       t = CtypesSpecial("String")
+
+    return t
+
+def make_struct_from_specifier(specifier):
+    '''Build a CtypesStruct from a StructTypeSpecifier; members is None
+    for an opaque (body-less) struct or union.'''
+    variety = {True:"union", False:"struct"}[specifier.is_union]
+    tag = specifier.tag
+    
+    if specifier.declarations:
+        members = []
+        for declaration in specifier.declarations:
+            t = get_ctypes_type(declaration.type,
+                                declaration.declarator,
+                                check_qualifiers=True)
+            declarator = declaration.declarator
+            if declarator is None:
+                # XXX TEMPORARY while struct with no typedef not filled in
+                break
+            while declarator.pointer:
+                declarator = declarator.pointer
+            name = declarator.identifier
+            members.append((name, remove_function_pointer(t)))
+    else:
+        members = None
+    
+    return CtypesStruct(tag,variety,members,
+                        src=(specifier.filename,specifier.lineno))
+
+def make_enum_from_specifier(specifier):
+    '''Build a CtypesEnum from an EnumSpecifier, synthesising values for
+    enumerators without explicit expressions (previous + 1, starting
+    from 0) as the C standard prescribes.'''
+    tag = specifier.tag
+    
+    enumerators = []
+    last_name = None
+    for e in specifier.enumerators:
+        if e.expression:
+            value = e.expression
+        else:
+            if last_name:
+                value = BinaryExpressionNode("addition", (lambda x,y:x+y),
+                    "(%s + %s)", (False,False),
+                    IdentifierExpressionNode(last_name),
+                    ConstantExpressionNode(1))
+            else:
+                value = ConstantExpressionNode(0)
+        
+        enumerators.append((e.name,value))
+        last_name = e.name
+    
+    return CtypesEnum(tag, enumerators,
+                      src=(specifier.filename,specifier.lineno))
+
+class CtypesParser(CParser):
+    '''Parse a C file for declarations that can be used by ctypes.
+    
+    Subclass and override the handle_ctypes_* methods.
+    '''
+
+    def handle_declaration(self, declaration, filename, lineno):
+        t = get_ctypes_type(declaration.type, declaration.declarator)
+        
+        if type(t) in (CtypesStruct, CtypesEnum):
+            self.handle_ctypes_new_type(
+                remove_function_pointer(t), filename, lineno)
+        
+        declarator = declaration.declarator
+        if declarator is None:
+            # XXX TEMPORARY while struct with no typedef not filled in
+            return
+        while declarator.pointer:
+            declarator = declarator.pointer
+        name = declarator.identifier
+        if declaration.storage == 'typedef':
+            self.handle_ctypes_typedef(
+                name, remove_function_pointer(t), filename, lineno)
+        elif type(t) == CtypesFunction:
+            self.handle_ctypes_function(
+                name, t.restype, t.argtypes, t.variadic, filename, lineno)
+        elif declaration.storage != 'static':
+            self.handle_ctypes_variable(name, t, filename, lineno)
+
+    # ctypes parser interface.  Override these methods in your subclass.
+    
+    def handle_ctypes_new_type(self, ctype, filename, lineno):
+        pass
+    
+    def handle_ctypes_typedef(self, name, ctype, filename, lineno):
+        pass
+
+    def handle_ctypes_function(self, name, restype, argtypes, filename, lineno):
+        pass
+
+    def handle_ctypes_variable(self, name, ctype, filename, lineno):
+        pass

+ 326 - 0
lib/python/ctypes/ctypesgencore/parser/datacollectingparser.py

@@ -0,0 +1,326 @@
+#!/usr/bin/env python
+
+"""
+DataCollectingParser subclasses ctypesparser.CtypesParser and builds Description
+objects from the CtypesType objects and other information from CtypesParser.
+After parsing is complete, a DescriptionCollection object can be retrieved by
+calling DataCollectingParser.data(). 
+"""
+
+import ctypesparser
+from ctypesgencore.descriptions import *
+from ctypesgencore.ctypedescs import *
+from ctypesgencore.expressions import *
+from ctypesgencore.messages import *
+from tempfile import NamedTemporaryFile
+import os
+
+class DataCollectingParser(ctypesparser.CtypesParser,
+                           ctypesparser.CtypesTypeVisitor):
+    """Main class for the Parser component. Steps for use:
+    p=DataCollectingParser(names_of_header_files,options)
+    p.parse()
+    data=p.data() #A dictionary of constants, enums, structs, functions, etc.
+    """
+    def __init__(self,headers,options):
+        ctypesparser.CtypesParser.__init__(self,options)
+        self.headers=headers
+        self.options=options
+        
+        # One list per description category, plus "all" and the ordered
+        # output sequence used by the printer.
+        self.constants=[]
+        self.typedefs=[]
+        self.structs=[]
+        self.enums=[]
+        self.functions=[]
+        self.variables=[]
+        self.macros=[]
+        
+        self.all=[]
+        self.output_order=[]
+        
+        # NULL is a useful macro to have defined
+        null = ConstantExpressionNode(None)
+        nullmacro = ConstantDescription("NULL",null,("<built-in>",1))
+        self.constants.append(nullmacro)
+        self.all.append(nullmacro)
+        self.output_order.append(("constant", nullmacro))
+        
+        # A list of tuples describing macros; saved to be processed after
+        # everything else has been parsed
+        self.saved_macros = []
+        # A set of structs that are already known
+        self.already_seen_structs=set() 
+        # A dict of structs that have only been seen in opaque form
+        self.already_seen_opaque_structs={} 
+        # A set of enums that are already known
+        self.already_seen_enums=set() 
+        # A dict of enums that have only been seen in opaque form
+        self.already_seen_opaque_enums={}
+            
+    def parse(self):
+        # Write a temporary wrapper header that #includes everything,
+        # so a single CtypesParser.parse() run covers all inputs.
+        f = NamedTemporaryFile(suffix=".h")
+        for header in self.options.other_headers:
+            print >>f, '#include <%s>' % header
+        for header in self.headers:
+            print >>f, '#include "%s"' % os.path.abspath(header)
+        f.flush()
+        ctypesparser.CtypesParser.parse(self,f.name,None)
+        f.close()
+        
+        # Macros were deferred so that the types they reference exist.
+        for name, params, expr, (filename,lineno) in self.saved_macros:
+            self.handle_macro(name, params, expr, filename, lineno)
+            
+    def handle_define_constant(self, name, expr, filename, lineno):
+        # Called by CParser
+        # Save to handle later
+        self.saved_macros.append((name, None, expr, (filename, lineno)))
+    
+    def handle_define_unparseable(self, name, params, value, filename, lineno):
+        # Called by CParser
+        # Record the macro anyway so the failure is reported in output.
+        if params:
+            original_string = "#define %s(%s) %s" % \
+                (name, ",".join(params), " ".join(value))
+        else:
+            original_string = "#define %s %s" % \
+                (name, " ".join(value))
+        macro = MacroDescription(name, params, None,
+                                 src = (filename,lineno))
+        macro.error("Could not parse macro \"%s\"" % original_string,
+                    cls = 'macro')
+        macro.original_string = original_string
+        self.macros.append(macro)
+        self.all.append(macro)
+        self.output_order.append(('macro',macro))
+    
+    def handle_define_macro(self, name, params, expr, filename, lineno):
+        # Called by CParser
+        # Save to handle later
+        self.saved_macros.append((name, params, expr, (filename,lineno)))
+    
+    def handle_ctypes_typedef(self, name, ctype, filename, lineno):
+        # Called by CtypesParser
+        # visit() lets us register any structs/enums nested in the type.
+        ctype.visit(self)
+        
+        typedef=TypedefDescription(name,
+                                   ctype,
+                                   src=(filename,repr(lineno)))
+        
+        self.typedefs.append(typedef)
+        self.all.append(typedef)
+        self.output_order.append(('typedef',typedef))
+    
+    def handle_ctypes_new_type(self, ctype, filename, lineno):
+        # Called by CtypesParser
+        if isinstance(ctype,ctypesparser.CtypesEnum):
+            self.handle_enum(ctype, filename, lineno)
+        else:
+            self.handle_struct(ctype, filename, lineno)
+    
+    def handle_ctypes_function(self, name, restype, argtypes, variadic,
+                               filename, lineno):
+        # Called by CtypesParser
+        restype.visit(self)
+        for argtype in argtypes:
+            argtype.visit(self)
+        
+        function=FunctionDescription(name,
+                                     restype,
+                                     argtypes,
+                                     variadic = variadic,
+                                     src=(filename,repr(lineno)))
+        
+        self.functions.append(function)
+        self.all.append(function)
+        self.output_order.append(('function',function))
+
+    def handle_ctypes_variable(self, name, ctype, filename, lineno):
+        # Called by CtypesParser
+        ctype.visit(self)
+        
+        variable=VariableDescription(name,
+                                     ctype,
+                                     src=(filename,repr(lineno)))
+        
+        self.variables.append(variable)
+        self.all.append(variable)
+        self.output_order.append(('variable',variable))
+
+    def handle_struct(self, ctypestruct, filename, lineno):
+        # Called from within DataCollectingParser
+
+        # When we find an opaque struct, we make a StructDescription for it
+        # and record it in self.already_seen_opaque_structs. If we later
+        # find a transparent struct with the same tag, we fill in the
+        # opaque struct with the information from the transparent struct and
+        # move the opaque struct to the end of the struct list.
+        
+        name = "%s %s"%(ctypestruct.variety,ctypestruct.tag)
+        
+        if name in self.already_seen_structs:
+            return
+        
+        if ctypestruct.opaque:
+            if name not in self.already_seen_opaque_structs:
+                struct = StructDescription(ctypestruct.tag,
+                                           ctypestruct.variety,
+                                           None, # No members
+                                           True, # Opaque
+                                           ctypestruct,
+                                           src=(filename,str(lineno)))
+                
+                self.already_seen_opaque_structs[name]=struct
+                self.structs.append(struct)
+                self.all.append(struct)
+                self.output_order.append(('struct',struct))
+        
+        else:
+            # Register member types first so they are emitted before the
+            # struct body that references them.
+            for (membername,ctype) in ctypestruct.members:
+                ctype.visit(self)
+            
+            if name in self.already_seen_opaque_structs:
+                # Fill in older version
+                struct=self.already_seen_opaque_structs[name]
+                struct.opaque = False
+                struct.members = ctypestruct.members
+                struct.ctype = ctypestruct
+                struct.src = ctypestruct.src
+                
+                self.output_order.append(('struct-body',struct))
+                
+                del self.already_seen_opaque_structs[name]
+            
+            else:
+                struct = StructDescription(ctypestruct.tag,
+                                           ctypestruct.variety,
+                                           ctypestruct.members,
+                                           False, # Not opaque
+                                           src=(filename,str(lineno)),
+                                           ctype=ctypestruct)                
+                self.structs.append(struct)
+                self.all.append(struct)
+                self.output_order.append(('struct',struct))
+                self.output_order.append(('struct-body',struct))
+            
+            self.already_seen_structs.add(name)
+    
+    def handle_enum(self, ctypeenum, filename, lineno):
+        # Called from within DataCollectingParser.
+        
+        # Process for handling opaque enums is the same as process for opaque
+        # structs. See handle_struct() for more details.
+        
+        tag = ctypeenum.tag
+        if tag in self.already_seen_enums:
+            return
+            
+        if ctypeenum.opaque:
+            if tag not in self.already_seen_opaque_enums:
+                enum=EnumDescription(ctypeenum.tag,
+                             ctypeenum.enumerators,
+                             ctypeenum,
+                             src = (filename,str(lineno)))
+                enum.opaque = True
+                
+                self.already_seen_opaque_enums[tag]=enum
+                self.enums.append(enum)
+                self.all.append(enum)
+                self.output_order.append(('enum',enum))
+                
+        else:
+            if tag in self.already_seen_opaque_enums:
+                # Fill in older opaque version
+                enum = self.already_seen_opaque_enums[tag]
+                enum.opaque = False
+                enum.ctype = ctypeenum
+                enum.src = ctypeenum.src
+            
+                del self.already_seen_opaque_enums[tag]
+            
+            else:
+                enum=EnumDescription(ctypeenum.tag,
+                                None,
+                                src=(filename,str(lineno)),
+                                ctype=ctypeenum)
+                enum.opaque = False
+                
+                self.enums.append(enum)
+                self.all.append(enum)
+                self.output_order.append(('enum',enum))
+            
+            self.already_seen_enums.add(tag)
+            
+            # Each enumerator also becomes a module-level constant.
+            for (enumname,expr) in ctypeenum.enumerators:                
+                constant=ConstantDescription(enumname, expr,
+                                             src=(filename,lineno))
+                
+                self.constants.append(constant)
+                self.all.append(constant)
+                self.output_order.append(('constant',constant))
+    
+    def handle_macro(self, name, params, expr, filename, lineno):
+        # Called from within DataCollectingParser
+        src = (filename,lineno)
+        
+        # A #define with no value is represented as the constant True.
+        if expr==None:
+            expr = ConstantExpressionNode(True)
+            constant = ConstantDescription(name, expr, src)
+            self.constants.append(constant)
+            self.all.append(constant)
+            return
+        
+        expr.visit(self)
+        
+        if isinstance(expr,CtypesType):
+            if params:
+                # NOTE(review): positional args here put src in the
+                # "expr" slot (cf. the keyword src= calls elsewhere) --
+                # verify against MacroDescription in descriptions.py.
+                macro = MacroDescription(name, "", src)
+                macro.error("%s has parameters but evaluates to a type. " \
+                    "Ctypesgen does not support it." % macro.casual_name(),
+                    cls = 'macro')
+                self.macros.append(macro)
+                self.all.append(macro)
+                self.output_order.append(('macro',macro))
+            
+            else:
+                # A parameterless macro naming a type is just a typedef.
+                typedef = TypedefDescription(name, expr, src)
+                self.typedefs.append(typedef)
+                self.all.append(typedef)
+                self.output_order.append(('typedef',typedef))
+        
+        else:
+            macro = MacroDescription(name, params, expr, src)
+            self.macros.append(macro)
+            self.all.append(macro)
+            self.output_order.append(('macro',macro))
+        
+        # Macros could possibly contain things like __FILE__, __LINE__, etc...
+        # This could be supported, but it would be a lot of work. It would
+        # probably also bloat the Preamble considerably.
+        
+    def handle_error(self, message, filename, lineno):
+        # Called by CParser
+        error_message("%s:%d: %s" % (filename,lineno,message), cls='cparser')
+    
+    def handle_pp_error(self, message):
+        # Called by PreprocessorParser
+        error_message("%s: %s" % (self.options.cpp, message), cls = 'cparser')
+    
+    def handle_status(self, message):
+        # Called by CParser
+        status_message(message)
+    
+    def visit_struct(self, struct):
+        # CtypesTypeVisitor callback: register structs nested in types.
+        self.handle_struct(struct, struct.src[0], struct.src[1])
+    
+    def visit_enum(self,enum):
+        # CtypesTypeVisitor callback: register enums nested in types.
+        self.handle_enum(enum, enum.src[0], enum.src[1])
+    
+    def data(self):
+        # Bundle everything collected into the object returned by parse().
+        return DescriptionCollection(self.constants,
+                                     self.typedefs,
+                                     self.structs,
+                                     self.enums,
+                                     self.functions,
+                                     self.variables,
+                                     self.macros,
+                                     self.all,
+                                     self.output_order)

+ 879 - 0
lib/python/ctypes/ctypesgencore/parser/lex.py

@@ -0,0 +1,879 @@
+#-----------------------------------------------------------------------------
+# ply: lex.py
+#
+# Author: David M. Beazley (dave@dabeaz.com)
+# Modification for pyglet by Alex Holkner (alex.holkner@gmail.com)
+# Modification for ctypesgen by Tim Maxwell (timmaxw@gmail.com) <tm>
+#
+# Copyright (C) 2001-2006, David M. Beazley
+#
+# This library is free software; you can redistribute it and/or
+# modify it under the terms of the GNU Lesser General Public
+# License as published by the Free Software Foundation; either
+# version 2.1 of the License, or (at your option) any later version.
+# 
+# This library is distributed in the hope that it will be useful,
+# but WITHOUT ANY WARRANTY; without even the implied warranty of
+# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the GNU
+# Lesser General Public License for more details.
+# 
+# You should have received a copy of the GNU Lesser General Public
+# License along with this library; if not, write to the Free Software
+# Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA  02111-1307  USA
+# 
+# See the file LICENSE for a complete copy of the LGPL.
+#-----------------------------------------------------------------------------
+
+__version__ = "2.2"
+
+import re, sys, types, os.path
+
+# Regular expression used to match valid token names
+_is_identifier = re.compile(r'^[a-zA-Z0-9_]+$')
+
+# Available instance types.  This is used when lexers are defined by a class.
+# It's a little funky because I want to preserve backwards compatibility
+# with Python 2.0 where types.ObjectType is undefined.
+# (Python 2 only: types.InstanceType is the type of old-style class
+# instances; on Python 2.0 the fallback also shadows 'object' with a
+# dummy class so later isinstance checks still work.)
+
+try:
+   _INSTANCETYPE = (types.InstanceType, types.ObjectType)
+except AttributeError:
+   _INSTANCETYPE = types.InstanceType
+   class object: pass       # Note: needed if no new-style classes present
+
+# Exception thrown when invalid token encountered and no default error
+# handler is defined.
+class LexError(Exception):
+    # Raised by Lexer.token() when input cannot be matched and no t_error
+    # rule is defined (or the error rule fails to advance).  'text' holds
+    # the remaining unscanned input starting at the offending character.
+    def __init__(self,message,s):
+         self.args = (message,)
+         self.text = s
+
+# Token class
+class LexToken(object):
+    def __str__(self):
+        return "LexToken(%s,%r,%d,%d)" % (self.type,self.value,self.lineno,self.lexpos)
+    def __repr__(self):
+        return str(self)
+    def skip(self,n):
+        self.lexer.skip(n)
+
+# -----------------------------------------------------------------------------
+# Lexer class
+#
+# This class encapsulates all of the methods and data associated with a lexer.
+#
+#    input()          -  Store a new string in the lexer
+#    token()          -  Get the next token
+# -----------------------------------------------------------------------------
+
+class Lexer:
+    # One Lexer instance holds the compiled master regexes plus all the
+    # scanning state (input buffer, position, current lexer state).
+    # Typical use: lexer = lex(); lexer.input(text); lexer.token().
+    def __init__(self):
+        self.lexre = None             # Master regular expression. This is a list of 
+                                      # tuples (re,findex) where re is a compiled
+                                      # regular expression and findex is a list
+                                      # mapping regex group numbers to rules
+        self.lexretext = None         # Current regular expression strings
+        self.lexstatere = {}          # Dictionary mapping lexer states to master regexs
+        self.lexstateretext = {}      # Dictionary mapping lexer states to regex strings
+        self.lexstate = "INITIAL"     # Current lexer state
+        self.lexstatestack = []       # Stack of lexer states
+        self.lexstateinfo = None      # State information
+        self.lexstateignore = {}      # Dictionary of ignored characters for each state
+        self.lexstateerrorf = {}      # Dictionary of error functions for each state
+        self.lexreflags = 0           # Optional re compile flags
+        self.lexdata = None           # Actual input data (as a string)
+        self.lexpos = 0               # Current position in input text
+        self.lexlen = 0               # Length of the input text
+        self.lexerrorf = None         # Error rule (if any)
+        self.lextokens = None         # List of valid tokens
+        self.lexignore = ""           # Ignored characters
+        self.lexliterals = ""         # Literal characters that can be passed through
+        self.lexmodule = None         # Module
+        self.lineno = 1               # Current line number
+        self.lexdebug = 0             # Debugging mode
+        self.lexoptimize = 0          # Optimized mode
+
+    def clone(self,object=None):
+        # Return a shallow copy of this lexer; if 'object' is supplied the
+        # rule/error functions are rebound as methods of that object.
+        c = Lexer()
+        c.lexstatere = self.lexstatere
+        c.lexstateinfo = self.lexstateinfo
+        c.lexstateretext = self.lexstateretext
+        c.lexstate = self.lexstate
+        c.lexstatestack = self.lexstatestack
+        c.lexstateignore = self.lexstateignore
+        c.lexstateerrorf = self.lexstateerrorf
+        c.lexreflags = self.lexreflags
+        c.lexdata = self.lexdata
+        c.lexpos = self.lexpos
+        c.lexlen = self.lexlen
+        c.lextokens = self.lextokens
+        c.lexdebug = self.lexdebug
+        c.lineno = self.lineno
+        c.lexoptimize = self.lexoptimize
+        c.lexliterals = self.lexliterals
+        c.lexmodule   = self.lexmodule
+
+        # If the object parameter has been supplied, it means we are attaching the
+        # lexer to a new object.  In this case, we have to rebind all methods in
+        # the lexstatere and lexstateerrorf tables.
+
+        if object:
+            newtab = { }
+            for key, ritem in self.lexstatere.items():
+                newre = []
+                for cre, findex in ritem:
+                     newfindex = []
+                     for f in findex:
+                         if not f or not f[0]:
+                             newfindex.append(f)
+                             continue
+                         newfindex.append((getattr(object,f[0].__name__),f[1]))
+                newre.append((cre,newfindex))
+                newtab[key] = newre
+            c.lexstatere = newtab
+            c.lexstateerrorf = { }
+            for key, ef in self.lexstateerrorf.items():
+                c.lexstateerrorf[key] = getattr(object,ef.__name__)
+            c.lexmodule = object
+
+        # Set up other attributes
+        c.begin(c.lexstate)
+        return c
+
+    # ------------------------------------------------------------
+    # writetab() - Write lexer information to a table file
+    # ------------------------------------------------------------
+    # <tm> 25 June 2008 added 'outputdir'
+    def writetab(self,tabfile,outputdir=''):
+        # Serialize the lexer tables (tokens, literals, state regexes,
+        # error-rule names) to <outputdir>/<tabfile>.py for later reuse
+        # by readtab() in optimized mode.
+        tf = open(os.path.join(outputdir,tabfile)+".py","w")
+        tf.write("# %s.py. This file automatically created by PLY (version %s). Don't edit!\n" % (tabfile,__version__))
+        tf.write("_lextokens    = %s\n" % repr(self.lextokens))
+        tf.write("_lexreflags   = %s\n" % repr(self.lexreflags))
+        tf.write("_lexliterals  = %s\n" % repr(self.lexliterals))
+        tf.write("_lexstateinfo = %s\n" % repr(self.lexstateinfo))
+        
+        tabre = { }
+        for key, lre in self.lexstatere.items():
+             titem = []
+             for i in range(len(lre)):
+                  titem.append((self.lexstateretext[key][i],_funcs_to_names(lre[i][1])))
+             tabre[key] = titem
+
+        tf.write("_lexstatere   = %s\n" % repr(tabre))
+        tf.write("_lexstateignore = %s\n" % repr(self.lexstateignore))
+
+        taberr = { }
+        for key, ef in self.lexstateerrorf.items():
+             if ef:
+                  taberr[key] = ef.__name__
+             else:
+                  taberr[key] = None
+        tf.write("_lexstateerrorf = %s\n" % repr(taberr))
+        tf.close()
+
+    # ------------------------------------------------------------
+    # readtab() - Read lexer information from a tab file
+    # ------------------------------------------------------------
+    def readtab(self,tabfile,fdict):
+        # Import a previously written table module and rebuild the lexer
+        # from it; 'fdict' maps saved function names back to callables.
+        # NOTE: the table is loaded via a Python import, so 'tabfile' must
+        # be importable from sys.path.
+        exec "import %s as lextab" % tabfile
+        self.lextokens      = lextab._lextokens
+        self.lexreflags     = lextab._lexreflags
+        self.lexliterals    = lextab._lexliterals
+        self.lexstateinfo   = lextab._lexstateinfo
+        self.lexstateignore = lextab._lexstateignore
+        self.lexstatere     = { }
+        self.lexstateretext = { }
+        for key,lre in lextab._lexstatere.items():
+             titem = []
+             txtitem = []
+             for i in range(len(lre)):
+                  titem.append((re.compile(lre[i][0],lextab._lexreflags),_names_to_funcs(lre[i][1],fdict)))
+                  txtitem.append(lre[i][0])
+             self.lexstatere[key] = titem
+             self.lexstateretext[key] = txtitem
+        self.lexstateerrorf = { }
+        for key,ef in lextab._lexstateerrorf.items():
+             self.lexstateerrorf[key] = fdict[ef]
+        self.begin('INITIAL')
+         
+    # ------------------------------------------------------------
+    # input() - Push a new string into the lexer
+    # ------------------------------------------------------------
+    def input(self,s):
+        if not (isinstance(s,types.StringType) or isinstance(s,types.UnicodeType)):
+            raise ValueError, "Expected a string"
+        self.lexdata = s
+        self.lexpos = 0
+        self.lexlen = len(s)
+
+    # ------------------------------------------------------------
+    # begin() - Changes the lexing state
+    # ------------------------------------------------------------
+    def begin(self,state):
+        if not self.lexstatere.has_key(state):
+            raise ValueError, "Undefined state"
+        self.lexre = self.lexstatere[state]
+        self.lexretext = self.lexstateretext[state]
+        self.lexignore = self.lexstateignore.get(state,"")
+        self.lexerrorf = self.lexstateerrorf.get(state,None)
+        self.lexstate = state
+
+    # ------------------------------------------------------------
+    # push_state() - Changes the lexing state and saves old on stack
+    # ------------------------------------------------------------
+    def push_state(self,state):
+        self.lexstatestack.append(self.lexstate)
+        self.begin(state)
+
+    # ------------------------------------------------------------
+    # pop_state() - Restores the previous state
+    # ------------------------------------------------------------
+    def pop_state(self):
+        self.begin(self.lexstatestack.pop())
+
+    # ------------------------------------------------------------
+    # current_state() - Returns the current lexing state
+    # ------------------------------------------------------------
+    def current_state(self):
+        return self.lexstate
+
+    # ------------------------------------------------------------
+    # skip() - Skip ahead n characters
+    # ------------------------------------------------------------
+    def skip(self,n):
+        self.lexpos += n
+
+    # ------------------------------------------------------------
+    # token() - Return the next token from the Lexer
+    #
+    # Note: This function has been carefully implemented to be as fast
+    # as possible.  Don't make changes unless you really know what
+    # you are doing
+    # ------------------------------------------------------------
+    def token(self):
+        # Make local copies of frequently referenced attributes
+        lexpos    = self.lexpos
+        lexlen    = self.lexlen
+        lexignore = self.lexignore
+        lexdata   = self.lexdata
+
+        while lexpos < lexlen:
+            # This code provides some short-circuit code for whitespace, tabs, and other ignored characters
+            if lexdata[lexpos] in lexignore:
+                lexpos += 1
+                continue
+
+            # Look for a regular expression match
+            for lexre,lexindexfunc in self.lexre:
+                m = lexre.match(lexdata,lexpos)
+                if not m: continue
+
+                # Set last match in lexer so that rules can access it if they want
+                self.lexmatch = m
+
+                # Create a token for return
+                tok = LexToken()
+                tok.value = m.group()
+                tok.groups = m.groups()
+                tok.lineno = self.lineno
+                tok.lexpos = lexpos
+                tok.lexer = self
+
+                lexpos = m.end()
+                i = m.lastindex
+                func,tok.type = lexindexfunc[i]
+                self.lexpos = lexpos
+
+                if not func:
+                   # If no token type was set, it's an ignored token
+                   if tok.type: return tok      
+                   break
+
+                # if func not callable, it means it's an ignored token                
+                if not callable(func):
+                   break 
+
+                # If token is processed by a function, call it
+                newtok = func(tok)
+                
+                # Every function must return a token, if nothing, we just move to next token
+                if not newtok: 
+                    lexpos = self.lexpos        # This is here in case user has updated lexpos.
+                    
+                    # Added for pyglet/tools/wrapper/cparser.py by Alex
+                    # Holkner on 20/Jan/2007 
+                    lexdata = self.lexdata
+                    break
+                
+                # Verify type of the token.  If not in the token map, raise an error
+                if not self.lexoptimize:
+                    # Allow any single-character literal also for
+                    # pyglet/tools/wrapper/cparser.py by Alex Holkner on
+                    # 20/Jan/2007 
+                    if not self.lextokens.has_key(newtok.type) and len(newtok.type) > 1:
+                        raise LexError, ("%s:%d: Rule '%s' returned an unknown token type '%s'" % (
+                            func.func_code.co_filename, func.func_code.co_firstlineno,
+                            func.__name__, newtok.type),lexdata[lexpos:])
+
+                return newtok
+            else:
+                # No match, see if in literals
+                if lexdata[lexpos] in self.lexliterals:
+                    tok = LexToken()
+                    tok.value = lexdata[lexpos]
+                    tok.lineno = self.lineno
+                    tok.lexer = self
+                    tok.type = tok.value
+                    tok.lexpos = lexpos
+                    self.lexpos = lexpos + 1
+                    return tok
+        
+                # No match. Call t_error() if defined.
+                if self.lexerrorf:
+                    tok = LexToken()
+                    tok.value = self.lexdata[lexpos:]
+                    tok.lineno = self.lineno
+                    tok.type = "error"
+                    tok.lexer = self
+                    tok.lexpos = lexpos
+                    self.lexpos = lexpos
+                    newtok = self.lexerrorf(tok)
+                    if lexpos == self.lexpos:
+                        # Error method didn't change text position at all. This is an error.
+                        raise LexError, ("Scanning error. Illegal character '%s'" % (lexdata[lexpos]), lexdata[lexpos:])
+                    lexpos = self.lexpos
+                    if not newtok: continue
+                    return newtok
+
+                self.lexpos = lexpos
+                raise LexError, ("Illegal character '%s' at index %d" % (lexdata[lexpos],lexpos), lexdata[lexpos:])
+
+        # End of input: returns None so callers can use "while tok:" loops.
+        self.lexpos = lexpos + 1
+        if self.lexdata is None:
+             raise RuntimeError, "No input string given with input()"
+        return None
+        
+# -----------------------------------------------------------------------------
+# _validate_file()
+#
+# This checks to see if there are duplicated t_rulename() functions or strings
+# in the parser input file.  This is done using a simple regular expression
+# match on each line in the filename.
+# -----------------------------------------------------------------------------
+
+def _validate_file(filename):
+    # Scan a rule module's source for duplicate t_* function or string
+    # definitions.  Returns 1 (OK) when the file is not .py or cannot be
+    # read; returns 0 and prints a diagnostic for each redefinition.
+    import os.path
+    base,ext = os.path.splitext(filename)
+    if ext != '.py': return 1        # No idea what the file is. Return OK
+
+    try:
+        f = open(filename)
+        lines = f.readlines()
+        f.close()
+    except IOError:
+        return 1                       # Oh well
+
+    # One regex for "def t_NAME(" and one for "t_NAME =" style rules.
+    fre = re.compile(r'\s*def\s+(t_[a-zA-Z_0-9]*)\(')
+    sre = re.compile(r'\s*(t_[a-zA-Z_0-9]*)\s*=')
+    counthash = { }
+    linen = 1
+    noerror = 1
+    for l in lines:
+        m = fre.match(l)
+        if not m:
+            m = sre.match(l)
+        if m:
+            name = m.group(1)
+            prev = counthash.get(name)
+            if not prev:
+                counthash[name] = linen
+            else:
+                print "%s:%d: Rule %s redefined. Previously defined on line %d" % (filename,linen,name,prev)
+                noerror = 0
+        linen += 1
+    return noerror
+
+# -----------------------------------------------------------------------------
+# _funcs_to_names()
+#
+# Given a list of regular expression functions, this converts it to a list
+# suitable for output to a table file
+# -----------------------------------------------------------------------------
+
+def _funcs_to_names(funclist):
+    # Convert (function, tokenname) pairs into (funcname, tokenname)
+    # pairs so they can be written to a table file by writetab();
+    # entries with no function pass through unchanged.
+    result = []
+    for f in funclist:
+         if f and f[0]:
+             result.append((f[0].__name__,f[1]))
+         else:
+             result.append(f)
+    return result
+
+# -----------------------------------------------------------------------------
+# _names_to_funcs()
+#
+# Given a list of regular expression function names, this converts it back to
+# functions.
+# -----------------------------------------------------------------------------
+
+def _names_to_funcs(namelist,fdict):
+     # Inverse of _funcs_to_names(): look each saved function name up in
+     # 'fdict' to restore (function, tokenname) pairs when a table file
+     # is loaded by readtab().
+     result = []
+     for n in namelist:
+          if n and n[0]:
+              result.append((fdict[n[0]],n[1]))
+          else:
+              result.append(n)
+     return result
+
+# -----------------------------------------------------------------------------
+# _form_master_re()
+#
+# This function takes a list of all of the regex components and attempts to
+# form the master regular expression.  Given limitations in the Python re
+# module, it may be necessary to break the master regex into separate expressions.
+# -----------------------------------------------------------------------------
+
+def _form_master_re(relist,reflags,ldict):
+    if not relist: return []
+    regex = "|".join(relist)
+    try:
+        lexre = re.compile(regex,re.VERBOSE | reflags)
+
+        # Build the index to function map for the matching engine
+        lexindexfunc = [ None ] * (max(lexre.groupindex.values())+1)
+        for f,i in lexre.groupindex.items():
+            handle = ldict.get(f,None)
+            if type(handle) in (types.FunctionType, types.MethodType):
+                lexindexfunc[i] = (handle,handle.__name__[2:])
+            elif handle is not None:
+                # If rule was specified as a string, we build an anonymous
+                # callback function to carry out the action
+                if f.find("ignore_") > 0:
+                    lexindexfunc[i] = (None,None)
+                    print "IGNORE", f
+                else:
+                    lexindexfunc[i] = (None, f[2:])
+         
+        return [(lexre,lexindexfunc)],[regex]
+    except Exception,e:
+        # re.compile can fail when the combined pattern exceeds Python's
+        # group limit; recover by splitting the rule list in half and
+        # building the two master regexes recursively.
+        m = int(len(relist)/2)
+        if m == 0: m = 1
+        llist, lre = _form_master_re(relist[:m],reflags,ldict)
+        rlist, rre = _form_master_re(relist[m:],reflags,ldict)
+        return llist+rlist, lre+rre
+
+# -----------------------------------------------------------------------------
+# def _statetoken(s,names)
+#
+# Given a declaration name s of the form "t_" and a dictionary whose keys are
+# state names, this function returns a tuple (states,tokenname) where states
+# is a tuple of state names and tokenname is the name of the token.  For example,
+# calling this with s = "t_foo_bar_SPAM" might return (('foo','bar'),'SPAM')
+# -----------------------------------------------------------------------------
+
+def _statetoken(s,names):
+    # Split a declaration name "t_<state>_..._TOKEN" into its state tuple
+    # and token name (see banner comment above).  'ANY' expands to every
+    # declared state; with no state prefix the rule belongs to INITIAL.
+    nonstate = 1
+    parts = s.split("_")
+    for i in range(1,len(parts)):
+        if not names.has_key(parts[i]) and parts[i] != 'ANY': break
+    if i > 1:
+       states = tuple(parts[1:i])
+    else:
+       states = ('INITIAL',)
+
+    if 'ANY' in states:
+       states = tuple(names.keys())
+      
+    tokenname = "_".join(parts[i:])
+    return (states,tokenname)
+
+# -----------------------------------------------------------------------------
+# lex(module)
+#
+# Build all of the regular expression rules from definitions in the supplied module
+# -----------------------------------------------------------------------------
+# cls added for pyglet/tools/wrapper/cparser.py by Alex Holkner on 22/Jan/2007 
+# <tm> 25 June 2008 added 'outputdir'
+def lex(module=None,object=None,debug=0,optimize=0,lextab="lextab",reflags=0,nowarn=0,outputdir='',cls=Lexer):
+    # Build a Lexer from the t_* rules found in 'module'/'object' (or in
+    # the caller's globals when neither is given).  With optimize=1 a
+    # precomputed 'lextab' table is read if importable, else written
+    # afterwards; 'outputdir' (ctypesgen addition) is where it is written.
+    # 'cls' lets callers substitute a Lexer subclass.  Raises SyntaxError
+    # on any rule validation failure.
+    global lexer
+    ldict = None
+    stateinfo  = { 'INITIAL' : 'inclusive'}
+    error = 0
+    files = { }
+    lexobj = cls()
+    lexobj.lexdebug = debug
+    lexobj.lexoptimize = optimize
+    global token,input
+
+    if nowarn: warn = 0
+    else: warn = 1
+    
+    if object: module = object
+
+    if module:
+        # User supplied a module object.
+        if isinstance(module, types.ModuleType):
+            ldict = module.__dict__
+        elif isinstance(module, _INSTANCETYPE):
+            _items = [(k,getattr(module,k)) for k in dir(module)]
+            ldict = { }
+            for (i,v) in _items:
+                ldict[i] = v
+        else:
+            raise ValueError,"Expected a module or instance"
+        lexobj.lexmodule = module
+        
+    else:
+        # No module given.  We might be able to get information from the caller.
+        try:
+            raise RuntimeError
+        except RuntimeError:
+            e,b,t = sys.exc_info()
+            f = t.tb_frame
+            f = f.f_back           # Walk out to our calling function
+            ldict = f.f_globals    # Grab its globals dictionary
+
+    if optimize and lextab:
+        try:
+            lexobj.readtab(lextab,ldict)
+            token = lexobj.token
+            input = lexobj.input
+            lexer = lexobj
+            return lexobj
+        
+        except ImportError:
+            pass
+        
+    # Get the tokens, states, and literals variables (if any)
+    if (module and isinstance(module,_INSTANCETYPE)):
+        tokens   = getattr(module,"tokens",None)
+        states   = getattr(module,"states",None)
+        literals = getattr(module,"literals","")
+    else:
+        tokens   = ldict.get("tokens",None)
+        states   = ldict.get("states",None)
+        literals = ldict.get("literals","")
+        
+    if not tokens:
+        raise SyntaxError,"lex: module does not define 'tokens'"
+    if not (isinstance(tokens,types.ListType) or isinstance(tokens,types.TupleType)):
+        raise SyntaxError,"lex: tokens must be a list or tuple."
+
+    # Build a dictionary of valid token names
+    lexobj.lextokens = { }
+    if not optimize:
+        for n in tokens:
+            if not _is_identifier.match(n):
+                print "lex: Bad token name '%s'" % n
+                error = 1
+            if warn and lexobj.lextokens.has_key(n):
+                print "lex: Warning. Token '%s' multiply defined." % n
+            lexobj.lextokens[n] = None
+    else:
+        for n in tokens: lexobj.lextokens[n] = None
+
+    if debug:
+        print "lex: tokens = '%s'" % lexobj.lextokens.keys()
+
+    try:
+         for c in literals:
+               if not (isinstance(c,types.StringType) or isinstance(c,types.UnicodeType)) or len(c) > 1:
+                    print "lex: Invalid literal %s. Must be a single character" % repr(c)
+                    error = 1
+                    continue
+
+    except TypeError:
+         print "lex: Invalid literals specification. literals must be a sequence of characters."
+         error = 1
+
+    lexobj.lexliterals = literals
+
+    # Build statemap
+    if states:
+         if not (isinstance(states,types.TupleType) or isinstance(states,types.ListType)):
+              print "lex: states must be defined as a tuple or list."
+              error = 1
+         else:
+              for s in states:
+                    if not isinstance(s,types.TupleType) or len(s) != 2:
+                           print "lex: invalid state specifier %s. Must be a tuple (statename,'exclusive|inclusive')" % repr(s)
+                           error = 1
+                           continue
+                    name, statetype = s
+                    if not isinstance(name,types.StringType):
+                           print "lex: state name %s must be a string" % repr(name)
+                           error = 1
+                           continue
+                    if not (statetype == 'inclusive' or statetype == 'exclusive'):
+                           print "lex: state type for state %s must be 'inclusive' or 'exclusive'" % name
+                           error = 1
+                           continue
+                    if stateinfo.has_key(name):
+                           print "lex: state '%s' already defined." % name
+                           error = 1
+                           continue
+                    stateinfo[name] = statetype
+
+    # Get a list of symbols with the t_ or s_ prefix
+    tsymbols = [f for f in ldict.keys() if f[:2] == 't_' ]
+
+    # Now build up a list of functions and a list of strings
+
+    funcsym =  { }        # Symbols defined as functions
+    strsym =   { }        # Symbols defined as strings
+    toknames = { }        # Mapping of symbols to token names
+
+    for s in stateinfo.keys():
+         funcsym[s] = []
+         strsym[s] = []
+
+    ignore   = { }        # Ignore strings by state
+    errorf   = { }        # Error functions by state
+
+    if len(tsymbols) == 0:
+        raise SyntaxError,"lex: no rules of the form t_rulename are defined."
+
+    for f in tsymbols:
+        t = ldict[f]
+        states, tokname = _statetoken(f,stateinfo)
+        toknames[f] = tokname
+
+        if callable(t):
+            for s in states: funcsym[s].append((f,t))
+        elif (isinstance(t, types.StringType) or isinstance(t,types.UnicodeType)):
+            for s in states: strsym[s].append((f,t))
+        else:
+            print "lex: %s not defined as a function or string" % f
+            error = 1
+
+    # Sort the functions by line number
+    for f in funcsym.values():
+        f.sort(lambda x,y: cmp(x[1].func_code.co_firstlineno,y[1].func_code.co_firstlineno))
+
+    # Sort the strings by regular expression length
+    for s in strsym.values():
+        s.sort(lambda x,y: (len(x[1]) < len(y[1])) - (len(x[1]) > len(y[1])))
+
+    regexs = { }
+
+    # Build the master regular expressions
+    for state in stateinfo.keys():
+        regex_list = []
+
+        # Add rules defined by functions first
+        for fname, f in funcsym[state]:
+            line = f.func_code.co_firstlineno
+            file = f.func_code.co_filename
+            files[file] = None
+            tokname = toknames[fname]
+
+            ismethod = isinstance(f, types.MethodType)
+
+            if not optimize:
+                nargs = f.func_code.co_argcount
+                if ismethod:
+                    reqargs = 2
+                else:
+                    reqargs = 1
+                if nargs > reqargs:
+                    print "%s:%d: Rule '%s' has too many arguments." % (file,line,f.__name__)
+                    error = 1
+                    continue
+
+                if nargs < reqargs:
+                    print "%s:%d: Rule '%s' requires an argument." % (file,line,f.__name__)
+                    error = 1
+                    continue
+
+                if tokname == 'ignore':
+                    print "%s:%d: Rule '%s' must be defined as a string." % (file,line,f.__name__)
+                    error = 1
+                    continue
+        
+            if tokname == 'error':
+                errorf[state] = f
+                continue
+
+            if f.__doc__:
+                if not optimize:
+                    try:
+                        c = re.compile("(?P<%s>%s)" % (f.__name__,f.__doc__), re.VERBOSE | reflags)
+                        if c.match(""):
+                             print "%s:%d: Regular expression for rule '%s' matches empty string." % (file,line,f.__name__)
+                             error = 1
+                             continue
+                    except re.error,e:
+                        print "%s:%d: Invalid regular expression for rule '%s'. %s" % (file,line,f.__name__,e)
+                        if '#' in f.__doc__:
+                             print "%s:%d. Make sure '#' in rule '%s' is escaped with '\\#'." % (file,line, f.__name__)                 
+                        error = 1
+                        continue
+
+                    if debug:
+                        print "lex: Adding rule %s -> '%s' (state '%s')" % (f.__name__,f.__doc__, state)
+
+                # Okay. The regular expression seemed okay.  Let's append it to the master regular
+                # expression we're building
+  
+                regex_list.append("(?P<%s>%s)" % (f.__name__,f.__doc__))
+            else:
+                print "%s:%d: No regular expression defined for rule '%s'" % (file,line,f.__name__)
+
+        # Now add all of the simple rules
+        for name,r in strsym[state]:
+            tokname = toknames[name]       
+
+            if tokname == 'ignore':
+                 ignore[state] = r
+                 continue
+
+            if not optimize:
+                if tokname == 'error':
+                    raise SyntaxError,"lex: Rule '%s' must be defined as a function" % name
+                    error = 1
+                    continue
+        
+                if not lexobj.lextokens.has_key(tokname) and tokname.find("ignore_") < 0:
+                    print "lex: Rule '%s' defined for an unspecified token %s." % (name,tokname)
+                    error = 1
+                    continue
+                try:
+                    c = re.compile("(?P<%s>%s)" % (name,r),re.VERBOSE | reflags)
+                    if (c.match("")):
+                         print "lex: Regular expression for rule '%s' matches empty string." % name
+                         error = 1
+                         continue
+                except re.error,e:
+                    print "lex: Invalid regular expression for rule '%s'. %s" % (name,e)
+                    if '#' in r:
+                         print "lex: Make sure '#' in rule '%s' is escaped with '\\#'." % name
+
+                    error = 1
+                    continue
+                if debug:
+                    print "lex: Adding rule %s -> '%s' (state '%s')" % (name,r,state)
+                
+            regex_list.append("(?P<%s>%s)" % (name,r))
+
+        if not regex_list:
+             print "lex: No rules defined for state '%s'" % state
+             error = 1
+
+        regexs[state] = regex_list
+
+
+    if not optimize:
+        for f in files.keys(): 
+           if not _validate_file(f):
+                error = 1
+
+    if error:
+        raise SyntaxError,"lex: Unable to build lexer."
+
+    # From this point forward, we're reasonably confident that we can build the lexer.
+    # No more errors will be generated, but there might be some warning messages.
+
+    # Build the master regular expressions
+
+    for state in regexs.keys():
+        lexre, re_text = _form_master_re(regexs[state],reflags,ldict)
+        lexobj.lexstatere[state] = lexre
+        lexobj.lexstateretext[state] = re_text
+        if debug:
+            for i in range(len(re_text)):
+                 print "lex: state '%s'. regex[%d] = '%s'" % (state, i, re_text[i])
+
+    # For inclusive states, we need to add the INITIAL state
+    for state,type in stateinfo.items():
+        if state != "INITIAL" and type == 'inclusive':
+             lexobj.lexstatere[state].extend(lexobj.lexstatere['INITIAL'])
+             lexobj.lexstateretext[state].extend(lexobj.lexstateretext['INITIAL'])
+
+    lexobj.lexstateinfo = stateinfo
+    lexobj.lexre = lexobj.lexstatere["INITIAL"]
+    lexobj.lexretext = lexobj.lexstateretext["INITIAL"]
+
+    # Set up ignore variables
+    lexobj.lexstateignore = ignore
+    lexobj.lexignore = lexobj.lexstateignore.get("INITIAL","")
+
+    # Set up error functions
+    lexobj.lexstateerrorf = errorf
+    lexobj.lexerrorf = errorf.get("INITIAL",None)
+    if warn and not lexobj.lexerrorf:
+        print "lex: Warning. no t_error rule is defined."
+
+    # Check state information for ignore and error rules
+    for s,stype in stateinfo.items():
+        if stype == 'exclusive':
+              if warn and not errorf.has_key(s):
+                   print "lex: Warning. no error rule is defined for exclusive state '%s'" % s
+              if warn and not ignore.has_key(s) and lexobj.lexignore:
+                   print "lex: Warning. no ignore rule is defined for exclusive state '%s'" % s
+        elif stype == 'inclusive':
+              if not errorf.has_key(s):
+                   errorf[s] = errorf.get("INITIAL",None)
+              if not ignore.has_key(s):
+                   ignore[s] = ignore.get("INITIAL","")
+   
+
+    # Create global versions of the token() and input() functions
+    token = lexobj.token
+    input = lexobj.input
+    lexer = lexobj
+
+    # If in optimize mode, we write the lextab   
+    if lextab and optimize:
+        lexobj.writetab(lextab,outputdir)
+
+    return lexobj
+
+# -----------------------------------------------------------------------------
+# runmain()
+#
+# This runs the lexer as a main program
+# -----------------------------------------------------------------------------
+
+def runmain(lexer=None,data=None):
+    # Drive a lexer as a standalone program: read input from 'data',
+    # sys.argv[1], or stdin, then print every token until exhaustion.
+    # Falls back to the module-level token()/input() set up by lex()
+    # when no explicit lexer is given.
+    if not data:
+        try:
+            filename = sys.argv[1]
+            f = open(filename)
+            data = f.read()
+            f.close()
+        except IndexError:
+            print "Reading from standard input (type EOF to end):"
+            data = sys.stdin.read()
+
+    if lexer:
+        _input = lexer.input
+    else:
+        _input = input
+    _input(data)
+    if lexer:
+        _token = lexer.token
+    else:
+        _token = token
+        
+    while 1:
+        tok = _token()
+        if not tok: break
+        print "(%s,%r,%d,%d)" % (tok.type, tok.value, tok.lineno,tok.lexpos)
+        
+        
+
+# -----------------------------------------------------------------------------
+# @TOKEN(regex)
+#
+# This decorator function can be used to set the regex expression on a function
+# when its docstring might need to be set in an alternative way
+# -----------------------------------------------------------------------------
+
+def TOKEN(r):
+    # Decorator that installs 'r' as the function's docstring, which is
+    # where PLY looks for a rule's regular expression.  Useful when the
+    # pattern is computed and cannot be written as a literal docstring.
+    def set_doc(f):
+        f.__doc__ = r
+        return f
+    return set_doc
+
+# Alternative spelling of the TOKEN decorator
+Token = TOKEN
+

Failā izmaiņas netiks attēlotas, jo tās ir par lielu
+ 8 - 0
lib/python/ctypes/ctypesgencore/parser/lextab.py


Failā izmaiņas netiks attēlotas, jo tās ir par lielu
+ 282 - 0
lib/python/ctypes/ctypesgencore/parser/parsetab.py


+ 287 - 0
lib/python/ctypes/ctypesgencore/parser/pplexer.py

@@ -0,0 +1,287 @@
+#!/usr/bin/env python
+
+'''Preprocess a C source file using gcc and convert the result into
+   a token stream
+
+Reference is C99:
+  * http://www.open-std.org/JTC1/SC22/WG14/www/docs/n1124.pdf
+
+'''
+
+__docformat__ = 'restructuredtext'
+
+import os, re, shlex, sys, tokenize, lex, yacc, traceback
+import ctypes
+from lex import TOKEN
+
+# Token names exposed to the parser.
+tokens = (
+    'HEADER_NAME', 'IDENTIFIER', 'PP_NUMBER', 'CHARACTER_CONSTANT',
+    'STRING_LITERAL', 'OTHER',
+
+    'PTR_OP', 'INC_OP', 'DEC_OP', 'LEFT_OP', 'RIGHT_OP', 'LE_OP', 'GE_OP',
+    'EQ_OP', 'NE_OP', 'AND_OP', 'OR_OP', 'MUL_ASSIGN', 'DIV_ASSIGN',
+    'MOD_ASSIGN', 'ADD_ASSIGN', 'SUB_ASSIGN', 'LEFT_ASSIGN', 'RIGHT_ASSIGN',
+    'AND_ASSIGN', 'XOR_ASSIGN', 'OR_ASSIGN',  'PERIOD', 'ELLIPSIS',
+
+    'LPAREN', 'NEWLINE',
+    
+    'PP_DEFINE', 'PP_DEFINE_NAME', 'PP_DEFINE_MACRO_NAME', 'PP_MACRO_PARAM',
+    'PP_STRINGIFY', 'PP_IDENTIFIER_PASTE', 'PP_END_DEFINE'
+)
+
+# Lexer states: the exclusive DEFINE state is active while scanning the
+# body of a "#define" line.
+states = [('DEFINE',"exclusive")]
+
+# Named sub-patterns used to assemble the token regexes below, in the
+# style of a classic C lex specification.
+subs = {
+    'D': '[0-9]',
+    'L': '[a-zA-Z_]',
+    'H': '[a-fA-F0-9]',
+    'E': '[Ee][+-]?\s*{D}+',
+    'FS': '[FflL]',
+    'IS': '[uUlL]*',
+}
+# Helper: substitute {foo} with subs[foo] in string (makes regexes more lexy)
+sub_pattern = re.compile('{([^}]*)}')
+def sub_repl_match(m):
+    return subs[m.groups()[0]]
+def sub(s):
+    return sub_pattern.sub(sub_repl_match, s)
+
+# --------------------------------------------------------------------------
+# Token value types
+# --------------------------------------------------------------------------
+
+# Numbers represented as int and float types.
+# For all other tokens, type is just str representation.
+
+class StringLiteral(str):
+    """A str subclass holding the unescaped contents of a C string
+    literal.  The surrounding double quotes are stripped and C-style
+    backslash escapes decoded (Python 2 'string_escape' codec)."""
+    def __new__(cls, value):
+        assert value[0] == '"' and value[-1] == '"'
+        # Unescaping probably not perfect but close enough.
+        value = value[1:-1].decode('string_escape')
+        return str.__new__(cls, value)
+
+# --------------------------------------------------------------------------
+# Token declarations
+# --------------------------------------------------------------------------
+
+punctuators = {
+    # value: (regex, type)
+    r'...': (r'\.\.\.', 'ELLIPSIS'),
+    r'>>=': (r'>>=', 'RIGHT_ASSIGN'),
+    r'<<=': (r'<<=', 'LEFT_ASSIGN'),
+    r'+=': (r'\+=', 'ADD_ASSIGN'),
+    r'-=': (r'-=', 'SUB_ASSIGN'),
+    r'*=': (r'\*=', 'MUL_ASSIGN'),
+    r'/=': (r'/=', 'DIV_ASSIGN'),
+    r'%=': (r'%=', 'MOD_ASSIGN'),
+    r'&=': (r'&=', 'AND_ASSIGN'),
+    r'^=': (r'\^=', 'XOR_ASSIGN'),
+    r'|=': (r'\|=', 'OR_ASSIGN'),
+    r'>>': (r'>>', 'RIGHT_OP'),
+    r'<<': (r'<<', 'LEFT_OP'),
+    r'++': (r'\+\+', 'INC_OP'),
+    r'--': (r'--', 'DEC_OP'),
+    r'->': (r'->', 'PTR_OP'),
+    r'&&': (r'&&', 'AND_OP'),
+    r'||': (r'\|\|', 'OR_OP'),
+    r'<=': (r'<=', 'LE_OP'),
+    r'>=': (r'>=', 'GE_OP'),
+    r'==': (r'==', 'EQ_OP'),
+    r'!=': (r'!=', 'NE_OP'),
+    r'<:': (r'<:', '['),
+    r':>': (r':>', ']'),
+    r'<%': (r'<%', '{'),
+    r'%>': (r'%>', '}'),
+    r';': (r';', ';'),
+    r'{': (r'{', '{'),
+    r'}': (r'}', '}'),
+    r',': (r',', ','),
+    r':': (r':', ':'),
+    r'=': (r'=', '='),
+    r')': (r'\)', ')'),
+    r'[': (r'\[', '['),
+    r']': (r']', ']'),
+    r'.': (r'\.', 'PERIOD'),
+    r'&': (r'&', '&'),
+    r'!': (r'!', '!'),
+    r'~': (r'~', '~'),
+    r'-': (r'-', '-'),
+    r'+': (r'\+', '+'),
+    r'*': (r'\*', '*'),
+    r'/': (r'/', '/'),
+    r'%': (r'%', '%'),
+    r'<': (r'<', '<'),
+    r'>': (r'>', '>'),
+    r'^': (r'\^', '^'),
+    r'|': (r'\|', '|'),
+    r'?': (r'\?', '?')
+}
+
+def punctuator_regex(punctuators):
+    """Build one alternation regex that matches any punctuator, longest
+    patterns first so e.g. '>>=' wins over '>>'."""
+    punctuator_regexes = [v[0] for v in punctuators.values()]
+    # Comparator sort with cmp() is Python 2 only; equivalent to
+    # sort(key=len, reverse=True).
+    punctuator_regexes.sort(lambda a, b: -cmp(len(a), len(b)))
+    return '(%s)' % '|'.join(punctuator_regexes)
+
+# Process line-number directives from the preprocessor
+# See http://docs.freebsd.org/info/cpp/cpp.info.Output.html
+DIRECTIVE = r'\#\s+(\d+)\s+"([^"]+)"[ \d]*\n'
+@TOKEN(DIRECTIVE)
+def t_ANY_directive(t):
+    # Track the original file/line so diagnostics can point at the
+    # pre-preprocessed source.  The directive itself is swallowed.
+    # NOTE(review): 't.groups' is not a standard PLY LexToken attribute,
+    # and tuple indices 1/2 would not line up with regex groups 1/2 --
+    # verify against the bundled lex.py (the usual spelling is
+    # t.lexer.lexmatch.group(...)).
+    t.lexer.filename = t.groups[2]
+    t.lexer.lineno = int(t.groups[1])
+    return None
+
+@TOKEN(punctuator_regex(punctuators))
+def t_ANY_punctuator(t):
+    # Map the matched text to its token type via the punctuators table.
+    t.type = punctuators[t.value][1]
+    return t
+
+IDENTIFIER = sub('{L}({L}|{D})*')
+@TOKEN(IDENTIFIER)
+def t_INITIAL_identifier(t):
+    # Plain identifier outside of a #define.
+    t.type = 'IDENTIFIER'
+    return t
+
+@TOKEN(IDENTIFIER)
+def t_DEFINE_identifier(t):
+    # Identifier inside a #define.  The first one after '#define' is the
+    # macro name; later ones are either macro parameters or ordinary
+    # identifiers.
+    if t.lexer.next_is_define_name:
+        # This identifier is the name of a macro
+        # We need to look ahead and see if this macro takes parameters or not.
+        if t.lexpos + len(t.value) < t.lexer.lexlen and \
+            t.lexer.lexdata[t.lexpos + len(t.value)] == '(':
+            
+            t.type = 'PP_DEFINE_MACRO_NAME'
+            
+            # Look ahead and read macro parameter list
+            lexdata = t.lexer.lexdata
+            pos = t.lexpos + len(t.value) + 1
+            while lexdata[pos] not in '\n)':
+                pos+=1
+            params = lexdata[t.lexpos+len(t.value)+1 : pos]
+            paramlist = [x.strip() for x in params.split(",") if x.strip()]
+            # Remember the parameters so later identifiers in this define
+            # can be classified as PP_MACRO_PARAM.
+            t.lexer.macro_params = paramlist
+                    
+        else:
+            t.type = 'PP_DEFINE_NAME'
+        
+        t.lexer.next_is_define_name = False
+    elif t.value in t.lexer.macro_params:
+        t.type = 'PP_MACRO_PARAM'
+    else:
+        t.type = 'IDENTIFIER'
+    return t
+
+FLOAT_LITERAL = sub(r"(?P<p1>{D}+)?(?P<dp>[.]?)(?P<p2>(?(p1){D}*|{D}+))" \
+                    r"(?P<exp>(?:[Ee][+-]?{D}+)?)(?P<suf>{FS}?)(?!\w)")
+@TOKEN(FLOAT_LITERAL)
+def t_ANY_float(t):
+    # Decimal constants.  The value is re-emitted with a one-letter
+    # prefix telling the parser how to interpret it: "f" float,
+    # "l" long, "i" int.
+    t.type = 'PP_NUMBER'
+    m = t.lexer.lexmatch
+    
+    p1 = m.group("p1")
+    dp = m.group("dp")
+    p2 = m.group("p2")
+    exp = m.group("exp")
+    suf = m.group("suf")
+    
+    # A decimal point, exponent, or F suffix marks it as floating point.
+    if dp or exp or (suf and suf in ("Ff")):
+        s = m.group(0)
+        if suf:
+            s = s[:-1]
+        # Attach a prefix so the parser can figure out if should become an
+        # integer, float, or long
+        t.value = "f" + s
+    elif (suf and suf in ("Ll")):
+        t.value = "l" + p1
+    else:
+        t.value = "i" + p1
+        
+    return t
+
+INT_LITERAL = sub(r"(?P<p1>(?:0x{H}+)|(?:{D}+))(?P<suf>{IS})")
+@TOKEN(INT_LITERAL)
+def t_ANY_int(t):
+    # Integer constants (decimal, octal or hex); converted to base 10
+    # and re-emitted with an "l" (long) or "i" (int) prefix.
+    t.type = 'PP_NUMBER'
+    m = t.lexer.lexmatch
+    
+    # NOTE(review): numeric group indices depend on how the master lexer
+    # combines the rule regexes; testing group(3) for the suffix but
+    # reading the digits from group(2) looks fragile -- the named groups
+    # ("p1", "suf") would be safer.  Verify against lex.py.
+    if "L" in m.group(3) or "l" in m.group(2):
+        prefix = "l"
+    else:
+        prefix = "i"
+    
+    g1 = m.group(2)
+    if g1.startswith("0x"):
+        # Convert base from hexadecimal
+        g1 = str(long(g1[2:],16))
+    elif g1[0]=="0":
+        # Convert base from octal
+        g1 = str(long(g1,8))
+    
+    t.value = prefix + g1
+        
+    return t
+
+CHARACTER_CONSTANT = sub(r"L?'(\\.|[^\\'])+'")
+@TOKEN(CHARACTER_CONSTANT)
+def t_ANY_character_constant(t):
+    # Character constants are kept as raw source text.
+    t.type = 'CHARACTER_CONSTANT'
+    return t
+
+STRING_LITERAL = sub(r'L?"(\\.|[^\\"])*"')
+@TOKEN(STRING_LITERAL)
+def t_ANY_string_literal(t):
+    # Store the unescaped contents (StringLiteral), not the raw text.
+    t.type = 'STRING_LITERAL'
+    t.value = StringLiteral(t.value)
+    return t
+
+@TOKEN(r'\(')
+def t_ANY_lparen(t):
+    # A '(' NOT preceded by whitespace is significant when it follows a
+    # macro name (function-like macro), so it gets its own LPAREN type.
+    if t.lexpos == 0 or t.lexer.lexdata[t.lexpos-1] not in (' \t\f\v\n'):
+        t.type = 'LPAREN'
+    else:
+        t.type = '('
+    return t
+
+@TOKEN(r'\n')
+def t_INITIAL_newline(t):
+    # Newlines outside a #define only bump the line count.
+    t.lexer.lineno += 1
+    return None
+
+@TOKEN(r'\#define')
+def t_INITIAL_pp_define(t):
+    # Enter the DEFINE state; the next identifier is the macro name.
+    t.type = 'PP_DEFINE'
+    t.lexer.begin("DEFINE")
+    t.lexer.next_is_define_name = True
+    t.lexer.macro_params = set()
+    return t
+
+@TOKEN(r'\n')
+def t_DEFINE_newline(t):
+    # End of a #define line: emit PP_END_DEFINE and resume normal lexing.
+    # NOTE(review): unlike t_INITIAL_newline this does not increment
+    # t.lexer.lineno -- line counts may drift inside defines; verify.
+    t.type = 'PP_END_DEFINE'
+    t.lexer.begin("INITIAL")
+    del t.lexer.macro_params
+    
+    # Damage control in case the token immediately after the #define failed
+    # to handle this
+    t.lexer.next_is_define_name = False
+    return t
+
+@TOKEN(r'(\#\#)|(\#)')
+def t_DEFINE_pp_param_op(t):
+    # '#' stringifies a macro parameter; '##' pastes identifiers.
+    if t.value=='#':
+        t.type = 'PP_STRINGIFY'
+    else:
+        t.type = 'PP_IDENTIFIER_PASTE'
+    return t
+
+def t_INITIAL_error(t):
+    # Unrecognized text is passed through as an OTHER token.
+    t.type = 'OTHER'
+    return t
+
+def t_DEFINE_error(t):
+    # Inside a #define, emit a single OTHER character and advance past it.
+    t.type = 'OTHER'
+    t.value = t.value[0]
+    t.lexer.lexpos+=1 # Skip it if it's an error in a #define
+    return t
+
+# Whitespace ignored in every state (newlines handled above).
+t_ANY_ignore = ' \t\v\f\r'

+ 197 - 0
lib/python/ctypes/ctypesgencore/parser/preprocessor.py

@@ -0,0 +1,197 @@
+#!/usr/bin/env python
+
+'''Preprocess a C source file using gcc and convert the result into
+   a token stream
+
+Reference is C99:
+  * http://www.open-std.org/JTC1/SC22/WG14/www/docs/n1124.pdf
+
+'''
+
+__docformat__ = 'restructuredtext'
+
+import os, re, shlex, sys, tokenize, lex, yacc, traceback, subprocess
+import ctypes
+from lex import TOKEN
+import pplexer
+
+# --------------------------------------------------------------------------
+# Lexers
+# --------------------------------------------------------------------------
+
+class PreprocessorLexer(lex.Lexer):
+    """PLY lexer for gcc-preprocessed C source that tracks the current
+    filename and supports a stack of nested input buffers."""
+    def __init__(self):
+        lex.Lexer.__init__(self)
+        self.filename = '<input>'
+        self.in_define = False
+
+    def input(self, data, filename=None):
+        # Reset per-run state before handing the data to the base lexer.
+        if filename:
+            self.filename = filename 
+        self.lasttoken = None
+        self.input_stack = []
+
+        lex.Lexer.input(self, data)
+
+    def push_input(self, data, filename):
+        # Suspend the current buffer and start lexing 'data' instead.
+        self.input_stack.append(
+            (self.lexdata, self.lexpos, self.filename, self.lineno))
+        self.lexdata = data
+        self.lexpos = 0
+        self.lineno = 1
+        self.filename = filename
+        self.lexlen = len(self.lexdata)
+
+    def pop_input(self):
+        # Resume the most recently suspended buffer.
+        self.lexdata, self.lexpos, self.filename, self.lineno = \
+            self.input_stack.pop()
+        self.lexlen = len(self.lexdata)
+
+    def token(self):
+        # Return the next token, transparently popping back to outer
+        # buffers when an inner one is exhausted.
+        result = lex.Lexer.token(self)
+        while result is None and self.input_stack:
+            self.pop_input()
+            result = lex.Lexer.token(self)
+
+        if result:
+            self.lasttoken = result.type
+            # Stamp each token with the file it came from.
+            result.filename = self.filename
+        else:
+            self.lasttoken = None
+
+        return result
+
+class TokenListLexer(object):
+    """Minimal lexer-like adapter that replays a pre-built list of
+    tokens, for feeding already-lexed input to a yacc parser."""
+    def __init__(self, tokens):
+        self.tokens = tokens
+        self.pos = 0
+
+    def token(self):
+        # Return the next stored token, or None when exhausted.
+        if self.pos < len(self.tokens):
+            t = self.tokens[self.pos]
+            self.pos += 1
+            return t
+        else:
+            return None
+
+def symbol_to_token(sym):
+    """Extract the LexToken held by a parser symbol; tokens pass through."""
+    if isinstance(sym, yacc.YaccSymbol):
+        return sym.value
+    elif isinstance(sym, lex.LexToken):
+        return sym
+    else:
+        assert False, 'Not a symbol: %r' % sym
+
+def create_token(type, value, production=None):
+    '''Create a token of type and value, at the position where 'production'
+    was reduced.  Don't specify production if the token is built-in'''
+    t = lex.LexToken()
+    t.type = type
+    t.value = value
+    t.lexpos = -1
+    if production:
+        # Borrow position info from the first symbol of the production.
+        t.lineno = production.slice[1].lineno
+        t.filename = production.slice[1].filename
+    else:
+        t.lineno = -1
+        t.filename = '<builtin>'
+    return t
+
+# --------------------------------------------------------------------------
+# Grammars
+# --------------------------------------------------------------------------
+
+class PreprocessorParser(object):
+    """Runs the C preprocessor (gcc -E style) over a header file and
+    lexes the preprocessed output into self.output, a list of tokens."""
+    def __init__(self,options,cparser):
+        # Baseline -D definitions that neutralize compiler extensions the
+        # C grammar cannot handle, and advertise CTYPESGEN to the headers.
+        self.defines = ["inline=", "__inline__=", "__extension__=",
+                        "_Bool=uint8_t", "__const=const", "__asm__(x)=",
+                        "__asm(x)=", "CTYPESGEN=1"]
+
+        # On OSX, explicitly add these defines to keep from getting syntax
+        # errors in the OSX standard headers.
+        if os.uname()[0] == 'Darwin':
+            self.defines += ["__uint16_t=uint16_t",
+                             "__uint32_t=uint32_t",
+                             "__uint64_t=uint64_t"]
+
+        self.matches = []
+        self.output = []
+        # optimize=1 makes lex reuse/write the precomputed 'lextab' table
+        # in this package's directory instead of rebuilding the regexes.
+        self.lexer = lex.lex(cls=PreprocessorLexer,
+                             optimize=1,
+                             lextab='lextab',
+                             outputdir=os.path.dirname(__file__),
+                             module=pplexer)
+        
+        self.options = options
+        self.cparser = cparser # An instance of CParser
+
+    def parse(self, filename):
+        """Parse a file and save its output"""
+        
+        # Build the preprocessor command line: -dD keeps #define lines in
+        # the output, and __GNUC__ is undefined so gcc-only code is skipped.
+        cmd = self.options.cpp
+        cmd += " -U __GNUC__ -dD"
+        for path in self.options.include_search_paths:
+            cmd += " -I%s" % path 
+        for define in self.defines:
+            cmd += ' "-D%s"' % define
+        cmd += " " + filename
+
+        self.cparser.handle_status(cmd)
+        
+        # NOTE(review): shell=True with string-interpolated paths; quoting
+        # of -I paths containing spaces is incomplete.
+        pp = subprocess.Popen(cmd,
+                              shell = True,
+                              stdout = subprocess.PIPE,
+                              stderr = subprocess.PIPE)
+        ppout, pperr = pp.communicate()
+        
+        # Forward every preprocessor stderr line to the CParser's handler.
+        for line in pperr.split("\n"):
+            if line:
+                self.cparser.handle_pp_error(line)
+        
+        # We separate lines that are #defines and lines that are source code
+        # We put all the source lines first, then all the #define lines.
+        
+        source_lines= []
+        define_lines = []
+        
+        for line in ppout.split("\n"):
+            line = line + "\n"
+            if line.startswith("# "):
+                # Line number information has to go with both groups
+                source_lines.append(line)
+                define_lines.append(line)
+            
+            elif line.startswith("#define"):
+                source_lines.append("\n")
+                define_lines.append(line)
+            
+            elif line.startswith("#"):
+                # It's a directive, but not a #define. Remove it
+                source_lines.append("\n")
+                define_lines.append("\n")
+            
+            else:
+                source_lines.append(line)
+                define_lines.append("\n")
+        
+        text = "".join(source_lines + define_lines)
+        
+        if self.options.save_preprocessed_headers:
+            self.cparser.handle_status("Saving preprocessed headers to %s." % \
+                self.options.save_preprocessed_headers)
+            try:
+                # 'file' is the Python 2 built-in, equivalent to open().
+                f = file(self.options.save_preprocessed_headers, "w")
+                f.write(text)
+                f.close()
+            except IOError:
+                self.cparser.handle_error("Couldn't save headers.")
+        
+        # Tokenize the combined text and collect every token.
+        self.lexer.input(text)
+        self.output = []
+        
+        while True:
+            token = self.lexer.token()
+            if token is not None:
+                self.output.append(token)
+            else:
+                break

Failā izmaiņas netiks attēlotas, jo tās ir par lielu
+ 2261 - 0
lib/python/ctypes/ctypesgencore/parser/yacc.py


+ 10 - 0
lib/python/ctypes/ctypesgencore/printer/__init__.py

@@ -0,0 +1,10 @@
+#!/usr/bin/env python
+
+"""
+This module is the backend to ctypesgen; it contains classes to
+produce the final .py output files.
+"""
+
+from printer import WrapperPrinter
+
+__all__ = ["WrapperPrinter"]

+ 9 - 0
lib/python/ctypes/ctypesgencore/printer/defaultheader.py

@@ -0,0 +1,9 @@
+'''Wrapper for %(name)s
+
+Generated with:
+%(argv)s
+
+Do not modify this file.
+'''
+
+__docformat__ =  'restructuredtext'

+ 290 - 0
lib/python/ctypes/ctypesgencore/printer/preamble.py

@@ -0,0 +1,290 @@
+import ctypes, os, sys
+from ctypes import *
+
+# Pick an integer type with the same size as size_t to serve as
+# c_ptrdiff_t.  NOTE(review): this assumes sizeof(ptrdiff_t) ==
+# sizeof(size_t); true on common platforms, not guaranteed by C.
+_int_types = (c_int16, c_int32)
+if hasattr(ctypes, 'c_int64'):
+    # Some builds of ctypes apparently do not have c_int64
+    # defined; it's a pretty good bet that these builds do not
+    # have 64-bit pointers.
+    _int_types += (c_int64,)
+for t in _int_types:
+    if sizeof(t) == sizeof(c_size_t):
+        c_ptrdiff_t = t
+# Don't leak loop temporaries into the generated module's namespace.
+del t
+del _int_types
+
+class c_void(Structure):
+    # c_void_p is a buggy return type, converting to int, so
+    # POINTER(None) == c_void_p is actually written as
+    # POINTER(c_void), so it can be treated as a real pointer.
+    _fields_ = [('dummy', c_int)]
+
+def POINTER(obj):
+    """Wrapper around ctypes.POINTER whose from_param also accepts None,
+    converting it to a genuine NULL pointer instance."""
+    p = ctypes.POINTER(obj)
+
+    # Convert None to a real NULL pointer to work around bugs
+    # in how ctypes handles None on 64-bit platforms
+    if not isinstance(p.from_param, classmethod):
+        def from_param(cls, x):
+            if x is None:
+                return cls()
+            else:
+                return x
+        p.from_param = classmethod(from_param)
+
+    return p
+
+class UserString:
+    """A string-like wrapper class (mirroring Python 2's
+    UserString.UserString): stores its value in self.data and forwards
+    the full str API to it, so subclasses can customize behavior."""
+    def __init__(self, seq):
+        if isinstance(seq, basestring):
+            self.data = seq
+        elif isinstance(seq, UserString):
+            self.data = seq.data[:]
+        else:
+            self.data = str(seq)
+    def __str__(self): return str(self.data)
+    def __repr__(self): return repr(self.data)
+    def __int__(self): return int(self.data)
+    def __long__(self): return long(self.data)
+    def __float__(self): return float(self.data)
+    def __complex__(self): return complex(self.data)
+    def __hash__(self): return hash(self.data)
+
+    def __cmp__(self, string):
+        if isinstance(string, UserString):
+            return cmp(self.data, string.data)
+        else:
+            return cmp(self.data, string)
+    def __contains__(self, char):
+        return char in self.data
+
+    def __len__(self): return len(self.data)
+    def __getitem__(self, index): return self.__class__(self.data[index])
+    def __getslice__(self, start, end):
+        start = max(start, 0); end = max(end, 0)
+        return self.__class__(self.data[start:end])
+
+    def __add__(self, other):
+        if isinstance(other, UserString):
+            return self.__class__(self.data + other.data)
+        elif isinstance(other, basestring):
+            return self.__class__(self.data + other)
+        else:
+            return self.__class__(self.data + str(other))
+    def __radd__(self, other):
+        if isinstance(other, basestring):
+            return self.__class__(other + self.data)
+        else:
+            return self.__class__(str(other) + self.data)
+    def __mul__(self, n):
+        return self.__class__(self.data*n)
+    __rmul__ = __mul__
+    def __mod__(self, args):
+        return self.__class__(self.data % args)
+
+    # the following methods are defined in alphabetical order:
+    def capitalize(self): return self.__class__(self.data.capitalize())
+    def center(self, width, *args):
+        return self.__class__(self.data.center(width, *args))
+    def count(self, sub, start=0, end=sys.maxint):
+        return self.data.count(sub, start, end)
+    def decode(self, encoding=None, errors=None): # XXX improve this?
+        if encoding:
+            if errors:
+                return self.__class__(self.data.decode(encoding, errors))
+            else:
+                return self.__class__(self.data.decode(encoding))
+        else:
+            return self.__class__(self.data.decode())
+    def encode(self, encoding=None, errors=None): # XXX improve this?
+        if encoding:
+            if errors:
+                return self.__class__(self.data.encode(encoding, errors))
+            else:
+                return self.__class__(self.data.encode(encoding))
+        else:
+            return self.__class__(self.data.encode())
+    def endswith(self, suffix, start=0, end=sys.maxint):
+        return self.data.endswith(suffix, start, end)
+    def expandtabs(self, tabsize=8):
+        return self.__class__(self.data.expandtabs(tabsize))
+    def find(self, sub, start=0, end=sys.maxint):
+        return self.data.find(sub, start, end)
+    def index(self, sub, start=0, end=sys.maxint):
+        return self.data.index(sub, start, end)
+    def isalpha(self): return self.data.isalpha()
+    def isalnum(self): return self.data.isalnum()
+    def isdecimal(self): return self.data.isdecimal()
+    def isdigit(self): return self.data.isdigit()
+    def islower(self): return self.data.islower()
+    def isnumeric(self): return self.data.isnumeric()
+    def isspace(self): return self.data.isspace()
+    def istitle(self): return self.data.istitle()
+    def isupper(self): return self.data.isupper()
+    def join(self, seq): return self.data.join(seq)
+    def ljust(self, width, *args):
+        return self.__class__(self.data.ljust(width, *args))
+    def lower(self): return self.__class__(self.data.lower())
+    def lstrip(self, chars=None): return self.__class__(self.data.lstrip(chars))
+    def partition(self, sep):
+        return self.data.partition(sep)
+    def replace(self, old, new, maxsplit=-1):
+        return self.__class__(self.data.replace(old, new, maxsplit))
+    def rfind(self, sub, start=0, end=sys.maxint):
+        return self.data.rfind(sub, start, end)
+    def rindex(self, sub, start=0, end=sys.maxint):
+        return self.data.rindex(sub, start, end)
+    def rjust(self, width, *args):
+        return self.__class__(self.data.rjust(width, *args))
+    def rpartition(self, sep):
+        return self.data.rpartition(sep)
+    def rstrip(self, chars=None): return self.__class__(self.data.rstrip(chars))
+    def split(self, sep=None, maxsplit=-1):
+        return self.data.split(sep, maxsplit)
+    def rsplit(self, sep=None, maxsplit=-1):
+        return self.data.rsplit(sep, maxsplit)
+    def splitlines(self, keepends=0): return self.data.splitlines(keepends)
+    def startswith(self, prefix, start=0, end=sys.maxint):
+        return self.data.startswith(prefix, start, end)
+    def strip(self, chars=None): return self.__class__(self.data.strip(chars))
+    def swapcase(self): return self.__class__(self.data.swapcase())
+    def title(self): return self.__class__(self.data.title())
+    def translate(self, *args):
+        return self.__class__(self.data.translate(*args))
+    def upper(self): return self.__class__(self.data.upper())
+    def zfill(self, width): return self.__class__(self.data.zfill(width))
+
+class MutableString(UserString):
+    """mutable string objects
+
+    Python strings are immutable objects.  This has the advantage, that
+    strings may be used as dictionary keys.  If this property isn't needed
+    and you insist on changing string values in place instead, you may cheat
+    and use MutableString.
+
+    But the purpose of this class is an educational one: to prevent
+    people from inventing their own mutable string class derived
+    from UserString and than forget thereby to remove (override) the
+    __hash__ method inherited from UserString.  This would lead to
+    errors that would be very hard to track down.
+
+    A faster and better solution is to rewrite your program using lists."""
+    def __init__(self, string=""):
+        self.data = string
+    def __hash__(self):
+        # Mutable, therefore deliberately unhashable.
+        raise TypeError, "unhashable type (it is mutable)"
+    def __setitem__(self, index, sub):
+        # Negative indices wrap; out-of-range raises IndexError.
+        if index < 0:
+            index += len(self.data)
+        if index < 0 or index >= len(self.data): raise IndexError
+        self.data = self.data[:index] + sub + self.data[index+1:]
+    def __delitem__(self, index):
+        if index < 0:
+            index += len(self.data)
+        if index < 0 or index >= len(self.data): raise IndexError
+        self.data = self.data[:index] + self.data[index+1:]
+    def __setslice__(self, start, end, sub):
+        start = max(start, 0); end = max(end, 0)
+        if isinstance(sub, UserString):
+            self.data = self.data[:start]+sub.data+self.data[end:]
+        elif isinstance(sub, basestring):
+            self.data = self.data[:start]+sub+self.data[end:]
+        else:
+            self.data =  self.data[:start]+str(sub)+self.data[end:]
+    def __delslice__(self, start, end):
+        start = max(start, 0); end = max(end, 0)
+        self.data = self.data[:start] + self.data[end:]
+    def immutable(self):
+        return UserString(self.data)
+    def __iadd__(self, other):
+        if isinstance(other, UserString):
+            self.data += other.data
+        elif isinstance(other, basestring):
+            self.data += other
+        else:
+            self.data += str(other)
+        return self
+    def __imul__(self, n):
+        self.data *= n
+        return self
+
+class String(MutableString, Union):
+    """A ctypes Union usable wherever C 'char *' is expected: 'raw'
+    views the pointer as POINTER(c_char), 'data' as a Python string.
+    from_param accepts None/0 (NULL), str, c_char_p, POINTER(c_char),
+    int addresses, and objects exposing _as_parameter_."""
+
+    _fields_ = [('raw', POINTER(c_char)),
+                ('data', c_char_p)]
+
+    def __init__(self, obj=""):
+        if isinstance(obj, (str, unicode, UserString)):
+            self.data = str(obj)
+        else:
+            self.raw = obj
+
+    def __len__(self):
+        return self.data and len(self.data) or 0
+    
+    def from_param(cls, obj):
+        # Convert None or 0
+        if obj is None or obj == 0:
+            return cls(POINTER(c_char)())
+
+        # Convert from String
+        elif isinstance(obj, String):
+            return obj
+
+        # Convert from str
+        elif isinstance(obj, str):
+            return cls(obj)
+        
+        # Convert from c_char_p
+        elif isinstance(obj, c_char_p):
+            return obj
+        
+        # Convert from POINTER(c_char)
+        elif isinstance(obj, POINTER(c_char)):
+            return obj
+        
+        # Convert from raw pointer
+        elif isinstance(obj, int):
+            return cls(cast(obj, POINTER(c_char)))
+
+        # Convert from object
+        else:
+            return String.from_param(obj._as_parameter_)
+    from_param = classmethod(from_param)
+
+def ReturnString(obj):
+    # errcheck helper: normalize any char* return value into a String.
+    return String.from_param(obj)
+
+# As of ctypes 1.0, ctypes does not support custom error-checking
+# functions on callbacks, nor does it support custom datatypes on
+# callbacks, so we must ensure that all callbacks return
+# primitive datatypes.
+#
+# Non-primitive return values wrapped with UNCHECKED won't be
+# typechecked, and will be converted to c_void_p.
+def UNCHECKED(type):
+    """Return 'type' unchanged if it is a primitive (non-pointer) ctypes
+    type that is safe as a callback return type, else c_void_p."""
+    if (hasattr(type, "_type_") and isinstance(type._type_, str)
+        and type._type_ != "P"):
+        return type
+    else:
+        return c_void_p
+
+# ctypes doesn't have direct support for variadic functions, so we have to write
+# our own wrapper class
+class _variadic_function(object):
+    def __init__(self,func,restype,argtypes):
+        # 'argtypes' covers only the fixed arguments; any extra arguments
+        # are passed through to ctypes unconverted.
+        self.func=func
+        self.func.restype=restype
+        self.argtypes=argtypes
+    def _as_parameter_(self):
+        # So we can pass this variadic function as a function pointer
+        return self.func
+    def __call__(self,*args):
+        fixed_args=[]
+        i=0
+        for argtype in self.argtypes:
+            # Typecheck what we can
+            fixed_args.append(argtype.from_param(args[i]))
+            i+=1
+        return self.func(*fixed_args+list(args[i:]))
+

+ 298 - 0
lib/python/ctypes/ctypesgencore/printer/printer.py

@@ -0,0 +1,298 @@
+#!/usr/bin/env python
+
+import os, sys, time
+from ctypesgencore.descriptions import *
+from ctypesgencore.ctypedescs import *
+from ctypesgencore.messages import *
+
+import ctypesgencore.libraryloader # So we can get the path to it
+import test # So we can find the path to local files in the printer package
+
def path_to_local_file(name, known_local_module=test):
    """Return the full path of *name* in the directory that contains
    *known_local_module* (by default, this printer package)."""
    directory = os.path.dirname(known_local_module.__file__)
    return os.path.join(directory, name)
+
class WrapperPrinter:
    """Write a complete ctypes wrapper module for the parsed header data.

    The constructor does all the work: it opens `outpath` and emits, in
    order, the header template, the preamble, the library loader, the
    library/module groups, every included description from
    `data.output_order`, and finally any user-inserted files.
    """
    def __init__(self,outpath,options,data):
        status_message("Writing to %s." % outpath)
        
        self.file=file(outpath,"w")
        self.options=options

        # Ensure the strip prefix ends with a path separator so only whole
        # path components are stripped in srcinfo().
        if self.options.strip_build_path and \
          self.options.strip_build_path[-1] != os.path.sep:
            self.options.strip_build_path += os.path.sep
        
        self.print_header()
        print >>self.file
        
        self.print_preamble()
        print >>self.file
        
        self.print_loader()
        print >>self.file
                
        self.print_group(self.options.libraries,"libraries",self.print_library)
        self.print_group(self.options.modules,"modules",self.print_module)
        
        # Dispatch table: description kind -> printing method.
        method_table = {
            'function': self.print_function,
            'macro': self.print_macro,
            'struct': self.print_struct,
            'struct-body': self.print_struct_members,
            'typedef': self.print_typedef,
            'variable': self.print_variable,
            'enum': self.print_enum,
            'constant': self.print_constant
        }
        
        for kind,desc in data.output_order:
            if desc.included:
                method_table[kind](desc)
                print >>self.file
        
        self.print_group(self.options.inserted_files,"inserted files",
                         self.insert_file)
    
    def print_group(self,list,name,function):
        """Emit a "# Begin/# End" delimited section by calling `function`
        on each element of `list`."""
        if list:
            print >>self.file,"# Begin %s" % name
            print >>self.file
            for obj in list:
                function(obj)
            print >>self.file
            print >>self.file,"# %d %s" % (len(list),name)
            print >>self.file,"# End %s" % name
        else:
            print >>self.file,"# No %s" % name
        print >>self.file
    
    def srcinfo(self,src):
        """Emit a "# file: line" comment for the (filename, lineno) pair
        `src`, stripping the build path prefix if configured."""
        if src==None:
            print >>self.file
        else:
            filename,lineno = src
            if filename in ("<built-in>","<command line>"):
                print >>self.file, "# %s" % filename
            else:
                if self.options.strip_build_path and \
                  filename.startswith(self.options.strip_build_path):
                    filename = filename[len(self.options.strip_build_path):]
                print >>self.file, "# %s: %s" % (filename, lineno)
    
    def template_subs(self):
        """Build the substitution dict used to fill the header template."""
        template_subs={
            'date': time.ctime(),
            'argv': ' '.join([x for x in sys.argv if not x.startswith("--strip-build-path")]),
            'name': os.path.basename(self.options.headers[0])
        }
        
        for opt,value in self.options.__dict__.iteritems():
            if type(value)==str:
                template_subs[opt]=value
            elif isinstance(value,(list,tuple)):
                template_subs[opt]=(os.path.sep).join(value)
            else:
                template_subs[opt]=repr(value)
        
        return template_subs
    
    def print_header(self):
        """Emit the file header from the user template, falling back to
        the bundled defaultheader.py."""
        template_file = None
        
        if self.options.header_template:
            path = self.options.header_template
            try:
                template_file = file(path,"r")
            except IOError:
                error_message("Cannot load header template from file \"%s\" " \
                    " - using default template." % path, cls = 'missing-file')
        
        if not template_file:
            path = path_to_local_file("defaultheader.py")
            template_file = file(path,"r")
        
        template_subs=self.template_subs()
        self.file.write(template_file.read() % template_subs)
        
        template_file.close()
    
    def print_preamble(self):
        """Copy the bundled preamble.py (String class, UNCHECKED, etc.)
        into the output."""
        path = path_to_local_file("preamble.py")
        
        print >>self.file, "# Begin preamble"
        print >>self.file
        preamble_file=file(path,"r")
        self.file.write(preamble_file.read())
        preamble_file.close()
        print >>self.file
        print >>self.file, "# End preamble"
    
    def print_loader(self):
        """Copy the library loader module into the output and register the
        runtime library search directories."""
        print >>self.file, "_libs = {}"
        print >>self.file, "_libdirs = %s" % self.options.compile_libdirs
        print >>self.file
        print >>self.file, "# Begin loader"
        print >>self.file
        path = path_to_local_file("libraryloader.py",
                                      ctypesgencore.libraryloader)
        loader_file=file(path,"r")
        self.file.write(loader_file.read())
        loader_file.close()
        print >>self.file
        print >>self.file, "# End loader"
        print >>self.file
        print >>self.file, "add_library_search_dirs([%s])" % \
                ", ".join([repr(d) for d in self.options.runtime_libdirs])
    
    def print_library(self,library):
        """Emit the load_library() call for one shared library."""
        print >>self.file, '_libs["%s"] = load_library("%s")'%(library,library)
    
    def print_module(self,module):
        """Emit a star-import for one linked module."""
        # BUG FIX: this previously formatted with the undefined name
        # "name", raising NameError whenever --module/--link-module was
        # used; the parameter is "module".
        print >>self.file, 'from %s import *' % module
    
    def print_constant(self,constant):
        print >>self.file, '%s = %s' % \
            (constant.name,constant.value.py_string(False)),
        self.srcinfo(constant.src)
    
    def print_typedef(self,typedef):
        print >>self.file, '%s = %s' % \
            (typedef.name,typedef.ctype.py_string()),
        self.srcinfo(typedef.src)
    
    def print_struct(self, struct):
        """Emit the empty class statement for a struct/union; members are
        emitted later by print_struct_members()."""
        self.srcinfo(struct.src)
        base = {'union': 'Union', 'struct': 'Structure'}[struct.variety]
        print >>self.file, 'class %s_%s(%s):' % \
            (struct.variety, struct.tag, base)
        print >>self.file, '    pass'
    
    def print_struct_members(self, struct):
        """Emit __slots__ and _fields_ for a non-opaque struct/union."""
        if struct.opaque: return
        print >>self.file, '%s_%s.__slots__ = [' % (struct.variety, struct.tag)
        for name,ctype in struct.members:
            print >>self.file, "    '%s'," % name
        print >>self.file, ']'
        print >>self.file, '%s_%s._fields_ = [' % (struct.variety, struct.tag)
        for name,ctype in struct.members:
            if isinstance(ctype,CtypesBitfield):
                print >>self.file, "    ('%s', %s, %s)," % \
                    (name, ctype.py_string(), ctype.bitfield.py_string(False))
            else:
                print >>self.file, "    ('%s', %s)," % (name, ctype.py_string())
        print >>self.file, ']'
    
    def print_enum(self,enum):
        print >>self.file, 'enum_%s = c_int' % enum.tag,
        self.srcinfo(enum.src)
        # Values of enumerator are output as constants.
    
    def print_function(self, function):
        """Dispatch to the fixed- or variadic-argument emitter."""
        if function.variadic:
            self.print_variadic_function(function)
        else:
            self.print_fixed_function(function)
    
    def print_fixed_function(self, function):
        """Emit the binding for a non-variadic function, either from its
        known source library or by probing every loaded library."""
        self.srcinfo(function.src)
        if function.source_library:
            print >>self.file, "if hasattr(_libs[%r], %r):" % \
                (function.source_library,function.c_name())
            print >>self.file, "    %s = _libs[%r].%s" % \
                (function.py_name(),function.source_library,function.c_name())
            print >>self.file, "    %s.restype = %s" % \
                (function.py_name(),function.restype.py_string())
            print >>self.file, "    %s.argtypes = [%s]" % (function.py_name(),
                ', '.join([a.py_string() for a in function.argtypes]))
        else:
            print >>self.file, "for _lib in _libs.values():"
            print >>self.file, "    if hasattr(_lib, %r):" % function.c_name()
            print >>self.file, "        %s = _lib.%s" % (function.py_name(),function.c_name())
            print >>self.file, "        %s.restype = %s" % (function.py_name(),function.restype.py_string())
            print >>self.file, "        %s.argtypes = [%s]" % (function.py_name(),
                ', '.join([a.py_string() for a in function.argtypes]))
            print >>self.file, "        break"
    
    def print_variadic_function(self,function):
        """Emit the binding for a variadic function via the
        _variadic_function wrapper defined in the preamble."""
        self.srcinfo(function.src)
        if function.source_library:
            print >>self.file, "if hasattr(_libs[%r], %r):" % \
                (function.source_library,function.c_name())
            print >>self.file, "    _func = _libs[%r].%s" % \
                (function.source_library,function.c_name())
            print >>self.file, "    _restype = %s" % function.restype.py_string()
            print >>self.file, "    _argtypes = [%s]" % \
                ', '.join([a.py_string() for a in function.argtypes])
            print >>self.file, "    %s = _variadic_function(_func,_restype,_argtypes)" % \
                function.py_name()
        else:
            print >>self.file, "for _lib in _libs.values():"
            print >>self.file, "    if hasattr(_lib, %r):" % function.c_name()
            print >>self.file, "        _func = _lib.%s" % \
                (function.c_name())
            print >>self.file, "        _restype = %s" % function.restype.py_string()
            print >>self.file, "        _argtypes = [%s]" % \
                ', '.join([a.py_string() for a in function.argtypes])
            print >>self.file, "        %s = _variadic_function(_func,_restype,_argtypes)" % \
                function.py_name()

    
    def print_variable(self, variable):
        """Emit an in_dll() lookup for a global variable, guarded by
        try/except because the symbol may be absent at runtime."""
        self.srcinfo(variable.src)
        if variable.source_library:
            print >>self.file, 'try:'
            print >>self.file, '    %s = (%s).in_dll(_libs[%r], %r)' % \
                (variable.py_name(),
                 variable.ctype.py_string(),
                 variable.source_library,
                 variable.c_name())
            print >>self.file, 'except:'
            print >>self.file, '    pass'
        else:
            print >>self.file, "for _lib in _libs.values():"
            print >>self.file, '    try:'
            print >>self.file, '        %s = (%s).in_dll(_lib, %r)' % \
                (variable.py_name(),
                 variable.ctype.py_string(),
                 variable.c_name())
            print >>self.file, "        break"
            print >>self.file, '    except:'
            print >>self.file, '        pass'
    
    def print_macro(self, macro):
        """Dispatch to the function-like or simple macro emitter."""
        if macro.params:
            self.print_func_macro(macro)
        else:
            self.print_simple_macro(macro)
    
    def print_simple_macro(self, macro):
        # The macro translator makes heroic efforts but it occasionally fails.
        # We want to contain the failures as much as possible.
        # Hence the try statement.
        self.srcinfo(macro.src)
        print >>self.file, "try:"
        print >>self.file, "    %s = %s" % (macro.name,macro.expr.py_string(True))
        print >>self.file, "except:"
        print >>self.file, "    pass"
    
    def print_func_macro(self, macro):
        self.srcinfo(macro.src)
        print >>self.file, "def %s(%s):" % \
            (macro.name,", ".join(macro.params))
        print >>self.file, "    return %s" % macro.expr.py_string(True)
    
    def insert_file(self,filename):
        """Copy a user-specified file verbatim into the output."""
        try:
            inserted_file = file(filename,"r")
        except IOError:
            error_message("Cannot open file \"%s\". Skipped it." % filename,
                          cls = 'missing-file')
            # BUG FIX: previously fell through and used the unbound name
            # "inserted_file", raising NameError instead of skipping the
            # file as the message promises.
            return
        
        print >>self.file,"# Begin \"%s\"" % filename
        print >>self.file
        self.file.write(inserted_file.read())
        print >>self.file
        print >>self.file,"# End \"%s\"" % filename
              
        inserted_file.close()

+ 6 - 0
lib/python/ctypes/ctypesgencore/printer/test.py

@@ -0,0 +1,6 @@
+"""
+ctypesgencore.printer.printer imports this module so that it can find the path
+to defaulttemplate.py and defaultloader.py.
+"""
+
+pass

+ 12 - 0
lib/python/ctypes/ctypesgencore/processor/__init__.py

@@ -0,0 +1,12 @@
+#!/usr/bin/env python
+
+"""
+This module contains functions to operate on the DeclarationCollection produced
+by the parser module and prepare it for output.
+
+A convenience_function, process(), calls everything else.
+"""
+
+__all__ = ["process"]
+
+from pipeline import process

+ 137 - 0
lib/python/ctypes/ctypesgencore/processor/dependencies.py

@@ -0,0 +1,137 @@
+#!/usr/bin/env python
+
+"""
+The dependencies module determines which descriptions depend on which other
+descriptions.
+"""
+
+from ctypesgencore.descriptions import *
+from ctypesgencore.ctypedescs import *
+from ctypesgencore.messages import *
+
def find_dependencies(data, opts):
    """Visit each description in `data` and figure out which other descriptions
it depends on, putting the results in desc.requirements. Also find errors in
ctypedecls or expressions attached to the description and transfer them to the
description."""
    
    # Lookup tables mapping a symbol's key to its description (or None for
    # names that come from imported modules and so have no description here).
    struct_names = {}
    enum_names = {}
    typedef_names = {}
    ident_names = {}
    
    # Start the lookup tables with names from imported modules
    
    for name in opts.other_known_names:
        typedef_names[name] = None
        ident_names[name] = None
        if name.startswith("struct_") or name.startswith("enum_"):
            variety = name.split("_")[0]
            tag = "_".join(name.split("_")[1:])
            struct_names[(variety,tag)] = None
        if name.startswith("enum_"):
            # NOTE(review): seeded with the full "enum_<tag>" string, but the
            # lookups below use the bare cenum.tag as the key — confirm these
            # are meant to match.
            enum_names[name] = None
    
    def depend(desc, nametable, name):
        """Try to add `name` as a requirement for `desc`, looking `name` up in
`nametable`. Returns True if found."""

        if name in nametable:
            requirement = nametable[name]
            # A None entry means "known, but external" — nothing to require.
            if requirement: desc.add_requirements([requirement])
            return True
        else:
            return False
    
    def find_dependencies_for(desc, kind):
        """Find all the descriptions that `desc` depends on and add them as
dependencies for `desc`. Also collect error messages regarding `desc` and 
convert unlocateable descriptions into error messages."""

        # Pick the type/expression roots to scan, depending on what kind of
        # description this is.
        if kind == "constant": roots = [desc.value]
        if kind == "struct": roots = []
        if kind == "struct-body": roots = [desc.ctype]
        if kind == "enum": roots = []
        if kind == "typedef": roots = [desc.ctype]
        if kind == "function": roots = desc.argtypes + [desc.restype]
        if kind == "variable": roots = [desc.ctype]
        if kind == "macro":
            if desc.expr: roots = [desc.expr]
            else: roots = []
        
        cstructs,cenums,ctypedefs,errors,identifiers = [], [], [], [], []
        
        # Collect every struct/enum/typedef/identifier referenced from the
        # roots, plus any errors recorded on the type nodes themselves.
        for root in roots:
            s, e, t, errs, i = visit_type_and_collect_info(root)
            cstructs.extend(s)
            cenums.extend(e)
            ctypedefs.extend(t)
            errors.extend(errs)
            identifiers.extend(i)
        
        unresolvables = []
        
        for cstruct in cstructs:
            # A struct may refer to itself; that is not a dependency.
            if kind == "struct" and desc.variety == cstruct.variety and \
                desc.tag == cstruct.tag:
                continue
            if not depend(desc, struct_names, (cstruct.variety, cstruct.tag)):
                unresolvables.append("%s \"%s\"" % \
                    (cstruct.variety, cstruct.tag))
        
        for cenum in cenums:
            # Likewise, skip an enum's reference to itself.
            if kind == "enum" and desc.tag == cenum.tag:
                continue
            if not depend(desc, enum_names, cenum.tag):
                unresolvables.append("enum \"%s\"" % cenum.tag)
        
        for ctypedef in ctypedefs:
            if not depend(desc, typedef_names, ctypedef):
                unresolvables.append("typedef \"%s\"" % ctypedef)
        
        for ident in identifiers:
            # Macro parameters are local names, not external dependencies.
            if isinstance(desc, MacroDescription) and \
                desc.params and ident in desc.params:
                continue
            if not depend(desc, ident_names, ident):
                unresolvables.append("identifier \"%s\"" % ident)
        
        # Anything unresolved becomes an error attached to the description.
        for u in unresolvables:
            errors.append(("%s depends on an unknown %s." % \
                          (desc.casual_name(), u), None))
        
        for err, cls in errors:
            err += " %s will not be output" % desc.casual_name()
            desc.error(err, cls = cls)
        
    def add_to_lookup_table(desc, kind):
        """Add `desc` to the lookup table so that other descriptions that use
it can find it."""
        # First definition wins; later duplicates are not registered.
        if kind == "struct":
            if (desc.variety, desc.tag) not in struct_names:
                struct_names[(desc.variety, desc.tag)] = desc
        if kind == "enum":
            if desc.tag not in enum_names:
                enum_names[desc.tag] = desc
        if kind == "typedef":
            if desc.name not in typedef_names:
                typedef_names[desc.name] = desc
        if kind in ("function", "constant", "variable", "macro"):
            if desc.name not in ident_names:
                ident_names[desc.name] = desc

    # Macros are handled differently from everything else because macros can
    # call other macros that are referenced after them in the input file, but
    # no other type of description can look ahead like that.

    for kind, desc in data.output_order:
        if kind!="macro":
            find_dependencies_for(desc, kind)
            add_to_lookup_table(desc, kind)

    # Register ALL macros first, then resolve them, so a macro can depend on
    # a macro defined later in the file.
    for kind, desc in data.output_order:
        if kind=="macro":
            add_to_lookup_table(desc, kind)
    for kind, desc in data.output_order:
        if kind=="macro":
            find_dependencies_for(desc, kind)

+ 200 - 0
lib/python/ctypes/ctypesgencore/processor/operations.py

@@ -0,0 +1,200 @@
+#!/usr/bin/env python
+
+"""
+The operations module contains various functions to process the
+DescriptionCollection and prepare it for output.
+ctypesgencore.processor.pipeline calls the operations module.
+"""
+
+import ctypes, re, os, sys, keyword
+from ctypesgencore.descriptions import *
+from ctypesgencore.messages import *
+import ctypesgencore.libraryloader
+
+# Processor functions
+
def automatically_typedef_structs(data, options):
    """automatically_typedef_structs() aliases "struct_<tag>" to "<tag>" for
    every struct and union."""
    # XXX Check if it has already been aliased in the C code.

    for struct in data.structs:
        if struct.ctype.anonymous:
            continue  # Don't alias anonymous structs
        alias = TypedefDescription(struct.tag,
                                   struct.ctype,
                                   src=struct.src)
        alias.add_requirements(set([struct]))

        data.typedefs.append(alias)
        data.all.insert(data.all.index(struct) + 1, alias)
        data.output_order.append(("typedef", alias))
+
def remove_NULL(data, options):
    """remove_NULL() removes any NULL definitions from the C headers because
ctypesgen supplies its own NULL definition."""

    null_macros = [m for m in data.macros if m.name == "NULL"]
    for m in null_macros:
        m.include_rule = "never"
+
def remove_descriptions_in_system_headers(data, opts):
    """remove_descriptions_in_system_headers() removes descriptions if they came
    from files outside of the header files specified from the command line."""

    known_headers = set(os.path.basename(h) for h in opts.headers)

    for desc in data.all:
        if desc.src is None:
            continue
        origin = desc.src[0]
        if origin == "<command line>":
            desc.include_rule = "if_needed"
        elif origin == "<built-in>":
            if not opts.builtin_symbols:
                desc.include_rule = "if_needed"
        elif os.path.basename(origin) not in known_headers:
            if not opts.all_headers:
                # If something else requires this, include it even though
                # it is in a system header file.
                desc.include_rule = "if_needed"
+
def remove_macros(data, opts):
    """remove_macros() removes macros if --no-macros is set."""
    if opts.include_macros:
        return
    for macro in data.macros:
        macro.include_rule = "never"
+
def filter_by_regexes_exclude(data, opts):
    """filter_by_regexes_exclude() uses regular expressions specified by options
    dictionary to filter symbols."""
    if not opts.exclude_symbols:
        return
    pattern = re.compile(opts.exclude_symbols)
    for desc in data.all:
        if pattern.match(desc.py_name()):
            desc.include_rule = "never"
+
def filter_by_regexes_include(data, opts):
    """filter_by_regexes_include() uses regular expressions specified by options
    dictionary to re-include symbols previously rejected by other operations."""
    if not opts.include_symbols:
        return
    pattern = re.compile(opts.include_symbols)
    for desc in data.all:
        if desc.include_rule != "never" and pattern.match(desc.py_name()):
            desc.include_rule = "yes"
+
def fix_conflicting_names(data, opts):
    """If any descriptions from the C code would overwrite Python builtins or
    other important names, fix_conflicting_names() adds underscores to resolve
    the name conflict."""
    
    # This is the order of priority for names
    descriptions = data.functions + data.variables + data.structs + \
        data.typedefs + data.enums + data.constants + data.macros
    
    # This dictionary maps names to a string representing where the name
    # came from.
    important_names={}
    
    # Names bound by the generated preamble/loader code; a wrapped symbol
    # must not shadow any of them.
    preamble_names=set()
    preamble_names=preamble_names.union(['DarwinLibraryLoader',
        'LibraryLoader', 'LinuxLibraryLoader', 'WindowsLibraryLoader',
        '_WindowsLibrary', 'add_library_search_dirs', '_environ_path', 'ctypes',
        'load_library', 'loader', 'os', 're', 'sys'])
    preamble_names=preamble_names.union(['ArgumentError', 'CFUNCTYPE',
        'POINTER', 'ReturnString', 'String', 'Structure', 'UNCHECKED', 'Union',
        'UserString', '_variadic_function', 'addressof', 'c_buffer', 'c_byte',
        'c_char', 'c_char_p', 'c_double', 'c_float', 'c_int', 'c_int16',
        'c_int32', 'c_int64', 'c_int8', 'c_long', 'c_longlong', 'c_ptrdiff_t',
        'c_short', 'c_size_t', 'c_ubyte', 'c_uint', 'c_uint16', 'c_uint32',
        'c_uint64', 'c_uint8', 'c_ulong', 'c_ulonglong', 'c_ushort', 'c_void',
        'c_void_p', 'c_voidp', 'c_wchar', 'c_wchar_p', 'cast', 'ctypes', 'os',
        'pointer', 'sizeof'])
    for name in preamble_names:
        important_names[name] = "a name needed by ctypes or ctypesgen"
    # NOTE(review): inside an imported module, __builtins__ may be a dict
    # rather than the builtins module, in which case dir(__builtins__)
    # yields dict method names instead of builtin names — confirm intent.
    for name in dir(__builtins__): important_names[name] = "a Python builtin"
    for name in opts.other_known_names:
        important_names[name] = "a name from an included Python module"
    for name in keyword.kwlist: important_names[name] = "a Python keyword"
    
    for description in descriptions:
        if description.py_name() in important_names:
            conflict_name = important_names[description.py_name()]
            
            # Keep renaming until the name no longer collides: structs/enums
            # get a trailing underscore on the tag, everything else gets a
            # leading underscore on the name.
            original_name=description.casual_name()
            while description.py_name() in important_names:
                if isinstance(description,
                                (StructDescription, EnumDescription)):
                    description.tag+="_"
                else:
                    description.name="_"+description.name
            
            if not description.dependents:
                description.warning("%s has been renamed to %s due to a name " \
                    "conflict with %s." % \
                    (original_name,
                    description.casual_name(),
                    conflict_name),
                    cls = 'rename')
            else:
                # Renaming would break anything that refers to the old name,
                # so dependents are dropped from the output.
                description.warning("%s has been renamed to %s due to a name " \
                    "conflict with %s. Other objects depend on %s - those " \
                    "objects will be skipped." % \
                    (original_name, description.casual_name(),
                    conflict_name, original_name),
                    cls = 'rename')
                
                for dependent in description.dependents:
                    dependent.include_rule = "never"
            
            # The new name now becomes reserved too, so later descriptions
            # cannot collide with it.
            if description.include_rule=="yes":
                important_names[description.py_name()] = \
                    description.casual_name()
    
    # Names of struct members don't conflict with much, but they can conflict
    # with Python keywords.
    
    for struct in data.structs:
        if not struct.opaque:
            for i,(name,type) in enumerate(struct.members):
                if name in keyword.kwlist:
                    struct.members[i] = ("_"+name,type)
                    struct.warning("Member \"%s\" of %s has been renamed to " \
                        "\"%s\" because it has the same name as a Python " \
                        "keyword." % (name, struct.casual_name(), "_"+name),
                        cls = 'rename')
    
    # Macro arguments may be have names that conflict with Python keywords.
    # In a perfect world, this would simply rename the parameter instead
    # of throwing an error message.
    
    for macro in data.macros:
        if macro.params:
            for param in macro.params:
                if param in keyword.kwlist:
                    macro.error("One of the parameters to %s, \"%s\" has the " \
                        "same name as a Python keyword. %s will be skipped." % \
                        (macro.casual_name(), param, macro.casual_name()),
                        cls = 'name-conflict')
+
def find_source_libraries(data, opts):
    """find_source_libraries() determines which library contains each function
    and variable.

    Each symbol's `source_library` is set to the name of the first library
    (in opts.libraries order) that exports it, or left as None if no loaded
    library does."""

    all_symbols = data.functions + data.variables

    # Until proven otherwise, a symbol has no known source library.
    for symbol in all_symbols:
        symbol.source_library = None

    ctypesgencore.libraryloader.add_library_search_dirs(opts.compile_libdirs)

    for library_name in opts.libraries:
        try:
            library = ctypesgencore.libraryloader.load_library(library_name)
        except ImportError:
            # Cleanup: the exception object was previously bound to an
            # unused name. The library may still load at runtime, so this
            # is only a warning.
            warning_message("Could not load library \"%s\". Okay, I'll " \
                "try to load it at runtime instead. " % (library_name),
                cls = 'missing-library')
            continue
        for symbol in all_symbols:
            if symbol.source_library is None:
                if hasattr(library, symbol.c_name()):
                    symbol.source_library = library_name

+ 134 - 0
lib/python/ctypes/ctypesgencore/processor/pipeline.py

@@ -0,0 +1,134 @@
+#!/usr/bin/env python
+
+import ctypes, re, os
+from ctypesgencore.processor.operations import *
+from ctypesgencore.processor.dependencies import find_dependencies
+from ctypesgencore.ctypedescs import *
+from ctypesgencore.messages import *
+
+"""
+A brief explanation of the processing steps:
+1. The dependencies module builds a dependency graph for the descriptions.
+
+2. Operation functions are called to perform various operations on the
+descriptions. The operation functions are found in operations.py.
+
+3. If an operation function decides to exclude a description from the output, it
+sets 'description.include_rule' to "never"; if an operation function decides not
+to include a description by default, but to allow if required, it sets
+'description.include_rule' to "if_needed".
+
+4. If an operation function encounters an error that makes a description unfit
+for output, it appends a string error message to 'description.errors'.
+'description.warnings' is a list of warning messages that will be displayed but
+will not prevent the description from being output.
+
+5. Based on 'description.include_rule', calculate_final_inclusion() decides
+which descriptions to include in the output. It sets 'description.included' to
+True or False.
+
+6. For each description, print_errors_encountered() checks if there are error
+messages in 'description.errors'. If so, print_errors_encountered() prints the
+error messages, but only if 'description.included' is True - it doesn't bother
+the user with error messages regarding descriptions that would not be in the
+output anyway. It also prints 'description.warnings'.
+
+7. calculate_final_inclusion() is called again to recalculate based on
+the errors that print_errors_encountered() has flagged.
+
+"""
+
+def process(data,options):
+    """Run the full processing pipeline over the description list in 'data'.
+
+    Mutates the descriptions in place: builds the dependency graph, applies
+    each filtering/fixup operation from operations.py, then decides final
+    inclusion.  See the module docstring above for the step-by-step design."""
+    status_message("Processing description list.")
+    
+    find_dependencies(data,options)
+    
+    automatically_typedef_structs(data,options)
+    remove_NULL(data, options)
+    remove_descriptions_in_system_headers(data,options)
+    filter_by_regexes_exclude(data,options)
+    filter_by_regexes_include(data,options)
+    remove_macros(data,options)
+    fix_conflicting_names(data,options)
+    find_source_libraries(data,options)
+        
+    # Second calculate_final_inclusion() pass picks up descriptions that
+    # print_errors_encountered() demoted to include_rule="never" (step 7 of
+    # the module docstring).
+    calculate_final_inclusion(data,options)
+    print_errors_encountered(data,options)
+    calculate_final_inclusion(data,options)
+
+def calculate_final_inclusion(data,opts):
+    """calculate_final_inclusion() calculates which descriptions will be included in the
+    output library.
+
+    An object with include_rule="never" is never included.
+    An object with include_rule="yes" is included if its requirements can be
+        included.
+    An object with include_rule="if_needed" is included if an object to be
+        included requires it and if its requirements can be included.
+    """
+    
+    def can_include_desc(desc):
+        if desc.can_include==None:
+            if desc.include_rule=="no":
+                desc.can_include=False
+            elif desc.include_rule=="yes" or desc.include_rule=="if_needed":
+                desc.can_include=True
+                for req in desc.requirements:
+                    if not can_include_desc(req):
+                        desc.can_include=False
+        return desc.can_include
+        
+    def do_include_desc(desc):
+        if desc.included:
+            return # We've already been here
+        desc.included = True
+        for req in desc.requirements:
+            do_include_desc(req)
+    
+    for desc in data.all:
+        desc.can_include=None # None means "Not Yet Decided"
+        desc.included=False
+        
+    for desc in data.all:
+        if desc.include_rule=="yes":
+            if can_include_desc(desc):
+                do_include_desc(desc)
+
+def print_errors_encountered(data,opts):
+    """Report each included description's accumulated errors/warnings, then
+    demote any description that had errors to include_rule="never" so the
+    following calculate_final_inclusion() pass drops it."""
+    # See descriptions.py for an explanation of the error-handling mechanism
+    for desc in data.all:
+        # If description would not have been included, don't bother user by
+        # printing warnings.
+        if desc.included or opts.show_all_errors:
+            # Short lists are printed in full; longer ones are summarized
+            # below unless --show-long-errors was given.
+            if opts.show_long_errors or len(desc.errors)+len(desc.warnings)<=2:
+                for (error,cls) in desc.errors:
+                    # Macro errors will always be displayed as warnings.
+                    if isinstance(desc, MacroDescription):
+                        if opts.show_macro_warnings:
+                            warning_message(error,cls)
+                    else:
+                        error_message(error,cls)
+                for (warning,cls) in desc.warnings:
+                    warning_message(warning,cls)
+            
+            else:
+                # Summary mode: show only the first error (or warning) and a
+                # count of the rest.
+                if desc.errors:
+                    error1,cls1 = desc.errors[0]
+                    error_message(error1,cls1)
+                    numerrs = len(desc.errors)-1
+                    numwarns = len(desc.warnings)
+                    if numwarns:
+                        error_message("%d more errors and %d more warnings " \
+                            "for %s" % (numerrs,numwarns,desc.casual_name()))
+                    else:
+                        error_message("%d more errors for %s " % \
+                            (numerrs,desc.casual_name()))
+                else:
+                    warning1,cls1 = desc.warnings[0]
+                    warning_message(warning1,cls1)
+                    # NOTE(review): this message says "errors" but the count
+                    # is of remaining warnings -- likely a copy/paste slip.
+                    warning_message("%d more errors for %s" % \
+                        (len(desc.warnings)-1, desc.casual_name()))
+        if desc.errors:
+            # process() will recalculate to take this into account
+            desc.include_rule = "never"
+