
Update to upstream ctypesgen version (#1651)

* update ctypesgen to community version

* ctypesgen patches needed for mac

* ctypesgen patch for python printer of preamble and loader

* patches applied to ctypesgen source code

* patch for ctypesgen POINTER

* Use ReturnString to create String from str object

* remove lextab.py from gitignore

* remove sed-file for preamble/loader replacement

* black on preamble patch

* add README

- with instructions on updating ctypesgen version
- remove patches dir and separate files

* set RASTER3D_DEFAULT_WINDOW=0

* libimagery unittest: adapt to community ctypesgen

* add win patches to README

* patches needed for Windows

* update README and run Black on a patch
nilason, 2 years ago · commit ca2d28a592
59 changed files with 7777 additions and 3714 deletions
  1. .gitignore (+0 -1)
  2. include/grass/raster3d.h (+1 -1)
  3. lib/imagery/testsuite/test_imagery_signature_management.py (+4 -2)
  4. lib/imagery/testsuite/test_imagery_sigsetfile.py (+19 -19)
  5. python/grass/ctypes/Makefile (+11 -10)
  6. python/grass/ctypes/README.md (+233 -0)
  7. python/grass/ctypes/ctypesgen.py (+0 -178)
  8. python/grass/ctypes/ctypesgen/LICENSE (+0 -0)
  9. python/grass/ctypes/ctypesgencore/__init__.py (+28 -7)
  10. python/grass/ctypes/ctypesgencore/ctypedescs.py (+118 -124)
  11. python/grass/ctypes/ctypesgencore/descriptions.py (+44 -21)
  12. python/grass/ctypes/ctypesgencore/expressions.py (+72 -75)
  13. python/grass/ctypes/ctypesgen/libraryloader.py (+364 -0)
  14. python/grass/ctypes/ctypesgen/main.py (+391 -0)
  15. python/grass/ctypes/ctypesgencore/messages.py (+7 -9)
  16. python/grass/ctypes/ctypesgencore/options.py (+11 -8)
  17. python/grass/ctypes/ctypesgen/parser/.gitignore (+2 -0)
  18. python/grass/ctypes/ctypesgencore/parser/__init__.py (+3 -2)
  19. python/grass/ctypes/ctypesgen/parser/cdeclarations.py (+290 -0)
  20. python/grass/ctypes/ctypesgencore/parser/cgrammar.py (+647 -422)
  21. python/grass/ctypes/ctypesgencore/parser/cparser.py (+89 -72)
  22. python/grass/ctypes/ctypesgencore/parser/ctypesparser.py (+56 -48)
  23. python/grass/ctypes/ctypesgencore/parser/datacollectingparser.py (+98 -97)
  24. python/grass/ctypes/ctypesgencore/parser/lex.py (+149 -176)
  25. python/grass/ctypes/ctypesgen/parser/lextab.py (+8 -0)
  26. python/grass/ctypes/ctypesgen/parser/parsetab.py (+307 -0)
  27. python/grass/ctypes/ctypesgen/parser/pplexer.py (+402 -0)
  28. python/grass/ctypes/ctypesgencore/parser/preprocessor.py (+91 -84)
  29. python/grass/ctypes/ctypesgencore/parser/yacc.py (+318 -275)
  30. python/grass/ctypes/ctypesgencore/printer/__init__.py (+1 -1)
  31. python/grass/ctypes/ctypesgen/printer_json/printer.py (+150 -0)
  32. python/grass/ctypes/ctypesgen/printer_json/test.py (+6 -0)
  33. python/grass/ctypes/ctypesgen/printer_python/__init__.py (+10 -0)
  34. python/grass/ctypes/ctypesgen/printer_python/defaultheader.py (+9 -0)
  35. python/grass/ctypes/ctypesgencore/printer/preamble.py (+98 -77)
  36. python/grass/ctypes/preamble.py (+95 -93)
  37. python/grass/ctypes/ctypesgen/printer_python/preamble/3_2.py (+448 -0)
  38. python/grass/ctypes/ctypesgen/printer_python/preamble/__init__.py (+0 -0)
  39. python/grass/ctypes/ctypesgen/printer_python/printer.py (+460 -0)
  40. python/grass/ctypes/ctypesgen/printer_python/test.py (+6 -0)
  41. python/grass/ctypes/ctypesgencore/processor/__init__.py (+1 -1)
  42. python/grass/ctypes/ctypesgencore/processor/dependencies.py (+46 -27)
  43. python/grass/ctypes/ctypesgencore/processor/operations.py (+119 -61)
  44. python/grass/ctypes/ctypesgencore/processor/pipeline.py (+16 -18)
  45. python/grass/ctypes/ctypesgen/test/.gitignore (+2 -0)
  46. python/grass/ctypes/ctypesgen/test/ctypesgentest.py (+88 -0)
  47. python/grass/ctypes/ctypesgen/test/testsuite.py (+2352 -0)
  48. python/grass/ctypes/ctypesgen/version.py (+89 -0)
  49. python/grass/ctypes/ctypesgencore/libraryloader.py (+0 -319)
  50. python/grass/ctypes/ctypesgencore/parser/cdeclarations.py (+0 -198)
  51. python/grass/ctypes/ctypesgencore/parser/parsetab.py (+0 -282)
  52. python/grass/ctypes/ctypesgencore/parser/pplexer.py (+0 -346)
  53. python/grass/ctypes/ctypesgencore/printer/defaultheader.py (+0 -9)
  54. python/grass/ctypes/ctypesgencore/printer/printer.py (+0 -362)
  55. python/grass/ctypes/ctypesgencore/printer/test.py (+0 -6)
  56. python/grass/ctypes/fix.sed (+0 -7)
  57. python/grass/ctypes/loader.py (+0 -270)
  58. python/grass/ctypes/run.py (+12 -0)
  59. python/grass/pygrass/vector/table.py (+6 -6)

+ 0 - 1
.gitignore

@@ -24,7 +24,6 @@ lib/db/sqlp/sqlp.output
 lib/db/sqlp/sqlp.tab.c
 lib/db/sqlp/sqlp.tab.h
 lib/db/sqlp/sqlp.yy.c
-python/grass/ctypes/ctypesgencore/parser/lextab.py
 python/grass/script/setup.py.tmp
 raster/r.mapcalc/mapcalc.output
 raster/r.mapcalc/mapcalc.tab.c

+ 1 - 1
include/grass/raster3d.h

@@ -26,7 +26,7 @@
 #define RASTER3D_USE_CACHE_YZ -7
 #define RASTER3D_USE_CACHE_XYZ -8
 
-#define RASTER3D_DEFAULT_WINDOW NULL
+#define RASTER3D_DEFAULT_WINDOW 0 /* NULL pointer, now (int)0 because issues with ctypesgen */
 
 #define RASTER3D_DIRECTORY      "grid3"
 #define RASTER3D_CELL_ELEMENT   "cell"
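
A rough sketch of what this buys in the generated bindings (the module name `grass.lib.raster3d` and the emitted line are assumptions based on how ctypesgen renders integer macros; a bare `NULL` previously could not be translated at all):

```python
import ctypes

# Expected line in the regenerated wrapper (e.g. etc/python/grass/lib/raster3d.py):
RASTER3D_DEFAULT_WINDOW = 0

# The constant can still be used as a NULL pointer; ctypes reports a NULL
# void pointer's value as None.
null_window = ctypes.c_void_p(RASTER3D_DEFAULT_WINDOW)
assert null_window.value is None
```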

+ 4 - 2
lib/imagery/testsuite/test_imagery_signature_management.py

@@ -40,17 +40,19 @@ from grass.lib.imagery import (
     I_make_signatures_dir,
 )
 
+H_DIRSEP = HOST_DIRSEP.decode("utf-8")
+
 
 class GetSignaturesElementTestCase(TestCase):
     def test_get_sig(self):
         cdir = ctypes.create_string_buffer(GNAME_MAX)
         I_get_signatures_dir(cdir, I_SIGFILE_TYPE_SIG)
-        self.assertEqual(utils.decode(cdir.value), f"signatures{HOST_DIRSEP}sig")
+        self.assertEqual(utils.decode(cdir.value), f"signatures{H_DIRSEP}sig")
 
     def test_get_sigset(self):
         cdir = ctypes.create_string_buffer(GNAME_MAX)
         I_get_signatures_dir(cdir, I_SIGFILE_TYPE_SIGSET)
-        self.assertEqual(utils.decode(cdir.value), f"signatures{HOST_DIRSEP}sigset")
+        self.assertEqual(utils.decode(cdir.value), f"signatures{H_DIRSEP}sigset")
 
 
 class MakeSignaturesElementTestCase(TestCase):
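
A minimal, self-contained sketch of why the decode is needed: with the community ctypesgen the HOST_DIRSEP macro arrives as a bytes object, which cannot be dropped into an f-string directly (the literal below is a stand-in, not the generated value):

```python
HOST_DIRSEP = b"/"  # stand-in for the generated bytes constant
H_DIRSEP = HOST_DIRSEP.decode("utf-8")

# Interpolating the bytes object would render as "signaturesb'/'sig";
# decoding once up front keeps the expected paths correct.
assert f"signatures{H_DIRSEP}sig" == "signatures/sig"
```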

+ 19 - 19
lib/imagery/testsuite/test_imagery_sigsetfile.py

@@ -36,7 +36,7 @@ from grass.lib.imagery import (
     I_init_group_ref,
     I_add_file_to_group_ref,
     I_free_group_ref,
-    String,
+    ReturnString,
 )
 
 
@@ -71,11 +71,11 @@ class SigSetFileTestCase(TestCase):
         self.assertEqual(So.ClassSig[0].nsubclasses, 1)
 
         # Fill sigset struct with data
-        So.title = String("Signature title")
+        So.title = ReturnString("Signature title")
         So.bandrefs[0] = ctypes.create_string_buffer(b"The_Doors")
         So.ClassSig[0].used = 1
         So.ClassSig[0].classnum = 2
-        So.ClassSig[0].title = String("1st class")
+        So.ClassSig[0].title = ReturnString("1st class")
         So.ClassSig[0].type = 1
         So.ClassSig[0].SubSig[0].pi = 3.14
         So.ClassSig[0].SubSig[0].means[0] = 42.42
@@ -124,11 +124,11 @@ class SigSetFileTestCase(TestCase):
         self.assertEqual(So.ClassSig[0].nsubclasses, 1)
 
         # Fill sigset struct with data
-        So.title = String("Signature title")
+        So.title = ReturnString("Signature title")
         So.bandrefs[0] = ctypes.create_string_buffer(tempname(252).encode())
         So.ClassSig[0].used = 1
         So.ClassSig[0].classnum = 2
-        So.ClassSig[0].title = String("1st class")
+        So.ClassSig[0].title = ReturnString("1st class")
         So.ClassSig[0].type = 1
         So.ClassSig[0].SubSig[0].pi = 3.14
         So.ClassSig[0].SubSig[0].means[0] = 42.42
@@ -163,12 +163,12 @@ class SigSetFileTestCase(TestCase):
         self.assertEqual(So.ClassSig[0].nsubclasses, 1)
 
         # Fill sigset struct with data
-        So.title = String("Signature title")
+        So.title = ReturnString("Signature title")
         So.bandrefs[0] = ctypes.create_string_buffer(b"The_Doors")
         So.bandrefs[1] = ctypes.create_string_buffer(b"The_Who")
         So.ClassSig[0].used = 1
         So.ClassSig[0].classnum = 2
-        So.ClassSig[0].title = String("1st class")
+        So.ClassSig[0].title = ReturnString("1st class")
         So.ClassSig[0].type = 1
         So.ClassSig[0].SubSig[0].pi = 3.14
         So.ClassSig[0].SubSig[0].means[0] = 42.42
@@ -254,11 +254,11 @@ class SortSigSetByBandrefTest(TestCase):
         self.assertEqual(S.nclasses, 1)
         I_NewSubSig(ctypes.byref(S), ctypes.byref(S.ClassSig[0]))
         self.assertEqual(S.ClassSig[0].nsubclasses, 1)
-        S.title = String("Signature title")
+        S.title = ReturnString("Signature title")
         S.bandrefs[0] = ctypes.create_string_buffer(b"The_Troggs")
         S.ClassSig[0].used = 1
         S.ClassSig[0].classnum = 2
-        S.ClassSig[0].title = String("1st class")
+        S.ClassSig[0].title = ReturnString("1st class")
         S.ClassSig[0].type = 1
         S.ClassSig[0].SubSig[0].pi = 3.14
         S.ClassSig[0].SubSig[0].means[0] = 42.42
@@ -299,11 +299,11 @@ class SortSigSetByBandrefTest(TestCase):
         self.assertEqual(S.nclasses, 1)
         I_NewSubSig(ctypes.byref(S), ctypes.byref(S.ClassSig[0]))
         self.assertEqual(S.ClassSig[0].nsubclasses, 1)
-        S.title = String("Signature title")
+        S.title = ReturnString("Signature title")
         S.bandrefs[0] = ctypes.create_string_buffer(b"The_Troggs")
         S.ClassSig[0].used = 1
         S.ClassSig[0].classnum = 2
-        S.ClassSig[0].title = String("1st class")
+        S.ClassSig[0].title = ReturnString("1st class")
         S.ClassSig[0].type = 1
         S.ClassSig[0].SubSig[0].pi = 3.14
         S.ClassSig[0].SubSig[0].means[0] = 42.42
@@ -345,11 +345,11 @@ class SortSigSetByBandrefTest(TestCase):
         self.assertEqual(S.nclasses, 1)
         I_NewSubSig(ctypes.byref(S), ctypes.byref(S.ClassSig[0]))
         self.assertEqual(S.ClassSig[0].nsubclasses, 1)
-        S.title = String("Signature title")
+        S.title = ReturnString("Signature title")
         S.bandrefs[0] = ctypes.create_string_buffer(b"The_Who")
         S.ClassSig[0].used = 1
         S.ClassSig[0].classnum = 2
-        S.ClassSig[0].title = String("1st class")
+        S.ClassSig[0].title = ReturnString("1st class")
         S.ClassSig[0].type = 1
         S.ClassSig[0].SubSig[0].pi = 3.14
         S.ClassSig[0].SubSig[0].means[0] = 42.42
@@ -394,11 +394,11 @@ class SortSigSetByBandrefTest(TestCase):
         self.assertEqual(S.nclasses, 1)
         I_NewSubSig(ctypes.byref(S), ctypes.byref(S.ClassSig[0]))
         self.assertEqual(S.ClassSig[0].nsubclasses, 1)
-        S.title = String("Signature title")
+        S.title = ReturnString("Signature title")
         S.bandrefs[0] = ctypes.create_string_buffer(b"The_Doors")
         S.ClassSig[0].used = 1
         S.ClassSig[0].classnum = 2
-        S.ClassSig[0].title = String("1st class")
+        S.ClassSig[0].title = ReturnString("1st class")
         S.ClassSig[0].type = 1
         S.ClassSig[0].SubSig[0].pi = 3.14
         S.ClassSig[0].SubSig[0].means[0] = 42.42
@@ -439,12 +439,12 @@ class SortSigSetByBandrefTest(TestCase):
         self.assertEqual(S.nclasses, 1)
         I_NewSubSig(ctypes.byref(S), ctypes.byref(S.ClassSig[0]))
         self.assertEqual(S.ClassSig[0].nsubclasses, 1)
-        S.title = String("Signature title")
+        S.title = ReturnString("Signature title")
         S.bandrefs[0] = ctypes.create_string_buffer(b"The_Who")
         S.bandrefs[1] = ctypes.create_string_buffer(b"The_Doors")
         S.ClassSig[0].used = 1
         S.ClassSig[0].classnum = 2
-        S.ClassSig[0].title = String("1st class")
+        S.ClassSig[0].title = ReturnString("1st class")
         S.ClassSig[0].type = 1
         S.ClassSig[0].SubSig[0].pi = 3.14
         S.ClassSig[0].SubSig[0].means[0] = 42.42
@@ -498,12 +498,12 @@ class SortSigSetByBandrefTest(TestCase):
         self.assertEqual(S.nclasses, 1)
         I_NewSubSig(ctypes.byref(S), ctypes.byref(S.ClassSig[0]))
         self.assertEqual(S.ClassSig[0].nsubclasses, 1)
-        S.title = String("Signature title")
+        S.title = ReturnString("Signature title")
         S.bandrefs[0] = ctypes.create_string_buffer(b"The_Who")
         S.bandrefs[1] = ctypes.create_string_buffer(b"The_Doors")
         S.ClassSig[0].used = 1
         S.ClassSig[0].classnum = 2
-        S.ClassSig[0].title = String("1st class")
+        S.ClassSig[0].title = ReturnString("1st class")
         S.ClassSig[0].type = 1
         S.ClassSig[0].SubSig[0].pi = 3.14
         S.ClassSig[0].SubSig[0].means[0] = 42.42
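
A short usage sketch of the switch above (it assumes a built GRASS Python package; the behaviour noted in the comment is inferred from the commit message "Use ReturnString to create String from str object", not from the preamble source):

```python
from grass.lib.imagery import ReturnString  # as imported in the diff above

# ReturnString builds the ctypesgen String wrapper from a plain Python str,
# which is what char* struct members such as SigSet.title expect here.
title = ReturnString("Signature title")
```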

+ 11 - 10
python/grass/ctypes/Makefile

@@ -61,8 +61,7 @@ ifneq ($(findstring darwin,$(ARCH)),)
 MAC_FLAGS  = "-D_Nullable="
 endif
 
-SED = sed
-CTYPESGEN = ./ctypesgen.py
+CTYPESGEN = ./run.py
 CTYPESFLAGS = --cpp "$(CC) -E $(CPPFLAGS) $(LFS_CFLAGS) $(MAC_FLAGS) $(EXTRA_CFLAGS) $(NLS_CFLAGS) $(DEFS) $(EXTRA_INC) $(INC) -D__GLIBC_HAVE_LONG_LONG"
 EXTRA_CLEAN_FILES := $(wildcard ctypesgencore/*.pyc) $(wildcard ctypesgencore/*/*.pyc)
 
@@ -76,12 +75,14 @@ PYDIR = $(ETC)/python
 GDIR = $(PYDIR)/grass
 DSTDIR = $(GDIR)/lib
 
-PYFILES  := $(patsubst %,$(DSTDIR)/%.py,$(MODULES) __init__ ctypes_preamble ctypes_loader)
-PYCFILES  := $(patsubst %,$(DSTDIR)/%.pyc,$(MODULES) __init__ ctypes_preamble ctypes_loader)
+PYFILES  := $(patsubst %,$(DSTDIR)/%.py,$(MODULES))
+PYCFILES  := $(patsubst %,$(DSTDIR)/%.pyc,$(MODULES))
 LPYFILES := $(patsubst %,$(OBJDIR)/%.py,$(MODULES))
 
+COPY_FILES = $(DSTDIR)/ctypes_loader.py $(DSTDIR)/ctypes_preamble.py
+
 ifeq ($(strip $(GRASS_LIBRARY_TYPE)),shlib)
-default:
+default: $(COPY_FILES)
 	$(MAKE) $(DSTDIR)
 	$(MAKE) $(LPYFILES) $(PYFILES) $(PYCFILES)
 else
@@ -90,13 +91,13 @@ default:
 	exit 1
 endif
 
-$(DSTDIR)/__init__.py: __init__.py | $(DSTDIR)
-	$(INSTALL_DATA) $< $@
+$(COPY_FILES): | $(DSTDIR)
+$(DSTDIR)/ctypes_loader.py: ctypesgen/libraryloader.py
+	cp -f $< $@
+$(DSTDIR)/ctypes_preamble.py: ctypesgen/printer_python/preamble/3_2.py
+	cp -f $< $@
 
 $(DSTDIR)/%.py: $(OBJDIR)/%.py | $(DSTDIR)
-	$(SED) -f fix.sed $< > $@
-
-$(DSTDIR)/ctypes_%.py: %.py | $(DSTDIR)
 	$(INSTALL_DATA) $< $@
 
 define module_rule

+ 233 - 0
python/grass/ctypes/README.md

@@ -0,0 +1,233 @@
+## Notes on ctypesgen
+
+Currently installed version:
+https://github.com/ctypesgen/ctypesgen/commit/0681f8ef1742206c171d44b7872c700f34ffe044 (3 March 2020)
+
+
+### How to update ctypesgen version
+
+1. Replace the GRASS directory `python/grass/ctypes/ctypesgen` with the `ctypesgen`
+   directory from ctypesgen source directory.
+2. Replace `python/grass/ctypes/run.py` with `run.py` from ctypesgen source directory.
+3. Apply the patches below.
+4. Update this document with info on installed ctypesgen version.
+5. If a patch has been addressed upstream, also remove its section from this document.
+
+### Patches
+
+It is highly encouraged to report patches needed for GRASS [upstream](https://github.com/ctypesgen/ctypesgen).
+
+#### POINTER patch
+
+https://trac.osgeo.org/grass/ticket/2748
+https://trac.osgeo.org/grass/ticket/3641
+
+Every generated GRASS library bridge part (gis.py, raster.py etc.) is a standalone
+product. E.g. parts of libgis (include/grass/gis.h) used in libraster
+(etc/python/grass/lib/raster.py) are also defined in raster.py. As a result there
+is a definition of `struct_Cell_head` in both gis.py and raster.py -- a situation
+ctypes doesn't approve of. This patch seems to fix the related errors in GRASS.
+Manually removing e.g. the gis.py parts from raster.py and replacing them with an
+import also did the trick, but that would be more difficult to implement as part
+of ctypesgen file generation.
+
+```diff
+--- ctypesgen/printer_python/preamble/3_2.py.orig
++++ ctypesgen/printer_python/preamble/3_2.py
+@@ -14,6 +14,24 @@
+ del _int_types
+ 
+ 
++def POINTER(obj):
++    p = ctypes.POINTER(obj)
++
++    # Convert None to a real NULL pointer to work around bugs
++    # in how ctypes handles None on 64-bit platforms
++    if not isinstance(p.from_param, classmethod):
++
++        def from_param(cls, x):
++            if x is None:
++                return cls()
++            else:
++                return x
++
++        p.from_param = classmethod(from_param)
++
++    return p
++
++
+ class UserString:
+     def __init__(self, seq):
+         if isinstance(seq, bytes):
+
+```
+
+#### Loader and preamble patch
+
+Replaces the sed step introduced with:
+"Move ctypesgen boilerplate to common module"
+
+https://github.com/OSGeo/grass/commit/59eeff479cd39fd503e276d164977648938cc85b
+
+
+```diff
+--- ctypesgen/printer_python/printer.py.orig
++++ ctypesgen/printer_python/printer.py
+@@ -156,19 +156,22 @@
+         path, v = get_preamble(**m.groupdict())
+ 
+         self.file.write("# Begin preamble for Python v{}\n\n".format(v))
+-        preamble_file = open(path, "r")
+-        self.file.write(preamble_file.read())
+-        preamble_file.close()
++        self.file.write("from .ctypes_preamble import *\n")
++        self.file.write("from .ctypes_preamble import _variadic_function\n")
++        # preamble_file = open(path, "r")
++        # self.file.write(preamble_file.read())
++        # preamble_file.close()
+         self.file.write("\n# End preamble\n")
+ 
+     def print_loader(self):
+         self.file.write("_libs = {}\n")
+         self.file.write("_libdirs = %s\n\n" % self.options.compile_libdirs)
+         self.file.write("# Begin loader\n\n")
+-        path = path_to_local_file("libraryloader.py", libraryloader)
+-        loader_file = open(path, "r")
+-        self.file.write(loader_file.read())
+-        loader_file.close()
++        self.file.write("from .ctypes_loader import *\n")        
++        # path = path_to_local_file("libraryloader.py", libraryloader)
++        # loader_file = open(path, "r")
++        # self.file.write(loader_file.read())
++        # loader_file.close()
+         self.file.write("\n# End loader\n\n")
+         self.file.write(
+             "add_library_search_dirs([%s])"
+
+```
+
+#### Mac specific patches
+
+Enable Ctypesgen parsing of non-utf8 files on macOS
+https://github.com/OSGeo/grass/pull/385
+
+```diff
+--- ctypesgen/parser/preprocessor.py.orig
++++ ctypesgen/parser/preprocessor.py
+@@ -160,9 +160,32 @@
+         self.cparser.handle_status(cmd)
+ 
+         pp = subprocess.Popen(
+-            cmd, shell=True, universal_newlines=True, stdout=subprocess.PIPE, stderr=subprocess.PIPE
++            cmd,
++            shell=True,
++            universal_newlines=True,
++            stdout=subprocess.PIPE,
++            stderr=subprocess.PIPE,
+         )
+-        ppout, pperr = pp.communicate()
++        try:
++            ppout, pperr = pp.communicate()
++        except UnicodeError:
++            # Fix for https://trac.osgeo.org/grass/ticket/3883,
++            # handling file(s) encoded with mac_roman
++            if sys.platform == "darwin":
++                pp = subprocess.Popen(
++                    cmd,
++                    shell=True,
++                    universal_newlines=False,  # read as binary
++                    stdout=subprocess.PIPE,
++                    stderr=subprocess.PIPE,
++                )
++                ppout, pperr = pp.communicate()
++
++                data = ppout.decode("utf8", errors="replace")
++                ppout = data.replace("\r\n", "\n").replace("\r", "\n")
++                pperr = pperr.decode("utf8", errors="replace")
++            else:
++                raise UnicodeError
+ 
+         for line in pperr.split("\n"):
+             if line:
+
+```
+
+
+macOS: use `@rpath` as dynamic linker
+https://github.com/OSGeo/grass/pull/981
+
+```diff
+--- ctypesgen/libraryloader.py.orig
++++ ctypesgen/libraryloader.py
+@@ -168,6 +168,7 @@
+         dyld_fallback_library_path = _environ_path("DYLD_FALLBACK_LIBRARY_PATH")
+         if not dyld_fallback_library_path:
+             dyld_fallback_library_path = [os.path.expanduser("~/lib"), "/usr/local/lib", "/usr/lib"]
++        dyld_fallback_library_path.extend(_environ_path('LD_RUN_PATH'))
+ 
+         dirs = []
+
+```
+
+
+#### Windows specific patches
+
+The type `__int64` isn't defined in ctypesgen
+https://trac.osgeo.org/grass/ticket/3506
+
+```diff
+--- ctypesgen/ctypedescs.py.orig
++++ ctypesgen/ctypedescs.py
+@@ -41,6 +41,7 @@ ctypes_type_map = {
+     ("int16_t", True, 0): "c_int16",
+     ("int32_t", True, 0): "c_int32",
+     ("int64_t", True, 0): "c_int64",
++    ("__int64", True, 0): "c_int64",
+     ("uint8_t", True, 0): "c_uint8",
+     ("uint16_t", True, 0): "c_uint16",
+     ("uint32_t", True, 0): "c_uint32",
+
+```
+
+Patch for OSGeo4W packaging, adapted from
+https://github.com/jef-n/OSGeo4W/blob/master/src/grass/osgeo4w/patch
+
+```diff
+--- ctypesgen/libraryloader.py.orig
++++ ctypesgen/libraryloader.py
+@@ -321,6 +321,12 @@
+ class WindowsLibraryLoader(LibraryLoader):
+     name_formats = ["%s.dll", "lib%s.dll", "%slib.dll", "%s"]
+ 
++    def __init__(self):
++        super().__init__()
++        for p in os.getenv("PATH").split(";"):
++            if os.path.exists(p) and hasattr(os, "add_dll_directory"):
++                os.add_dll_directory(p)
++
+     class Lookup(LibraryLoader.Lookup):
+         def __init__(self, path):
+             super(WindowsLibraryLoader.Lookup, self).__init__(path)
+
+```
+
+Invoke the preprocessor via `sh.exe`, a workaround to get the -I switches recognized.
+https://trac.osgeo.org/grass/ticket/1125#comment:21
+
+https://github.com/OSGeo/grass/commit/65eef4767aa416ca55f7e36f62dce7ce083fe450
+
+```diff
+--- ctypesgen/parser/preprocessor.py.orig
++++ ctypesgen/parser/preprocessor.py
+@@ -159,6 +159,9 @@
+ 
+         self.cparser.handle_status(cmd)
+ 
++        if sys.platform == "win32":
++            cmd = ["sh.exe", "-c", cmd]
++
+         pp = subprocess.Popen(
+             cmd, shell=True, universal_newlines=True, stdout=subprocess.PIPE, stderr=subprocess.PIPE
+         )
+
+```
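
After the "Loader and preamble patch" above is applied, the head of a generated wrapper (e.g. etc/python/grass/lib/gis.py) should look roughly like the sketch below; it mirrors the write() calls in the patched printer, but the exact preamble version string and `_libdirs` value may differ:

```python
# Begin preamble for Python v3_2
from .ctypes_preamble import *
from .ctypes_preamble import _variadic_function
# End preamble

_libs = {}
_libdirs = []

# Begin loader
from .ctypes_loader import *
# End loader
```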

+ 0 - 178
python/grass/ctypes/ctypesgen.py

@@ -1,178 +0,0 @@
-#!/usr/bin/env python3
-
-
-def find_names_in_modules(modules):
-    names = set()
-    for module in modules:
-        try:
-            mod = __import__(module)
-        except:
-            pass
-        else:
-            names.union(dir(module))
-    return names
-
-import optparse
-import sys
-
-import ctypesgencore
-from ctypesgencore import messages as msgs
-
-
-def option_callback_W(option, opt, value, parser):
-    # Options preceded by a "-Wl," are simply treated as though the "-Wl,"
-    # is not there? I don't understand the purpose of this code...
-    if len(value) < 4 or value[0:3] != 'l,-':
-        raise optparse.BadOptionError("not in '-Wl,<opt>' form: %s%s"
-                                      % (opt, value))
-    opt = value[2:]
-    if opt not in ['-L', '-R', '--rpath']:
-        raise optparse.BadOptionError("-Wl option must be -L, -R"
-                                      " or --rpath, not " + value[2:])
-    # Push the linker option onto the list for further parsing.
-    parser.rargs.insert(0, value)
-
-
-def option_callback_libdir(option, opt, value, parser):
-    # There are two sets of linker search paths: those for use at compile time
-    # and those for use at runtime. Search paths specified with -L, -R, or
-    # --rpath are added to both sets.
-    parser.values.compile_libdirs.append(value)
-    parser.values.runtime_libdirs.append(value)
-
-
-if __name__ == "__main__":
-    usage = 'usage: %prog [options] /path/to/header.h ...'
-    op = optparse.OptionParser(usage=usage)
-
-    # Parameters
-    op.add_option('-o', '--output', dest='output', metavar='FILE',
-                  help='write wrapper to FILE')
-    op.add_option('-l', '--library', dest='libraries', action='append',
-                  default=[], metavar='LIBRARY', help='link to LIBRARY')
-    op.add_option('', '--include', dest='other_headers', action='append',
-                  default=[], metavar='HEADER',
-                  help='include system header HEADER (e.g. stdio.h or stdlib.h)')
-    op.add_option('-m', '--module', '--link-module', action='append',
-                  dest='modules', metavar='MODULE', default=[],
-                  help='use symbols from Python module MODULE')
-    op.add_option('-I', '--includedir', dest='include_search_paths',
-                  action='append', default=[], metavar='INCLUDEDIR',
-                  help='add INCLUDEDIR as a directory to search for headers')
-    op.add_option('-W', action="callback", callback=option_callback_W,
-                  metavar="l,OPTION", type="str",
-                  help="where OPTION is -L, -R, or --rpath")
-    op.add_option("-L", "-R", "--rpath", "--libdir", action="callback",
-                  callback=option_callback_libdir, metavar="LIBDIR", type="str",
-                  help="Add LIBDIR to the search path (both compile-time and run-time)")
-    op.add_option('', "--compile-libdir", action="append",
-                  dest="compile_libdirs", metavar="LIBDIR", default=[],
-                  help="Add LIBDIR to the compile-time library search path.")
-    op.add_option('', "--runtime-libdir", action="append",
-                  dest="runtime_libdirs", metavar="LIBDIR", default=[],
-                  help="Add LIBDIR to the run-time library search path.")
-
-    # Parser options
-    op.add_option('', '--cpp', dest='cpp', default='gcc -E',
-                  help='The command to invoke the c preprocessor, including any '
-                  'necessary options (default: gcc -E)')
-    op.add_option('', '--save-preprocessed-headers', metavar='FILENAME',
-                  dest='save_preprocessed_headers', default=None,
-                  help='Save the preprocessed headers to the specified FILENAME')
-
-    # Processor options
-    op.add_option('-a', '--all-headers', action='store_true',
-                  dest='all_headers', default=False,
-                  help='include symbols from all headers, including system headers')
-    op.add_option('', '--builtin-symbols', action='store_true',
-                  dest='builtin_symbols', default=False,
-                  help='include symbols automatically generated by the preprocessor')
-    op.add_option('', '--no-macros', action='store_false', dest='include_macros',
-                  default=True, help="Don't output macros.")
-    op.add_option('-i', '--include-symbols', dest='include_symbols',
-                  default=None, help='regular expression for symbols to always include')
-    op.add_option('-x', '--exclude-symbols', dest='exclude_symbols',
-                  default=None, help='regular expression for symbols to exclude')
-    op.add_option('', '--no-stddef-types', action='store_true',
-                  dest='no_stddef_types', default=False,
-                  help='Do not support extra C types from stddef.h')
-    op.add_option('', '--no-gnu-types', action='store_true',
-                  dest='no_gnu_types', default=False,
-                  help='Do not support extra GNU C types')
-    op.add_option('', '--no-python-types', action='store_true',
-                  dest='no_python_types', default=False,
-                  help='Do not support extra C types built in to Python')
-
-    # Printer options
-    op.add_option('', '--header-template', dest='header_template', default=None,
-                  metavar='TEMPLATE',
-                  help='Use TEMPLATE as the header template in the output file.')
-    op.add_option('', '--strip-build-path', dest='strip_build_path',
-                  default=None, metavar='BUILD_PATH',
-                  help='Strip build path from header paths in the wrapper file.')
-    op.add_option('', '--insert-file', dest='inserted_files', default=[],
-                  action='append', metavar='FILENAME',
-                  help='Add the contents of FILENAME to the end of the wrapper file.')
-    op.add_option('', '--output-language', dest='output_language', metavar='LANGUAGE',
-                  default='python',
-                  help="Choose output language (`json' or `python' [default])")
-
-    # Error options
-    op.add_option('', "--all-errors", action="store_true", default=False,
-                  dest="show_all_errors", help="Display all warnings and errors even "
-                  "if they would not affect output.")
-    op.add_option('', "--show-long-errors", action="store_true", default=False,
-                  dest="show_long_errors", help="Display long error messages "
-                  "instead of abbreviating error messages.")
-    op.add_option('', "--no-macro-warnings", action="store_false", default=True,
-                  dest="show_macro_warnings", help="Do not print macro warnings.")
-
-    op.set_defaults(**ctypesgencore.options.default_values)
-
-    (options, args) = op.parse_args(list(sys.argv[1:]))
-    options.headers = args
-
-    # Figure out what names will be defined by imported Python modules
-    options.other_known_names = find_names_in_modules(options.modules)
-
-    # Required parameters
-    if len(args) < 1:
-        msgs.error_message('No header files specified', cls='usage')
-        sys.exit(1)
-
-    if options.output is None:
-        msgs.error_message('No output file specified', cls='usage')
-        sys.exit(1)
-
-    if len(options.libraries) == 0:
-        msgs.warning_message('No libraries specified', cls='usage')
-
-    # Check output language
-    printer = None
-    if options.output_language == "python":
-        printer = ctypesgencore.printer.WrapperPrinter
-    elif options.output_language == "json":
-        printer = ctypesgencore.printer_json.WrapperPrinter
-    else:
-        msgs.error_message("No such output language `" +
-                           options.output_language + "'", cls='usage')
-        sys.exit(1)
-
-    # Step 1: Parse
-    descriptions = ctypesgencore.parser.parse(options.headers, options)
-
-    # Step 2: Process
-    ctypesgencore.processor.process(descriptions, options)
-
-    # Step 3: Print
-    ctypesgencore.printer.WrapperPrinter(options.output, options, descriptions)
-
-    msgs.status_message("Wrapping complete.")
-
-    # Correct what may be a common mistake
-    if descriptions.all == []:
-        if not options.all_headers:
-            msgs.warning_message("There wasn't anything of use in the "
-                                 "specified header file(s). Perhaps you meant to run with "
-                                 "--all-headers to include objects from included sub-headers? ",
-                                 cls='usage')

python/grass/ctypes/ctypesgencore/LICENSE → python/grass/ctypes/ctypesgen/LICENSE


+ 28 - 7
python/grass/ctypes/ctypesgencore/__init__.py

@@ -1,4 +1,6 @@
-#!/usr/bin/env python3
+#!/usr/bin/env python
+# -*- coding: us-ascii -*-
+# vim:ts=4:sw=4:softtabstop=4:smarttab:expandtab
 
 """
 Ctypesgencore is the module that contains the main body of ctypesgen - in fact,
@@ -31,7 +33,7 @@ Ctypesgen writes the descriptions to the output file, along with a header.
 
 The package ctypesgen.printer is responsible for the printing stage.
 
-There are three modules in ctypesgencore that describe the format that the
+There are three modules in ctypesgen that describe the format that the
 parser, processor, and printer modules use to pass information. They are:
 
 * descriptions: Classes to represent the descriptions.
@@ -42,15 +44,32 @@ parser, processor, and printer modules use to pass information. They are:
 format.
 """
 
-
-__all__ = ["parser", "processor", "printer",
-           "descriptions", "ctypedescs", "expressions",
-           "messages", "options"]
+__all__ = [
+    "parser",
+    "processor",
+    "printer",
+    "descriptions",
+    "ctypedescs",
+    "expressions",
+    "messages",
+    "options",
+    "version",
+]
 
 # Workhorse modules
 from . import parser
 from . import processor
-from . import printer
+from . import printer_python
+from . import version
+
+try:
+    from . import printer_json
+except ImportError:
+    pass
+
+__version__ = version.VERSION.partition("-")[-1]
+VERSION = __version__
+
 
 # Modules describing internal format
 from . import descriptions
@@ -60,3 +79,5 @@ from . import expressions
 # Helper modules
 from . import messages
 from . import options
+
+printer = printer_python  # Default the printer to generating Python
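
A tiny sketch of what that default means for users of the vendored package (the importable name and sys.path setup are assumptions; the assertion follows directly from the last line of the diff above):

```python
import ctypesgen  # package name per the README above; requires python/grass/ctypes on sys.path

# The Python printer is the default; the JSON printer is only available
# if its optional import succeeded.
assert ctypesgen.printer is ctypesgen.printer_python
```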

+ 118 - 124
python/grass/ctypes/ctypesgencore/ctypedescs.py

@@ -1,7 +1,7 @@
-#!/usr/bin/env python3
+#!/usr/bin/env python
 
-'''
-ctypesgencore.ctypedescs contains classes to represent a C type. All of them
+"""
+ctypesgen.ctypedescs contains classes to represent a C type. All of them
 classes are subclasses of CtypesType.
 
 Unlike in previous versions of ctypesgen, CtypesType and its subclasses are
@@ -17,64 +17,53 @@ representing an array of four integers could be created using:
 >>> ctype = CtypesArray(CtypesSimple("int",True,0),4)
 
 str(ctype) would evaluate to "c_int * 4".
-'''
+"""
 
 import warnings
 
-__docformat__ = 'restructuredtext'
+__docformat__ = "restructuredtext"
 
 ctypes_type_map = {
     # typename   signed  longs
-    ('void', True, 0): 'None',
-    ('int', True, 0): 'c_int',
-    ('int', False, 0): 'c_uint',
-    ('int', True, 1): 'c_long',
-    ('int', False, 1): 'c_ulong',
-    ('int', True, 2): 'c_longlong',
-    ('int', False, 2): 'c_ulonglong',
-    ('char', True, 0): 'c_char',
-    ('char', False, 0): 'c_ubyte',
-    ('short', True, 0): 'c_short',
-    ('short', False, 0): 'c_ushort',
-    ('float', True, 0): 'c_float',
-    ('double', True, 0): 'c_double',
-    ('size_t', True, 0): 'c_size_t',
-    ('int8_t', True, 0): 'c_int8',
-    ('int16_t', True, 0): 'c_int16',
-    ('int32_t', True, 0): 'c_int32',
-    ('int64_t', True, 0): 'c_int64',
-    ('apr_int64_t', True, 0): 'c_int64',
-    ('__int64', True, 0): 'c_int64',
-    ('off64_t', True, 0): 'c_int64',
-    ('uint8_t', True, 0): 'c_uint8',
-    ('uint16_t', True, 0): 'c_uint16',
-    ('uint32_t', True, 0): 'c_uint32',
-    ('uint64_t', True, 0): 'c_uint64',
-    ('apr_uint64_t', True, 0): 'c_uint64',
-    ('wchar_t', True, 0): 'c_wchar',
-    ('ptrdiff_t', True, 0): 'c_ptrdiff_t',  # Requires definition in preamble
-    ('ssize_t', True, 0): 'c_ptrdiff_t',  # Requires definition in preamble
-    ('va_list', True, 0): 'c_void_p',
+    ("void", True, 0): "None",
+    ("int", True, 0): "c_int",
+    ("int", False, 0): "c_uint",
+    ("int", True, 1): "c_long",
+    ("int", False, 1): "c_ulong",
+    ("char", True, 0): "c_char",
+    ("char", False, 0): "c_ubyte",
+    ("short", True, 0): "c_short",
+    ("short", False, 0): "c_ushort",
+    ("float", True, 0): "c_float",
+    ("double", True, 0): "c_double",
+    ("double", True, 1): "c_longdouble",
+    ("int8_t", True, 0): "c_int8",
+    ("int16_t", True, 0): "c_int16",
+    ("int32_t", True, 0): "c_int32",
+    ("int64_t", True, 0): "c_int64",
+    ("__int64", True, 0): "c_int64",
+    ("uint8_t", True, 0): "c_uint8",
+    ("uint16_t", True, 0): "c_uint16",
+    ("uint32_t", True, 0): "c_uint32",
+    ("uint64_t", True, 0): "c_uint64",
+    ("_Bool", True, 0): "c_bool",
 }
 
 ctypes_type_map_python_builtin = {
-    ('int', True, 2): 'c_longlong',
-    ('int', False, 2): 'c_ulonglong',
-    ('size_t', True, 0): 'c_size_t',
-    ('apr_int64_t', True, 0): 'c_int64',
-    ('off64_t', True, 0): 'c_int64',
-    ('apr_uint64_t', True, 0): 'c_uint64',
-    ('wchar_t', True, 0): 'c_wchar',
-    ('ptrdiff_t', True, 0): 'c_ptrdiff_t',  # Requires definition in preamble
-    ('ssize_t', True, 0): 'c_ptrdiff_t',  # Requires definition in preamble
-    ('va_list', True, 0): 'c_void_p',
+    ("int", True, 2): "c_longlong",
+    ("int", False, 2): "c_ulonglong",
+    ("size_t", True, 0): "c_size_t",
+    ("apr_int64_t", True, 0): "c_int64",
+    ("off64_t", True, 0): "c_int64",
+    ("apr_uint64_t", True, 0): "c_uint64",
+    ("wchar_t", True, 0): "c_wchar",
+    ("ptrdiff_t", True, 0): "c_ptrdiff_t",  # Requires definition in preamble
+    ("ssize_t", True, 0): "c_ptrdiff_t",  # Requires definition in preamble
+    ("va_list", True, 0): "c_void_p",
 }
 
 # This protocol is used for walking type trees.
-
-
 class CtypesTypeVisitor(object):
-
     def visit_struct(self, struct):
         pass
 
@@ -95,7 +84,6 @@ class CtypesTypeVisitor(object):
 
 def visit_type_and_collect_info(ctype):
     class Visitor(CtypesTypeVisitor):
-
         def visit_struct(self, struct):
             structs.append(struct)
 
@@ -110,6 +98,7 @@ def visit_type_and_collect_info(ctype):
 
         def visit_identifier(self, identifier):
             identifiers.append(identifier)
+
     structs = []
     enums = []
     typedefs = []
@@ -119,14 +108,13 @@ def visit_type_and_collect_info(ctype):
     ctype.visit(v)
     return structs, enums, typedefs, errors, identifiers
 
-# Remove one level of indirection from function pointer; needed for typedefs
-# and function parameters.
-
 
+# Remove one level of indirection from funtion pointer; needed for typedefs
+# and function parameters.
 def remove_function_pointer(t):
-    if isinstance(t, CtypesPointer) and isinstance(t.destination, CtypesFunction):
+    if type(t) == CtypesPointer and type(t.destination) == CtypesFunction:
         return t.destination
-    elif isinstance(t, CtypesPointer):
+    elif type(t) == CtypesPointer:
         t.destination = remove_function_pointer(t.destination)
         return t
     else:
@@ -134,12 +122,12 @@ def remove_function_pointer(t):
 
 
 class CtypesType(object):
-
     def __init__(self):
+        super(CtypesType, self).__init__()
         self.errors = []
 
     def __repr__(self):
-        return "<Ctype \"%s\">" % self.py_string()
+        return '<Ctype (%s) "%s">' % (type(self).__name__, self.py_string())
 
     def error(self, message, cls=None):
         self.errors.append((message, cls))
@@ -153,22 +141,21 @@ class CtypesSimple(CtypesType):
     """Represents a builtin type, like "char" or "int"."""
 
     def __init__(self, name, signed, longs):
-        CtypesType.__init__(self)
+        super(CtypesSimple, self).__init__()
         self.name = name
         self.signed = signed
         self.longs = longs
 
-    def py_string(self):
+    def py_string(self, ignore_can_be_ctype=None):
         return ctypes_type_map[(self.name, self.signed, self.longs)]
 
 
 class CtypesSpecial(CtypesType):
-
     def __init__(self, name):
-        CtypesType.__init__(self)
+        super(CtypesSpecial, self).__init__()
         self.name = name
 
-    def py_string(self):
+    def py_string(self, ignore_can_be_ctype=None):
         return self.name
 
 
@@ -176,53 +163,50 @@ class CtypesTypedef(CtypesType):
     """Represents a type defined by a typedef."""
 
     def __init__(self, name):
-        CtypesType.__init__(self)
+        super(CtypesTypedef, self).__init__()
         self.name = name
 
     def visit(self, visitor):
         if not self.errors:
             visitor.visit_typedef(self.name)
-        CtypesType.visit(self, visitor)
+        super(CtypesTypedef, self).visit(visitor)
 
-    def py_string(self):
+    def py_string(self, ignore_can_be_ctype=None):
         return self.name
 
 
 class CtypesBitfield(CtypesType):
-
     def __init__(self, base, bitfield):
-        CtypesType.__init__(self)
+        super(CtypesBitfield, self).__init__()
         self.base = base
         self.bitfield = bitfield
 
     def visit(self, visitor):
         self.base.visit(visitor)
-        CtypesType.visit(self, visitor)
+        super(CtypesBitfield, self).visit(visitor)
 
-    def py_string(self):
+    def py_string(self, ignore_can_be_ctype=None):
         return self.base.py_string()
 
 
 class CtypesPointer(CtypesType):
-
     def __init__(self, destination, qualifiers):
-        CtypesType.__init__(self)
+        super(CtypesPointer, self).__init__()
         self.destination = destination
         self.qualifiers = qualifiers
 
     def visit(self, visitor):
         if self.destination:
             self.destination.visit(visitor)
-        CtypesType.visit(self, visitor)
+        super(CtypesPointer, self).visit(visitor)
 
-    def py_string(self):
-        return 'POINTER(%s)' % self.destination.py_string()
+    def py_string(self, ignore_can_be_ctype=None):
+        return "POINTER(%s)" % self.destination.py_string()
 
 
 class CtypesArray(CtypesType):
-
     def __init__(self, base, count):
-        CtypesType.__init__(self)
+        super(CtypesArray, self).__init__()
         self.base = base
         self.count = count
 
@@ -230,42 +214,38 @@ class CtypesArray(CtypesType):
         self.base.visit(visitor)
         if self.count:
             self.count.visit(visitor)
-        CtypesType.visit(self, visitor)
+        super(CtypesArray, self).visit(visitor)
 
-    def py_string(self):
+    def py_string(self, ignore_can_be_ctype=None):
         if self.count is None:
-            return 'POINTER(%s)' % self.base.py_string()
-        if isinstance(self.base, CtypesArray):
-            return '(%s) * %s' % (self.base.py_string(),
-                                  self.count.py_string(False))
+            return "POINTER(%s)" % self.base.py_string()
+        if type(self.base) == CtypesArray:
+            return "(%s) * int(%s)" % (self.base.py_string(), self.count.py_string(False))
         else:
-            return '%s * %s' % (self.base.py_string(),
-                                self.count.py_string(False))
+            return "%s * int(%s)" % (self.base.py_string(), self.count.py_string(False))
 
 
 class CtypesNoErrorCheck(object):
-
-    def py_string(self):
-        return 'None'
+    def py_string(self, ignore_can_be_ctype=None):
+        return "None"
 
     def __bool__(self):
         return False
+
     __nonzero__ = __bool__
 
 
 class CtypesPointerCast(object):
-
     def __init__(self, target):
         self.target = target
 
-    def py_string(self):
-        return 'lambda v,*a : cast(v, {})'.format(self.target.py_string())
+    def py_string(self, ignore_can_be_ctype=None):
+        return "lambda v,*a : cast(v, {})".format(self.target.py_string())
 
 
 class CtypesFunction(CtypesType):
-
-    def __init__(self, restype, parameters, variadic=False):
-        CtypesType.__init__(self)
+    def __init__(self, restype, parameters, variadic, attrib=dict()):
+        super(CtypesFunction, self).__init__()
         self.restype = restype
         self.errcheck = CtypesNoErrorCheck()
 
@@ -273,58 +253,72 @@ class CtypesFunction(CtypesType):
         # when ctypes automagically returns it as an int.
         # Instead, convert to POINTER(c_void).  c_void is not a ctypes type,
         # you can make it any arbitrary type.
-        if isinstance(self.restype, CtypesPointer) and \
-           isinstance(self.restype.destination, CtypesSimple) and \
-           self.restype.destination.name == 'void':
+        if (
+            type(self.restype) == CtypesPointer
+            and type(self.restype.destination) == CtypesSimple
+            and self.restype.destination.name == "void"
+        ):
             # we will provide a means of converting this to a c_void_p
-            self.restype = CtypesPointer(CtypesSpecial('c_ubyte'), ())
-            self.errcheck = CtypesPointerCast(CtypesSpecial('c_void_p'))
+            self.restype = CtypesPointer(CtypesSpecial("c_ubyte"), ())
+            self.errcheck = CtypesPointerCast(CtypesSpecial("c_void_p"))
 
         # Return "String" instead of "POINTER(c_char)"
-        if self.restype.py_string() == 'POINTER(c_char)':
-            if 'const' in self.restype.qualifiers:
-                self.restype = CtypesSpecial('c_char_p')
+        if self.restype.py_string() == "POINTER(c_char)":
+            if "const" in self.restype.qualifiers:
+                self.restype = CtypesSpecial("c_char_p")
             else:
-                self.restype = CtypesSpecial('String')
+                self.restype = CtypesSpecial("String")
 
         self.argtypes = [remove_function_pointer(p) for p in parameters]
         self.variadic = variadic
+        self.attrib = attrib
 
     def visit(self, visitor):
         self.restype.visit(visitor)
         for a in self.argtypes:
             a.visit(visitor)
-        CtypesType.visit(self, visitor)
+        super(CtypesFunction, self).visit(visitor)
+
+    def py_string(self, ignore_can_be_ctype=None):
+        return "CFUNCTYPE(UNCHECKED(%s), %s)" % (
+            self.restype.py_string(),
+            ", ".join([a.py_string() for a in self.argtypes]),
+        )
 
-    def py_string(self):
-        return 'CFUNCTYPE(UNCHECKED(%s), %s)' % (self.restype.py_string(),
-                                                 ', '.join([a.py_string() for a in self.argtypes]))
 
 last_tagnum = 0
 
 
-def anonymous_struct_tag():
+def anonymous_struct_tagnum():
     global last_tagnum
     last_tagnum += 1
-    return 'anon_%d' % last_tagnum
+    return last_tagnum
 
+def fmt_anonymous_struct_tag(num):
+    return "anon_%d" % num
 
-class CtypesStruct(CtypesType):
+def anonymous_struct_tag():
+    return fmt_anonymous_struct_tag(anonymous_struct_tagnum())
 
-    def __init__(self, tag, packed, variety, members, src=None):
-        CtypesType.__init__(self)
+
+class CtypesStruct(CtypesType):
+    def __init__(self, tag, attrib, variety, members, src=None):
+        super(CtypesStruct, self).__init__()
         self.tag = tag
-        self.packed = packed
+        self.attrib = attrib
         self.variety = variety  # "struct" or "union"
         self.members = members
 
-        if not self.tag:
-            self.tag = anonymous_struct_tag()
+        if type(self.tag) == int or not self.tag:
+            if type(self.tag) == int:
+              self.tag = fmt_anonymous_struct_tag(self.tag)
+            else:
+              self.tag = anonymous_struct_tag()
             self.anonymous = True
         else:
             self.anonymous = False
 
-        if self.members is None:
+        if self.members == None:
             self.opaque = True
         else:
             self.opaque = False
@@ -332,7 +326,7 @@ class CtypesStruct(CtypesType):
         self.src = src
 
     def get_required_types(self):
-        types = CtypesType.get_required_types(self)
+        types = super(CtypesStruct, self).get_required_types()
         types.add((self.variety, self.tag))
         return types
 
@@ -341,7 +335,7 @@ class CtypesStruct(CtypesType):
         if not self.opaque:
             for name, ctype in self.members:
                 ctype.visit(visitor)
-        CtypesType.visit(self, visitor)
+        super(CtypesStruct, self).visit(visitor)
 
     def get_subtypes(self):
         if self.opaque:
@@ -349,22 +343,22 @@ class CtypesStruct(CtypesType):
         else:
             return set([m[1] for m in self.members])
 
-    def py_string(self):
+    def py_string(self, ignore_can_be_ctype=None):
         return "%s_%s" % (self.variety, self.tag)
 
+
 last_tagnum = 0
 
 
 def anonymous_enum_tag():
     global last_tagnum
     last_tagnum += 1
-    return 'anon_%d' % last_tagnum
+    return "anon_%d" % last_tagnum
 
 
 class CtypesEnum(CtypesType):
-
     def __init__(self, tag, enumerators, src=None):
-        CtypesType.__init__(self)
+        super(CtypesEnum, self).__init__()
         self.tag = tag
         self.enumerators = enumerators
 
@@ -374,7 +368,7 @@ class CtypesEnum(CtypesType):
         else:
             self.anonymous = False
 
-        if self.enumerators is None:
+        if self.enumerators == None:
             self.opaque = True
         else:
             self.opaque = False
@@ -383,7 +377,7 @@ class CtypesEnum(CtypesType):
 
     def visit(self, visitor):
         visitor.visit_enum(self)
-        CtypesType.visit(self, visitor)
+        super(CtypesEnum, self).visit(visitor)
 
-    def py_string(self):
-        return 'enum_%s' % self.tag
+    def py_string(self, ignore_can_be_ctype=None):
+        return "enum_%s" % self.tag

+ 44 - 21
python/grass/ctypes/ctypesgencore/descriptions.py

@@ -1,7 +1,7 @@
-#!/usr/bin/env python3
+#!/usr/bin/env python
 
 """
-ctypesgencore.descriptions contains classes to represent a description of a
+ctypesgen.descriptions contains classes to represent a description of a
 struct, union, enum, function, constant, variable, or macro. All the
 description classes are subclassed from an abstract base class, Description.
 The descriptions module also contains a class, DescriptionCollection, to hold
@@ -12,8 +12,9 @@ lists of Description objects.
 class DescriptionCollection(object):
     """Represents a collection of Descriptions."""
 
-    def __init__(self, constants, typedefs, structs, enums, functions, variables,
-                 macros, all, output_order):
+    def __init__(
+        self, constants, typedefs, structs, enums, functions, variables, macros, all, output_order
+    ):
         self.constants = constants
         self.typedefs = typedefs
         self.structs = structs
@@ -30,6 +31,7 @@ class Description(object):
     or macro description. Description is an abstract base class."""
 
     def __init__(self, src=None):
+        super(Description, self).__init__()
         self.src = src  # A tuple of (filename, lineno)
 
         # If object will be included in output file. Values are "yes", "never",
@@ -82,14 +84,14 @@ class ConstantDescription(Description):
     """Simple class to contain information about a constant."""
 
     def __init__(self, name, value, src=None):
-        Description.__init__(self, src)
+        super(ConstantDescription, self).__init__(src)
         # Name of constant, a string
         self.name = name
         # Value of constant, as an ExpressionNode object
         self.value = value
 
     def casual_name(self):
-        return "Constant \"%s\"" % self.name
+        return 'Constant "%s"' % self.name
 
     def py_name(self):
         return self.name
@@ -102,12 +104,12 @@ class TypedefDescription(Description):
     """Simple container class for a type definition."""
 
     def __init__(self, name, ctype, src=None):
-        Description.__init__(self, src)
+        super(TypedefDescription, self).__init__(src)
         self.name = name  # Name, a string
         self.ctype = ctype  # The base type as a ctypedescs.CtypeType object
 
     def casual_name(self):
-        return "Typedef \"%s\"" % self.name
+        return 'Typedef "%s"' % self.name
 
     def py_name(self):
         return self.name
@@ -119,11 +121,11 @@ class TypedefDescription(Description):
 class StructDescription(Description):
     """Simple container class for a structure or union definition."""
 
-    def __init__(self, tag, packed, variety, members, opaque, ctype, src=None):
-        Description.__init__(self, src)
+    def __init__(self, tag, attrib, variety, members, opaque, ctype, src=None):
+        super(StructDescription, self).__init__(src)
         # The name of the structure minus the "struct" or "union"
         self.tag = tag
-        self.packed = packed
+        self.attrib = attrib
         # A string "struct" or "union"
         self.variety = variety
         # A list of pairs of (name,ctype)
@@ -134,7 +136,7 @@ class StructDescription(Description):
         self.ctype = ctype
 
     def casual_name(self):
-        return "%s \"%s\"" % (self.variety.capitalize(), self.tag)
+        return '%s "%s"' % (self.variety.capitalize(), self.tag)
 
     def py_name(self):
         return "%s_%s" % (self.variety, self.tag)
@@ -147,7 +149,7 @@ class EnumDescription(Description):
     """Simple container class for an enum definition."""
 
     def __init__(self, tag, members, ctype, src=None):
-        Description.__init__(self, src)
+        super(EnumDescription, self).__init__(src)
         # The name of the enum, minus the "enum"
         self.tag = tag
         # A list of (name,value) pairs where value is a number
@@ -156,7 +158,7 @@ class EnumDescription(Description):
         self.ctype = ctype
 
     def casual_name(self):
-        return "Enum \"%s\"" % self.tag
+        return 'Enum "%s"' % self.tag
 
     def py_name(self):
         return "enum_%s" % self.tag
@@ -168,8 +170,8 @@ class EnumDescription(Description):
 class FunctionDescription(Description):
     """Simple container class for a C function."""
 
-    def __init__(self, name, restype, argtypes, errcheck, variadic=False, src=None):
-        Description.__init__(self, src)
+    def __init__(self, name, restype, argtypes, errcheck, variadic, attrib, src):
+        super(FunctionDescription, self).__init__(src)
         # Name, a string
         self.name = name
         # Name according to C - stored in case description is renamed
@@ -182,9 +184,11 @@ class FunctionDescription(Description):
         self.errcheck = errcheck
         # Does this function accept a variable number of arguments?
         self.variadic = variadic
+        # The set of attributes applied to the function (e.g. stdcall)
+        self.attrib = attrib
 
     def casual_name(self):
-        return "Function \"%s\"" % self.name
+        return 'Function "%s"' % self.name
 
     def py_name(self):
         return self.name
@@ -197,7 +201,7 @@ class VariableDescription(Description):
     """Simple container class for a C variable declaration."""
 
     def __init__(self, name, ctype, src=None):
-        Description.__init__(self, src)
+        super(VariableDescription, self).__init__(src)
         # Name, a string
         self.name = name
         # Name according to C - stored in case description is renamed
@@ -206,7 +210,7 @@ class VariableDescription(Description):
         self.ctype = ctype
 
     def casual_name(self):
-        return "Variable \"%s\"" % self.name
+        return 'Variable "%s"' % self.name
 
     def py_name(self):
         return self.name
@@ -219,16 +223,35 @@ class MacroDescription(Description):
     """Simple container class for a C macro."""
 
     def __init__(self, name, params, expr, src=None):
-        Description.__init__(self, src)
+        super(MacroDescription, self).__init__(src)
         self.name = name
         self.params = params
         self.expr = expr  # ExpressionNode for the macro's body
 
     def casual_name(self):
-        return "Macro \"%s\"" % self.name
+        return 'Macro "%s"' % self.name
 
     def py_name(self):
         return self.name
 
     def c_name(self):
         return self.name
+
+
+class UndefDescription(Description):
+    """Simple container class for a preprocessor #undef directive."""
+
+    def __init__(self, macro, src=None):
+        super(UndefDescription, self).__init__(src)
+        self.include_rule = "if_needed"
+
+        self.macro = macro
+
+    def casual_name(self):
+        return 'Undef "%s"' % self.macro.name
+
+    def py_name(self):
+        return "#undef:%s" % self.macro.name
+
+    def c_name(self):
+        return "#undef %s" % self.macro.name

+ 72 - 75
python/grass/ctypes/ctypesgencore/expressions.py

@@ -1,15 +1,15 @@
-#!/usr/bin/env python3
+#!/usr/bin/env python
 
-'''
+"""
 The expressions module contains classes to represent an expression. The main
 class is ExpressionNode. ExpressionNode's most useful method is py_string(),
 which returns a Python string representing that expression.
-'''
+"""
 
-import keyword
 import sys
-from .ctypedescs import *
 
+from .ctypedescs import *
+import keyword
 
 # Right now, the objects in this module are all oriented toward evaluation.
 # However, they don't have to be, since ctypes objects are mutable. For example,
@@ -27,8 +27,8 @@ from .ctypedescs import *
 
 
 class EvaluationContext(object):
-    '''Interface for evaluating expression nodes.
-    '''
+    """Interface for evaluating expression nodes.
+    """
 
     def evaluate_identifier(self, name):
         warnings.warn('Attempt to evaluate identifier "%s" failed' % name)
@@ -48,7 +48,6 @@ class EvaluationContext(object):
 
 
 class ExpressionNode(object):
-
     def __init__(self):
         self.errors = []
 
@@ -60,7 +59,7 @@ class ExpressionNode(object):
             string = repr(self.py_string(True))
         except ValueError:
             string = "<error in expression node>"
-        return "<ExpressionNode: %s>" % string
+        return "<%s: %s>" % (type(self).__name__, string)
 
     def visit(self, visitor):
         for error, cls in self.errors:
@@ -68,7 +67,6 @@ class ExpressionNode(object):
 
 
 class ConstantExpressionNode(ExpressionNode):
-
     def __init__(self, value):
         ExpressionNode.__init__(self)
         self.value = value
@@ -76,26 +74,17 @@ class ConstantExpressionNode(ExpressionNode):
     def evaluate(self, context):
         return self.value
 
-    try:
-        pos_inf = float('inf')
-        neg_inf = float('-inf')
-    except ValueError:
-        pos_inf = ()
-        neg_inf = ()
-
     def py_string(self, can_be_ctype):
-        if (sys.platform != 'win32' or (sys.platform == 'win32' and
-                                        sys.version_info >= (2, 6))):
+        if sys.platform != "win32" or (sys.platform == "win32" and sys.version_info >= (2, 6)):
             # Windows python did not get infinity support until 2.6
-            if self.value == ConstantExpressionNode.pos_inf:
+            if self.value == float("inf"):
                 return "float('inf')"
-            elif self.value == ConstantExpressionNode.neg_inf:
+            elif self.value == float("-inf"):
                 return "float('-inf')"
         return repr(self.value)
 
 
 class IdentifierExpressionNode(ExpressionNode):
-
     def __init__(self, name):
         ExpressionNode.__init__(self)
         self.name = name
@@ -114,7 +103,6 @@ class IdentifierExpressionNode(ExpressionNode):
 
 
 class ParameterExpressionNode(ExpressionNode):
-
     def __init__(self, name):
         ExpressionNode.__init__(self)
         self.name = name
@@ -132,7 +120,6 @@ class ParameterExpressionNode(ExpressionNode):
 
 
 class UnaryExpressionNode(ExpressionNode):
-
     def __init__(self, name, op, format, child_can_be_ctype, child):
         ExpressionNode.__init__(self)
         self.name = name
@@ -149,16 +136,13 @@ class UnaryExpressionNode(ExpressionNode):
         if self.op:
             return self.op(self.child.evaluate(context))
         else:
-            raise ValueError("The C operator \"%s\" can't be evaluated right "
-                             "now" % self.name)
+            raise ValueError('The C operator "%s" can\'t be evaluated right ' "now" % self.name)
 
     def py_string(self, can_be_ctype):
-        return self.format % \
-            self.child.py_string(self.child_can_be_ctype and can_be_ctype)
+        return self.format % self.child.py_string(self.child_can_be_ctype and can_be_ctype)
 
 
 class SizeOfExpressionNode(ExpressionNode):
-
     def __init__(self, child):
         ExpressionNode.__init__(self)
         self.child = child
@@ -175,13 +159,12 @@ class SizeOfExpressionNode(ExpressionNode):
 
     def py_string(self, can_be_ctype):
         if isinstance(self.child, CtypesType):
-            return 'sizeof(%s)' % self.child.py_string()
+            return "sizeof(%s)" % self.child.py_string()
         else:
-            return 'sizeof(%s)' % self.child.py_string(True)
+            return "sizeof(%s)" % self.child.py_string(True)
 
 
 class BinaryExpressionNode(ExpressionNode):
-
     def __init__(self, name, op, format, can_be_ctype, left, right):
         ExpressionNode.__init__(self)
         self.name = name
@@ -198,20 +181,18 @@ class BinaryExpressionNode(ExpressionNode):
 
     def evaluate(self, context):
         if self.op:
-            return self.op(self.left.evaluate(context),
-                           self.right.evaluate(context))
+            return self.op(self.left.evaluate(context), self.right.evaluate(context))
         else:
-            raise ValueError("The C operator \"%s\" can't be evaluated right "
-                             "now" % self.name)
+            raise ValueError('The C operator "%s" can\'t be evaluated right ' "now" % self.name)
 
     def py_string(self, can_be_ctype):
-        return self.format % \
-            (self.left.py_string(self.can_be_ctype[0] and can_be_ctype),
-             self.right.py_string(self.can_be_ctype[0] and can_be_ctype))
+        return self.format % (
+            self.left.py_string(self.can_be_ctype[0] and can_be_ctype),
+            self.right.py_string(self.can_be_ctype[0] and can_be_ctype),
+        )
 
 
 class ConditionalExpressionNode(ExpressionNode):
-
     def __init__(self, cond, yes, no):
         ExpressionNode.__init__(self)
         self.cond = cond
@@ -231,14 +212,14 @@ class ConditionalExpressionNode(ExpressionNode):
             return self.no.evaluate(context)
 
     def py_string(self, can_be_ctype):
-        return "(%s if %s else %s)" % \
-            (self.yes.py_string(can_be_ctype),
-             self.cond.py_string(True),
-             self.no.py_string(can_be_ctype))
+        return "%s and %s or %s" % (
+            self.cond.py_string(True),
+            self.yes.py_string(can_be_ctype),
+            self.no.py_string(can_be_ctype),
+        )
 
 
 class AttributeExpressionNode(ExpressionNode):
-
     def __init__(self, op, format, base, attribute):
         ExpressionNode.__init__(self)
         self.op = op
@@ -257,19 +238,18 @@ class AttributeExpressionNode(ExpressionNode):
         ExpressionNode.visit(self, visitor)
 
     def evaluate(self, context):
-        return self.op(self.base.evaluate(context), self.attribute)
+        return self.op(self.base.evaluate(context), self.attribute)
 
     def py_string(self, can_be_ctype):
         if can_be_ctype:
-            return self.format % (self.base.py_string(can_be_ctype),
-                                  self.attribute)
+            return self.format % (self.base.py_string(can_be_ctype), self.attribute)
         else:
-            return "(%s.value)" % (self.format %
-                                   (self.base.py_string(can_be_ctype), self.attribute))
+            return "(%s.value)" % (
+                self.format % (self.base.py_string(can_be_ctype), self.attribute)
+            )
 
 
 class CallExpressionNode(ExpressionNode):
-
     def __init__(self, function, arguments):
         ExpressionNode.__init__(self)
         self.function = function
@@ -288,57 +268,74 @@ class CallExpressionNode(ExpressionNode):
     def py_string(self, can_be_ctype):
         function = self.function.py_string(can_be_ctype)
         arguments = [x.py_string(can_be_ctype) for x in self.arguments]
-        if can_be_ctype:
-            return '(%s (%s))' % (function, ", ".join(arguments))
-        else:
-            return '((%s (%s)).value)' % (function, ", ".join(arguments))
-
-# There seems not to be any reasonable way to translate C typecasts
-# into Python. Ctypesgen doesn't try, except for the special case of NULL.
+        return "(%s (%s))" % (function, ", ".join(arguments))
 
 
 class TypeCastExpressionNode(ExpressionNode):
+    """
+    Type cast expressions as handled by ctypesgen.  There is a strong
+    possibility that this does not support all types of casts.
+    """
 
     def __init__(self, base, ctype):
         ExpressionNode.__init__(self)
         self.base = base
         self.ctype = ctype
-        self.isnull = isinstance(ctype, CtypesPointer) and \
-            isinstance(base, ConstantExpressionNode) and \
-            base.value == 0
 
     def visit(self, visitor):
-        # No need to visit ctype because it isn't actually used
         self.base.visit(visitor)
+        self.ctype.visit(visitor)
         ExpressionNode.visit(self, visitor)
 
     def evaluate(self, context):
-        if self.isnull:
-            return None
-        else:
-            return self.base.evaluate(context)
+        return self.base.evaluate(context)
 
     def py_string(self, can_be_ctype):
-        if self.isnull:
-            return "None"
+        if isinstance(self.ctype, CtypesPointer):
+            return "cast({}, {})".format(self.base.py_string(True), self.ctype.py_string())
+        elif isinstance(self.ctype, CtypesStruct):
+            raise TypeError(
+                "conversion to non-scalar type ({}) requested from {}".format(
+                    self.ctype, self.base.py_string(False)
+                )
+            )
         else:
-            return self.base.py_string(can_be_ctype)
+            # In reality, this conversion should only really work if the types
+            # are scalar types.  We won't work really hard to test if the types
+            # are  indeed scalar.
+            # To be backwards compatible, we always return literals for builtin types.
+            # We use a function to convert to integer for c_char types since
+            # c_char can take integer or byte types, but the others can *only*
+            # take non-char arguments.
+            # ord_if_char must be provided by preambles
+            if isinstance(self.ctype, CtypesSimple) and (self.ctype.name, self.ctype.signed) == (
+                "char",
+                True,
+            ):
+                ord_if_char = ""
+            elif isinstance(self.ctype, CtypesSimple) and self.ctype.name == "void":
+                # This is a very simple type cast:  cast everything to (void)
+                # At least one macro from mingw does this
+                return "None"
+            else:
+                ord_if_char = "ord_if_char"
+
+            return "({to} ({ord_if_char}({frm}))).value".format(
+                to=self.ctype.py_string(), ord_if_char=ord_if_char, frm=self.base.py_string(False)
+            )
 
 
 class UnsupportedExpressionNode(ExpressionNode):
-
     def __init__(self, message):
         ExpressionNode.__init__(self)
         self.message = message
-        self.error(message, 'unsupported-type')
+        self.error(message, "unsupported-type")
 
     def evaluate(self, context):
-        raise ValueError("Tried to evaluate an unsupported expression "
-                         "node: %s" % self.message)
+        raise ValueError("Tried to evaluate an unsupported expression " "node: %s" % self.message)
 
     def __repr__(self):
         return "<UnsupportedExpressionNode>"
 
     def py_string(self, can_be_ctype):
-        raise ValueError("Called py_string() an unsupported expression "
-                         "node: %s" % self.message)
+        raise ValueError("Called py_string() an unsupported expression " "node: %s" % self.message)

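A rough sketch of the expression tree the parser builds for a macro body such as "X + 1" and how py_string() renders it; the constructor arguments mirror those used in ctypesparser below, while the identifier name is illustrative:

    from ctypesgen.expressions import (
        BinaryExpressionNode,
        ConstantExpressionNode,
        IdentifierExpressionNode,
    )

    expr = BinaryExpressionNode(
        "addition", (lambda x, y: x + y), "(%s + %s)", (False, False),
        IdentifierExpressionNode("X"), ConstantExpressionNode(1),
    )
    expr.py_string(False)  # should yield "(X + 1)"
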
+ 364 - 0
python/grass/ctypes/ctypesgen/libraryloader.py

@@ -0,0 +1,364 @@
+# ----------------------------------------------------------------------------
+# Copyright (c) 2008 David James
+# Copyright (c) 2006-2008 Alex Holkner
+# All rights reserved.
+#
+# Redistribution and use in source and binary forms, with or without
+# modification, are permitted provided that the following conditions
+# are met:
+#
+#  * Redistributions of source code must retain the above copyright
+#    notice, this list of conditions and the following disclaimer.
+#  * Redistributions in binary form must reproduce the above copyright
+#    notice, this list of conditions and the following disclaimer in
+#    the documentation and/or other materials provided with the
+#    distribution.
+#  * Neither the name of pyglet nor the names of its
+#    contributors may be used to endorse or promote products
+#    derived from this software without specific prior written
+#    permission.
+#
+# THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
+# "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
+# LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS
+# FOR A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE
+# COPYRIGHT OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT,
+# INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING,
+# BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES;
+# LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER
+# CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
+# LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN
+# ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
+# POSSIBILITY OF SUCH DAMAGE.
+# ----------------------------------------------------------------------------
+
+import os.path, re, sys, glob
+import platform
+import ctypes
+import ctypes.util
+
+
+def _environ_path(name):
+    if name in os.environ:
+        return os.environ[name].split(":")
+    else:
+        return []
+
+
+class LibraryLoader(object):
+    # library names formatted specifically for platforms
+    name_formats = ["%s"]
+
+    class Lookup(object):
+        mode = ctypes.DEFAULT_MODE
+
+        def __init__(self, path):
+            super(LibraryLoader.Lookup, self).__init__()
+            self.access = dict(cdecl=ctypes.CDLL(path, self.mode))
+
+        def get(self, name, calling_convention="cdecl"):
+            if calling_convention not in self.access:
+                raise LookupError(
+                    "Unknown calling convention '{}' for function '{}'".format(
+                        calling_convention, name
+                    )
+                )
+            return getattr(self.access[calling_convention], name)
+
+        def has(self, name, calling_convention="cdecl"):
+            if calling_convention not in self.access:
+                return False
+            return hasattr(self.access[calling_convention], name)
+
+        def __getattr__(self, name):
+            return getattr(self.access["cdecl"], name)
+
+    def __init__(self):
+        self.other_dirs = []
+
+    def __call__(self, libname):
+        """Given the name of a library, load it."""
+        paths = self.getpaths(libname)
+
+        for path in paths:
+            try:
+                return self.Lookup(path)
+            except:
+                pass
+
+        raise ImportError("Could not load %s." % libname)
+
+    def getpaths(self, libname):
+        """Return a list of paths where the library might be found."""
+        if os.path.isabs(libname):
+            yield libname
+        else:
+            # search through a prioritized series of locations for the library
+
+            # we first search any specific directories identified by user
+            for dir_i in self.other_dirs:
+                for fmt in self.name_formats:
+                    # dir_i should be absolute already
+                    yield os.path.join(dir_i, fmt % libname)
+
+            # then we search the directory where the generated python interface is stored
+            for fmt in self.name_formats:
+                yield os.path.abspath(os.path.join(os.path.dirname(__file__), fmt % libname))
+
+            # now, use the ctypes tools to try to find the library
+            for fmt in self.name_formats:
+                path = ctypes.util.find_library(fmt % libname)
+                if path:
+                    yield path
+
+            # then we search all paths identified as platform-specific lib paths
+            for path in self.getplatformpaths(libname):
+                yield path
+
+            # Finally, we'll try the users current working directory
+            for fmt in self.name_formats:
+                yield os.path.abspath(os.path.join(os.path.curdir, fmt % libname))
+
+    def getplatformpaths(self, libname):
+        return []
+
+
+# Darwin (Mac OS X)
+
+
+class DarwinLibraryLoader(LibraryLoader):
+    name_formats = [
+        "lib%s.dylib",
+        "lib%s.so",
+        "lib%s.bundle",
+        "%s.dylib",
+        "%s.so",
+        "%s.bundle",
+        "%s",
+    ]
+
+    class Lookup(LibraryLoader.Lookup):
+        # Darwin requires dlopen to be called with mode RTLD_GLOBAL instead
+        # of the default RTLD_LOCAL.  Without this, you end up with
+        # libraries not being loadable, resulting in "Symbol not found"
+        # errors
+        mode = ctypes.RTLD_GLOBAL
+
+    def getplatformpaths(self, libname):
+        if os.path.pathsep in libname:
+            names = [libname]
+        else:
+            names = [format % libname for format in self.name_formats]
+
+        for dir in self.getdirs(libname):
+            for name in names:
+                yield os.path.join(dir, name)
+
+    def getdirs(self, libname):
+        """Implements the dylib search as specified in Apple documentation:
+
+        http://developer.apple.com/documentation/DeveloperTools/Conceptual/
+            DynamicLibraries/Articles/DynamicLibraryUsageGuidelines.html
+
+        Before commencing the standard search, the method first checks
+        the bundle's ``Frameworks`` directory if the application is running
+        within a bundle (OS X .app).
+        """
+
+        dyld_fallback_library_path = _environ_path("DYLD_FALLBACK_LIBRARY_PATH")
+        if not dyld_fallback_library_path:
+            dyld_fallback_library_path = [os.path.expanduser("~/lib"), "/usr/local/lib", "/usr/lib"]
+        dyld_fallback_library_path.extend(_environ_path('LD_RUN_PATH'))
+
+        dirs = []
+
+        if "/" in libname:
+            dirs.extend(_environ_path("DYLD_LIBRARY_PATH"))
+        else:
+            dirs.extend(_environ_path("LD_LIBRARY_PATH"))
+            dirs.extend(_environ_path("DYLD_LIBRARY_PATH"))
+
+        if hasattr(sys, "frozen") and sys.frozen == "macosx_app":
+            dirs.append(os.path.join(os.environ["RESOURCEPATH"], "..", "Frameworks"))
+
+        dirs.extend(dyld_fallback_library_path)
+
+        return dirs
+
+
+# Posix
+
+
+class PosixLibraryLoader(LibraryLoader):
+    _ld_so_cache = None
+
+    _include = re.compile(r"^\s*include\s+(?P<pattern>.*)")
+
+    class _Directories(dict):
+        def __init__(self):
+            self.order = 0
+
+        def add(self, directory):
+            if len(directory) > 1:
+                directory = directory.rstrip(os.path.sep)
+            # only adds and updates order if exists and not already in set
+            if not os.path.exists(directory):
+                return
+            o = self.setdefault(directory, self.order)
+            if o == self.order:
+                self.order += 1
+
+        def extend(self, directories):
+            for d in directories:
+                self.add(d)
+
+        def ordered(self):
+            return (i[0] for i in sorted(self.items(), key=lambda D: D[1]))
+
+    def _get_ld_so_conf_dirs(self, conf, dirs):
+        """
+        Recursive function to help parse all ld.so.conf files, including proper
+        handling of the `include` directive.
+        """
+
+        try:
+            with open(conf) as f:
+                for D in f:
+                    D = D.strip()
+                    if not D:
+                        continue
+
+                    m = self._include.match(D)
+                    if not m:
+                        dirs.add(D)
+                    else:
+                        for D2 in glob.glob(m.group("pattern")):
+                            self._get_ld_so_conf_dirs(D2, dirs)
+        except IOError:
+            pass
+
+    def _create_ld_so_cache(self):
+        # Recreate search path followed by ld.so.  This is going to be
+        # slow to build, and incorrect (ld.so uses ld.so.cache, which may
+        # not be up-to-date).  Used only as fallback for distros without
+        # /sbin/ldconfig.
+        #
+        # We assume the DT_RPATH and DT_RUNPATH binary sections are omitted.
+
+        directories = self._Directories()
+        for name in (
+            "LD_LIBRARY_PATH",
+            "SHLIB_PATH",  # HPUX
+            "LIBPATH",  # OS/2, AIX
+            "LIBRARY_PATH",  # BE/OS
+        ):
+            if name in os.environ:
+                directories.extend(os.environ[name].split(os.pathsep))
+
+        self._get_ld_so_conf_dirs("/etc/ld.so.conf", directories)
+
+        bitage = platform.architecture()[0]
+
+        unix_lib_dirs_list = []
+        if bitage.startswith("64"):
+            # prefer 64 bit if that is our arch
+            unix_lib_dirs_list += ["/lib64", "/usr/lib64"]
+
+        # must include standard libs, since those paths are also used by 64 bit
+        # installs
+        unix_lib_dirs_list += ["/lib", "/usr/lib"]
+        if sys.platform.startswith("linux"):
+            # Try and support multiarch work in Ubuntu
+            # https://wiki.ubuntu.com/MultiarchSpec
+            if bitage.startswith("32"):
+                # Assume Intel/AMD x86 compat
+                unix_lib_dirs_list += ["/lib/i386-linux-gnu", "/usr/lib/i386-linux-gnu"]
+            elif bitage.startswith("64"):
+                # Assume Intel/AMD x86 compat
+                unix_lib_dirs_list += ["/lib/x86_64-linux-gnu", "/usr/lib/x86_64-linux-gnu"]
+            else:
+                # guess...
+                unix_lib_dirs_list += glob.glob("/lib/*linux-gnu")
+        directories.extend(unix_lib_dirs_list)
+
+        cache = {}
+        lib_re = re.compile(r"lib(.*)\.s[ol]")
+        ext_re = re.compile(r"\.s[ol]$")
+        for dir in directories.ordered():
+            try:
+                for path in glob.glob("%s/*.s[ol]*" % dir):
+                    file = os.path.basename(path)
+
+                    # Index by filename
+                    cache_i = cache.setdefault(file, set())
+                    cache_i.add(path)
+
+                    # Index by library name
+                    match = lib_re.match(file)
+                    if match:
+                        library = match.group(1)
+                        cache_i = cache.setdefault(library, set())
+                        cache_i.add(path)
+            except OSError:
+                pass
+
+        self._ld_so_cache = cache
+
+    def getplatformpaths(self, libname):
+        if self._ld_so_cache is None:
+            self._create_ld_so_cache()
+
+        result = self._ld_so_cache.get(libname, set())
+        for i in result:
+            # we iterate through all found paths for library, since we may have
+            # actually found multiple architectures or other library types that
+            # may not load
+            yield i
+
+
+# Windows
+
+
+class WindowsLibraryLoader(LibraryLoader):
+    name_formats = ["%s.dll", "lib%s.dll", "%slib.dll", "%s"]
+
+    def __init__(self):
+        super().__init__()
+        for p in os.getenv("PATH").split(";"):
+            if os.path.exists(p) and hasattr(os, "add_dll_directory"):
+                os.add_dll_directory(p)
+
+    class Lookup(LibraryLoader.Lookup):
+        def __init__(self, path):
+            super(WindowsLibraryLoader.Lookup, self).__init__(path)
+            self.access["stdcall"] = ctypes.windll.LoadLibrary(path)
+
+
+# Platform switching
+
+# If your value of sys.platform does not appear in this dict, please contact
+# the Ctypesgen maintainers.
+
+loaderclass = {
+    "darwin": DarwinLibraryLoader,
+    "cygwin": WindowsLibraryLoader,
+    "win32": WindowsLibraryLoader,
+    "msys": WindowsLibraryLoader,
+}
+
+load_library = loaderclass.get(sys.platform, PosixLibraryLoader)()
+
+
+def add_library_search_dirs(other_dirs):
+    """
+    Add libraries to search paths.
+    If library paths are relative, convert them to absolute with respect to this
+    file's directory
+    """
+    for F in other_dirs:
+        if not os.path.isabs(F):
+            F = os.path.abspath(F)
+        load_library.other_dirs.append(F)
+
+
+del loaderclass

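A minimal sketch of how a generated wrapper is expected to drive this loader; the library name, search directory, and symbol are placeholders:

    from ctypesgen import libraryloader

    libraryloader.add_library_search_dirs(["../lib"])  # relative dirs become absolute
    _lib = libraryloader.load_library("grass_gis")     # returns a Lookup around ctypes.CDLL
    if _lib.has("G_gisbase"):
        G_gisbase = _lib.get("G_gisbase", calling_convention="cdecl")
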
+ 391 - 0
python/grass/ctypes/ctypesgen/main.py

@@ -0,0 +1,391 @@
+# -*- coding: us-ascii -*-
+# vim:ts=4:sw=4:softtabstop=4:smarttab:expandtab
+"""
+Main loop for ctypesgen.
+"""
+
+import optparse, sys
+
+from . import options as core_options
+from . import parser as core_parser
+from . import printer_python, printer_json, processor
+from . import messages as msgs
+from . import version
+
+
+def find_names_in_modules(modules):
+    names = set()
+    for module in modules:
+        try:
+            mod = __import__(module)
+        except:
+            pass
+        else:
+            names.update(dir(mod))
+    return names
+
+
+def option_callback_W(option, opt, value, parser):
+    # Options preceded by a "-Wl," are simply treated as though the "-Wl,"
+    # is not there? I don't understand the purpose of this code...
+    if len(value) < 4 or value[0:3] != "l,-":
+        raise optparse.BadOptionError("not in '-Wl,<opt>' form: %s%s" % (opt, value))
+    opt = value[2:]
+    if opt not in ["-L", "-R", "--rpath"]:
+        raise optparse.BadOptionError("-Wl option must be -L, -R" " or --rpath, not " + value[2:])
+    # Push the linker option onto the list for further parsing.
+    parser.rargs.insert(0, value)
+
+
+def option_callback_libdir(option, opt, value, parser):
+    # There are two sets of linker search paths: those for use at compile time
+    # and those for use at runtime. Search paths specified with -L, -R, or
+    # --rpath are added to both sets.
+    parser.values.compile_libdirs.append(value)
+    parser.values.runtime_libdirs.append(value)
+
+
+def main(givenargs=None):
+    usage = "usage: %prog [options] /path/to/header.h ..."
+    op = optparse.OptionParser(usage=usage, version=version.VERSION_NUMBER)
+
+    # Parameters
+    op.add_option(
+        "-o",
+        "--output",
+        dest="output",
+        metavar="FILE",
+        help="write wrapper to FILE [default stdout]",
+    )
+    op.add_option(
+        "-l",
+        "--library",
+        dest="libraries",
+        action="append",
+        default=[],
+        metavar="LIBRARY",
+        help="link to LIBRARY",
+    )
+    op.add_option(
+        "",
+        "--include",
+        dest="other_headers",
+        action="append",
+        default=[],
+        metavar="HEADER",
+        help="include system header HEADER (e.g. stdio.h or stdlib.h)",
+    )
+    op.add_option(
+        "-m",
+        "--module",
+        "--link-module",
+        action="append",
+        dest="modules",
+        metavar="MODULE",
+        default=[],
+        help="use symbols from Python module MODULE",
+    )
+    op.add_option(
+        "-I",
+        "--includedir",
+        dest="include_search_paths",
+        action="append",
+        default=[],
+        metavar="INCLUDEDIR",
+        help="add INCLUDEDIR as a directory to search for headers",
+    )
+    op.add_option(
+        "-W",
+        action="callback",
+        callback=option_callback_W,
+        metavar="l,OPTION",
+        type="str",
+        help="where OPTION is -L, -R, or --rpath",
+    )
+    op.add_option(
+        "-L",
+        "-R",
+        "--rpath",
+        "--libdir",
+        action="callback",
+        callback=option_callback_libdir,
+        metavar="LIBDIR",
+        type="str",
+        help="Add LIBDIR to the search path (both compile-time and run-time)",
+    )
+    op.add_option(
+        "",
+        "--compile-libdir",
+        action="append",
+        dest="compile_libdirs",
+        metavar="LIBDIR",
+        default=[],
+        help="Add LIBDIR to the compile-time library search path.",
+    )
+    op.add_option(
+        "",
+        "--runtime-libdir",
+        action="append",
+        dest="runtime_libdirs",
+        metavar="LIBDIR",
+        default=[],
+        help="Add LIBDIR to the run-time library search path.",
+    )
+
+    # Parser options
+    op.add_option(
+        "",
+        "--cpp",
+        dest="cpp",
+        default="gcc -E",
+        help="The command to invoke the c preprocessor, including any "
+        "necessary options (default: gcc -E)",
+    )
+    op.add_option(
+        "-D",
+        "--define",
+        action="append",
+        dest="cpp_defines",
+        metavar="MACRO",
+        default=[],
+        help="Add a definition to the preprocessor via commandline",
+    )
+    op.add_option(
+        "-U",
+        "--undefine",
+        action="append",
+        dest="cpp_undefines",
+        metavar="NAME",
+        default=[],
+        help="Instruct the preprocessor to undefine the specified macro via commandline",
+    )
+    op.add_option(
+        "",
+        "--save-preprocessed-headers",
+        metavar="FILENAME",
+        dest="save_preprocessed_headers",
+        default=None,
+        help="Save the preprocessed headers to the specified FILENAME",
+    )
+    op.add_option(
+        "",
+        "--optimize-lexer",
+        dest="optimize_lexer",
+        action="store_true",
+        default=False,
+        help="Run the lexer in optimized mode.  This mode requires write "
+        "access to lextab.py file stored within the ctypesgen package.",
+    )
+
+    # Processor options
+    op.add_option(
+        "-a",
+        "--all-headers",
+        action="store_true",
+        dest="all_headers",
+        default=False,
+        help="include symbols from all headers, including system headers",
+    )
+    op.add_option(
+        "",
+        "--builtin-symbols",
+        action="store_true",
+        dest="builtin_symbols",
+        default=False,
+        help="include symbols automatically generated by the preprocessor",
+    )
+    op.add_option(
+        "",
+        "--no-macros",
+        action="store_false",
+        dest="include_macros",
+        default=True,
+        help="Don't output macros.",
+    )
+    op.add_option(
+        "",
+        "--no-undefs",
+        action="store_false",
+        dest="include_undefs",
+        default=True,
+        help="Do not remove macro definitions as per #undef directives",
+    )
+    op.add_option(
+        "-i",
+        "--include-symbols",
+        action="append",
+        dest="include_symbols",
+        metavar="REGEXPR",
+        default=[],
+        help="Regular expression for symbols to always include.  Multiple "
+        "instances of this option will be combined into a single expression "
+        "doing something like '(expr1|expr2|expr3)'.",
+    )
+    op.add_option(
+        "-x",
+        "--exclude-symbols",
+        action="append",
+        dest="exclude_symbols",
+        metavar="REGEXPR",
+        default=[],
+        help="Regular expression for symbols to exclude.  Multiple instances "
+        "of this option will be combined into a single expression doing "
+        "something like '(expr1|expr2|expr3)'.",
+    )
+    op.add_option(
+        "",
+        "--no-stddef-types",
+        action="store_true",
+        dest="no_stddef_types",
+        default=False,
+        help="Do not support extra C types from stddef.h",
+    )
+    op.add_option(
+        "",
+        "--no-gnu-types",
+        action="store_true",
+        dest="no_gnu_types",
+        default=False,
+        help="Do not support extra GNU C types",
+    )
+    op.add_option(
+        "",
+        "--no-python-types",
+        action="store_true",
+        dest="no_python_types",
+        default=False,
+        help="Do not support extra C types built in to Python",
+    )
+
+    # Printer options
+    op.add_option(
+        "",
+        "--header-template",
+        dest="header_template",
+        default=None,
+        metavar="TEMPLATE",
+        help="Use TEMPLATE as the header template in the output file.",
+    )
+    op.add_option(
+        "",
+        "--strip-build-path",
+        dest="strip_build_path",
+        default=None,
+        metavar="BUILD_PATH",
+        help="Strip build path from header paths in the wrapper file.",
+    )
+    op.add_option(
+        "",
+        "--insert-file",
+        dest="inserted_files",
+        default=[],
+        action="append",
+        metavar="FILENAME",
+        help="Add the contents of FILENAME to the end of the wrapper file.",
+    )
+    op.add_option(
+        "",
+        "--output-language",
+        dest="output_language",
+        metavar="LANGUAGE",
+        default="py",
+        choices=("py", "py32", "py27", "py25", "json"),
+        help="Choose output language (`py'[default], `py32', `py27', `py25', or "
+        "`json').  The implementation for py32 does appear to be "
+        "compatible down to at least Python2.7.15.  py25 and py27 are in "
+        "any case _not_ compatible with >= Python3.  The default choice "
+        "(py) attempts to select `py32', `py27', or `py25' based on the "
+        "version of Python that runs this script.",
+    )
+    op.add_option(
+        "-P",
+        "--strip-prefix",
+        dest="strip_prefixes",
+        default=[],
+        action="append",
+        metavar="REGEXPR",
+        help="Regular expression to match prefix to strip from all symbols.  "
+        "Multiple instances of this option will be combined into a single "
+        "expression doing something like '(expr1|expr2|expr3)'.",
+    )
+
+    # Error options
+    op.add_option(
+        "",
+        "--all-errors",
+        action="store_true",
+        default=False,
+        dest="show_all_errors",
+        help="Display all warnings and errors even " "if they would not affect output.",
+    )
+    op.add_option(
+        "",
+        "--show-long-errors",
+        action="store_true",
+        default=False,
+        dest="show_long_errors",
+        help="Display long error messages " "instead of abbreviating error messages.",
+    )
+    op.add_option(
+        "",
+        "--no-macro-warnings",
+        action="store_false",
+        default=True,
+        dest="show_macro_warnings",
+        help="Do not print macro warnings.",
+    )
+    op.add_option(
+        "",
+        "--debug-level",
+        dest="debug_level",
+        default=0,
+        type="int",
+        help="Run ctypesgen with specified debug level (also applies to yacc parser)",
+    )
+
+    op.set_defaults(**core_options.default_values)
+
+    (options, args) = op.parse_args(givenargs)
+    options.headers = args
+
+    # Figure out what names will be defined by imported Python modules
+    options.other_known_names = find_names_in_modules(options.modules)
+
+    # Required parameters
+    if len(args) < 1:
+        msgs.error_message("No header files specified", cls="usage")
+        sys.exit(1)
+
+    if len(options.libraries) == 0:
+        msgs.warning_message("No libraries specified", cls="usage")
+
+    # Check output language
+    printer = None
+    if options.output_language.startswith("py"):
+        printer = printer_python.WrapperPrinter
+    elif options.output_language == "json":
+        printer = printer_json.WrapperPrinter
+    else:
+        msgs.error_message("No such output language `" + options.output_language + "'", cls="usage")
+        sys.exit(1)
+
+    # Step 1: Parse
+    descriptions = core_parser.parse(options.headers, options)
+
+    # Step 2: Process
+    processor.process(descriptions, options)
+
+    # Step 3: Print
+    printer(options.output, options, descriptions)
+
+    msgs.status_message("Wrapping complete.")
+
+    # Correct what may be a common mistake
+    if descriptions.all == []:
+        if not options.all_headers:
+            msgs.warning_message(
+                "There wasn't anything of use in the "
+                "specified header file(s). Perhaps you meant to run with "
+                "--all-headers to include objects from included sub-headers? ",
+                cls="usage",
+            )

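For reference, a hedged sketch of driving main() programmatically with the options defined above; the header path, include directory, and library name are placeholders:

    from ctypesgen.main import main

    main([
        "--cpp", "gcc -E",
        "-I", "include",
        "-l", "grass_gis",
        "-o", "gis.py",
        "include/grass/gis.h",
    ])
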
+ 7 - 9
python/grass/ctypes/ctypesgencore/messages.py

@@ -1,7 +1,7 @@
-#!/usr/bin/env python3
+#!/usr/bin/env python
 
 """
-ctypesgencore.messages contains functions to display status, error, or warning
+ctypesgen.messages contains functions to display status, error, or warning
 messages to the user. Warning and error messages are also associated
 with a "message class", which is a string, which currently has no effect.
 
@@ -19,30 +19,28 @@ Warning classes are:
 'rename' - a description has been renamed to avoid a name conflict
 'other' - catchall.
 """
-from __future__ import print_function
 
 import sys
 import logging
 
 __all__ = ["error_message", "warning_message", "status_message"]
 
-log = logging.getLogger('ctypesgen')
+log = logging.getLogger("ctypesgen")
 ch = logging.StreamHandler()  # use stdio
 logging_fmt_str = "%(levelname)s: %(message)s"
 formatter = logging.Formatter(logging_fmt_str)
 ch.setFormatter(formatter)
 log.addHandler(ch)
-# default level that ctypesgen was using with original version
-log.setLevel(logging.INFO)
+log.setLevel(logging.INFO)  # default level that ctypesgen was using with original version
 
 
 def error_message(msg, cls=None):
-    print("Error: %s" % msg)
+    log.error("%s", msg)
 
 
 def warning_message(msg, cls=None):
-    print("Warning: %s" % msg)
+    log.warning("%s", msg)
 
 
 def status_message(msg):
-    print("Status: %s" % msg)
+    log.info("Status: %s", msg)

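A small usage sketch of the logging-based helpers; the message text is illustrative:

    from ctypesgen import messages as msgs

    msgs.status_message("Preprocessing gis.h")                    # INFO: Status: Preprocessing gis.h
    msgs.warning_message("No libraries specified", cls="usage")   # WARNING: No libraries specified
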
+ 11 - 8
python/grass/ctypes/ctypesgencore/options.py

@@ -1,16 +1,14 @@
-#!/usr/bin/env python3
+#!/usr/bin/env python
 
 """
 All of the components of ctypegencore require an argument called "options".
 In command-line usage, this would be an optparser.Values object. However, if
-ctypesgencore is used as a standard Python module, constructing this object
+ctypesgen is used as a standard Python module, constructing this object
 would be a pain. So this module exists to provide a "default" options object
 for convenience.
 """
 
-import copy
-import optparse
-
+import optparse, copy
 
 default_values = {
     "other_headers": [],
@@ -19,11 +17,13 @@ default_values = {
     "compile_libdirs": [],
     "runtime_libdirs": [],
     "cpp": "gcc -E",
+    "cpp_defines": [],
+    "cpp_undefines": [],
     "save_preprocessed_headers": None,
     "all_headers": False,
     "builtin_symbols": False,
-    "include_symbols": None,
-    "exclude_symbols": None,
+    "include_symbols": [],
+    "exclude_symbols": [],
     "show_all_errors": False,
     "show_long_errors": False,
     "show_macro_warnings": True,
@@ -31,12 +31,15 @@ default_values = {
     "inserted_files": [],
     "other_known_names": [],
     "include_macros": True,
+    "include_undefs": True,
     "libraries": [],
     "strip_build_path": None,
-    "output_language": "python",
+    "output_language": "py",
     "no_stddef_types": False,
     "no_gnu_types": False,
     "no_python_types": False,
+    "debug_level": 0,
+    "strip_prefixes": [],
 }
 
 

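A minimal sketch of building an options object from these defaults when using ctypesgen as a library; the header and library names are placeholders:

    import optparse

    from ctypesgen import options as core_options

    opts = optparse.Values(core_options.default_values.copy())
    opts.headers = ["include/grass/gis.h"]
    opts.libraries = ["grass_gis"]
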
+ 2 - 0
python/grass/ctypes/ctypesgen/parser/.gitignore

@@ -0,0 +1,2 @@
+new_parsetab.py
+parser.out

+ 3 - 2
python/grass/ctypes/ctypesgencore/parser/__init__.py

@@ -1,4 +1,4 @@
-#!/usr/bin/env python3
+#!/usr/bin/env python
 
 """
 This package parses C header files and generates lists of functions, typedefs,
@@ -9,7 +9,7 @@ The public interface for this package is the function "parse". Use as follows:
 >>> descriptions = parse(["inputfile1.h","inputfile2.h"], options)
 where "options" is an optparse.Values object.
 
-parse() returns a DescriptionCollection object. See ctypesgencore.descriptions
+parse() returns a DescriptionCollection object. See ctypesgen.descriptions
 for more information.
 
 """
@@ -22,4 +22,5 @@ def parse(headers, options):
     parser.parse()
     return parser.data()
 
+
 __all__ = ["parse"]

+ 290 - 0
python/grass/ctypes/ctypesgen/parser/cdeclarations.py

@@ -0,0 +1,290 @@
+#!/usr/bin/env python
+
+"""
+This file contains classes that represent C declarations. cparser produces
+declarations in this format, and ctypesparser reformats them into a format that
+is not C-specific. The other modules don't need to touch these.
+"""
+
+__docformat__ = "restructuredtext"
+
+# --------------------------------------------------------------------------
+# C Object Model
+# --------------------------------------------------------------------------
+
+
+class Declaration(object):
+    def __init__(self):
+        self.declarator = None
+        self.type = Type()
+        self.storage = None
+        self.attrib = Attrib()
+
+    def __repr__(self):
+        d = {"declarator": self.declarator, "type": self.type}
+        if self.storage:
+            d["storage"] = self.storage
+        l = ["%s=%r" % (k, v) for k, v in d.items()]
+        return "Declaration(%s)" % ", ".join(l)
+
+
+class Declarator(object):
+    pointer = None
+
+    def __init__(self):
+        self.identifier = None
+        self.initializer = None
+        self.array = None
+        self.parameters = None
+        self.bitfield = None
+        self.attrib = Attrib()
+
+    # make pointer read-only to catch mistakes early
+    pointer = property(lambda self: None)
+
+    def __repr__(self):
+        s = self.identifier or ""
+        if self.bitfield:
+            s += ":%d" % self.bitfield
+        if self.array:
+            s += repr(self.array)
+        if self.initializer:
+            s += " = %r" % self.initializer
+        if self.parameters is not None:
+            s += "(" + ", ".join([repr(p) for p in self.parameters]) + ")"
+        return s
+
+
+class Pointer(Declarator):
+    pointer = None
+
+    def __init__(self):
+        super(Pointer, self).__init__()
+        self.qualifiers = []
+
+    def __repr__(self):
+        q = ""
+        if self.qualifiers:
+            q = "<%s>" % " ".join(self.qualifiers)
+        return "POINTER%s(%r)" % (q, self.pointer) + super(Pointer, self).__repr__()
+
+
+class Array(object):
+    def __init__(self):
+        self.size = None
+        self.array = None
+
+    def __repr__(self):
+        if self.size:
+            a = "[%r]" % self.size
+        else:
+            a = "[]"
+        if self.array:
+            return repr(self.array) + a
+        else:
+            return a
+
+
+class Parameter(object):
+    def __init__(self):
+        self.type = Type()
+        self.storage = None
+        self.declarator = None
+        self.attrib = Attrib()
+
+    def __repr__(self):
+        d = {"type": self.type}
+        if self.declarator:
+            d["declarator"] = self.declarator
+        if self.storage:
+            d["storage"] = self.storage
+        l = ["%s=%r" % (k, v) for k, v in d.items()]
+        return "Parameter(%s)" % ", ".join(l)
+
+
+class Type(object):
+    def __init__(self):
+        self.qualifiers = []
+        self.specifiers = []
+
+    def __repr__(self):
+        return " ".join(self.qualifiers + [str(s) for s in self.specifiers])
+
+
+# These are used only internally.
+
+
+class StorageClassSpecifier(str):
+    def __repr__(self):
+        return "StorageClassSpecifier({})".format(str(self))
+
+
+class TypeSpecifier(str):
+    def __repr__(self):
+        return "TypeSpecifier({})".format(str(self))
+
+
+class StructTypeSpecifier(object):
+    def __init__(self, is_union, attrib, tag, declarations):
+        self.is_union = is_union
+        self.attrib = attrib
+        self.tag = tag
+        self.declarations = declarations
+        self.filename = None
+        self.lineno = -1
+
+    def __repr__(self):
+        if self.is_union:
+            s = "union"
+        else:
+            s = "struct"
+        if self.attrib:
+            attrs = list()
+            for attr, val in self.attrib.items():
+                if val and type(val) == str:
+                    attrs.append("{}({})".format(attr, val))
+                elif val:
+                    attrs.append(attr)
+
+            s += " __attribute__(({}))".format(",".join(attrs))
+        if self.tag and type(self.tag) != int:
+            s += " %s" % self.tag
+        if self.declarations:
+            s += " {%s}" % "; ".join([repr(d) for d in self.declarations])
+        return s
+
+
+class EnumSpecifier(object):
+    def __init__(self, tag, enumerators, src=None):
+        self.tag = tag
+        self.enumerators = enumerators
+        self.filename = None
+        self.lineno = -1
+
+    def __repr__(self):
+        s = "enum"
+        if self.tag:
+            s += " %s" % self.tag
+        if self.enumerators:
+            s += " {%s}" % ", ".join([repr(e) for e in self.enumerators])
+        return s
+
+
+class Enumerator(object):
+    def __init__(self, name, expression):
+        self.name = name
+        self.expression = expression
+
+    def __repr__(self):
+        s = self.name
+        if self.expression:
+            s += " = %r" % self.expression
+        return s
+
+
+class TypeQualifier(str):
+    def __repr__(self):
+        return "TypeQualifier({})".format(str(self))
+
+
+class PragmaPack(object):
+    DEFAULT = None
+
+    def __init__(self):
+        self.current = self.DEFAULT
+        self.stack = list()
+
+    def set_default(self):
+        self.current = self.DEFAULT
+
+    def push(self, id=None, value=None):
+        item = (id, self.current)
+        self.stack.append(item)
+
+        if value is not None:
+            self.current = value
+
+    def pop(self, id=None):
+        if not self.stack:
+            if id:
+                return (
+                    "#pragma pack(pop, {id}) encountered without matching "
+                    "#pragma pack(push, {id})".format(id=id),
+                )
+            else:
+                return "#pragma pack(pop) encountered without matching #pragma pack(push)"
+
+        item = None
+        err = None
+
+        if id is not None:
+            i = len(self.stack) - 1
+            while i >= 0 and self.stack[i][0] != id:
+                i -= 1
+
+            if i >= 0:
+                item = self.stack[i]
+                self.stack = self.stack[:i]
+            else:
+                err = (
+                    "#pragma pack(pop, {id}) encountered without matching "
+                    "#pragma pack(push, {id}); popped last".format(id=id)
+                )
+
+        if item is None:
+            item = self.stack.pop()
+
+        self.current = item[1]
+        return err
+
+
+pragma_pack = PragmaPack()
+
+
+class Attrib(dict):
+    def __init__(self, *a, **kw):
+        if pragma_pack.current:
+            super(Attrib, self).__init__(packed=True, aligned=[pragma_pack.current])
+            super(Attrib, self).update(*a, **kw)
+        else:
+            super(Attrib, self).__init__(*a, **kw)
+        self._unalias()
+
+    def __repr__(self):
+        return "Attrib({})".format(dict(self))
+
+    def update(self, *a, **kw):
+        super(Attrib, self).update(*a, **kw)
+        self._unalias()
+
+    def _unalias(self):
+        """
+        Check for any attribute aliases and remove leading/trailing '__'
+
+        According to https://gcc.gnu.org/onlinedocs/gcc/Attribute-Syntax.html,
+        an attribute can also be preceded/followed by a double underscore
+        ('__').
+        """
+
+        self.pop(None, None)  # remove dummy empty attribute
+
+        fixes = [attr for attr in self if attr.startswith("__") and attr.endswith("__")]
+        for attr in fixes:
+            self[attr[2 : (len(attr) - 2)]] = self.pop(attr)
+
+
+def apply_specifiers(specifiers, declaration):
+    """Apply specifiers to the declaration (declaration may be
+    a Parameter instead)."""
+    for s in specifiers:
+        if type(s) == StorageClassSpecifier:
+            if declaration.storage:
+                # Multiple storage classes, technically an error... ignore it
+                pass
+            declaration.storage = s
+        elif type(s) in (TypeSpecifier, StructTypeSpecifier, EnumSpecifier):
+            declaration.type.specifiers.append(s)
+        elif type(s) == TypeQualifier:
+            declaration.type.qualifiers.append(s)
+        elif type(s) == Attrib:
+            declaration.attrib.update(s)

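A short sketch of how the pragma_pack state feeds into Attrib; the pack value 4 is illustrative:

    from ctypesgen.parser.cdeclarations import Attrib, pragma_pack

    pragma_pack.push(value=4)  # as if "#pragma pack(push, 4)" had been parsed
    a = Attrib()               # picks up packed=True, aligned=[4] from the pragma state
    assert a == {"packed": True, "aligned": [4]}
    pragma_pack.pop()          # restore the previous packing
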
File diff suppressed because it is too large
+ 647 - 422
python/grass/ctypes/ctypesgencore/parser/cgrammar.py


+ 89 - 72
python/grass/ctypes/ctypesgencore/parser/cparser.py

@@ -1,15 +1,13 @@
-#!/usr/bin/env python3
+#!/usr/bin/env python
 
-'''
+"""
 Parse a C source file.
 
 To use, subclass CParser and override its handle_* methods.  Then instantiate
 the class with a string to parse.
-'''
-from __future__ import print_function
+"""
 
-
-__docformat__ = 'restructuredtext'
+__docformat__ = "restructuredtext"
 
 import operator
 import os.path
@@ -18,11 +16,10 @@ import sys
 import time
 import warnings
 
-from . import cdeclarations
-from . import cgrammar
 from . import preprocessor
 from . import yacc
-
+from . import cgrammar
+from . import cdeclarations
 
 # --------------------------------------------------------------------------
 # Lexer
@@ -30,7 +27,6 @@ from . import yacc
 
 
 class CLexer(object):
-
     def __init__(self, cparser):
         self.cparser = cparser
         self.type_names = set()
@@ -49,24 +45,36 @@ class CLexer(object):
             if not t:
                 break
 
-            if t.type == 'PP_DEFINE':
+            if t.type == "PP_DEFINE":
                 self.in_define = True
-            elif t.type == 'PP_END_DEFINE':
+            elif t.type == "PP_END_DEFINE":
                 self.in_define = False
 
             # Transform PP tokens into C tokens
-            elif t.type == 'LPAREN':
-                t.type = '('
-            elif t.type == 'PP_NUMBER':
-                t.type = 'CONSTANT'
-            elif t.type == 'IDENTIFIER' and t.value in cgrammar.keywords:
+            elif t.type == "LPAREN":
+                t.type = "("
+            elif t.type == "PP_NUMBER":
+                t.type = "CONSTANT"
+            elif t.type == "IDENTIFIER" and t.value in cgrammar.keywords:
                 t.type = t.value.upper()
-            elif t.type == 'IDENTIFIER' and t.value in self.type_names:
-                if (self.pos < 2 or self.tokens[self.pos - 2].type not in
-                        ('VOID', '_BOOL', 'CHAR', 'SHORT', 'INT', 'LONG',
-                         'FLOAT', 'DOUBLE', 'SIGNED', 'UNSIGNED', 'ENUM',
-                         'STRUCT', 'UNION', 'TYPE_NAME')):
-                    t.type = 'TYPE_NAME'
+            elif t.type == "IDENTIFIER" and t.value in self.type_names:
+                if self.pos < 2 or self.tokens[self.pos - 2].type not in (
+                    "VOID",
+                    "_BOOL",
+                    "CHAR",
+                    "SHORT",
+                    "INT",
+                    "LONG",
+                    "FLOAT",
+                    "DOUBLE",
+                    "SIGNED",
+                    "UNSIGNED",
+                    "ENUM",
+                    "STRUCT",
+                    "UNION",
+                    "TYPE_NAME",
+                ):
+                    t.type = "TYPE_NAME"
 
             t.lexer = self
             t.clexpos = self.pos - 1
@@ -74,27 +82,31 @@ class CLexer(object):
             return t
         return None
 
+
 # --------------------------------------------------------------------------
 # Parser
 # --------------------------------------------------------------------------
 
 
 class CParser(object):
-    '''Parse a C source file.
+    """Parse a C source file.
 
     Subclass and override the handle_* methods.  Call `parse` with a string
     to parse.
-    '''
+    """
 
-    def __init__(self, options, stddef_types=True, gnu_types=True):
+    def __init__(self, options):
+        super(CParser, self).__init__()
         self.preprocessor_parser = preprocessor.PreprocessorParser(options, self)
         self.parser = yacc.Parser()
-        prototype = yacc.yacc(method='LALR',
-                              debug=False,
-                              module=cgrammar,
-                              write_tables=True,
-                              outputdir=os.path.dirname(__file__),
-                              optimize=True)
+        prototype = yacc.yacc(
+            method="LALR",
+            debug=False,
+            module=cgrammar,
+            write_tables=True,
+            outputdir=os.path.dirname(__file__),
+            optimize=True,
+        )
 
         # If yacc is reading tables from a file, then it won't find the error
         # function... need to set it manually
@@ -104,24 +116,24 @@ class CParser(object):
 
         self.lexer = CLexer(self)
         if not options.no_stddef_types:
-            self.lexer.type_names.add('wchar_t')
-            self.lexer.type_names.add('ptrdiff_t')
-            self.lexer.type_names.add('size_t')
+            self.lexer.type_names.add("wchar_t")
+            self.lexer.type_names.add("ptrdiff_t")
+            self.lexer.type_names.add("size_t")
         if not options.no_gnu_types:
-            self.lexer.type_names.add('__builtin_va_list')
-        if sys.platform == 'win32' and not options.no_python_types:
-            self.lexer.type_names.add('__int64')
+            self.lexer.type_names.add("__builtin_va_list")
+        if sys.platform == "win32" and not options.no_python_types:
+            self.lexer.type_names.add("__int64")
 
     def parse(self, filename, debug=False):
-        '''Parse a file.
+        """Parse a file.
 
         If `debug` is True, parsing state is dumped to stdout.
-        '''
+        """
 
-        self.handle_status('Preprocessing %s' % filename)
+        self.handle_status("Preprocessing %s" % filename)
         self.preprocessor_parser.parse(filename)
         self.lexer.input(self.preprocessor_parser.output)
-        self.handle_status('Parsing %s' % filename)
+        self.handle_status("Parsing %s" % filename)
         self.parser.parse(lexer=self.lexer, debug=debug)
 
     # ----------------------------------------------------------------------
@@ -129,59 +141,65 @@ class CParser(object):
     # ----------------------------------------------------------------------
 
     def handle_error(self, message, filename, lineno):
-        '''A parse error occurred.
+        """A parse error occurred.
 
         The default implementation prints `lineno` and `message` to stderr.
         The parser will try to recover from errors by synchronising at the
         next semicolon.
-        '''
-        print('%s:%s %s' % (filename, lineno, message), file=sys.stderr)
+        """
+        sys.stderr.write("%s:%s %s\n" % (filename, lineno, message))
 
     def handle_pp_error(self, message):
-        '''The C preprocessor emitted an error.
+        """The C preprocessor emitted an error.
 
-        The default implementation prints the error to stderr. If processing
+        The default implementation prints the error to stderr. If processing
         can continue, it will.
-        '''
-        print('Preprocessor:', message, file=sys.stderr)
+        """
+        sys.stderr.write("Preprocessor: {}\n".format(message))
 
     def handle_status(self, message):
-        '''Progress information.
+        """Progress information.
 
         The default implementation prints the message to stderr.
-        '''
-        print(message, file=sys.stderr)
+        """
+        sys.stderr.write("{}\n".format(message))
 
     def handle_define(self, name, params, value, filename, lineno):
-        '''#define `name` `value`
+        """#define `name` `value`
         or #define `name`(`params`) `value`
 
         name is a string
         params is None or a list of strings
         value is a ...?
-        '''
+        """
 
     def handle_define_constant(self, name, value, filename, lineno):
-        '''#define `name` `value`
+        """#define `name` `value`
 
         name is a string
         value is an ExpressionNode or None
-        '''
+        """
 
     def handle_define_macro(self, name, params, value, filename, lineno):
-        '''#define `name`(`params`) `value`
+        """#define `name`(`params`) `value`
 
         name is a string
         params is a list of strings
         value is an ExpressionNode or None
-        '''
+        """
+
+    def handle_undefine(self, name, filename, lineno):
+        """#undef `name`
+
+        name is a string
+        """
 
     def impl_handle_declaration(self, declaration, filename, lineno):
-        '''Internal method that calls `handle_declaration`.  This method
+        """Internal method that calls `handle_declaration`.  This method
         also adds any new type definitions to the lexer's list of valid type
         names, which affects the parsing of subsequent declarations.
-        '''
-        if declaration.storage == 'typedef':
+        """
+        if declaration.storage == "typedef":
             declarator = declaration.declarator
             if not declarator:
                 # XXX TEMPORARY while struct etc not filled
@@ -192,24 +210,24 @@ class CParser(object):
         self.handle_declaration(declaration, filename, lineno)
 
     def handle_declaration(self, declaration, filename, lineno):
-        '''A declaration was encountered.
+        """A declaration was encountered.
 
         `declaration` is an instance of Declaration.  Where a declaration has
         multiple initialisers, each is returned as a separate declaration.
-        '''
+        """
         pass
 
 
 class DebugCParser(CParser):
-    '''A convenience class that prints each invocation of a handle_* method to
+    """A convenience class that prints each invocation of a handle_* method to
     stdout.
-    '''
+    """
 
     def handle_define(self, name, value, filename, lineno):
-        print('#define name=%r, value=%r' % (name, value))
+        print("#define name=%r, value=%r" % (name, value))
 
     def handle_define_constant(self, name, value, filename, lineno):
-        print('#define constant name=%r, value=%r' % (name, value))
+        print("#define constant name=%r, value=%r" % (name, value))
 
     def handle_declaration(self, declaration, filename, lineno):
         print(declaration)
@@ -219,12 +237,11 @@ class DebugCParser(CParser):
 
     def handle_define_unparseable(self, name, params, value, filename, lineno):
         if params:
-            original_string = "#define %s(%s) %s" % \
-                (name, ",".join(params), " ".join(value))
+            original_string = "#define %s(%s) %s" % (name, ",".join(params), " ".join(value))
         else:
-            original_string = "#define %s %s" % \
-                (name, " ".join(value))
+            original_string = "#define %s %s" % (name, " ".join(value))
         print(original_string)
 
-if __name__ == '__main__':
+
+if __name__ == "__main__":
     DebugCParser().parse(sys.argv[1], debug=True)
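For orientation, the handle_* methods above (including the newly added handle_undefine) are the entire extension surface of CParser: a subclass overrides the hooks it cares about and inherits no-op defaults for the rest, just as DebugCParser above does for printing. A minimal sketch of such a subclass, assuming the community ctypesgen package is importable as `ctypesgen`; the class name and the collected fields are made up for illustration:

from ctypesgen.parser.cparser import CParser

class CollectingParser(CParser):
    # Hypothetical subclass: record constants and #undef directives instead of printing them.
    def __init__(self, options):
        super().__init__(options)
        self.constants = {}      # name -> ExpressionNode or None
        self.undefined = []      # names removed with #undef

    def handle_define_constant(self, name, value, filename, lineno):
        self.constants[name] = value

    def handle_undefine(self, name, filename, lineno):
        self.undefined.append(name)

    def handle_declaration(self, declaration, filename, lineno):
        # each initialiser arrives as its own Declaration instance
        print(filename, lineno, declaration)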

+ 56 - 48
python/grass/ctypes/ctypesgencore/parser/ctypesparser.py

@@ -1,21 +1,20 @@
-#!/usr/bin/env python3
-
-'''
-ctypesgencore.parser.ctypesparser contains a class, CtypesParser, which is a
-subclass of ctypesgencore.parser.cparser.CParser. CtypesParser overrides the
+"""
+ctypesgen.parser.ctypesparser contains a class, CtypesParser, which is a
+subclass of ctypesgen.parser.cparser.CParser. CtypesParser overrides the
 handle_declaration() method of CParser. It turns the low-level type declarations
 produced by CParser into CtypesType instances and breaks the parser's general
 declarations into function, variable, typedef, constant, and type descriptions.
-'''
+"""
 
-__docformat__ = 'restructuredtext'
+__docformat__ = "restructuredtext"
 
 __all__ = ["CtypesParser"]
 
-from .cdeclarations import *
+from ..ctypedescs import *
+from ..expressions import *
+
 from .cparser import *
-from ctypesgencore.ctypedescs import *
-from ctypesgencore.expressions import *
+from .cdeclarations import *
 
 
 def make_enum_from_specifier(specifier):
@@ -28,19 +27,21 @@ def make_enum_from_specifier(specifier):
             value = e.expression
         else:
             if last_name:
-                value = BinaryExpressionNode("addition", (lambda x, y: x + y),
-                                             "(%s + %s)", (False, False),
-                                             IdentifierExpressionNode(
-                                                 last_name),
-                                             ConstantExpressionNode(1))
+                value = BinaryExpressionNode(
+                    "addition",
+                    (lambda x, y: x + y),
+                    "(%s + %s)",
+                    (False, False),
+                    IdentifierExpressionNode(last_name),
+                    ConstantExpressionNode(1),
+                )
             else:
                 value = ConstantExpressionNode(0)
 
         enumerators.append((e.name, value))
         last_name = e.name
 
-    return CtypesEnum(tag, enumerators,
-                      src=(specifier.filename, specifier.lineno))
+    return CtypesEnum(tag, enumerators, src=(specifier.filename, specifier.lineno))
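For reference, the loop above encodes the C rule for implicit enumerator values symbolically (as expression nodes) rather than evaluating it: the first enumerator without an initialiser gets 0, and any later one gets the previous enumerator plus one. A standalone numeric sketch of the same rule (names here are illustrative, not ctypesgen classes):

def assign_enum_values(enumerators):
    # enumerators: list of (name, explicit_value_or_None), as written in the C source
    values = {}
    previous = None
    for name, explicit in enumerators:
        if explicit is not None:
            value = explicit
        elif previous is None:
            value = 0
        else:
            value = values[previous] + 1
        values[name] = value
        previous = name
    return values

print(assign_enum_values([("RED", None), ("GREEN", 5), ("BLUE", None)]))
# {'RED': 0, 'GREEN': 5, 'BLUE': 6}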
 
 
 def get_decl_id(decl):
@@ -54,10 +55,10 @@ def get_decl_id(decl):
 
 
 class CtypesParser(CParser):
-    '''Parse a C file for declarations that can be used by ctypes.
+    """Parse a C file for declarations that can be used by ctypes.
 
     Subclass and override the handle_ctypes_* methods.
-    '''
+    """
 
     def __init__(self, options):
         super(CtypesParser, self).__init__(options)
@@ -72,9 +73,9 @@ class CtypesParser(CParser):
         if specifier.declarations:
             members = []
             for declaration in specifier.declarations:
-                t = self.get_ctypes_type(declaration.type,
-                                         declaration.declarator,
-                                         check_qualifiers=True)
+                t = self.get_ctypes_type(
+                    declaration.type, declaration.declarator, check_qualifiers=True
+                )
                 declarator = declaration.declarator
                 if declarator is None:
                     # Anonymous field in nested union/struct (C11/GCC).
@@ -87,12 +88,13 @@ class CtypesParser(CParser):
         else:
             members = None
 
-        return CtypesStruct(tag, specifier.is_packed, variety, members,
-                            src=(specifier.filename, specifier.lineno))
+        return CtypesStruct(
+            tag, specifier.attrib, variety, members, src=(specifier.filename, specifier.lineno)
+        )
 
     def get_ctypes_type(self, typ, declarator, check_qualifiers=False):
         signed = True
-        typename = 'int'
+        typename = "int"
         longs = 0
         t = None
 
@@ -101,11 +103,11 @@ class CtypesParser(CParser):
                 t = self.make_struct_from_specifier(specifier)
             elif isinstance(specifier, EnumSpecifier):
                 t = make_enum_from_specifier(specifier)
-            elif specifier == 'signed':
+            elif specifier == "signed":
                 signed = True
-            elif specifier == 'unsigned':
+            elif specifier == "unsigned":
                 signed = False
-            elif specifier == 'long':
+            elif specifier == "long":
                 longs += 1
             else:
                 typename = str(specifier)
@@ -122,12 +124,14 @@ class CtypesParser(CParser):
                 name = " ".join(typ.specifiers)
                 if typename in [x[0] for x in self.type_map.keys()]:
                     # It's an unsupported variant of a builtin type
-                    error = "Ctypes does not support the type \"%s\"." % name
+                    error = 'Ctypes does not support the type "%s".' % name
                 else:
-                    error = "Ctypes does not support adding additional " \
-                        "specifiers to typedefs, such as \"%s\"" % name
+                    error = (
+                        "Ctypes does not support adding additional "
+                        'specifiers to typedefs, such as "%s"' % name
+                    )
                 t = CtypesTypedef(name)
-                t.error(error, cls='unsupported-type')
+                t.error(error, cls="unsupported-type")
 
             if declarator and declarator.bitfield:
                 t = CtypesBitfield(t, declarator.bitfield)
@@ -155,8 +159,7 @@ class CtypesParser(CParser):
 
             qualifiers.extend(declarator.qualifiers)
 
-            t = CtypesPointer(t, tuple(typ.qualifiers) +
-                              tuple(declarator.qualifiers))
+            t = CtypesPointer(t, tuple(typ.qualifiers) + tuple(declarator.qualifiers))
 
             declarator = declarator.pointer
 
@@ -171,7 +174,7 @@ class CtypesParser(CParser):
                 ct = self.get_ctypes_type(param.type, param.declarator)
                 ct.identifier = param_name
                 params.append(ct)
-            t = CtypesFunction(t, params, variadic)
+            t = CtypesFunction(t, params, variadic, declarator.attrib)
 
         if declarator:
             a = declarator.array
@@ -179,10 +182,12 @@ class CtypesParser(CParser):
                 t = CtypesArray(t, a.size)
                 a = a.array
 
-        if (isinstance(t, CtypesPointer) and
-            isinstance(t.destination, CtypesSimple) and
-            t.destination.name == "char" and
-                t.destination.signed):
+        if (
+            isinstance(t, CtypesPointer)
+            and isinstance(t.destination, CtypesSimple)
+            and t.destination.name == "char"
+            and t.destination.signed
+        ):
             t = CtypesSpecial("String")
 
         return t
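The final check above is what makes `char *` arguments come out as ctypesgen's String helper instead of a plain pointer: only a pointer whose target is the signed `char` simple type is rewritten. A self-contained sketch of the predicate with stand-in classes (none of these names are the real ctypesgen types):

from dataclasses import dataclass

@dataclass
class Simple:            # stand-in for a simple C type such as "char"
    name: str
    signed: bool

@dataclass
class Pointer:           # stand-in for a pointer type
    destination: object

def is_c_string(t):
    # mirrors the check above: a pointer to signed "char" becomes String
    return (
        isinstance(t, Pointer)
        and isinstance(t.destination, Simple)
        and t.destination.name == "char"
        and t.destination.signed
    )

print(is_c_string(Pointer(Simple("char", True))))     # True  -> String
print(is_c_string(Pointer(Simple("char", False))))    # False -> stays a plain pointer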
@@ -191,8 +196,7 @@ class CtypesParser(CParser):
         t = self.get_ctypes_type(declaration.type, declaration.declarator)
 
         if type(t) in (CtypesStruct, CtypesEnum):
-            self.handle_ctypes_new_type(
-                remove_function_pointer(t), filename, lineno)
+            self.handle_ctypes_new_type(remove_function_pointer(t), filename, lineno)
 
         declarator = declaration.declarator
         if declarator is None:
@@ -201,13 +205,15 @@ class CtypesParser(CParser):
         while declarator.pointer:
             declarator = declarator.pointer
         name = declarator.identifier
-        if declaration.storage == 'typedef':
-            self.handle_ctypes_typedef(
-                name, remove_function_pointer(t), filename, lineno)
-        elif isinstance(t, CtypesFunction):
+        if declaration.storage == "typedef":
+            self.handle_ctypes_typedef(name, remove_function_pointer(t), filename, lineno)
+        elif type(t) == CtypesFunction:
+            attrib = Attrib(t.attrib)
+            attrib.update(declaration.attrib)
             self.handle_ctypes_function(
-                name, t.restype, t.argtypes, t.errcheck, t.variadic, filename, lineno)
-        elif declaration.storage != 'static':
+                name, t.restype, t.argtypes, t.errcheck, t.variadic, attrib, filename, lineno
+            )
+        elif declaration.storage != "static":
             self.handle_ctypes_variable(name, t, filename, lineno)
 
     # ctypes parser interface.  Override these methods in your subclass.
@@ -218,7 +224,9 @@ class CtypesParser(CParser):
     def handle_ctypes_typedef(self, name, ctype, filename, lineno):
         pass
 
-    def handle_ctypes_function(self, name, restype, argtypes, errcheck, filename, lineno):
+    def handle_ctypes_function(
+        self, name, restype, argtypes, errcheck, variadic, attrib, filename, lineno
+    ):
         pass
 
     def handle_ctypes_variable(self, name, ctype, filename, lineno):

+ 98 - 97
python/grass/ctypes/ctypesgencore/parser/datacollectingparser.py

@@ -1,4 +1,4 @@
-#!/usr/bin/env python3
+#!/usr/bin/env python
 
 """
 DataCollectingParser subclasses ctypesparser.CtypesParser and builds Description
@@ -6,21 +6,17 @@ objects from the CtypesType objects and other information from CtypesParser.
 After parsing is complete, a DescriptionCollection object can be retrieved by
 calling DataCollectingParser.data().
 """
-from __future__ import print_function
-
-
-import os
-from tempfile import mkstemp
 
 from . import ctypesparser
-from ctypesgencore.ctypedescs import *
-from ctypesgencore.descriptions import *
-from ctypesgencore.expressions import *
-from ctypesgencore.messages import *
+from ..descriptions import *
+from ..ctypedescs import *
+from ..expressions import *
+from ..messages import *
+from tempfile import mkstemp
+import os
 
 
-class DataCollectingParser(ctypesparser.CtypesParser,
-                           ctypesparser.CtypesTypeVisitor):
+class DataCollectingParser(ctypesparser.CtypesParser, ctypesparser.CtypesTypeVisitor):
     """Main class for the Parser component. Steps for use:
     p=DataCollectingParser(names_of_header_files,options)
     p.parse()
@@ -28,7 +24,7 @@ class DataCollectingParser(ctypesparser.CtypesParser,
     """
 
     def __init__(self, headers, options):
-        ctypesparser.CtypesParser.__init__(self, options)
+        super(DataCollectingParser, self).__init__(options)
         self.headers = headers
         self.options = options
 
@@ -64,15 +60,16 @@ class DataCollectingParser(ctypesparser.CtypesParser,
 
     def parse(self):
         fd, fname = mkstemp(suffix=".h")
-        f = os.fdopen(fd, 'w')
-        for header in self.options.other_headers:
-            print('#include <%s>' % header, file=f)
-        for header in self.headers:
-            print('#include "%s"' % os.path.abspath(header), file=f)
-        f.flush()
-        f.close()
-        ctypesparser.CtypesParser.parse(self, fname, False)
-        os.remove(fname)
+        with os.fdopen(fd, "w") as f:
+            for header in self.options.other_headers:
+                f.write("#include <%s>\n" % header)
+            for header in self.headers:
+                f.write('#include "%s"\n' % os.path.abspath(header))
+            f.flush()
+        try:
+            super(DataCollectingParser, self).parse(fname, self.options.debug_level)
+        finally:
+            os.unlink(fname)
 
         for name, params, expr, (filename, lineno) in self.saved_macros:
             self.handle_macro(name, params, expr, filename, lineno)
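The rewritten parse() above funnels every header into one generated .h file, including system headers with angle brackets and the requested ones by absolute path, then parses that single file and unlinks it in a finally block. A standalone sketch of the same temporary-header pattern (the header names are just examples):

import os
from tempfile import mkstemp

def write_combined_header(other_headers, headers):
    fd, fname = mkstemp(suffix=".h")
    with os.fdopen(fd, "w") as f:
        for header in other_headers:
            f.write("#include <%s>\n" % header)
        for header in headers:
            f.write('#include "%s"\n' % os.path.abspath(header))
    return fname

fname = write_combined_header(["stdio.h"], ["grass/gis.h"])
try:
    with open(fname) as f:
        print(f.read())            # this is the file handed to the C parser
finally:
    os.unlink(fname)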
@@ -85,36 +82,34 @@ class DataCollectingParser(ctypesparser.CtypesParser,
     def handle_define_unparseable(self, name, params, value, filename, lineno):
         # Called by CParser
         if params:
-            original_string = "#define %s(%s) %s" % \
-                (name, ",".join(params), " ".join(value))
+            original_string = "#define %s(%s) %s" % (name, ",".join(params), " ".join(value))
         else:
-            original_string = "#define %s %s" % \
-                (name, " ".join(value))
-        macro = MacroDescription(name, params, None,
-                                 src=(filename, lineno))
-        macro.error("Could not parse macro \"%s\"" % original_string,
-                    cls='macro')
+            original_string = "#define %s %s" % (name, " ".join(value))
+        macro = MacroDescription(name, params, None, src=(filename, lineno))
+        macro.error('Could not parse macro "%s"' % original_string, cls="macro")
         macro.original_string = original_string
         self.macros.append(macro)
         self.all.append(macro)
-        self.output_order.append(('macro', macro))
+        self.output_order.append(("macro", macro))
 
     def handle_define_macro(self, name, params, expr, filename, lineno):
         # Called by CParser
         # Save to handle later
         self.saved_macros.append((name, params, expr, (filename, lineno)))
 
+    def handle_undefine(self, macro, filename, lineno):
+        # save to handle later to get order correct
+        self.saved_macros.append(("#undef", None, macro, (filename, lineno)))
+
     def handle_ctypes_typedef(self, name, ctype, filename, lineno):
         # Called by CtypesParser
         ctype.visit(self)
 
-        typedef = TypedefDescription(name,
-                                     ctype,
-                                     src=(filename, repr(lineno)))
+        typedef = TypedefDescription(name, ctype, src=(filename, repr(lineno)))
 
         self.typedefs.append(typedef)
         self.all.append(typedef)
-        self.output_order.append(('typedef', typedef))
+        self.output_order.append(("typedef", typedef))
 
     def handle_ctypes_new_type(self, ctype, filename, lineno):
         # Called by CtypesParser
@@ -123,35 +118,31 @@ class DataCollectingParser(ctypesparser.CtypesParser,
         else:
             self.handle_struct(ctype, filename, lineno)
 
-    def handle_ctypes_function(self, name, restype, argtypes, errcheck,
-                               variadic, filename, lineno):
+    def handle_ctypes_function(
+        self, name, restype, argtypes, errcheck, variadic, attrib, filename, lineno
+    ):
         # Called by CtypesParser
         restype.visit(self)
         for argtype in argtypes:
             argtype.visit(self)
 
-        function = FunctionDescription(name,
-                                       restype,
-                                       argtypes,
-                                       errcheck,
-                                       variadic=variadic,
-                                       src=(filename, repr(lineno)))
+        function = FunctionDescription(
+            name, restype, argtypes, errcheck, variadic, attrib, src=(filename, repr(lineno))
+        )
 
         self.functions.append(function)
         self.all.append(function)
-        self.output_order.append(('function', function))
+        self.output_order.append(("function", function))
 
     def handle_ctypes_variable(self, name, ctype, filename, lineno):
         # Called by CtypesParser
         ctype.visit(self)
 
-        variable = VariableDescription(name,
-                                       ctype,
-                                       src=(filename, repr(lineno)))
+        variable = VariableDescription(name, ctype, src=(filename, repr(lineno)))
 
         self.variables.append(variable)
         self.all.append(variable)
-        self.output_order.append(('variable', variable))
+        self.output_order.append(("variable", variable))
 
     def handle_struct(self, ctypestruct, filename, lineno):
         # Called from within DataCollectingParser
@@ -169,18 +160,20 @@ class DataCollectingParser(ctypesparser.CtypesParser,
 
         if ctypestruct.opaque:
             if name not in self.already_seen_opaque_structs:
-                struct = StructDescription(ctypestruct.tag,
-                                           ctypestruct.packed,
-                                           ctypestruct.variety,
-                                           None,  # No members
-                                           True,  # Opaque
-                                           ctypestruct,
-                                           src=(filename, str(lineno)))
+                struct = StructDescription(
+                    ctypestruct.tag,
+                    ctypestruct.attrib,
+                    ctypestruct.variety,
+                    None,  # No members
+                    True,  # Opaque
+                    ctypestruct,
+                    src=(filename, str(lineno)),
+                )
 
                 self.already_seen_opaque_structs[name] = struct
                 self.structs.append(struct)
                 self.all.append(struct)
-                self.output_order.append(('struct', struct))
+                self.output_order.append(("struct", struct))
 
         else:
             for (membername, ctype) in ctypestruct.members:
@@ -194,22 +187,24 @@ class DataCollectingParser(ctypesparser.CtypesParser,
                 struct.ctype = ctypestruct
                 struct.src = ctypestruct.src
 
-                self.output_order.append(('struct-body', struct))
+                self.output_order.append(("struct-body", struct))
 
                 del self.already_seen_opaque_structs[name]
 
             else:
-                struct = StructDescription(ctypestruct.tag,
-                                           ctypestruct.packed,
-                                           ctypestruct.variety,
-                                           ctypestruct.members,
-                                           False,  # Not opaque
-                                           src=(filename, str(lineno)),
-                                           ctype=ctypestruct)
+                struct = StructDescription(
+                    ctypestruct.tag,
+                    ctypestruct.attrib,
+                    ctypestruct.variety,
+                    ctypestruct.members,
+                    False,  # Not opaque
+                    src=(filename, str(lineno)),
+                    ctype=ctypestruct,
+                )
                 self.structs.append(struct)
                 self.all.append(struct)
-                self.output_order.append(('struct', struct))
-                self.output_order.append(('struct-body', struct))
+                self.output_order.append(("struct", struct))
+                self.output_order.append(("struct-body", struct))
 
             self.already_seen_structs.add(name)
 
@@ -225,16 +220,13 @@ class DataCollectingParser(ctypesparser.CtypesParser,
 
         if ctypeenum.opaque:
             if tag not in self.already_seen_opaque_enums:
-                enum = EnumDescription(ctypeenum.tag,
-                                       None,
-                                       ctypeenum,
-                                       src=(filename, str(lineno)))
+                enum = EnumDescription(ctypeenum.tag, None, ctypeenum, src=(filename, str(lineno)))
                 enum.opaque = True
 
                 self.already_seen_opaque_enums[tag] = enum
                 self.enums.append(enum)
                 self.all.append(enum)
-                self.output_order.append(('enum', enum))
+                self.output_order.append(("enum", enum))
 
         else:
             if tag in self.already_seen_opaque_enums:
@@ -248,31 +240,32 @@ class DataCollectingParser(ctypesparser.CtypesParser,
                 del self.already_seen_opaque_enums[tag]
 
             else:
-                enum = EnumDescription(ctypeenum.tag,
-                                       ctypeenum.enumerators,
-                                       src=(filename, str(lineno)),
-                                       ctype=ctypeenum)
+                enum = EnumDescription(
+                    ctypeenum.tag,
+                    ctypeenum.enumerators,
+                    src=(filename, str(lineno)),
+                    ctype=ctypeenum,
+                )
                 enum.opaque = False
 
                 self.enums.append(enum)
                 self.all.append(enum)
-                self.output_order.append(('enum', enum))
+                self.output_order.append(("enum", enum))
 
             self.already_seen_enums.add(tag)
 
             for (enumname, expr) in ctypeenum.enumerators:
-                constant = ConstantDescription(enumname, expr,
-                                               src=(filename, lineno))
+                constant = ConstantDescription(enumname, expr, src=(filename, lineno))
 
                 self.constants.append(constant)
                 self.all.append(constant)
-                self.output_order.append(('constant', constant))
+                self.output_order.append(("constant", constant))
 
     def handle_macro(self, name, params, expr, filename, lineno):
         # Called from within DataCollectingParser
         src = (filename, lineno)
 
-        if expr is None:
+        if expr == None:
             expr = ConstantExpressionNode(True)
             constant = ConstantDescription(name, expr, src)
             self.constants.append(constant)
@@ -284,24 +277,30 @@ class DataCollectingParser(ctypesparser.CtypesParser,
         if isinstance(expr, CtypesType):
             if params:
                 macro = MacroDescription(name, "", src)
-                macro.error("%s has parameters but evaluates to a type. "
-                            "Ctypesgen does not support it." % macro.casual_name(),
-                            cls='macro')
+                macro.error(
+                    "%s has parameters but evaluates to a type. "
+                    "Ctypesgen does not support it." % macro.casual_name(),
+                    cls="macro",
+                )
                 self.macros.append(macro)
                 self.all.append(macro)
-                self.output_order.append(('macro', macro))
+                self.output_order.append(("macro", macro))
 
             else:
                 typedef = TypedefDescription(name, expr, src)
                 self.typedefs.append(typedef)
                 self.all.append(typedef)
-                self.output_order.append(('typedef', typedef))
+                self.output_order.append(("typedef", typedef))
 
+        elif name == "#undef":
+            undef = UndefDescription(expr, src)
+            self.all.append(undef)
+            self.output_order.append(("undef", undef))
         else:
             macro = MacroDescription(name, params, expr, src)
             self.macros.append(macro)
             self.all.append(macro)
-            self.output_order.append(('macro', macro))
+            self.output_order.append(("macro", macro))
 
         # Macros could possibly contain things like __FILE__, __LINE__, etc...
         # This could be supported, but it would be a lot of work. It would
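One detail worth spelling out: #undef never reaches handle_macro directly. handle_undefine stores the name in saved_macros under the sentinel name "#undef", and the replay after parsing turns that entry into an UndefDescription, which keeps defines and their later undefines interleaved correctly in output_order. A toy sketch of the sentinel-and-replay idea (all names here are illustrative):

saved = []

def remember_define(name, value):
    saved.append((name, value))

def remember_undef(name):
    saved.append(("#undef", name))      # sentinel entry, as in the diff above

def replay(entries):
    out = []
    for name, value in entries:
        if name == "#undef":
            out.append(("undef", value))
        else:
            out.append(("macro", name))
    return out

remember_define("FOO", 1)
remember_undef("FOO")
print(replay(saved))    # [('macro', 'FOO'), ('undef', 'FOO')]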
@@ -309,11 +308,11 @@ class DataCollectingParser(ctypesparser.CtypesParser,
 
     def handle_error(self, message, filename, lineno):
         # Called by CParser
-        error_message("%s:%d: %s" % (filename, lineno, message), cls='cparser')
+        error_message("%s:%d: %s" % (filename, lineno, message), cls="cparser")
 
     def handle_pp_error(self, message):
         # Called by PreprocessorParser
-        error_message("%s: %s" % (self.options.cpp, message), cls='cparser')
+        error_message("%s: %s" % (self.options.cpp, message), cls="cparser")
 
     def handle_status(self, message):
         # Called by CParser
@@ -326,12 +325,14 @@ class DataCollectingParser(ctypesparser.CtypesParser,
         self.handle_enum(enum, enum.src[0], enum.src[1])
 
     def data(self):
-        return DescriptionCollection(self.constants,
-                                     self.typedefs,
-                                     self.structs,
-                                     self.enums,
-                                     self.functions,
-                                     self.variables,
-                                     self.macros,
-                                     self.all,
-                                     self.output_order)
+        return DescriptionCollection(
+            self.constants,
+            self.typedefs,
+            self.structs,
+            self.enums,
+            self.functions,
+            self.variables,
+            self.macros,
+            self.all,
+            self.output_order,
+        )

+ 149 - 176
python/grass/ctypes/ctypesgencore/parser/lex.py

@@ -1,4 +1,4 @@
-#-----------------------------------------------------------------------------
+# -----------------------------------------------------------------------------
 # ply: lex.py
 #
 # Author: David M. Beazley (dave@dabeaz.com)
@@ -19,83 +19,30 @@
 #
 # You should have received a copy of the GNU Lesser General Public
 # License along with this library; if not, write to the Free Software
-# Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA
+# Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA  02111-1307  USA
 #
 # See the file LICENSE for a complete copy of the LGPL.
-#-----------------------------------------------------------------------------
-from __future__ import print_function
+# -----------------------------------------------------------------------------
 
 __version__ = "2.2"
 
-
-try:
-    from builtins import bytes
-    PY3 = True
-except ImportError:
-    # python2
-    bytes = str
-    PY3 = False
-
-
-import operator
-import os.path
-import re
-import sys
-import types
-import collections
-import functools
-
-if PY3:
-    _meth_func = "__func__"
-    _meth_self = "__self__"
-
-    _func_closure = "__closure__"
-    _func_code = "__code__"
-    _func_defaults = "__defaults__"
-    _func_globals = "__globals__"
-else:
-    _meth_func = "im_func"
-    _meth_self = "im_self"
-
-    _func_closure = "func_closure"
-    _func_code = "func_code"
-    _func_defaults = "func_defaults"
-    _func_globals = "func_globals"
-
-# define compatible function to support PY2 & PY3
-get_mth_func = operator.attrgetter(_meth_func)
-get_mth_self = operator.attrgetter(_meth_self)
-get_func_closure = operator.attrgetter(_func_closure)
-get_func_code = operator.attrgetter(_func_code)
-get_func_defaults = operator.attrgetter(_func_defaults)
-get_func_globals = operator.attrgetter(_func_globals)
-
+import re, sys, types, os.path
 
 # Regular expression used to match valid token names
-_is_identifier = re.compile(r'^[a-zA-Z0-9_]+$')
-
-# Available instance types.  This is used when lexers are defined by a class.
-# It's a little funky because I want to preserve backwards compatibility
-# with Python 2.0 where types.ObjectType is undefined.
-
-_INSTANCETYPE = getattr(types, 'InstanceType', object)
+_is_identifier = re.compile(r"^[a-zA-Z0-9_]+$")
 
+_INSTANCETYPE = object
 
 # Exception thrown when invalid token encountered and no default error
 # handler is defined.
-
-
 class LexError(Exception):
-
     def __init__(self, message, s):
         self.args = (message,)
         self.text = s
 
-# Token class
-
 
+# Token class
 class LexToken(object):
-
     def __str__(self):
         return "LexToken(%s,%r,%d,%d)" % (self.type, self.value, self.lineno, self.lexpos)
 
@@ -105,6 +52,7 @@ class LexToken(object):
     def skip(self, n):
         self.lexer.skip(n)
 
+
 # -----------------------------------------------------------------------------
 # Lexer class
 #
@@ -116,32 +64,31 @@ class LexToken(object):
 
 
 class Lexer:
-
     def __init__(self):
-        self.lexre = None             # Master regular expression. This is a list of
+        self.lexre = None  # Master regular expression. This is a list of
         # tuples (re,findex) where re is a compiled
         # regular expression and findex is a list
         # mapping regex group numbers to rules
-        self.lexretext = None         # Current regular expression strings
-        self.lexstatere = {}          # Dictionary mapping lexer states to master regexs
-        self.lexstateretext = {}      # Dictionary mapping lexer states to regex strings
-        self.lexstate = "INITIAL"     # Current lexer state
-        self.lexstatestack = []       # Stack of lexer states
-        self.lexstateinfo = None      # State information
-        self.lexstateignore = {}      # Dictionary of ignored characters for each state
-        self.lexstateerrorf = {}      # Dictionary of error functions for each state
-        self.lexreflags = 0           # Optional re compile flags
-        self.lexdata = None           # Actual input data (as a string)
-        self.lexpos = 0               # Current position in input text
-        self.lexlen = 0               # Length of the input text
-        self.lexerrorf = None         # Error rule (if any)
-        self.lextokens = None         # List of valid tokens
-        self.lexignore = ""           # Ignored characters
-        self.lexliterals = ""         # Literal characters that can be passed through
-        self.lexmodule = None         # Module
-        self.lineno = 1               # Current line number
-        self.lexdebug = 0             # Debugging mode
-        self.lexoptimize = 0          # Optimized mode
+        self.lexretext = None  # Current regular expression strings
+        self.lexstatere = {}  # Dictionary mapping lexer states to master regexs
+        self.lexstateretext = {}  # Dictionary mapping lexer states to regex strings
+        self.lexstate = "INITIAL"  # Current lexer state
+        self.lexstatestack = []  # Stack of lexer states
+        self.lexstateinfo = None  # State information
+        self.lexstateignore = {}  # Dictionary of ignored characters for each state
+        self.lexstateerrorf = {}  # Dictionary of error functions for each state
+        self.lexreflags = 0  # Optional re compile flags
+        self.lexdata = None  # Actual input data (as a string)
+        self.lexpos = 0  # Current position in input text
+        self.lexlen = 0  # Length of the input text
+        self.lexerrorf = None  # Error rule (if any)
+        self.lextokens = None  # List of valid tokens
+        self.lexignore = ""  # Ignored characters
+        self.lexliterals = ""  # Literal characters that can be passed through
+        self.lexmodule = None  # Module
+        self.lineno = 1  # Current line number
+        self.lexdebug = 0  # Debugging mode
+        self.lexoptimize = 0  # Optimized mode
 
     def clone(self, object=None):
         c = Lexer()
@@ -194,11 +141,12 @@ class Lexer:
     # writetab() - Write lexer information to a table file
     # ------------------------------------------------------------
     # <tm> 25 June 2008 added 'outputdir'
-    def writetab(self, tabfile, outputdir=''):
+    def writetab(self, tabfile, outputdir=""):
         tf = open(os.path.join(outputdir, tabfile) + ".py", "w")
         tf.write(
-            "# %s.py. This file automatically created by PLY (version %s). Don't edit!\n" %
-            (tabfile, __version__))
+            "# %s.py. This file automatically created by PLY (version %s). Don't edit!\n"
+            % (tabfile, __version__)
+        )
         tf.write("_lextokens    = %s\n" % repr(self.lextokens))
         tf.write("_lexreflags   = %s\n" % repr(self.lexreflags))
         tf.write("_lexliterals  = %s\n" % repr(self.lexliterals))
@@ -240,16 +188,15 @@ class Lexer:
             txtitem = []
             for i in range(len(lre)):
                 titem.append(
-                    (re.compile(
-                        lre[i][0], lextab._lexreflags), _names_to_funcs(
-                        lre[i][1], fdict)))
+                    (re.compile(lre[i][0], lextab._lexreflags), _names_to_funcs(lre[i][1], fdict))
+                )
                 txtitem.append(lre[i][0])
             self.lexstatere[key] = titem
             self.lexstateretext[key] = txtitem
         self.lexstateerrorf = {}
         for key, ef in lextab._lexstateerrorf.items():
             self.lexstateerrorf[key] = fdict[ef]
-        self.begin('INITIAL')
+        self.begin("INITIAL")
 
     # ------------------------------------------------------------
     # input() - Push a new string into the lexer
@@ -313,8 +260,7 @@ class Lexer:
         lexdata = self.lexdata
 
         while lexpos < lexlen:
-            # This code provides some short-circuit code for whitespace, tabs, and
-            # other ignored characters
+            # This code provides some short-circuit code for whitespace, tabs, and other ignored characters
             if lexdata[lexpos] in lexignore:
                 lexpos += 1
                 continue
@@ -348,7 +294,7 @@ class Lexer:
                     break
 
                 # if func not callable, it means it's an ignored token
-                if not isinstance(func, collections.abc.Callable):
+                if not hasattr(func, "__call__"):
                     break
 
                 # If token is processed by a function, call it
@@ -356,7 +302,7 @@ class Lexer:
 
                 # Every function must return a token, if nothing, we just move to next token
                 if not newtok:
-                    lexpos = self.lexpos        # This is here in case user has updated lexpos.
+                    lexpos = self.lexpos  # This is here in case user has updated lexpos.
 
                     # Added for pyglet/tools/wrapper/cparser.py by Alex
                     # Holkner on 20/Jan/2007
@@ -369,9 +315,16 @@ class Lexer:
                     # pyglet/tools/wrapper/cparser.py by Alex Holkner on
                     # 20/Jan/2007
                     if newtok.type not in self.lextokens and len(newtok.type) > 1:
-                        raise LexError("%s:%d: Rule '%s' returned an unknown token type '%s'" % (
-                            get_func_code(func).co_filename, get_func_code(func).co_firstlineno,
-                            func.__name__, newtok.type), lexdata[lexpos:])
+                        raise LexError(
+                            "%s:%d: Rule '%s' returned an unknown token type '%s'"
+                            % (
+                                func.__code__.co_filename,
+                                func.__code__.co_firstlineno,
+                                func.__name__,
+                                newtok.type,
+                            ),
+                            lexdata[lexpos:],
+                        )
 
                 return newtok
             else:
@@ -399,9 +352,9 @@ class Lexer:
                     if lexpos == self.lexpos:
                         # Error method didn't change text position at all. This is an error.
                         raise LexError(
-                            "Scanning error. Illegal character '%s'" %
-                            (lexdata[lexpos]), lexdata[
-                                lexpos:])
+                            "Scanning error. Illegal character '%s'" % (lexdata[lexpos]),
+                            lexdata[lexpos:],
+                        )
                     lexpos = self.lexpos
                     if not newtok:
                         continue
@@ -409,15 +362,16 @@ class Lexer:
 
                 self.lexpos = lexpos
                 raise LexError(
-                    "Illegal character '%s' at index %d" %
-                    (lexdata[lexpos], lexpos), lexdata[
-                        lexpos:])
+                    "Illegal character '%s' at index %d" % (lexdata[lexpos], lexpos),
+                    lexdata[lexpos:],
+                )
 
         self.lexpos = lexpos + 1
         if self.lexdata is None:
             raise RuntimeError("No input string given with input()")
         return None
 
+
 # -----------------------------------------------------------------------------
 # _validate_file()
 #
@@ -429,19 +383,20 @@ class Lexer:
 
 def _validate_file(filename):
     import os.path
+
     base, ext = os.path.splitext(filename)
-    if ext != '.py':
-        return 1        # No idea what the file is. Return OK
+    if ext != ".py":
+        return 1  # No idea what the file is. Return OK
 
     try:
         f = open(filename)
         lines = f.readlines()
         f.close()
     except IOError:
-        return 1                       # Oh well
+        return 1  # Oh well
 
-    fre = re.compile(r'\s*def\s+(t_[a-zA-Z_0-9]*)\(')
-    sre = re.compile(r'\s*(t_[a-zA-Z_0-9]*)\s*=')
+    fre = re.compile(r"\s*def\s+(t_[a-zA-Z_0-9]*)\(")
+    sre = re.compile(r"\s*(t_[a-zA-Z_0-9]*)\s*=")
     counthash = {}
     linen = 1
     noerror = 1
@@ -455,11 +410,15 @@ def _validate_file(filename):
             if not prev:
                 counthash[name] = linen
             else:
-                print("%s:%d: Rule %s redefined. Previously defined on line %d" % (filename, linen, name, prev))
+                print(
+                    "%s:%d: Rule %s redefined. Previously defined on line %d"
+                    % (filename, linen, name, prev)
+                )
                 noerror = 0
         linen += 1
     return noerror
 
+
 # -----------------------------------------------------------------------------
 # _funcs_to_names()
 #
@@ -477,6 +436,7 @@ def _funcs_to_names(funclist):
             result.append(f)
     return result
 
+
 # -----------------------------------------------------------------------------
 # _names_to_funcs()
 #
@@ -494,6 +454,7 @@ def _names_to_funcs(namelist, fdict):
             result.append(n)
     return result
 
+
 # -----------------------------------------------------------------------------
 # _form_master_re()
 #
@@ -534,6 +495,7 @@ def _form_master_re(relist, reflags, ldict):
         rlist, rre = _form_master_re(relist[m:], reflags, ldict)
         return llist + rlist, lre + rre
 
+
 # -----------------------------------------------------------------------------
 # def _statetoken(s,names)
 #
@@ -548,19 +510,20 @@ def _statetoken(s, names):
     nonstate = 1
     parts = s.split("_")
     for i in range(1, len(parts)):
-        if parts[i] not in names and parts[i] != 'ANY':
+        if parts[i] not in names and parts[i] != "ANY":
             break
     if i > 1:
         states = tuple(parts[1:i])
     else:
-        states = ('INITIAL',)
+        states = ("INITIAL",)
 
-    if 'ANY' in states:
+    if "ANY" in states:
         states = tuple(names.keys())
 
     tokenname = "_".join(parts[i:])
     return (states, tokenname)
 
+
 # -----------------------------------------------------------------------------
 # lex(module)
 #
@@ -568,13 +531,20 @@ def _statetoken(s, names):
 # -----------------------------------------------------------------------------
 # cls added for pyglet/tools/wrapper/cparser.py by Alex Holkner on 22/Jan/2007
 # <tm> 25 June 2008 added 'outputdir'
-
-
-def lex(module=None, object=None, debug=0, optimize=0,
-        lextab="lextab", reflags=0, nowarn=0, outputdir='', cls=Lexer):
+def lex(
+    module=None,
+    object=None,
+    debug=0,
+    optimize=0,
+    lextab="lextab",
+    reflags=0,
+    nowarn=0,
+    outputdir="",
+    cls=Lexer,
+):
     global lexer
     ldict = None
-    stateinfo = {'INITIAL': 'inclusive'}
+    stateinfo = {"INITIAL": "inclusive"}
     error = 0
     files = {}
     lexobj = cls()
@@ -610,8 +580,8 @@ def lex(module=None, object=None, debug=0, optimize=0,
         except RuntimeError:
             e, b, t = sys.exc_info()
             f = t.tb_frame
-            f = f.f_back           # Walk out to our calling function
-            ldict = f.f_globals    # Grab its globals dictionary
+            f = f.f_back  # Walk out to our calling function
+            ldict = f.f_globals  # Grab its globals dictionary
 
     if optimize and lextab:
         try:
@@ -625,7 +595,7 @@ def lex(module=None, object=None, debug=0, optimize=0,
             pass
 
     # Get the tokens, states, and literals variables (if any)
-    if (module and isinstance(module, _INSTANCETYPE)):
+    if module and isinstance(module, _INSTANCETYPE):
         tokens = getattr(module, "tokens", None)
         states = getattr(module, "states", None)
         literals = getattr(module, "literals", "")
@@ -658,8 +628,7 @@ def lex(module=None, object=None, debug=0, optimize=0,
 
     try:
         for c in literals:
-            if not (isinstance(c, bytes) or isinstance(
-                    c, str)) or len(c) > 1:
+            if not (isinstance(c, bytes) or isinstance(c, str)) or len(c) > 1:
                 print("lex: Invalid literal %s. Must be a single character" % repr(c))
                 error = 1
                 continue
@@ -678,7 +647,10 @@ def lex(module=None, object=None, debug=0, optimize=0,
         else:
             for s in states:
                 if not isinstance(s, tuple) or len(s) != 2:
-                    print("lex: invalid state specifier %s. Must be a tuple (statename,'exclusive|inclusive')" % repr(s))
+                    print(
+                        "lex: invalid state specifier %s. Must be a tuple (statename,'exclusive|inclusive')"
+                        % repr(s)
+                    )
                     error = 1
                     continue
                 name, statetype = s
@@ -686,7 +658,7 @@ def lex(module=None, object=None, debug=0, optimize=0,
                     print("lex: state name %s must be a string" % repr(name))
                     error = 1
                     continue
-                if not (statetype == 'inclusive' or statetype == 'exclusive'):
+                if not (statetype == "inclusive" or statetype == "exclusive"):
                     print("lex: state type for state %s must be 'inclusive' or 'exclusive'" % name)
                     error = 1
                     continue
@@ -697,20 +669,20 @@ def lex(module=None, object=None, debug=0, optimize=0,
                 stateinfo[name] = statetype
 
     # Get a list of symbols with the t_ or s_ prefix
-    tsymbols = [f for f in ldict.keys() if f[:2] == 't_']
+    tsymbols = [f for f in ldict.keys() if f[:2] == "t_"]
 
     # Now build up a list of functions and a list of strings
 
-    funcsym = {}        # Symbols defined as functions
-    strsym = {}        # Symbols defined as strings
-    toknames = {}        # Mapping of symbols to token names
+    funcsym = {}  # Symbols defined as functions
+    strsym = {}  # Symbols defined as strings
+    toknames = {}  # Mapping of symbols to token names
 
     for s in stateinfo.keys():
         funcsym[s] = []
         strsym[s] = []
 
-    ignore = {}        # Ignore strings by state
-    errorf = {}        # Error functions by state
+    ignore = {}  # Ignore strings by state
+    errorf = {}  # Error functions by state
 
     if len(tsymbols) == 0:
         raise SyntaxError("lex: no rules of the form t_rulename are defined.")
@@ -720,10 +692,10 @@ def lex(module=None, object=None, debug=0, optimize=0,
         states, tokname = _statetoken(f, stateinfo)
         toknames[f] = tokname
 
-        if isinstance(t, collections.abc.Callable):
+        if hasattr(t, "__call__"):
             for s in states:
                 funcsym[s].append((f, t))
-        elif (isinstance(t, bytes) or isinstance(t, str)):
+        elif isinstance(t, bytes) or isinstance(t, str):
             for s in states:
                 strsym[s].append((f, t))
         else:
@@ -732,21 +704,11 @@ def lex(module=None, object=None, debug=0, optimize=0,
 
     # Sort the functions by line number
     for f in funcsym.values():
-        if os.sys.version_info.major >= 3:
-            f.sort(key=lambda x: get_func_code(x[1]).co_firstlineno)
-        else:
-            f.sort(key=lambda x, y: cmp(get_func_code(x[1]).co_firstlineno,
-                                        get_func_code(y[1]).co_firstlineno))
+        f.sort(key=lambda x: x[1].__code__.co_firstlineno)
 
     # Sort the strings by regular expression length
     for s in strsym.values():
-        if os.sys.version_info.major >= 3:
-            s.sort(key=functools.cmp_to_key(lambda x, y:
-                                            (len(x[1]) < len(y[1])) -
-                                            (len(x[1]) > len(y[1]))))
-        else:
-            s.sort(key=lambda x, y: (len(x[1]) < len(y[1])) -
-                                    (len(x[1]) > len(y[1])))
+        s.sort(key=lambda x: len(x[1]))
 
     regexs = {}
 
@@ -756,38 +718,37 @@ def lex(module=None, object=None, debug=0, optimize=0,
 
         # Add rules defined by functions first
         for fname, f in funcsym[state]:
-            line = get_func_code(f).co_firstlineno
-            file_ = get_func_code(f).co_filename
-            files[file_] = None
+            line = f.__code__.co_firstlineno
+            file = f.__code__.co_filename
+            files[file] = None
             tokname = toknames[fname]
 
             ismethod = isinstance(f, types.MethodType)
 
             if not optimize:
-                nargs = get_func_code(f).co_argcount
+                nargs = f.__code__.co_argcount
                 if ismethod:
                     reqargs = 2
                 else:
                     reqargs = 1
                 if nargs > reqargs:
-                    print("%s:%d: Rule '%s' has too many arguments."
-                          % (file_, line, f.__name__))
+                    print("%s:%d: Rule '%s' has too many arguments." % (file, line, f.__name__))
                     error = 1
                     continue
 
                 if nargs < reqargs:
-                    print("%s:%d: Rule '%s' requires an argument."
-                          % (file_, line, f.__name__))
+                    print("%s:%d: Rule '%s' requires an argument." % (file, line, f.__name__))
                     error = 1
                     continue
 
-                if tokname == 'ignore':
-                    print("%s:%d: Rule '%s' must be defined as a string."
-                          % (file_, line, f.__name__))
+                if tokname == "ignore":
+                    print(
+                        "%s:%d: Rule '%s' must be defined as a string." % (file, line, f.__name__)
+                    )
                     error = 1
                     continue
 
-            if tokname == 'error':
+            if tokname == "error":
                 errorf[state] = f
                 continue
 
@@ -796,42 +757,50 @@ def lex(module=None, object=None, debug=0, optimize=0,
                     try:
                         c = re.compile("(?P<%s>%s)" % (f.__name__, f.__doc__), re.VERBOSE | reflags)
                         if c.match(""):
-                            print("%s:%d: Regular expression for rule '%s' "
-                                  "matches empty string."
-                                  % (file_, line, f.__name__))
+                            print(
+                                "%s:%d: Regular expression for rule '%s' matches empty string."
+                                % (file, line, f.__name__)
+                            )
                             error = 1
                             continue
                     except re.error as e:
-                        print("%s:%d: Invalid regular expression for rule '%s'. %s"
-                              % (file_, line, f.__name__, e))
-                        if '#' in f.__doc__:
-                            print("%s:%d. Make sure '#' in rule '%s' is escaped with '\\#'."
-                                  % (file_, line, f.__name__))
+                        print(
+                            "%s:%d: Invalid regular expression for rule '%s'. %s"
+                            % (file, line, f.__name__, e)
+                        )
+                        if "#" in f.__doc__:
+                            print(
+                                "%s:%d. Make sure '#' in rule '%s' is escaped with '\\#'."
+                                % (file, line, f.__name__)
+                            )
                         error = 1
                         continue
 
                     if debug:
-                        print("lex: Adding rule %s -> '%s' (state '%s')"
-                              % (f.__name__, f.__doc__, state))
+                        print(
+                            "lex: Adding rule %s -> '%s' (state '%s')"
+                            % (f.__name__, f.__doc__, state)
+                        )
 
                 # Okay. The regular expression seemed okay.  Let's append it to the master regular
                 # expression we're building
 
                 regex_list.append("(?P<%s>%s)" % (f.__name__, f.__doc__))
             else:
-                print("%s:%d: No regular expression defined for rule '%s'"
-                      % (file_, line, f.__name__))
+                print(
+                    "%s:%d: No regular expression defined for rule '%s'" % (file, line, f.__name__)
+                )
 
         # Now add all of the simple rules
         for name, r in strsym[state]:
             tokname = toknames[name]
 
-            if tokname == 'ignore':
+            if tokname == "ignore":
                 ignore[state] = r
                 continue
 
             if not optimize:
-                if tokname == 'error':
+                if tokname == "error":
                     raise SyntaxError("lex: Rule '%s' must be defined as a function" % name)
                     error = 1
                     continue
@@ -842,13 +811,13 @@ def lex(module=None, object=None, debug=0, optimize=0,
                     continue
                 try:
                     c = re.compile("(?P<%s>%s)" % (name, r), re.VERBOSE | reflags)
-                    if (c.match("")):
+                    if c.match(""):
                         print("lex: Regular expression for rule '%s' matches empty string." % name)
                         error = 1
                         continue
                 except re.error as e:
                     print("lex: Invalid regular expression for rule '%s'. %s" % (name, e))
-                    if '#' in r:
+                    if "#" in r:
                         print("lex: Make sure '#' in rule '%s' is escaped with '\\#'." % name)
 
                     error = 1
@@ -887,9 +856,9 @@ def lex(module=None, object=None, debug=0, optimize=0,
 
     # For inclusive states, we need to add the INITIAL state
     for state, type in stateinfo.items():
-        if state != "INITIAL" and type == 'inclusive':
-            lexobj.lexstatere[state].extend(lexobj.lexstatere['INITIAL'])
-            lexobj.lexstateretext[state].extend(lexobj.lexstateretext['INITIAL'])
+        if state != "INITIAL" and type == "inclusive":
+            lexobj.lexstatere[state].extend(lexobj.lexstatere["INITIAL"])
+            lexobj.lexstateretext[state].extend(lexobj.lexstateretext["INITIAL"])
 
     lexobj.lexstateinfo = stateinfo
     lexobj.lexre = lexobj.lexstatere["INITIAL"]
@@ -907,12 +876,12 @@ def lex(module=None, object=None, debug=0, optimize=0,
 
     # Check state information for ignore and error rules
     for s, stype in stateinfo.items():
-        if stype == 'exclusive':
+        if stype == "exclusive":
             if warn and s not in errorf:
                 print("lex: Warning. no error rule is defined for exclusive state '%s'" % s)
             if warn and s not in ignore and lexobj.lexignore:
                 print("lex: Warning. no ignore rule is defined for exclusive state '%s'" % s)
-        elif stype == 'inclusive':
+        elif stype == "inclusive":
             if s not in errorf:
                 errorf[s] = errorf.get("INITIAL", None)
             if s not in ignore:
@@ -929,6 +898,7 @@ def lex(module=None, object=None, debug=0, optimize=0,
 
     return lexobj
 
+
 # -----------------------------------------------------------------------------
 # runmain()
 #
@@ -971,11 +941,14 @@ def runmain(lexer=None, data=None):
 # when its docstring might need to be set in an alternative way
 # -----------------------------------------------------------------------------
 
+
 def TOKEN(r):
     def set_doc(f):
         f.__doc__ = r
         return f
+
     return set_doc
 
+
 # Alternative spelling of the TOKEN decorator
 Token = TOKEN
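TOKEN (and its alias Token) closes out the module: it exists for rules whose regex has to be built programmatically, since lex() reads each rule's pattern out of the function's __doc__. A self-contained sketch of the behaviour (the decorator body is the one above; the rule name and pattern are made up):

def TOKEN(r):
    def set_doc(f):
        f.__doc__ = r
        return f
    return set_doc

IDENTIFIER = r"[A-Za-z_][A-Za-z_0-9]*"

@TOKEN(IDENTIFIER)
def t_ANY_identifier(t):
    t.type = "IDENTIFIER"
    return t

print(t_ANY_identifier.__doc__)     # [A-Za-z_][A-Za-z_0-9]*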

File diff suppressed because it is too large
+ 8 - 0
python/grass/ctypes/ctypesgen/parser/lextab.py


File diff suppressed because it is too large
+ 307 - 0
python/grass/ctypes/ctypesgen/parser/parsetab.py


+ 402 - 0
python/grass/ctypes/ctypesgen/parser/pplexer.py

@@ -0,0 +1,402 @@
+#!/usr/bin/env python
+
+"""Preprocess a C source file using gcc and convert the result into
+   a token stream
+
+Reference is C99:
+  * http://www.open-std.org/JTC1/SC22/WG14/www/docs/n1124.pdf
+
+"""
+
+__docformat__ = "restructuredtext"
+
+import os, re, shlex, sys, tokenize, traceback
+import ctypes
+from .lex import TOKEN
+
+tokens = (
+    "HEADER_NAME",
+    "IDENTIFIER",
+    "PP_NUMBER",
+    "CHARACTER_CONSTANT",
+    "STRING_LITERAL",
+    "OTHER",
+    "PTR_OP",
+    "INC_OP",
+    "DEC_OP",
+    "LEFT_OP",
+    "RIGHT_OP",
+    "LE_OP",
+    "GE_OP",
+    "EQ_OP",
+    "NE_OP",
+    "AND_OP",
+    "OR_OP",
+    "MUL_ASSIGN",
+    "DIV_ASSIGN",
+    "MOD_ASSIGN",
+    "ADD_ASSIGN",
+    "SUB_ASSIGN",
+    "LEFT_ASSIGN",
+    "RIGHT_ASSIGN",
+    "AND_ASSIGN",
+    "XOR_ASSIGN",
+    "OR_ASSIGN",
+    "PERIOD",
+    "ELLIPSIS",
+    "LPAREN",
+    "NEWLINE",
+    "PP_DEFINE",
+    "PP_DEFINE_NAME",
+    "PP_DEFINE_MACRO_NAME",
+    "PP_UNDEFINE",
+    "PP_MACRO_PARAM",
+    "PP_STRINGIFY",
+    "PP_IDENTIFIER_PASTE",
+    "PP_END_DEFINE",
+    "PRAGMA",
+    "PRAGMA_PACK",
+    "PRAGMA_END",
+)
+
+states = [("DEFINE", "exclusive"), ("PRAGMA", "exclusive")]
+
+subs = {
+    "D": "[0-9]",
+    "L": "[a-zA-Z_]",
+    "H": "[a-fA-F0-9]",
+    "E": "[Ee][+-]?\s*{D}+",
+    # new float suffixes supported in gcc 7
+    "FS": "([FflL]|D[FDL]|[fF]\d+x?)",
+    "IS": "[uUlL]*",
+}
+# Helper: substitute {foo} with subs[foo] in string (makes regexes more lexy)
+sub_pattern = re.compile("{([^}]*)}")
+
+
+def sub_repl_match(m):
+    return subs[m.groups()[0]]
+
+
+def sub(s):
+    return sub_pattern.sub(sub_repl_match, s)
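The subs table and sub() helper above are a small regex-templating step: placeholders such as {D} and {L} are spliced into the token patterns before they are handed to the lexer, which keeps the long literals readable. Roughly what the expansion does, shown with a two-entry copy of the table:

import re

subs = {"D": "[0-9]", "L": "[a-zA-Z_]"}
sub_pattern = re.compile("{([^}]*)}")

def sub(s):
    # replace every {name} placeholder with its entry from subs
    return sub_pattern.sub(lambda m: subs[m.group(1)], s)

print(sub("{L}({L}|{D})*"))    # [a-zA-Z_]([a-zA-Z_]|[0-9])*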
+
+
+# --------------------------------------------------------------------------
+# Token value types
+# --------------------------------------------------------------------------
+
+# Numbers represented as int and float types.
+# For all other tokens, type is just str representation.
+
+
+class StringLiteral(str):
+    def __new__(cls, value):
+        assert value[0] == '"' and value[-1] == '"'
+        # Unescaping probably not perfect but close enough.
+        value = value[1:-1]  # .decode('string_escape')
+        return str.__new__(cls, value)
+
+
+# --------------------------------------------------------------------------
+# Token declarations
+# --------------------------------------------------------------------------
+
+punctuators = {
+    # value: (regex, type)
+    r"...": (r"\.\.\.", "ELLIPSIS"),
+    r">>=": (r">>=", "RIGHT_ASSIGN"),
+    r"<<=": (r"<<=", "LEFT_ASSIGN"),
+    r"+=": (r"\+=", "ADD_ASSIGN"),
+    r"-=": (r"-=", "SUB_ASSIGN"),
+    r"*=": (r"\*=", "MUL_ASSIGN"),
+    r"/=": (r"/=", "DIV_ASSIGN"),
+    r"%=": (r"%=", "MOD_ASSIGN"),
+    r"&=": (r"&=", "AND_ASSIGN"),
+    r"^=": (r"\^=", "XOR_ASSIGN"),
+    r"|=": (r"\|=", "OR_ASSIGN"),
+    r">>": (r">>", "RIGHT_OP"),
+    r"<<": (r"<<", "LEFT_OP"),
+    r"++": (r"\+\+", "INC_OP"),
+    r"--": (r"--", "DEC_OP"),
+    r"->": (r"->", "PTR_OP"),
+    r"&&": (r"&&", "AND_OP"),
+    r"||": (r"\|\|", "OR_OP"),
+    r"<=": (r"<=", "LE_OP"),
+    r">=": (r">=", "GE_OP"),
+    r"==": (r"==", "EQ_OP"),
+    r"!=": (r"!=", "NE_OP"),
+    r"<:": (r"<:", "["),
+    r":>": (r":>", "]"),
+    r"<%": (r"<%", "{"),
+    r"%>": (r"%>", "}"),
+    r";": (r";", ";"),
+    r"{": (r"{", "{"),
+    r"}": (r"}", "}"),
+    r",": (r",", ","),
+    r":": (r":", ":"),
+    r"=": (r"=", "="),
+    r")": (r"\)", ")"),
+    r"[": (r"\[", "["),
+    r"]": (r"]", "]"),
+    r".": (r"\.", "PERIOD"),
+    r"&": (r"&", "&"),
+    r"!": (r"!", "!"),
+    r"~": (r"~", "~"),
+    r"-": (r"-", "-"),
+    r"+": (r"\+", "+"),
+    r"*": (r"\*", "*"),
+    r"/": (r"/", "/"),
+    r"%": (r"%", "%"),
+    r"<": (r"<", "<"),
+    r">": (r">", ">"),
+    r"^": (r"\^", "^"),
+    r"|": (r"\|", "|"),
+    r"?": (r"\?", "?"),
+}
+
+
+def punctuator_regex(punctuators):
+    punctuator_regexes = [v[0] for v in punctuators.values()]
+    punctuator_regexes.sort(key=len, reverse=True)
+    return "(%s)" % "|".join(punctuator_regexes)
+
+
+# Process line-number directives from the preprocessor
+# See http://docs.freebsd.org/info/cpp/cpp.info.Output.html
+DIRECTIVE = r'\#\s+(\d+)\s+"([^"]+)"[ \d]*\n'
+
+
+@TOKEN(DIRECTIVE)
+def t_ANY_directive(t):
+    t.lexer.filename = t.groups[2]
+    t.lexer.lineno = int(t.groups[1])
+    return None
+
+
+@TOKEN(punctuator_regex(punctuators))
+def t_ANY_punctuator(t):
+    t.type = punctuators[t.value][1]
+    return t
+
+
+IDENTIFIER = sub("{L}({L}|{D})*")
+
+
+@TOKEN(IDENTIFIER)
+def t_INITIAL_identifier(t):
+    t.type = "IDENTIFIER"
+    return t
+
+
+@TOKEN(IDENTIFIER)
+def t_DEFINE_identifier(t):
+    if t.lexer.next_is_define_name:
+        # This identifier is the name of a macro
+        # We need to look ahead and see if this macro takes parameters or not.
+        if (
+            t.lexpos + len(t.value) < t.lexer.lexlen
+            and t.lexer.lexdata[t.lexpos + len(t.value)] == "("
+        ):
+
+            t.type = "PP_DEFINE_MACRO_NAME"
+
+            # Look ahead and read macro parameter list
+            lexdata = t.lexer.lexdata
+            pos = t.lexpos + len(t.value) + 1
+            while lexdata[pos] not in "\n)":
+                pos += 1
+            params = lexdata[t.lexpos + len(t.value) + 1 : pos]
+            paramlist = [x.strip() for x in params.split(",") if x.strip()]
+            t.lexer.macro_params = paramlist
+
+        else:
+            t.type = "PP_DEFINE_NAME"
+
+        t.lexer.next_is_define_name = False
+    elif t.value in t.lexer.macro_params:
+        t.type = "PP_MACRO_PARAM"
+    else:
+        t.type = "IDENTIFIER"
+    return t
+
+
+FLOAT_LITERAL = sub(
+    r"(?P<p1>{D}+)?(?P<dp>[.]?)(?P<p2>(?(p1){D}*|{D}+))"
+    r"(?P<exp>(?:[Ee][+-]?{D}+)?)(?P<suf>{FS}?)(?!\w)"
+)
+
+
+@TOKEN(FLOAT_LITERAL)
+def t_ANY_float(t):
+    t.type = "PP_NUMBER"
+    m = t.lexer.lexmatch
+
+    p1 = m.group("p1")
+    dp = m.group("dp")
+    p2 = m.group("p2")
+    exp = m.group("exp")
+    suf = m.group("suf")
+
+    if dp or exp or (suf and re.match(subs["FS"] + "$", suf)):
+        s = m.group(0)
+        if suf:
+            s = s[: -len(suf)]
+        # Attach a prefix so the parser can figure out whether it should become
+        # an integer, float, or long
+        t.value = "f" + s
+    elif suf and suf in ("Ll"):
+        t.value = "l" + p1
+    else:
+        t.value = "i" + p1
+
+    return t
+
+
+INT_LITERAL = sub(r"(?P<p1>(?:0x{H}+)|(?:{D}+))(?P<suf>{IS})")
+
+
+@TOKEN(INT_LITERAL)
+def t_ANY_int(t):
+    t.type = "PP_NUMBER"
+    m = t.lexer.lexmatch
+
+    if "L" in m.group(3) or "l" in m.group(2):
+        prefix = "l"
+    else:
+        prefix = "i"
+
+    g1 = m.group(2)
+    if g1.startswith("0x"):
+        # Convert base from hexadecimal
+        g1 = str(int(g1[2:], 16))
+    elif g1[0] == "0":
+        # Convert base from octal
+        g1 = str(int(g1, 8))
+
+    t.value = prefix + g1
+
+    return t
+
+
+CHARACTER_CONSTANT = sub(r"L?'(\\.|[^\\'])+'")
+
+
+@TOKEN(CHARACTER_CONSTANT)
+def t_ANY_character_constant(t):
+    t.type = "CHARACTER_CONSTANT"
+    return t
+
+
+STRING_LITERAL = sub(r'L?"(\\.|[^\\"])*"')
+
+
+@TOKEN(STRING_LITERAL)
+def t_ANY_string_literal(t):
+    t.type = "STRING_LITERAL"
+    t.value = StringLiteral(t.value)
+    return t
+
+
+@TOKEN(r"\(")
+def t_ANY_lparen(t):
+    if t.lexpos == 0 or t.lexer.lexdata[t.lexpos - 1] not in (" \t\f\v\n"):
+        t.type = "LPAREN"
+    else:
+        t.type = "("
+    return t
+
+
+@TOKEN(r"\n")
+def t_INITIAL_newline(t):
+    t.lexer.lineno += 1
+    return None
+
+
+@TOKEN(r"\#undef")
+def t_INITIAL_pp_undefine(t):
+    t.type = "PP_UNDEFINE"
+    t.lexer.begin("DEFINE")
+    t.lexer.next_is_define_name = True
+    t.lexer.macro_params = set()
+    return t
+
+
+@TOKEN(r"\#define")
+def t_INITIAL_pp_define(t):
+    t.type = "PP_DEFINE"
+    t.lexer.begin("DEFINE")
+    t.lexer.next_is_define_name = True
+    t.lexer.macro_params = set()
+    return t
+
+
+@TOKEN(r"\#pragma")
+def t_INITIAL_pragma(t):
+    t.type = "PRAGMA"
+    t.lexer.begin("PRAGMA")
+    return t
+
+
+@TOKEN(r"pack")
+def t_PRAGMA_pack(t):
+    t.type = "PRAGMA_PACK"
+    return t
+
+
+@TOKEN(r"\n")
+def t_PRAGMA_newline(t):
+    t.type = "PRAGMA_END"
+    t.lexer.begin("INITIAL")
+    t.lexer.lineno += 1
+    return t
+
+
+@TOKEN(IDENTIFIER)
+def t_PRAGMA_identifier(t):
+    t.type = "IDENTIFIER"
+    return t
+
+
+def t_PRAGMA_error(t):
+    t.type = "OTHER"
+    t.value = t.value[0:30]
+    t.lexer.lexpos += 1  # Skip it if it's an error in a #pragma
+    return t
+
+
+@TOKEN(r"\n")
+def t_DEFINE_newline(t):
+    t.type = "PP_END_DEFINE"
+    t.lexer.begin("INITIAL")
+    t.lexer.lineno += 1
+    del t.lexer.macro_params
+
+    # Damage control in case the token immediately after the #define failed
+    # to handle this
+    t.lexer.next_is_define_name = False
+    return t
+
+
+@TOKEN(r"(\#\#)|(\#)")
+def t_DEFINE_pp_param_op(t):
+    if t.value == "#":
+        t.type = "PP_STRINGIFY"
+    else:
+        t.type = "PP_IDENTIFIER_PASTE"
+    return t
+
+
+def t_INITIAL_error(t):
+    t.type = "OTHER"
+    return t
+
+
+def t_DEFINE_error(t):
+    t.type = "OTHER"
+    t.value = t.value[0]
+    t.lexer.lexpos += 1  # Skip it if it's an error in a #define
+    return t
+
+
+t_ANY_ignore = " \t\v\f\r"
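
The numeric rules above never hand a raw literal to the grammar: t_ANY_float and t_ANY_int prepend a one-character tag ("f", "l" or "i") so the expression evaluator later knows which Python type to build, and hex/octal integers are normalised to decimal digits along the way. A minimal standalone sketch of that classification (a simplified re-implementation for illustration only, not the lexer itself):

import re

# Simplified stand-ins for the patterns the lexer builds with sub() above;
# the real FS/IS suffix classes are richer (gcc 7 float suffixes, u/U, etc.).
FLOAT = re.compile(
    r"(?P<p1>\d+)?(?P<dp>[.]?)(?P<p2>(?(p1)\d*|\d+))"
    r"(?P<exp>(?:[Ee][+-]?\d+)?)(?P<suf>[FflL]?)$"
)
INT = re.compile(r"(?P<p1>(?:0[xX][0-9a-fA-F]+)|(?:\d+))(?P<suf>[uUlL]*)$")


def tag_number(text):
    """Roughly mimic the value tagging done by t_ANY_float / t_ANY_int."""
    m = FLOAT.match(text)
    if m and (m.group("dp") or m.group("exp") or m.group("suf") in ("f", "F")):
        return "f" + text.rstrip("fFlL")        # float constant, suffix dropped
    m = INT.match(text)
    if m:
        digits, suf = m.group("p1"), m.group("suf")
        if digits.lower().startswith("0x"):
            digits = str(int(digits, 16))       # normalise hex to decimal
        elif digits != "0" and digits.startswith("0"):
            digits = str(int(digits, 8))        # normalise octal to decimal
        return ("l" if "l" in suf.lower() else "i") + digits
    return text


assert tag_number("0x1F") == "i31"
assert tag_number("42UL") == "l42"
assert tag_number("1.5e3") == "f1.5e3"
assert tag_number("010") == "i8"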

+ 91 - 84
python/grass/ctypes/ctypesgencore/parser/preprocessor.py

@@ -1,29 +1,20 @@
-#!/usr/bin/env python3
+#!/usr/bin/env python
 
-'''Preprocess a C source file using gcc and convert the result into
+"""Preprocess a C source file using gcc and convert the result into
    a token stream
 
 Reference is C99:
   * http://www.open-std.org/JTC1/SC22/WG14/www/docs/n1124.pdf
 
-'''
+"""
 
-__docformat__ = 'restructuredtext'
-
-import os
-import re
-import shlex
-import subprocess
-import sys
-import tokenize
-import traceback
+__docformat__ = "restructuredtext"
 
+import os, re, shlex, sys, tokenize, traceback, subprocess
 import ctypes
-from . import lex
+from . import lex, yacc
+from .lex import TOKEN, LexError
 from . import pplexer
-from . import yacc
-from .lex import TOKEN
-
 
 # --------------------------------------------------------------------------
 # Lexers
@@ -31,10 +22,9 @@ from .lex import TOKEN
 
 
 class PreprocessorLexer(lex.Lexer):
-
     def __init__(self):
         lex.Lexer.__init__(self)
-        self.filename = '<input>'
+        self.filename = "<input>"
         self.in_define = False
 
     def input(self, data, filename=None):
@@ -46,8 +36,7 @@ class PreprocessorLexer(lex.Lexer):
         lex.Lexer.input(self, data)
 
     def push_input(self, data, filename):
-        self.input_stack.append(
-            (self.lexdata, self.lexpos, self.filename, self.lineno))
+        self.input_stack.append((self.lexdata, self.lexpos, self.filename, self.lineno))
         self.lexdata = data
         self.lexpos = 0
         self.lineno = 1
@@ -55,8 +44,7 @@ class PreprocessorLexer(lex.Lexer):
         self.lexlen = len(self.lexdata)
 
     def pop_input(self):
-        self.lexdata, self.lexpos, self.filename, self.lineno = \
-            self.input_stack.pop()
+        self.lexdata, self.lexpos, self.filename, self.lineno = self.input_stack.pop()
         self.lexlen = len(self.lexdata)
 
     def token(self):
@@ -75,7 +63,6 @@ class PreprocessorLexer(lex.Lexer):
 
 
 class TokenListLexer(object):
-
     def __init__(self, tokens):
         self.tokens = tokens
         self.pos = 0
@@ -95,12 +82,12 @@ def symbol_to_token(sym):
     elif isinstance(sym, lex.LexToken):
         return sym
     else:
-        assert False, 'Not a symbol: %r' % sym
+        assert False, "Not a symbol: %r" % sym
 
 
 def create_token(type, value, production=None):
-    '''Create a token of type and value, at the position where 'production'
-    was reduced.  Don`t specify production if the token is built-in'''
+    """Create a token of type and value, at the position where 'production'
+    was reduced.  Don't specify production if the token is built-in"""
     t = lex.LexToken()
     t.type = type
     t.value = value
@@ -110,35 +97,42 @@ def create_token(type, value, production=None):
         t.filename = production.slice[1].filename
     else:
         t.lineno = -1
-        t.filename = '<builtin>'
+        t.filename = "<builtin>"
     return t
 
+
 # --------------------------------------------------------------------------
 # Grammars
 # --------------------------------------------------------------------------
 
 
 class PreprocessorParser(object):
-
     def __init__(self, options, cparser):
-        self.defines = ["inline=", "__inline__=", "__extension__=",
-                        "_Bool=uint8_t", "__const=const", "__asm__(x)=",
-                        "__asm(x)=", "CTYPESGEN=1"]
+        self.defines = [
+            "inline=",
+            "__inline__=",
+            "__extension__=",
+            "__const=const",
+            "__asm__(x)=",
+            "__asm(x)=",
+            "CTYPESGEN=1",
+        ]
 
         # On OSX, explicitly add these defines to keep from getting syntax
         # errors in the OSX standard headers.
-        if hasattr(os, 'uname') and os.uname()[0] == 'Darwin':
-            self.defines += ["__uint16_t=uint16_t",
-                             "__uint32_t=uint32_t",
-                             "__uint64_t=uint64_t"]
+        if sys.platform == "darwin":
+            self.defines += ["__uint16_t=uint16_t", "__uint32_t=uint32_t", "__uint64_t=uint64_t"]
 
         self.matches = []
         self.output = []
-        self.lexer = lex.lex(cls=PreprocessorLexer,
-                             optimize=1,
-                             lextab='lextab',
-                             outputdir=os.path.dirname(__file__),
-                             module=pplexer)
+        optimize = options.optimize_lexer if hasattr(options, "optimize_lexer") else False
+        self.lexer = lex.lex(
+            cls=PreprocessorLexer,
+            optimize=optimize,
+            lextab="lextab",
+            outputdir=os.path.dirname(__file__),
+            module=pplexer,
+        )
 
         self.options = options
         self.cparser = cparser  # An instance of CParser
@@ -149,43 +143,50 @@ class PreprocessorParser(object):
         cmd = self.options.cpp
         cmd += " -U __GNUC__ -dD"
 
+        for undefine in self.options.cpp_undefines:
+            cmd += " -U%s" % undefine
+
         # This fixes Issue #6 where OS X 10.6+ adds a C extension that breaks
         # the parser.  Blocks shouldn't be needed for ctypesgen support anyway.
-        if sys.platform == 'darwin':
+        if sys.platform == "darwin":
             cmd += " -U __BLOCKS__"
 
         for path in self.options.include_search_paths:
-            cmd += " -I%s" % path
-        for define in self.defines:
+            cmd += ' -I"%s"' % path
+        for define in self.defines + self.options.cpp_defines:
             cmd += ' "-D%s"' % define
         cmd += ' "' + filename + '"'
 
         self.cparser.handle_status(cmd)
 
-        if sys.platform == 'win32':
-            cmd = ['sh.exe', '-c', cmd]
+        if sys.platform == "win32":
+            cmd = ["sh.exe", "-c", cmd]
 
-        pp = subprocess.Popen(cmd,
-                              shell=True,
-                              universal_newlines=True,
-                              stdout=subprocess.PIPE,
-                              stderr=subprocess.PIPE)
+        pp = subprocess.Popen(
+            cmd,
+            shell=True,
+            universal_newlines=True,
+            stdout=subprocess.PIPE,
+            stderr=subprocess.PIPE,
+        )
         try:
             ppout, pperr = pp.communicate()
         except UnicodeError:
             # Fix for https://trac.osgeo.org/grass/ticket/3883,
             # handling file(s) encoded with mac_roman
-            if sys.platform == 'darwin':
-                pp = subprocess.Popen(cmd,
-                                      shell=True,
-                                      universal_newlines=False,  # read as binary
-                                      stdout=subprocess.PIPE,
-                                      stderr=subprocess.PIPE)
+            if sys.platform == "darwin":
+                pp = subprocess.Popen(
+                    cmd,
+                    shell=True,
+                    universal_newlines=False,  # read as binary
+                    stdout=subprocess.PIPE,
+                    stderr=subprocess.PIPE,
+                )
                 ppout, pperr = pp.communicate()
 
-                data = ppout.decode('utf8', errors='replace')
-                ppout = data.replace('\r\n', '\n').replace('\r', '\n')
-                pperr = pperr.decode('utf8', errors='replace')
+                data = ppout.decode("utf8", errors="replace")
+                ppout = data.replace("\r\n", "\n").replace("\r", "\n")
+                pperr = pperr.decode("utf8", errors="replace")
             else:
                 raise UnicodeError
 
@@ -193,51 +194,57 @@ class PreprocessorParser(object):
             if line:
                 self.cparser.handle_pp_error(line)
 
-        # We separate lines that are #defines and lines that are source code
+        # We separate lines into two groups: directives and c-source.  Note that
+        # #pragma directives actually belong to the source category for this.
+        # This is necessary because some source files intermix preprocessor
+        # directives with source--this is not tolerated by ctypesgen's single
+        # grammar.
         # We put all the source lines first, then all the #define lines.
 
         source_lines = []
         define_lines = []
 
+        first_token_reg = re.compile(r"^#\s*([^ ]+)($|\s)")
+
         for line in ppout.split("\n"):
-            line = line.rstrip('\r')
-            line = line + "\n"
-            if line.startswith("# "):
+            line += "\n"
+            search = first_token_reg.match(line)
+            hash_token = search.group(1) if search else None
+
+            if (not hash_token) or hash_token == "pragma":
+                source_lines.append(line)
+                define_lines.append("\n")
+
+            elif hash_token.isdigit():
                 # Line number information has to go with both groups
                 source_lines.append(line)
                 define_lines.append(line)
 
-            elif line.startswith("#define"):
+            else:  # hash_token in ("define", "undef"):
                 source_lines.append("\n")
                 define_lines.append(line)
 
-            elif line.startswith("#"):
-                # It's a directive, but not a #define. Remove it
-                source_lines.append("\n")
-                define_lines.append("\n")
-
-            else:
-                source_lines.append(line)
-                define_lines.append("\n")
-
         text = "".join(source_lines + define_lines)
 
         if self.options.save_preprocessed_headers:
-            self.cparser.handle_status("Saving preprocessed headers to %s." %
-                                       self.options.save_preprocessed_headers)
+            self.cparser.handle_status(
+                "Saving preprocessed headers to %s." % self.options.save_preprocessed_headers
+            )
             try:
-                f = open(self.options.save_preprocessed_headers, "w")
-                f.write(text)
-                f.close()
+                with open(self.options.save_preprocessed_headers, "w") as f:
+                    f.write(text)
             except IOError:
                 self.cparser.handle_error("Couldn't save headers.")
 
         self.lexer.input(text)
         self.output = []
 
-        while True:
-            token = self.lexer.token()
-            if token is not None:
-                self.output.append(token)
-            else:
-                break
+        try:
+            while True:
+                token = self.lexer.token()
+                if token is not None:
+                    self.output.append(token)
+                else:
+                    break
+        except LexError as e:
+            self.cparser.handle_error("{}; {}".format(e, e.text.partition("\n")[0]), filename, 0)
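
The reworked splitting of the gcc -dD output above is easier to read outside the diff; this standalone sketch applies the same classification to a tiny, hypothetical preprocessor fragment:

import re

first_token_reg = re.compile(r"^#\s*([^ ]+)($|\s)")


def split_cpp_output(ppout):
    """Mimic the source/#define separation done in PreprocessorParser.parse()."""
    source_lines, define_lines = [], []
    for line in ppout.split("\n"):
        line += "\n"
        m = first_token_reg.match(line)
        hash_token = m.group(1) if m else None
        if not hash_token or hash_token == "pragma":
            # plain C source (and #pragma) goes only to the source group
            source_lines.append(line)
            define_lines.append("\n")
        elif hash_token.isdigit():
            # '# 12 "file.h"' line markers stay in both groups
            source_lines.append(line)
            define_lines.append(line)
        else:
            # #define / #undef lines are parsed after all of the source
            source_lines.append("\n")
            define_lines.append(line)
    return "".join(source_lines + define_lines)


sample = '# 1 "gis.h"\n#define GIS_H 1\nint G_gisinit(const char *);\n#pragma pack(4)'
print(split_cpp_output(sample))

Padding the other group with blank lines keeps each half aligned with the '# lineno "file"' markers, which is presumably what keeps the reported line numbers correct for both passes.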

File diff suppressed because it is too large
+ 318 - 275
python/grass/ctypes/ctypesgencore/parser/yacc.py


+ 1 - 1
python/grass/ctypes/ctypesgencore/printer/__init__.py

@@ -1,4 +1,4 @@
-#!/usr/bin/env python3
+#!/usr/bin/env python
 
 """
 This module is the backend to ctypesgen; it contains classes to

+ 150 - 0
python/grass/ctypes/ctypesgen/printer_json/printer.py

@@ -0,0 +1,150 @@
+#!/usr/bin/env python
+
+import os, sys, time, json
+from ctypesgen.descriptions import *
+from ctypesgen.ctypedescs import *
+from ctypesgen.messages import *
+
+import ctypesgen.libraryloader  # So we can get the path to it
+from . import test  # So we can find the path to local files in the printer package
+
+
+def path_to_local_file(name, known_local_module=test):
+    basedir = os.path.dirname(known_local_module.__file__)
+    return os.path.join(basedir, name)
+
+
+# From http://stackoverflow.com/questions/1036409/recursively-convert-python-object-graph-to-dictionary
+def todict(obj, classkey="Klass"):
+    if isinstance(obj, dict):
+        for k in obj.keys():
+            obj[k] = todict(obj[k], classkey)
+        return obj
+    elif isinstance(obj, str) or isinstance(obj, bytes):
+        # must handle strings before __iter__ test, since they now have __iter__
+        # in Python3
+        return obj
+    elif hasattr(obj, "__iter__"):
+        return [todict(v, classkey) for v in obj]
+    elif hasattr(obj, "__dict__"):
+        data = dict(
+            [
+                (key, todict(value, classkey))
+                for key, value in obj.__dict__.items()
+                if not callable(value) and not key.startswith("_")
+            ]
+        )
+        if classkey is not None and hasattr(obj, "__class__"):
+            data[classkey] = obj.__class__.__name__
+        return data
+    else:
+        return obj
+
+
+class WrapperPrinter:
+    def __init__(self, outpath, options, data):
+        status_message("Writing to %s." % (outpath or "stdout"))
+
+        self.file = open(outpath, "w") if outpath else sys.stdout
+        self.options = options
+
+        if self.options.strip_build_path and self.options.strip_build_path[-1] != os.path.sep:
+            self.options.strip_build_path += os.path.sep
+
+        self.print_group(self.options.libraries, "libraries", self.print_library)
+
+        method_table = {
+            "function": self.print_function,
+            "macro": self.print_macro,
+            "struct": self.print_struct,
+            "struct-body": self.print_struct_members,
+            "typedef": self.print_typedef,
+            "variable": self.print_variable,
+            "enum": self.print_enum,
+            "constant": self.print_constant,
+            "undef": self.print_undef,
+        }
+
+        res = []
+        for kind, desc in data.output_order:
+            if desc.included:
+                item = method_table[kind](desc)
+                if item:
+                    res.append(item)
+        self.file.write(json.dumps(res, sort_keys=True, indent=4))
+        self.file.write("\n")
+
+    def __del__(self):
+        self.file.close()
+
+    def print_group(self, list, name, function):
+        if list:
+            return [function(obj) for obj in list]
+
+    def print_library(self, library):
+        return {"load_library": library}
+
+    def print_constant(self, constant):
+        return {"type": "constant", "name": constant.name, "value": constant.value.py_string(False)}
+
+    def print_undef(self, undef):
+        return {"type": "undef", "value": undef.macro.py_string(False)}
+
+    def print_typedef(self, typedef):
+        return {"type": "typedef", "name": typedef.name, "ctype": todict(typedef.ctype)}
+
+    def print_struct(self, struct):
+        res = {"type": struct.variety, "name": struct.tag, "attrib": struct.attrib}
+        if not struct.opaque:
+            res["fields"] = []
+            for name, ctype in struct.members:
+                field = {"name": name, "ctype": todict(ctype)}
+                if isinstance(ctype, CtypesBitfield):
+                    field["bitfield"] = ctype.bitfield.py_string(False)
+                res["fields"].append(field)
+        return res
+
+    def print_struct_members(self, struct):
+        pass
+
+    def print_enum(self, enum):
+        res = {"type": "enum", "name": enum.tag}
+
+        if not enum.opaque:
+            res["fields"] = []
+            for name, ctype in enum.members:
+                field = {"name": name, "ctype": todict(ctype)}
+                res["fields"].append(field)
+        return res
+
+    def print_function(self, function):
+        res = {
+            "type": "function",
+            "name": function.c_name(),
+            "variadic": function.variadic,
+            "args": todict(function.argtypes),
+            "return": todict(function.restype),
+            "attrib": function.attrib,
+        }
+        if function.source_library:
+            res["source"] = function.source_library
+        return res
+
+    def print_variable(self, variable):
+        res = {"type": "variable", "ctype": todict(variable.ctype), "name": variable.c_name()}
+        if variable.source_library:
+            res["source"] = variable.source_library
+        return res
+
+    def print_macro(self, macro):
+        if macro.params:
+            return {
+                "type": "macro_function",
+                "name": macro.name,
+                "args": macro.params,
+                "body": macro.expr.py_string(True),
+            }
+        else:
+            # The macro translator makes heroic efforts but it occasionally fails.
+            # Beware the contents of the value!
+            return {"type": "macro", "name": macro.name, "value": macro.expr.py_string(True)}
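
todict() is what flattens the description objects into plain dictionaries before json.dumps; a small illustration with hypothetical stand-in classes (assuming the bundled ctypesgen package is importable):

from ctypesgen.printer_json.printer import todict


class Field:  # hypothetical stand-in for a ctypesgen description object
    def __init__(self, name, ctype):
        self.name = name
        self.ctype = ctype


class StructDesc:  # hypothetical stand-in
    def __init__(self, tag, members):
        self.tag = tag
        self.members = members


s = StructDesc("Cell_head", [Field("rows", "int"), Field("cols", "int")])
print(todict(s))
# {'tag': 'Cell_head',
#  'members': [{'name': 'rows', 'ctype': 'int', 'Klass': 'Field'},
#              {'name': 'cols', 'ctype': 'int', 'Klass': 'Field'}],
#  'Klass': 'StructDesc'}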

+ 6 - 0
python/grass/ctypes/ctypesgen/printer_json/test.py

@@ -0,0 +1,6 @@
+"""
+ctypesgen.printer_json.printer imports this module so that it can find the
+path to local files in the printer package.
+"""
+
+pass

+ 10 - 0
python/grass/ctypes/ctypesgen/printer_python/__init__.py

@@ -0,0 +1,10 @@
+#!/usr/bin/env python
+
+"""
+This module is the backend to ctypesgen; it contains classes to
+produce the final .py output files.
+"""
+
+from .printer import WrapperPrinter
+
+__all__ = ["WrapperPrinter"]

+ 9 - 0
python/grass/ctypes/ctypesgen/printer_python/defaultheader.py

@@ -0,0 +1,9 @@
+r"""Wrapper for %(name)s
+
+Generated with:
+%(argv)s
+
+Do not modify this file.
+"""
+
+__docformat__ = "restructuredtext"
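
The %(name)s and %(argv)s fields are ordinary %-style mapping placeholders, presumably filled in by the Python printer when it emits a wrapper; as plain Python (with made-up example values):

template = '''r"""Wrapper for %(name)s

Generated with:
%(argv)s

Do not modify this file.
"""
'''
print(template % {"name": "grass.lib.gis", "argv": "ctypesgen ... gis.h"})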

+ 98 - 77
python/grass/ctypes/ctypesgencore/printer/preamble.py

@@ -1,16 +1,8 @@
-import ctypes
-import os
-import sys
-import six
+import ctypes, os, sys
 from ctypes import *
-from grass.script.utils import encode
-
-if sys.version_info.major >= 3:
-    long = int
-    unicode = str
 
 _int_types = (c_int16, c_int32)
-if hasattr(ctypes, 'c_int64'):
+if hasattr(ctypes, "c_int64"):
     # Some builds of ctypes apparently do not have c_int64
     # defined; it's a pretty good bet that these builds do not
     # have 64-bit pointers.
@@ -26,7 +18,7 @@ class c_void(Structure):
     # c_void_p is a buggy return type, converting to int, so
     # POINTER(None) == c_void_p is actually written as
     # POINTER(c_void), so it can be treated as a real pointer.
-    _fields_ = [('dummy', c_int)]
+    _fields_ = [("dummy", c_int)]
 
 
 def POINTER(obj):
@@ -35,11 +27,13 @@ def POINTER(obj):
     # Convert None to a real NULL pointer to work around bugs
     # in how ctypes handles None on 64-bit platforms
     if not isinstance(p.from_param, classmethod):
+
         def from_param(cls, x):
             if x is None:
                 return cls()
             else:
                 return x
+
         p.from_param = classmethod(from_param)
 
     return p
@@ -47,26 +41,33 @@ def POINTER(obj):
 
 class UserString:
     def __init__(self, seq):
-        if isinstance(seq, str):
+        if isinstance(seq, basestring):
             self.data = seq
         elif isinstance(seq, UserString):
             self.data = seq.data[:]
         else:
             self.data = str(seq)
 
-    def __str__(self): return str(self.data)
+    def __str__(self):
+        return str(self.data)
 
-    def __repr__(self): return repr(self.data)
+    def __repr__(self):
+        return repr(self.data)
 
-    def __int__(self): return int(self.data)
+    def __int__(self):
+        return int(self.data)
 
-    def __long__(self): return long(self.data)
+    def __long__(self):
+        return long(self.data)
 
-    def __float__(self): return float(self.data)
+    def __float__(self):
+        return float(self.data)
 
-    def __complex__(self): return complex(self.data)
+    def __complex__(self):
+        return complex(self.data)
 
-    def __hash__(self): return hash(self.data)
+    def __hash__(self):
+        return hash(self.data)
 
     def __cmp__(self, string):
         if isinstance(string, UserString):
@@ -77,9 +78,11 @@ class UserString:
     def __contains__(self, char):
         return char in self.data
 
-    def __len__(self): return len(self.data)
+    def __len__(self):
+        return len(self.data)
 
-    def __getitem__(self, index): return self.__class__(self.data[index])
+    def __getitem__(self, index):
+        return self.__class__(self.data[index])
 
     def __getslice__(self, start, end):
         start = max(start, 0)
@@ -89,31 +92,33 @@ class UserString:
     def __add__(self, other):
         if isinstance(other, UserString):
             return self.__class__(self.data + other.data)
-        elif isinstance(other, str):
+        elif isinstance(other, basestring):
             return self.__class__(self.data + other)
         else:
             return self.__class__(self.data + str(other))
 
     def __radd__(self, other):
-        if isinstance(other, str):
+        if isinstance(other, basestring):
             return self.__class__(other + self.data)
         else:
             return self.__class__(str(other) + self.data)
 
     def __mul__(self, n):
         return self.__class__(self.data * n)
+
     __rmul__ = __mul__
 
     def __mod__(self, args):
         return self.__class__(self.data % args)
 
     # the following methods are defined in alphabetical order:
-    def capitalize(self): return self.__class__(self.data.capitalize())
+    def capitalize(self):
+        return self.__class__(self.data.capitalize())
 
     def center(self, width, *args):
         return self.__class__(self.data.center(width, *args))
 
-    def count(self, sub, start=0, end=sys.maxsize):
+    def count(self, sub, start=0, end=sys.maxint):
         return self.data.count(sub, start, end)
 
     def decode(self, encoding=None, errors=None):  # XXX improve this?
@@ -134,44 +139,56 @@ class UserString:
         else:
             return self.__class__(self.data.encode())
 
-    def endswith(self, suffix, start=0, end=sys.maxsize):
+    def endswith(self, suffix, start=0, end=sys.maxint):
         return self.data.endswith(suffix, start, end)
 
     def expandtabs(self, tabsize=8):
         return self.__class__(self.data.expandtabs(tabsize))
 
-    def find(self, sub, start=0, end=sys.maxsize):
+    def find(self, sub, start=0, end=sys.maxint):
         return self.data.find(sub, start, end)
 
-    def index(self, sub, start=0, end=sys.maxsize):
+    def index(self, sub, start=0, end=sys.maxint):
         return self.data.index(sub, start, end)
 
-    def isalpha(self): return self.data.isalpha()
+    def isalpha(self):
+        return self.data.isalpha()
 
-    def isalnum(self): return self.data.isalnum()
+    def isalnum(self):
+        return self.data.isalnum()
 
-    def isdecimal(self): return self.data.isdecimal()
+    def isdecimal(self):
+        return self.data.isdecimal()
 
-    def isdigit(self): return self.data.isdigit()
+    def isdigit(self):
+        return self.data.isdigit()
 
-    def islower(self): return self.data.islower()
+    def islower(self):
+        return self.data.islower()
 
-    def isnumeric(self): return self.data.isnumeric()
+    def isnumeric(self):
+        return self.data.isnumeric()
 
-    def isspace(self): return self.data.isspace()
+    def isspace(self):
+        return self.data.isspace()
 
-    def istitle(self): return self.data.istitle()
+    def istitle(self):
+        return self.data.istitle()
 
-    def isupper(self): return self.data.isupper()
+    def isupper(self):
+        return self.data.isupper()
 
-    def join(self, seq): return self.data.join(seq)
+    def join(self, seq):
+        return self.data.join(seq)
 
     def ljust(self, width, *args):
         return self.__class__(self.data.ljust(width, *args))
 
-    def lower(self): return self.__class__(self.data.lower())
+    def lower(self):
+        return self.__class__(self.data.lower())
 
-    def lstrip(self, chars=None): return self.__class__(self.data.lstrip(chars))
+    def lstrip(self, chars=None):
+        return self.__class__(self.data.lstrip(chars))
 
     def partition(self, sep):
         return self.data.partition(sep)
@@ -179,10 +196,10 @@ class UserString:
     def replace(self, old, new, maxsplit=-1):
         return self.__class__(self.data.replace(old, new, maxsplit))
 
-    def rfind(self, sub, start=0, end=sys.maxsize):
+    def rfind(self, sub, start=0, end=sys.maxint):
         return self.data.rfind(sub, start, end)
 
-    def rindex(self, sub, start=0, end=sys.maxsize):
+    def rindex(self, sub, start=0, end=sys.maxint):
         return self.data.rindex(sub, start, end)
 
     def rjust(self, width, *args):
@@ -191,7 +208,8 @@ class UserString:
     def rpartition(self, sep):
         return self.data.rpartition(sep)
 
-    def rstrip(self, chars=None): return self.__class__(self.data.rstrip(chars))
+    def rstrip(self, chars=None):
+        return self.__class__(self.data.rstrip(chars))
 
     def split(self, sep=None, maxsplit=-1):
         return self.data.split(sep, maxsplit)
@@ -199,23 +217,29 @@ class UserString:
     def rsplit(self, sep=None, maxsplit=-1):
         return self.data.rsplit(sep, maxsplit)
 
-    def splitlines(self, keepends=0): return self.data.splitlines(keepends)
+    def splitlines(self, keepends=0):
+        return self.data.splitlines(keepends)
 
-    def startswith(self, prefix, start=0, end=sys.maxsize):
+    def startswith(self, prefix, start=0, end=sys.maxint):
         return self.data.startswith(prefix, start, end)
 
-    def strip(self, chars=None): return self.__class__(self.data.strip(chars))
+    def strip(self, chars=None):
+        return self.__class__(self.data.strip(chars))
 
-    def swapcase(self): return self.__class__(self.data.swapcase())
+    def swapcase(self):
+        return self.__class__(self.data.swapcase())
 
-    def title(self): return self.__class__(self.data.title())
+    def title(self):
+        return self.__class__(self.data.title())
 
     def translate(self, *args):
         return self.__class__(self.data.translate(*args))
 
-    def upper(self): return self.__class__(self.data.upper())
+    def upper(self):
+        return self.__class__(self.data.upper())
 
-    def zfill(self, width): return self.__class__(self.data.zfill(width))
+    def zfill(self, width):
+        return self.__class__(self.data.zfill(width))
 
 
 class MutableString(UserString):
@@ -245,21 +269,21 @@ class MutableString(UserString):
             index += len(self.data)
         if index < 0 or index >= len(self.data):
             raise IndexError
-        self.data = self.data[:index] + sub + self.data[index + 1:]
+        self.data = self.data[:index] + sub + self.data[index + 1 :]
 
     def __delitem__(self, index):
         if index < 0:
             index += len(self.data)
         if index < 0 or index >= len(self.data):
             raise IndexError
-        self.data = self.data[:index] + self.data[index + 1:]
+        self.data = self.data[:index] + self.data[index + 1 :]
 
     def __setslice__(self, start, end, sub):
         start = max(start, 0)
         end = max(end, 0)
         if isinstance(sub, UserString):
             self.data = self.data[:start] + sub.data + self.data[end:]
-        elif isinstance(sub, str):
+        elif isinstance(sub, basestring):
             self.data = self.data[:start] + sub + self.data[end:]
         else:
             self.data = self.data[:start] + str(sub) + self.data[end:]
@@ -275,7 +299,7 @@ class MutableString(UserString):
     def __iadd__(self, other):
         if isinstance(other, UserString):
             self.data += other.data
-        elif isinstance(other, str):
+        elif isinstance(other, basestring):
             self.data += other
         else:
             self.data += str(other)
@@ -288,12 +312,11 @@ class MutableString(UserString):
 
 class String(MutableString, Union):
 
-    _fields_ = [('raw', POINTER(c_char)),
-                ('data', c_char_p)]
+    _fields_ = [("raw", POINTER(c_char)), ("data", c_char_p)]
 
     def __init__(self, obj=""):
-        if isinstance(obj, (str, unicode, bytes, UserString)):
-            self.data = encode(obj)
+        if isinstance(obj, (str, unicode, UserString)):
+            self.data = str(obj)
         else:
             self.raw = obj
 
@@ -310,11 +333,7 @@ class String(MutableString, Union):
             return obj
 
         # Convert from str
-        elif isinstance(obj, bytes):
-            return cls(obj)
-
-        # Convert from str/unicode
-        elif isinstance(obj, (str, unicode)):
+        elif isinstance(obj, str):
             return cls(obj)
 
         # Convert from c_char_p
@@ -329,19 +348,17 @@ class String(MutableString, Union):
         elif isinstance(obj, int):
             return cls(cast(obj, POINTER(c_char)))
 
-        # Convert from c_char array
-        elif isinstance(obj, c_char * len(obj)):
-            return obj
-
         # Convert from object
         else:
             return String.from_param(obj._as_parameter_)
+
     from_param = classmethod(from_param)
 
 
 def ReturnString(obj, func=None, arguments=None):
     return String.from_param(obj)
 
+
 # As of ctypes 1.0, ctypes does not support custom error-checking
 # functions on callbacks, nor does it support custom datatypes on
 # callbacks, so we must ensure that all callbacks return
@@ -349,27 +366,20 @@ def ReturnString(obj, func=None, arguments=None):
 #
 # Non-primitive return values wrapped with UNCHECKED won't be
 # typechecked, and will be converted to c_void_p.
-
-
 def UNCHECKED(type):
-    if (hasattr(type, "_type_") and isinstance(type._type_, str)
-            and type._type_ != "P"):
+    if hasattr(type, "_type_") and isinstance(type._type_, str) and type._type_ != "P":
         return type
     else:
         return c_void_p
 
+
 # ctypes doesn't have direct support for variadic functions, so we have to write
 # our own wrapper class
-
-
 class _variadic_function(object):
-
-    def __init__(self, func, restype, argtypes, errcheck):
+    def __init__(self, func, restype, argtypes):
         self.func = func
         self.func.restype = restype
         self.argtypes = argtypes
-        if errcheck:
-            self.func.errcheck = errcheck
 
     def _as_parameter_(self):
         # So we can pass this variadic function as a function pointer
@@ -383,3 +393,14 @@ class _variadic_function(object):
             fixed_args.append(argtype.from_param(args[i]))
             i += 1
         return self.func(*fixed_args + list(args[i:]))
+
+
+def ord_if_char(value):
+    """
+    Simple helper used for casts to simple builtin types: if the argument is a
+    string type, it will be converted to its ordinal value.
+
+    This function will raise an exception if the argument is a string with more
+    than one character.
+    """
+    return ord(value) if isinstance(value, str) else value
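
UNCHECKED exists because ctypes cannot attach custom type or error checking to callback return values, so non-primitive returns are downgraded to c_void_p; its effect is easy to verify in isolation (a standalone copy using plain ctypes):

from ctypes import POINTER, c_double, c_int, c_void_p


def UNCHECKED(type):
    # keep primitive scalar return types, map anything pointer-like to void*
    if hasattr(type, "_type_") and isinstance(type._type_, str) and type._type_ != "P":
        return type
    return c_void_p


assert UNCHECKED(c_int) is c_int              # primitive scalar kept
assert UNCHECKED(c_double) is c_double        # primitive scalar kept
assert UNCHECKED(POINTER(c_int)) is c_void_p  # pointer return becomes void*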

+ 95 - 93
python/grass/ctypes/preamble.py

@@ -1,16 +1,8 @@
-import ctypes
-import os
-import sys
-import six
+import ctypes, os, sys
 from ctypes import *
-from grass.script.utils import encode
-
-if sys.version_info.major >= 3:
-    long = int
-    unicode = str
 
 _int_types = (c_int16, c_int32)
-if hasattr(ctypes, 'c_int64'):
+if hasattr(ctypes, "c_int64"):
     # Some builds of ctypes apparently do not have c_int64
     # defined; it's a pretty good bet that these builds do not
     # have 64-bit pointers.
@@ -22,51 +14,35 @@ del t
 del _int_types
 
 
-class c_void(Structure):
-    # c_void_p is a buggy return type, converting to int, so
-    # POINTER(None) == c_void_p is actually written as
-    # POINTER(c_void), so it can be treated as a real pointer.
-    _fields_ = [('dummy', c_int)]
-
-
-def POINTER(obj):
-    p = ctypes.POINTER(obj)
-
-    # Convert None to a real NULL pointer to work around bugs
-    # in how ctypes handles None on 64-bit platforms
-    if not isinstance(p.from_param, classmethod):
-        def from_param(cls, x):
-            if x is None:
-                return cls()
-            else:
-                return x
-        p.from_param = classmethod(from_param)
-
-    return p
-
-
 class UserString:
     def __init__(self, seq):
-        if isinstance(seq, str):
+        if isinstance(seq, basestring):
             self.data = seq
         elif isinstance(seq, UserString):
             self.data = seq.data[:]
         else:
             self.data = str(seq)
 
-    def __str__(self): return str(self.data)
+    def __str__(self):
+        return str(self.data)
 
-    def __repr__(self): return repr(self.data)
+    def __repr__(self):
+        return repr(self.data)
 
-    def __int__(self): return int(self.data)
+    def __int__(self):
+        return int(self.data)
 
-    def __long__(self): return long(self.data)
+    def __long__(self):
+        return long(self.data)
 
-    def __float__(self): return float(self.data)
+    def __float__(self):
+        return float(self.data)
 
-    def __complex__(self): return complex(self.data)
+    def __complex__(self):
+        return complex(self.data)
 
-    def __hash__(self): return hash(self.data)
+    def __hash__(self):
+        return hash(self.data)
 
     def __cmp__(self, string):
         if isinstance(string, UserString):
@@ -77,9 +53,11 @@ class UserString:
     def __contains__(self, char):
         return char in self.data
 
-    def __len__(self): return len(self.data)
+    def __len__(self):
+        return len(self.data)
 
-    def __getitem__(self, index): return self.__class__(self.data[index])
+    def __getitem__(self, index):
+        return self.__class__(self.data[index])
 
     def __getslice__(self, start, end):
         start = max(start, 0)
@@ -89,31 +67,33 @@ class UserString:
     def __add__(self, other):
         if isinstance(other, UserString):
             return self.__class__(self.data + other.data)
-        elif isinstance(other, str):
+        elif isinstance(other, basestring):
             return self.__class__(self.data + other)
         else:
             return self.__class__(self.data + str(other))
 
     def __radd__(self, other):
-        if isinstance(other, str):
+        if isinstance(other, basestring):
             return self.__class__(other + self.data)
         else:
             return self.__class__(str(other) + self.data)
 
     def __mul__(self, n):
         return self.__class__(self.data * n)
+
     __rmul__ = __mul__
 
     def __mod__(self, args):
         return self.__class__(self.data % args)
 
     # the following methods are defined in alphabetical order:
-    def capitalize(self): return self.__class__(self.data.capitalize())
+    def capitalize(self):
+        return self.__class__(self.data.capitalize())
 
     def center(self, width, *args):
         return self.__class__(self.data.center(width, *args))
 
-    def count(self, sub, start=0, end=sys.maxsize):
+    def count(self, sub, start=0, end=sys.maxint):
         return self.data.count(sub, start, end)
 
     def decode(self, encoding=None, errors=None):  # XXX improve this?
@@ -134,44 +114,56 @@ class UserString:
         else:
             return self.__class__(self.data.encode())
 
-    def endswith(self, suffix, start=0, end=sys.maxsize):
+    def endswith(self, suffix, start=0, end=sys.maxint):
         return self.data.endswith(suffix, start, end)
 
     def expandtabs(self, tabsize=8):
         return self.__class__(self.data.expandtabs(tabsize))
 
-    def find(self, sub, start=0, end=sys.maxsize):
+    def find(self, sub, start=0, end=sys.maxint):
         return self.data.find(sub, start, end)
 
-    def index(self, sub, start=0, end=sys.maxsize):
+    def index(self, sub, start=0, end=sys.maxint):
         return self.data.index(sub, start, end)
 
-    def isalpha(self): return self.data.isalpha()
+    def isalpha(self):
+        return self.data.isalpha()
 
-    def isalnum(self): return self.data.isalnum()
+    def isalnum(self):
+        return self.data.isalnum()
 
-    def isdecimal(self): return self.data.isdecimal()
+    def isdecimal(self):
+        return self.data.isdecimal()
 
-    def isdigit(self): return self.data.isdigit()
+    def isdigit(self):
+        return self.data.isdigit()
 
-    def islower(self): return self.data.islower()
+    def islower(self):
+        return self.data.islower()
 
-    def isnumeric(self): return self.data.isnumeric()
+    def isnumeric(self):
+        return self.data.isnumeric()
 
-    def isspace(self): return self.data.isspace()
+    def isspace(self):
+        return self.data.isspace()
 
-    def istitle(self): return self.data.istitle()
+    def istitle(self):
+        return self.data.istitle()
 
-    def isupper(self): return self.data.isupper()
+    def isupper(self):
+        return self.data.isupper()
 
-    def join(self, seq): return self.data.join(seq)
+    def join(self, seq):
+        return self.data.join(seq)
 
     def ljust(self, width, *args):
         return self.__class__(self.data.ljust(width, *args))
 
-    def lower(self): return self.__class__(self.data.lower())
+    def lower(self):
+        return self.__class__(self.data.lower())
 
-    def lstrip(self, chars=None): return self.__class__(self.data.lstrip(chars))
+    def lstrip(self, chars=None):
+        return self.__class__(self.data.lstrip(chars))
 
     def partition(self, sep):
         return self.data.partition(sep)
@@ -179,10 +171,10 @@ class UserString:
     def replace(self, old, new, maxsplit=-1):
         return self.__class__(self.data.replace(old, new, maxsplit))
 
-    def rfind(self, sub, start=0, end=sys.maxsize):
+    def rfind(self, sub, start=0, end=sys.maxint):
         return self.data.rfind(sub, start, end)
 
-    def rindex(self, sub, start=0, end=sys.maxsize):
+    def rindex(self, sub, start=0, end=sys.maxint):
         return self.data.rindex(sub, start, end)
 
     def rjust(self, width, *args):
@@ -191,7 +183,8 @@ class UserString:
     def rpartition(self, sep):
         return self.data.rpartition(sep)
 
-    def rstrip(self, chars=None): return self.__class__(self.data.rstrip(chars))
+    def rstrip(self, chars=None):
+        return self.__class__(self.data.rstrip(chars))
 
     def split(self, sep=None, maxsplit=-1):
         return self.data.split(sep, maxsplit)
@@ -199,23 +192,29 @@ class UserString:
     def rsplit(self, sep=None, maxsplit=-1):
         return self.data.rsplit(sep, maxsplit)
 
-    def splitlines(self, keepends=0): return self.data.splitlines(keepends)
+    def splitlines(self, keepends=0):
+        return self.data.splitlines(keepends)
 
-    def startswith(self, prefix, start=0, end=sys.maxsize):
+    def startswith(self, prefix, start=0, end=sys.maxint):
         return self.data.startswith(prefix, start, end)
 
-    def strip(self, chars=None): return self.__class__(self.data.strip(chars))
+    def strip(self, chars=None):
+        return self.__class__(self.data.strip(chars))
 
-    def swapcase(self): return self.__class__(self.data.swapcase())
+    def swapcase(self):
+        return self.__class__(self.data.swapcase())
 
-    def title(self): return self.__class__(self.data.title())
+    def title(self):
+        return self.__class__(self.data.title())
 
     def translate(self, *args):
         return self.__class__(self.data.translate(*args))
 
-    def upper(self): return self.__class__(self.data.upper())
+    def upper(self):
+        return self.__class__(self.data.upper())
 
-    def zfill(self, width): return self.__class__(self.data.zfill(width))
+    def zfill(self, width):
+        return self.__class__(self.data.zfill(width))
 
 
 class MutableString(UserString):
@@ -245,21 +244,21 @@ class MutableString(UserString):
             index += len(self.data)
         if index < 0 or index >= len(self.data):
             raise IndexError
-        self.data = self.data[:index] + sub + self.data[index + 1:]
+        self.data = self.data[:index] + sub + self.data[index + 1 :]
 
     def __delitem__(self, index):
         if index < 0:
             index += len(self.data)
         if index < 0 or index >= len(self.data):
             raise IndexError
-        self.data = self.data[:index] + self.data[index + 1:]
+        self.data = self.data[:index] + self.data[index + 1 :]
 
     def __setslice__(self, start, end, sub):
         start = max(start, 0)
         end = max(end, 0)
         if isinstance(sub, UserString):
             self.data = self.data[:start] + sub.data + self.data[end:]
-        elif isinstance(sub, str):
+        elif isinstance(sub, basestring):
             self.data = self.data[:start] + sub + self.data[end:]
         else:
             self.data = self.data[:start] + str(sub) + self.data[end:]
@@ -275,7 +274,7 @@ class MutableString(UserString):
     def __iadd__(self, other):
         if isinstance(other, UserString):
             self.data += other.data
-        elif isinstance(other, str):
+        elif isinstance(other, basestring):
             self.data += other
         else:
             self.data += str(other)
@@ -288,12 +287,11 @@ class MutableString(UserString):
 
 class String(MutableString, Union):
 
-    _fields_ = [('raw', POINTER(c_char)),
-                ('data', c_char_p)]
+    _fields_ = [("raw", POINTER(c_char)), ("data", c_char_p)]
 
     def __init__(self, obj=""):
-        if isinstance(obj, (str, unicode, bytes, UserString)):
-            self.data = encode(obj)
+        if isinstance(obj, (str, unicode, UserString)):
+            self.data = str(obj)
         else:
             self.raw = obj
 
@@ -309,12 +307,8 @@ class String(MutableString, Union):
         elif isinstance(obj, String):
             return obj
 
-        # Convert from bytes
-        elif isinstance(obj, bytes):
-            return cls(obj)
-
-        # Convert from str/unicode
-        elif isinstance(obj, (str, unicode)):
+        # Convert from str
+        elif isinstance(obj, str):
             return cls(obj)
 
         # Convert from c_char_p
@@ -336,12 +330,14 @@ class String(MutableString, Union):
         # Convert from object
         else:
             return String.from_param(obj._as_parameter_)
+
     from_param = classmethod(from_param)
 
 
 def ReturnString(obj, func=None, arguments=None):
     return String.from_param(obj)
 
+
 # As of ctypes 1.0, ctypes does not support custom error-checking
 # functions on callbacks, nor does it support custom datatypes on
 # callbacks, so we must ensure that all callbacks return
@@ -349,21 +345,16 @@ def ReturnString(obj, func=None, arguments=None):
 #
 # Non-primitive return values wrapped with UNCHECKED won't be
 # typechecked, and will be converted to c_void_p.
-
-
 def UNCHECKED(type):
-    if (hasattr(type, "_type_") and isinstance(type._type_, str)
-            and type._type_ != "P"):
+    if hasattr(type, "_type_") and isinstance(type._type_, str) and type._type_ != "P":
         return type
     else:
         return c_void_p
 
+
 # ctypes doesn't have direct support for variadic functions, so we have to write
 # our own wrapper class
-
-
 class _variadic_function(object):
-
     def __init__(self, func, restype, argtypes, errcheck):
         self.func = func
         self.func.restype = restype
@@ -383,3 +374,14 @@ class _variadic_function(object):
             fixed_args.append(argtype.from_param(args[i]))
             i += 1
         return self.func(*fixed_args + list(args[i:]))
+
+
+def ord_if_char(value):
+    """
+    Simple helper used for casts to simple builtin types: if the argument is a
+    string type, it will be converted to its ordinal value.
+
+    This function will raise an exception if the argument is a string with more
+    than one character.
+    """
+    return ord(value) if isinstance(value, str) else value
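
ord_if_char, appended at the end of both preamble variants, is a small guard used for casts to simple builtin types; its behaviour in isolation:

def ord_if_char(value):
    # single-character strings become their code point; other values pass through
    return ord(value) if isinstance(value, str) else value


assert ord_if_char("A") == 65
assert ord_if_char(65) == 65
# ord_if_char("AB") raises TypeError, since ord() expects a single character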

+ 448 - 0
python/grass/ctypes/ctypesgen/printer_python/preamble/3_2.py

@@ -0,0 +1,448 @@
+import ctypes, os, sys
+from ctypes import *
+
+_int_types = (c_int16, c_int32)
+if hasattr(ctypes, "c_int64"):
+    # Some builds of ctypes apparently do not have c_int64
+    # defined; it's a pretty good bet that these builds do not
+    # have 64-bit pointers.
+    _int_types += (c_int64,)
+for t in _int_types:
+    if sizeof(t) == sizeof(c_size_t):
+        c_ptrdiff_t = t
+del t
+del _int_types
+
+
+def POINTER(obj):
+    p = ctypes.POINTER(obj)
+
+    # Convert None to a real NULL pointer to work around bugs
+    # in how ctypes handles None on 64-bit platforms
+    if not isinstance(p.from_param, classmethod):
+
+        def from_param(cls, x):
+            if x is None:
+                return cls()
+            else:
+                return x
+
+        p.from_param = classmethod(from_param)
+
+    return p
+
+
+class UserString:
+    def __init__(self, seq):
+        if isinstance(seq, bytes):
+            self.data = seq
+        elif isinstance(seq, UserString):
+            self.data = seq.data[:]
+        else:
+            self.data = str(seq).encode()
+
+    def __bytes__(self):
+        return self.data
+
+    def __str__(self):
+        return self.data.decode()
+
+    def __repr__(self):
+        return repr(self.data)
+
+    def __int__(self):
+        return int(self.data.decode())
+
+    def __long__(self):
+        return int(self.data.decode())
+
+    def __float__(self):
+        return float(self.data.decode())
+
+    def __complex__(self):
+        return complex(self.data.decode())
+
+    def __hash__(self):
+        return hash(self.data)
+
+    def __cmp__(self, string):
+        if isinstance(string, UserString):
+            return cmp(self.data, string.data)
+        else:
+            return cmp(self.data, string)
+
+    def __le__(self, string):
+        if isinstance(string, UserString):
+            return self.data <= string.data
+        else:
+            return self.data <= string
+
+    def __lt__(self, string):
+        if isinstance(string, UserString):
+            return self.data < string.data
+        else:
+            return self.data < string
+
+    def __ge__(self, string):
+        if isinstance(string, UserString):
+            return self.data >= string.data
+        else:
+            return self.data >= string
+
+    def __gt__(self, string):
+        if isinstance(string, UserString):
+            return self.data > string.data
+        else:
+            return self.data > string
+
+    def __eq__(self, string):
+        if isinstance(string, UserString):
+            return self.data == string.data
+        else:
+            return self.data == string
+
+    def __ne__(self, string):
+        if isinstance(string, UserString):
+            return self.data != string.data
+        else:
+            return self.data != string
+
+    def __contains__(self, char):
+        return char in self.data
+
+    def __len__(self):
+        return len(self.data)
+
+    def __getitem__(self, index):
+        return self.__class__(self.data[index])
+
+    def __getslice__(self, start, end):
+        start = max(start, 0)
+        end = max(end, 0)
+        return self.__class__(self.data[start:end])
+
+    def __add__(self, other):
+        if isinstance(other, UserString):
+            return self.__class__(self.data + other.data)
+        elif isinstance(other, bytes):
+            return self.__class__(self.data + other)
+        else:
+            return self.__class__(self.data + str(other).encode())
+
+    def __radd__(self, other):
+        if isinstance(other, bytes):
+            return self.__class__(other + self.data)
+        else:
+            return self.__class__(str(other).encode() + self.data)
+
+    def __mul__(self, n):
+        return self.__class__(self.data * n)
+
+    __rmul__ = __mul__
+
+    def __mod__(self, args):
+        return self.__class__(self.data % args)
+
+    # the following methods are defined in alphabetical order:
+    def capitalize(self):
+        return self.__class__(self.data.capitalize())
+
+    def center(self, width, *args):
+        return self.__class__(self.data.center(width, *args))
+
+    def count(self, sub, start=0, end=sys.maxsize):
+        return self.data.count(sub, start, end)
+
+    def decode(self, encoding=None, errors=None):  # XXX improve this?
+        if encoding:
+            if errors:
+                return self.__class__(self.data.decode(encoding, errors))
+            else:
+                return self.__class__(self.data.decode(encoding))
+        else:
+            return self.__class__(self.data.decode())
+
+    def encode(self, encoding=None, errors=None):  # XXX improve this?
+        if encoding:
+            if errors:
+                return self.__class__(self.data.encode(encoding, errors))
+            else:
+                return self.__class__(self.data.encode(encoding))
+        else:
+            return self.__class__(self.data.encode())
+
+    def endswith(self, suffix, start=0, end=sys.maxsize):
+        return self.data.endswith(suffix, start, end)
+
+    def expandtabs(self, tabsize=8):
+        return self.__class__(self.data.expandtabs(tabsize))
+
+    def find(self, sub, start=0, end=sys.maxsize):
+        return self.data.find(sub, start, end)
+
+    def index(self, sub, start=0, end=sys.maxsize):
+        return self.data.index(sub, start, end)
+
+    def isalpha(self):
+        return self.data.isalpha()
+
+    def isalnum(self):
+        return self.data.isalnum()
+
+    def isdecimal(self):
+        return self.data.isdecimal()
+
+    def isdigit(self):
+        return self.data.isdigit()
+
+    def islower(self):
+        return self.data.islower()
+
+    def isnumeric(self):
+        return self.data.isnumeric()
+
+    def isspace(self):
+        return self.data.isspace()
+
+    def istitle(self):
+        return self.data.istitle()
+
+    def isupper(self):
+        return self.data.isupper()
+
+    def join(self, seq):
+        return self.data.join(seq)
+
+    def ljust(self, width, *args):
+        return self.__class__(self.data.ljust(width, *args))
+
+    def lower(self):
+        return self.__class__(self.data.lower())
+
+    def lstrip(self, chars=None):
+        return self.__class__(self.data.lstrip(chars))
+
+    def partition(self, sep):
+        return self.data.partition(sep)
+
+    def replace(self, old, new, maxsplit=-1):
+        return self.__class__(self.data.replace(old, new, maxsplit))
+
+    def rfind(self, sub, start=0, end=sys.maxsize):
+        return self.data.rfind(sub, start, end)
+
+    def rindex(self, sub, start=0, end=sys.maxsize):
+        return self.data.rindex(sub, start, end)
+
+    def rjust(self, width, *args):
+        return self.__class__(self.data.rjust(width, *args))
+
+    def rpartition(self, sep):
+        return self.data.rpartition(sep)
+
+    def rstrip(self, chars=None):
+        return self.__class__(self.data.rstrip(chars))
+
+    def split(self, sep=None, maxsplit=-1):
+        return self.data.split(sep, maxsplit)
+
+    def rsplit(self, sep=None, maxsplit=-1):
+        return self.data.rsplit(sep, maxsplit)
+
+    def splitlines(self, keepends=0):
+        return self.data.splitlines(keepends)
+
+    def startswith(self, prefix, start=0, end=sys.maxsize):
+        return self.data.startswith(prefix, start, end)
+
+    def strip(self, chars=None):
+        return self.__class__(self.data.strip(chars))
+
+    def swapcase(self):
+        return self.__class__(self.data.swapcase())
+
+    def title(self):
+        return self.__class__(self.data.title())
+
+    def translate(self, *args):
+        return self.__class__(self.data.translate(*args))
+
+    def upper(self):
+        return self.__class__(self.data.upper())
+
+    def zfill(self, width):
+        return self.__class__(self.data.zfill(width))
+
+
+class MutableString(UserString):
+    """mutable string objects
+
+    Python strings are immutable objects.  This has the advantage that
+    strings may be used as dictionary keys.  If this property isn't needed
+    and you insist on changing string values in place instead, you may cheat
+    and use MutableString.
+
+    But the purpose of this class is an educational one: to prevent
+    people from inventing their own mutable string class derived
+    from UserString and thereby forget to remove (override) the
+    __hash__ method inherited from UserString.  This would lead to
+    errors that would be very hard to track down.
+
+    A faster and better solution is to rewrite your program using lists."""
+
+    def __init__(self, string=""):
+        self.data = string
+
+    def __hash__(self):
+        raise TypeError("unhashable type (it is mutable)")
+
+    def __setitem__(self, index, sub):
+        if index < 0:
+            index += len(self.data)
+        if index < 0 or index >= len(self.data):
+            raise IndexError
+        self.data = self.data[:index] + sub + self.data[index + 1 :]
+
+    def __delitem__(self, index):
+        if index < 0:
+            index += len(self.data)
+        if index < 0 or index >= len(self.data):
+            raise IndexError
+        self.data = self.data[:index] + self.data[index + 1 :]
+
+    def __setslice__(self, start, end, sub):
+        start = max(start, 0)
+        end = max(end, 0)
+        if isinstance(sub, UserString):
+            self.data = self.data[:start] + sub.data + self.data[end:]
+        elif isinstance(sub, bytes):
+            self.data = self.data[:start] + sub + self.data[end:]
+        else:
+            self.data = self.data[:start] + str(sub).encode() + self.data[end:]
+
+    def __delslice__(self, start, end):
+        start = max(start, 0)
+        end = max(end, 0)
+        self.data = self.data[:start] + self.data[end:]
+
+    def immutable(self):
+        return UserString(self.data)
+
+    def __iadd__(self, other):
+        if isinstance(other, UserString):
+            self.data += other.data
+        elif isinstance(other, bytes):
+            self.data += other
+        else:
+            self.data += str(other).encode()
+        return self
+
+    def __imul__(self, n):
+        self.data *= n
+        return self
+
+
+class String(MutableString, Union):
+
+    _fields_ = [("raw", POINTER(c_char)), ("data", c_char_p)]
+
+    def __init__(self, obj=b""):
+        if isinstance(obj, (bytes, UserString)):
+            self.data = bytes(obj)
+        else:
+            self.raw = obj
+
+    def __len__(self):
+        return self.data and len(self.data) or 0
+
+    def from_param(cls, obj):
+        # Convert None or 0
+        if obj is None or obj == 0:
+            return cls(POINTER(c_char)())
+
+        # Convert from String
+        elif isinstance(obj, String):
+            return obj
+
+        # Convert from bytes
+        elif isinstance(obj, bytes):
+            return cls(obj)
+
+        # Convert from str
+        elif isinstance(obj, str):
+            return cls(obj.encode())
+
+        # Convert from c_char_p
+        elif isinstance(obj, c_char_p):
+            return obj
+
+        # Convert from POINTER(c_char)
+        elif isinstance(obj, POINTER(c_char)):
+            return obj
+
+        # Convert from raw pointer
+        elif isinstance(obj, int):
+            return cls(cast(obj, POINTER(c_char)))
+
+        # Convert from c_char array
+        elif isinstance(obj, c_char * len(obj)):
+            return obj
+
+        # Convert from object
+        else:
+            return String.from_param(obj._as_parameter_)
+
+    from_param = classmethod(from_param)
+
+
+def ReturnString(obj, func=None, arguments=None):
+    return String.from_param(obj)
+
+
+# As of ctypes 1.0, ctypes does not support custom error-checking
+# functions on callbacks, nor does it support custom datatypes on
+# callbacks, so we must ensure that all callbacks return
+# primitive datatypes.
+#
+# Non-primitive return values wrapped with UNCHECKED won't be
+# typechecked, and will be converted to c_void_p.
+def UNCHECKED(type):
+    if hasattr(type, "_type_") and isinstance(type._type_, str) and type._type_ != "P":
+        return type
+    else:
+        return c_void_p
+
+
+# ctypes doesn't have direct support for variadic functions, so we have to write
+# our own wrapper class
+class _variadic_function(object):
+    def __init__(self, func, restype, argtypes, errcheck):
+        self.func = func
+        self.func.restype = restype
+        self.argtypes = argtypes
+        if errcheck:
+            self.func.errcheck = errcheck
+
+    def _as_parameter_(self):
+        # So we can pass this variadic function as a function pointer
+        return self.func
+
+    def __call__(self, *args):
+        fixed_args = []
+        i = 0
+        for argtype in self.argtypes:
+            # Typecheck what we can
+            fixed_args.append(argtype.from_param(args[i]))
+            i += 1
+        return self.func(*fixed_args + list(args[i:]))
+
+
+def ord_if_char(value):
+    """
+    Simple helper used for casts to simple builtin types:  if the argument is a
+    string type, it will be converted to its ordinal value.
+
+    This function will raise an exception if the argument is a string with
+    more than one character.
+    """
+    return ord(value) if (isinstance(value, bytes) or isinstance(value, str)) else value
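
The String/ReturnString pair defined in this preamble is what lets the generated bindings accept ordinary Python strings and hand back usable values for `char *` results (the commit switches the bindings to ReturnString for exactly that). A minimal usage sketch follows; the import path is illustrative, since in the generated GRASS packages these names live in a per-package ctypes_preamble module.

```python
# Minimal usage sketch; the import path is illustrative (hypothetical module name).
from ctypes_preamble import String, ReturnString

s = String.from_param("PERMANENT")    # str is encoded to bytes and wrapped
b = String.from_param(b"PERMANENT")   # bytes are wrapped directly
n = String.from_param(None)           # becomes a NULL char pointer

# ReturnString is installed as restype/errcheck on functions returning
# char *, so callers get a String object instead of a bare pointer:
result = ReturnString(b"value returned by a C function")
print(len(result))
```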

+ 0 - 0
python/grass/ctypes/ctypesgen/printer_python/preamble/__init__.py


+ 460 - 0
python/grass/ctypes/ctypesgen/printer_python/printer.py

@@ -0,0 +1,460 @@
+#!/usr/bin/env python
+
+import os, sys, time, glob, re
+from ..descriptions import *
+from ..ctypedescs import *
+from ..messages import *
+from .. import expressions
+
+from .. import libraryloader  # So we can get the path to it
+from . import test  # So we can find the path to local files in the printer package
+
+
+def path_to_local_file(name, known_local_module=test):
+    basedir = os.path.dirname(known_local_module.__file__)
+    return os.path.join(basedir, name)
+
+
+THIS_DIR = os.path.dirname(__file__)
+PREAMBLE_PATH = os.path.join(THIS_DIR, "preamble", "[0-9]_[0-9].py")
+
+
+def get_preamble(major=None, minor=None):
+    """get the available preambles"""
+    preambles = dict()
+    for fp in glob.glob(PREAMBLE_PATH):
+        m = re.search(r"(\d)_(\d)\.py$", fp)
+        if not m:
+            continue
+        preambles[(int(m.group(1)), int(m.group(2)))] = fp
+
+    if None not in (major, minor):
+        v = (int(major), int(minor))
+    else:
+        L = sorted(preambles.keys())
+        v = L[0]
+        for vi in L[1:]:
+            if vi > sys.version_info[:2]:
+                break
+            v = vi
+    return preambles[v], v
+
+
+class WrapperPrinter:
+    def __init__(self, outpath, options, data):
+        status_message("Writing to %s." % (outpath or "stdout"))
+
+        self.file = open(outpath, "w") if outpath else sys.stdout
+        self.options = options
+
+        if self.options.strip_build_path and self.options.strip_build_path[-1] != os.path.sep:
+            self.options.strip_build_path += os.path.sep
+
+        self.print_header()
+        self.file.write("\n")
+
+        self.print_preamble()
+        self.file.write("\n")
+
+        self.print_loader()
+        self.file.write("\n")
+
+        self.print_group(self.options.libraries, "libraries", self.print_library)
+        self.print_group(self.options.modules, "modules", self.print_module)
+
+        method_table = {
+            "function": self.print_function,
+            "macro": self.print_macro,
+            "struct": self.print_struct,
+            "struct-body": self.print_struct_members,
+            "typedef": self.print_typedef,
+            "variable": self.print_variable,
+            "enum": self.print_enum,
+            "constant": self.print_constant,
+            "undef": self.print_undef,
+        }
+
+        for kind, desc in data.output_order:
+            if desc.included:
+                method_table[kind](desc)
+                self.file.write("\n")
+
+        self.print_group(self.options.inserted_files, "inserted files", self.insert_file)
+        self.strip_prefixes()
+
+    def __del__(self):
+        self.file.close()
+
+    def print_group(self, list, name, function):
+        if list:
+            self.file.write("# Begin %s\n" % name)
+            for obj in list:
+                function(obj)
+            self.file.write("\n")
+            self.file.write("# %d %s\n" % (len(list), name))
+            self.file.write("# End %s\n" % name)
+        else:
+            self.file.write("# No %s\n" % name)
+        self.file.write("\n")
+
+    def srcinfo(self, src):
+        if src == None:
+            self.file.write("\n")
+        else:
+            filename, lineno = src
+            if filename in ("<built-in>", "<command line>"):
+                self.file.write("# %s\n" % filename)
+            else:
+                if self.options.strip_build_path and filename.startswith(
+                    self.options.strip_build_path
+                ):
+                    filename = filename[len(self.options.strip_build_path) :]
+                self.file.write("# %s: %s\n" % (filename, lineno))
+
+    def template_subs(self):
+        template_subs = {
+            "date": time.ctime(),
+            "argv": " ".join([x for x in sys.argv if not x.startswith("--strip-build-path")]),
+            "name": os.path.basename(self.options.headers[0]),
+        }
+
+        for opt, value in self.options.__dict__.items():
+            if type(value) == str:
+                template_subs[opt] = value
+            elif isinstance(value, (list, tuple)):
+                template_subs[opt] = (os.path.sep).join(value)
+            else:
+                template_subs[opt] = repr(value)
+
+        return template_subs
+
+    def print_header(self):
+        template_file = None
+
+        if self.options.header_template:
+            path = self.options.header_template
+            try:
+                template_file = open(path, "r")
+            except IOError:
+                error_message(
+                    'Cannot load header template from file "%s" '
+                    " - using default template." % path,
+                    cls="missing-file",
+                )
+
+        if not template_file:
+            path = path_to_local_file("defaultheader.py")
+            template_file = open(path, "r")
+
+        template_subs = self.template_subs()
+        self.file.write(template_file.read() % template_subs)
+
+        template_file.close()
+
+    def print_preamble(self):
+        m = re.match("py((?P<major>[0-9])(?P<minor>[0-9]))?", self.options.output_language)
+        path, v = get_preamble(**m.groupdict())
+
+        self.file.write("# Begin preamble for Python v{}\n\n".format(v))
+        self.file.write("from .ctypes_preamble import *\n")
+        self.file.write("from .ctypes_preamble import _variadic_function\n")
+        # preamble_file = open(path, "r")
+        # self.file.write(preamble_file.read())
+        # preamble_file.close()
+        self.file.write("\n# End preamble\n")
+
+    def print_loader(self):
+        self.file.write("_libs = {}\n")
+        self.file.write("_libdirs = %s\n\n" % self.options.compile_libdirs)
+        self.file.write("# Begin loader\n\n")
+        self.file.write("from .ctypes_loader import *\n")        
+        # path = path_to_local_file("libraryloader.py", libraryloader)
+        # loader_file = open(path, "r")
+        # self.file.write(loader_file.read())
+        # loader_file.close()
+        self.file.write("\n# End loader\n\n")
+        self.file.write(
+            "add_library_search_dirs([%s])"
+            % ", ".join([repr(d) for d in self.options.runtime_libdirs])
+        )
+        self.file.write("\n")
+
+    def print_library(self, library):
+        self.file.write('_libs["%s"] = load_library("%s")\n' % (library, library))
+
+    def print_module(self, module):
+        self.file.write("from %s import *\n" % module)
+
+    def print_constant(self, constant):
+        self.file.write("%s = %s" % (constant.name, constant.value.py_string(False)))
+        self.srcinfo(constant.src)
+
+    def print_undef(self, undef):
+        self.srcinfo(undef.src)
+        self.file.write(
+            "# #undef {macro}\n"
+            "try:\n"
+            "    del {macro}\n"
+            "except NameError:\n"
+            "    pass\n".format(macro=undef.macro.py_string(False))
+        )
+
+    def print_typedef(self, typedef):
+        self.file.write("%s = %s" % (typedef.name, typedef.ctype.py_string()))
+        self.srcinfo(typedef.src)
+
+    def print_struct(self, struct):
+        self.srcinfo(struct.src)
+        base = {"union": "Union", "struct": "Structure"}[struct.variety]
+        self.file.write("class %s_%s(%s):\n" "    pass\n" % (struct.variety, struct.tag, base))
+
+    def print_struct_members(self, struct):
+        if struct.opaque:
+            return
+
+        # is this supposed to be packed?
+        if struct.attrib.get("packed", False):
+            aligned = struct.attrib.get("aligned", [1])
+            assert len(aligned) == 1, "cgrammar gave more than one arg for aligned attribute"
+            aligned = aligned[0]
+            if isinstance(aligned, expressions.ExpressionNode):
+                # TODO: for non-constant expression nodes, this will fail:
+                aligned = aligned.evaluate(None)
+            self.file.write("{}_{}._pack_ = {}\n".format(struct.variety, struct.tag, aligned))
+
+        # handle unnamed fields.
+        unnamed_fields = []
+        names = set([x[0] for x in struct.members])
+        anon_prefix = "unnamed_"
+        n = 1
+        for mi in range(len(struct.members)):
+            mem = list(struct.members[mi])
+            if mem[0] is None:
+                while True:
+                    name = "%s%i" % (anon_prefix, n)
+                    n += 1
+                    if name not in names:
+                        break
+                mem[0] = name
+                names.add(name)
+                if type(mem[1]) is CtypesStruct:
+                    unnamed_fields.append(name)
+                struct.members[mi] = mem
+
+        self.file.write("%s_%s.__slots__ = [\n" % (struct.variety, struct.tag))
+        for name, ctype in struct.members:
+            self.file.write("    '%s',\n" % name)
+        self.file.write("]\n")
+
+        if len(unnamed_fields) > 0:
+            self.file.write("%s_%s._anonymous_ = [\n" % (struct.variety, struct.tag))
+            for name in unnamed_fields:
+                self.file.write("    '%s',\n" % name)
+            self.file.write("]\n")
+
+        self.file.write("%s_%s._fields_ = [\n" % (struct.variety, struct.tag))
+        for name, ctype in struct.members:
+            if isinstance(ctype, CtypesBitfield):
+                self.file.write(
+                    "    ('%s', %s, %s),\n"
+                    % (name, ctype.py_string(), ctype.bitfield.py_string(False))
+                )
+            else:
+                self.file.write("    ('%s', %s),\n" % (name, ctype.py_string()))
+        self.file.write("]\n")
+
+    def print_enum(self, enum):
+        self.file.write("enum_%s = c_int" % enum.tag)
+        self.srcinfo(enum.src)
+        # Values of enumerator are output as constants.
+
+    def print_function(self, function):
+        if function.variadic:
+            self.print_variadic_function(function)
+        else:
+            self.print_fixed_function(function)
+
+    def print_fixed_function(self, function):
+        self.srcinfo(function.src)
+
+        CC = "stdcall" if function.attrib.get("stdcall", False) else "cdecl"
+
+        # If we know what library the function lives in, look there.
+        # Otherwise, check all the libraries.
+        if function.source_library:
+            self.file.write(
+                'if _libs["{L}"].has("{CN}", "{CC}"):\n'
+                '    {PN} = _libs["{L}"].get("{CN}", "{CC}")\n'.format(
+                    L=function.source_library, CN=function.c_name(), PN=function.py_name(), CC=CC
+                )
+            )
+        else:
+            self.file.write(
+                "for _lib in _libs.values():\n"
+                '    if not _lib.has("{CN}", "{CC}"):\n'
+                "        continue\n"
+                '    {PN} = _lib.get("{CN}", "{CC}")\n'.format(
+                    CN=function.c_name(), PN=function.py_name(), CC=CC
+                )
+            )
+
+        # Argument types
+        self.file.write(
+            "    %s.argtypes = [%s]\n"
+            % (function.py_name(), ", ".join([a.py_string() for a in function.argtypes]))
+        )
+
+        # Return value
+        if function.restype.py_string() == "String":
+            self.file.write(
+                "    if sizeof(c_int) == sizeof(c_void_p):\n"
+                "        {PN}.restype = ReturnString\n"
+                "    else:\n"
+                "        {PN}.restype = {RT}\n"
+                "        {PN}.errcheck = ReturnString\n".format(
+                    PN=function.py_name(), RT=function.restype.py_string()
+                )
+            )
+        else:
+            self.file.write(
+                "    %s.restype = %s\n" % (function.py_name(), function.restype.py_string())
+            )
+            if function.errcheck:
+                self.file.write(
+                    "    %s.errcheck = %s\n" % (function.py_name(), function.errcheck.py_string())
+                )
+
+        if not function.source_library:
+            self.file.write("    break\n")
+
+    def print_variadic_function(self, function):
+        CC = "stdcall" if function.attrib.get("stdcall", False) else "cdecl"
+
+        self.srcinfo(function.src)
+        if function.source_library:
+            self.file.write(
+                'if _libs["{L}"].has("{CN}", "{CC}"):\n'
+                '    _func = _libs["{L}"].get("{CN}", "{CC}")\n'
+                "    _restype = {RT}\n"
+                "    _errcheck = {E}\n"
+                "    _argtypes = [{t0}]\n"
+                "    {PN} = _variadic_function(_func,_restype,_argtypes,_errcheck)\n".format(
+                    L=function.source_library,
+                    CN=function.c_name(),
+                    RT=function.restype.py_string(),
+                    E=function.errcheck.py_string(),
+                    t0=", ".join([a.py_string() for a in function.argtypes]),
+                    PN=function.py_name(),
+                    CC=CC,
+                )
+            )
+        else:
+            self.file.write(
+                "for _lib in _libs.values():\n"
+                '    if _lib.has("{CN}", "{CC}"):\n'
+                '        _func = _lib.get("{CN}", "{CC}")\n'
+                "        _restype = {RT}\n"
+                "        _errcheck = {E}\n"
+                "        _argtypes = [{t0}]\n"
+                "        {PN} = _variadic_function(_func,_restype,_argtypes,_errcheck)\n".format(
+                    CN=function.c_name(),
+                    RT=function.restype.py_string(),
+                    E=function.errcheck.py_string(),
+                    t0=", ".join([a.py_string() for a in function.argtypes]),
+                    PN=function.py_name(),
+                    CC=CC,
+                )
+            )
+
+    def print_variable(self, variable):
+        self.srcinfo(variable.src)
+        if variable.source_library:
+            self.file.write(
+                "try:\n"
+                '    {PN} = ({PS}).in_dll(_libs["{L}"], "{CN}")\n'
+                "except:\n"
+                "    pass\n".format(
+                    PN=variable.py_name(),
+                    PS=variable.ctype.py_string(),
+                    L=variable.source_library,
+                    CN=variable.c_name(),
+                )
+            )
+        else:
+            self.file.write(
+                "for _lib in _libs.values():\n"
+                "    try:\n"
+                '        {PN} = ({PS}).in_dll(_lib, "{CN}")\n'
+                "        break\n"
+                "    except:\n"
+                "        pass\n".format(
+                    PN=variable.py_name(), PS=variable.ctype.py_string(), CN=variable.c_name()
+                )
+            )
+
+    def print_macro(self, macro):
+        if macro.params:
+            self.print_func_macro(macro)
+        else:
+            self.print_simple_macro(macro)
+
+    def print_simple_macro(self, macro):
+        # The macro translator makes heroic efforts but it occasionally fails.
+        # We want to contain the failures as much as possible.
+        # Hence the try statement.
+        self.srcinfo(macro.src)
+        self.file.write(
+            "try:\n"
+            "    {MN} = {ME}\n"
+            "except:\n"
+            "    pass\n".format(MN=macro.name, ME=macro.expr.py_string(True))
+        )
+
+    def print_func_macro(self, macro):
+        self.srcinfo(macro.src)
+        self.file.write(
+            "def {MN}({MP}):\n"
+            "    return {ME}\n".format(
+                MN=macro.name, MP=", ".join(macro.params), ME=macro.expr.py_string(True)
+            )
+        )
+
+    def strip_prefixes(self):
+        if not self.options.strip_prefixes:
+            self.file.write("# No prefix-stripping\n\n")
+            return
+
+        self.file.write(
+            "# Begin prefix-stripping\n"
+            "\n"
+            "# Strip prefixes from all symbols following regular expression:\n"
+            "# {expr}\n"
+            "\n"
+            "import re as __re_module\n"
+            "\n"
+            "__strip_expr = __re_module.compile('{expr}')\n"
+            "for __k, __v in globals().copy().items():\n"
+            "    __m = __strip_expr.match(__k)\n"
+            "    if __m:\n"
+            "        globals()[__k[__m.end():]] = __v\n"
+            "        # remove symbol with prefix(?)\n"
+            "        # globals().pop(__k)\n"
+            "del __re_module, __k, __v, __m, __strip_expr\n"
+            "\n"
+            "# End prefix-stripping\n"
+            "\n".format(expr="({})".format("|".join(self.options.strip_prefixes)))
+        )
+
+    def insert_file(self, filename):
+        try:
+            inserted_file = open(filename, "r")
+        except IOError:
+            error_message('Cannot open file "%s". Skipped it.' % filename, cls="missing-file")
+
+        self.file.write(
+            '# Begin "{filename}"\n'
+            "\n{file}\n"
+            '# End "{filename}"\n'.format(filename=filename, file=inserted_file.read())
+        )
+
+        inserted_file.close()
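
Compared to upstream ctypesgen, the patched print_preamble() and print_loader() above no longer inline the preamble and loader code; they emit relative imports instead. Roughly, a generated wrapper module now starts like the sketch below (library and function names are illustrative, not an exact dump of any GRASS module).

```python
# Sketch of output from the patched printer (names illustrative).

# Begin preamble for Python v(3, 2)

from .ctypes_preamble import *
from .ctypes_preamble import _variadic_function

# End preamble

_libs = {}
_libdirs = []

# Begin loader

from .ctypes_loader import *

# End loader

add_library_search_dirs([])

# Begin libraries
_libs["grass_gis"] = load_library("grass_gis")
# End libraries

# A fixed-arity binding, as written by print_fixed_function():
if _libs["grass_gis"].has("G_gisinit", "cdecl"):
    G_gisinit = _libs["grass_gis"].get("G_gisinit", "cdecl")
    G_gisinit.argtypes = [String]
    G_gisinit.restype = c_int
```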

+ 6 - 0
python/grass/ctypes/ctypesgen/printer_python/test.py

@@ -0,0 +1,6 @@
+"""
+ctypesgen.printer_python.printer imports this module so that it can find the
+path to local files such as defaultheader.py.
+"""
+
+pass

+ 1 - 1
python/grass/ctypes/ctypesgencore/processor/__init__.py

@@ -1,4 +1,4 @@
-#!/usr/bin/env python3
+#!/usr/bin/env python
 
 """
 This module contains functions to operate on the DeclarationCollection produced

+ 46 - 27
python/grass/ctypes/ctypesgencore/processor/dependencies.py

@@ -1,13 +1,13 @@
-#!/usr/bin/env python3
+#!/usr/bin/env python
 
 """
 The dependencies module determines which descriptions depend on which other
 descriptions.
 """
 
-from ctypesgencore.ctypedescs import *
-from ctypesgencore.descriptions import *
-from ctypesgencore.messages import *
+from ..descriptions import *
+from ..ctypedescs import *
+from ..messages import *
 
 
 def find_dependencies(data, opts):
@@ -45,6 +45,22 @@ description."""
         else:
             return False
 
+    def co_depend(desc, nametable, name):
+        """
+        Try to add `name` as a requirement for `desc`, looking `name` up in
+        `nametable`.  Also try to add desc as a requirement for `name`.
+
+        Returns Description of `name` if found.
+        """
+
+        requirement = nametable.get(name, None)
+        if requirement is None:
+            return
+
+        desc.add_requirements([requirement])
+        requirement.add_requirements([desc])
+        return requirement
+
     def find_dependencies_for(desc, kind):
         """Find all the descriptions that `desc` depends on and add them as
 dependencies for `desc`. Also collect error messages regarding `desc` and
@@ -52,23 +68,25 @@ convert unlocateable descriptions into error messages."""
 
         if kind == "constant":
             roots = [desc.value]
-        if kind == "struct":
+        elif kind == "struct":
             roots = []
-        if kind == "struct-body":
+        elif kind == "struct-body":
             roots = [desc.ctype]
-        if kind == "enum":
+        elif kind == "enum":
             roots = []
-        if kind == "typedef":
+        elif kind == "typedef":
             roots = [desc.ctype]
-        if kind == "function":
+        elif kind == "function":
             roots = desc.argtypes + [desc.restype]
-        if kind == "variable":
+        elif kind == "variable":
             roots = [desc.ctype]
-        if kind == "macro":
+        elif kind == "macro":
             if desc.expr:
                 roots = [desc.expr]
             else:
                 roots = []
+        elif kind == "undef":
+            roots = [desc.macro]
 
         cstructs, cenums, ctypedefs, errors, identifiers = [], [], [], [], []
 
@@ -83,33 +101,37 @@ convert unlocateable descriptions into error messages."""
         unresolvables = []
 
         for cstruct in cstructs:
-            if kind == "struct" and desc.variety == cstruct.variety and \
-                    desc.tag == cstruct.tag:
+            if kind == "struct" and desc.variety == cstruct.variety and desc.tag == cstruct.tag:
                 continue
             if not depend(desc, struct_names, (cstruct.variety, cstruct.tag)):
-                unresolvables.append("%s \"%s\"" %
-                                     (cstruct.variety, cstruct.tag))
+                unresolvables.append('%s "%s"' % (cstruct.variety, cstruct.tag))
 
         for cenum in cenums:
             if kind == "enum" and desc.tag == cenum.tag:
                 continue
             if not depend(desc, enum_names, cenum.tag):
-                unresolvables.append("enum \"%s\"" % cenum.tag)
+                unresolvables.append('enum "%s"' % cenum.tag)
 
         for ctypedef in ctypedefs:
             if not depend(desc, typedef_names, ctypedef):
-                unresolvables.append("typedef \"%s\"" % ctypedef)
+                unresolvables.append('typedef "%s"' % ctypedef)
 
         for ident in identifiers:
-            if isinstance(desc, MacroDescription) and \
-                    desc.params and ident in desc.params:
+            if isinstance(desc, MacroDescription) and desc.params and ident in desc.params:
                 continue
-            if not depend(desc, ident_names, ident):
-                unresolvables.append("identifier \"%s\"" % ident)
+
+            elif opts.include_undefs and isinstance(desc, UndefDescription):
+                macro_desc = None
+                if ident == desc.macro.name:
+                    macro_desc = co_depend(desc, ident_names, ident)
+                if macro_desc is None or not isinstance(macro_desc, MacroDescription):
+                    unresolvables.append('identifier "%s"' % ident)
+
+            elif not depend(desc, ident_names, ident):
+                unresolvables.append('identifier "%s"' % ident)
 
         for u in unresolvables:
-            errors.append(("%s depends on an unknown %s." %
-                           (desc.casual_name(), u), None))
+            errors.append(("%s depends on an unknown %s." % (desc.casual_name(), u), None))
 
         for err, cls in errors:
             err += " %s will not be output" % desc.casual_name()
@@ -136,13 +158,10 @@ it can find it."""
     # no other type of description can look ahead like that.
 
     for kind, desc in data.output_order:
+        add_to_lookup_table(desc, kind)
         if kind != "macro":
             find_dependencies_for(desc, kind)
-            add_to_lookup_table(desc, kind)
 
     for kind, desc in data.output_order:
         if kind == "macro":
-            add_to_lookup_table(desc, kind)
-    for kind, desc in data.output_order:
-        if kind == "macro":
             find_dependencies_for(desc, kind)
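
The new co_depend() records a two-way requirement, so an #undef and the macro it removes are kept or dropped together. The stand-alone sketch below illustrates that idea with a minimal stand-in class; it is hypothetical and not the real Description type used by ctypesgen.

```python
# Hypothetical stand-in illustrating the mutual requirement that
# co_depend() records between an #undef description and its macro.
class Desc:
    def __init__(self, name):
        self.name = name
        self.requirements = set()

    def add_requirements(self, reqs):
        self.requirements.update(reqs)


macro = Desc("MY_MACRO")          # stands in for the #define
undef = Desc("#undef MY_MACRO")   # stands in for the #undef

nametable = {"MY_MACRO": macro}

requirement = nametable.get("MY_MACRO")
if requirement is not None:
    undef.add_requirements([requirement])   # the undef needs the macro ...
    requirement.add_requirements([undef])   # ... and the macro needs the undef

assert macro in undef.requirements and undef in macro.requirements
```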

+ 119 - 61
python/grass/ctypes/ctypesgencore/processor/operations.py

@@ -1,21 +1,15 @@
-#!/usr/bin/env python3
+#!/usr/bin/env python
 
 """
 The operations module contains various functions to process the
 DescriptionCollection and prepare it for output.
-ctypesgencore.processor.pipeline calls the operations module.
+ctypesgen.processor.pipeline calls the operations module.
 """
 
-import keyword
-import os
-import re
-import sys
-
-import ctypes
-import ctypesgencore.libraryloader
-from ctypesgencore.descriptions import *
-from ctypesgencore.messages import *
-
+import ctypes, re, os, sys, keyword
+from ..descriptions import *
+from ..messages import *
+from .. import libraryloader
 
 # Processor functions
 
@@ -27,9 +21,7 @@ def automatically_typedef_structs(data, options):
 
     for struct in data.structs:
         if not struct.ctype.anonymous:  # Don't alias anonymous structs
-            typedef = TypedefDescription(struct.tag,
-                                         struct.ctype,
-                                         src=struct.src)
+            typedef = TypedefDescription(struct.tag, struct.ctype, src=struct.src)
             typedef.add_requirements(set([struct]))
 
             data.typedefs.append(typedef)
@@ -53,7 +45,7 @@ def remove_descriptions_in_system_headers(data, opts):
     known_headers = [os.path.basename(x) for x in opts.headers]
 
     for description in data.all:
-        if description.src is not None:
+        if description.src != None:
             if description.src[0] == "<command line>":
                 description.include_rule = "if_needed"
             elif description.src[0] == "<built-in>":
@@ -77,7 +69,7 @@ def filter_by_regexes_exclude(data, opts):
     """filter_by_regexes_exclude() uses regular expressions specified by options
     dictionary to filter symbols."""
     if opts.exclude_symbols:
-        expr = re.compile(opts.exclude_symbols)
+        expr = re.compile("({})".format("|".join(opts.exclude_symbols)))
         for object in data.all:
             if expr.match(object.py_name()):
                 object.include_rule = "never"
@@ -87,7 +79,7 @@ def filter_by_regexes_include(data, opts):
     """filter_by_regexes_include() uses regular expressions specified by options
     dictionary to re-include symbols previously rejected by other operations."""
     if opts.include_symbols:
-        expr = re.compile(opts.include_symbols)
+        expr = re.compile("({})".format("|".join(opts.include_symbols)))
         for object in data.all:
             if object.include_rule != "never":
                 if expr.match(object.py_name()):
@@ -100,8 +92,15 @@ def fix_conflicting_names(data, opts):
     the name conflict."""
 
     # This is the order of priority for names
-    descriptions = data.functions + data.variables + data.structs + \
-        data.typedefs + data.enums + data.constants + data.macros
+    descriptions = (
+        data.functions
+        + data.variables
+        + data.structs
+        + data.typedefs
+        + data.enums
+        + data.constants
+        + data.macros
+    )
 
     # This dictionary maps names to a string representing where the name
     # came from.
@@ -109,17 +108,72 @@ def fix_conflicting_names(data, opts):
 
     preamble_names = set()
     preamble_names = preamble_names.union(
-        ['DarwinLibraryLoader', 'LibraryLoader', 'LinuxLibraryLoader', 'WindowsLibraryLoader',
-         '_WindowsLibrary', 'add_library_search_dirs', '_environ_path', 'ctypes', 'load_library',
-         'loader', 'os', 're', 'sys'])
+        [
+            "DarwinLibraryLoader",
+            "LibraryLoader",
+            "LinuxLibraryLoader",
+            "WindowsLibraryLoader",
+            "_WindowsLibrary",
+            "add_library_search_dirs",
+            "_environ_path",
+            "ctypes",
+            "load_library",
+            "loader",
+            "os",
+            "re",
+            "sys",
+        ]
+    )
     preamble_names = preamble_names.union(
-        ['ArgumentError', 'CFUNCTYPE', 'POINTER', 'ReturnString', 'String', 'Structure',
-         'UNCHECKED', 'Union', 'UserString', '_variadic_function', 'addressof', 'c_buffer',
-         'c_byte', 'c_char', 'c_char_p', 'c_double', 'c_float', 'c_int', 'c_int16', 'c_int32',
-         'c_int64', 'c_int8', 'c_long', 'c_longlong', 'c_ptrdiff_t', 'c_short', 'c_size_t',
-         'c_ubyte', 'c_uint', 'c_uint16', 'c_uint32', 'c_uint64', 'c_uint8', 'c_ulong',
-         'c_ulonglong', 'c_ushort', 'c_void', 'c_void_p', 'c_voidp', 'c_wchar', 'c_wchar_p', 'cast',
-         'ctypes', 'os', 'pointer', 'sizeof'])
+        [
+            "ArgumentError",
+            "CFUNCTYPE",
+            "POINTER",
+            "ReturnString",
+            "String",
+            "Structure",
+            "UNCHECKED",
+            "Union",
+            "UserString",
+            "_variadic_function",
+            "addressof",
+            "c_buffer",
+            "c_byte",
+            "c_char",
+            "c_char_p",
+            "c_double",
+            "c_float",
+            "c_int",
+            "c_int16",
+            "c_int32",
+            "c_int64",
+            "c_int8",
+            "c_long",
+            "c_longlong",
+            "c_ptrdiff_t",
+            "c_short",
+            "c_size_t",
+            "c_ubyte",
+            "c_uint",
+            "c_uint16",
+            "c_uint32",
+            "c_uint64",
+            "c_uint8",
+            "c_ulong",
+            "c_ulonglong",
+            "c_ushort",
+            "c_void",
+            "c_void_p",
+            "c_voidp",
+            "c_wchar",
+            "c_wchar_p",
+            "cast",
+            "ctypes",
+            "os",
+            "pointer",
+            "sizeof",
+        ]
+    )
     for name in preamble_names:
         important_names[name] = "a name needed by ctypes or ctypesgen"
     for name in dir(__builtins__):
@@ -135,33 +189,31 @@ def fix_conflicting_names(data, opts):
 
             original_name = description.casual_name()
             while description.py_name() in important_names:
-                if isinstance(description,
-                              (StructDescription, EnumDescription)):
+                if isinstance(description, (StructDescription, EnumDescription)):
                     description.tag += "_"
                 else:
                     description.name = "_" + description.name
 
             if not description.dependents:
-                description.warning("%s has been renamed to %s due to a name "
-                                    "conflict with %s." %
-                                    (original_name,
-                                     description.casual_name(),
-                                     conflict_name),
-                                    cls='rename')
+                description.warning(
+                    "%s has been renamed to %s due to a name "
+                    "conflict with %s." % (original_name, description.casual_name(), conflict_name),
+                    cls="rename",
+                )
             else:
-                description.warning("%s has been renamed to %s due to a name "
-                                    "conflict with %s. Other objects depend on %s - those "
-                                    "objects will be skipped." %
-                                    (original_name, description.casual_name(),
-                                     conflict_name, original_name),
-                                    cls='rename')
+                description.warning(
+                    "%s has been renamed to %s due to a name "
+                    "conflict with %s. Other objects depend on %s - those "
+                    "objects will be skipped."
+                    % (original_name, description.casual_name(), conflict_name, original_name),
+                    cls="rename",
+                )
 
                 for dependent in description.dependents:
                     dependent.include_rule = "never"
 
             if description.include_rule == "yes":
-                important_names[description.py_name()] = \
-                    description.casual_name()
+                important_names[description.py_name()] = description.casual_name()
 
     # Names of struct members don't conflict with much, but they can conflict
     # with Python keywords.
@@ -171,10 +223,12 @@ def fix_conflicting_names(data, opts):
             for i, (name, type) in enumerate(struct.members):
                 if name in keyword.kwlist:
                     struct.members[i] = ("_" + name, type)
-                    struct.warning("Member \"%s\" of %s has been renamed to "
-                                   "\"%s\" because it has the same name as a Python "
-                                   "keyword." % (name, struct.casual_name(), "_" + name),
-                                   cls='rename')
+                    struct.warning(
+                        'Member "%s" of %s has been renamed to '
+                        '"%s" because it has the same name as a Python '
+                        "keyword." % (name, struct.casual_name(), "_" + name),
+                        cls="rename",
+                    )
 
     # Macro arguments may be have names that conflict with Python keywords.
     # In a perfect world, this would simply rename the parameter instead
@@ -184,10 +238,12 @@ def fix_conflicting_names(data, opts):
         if macro.params:
             for param in macro.params:
                 if param in keyword.kwlist:
-                    macro.error("One of the parameters to %s, \"%s\" has the "
-                                "same name as a Python keyword. %s will be skipped." %
-                                (macro.casual_name(), param, macro.casual_name()),
-                                cls='name-conflict')
+                    macro.error(
+                        'One of the parameters to %s, "%s" has the '
+                        "same name as a Python keyword. %s will be skipped."
+                        % (macro.casual_name(), param, macro.casual_name()),
+                        cls="name-conflict",
+                    )
 
 
 def find_source_libraries(data, opts):
@@ -199,17 +255,19 @@ def find_source_libraries(data, opts):
     for symbol in all_symbols:
         symbol.source_library = None
 
-    ctypesgencore.libraryloader.add_library_search_dirs(opts.compile_libdirs)
+    libraryloader.add_library_search_dirs(opts.compile_libdirs)
 
     for library_name in opts.libraries:
         try:
-            library = ctypesgencore.libraryloader.load_library(library_name)
-        except (ImportError,OSError) as e:
-            warning_message("Could not load library \"%s\". Okay, I'll "
-                            "try to load it at runtime instead. " % (library_name),
-                            cls='missing-library')
+            library = libraryloader.load_library(library_name)
+        except ImportError as e:
+            warning_message(
+                'Could not load library "%s". Okay, I\'ll '
+                "try to load it at runtime instead. " % (library_name),
+                cls="missing-library",
+            )
             continue
         for symbol in all_symbols:
-            if symbol.source_library is None:
+            if symbol.source_library == None:
                 if hasattr(library, symbol.c_name()):
                     symbol.source_library = library_name

+ 16 - 18
python/grass/ctypes/ctypesgencore/processor/pipeline.py

@@ -1,14 +1,10 @@
-#!/usr/bin/env python3
-
-import os
-import re
-
-import ctypes
-from ctypesgencore.ctypedescs import *
-from ctypesgencore.messages import *
-from ctypesgencore.processor.dependencies import find_dependencies
-from ctypesgencore.processor.operations import *
+#!/usr/bin/env python
 
+import ctypes, re, os
+from ..ctypedescs import *
+from ..messages import *
+from .operations import *
+from .dependencies import find_dependencies
 
 """
 A brief explanation of the processing steps:
@@ -76,7 +72,7 @@ def calculate_final_inclusion(data, opts):
     """
 
     def can_include_desc(desc):
-        if desc.can_include is None:
+        if desc.can_include == None:
             if desc.include_rule == "no":
                 desc.can_include = False
             elif desc.include_rule == "yes" or desc.include_rule == "if_needed":
@@ -106,7 +102,7 @@ def calculate_final_inclusion(data, opts):
 def print_errors_encountered(data, opts):
     # See descriptions.py for an explanation of the error-handling mechanism
     for desc in data.all:
-        # If description would not have been included, don't bother user by
+        # If description would not have been included, dont bother user by
         # printing warnings.
         if desc.included or opts.show_all_errors:
             if opts.show_long_errors or len(desc.errors) + len(desc.warnings) <= 2:
@@ -127,16 +123,18 @@ def print_errors_encountered(data, opts):
                     numerrs = len(desc.errors) - 1
                     numwarns = len(desc.warnings)
                     if numwarns:
-                        error_message("%d more errors and %d more warnings "
-                                      "for %s" % (numerrs, numwarns, desc.casual_name()))
+                        error_message(
+                            "%d more errors and %d more warnings "
+                            "for %s" % (numerrs, numwarns, desc.casual_name())
+                        )
                     else:
-                        error_message("%d more errors for %s " %
-                                      (numerrs, desc.casual_name()))
+                        error_message("%d more errors for %s " % (numerrs, desc.casual_name()))
                 else:
                     warning1, cls1 = desc.warnings[0]
                     warning_message(warning1, cls1)
-                    warning_message("%d more errors for %s" %
-                                    (len(desc.warnings) - 1, desc.casual_name()))
+                    warning_message(
+                        "%d more errors for %s" % (len(desc.warnings) - 1, desc.casual_name())
+                    )
         if desc.errors:
             # process() will recalculate to take this into account
             desc.include_rule = "never"

+ 2 - 0
python/grass/ctypes/ctypesgen/test/.gitignore

@@ -0,0 +1,2 @@
+temp.h
+temp.py

+ 88 - 0
python/grass/ctypes/ctypesgen/test/ctypesgentest.py

@@ -0,0 +1,88 @@
+# -*- coding: ascii -*-
+# vim:ts=4:sw=4:softtabstop=4:smarttab:expandtab
+import os
+import sys
+import io
+import optparse
+import glob
+import json
+
+try:
+    # should succeed for py3
+    from importlib import reload as reload_module
+except:
+    reload_module = reload
+
+# ensure that we can load the ctypesgen library
+PACKAGE_DIR = os.path.join(os.path.dirname(__file__), os.path.pardir, os.path.pardir)
+sys.path.insert(0, PACKAGE_DIR)
+import ctypesgen
+
+"""ctypesgentest is a simple module for testing ctypesgen on various C constructs. It consists of a
+single function, test(). test() takes a string that represents a C header file, along with some
+keyword arguments representing options. It processes the header using ctypesgen and returns a tuple
+containing the resulting module object and the output that ctypesgen produced."""
+
+# set redirect_stdout to False if using console based debugger like pdb
+redirect_stdout = True
+
+
+def test(header, **more_options):
+
+    assert isinstance(header, str)
+    with open("temp.h", "w") as f:
+        f.write(header)
+
+    options = ctypesgen.options.get_default_options()
+    options.headers = ["temp.h"]
+    for opt, val in more_options.items():
+        setattr(options, opt, val)
+
+    if redirect_stdout:
+        # Redirect output
+        sys.stdout = io.StringIO()
+
+    # Step 1: Parse
+    descriptions = ctypesgen.parser.parse(options.headers, options)
+
+    # Step 2: Process
+    ctypesgen.processor.process(descriptions, options)
+
+    # Step 3: Print
+    printer = None
+    if options.output_language.startswith("py"):
+        ctypesgen.printer_python.WrapperPrinter("temp.py", options, descriptions)
+
+        # Load the module we have just produced
+        module = __import__("temp")
+        # import twice, this hack ensures that "temp" is force loaded
+        # (there *must* be a better way to do this)
+        reload_module(module)
+        retval = module
+
+    elif options.output_language == "json":
+        # for ease and consistency with test results, we are going to cheat by
+        # resetting the anonymous tag number
+        ctypesgen.ctypedescs.last_tagnum = 0
+        ctypesgen.printer_json.WrapperPrinter("temp.json", options, descriptions)
+        with open("temp.json") as f:
+            JSON = json.load(f)
+        retval = JSON
+    else:
+        raise RuntimeError("No such output language `" + options.output_language + "'")
+
+    if redirect_stdout:
+        # Un-redirect output
+        output = sys.stdout.getvalue()
+        sys.stdout.close()
+        sys.stdout = sys.__stdout__
+    else:
+        output = ""
+
+    return retval, output
+
+
+def cleanup(filepattern="temp.*"):
+    fnames = glob.glob(filepattern)
+    for fname in fnames:
+        os.unlink(fname)
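
A minimal sketch of driving this helper is shown below. The header content is illustrative; note that with the patched Python printer the generated temp.py uses relative imports (from .ctypes_preamble ...), so the JSON output path is the easier one to exercise standalone.

```python
import json
import ctypesgentest

# Parse an illustrative one-line header and dump the declarations as JSON.
descriptions, output = ctypesgentest.test("#define FOO 7\n", output_language="json")
print(json.dumps(descriptions, indent=2)[:200])
ctypesgentest.cleanup()   # remove the temp.* files again
```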

File diff suppressed because it is too large
+ 2352 - 0
python/grass/ctypes/ctypesgen/test/testsuite.py


+ 89 - 0
python/grass/ctypes/ctypesgen/version.py

@@ -0,0 +1,89 @@
+#!/usr/bin/env python3
+# vim: ts=2:sw=2:tw=80:nowrap
+
+from subprocess import Popen, PIPE
+import os
+from os import path
+
+THIS_DIR = path.dirname(__file__)
+VERSION_FILE = path.join(THIS_DIR, "VERSION")
+DEFAULT_PREFIX = "ctypesgen"
+
+__all__ = ["VERSION", "version_tuple", "version", "compatible"]
+
+
+def version_tuple(v):
+    try:
+        vs = v.split("-")
+        t = tuple(int(i) for i in vs[1].split("."))
+        if len(vs) > 2:
+            t += (int(vs[2]),)
+        return t
+    except:
+        return (-1, -1, -1, v)
+
+
+def read_file_version():
+    f = open(VERSION_FILE)
+    v = f.readline()
+    f.close()
+    return v.strip()
+
+
+def version():
+    try:
+        args = {"cwd": THIS_DIR}
+        devnull = open(os.devnull, "w")
+        p = Popen(["git", "describe"], stdout=PIPE, stderr=devnull, **args)
+        out, err = p.communicate()
+        if p.returncode:
+            raise RuntimeError("no version defined?")
+        return out.strip().decode()
+    except:
+        # failover is to try VERSION_FILE instead
+        try:
+            return read_file_version()
+        except:
+            return DEFAULT_PREFIX + "-0.0.0"
+
+
+def version_number():
+    return version().partition("-")[-1]
+
+
+def compatible(v0, v1):
+    v0 = version_tuple(v0)
+    v1 = version_tuple(v1)
+    return v0[:2] == v1[:2]
+
+
+def write_version_file(v=None):
+    if v is None:
+        v = version()
+    f = open(VERSION_FILE, "w")
+    f.write(v)
+    f.close()
+
+
+VERSION = version()
+VERSION_NUMBER = version_number()
+
+
+if __name__ == "__main__":
+    import sys, argparse
+
+    p = argparse.ArgumentParser()
+    p.add_argument("--save", action="store_true", help="Store version to " + VERSION_FILE)
+    p.add_argument(
+        "--read-file-version",
+        action="store_true",
+        help="Read the version stored in " + VERSION_FILE,
+    )
+    args = p.parse_args()
+
+    v = version()
+    if args.save:
+        write_version_file(v)
+    if args.read_file_version:
+        v = read_file_version()
+    print(v)
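
These helpers compare "git describe"-style version strings numerically. A short sketch, assuming the vendored ctypesgen package is importable (the version strings are illustrative):

```python
from ctypesgen import version

print(version.version_tuple("ctypesgen-1.0.2"))     # (1, 0, 2)
print(version.version_tuple("ctypesgen-1.0.2-28"))  # (1, 0, 2, 28)

# compatible() only compares the major.minor components of the two versions:
print(version.compatible("ctypesgen-1.0.2", "ctypesgen-1.0.9-5"))  # True
print(version.compatible("ctypesgen-1.0.2", "ctypesgen-1.1.0"))    # False
```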

+ 0 - 319
python/grass/ctypes/ctypesgencore/libraryloader.py

@@ -1,319 +0,0 @@
-# ----------------------------------------------------------------------------
-# Copyright (c) 2008 David James
-# Copyright (c) 2006-2008 Alex Holkner
-# All rights reserved.
-#
-# Redistribution and use in source and binary forms, with or without
-# modification, are permitted provided that the following conditions
-# are met:
-#
-#  * Redistributions of source code must retain the above copyright
-#    notice, this list of conditions and the following disclaimer.
-#  * Redistributions in binary form must reproduce the above copyright
-#    notice, this list of conditions and the following disclaimer in
-#    the documentation and/or other materials provided with the
-#    distribution.
-#  * Neither the name of pyglet nor the names of its
-#    contributors may be used to endorse or promote products
-#    derived from this software without specific prior written
-#    permission.
-#
-# THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
-# "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
-# LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS
-# FOR A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE
-# COPYRIGHT OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT,
-# INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING,
-# BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES;
-# LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER
-# CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
-# LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN
-# ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
-# POSSIBILITY OF SUCH DAMAGE.
-# ----------------------------------------------------------------------------
-
-import glob
-import os.path
-import re
-import sys
-import platform
-
-import ctypes
-import ctypes.util
-
-
-def _environ_path(name):
-    if name in os.environ:
-        return os.environ[name].split(":")
-    else:
-        return []
-
-
-class LibraryLoader(object):
-
-    def __init__(self):
-        self.other_dirs = []
-
-    def load_library(self, libname):
-        """Given the name of a library, load it."""
-        paths = self.getpaths(libname)
-
-        for path in paths:
-            if os.path.exists(path):
-                return self.load(path)
-
-        raise ImportError("%s not found." % libname)
-
-    def load(self, path):
-        """Given a path to a library, load it."""
-        try:
-            # Darwin requires dlopen to be called with mode RTLD_GLOBAL instead
-            # of the default RTLD_LOCAL.  Without this, you end up with
-            # libraries not being loadable, resulting in "Symbol not found"
-            # errors
-            if sys.platform == 'darwin':
-                return ctypes.CDLL(path, ctypes.RTLD_GLOBAL)
-            else:
-                return ctypes.cdll.LoadLibrary(path)
-        except OSError as e:
-            raise ImportError(e)
-
-    def getpaths(self, libname):
-        """Return a list of paths where the library might be found."""
-        if os.path.isabs(libname):
-            yield libname
-
-        else:
-            for path in self.getplatformpaths(libname):
-                yield path
-
-            path = ctypes.util.find_library(libname)
-            if path:
-                yield path
-
-    def getplatformpaths(self, libname):
-        return []
-
-# Darwin (Mac OS X)
-
-
-class DarwinLibraryLoader(LibraryLoader):
-    name_formats = ["lib%s.dylib", "lib%s.so", "lib%s.bundle", "%s.dylib",
-                    "%s.so", "%s.bundle", "%s"]
-
-    def getplatformpaths(self, libname):
-        if os.path.pathsep in libname:
-            names = [libname]
-        else:
-            names = [format % libname for format in self.name_formats]
-
-        for dir in self.getdirs(libname):
-            for name in names:
-                yield os.path.join(dir, name)
-
-    def getdirs(self, libname):
-        '''Implements the dylib search as specified in Apple documentation:
-
-        http://developer.apple.com/documentation/DeveloperTools/Conceptual/
-            DynamicLibraries/Articles/DynamicLibraryUsageGuidelines.html
-
-        Before commencing the standard search, the method first checks
-        the bundle's ``Frameworks`` directory if the application is running
-        within a bundle (OS X .app).
-        '''
-
-        dyld_fallback_library_path = _environ_path("DYLD_FALLBACK_LIBRARY_PATH")
-        if not dyld_fallback_library_path:
-            dyld_fallback_library_path = [os.path.expanduser('~/lib'),
-                                          '/usr/local/lib', '/usr/lib']
-
-        dirs = []
-
-        if '/' in libname:
-            dirs.extend(_environ_path("DYLD_LIBRARY_PATH"))
-        else:
-            dirs.extend(_environ_path("LD_LIBRARY_PATH"))
-            dirs.extend(_environ_path("DYLD_LIBRARY_PATH"))
-
-        dirs.extend(self.other_dirs)
-        dirs.append(".")
-        dirs.append(os.path.dirname(__file__))
-
-        if hasattr(sys, 'frozen') and sys.frozen == 'macosx_app':
-            dirs.append(os.path.join(
-                os.environ['RESOURCEPATH'],
-                '..',
-                'Frameworks'))
-
-        dirs.extend(dyld_fallback_library_path)
-
-        return dirs
-
-# Posix
-
-
-class PosixLibraryLoader(LibraryLoader):
-    _ld_so_cache = None
-
-    def _create_ld_so_cache(self):
-        # Recreate search path followed by ld.so.  This is going to be
-        # slow to build, and incorrect (ld.so uses ld.so.cache, which may
-        # not be up-to-date).  Used only as fallback for distros without
-        # /sbin/ldconfig.
-        #
-        # We assume the DT_RPATH and DT_RUNPATH binary sections are omitted.
-
-        directories = []
-        for name in ("LD_LIBRARY_PATH",
-                     "SHLIB_PATH",  # HPUX
-                     "LIBPATH",  # OS/2, AIX
-                     "LIBRARY_PATH",  # BE/OS
-                     ):
-            if name in os.environ:
-                directories.extend(os.environ[name].split(os.pathsep))
-        directories.extend(self.other_dirs)
-        directories.append(".")
-        directories.append(os.path.dirname(__file__))
-
-        try:
-            directories.extend([dir.strip() for dir in open('/etc/ld.so.conf')])
-        except IOError:
-            pass
-
-        unix_lib_dirs_list = ['/lib', '/usr/lib', '/lib64', '/usr/lib64']
-        if sys.platform.startswith('linux'):
-            # Try and support multiarch work in Ubuntu
-            # https://wiki.ubuntu.com/MultiarchSpec
-            bitage = platform.architecture()[0]
-            if bitage.startswith('32'):
-                # Assume Intel/AMD x86 compat
-                unix_lib_dirs_list += ['/lib/i386-linux-gnu', '/usr/lib/i386-linux-gnu']
-            elif bitage.startswith('64'):
-                # Assume Intel/AMD x86 compat
-                unix_lib_dirs_list += ['/lib/x86_64-linux-gnu', '/usr/lib/x86_64-linux-gnu']
-            else:
-                # guess...
-                unix_lib_dirs_list += glob.glob('/lib/*linux-gnu')
-        directories.extend(unix_lib_dirs_list)
-
-        cache = {}
-        lib_re = re.compile(r'lib(.*)\.s[ol]')
-        ext_re = re.compile(r'\.s[ol]$')
-        for dir in directories:
-            try:
-                for path in glob.glob("%s/*.s[ol]*" % dir):
-                    file = os.path.basename(path)
-
-                    # Index by filename
-                    if file not in cache:
-                        cache[file] = path
-
-                    # Index by library name
-                    match = lib_re.match(file)
-                    if match:
-                        library = match.group(1)
-                        if library not in cache:
-                            cache[library] = path
-            except OSError:
-                pass
-
-        self._ld_so_cache = cache
-
-    def getplatformpaths(self, libname):
-        if self._ld_so_cache is None:
-            self._create_ld_so_cache()
-
-        result = self._ld_so_cache.get(libname)
-        if result:
-            yield result
-
-        path = ctypes.util.find_library(libname)
-        if path:
-            yield os.path.join("/lib", path)
-
-# Windows
-
-
-class _WindowsLibrary(object):
-
-    def __init__(self, path):
-        self.cdll = ctypes.cdll.LoadLibrary(path)
-        self.windll = ctypes.windll.LoadLibrary(path)
-
-    def __getattr__(self, name):
-        try:
-            return getattr(self.cdll, name)
-        except AttributeError:
-            try:
-                return getattr(self.windll, name)
-            except AttributeError:
-                raise
-
-
-class WindowsLibraryLoader(LibraryLoader):
-    name_formats = ["%s.dll", "lib%s.dll"]
-
-    def load_library(self, libname):
-        try:
-            result = LibraryLoader.load_library(self, libname)
-        except ImportError:
-            result = None
-            if os.path.sep not in libname:
-                for name in self.name_formats:
-                    try:
-                        result = getattr(ctypes.cdll, name % libname)
-                        if result:
-                            break
-                    except WindowsError:
-                        result = None
-            if result is None:
-                try:
-                    result = getattr(ctypes.cdll, libname)
-                except WindowsError:
-                    result = None
-            if result is None:
-                raise ImportError("%s not found." % libname)
-        return result
-
-    def load(self, path):
-        return _WindowsLibrary(path)
-
-    def getplatformpaths(self, libname):
-        if os.path.sep not in libname:
-            for name in self.name_formats:
-                dll_in_current_dir = os.path.abspath(name % libname)
-                if os.path.exists(dll_in_current_dir):
-                    yield dll_in_current_dir
-                path = ctypes.util.find_library(name % libname)
-                if path:
-                    yield path
-
-# Platform switching
-
-# If your value of sys.platform does not appear in this dict, please contact
-# the Ctypesgen maintainers.
-
-loaderclass = {
-    "darwin": DarwinLibraryLoader,
-    "cygwin": WindowsLibraryLoader,
-    "win32": WindowsLibraryLoader
-}
-
-loader = loaderclass.get(sys.platform, PosixLibraryLoader)()
-
-
-def add_library_search_dirs(other_dirs):
-    """
-    Add libraries to search paths.
-    If library paths are relative, convert them to absolute with respect to this
-    file's directory
-    """
-    THIS_DIR = os.path.dirname(__file__)
-    for F in other_dirs:
-        if not os.path.isabs(F):
-            F = os.path.abspath(os.path.join(THIS_DIR, F))
-        loader.other_dirs.append(F)
-
-load_library = loader.load_library
-
-del loaderclass
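
For orientation, the following self-contained sketch mirrors the search order the Posix loader above uses: environment-provided directories first, then the standard library directories, then a fall-back to ctypes.util.find_library(). The helper names (candidate_dirs, find_first) are ad hoc for this illustration and do not exist in the loader.

import ctypes.util
import glob
import os


def candidate_dirs():
    # Environment-driven paths first, then common system library directories,
    # roughly the order PosixLibraryLoader._create_ld_so_cache() walks.
    dirs = []
    for var in ("LD_LIBRARY_PATH", "SHLIB_PATH", "LIBPATH", "LIBRARY_PATH"):
        if var in os.environ:
            dirs.extend(os.environ[var].split(os.pathsep))
    dirs.extend(["/lib", "/usr/lib", "/lib64", "/usr/lib64"])
    return dirs


def find_first(libname):
    for d in candidate_dirs():
        hits = glob.glob(os.path.join(d, "lib%s.s[ol]*" % libname))
        if hits:
            return sorted(hits)[0]
    # Same fall-back the loader uses when its own search finds nothing.
    return ctypes.util.find_library(libname)


print(find_first("m"))  # e.g. '/usr/lib64/libm.so.6', or None if not found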

+ 0 - 198
python/grass/ctypes/ctypesgencore/parser/cdeclarations.py

@@ -1,198 +0,0 @@
-#!/usr/bin/env python3
-
-'''
-This file contains classes that represent C declarations. cparser produces
-declarations in this format, and ctypesparser reformats them into a format that
-is not C-specific. The other modules don't need to touch these.
-'''
-
-__docformat__ = 'restructuredtext'
-
-# --------------------------------------------------------------------------
-# C Object Model
-# --------------------------------------------------------------------------
-
-
-class Declaration(object):
-
-    def __init__(self):
-        self.declarator = None
-        self.type = Type()
-        self.storage = None
-
-    def __repr__(self):
-        d = {
-            'declarator': self.declarator,
-            'type': self.type,
-        }
-        if self.storage:
-            d['storage'] = self.storage
-        l = ['%s=%r' % (k, v) for k, v in d.items()]
-        return 'Declaration(%s)' % ', '.join(l)
-
-
-class Declarator(object):
-    pointer = None
-
-    def __init__(self):
-        self.identifier = None
-        self.initializer = None
-        self.array = None
-        self.parameters = None
-        self.bitfield = None
-
-    # make pointer read-only to catch mistakes early
-    pointer = property(lambda self: None)
-
-    def __repr__(self):
-        s = self.identifier or ''
-        if self.bitfield:
-            s += ":%d" % self.bitfield
-        if self.array:
-            s += repr(self.array)
-        if self.initializer:
-            s += ' = %r' % self.initializer
-        if self.parameters is not None:
-            s += '(' + ', '.join([repr(p) for p in self.parameters]) + ')'
-        return s
-
-
-class Pointer(Declarator):
-    pointer = None
-
-    def __init__(self):
-        super(Pointer, self).__init__()
-        self.qualifiers = []
-
-    def __repr__(self):
-        q = ''
-        if self.qualifiers:
-            q = '<%s>' % ' '.join(self.qualifiers)
-        return 'POINTER%s(%r)' % (q, self.pointer) + \
-            super(Pointer, self).__repr__()
-
-
-class Array(object):
-
-    def __init__(self):
-        self.size = None
-        self.array = None
-
-    def __repr__(self):
-        if self.size:
-            a = '[%r]' % self.size
-        else:
-            a = '[]'
-        if self.array:
-            return repr(self.array) + a
-        else:
-            return a
-
-
-class Parameter(object):
-
-    def __init__(self):
-        self.type = Type()
-        self.storage = None
-        self.declarator = None
-
-    def __repr__(self):
-        d = {
-            'type': self.type,
-        }
-        if self.declarator:
-            d['declarator'] = self.declarator
-        if self.storage:
-            d['storage'] = self.storage
-        l = ['%s=%r' % (k, v) for k, v in d.items()]
-        return 'Parameter(%s)' % ', '.join(l)
-
-
-class Type(object):
-
-    def __init__(self):
-        self.qualifiers = []
-        self.specifiers = []
-
-    def __repr__(self):
-        return ' '.join(self.qualifiers + [str(s) for s in self.specifiers])
-
-# These are used only internally.
-
-
-class StorageClassSpecifier(str):
-    pass
-
-
-class TypeSpecifier(str):
-    pass
-
-
-class StructTypeSpecifier(object):
-
-    def __init__(self, is_union, is_packed, tag, declarations):
-        self.is_union = is_union
-        self.is_packed = is_packed
-        self.tag = tag
-        self.declarations = declarations
-
-    def __repr__(self):
-        if self.is_union:
-            s = 'union'
-        else:
-            s = 'struct'
-        if self.is_packed:
-            s += ' __attribute__((packed))'
-        if self.tag:
-            s += ' %s' % self.tag
-        if self.declarations:
-            s += ' {%s}' % '; '.join([repr(d) for d in self.declarations])
-        return s
-
-
-class EnumSpecifier(object):
-
-    def __init__(self, tag, enumerators, src=None):
-        self.tag = tag
-        self.enumerators = enumerators
-        self.src = src
-
-    def __repr__(self):
-        s = 'enum'
-        if self.tag:
-            s += ' %s' % self.tag
-        if self.enumerators:
-            s += ' {%s}' % ', '.join([repr(e) for e in self.enumerators])
-        return s
-
-
-class Enumerator(object):
-
-    def __init__(self, name, expression):
-        self.name = name
-        self.expression = expression
-
-    def __repr__(self):
-        s = self.name
-        if self.expression:
-            s += ' = %r' % self.expression
-        return s
-
-
-class TypeQualifier(str):
-    pass
-
-
-def apply_specifiers(specifiers, declaration):
-    '''Apply specifiers to the declaration (declaration may be
-    a Parameter instead).'''
-    for s in specifiers:
-        if isinstance(s, StorageClassSpecifier):
-            if declaration.storage:
-                # Multiple storage classes, technically an error... ignore it
-                pass
-            declaration.storage = s
-        elif type(s) in (TypeSpecifier, StructTypeSpecifier, EnumSpecifier):
-            declaration.type.specifiers.append(s)
-        elif isinstance(s, TypeQualifier):
-            declaration.type.qualifiers.append(s)

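To make the C object model above concrete, here is a trimmed, stand-alone restatement (not the removed module itself) of how a declaration such as `const unsigned int count` is represented; only the attributes used by the repr are kept.

class Type(object):
    def __init__(self, qualifiers=None, specifiers=None):
        self.qualifiers = qualifiers or []
        self.specifiers = specifiers or []

    def __repr__(self):
        return " ".join(self.qualifiers + [str(s) for s in self.specifiers])


class Declaration(object):
    def __init__(self, declarator, type_):
        self.declarator = declarator
        self.type = type_

    def __repr__(self):
        return "Declaration(declarator=%r, type=%r)" % (self.declarator, self.type)


print(Declaration("count", Type(["const"], ["unsigned", "int"])))
# Declaration(declarator='count', type=const unsigned int)
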
File diff suppressed because it is too large
+ 0 - 282
python/grass/ctypes/ctypesgencore/parser/parsetab.py


+ 0 - 346
python/grass/ctypes/ctypesgencore/parser/pplexer.py

@@ -1,346 +0,0 @@
-#!/usr/bin/env python3
-
-'''Preprocess a C source file using gcc and convert the result into
-   a token stream
-
-Reference is C99:
-  * http://www.open-std.org/JTC1/SC22/WG14/www/docs/n1124.pdf
-
-'''
-
-__docformat__ = 'restructuredtext'
-
-import os
-import re
-import shlex
-import sys
-import tokenize
-import traceback
-
-import ctypes
-from . import lex
-from . import yacc
-from .lex import TOKEN
-
-
-PY2 = True
-if sys.version_info.major >= 3:
-    PY2 = False
-    long = int
-
-
-tokens = (
-    'HEADER_NAME', 'IDENTIFIER', 'PP_NUMBER', 'CHARACTER_CONSTANT',
-    'STRING_LITERAL', 'OTHER',
-
-    'PTR_OP', 'INC_OP', 'DEC_OP', 'LEFT_OP', 'RIGHT_OP', 'LE_OP', 'GE_OP',
-    'EQ_OP', 'NE_OP', 'AND_OP', 'OR_OP', 'MUL_ASSIGN', 'DIV_ASSIGN',
-    'MOD_ASSIGN', 'ADD_ASSIGN', 'SUB_ASSIGN', 'LEFT_ASSIGN', 'RIGHT_ASSIGN',
-    'AND_ASSIGN', 'XOR_ASSIGN', 'OR_ASSIGN', 'PERIOD', 'ELLIPSIS',
-
-    'LPAREN', 'NEWLINE',
-
-    'PP_DEFINE', 'PP_DEFINE_NAME', 'PP_DEFINE_MACRO_NAME', 'PP_MACRO_PARAM',
-    'PP_STRINGIFY', 'PP_IDENTIFIER_PASTE', 'PP_END_DEFINE'
-)
-
-states = [('DEFINE', "exclusive")]
-
-subs = {
-    'D': '[0-9]',
-    'L': '[a-zA-Z_]',
-    'H': '[a-fA-F0-9]',
-    'E': '[Ee][+-]?\s*{D}+',
-    'FS': '([FfLl]|d[dfl]|D[DFL]|[fFdD][0-9]+x?)',
-    'IS': '[uUlL]*',
-}
-# Helper: substitute {foo} with subs[foo] in string (makes regexes more lexy)
-sub_pattern = re.compile('{([^}]*)}')
-
-
-def sub_repl_match(m):
-    return subs[m.groups()[0]]
-
-
-def sub(s):
-    return sub_pattern.sub(sub_repl_match, s)
-
-# --------------------------------------------------------------------------
-# Token value types
-# --------------------------------------------------------------------------
-
-# Numbers represented as int and float types.
-# For all other tokens, type is just str representation.
-
-
-class StringLiteral(str):
-
-    def __new__(cls, value):
-        # Unescaping probably not perfect but close enough.
-        try:
-            value = re.sub(r'\\x([0-9a-fA-F])(?![0-9a-fA-F])',
-                           r'\\x0\\1', value[1:-1])
-        except ValueError as e:
-            raise ValueError("invalid \\x escape in %s" % value)
-
-        return str.__new__(cls, value)
-
-# --------------------------------------------------------------------------
-# Token declarations
-# --------------------------------------------------------------------------
-
-
-punctuators = {
-    # value: (regex, type)
-    r'...': (r'\.\.\.', 'ELLIPSIS'),
-    r'>>=': (r'>>=', 'RIGHT_ASSIGN'),
-    r'<<=': (r'<<=', 'LEFT_ASSIGN'),
-    r'+=': (r'\+=', 'ADD_ASSIGN'),
-    r'-=': (r'-=', 'SUB_ASSIGN'),
-    r'*=': (r'\*=', 'MUL_ASSIGN'),
-    r'/=': (r'/=', 'DIV_ASSIGN'),
-    r'%=': (r'%=', 'MOD_ASSIGN'),
-    r'&=': (r'&=', 'AND_ASSIGN'),
-    r'^=': (r'\^=', 'XOR_ASSIGN'),
-    r'|=': (r'\|=', 'OR_ASSIGN'),
-    r'>>': (r'>>', 'RIGHT_OP'),
-    r'<<': (r'<<', 'LEFT_OP'),
-    r'++': (r'\+\+', 'INC_OP'),
-    r'--': (r'--', 'DEC_OP'),
-    r'->': (r'->', 'PTR_OP'),
-    r'&&': (r'&&', 'AND_OP'),
-    r'||': (r'\|\|', 'OR_OP'),
-    r'<=': (r'<=', 'LE_OP'),
-    r'>=': (r'>=', 'GE_OP'),
-    r'==': (r'==', 'EQ_OP'),
-    r'!=': (r'!=', 'NE_OP'),
-    r'<:': (r'<:', '['),
-    r':>': (r':>', ']'),
-    r'<%': (r'<%', '{'),
-    r'%>': (r'%>', '}'),
-    r';': (r';', ';'),
-    r'{': (r'{', '{'),
-    r'}': (r'}', '}'),
-    r',': (r',', ','),
-    r':': (r':', ':'),
-    r'=': (r'=', '='),
-    r')': (r'\)', ')'),
-    r'[': (r'\[', '['),
-    r']': (r']', ']'),
-    r'.': (r'\.', 'PERIOD'),
-    r'&': (r'&', '&'),
-    r'!': (r'!', '!'),
-    r'~': (r'~', '~'),
-    r'-': (r'-', '-'),
-    r'+': (r'\+', '+'),
-    r'*': (r'\*', '*'),
-    r'/': (r'/', '/'),
-    r'%': (r'%', '%'),
-    r'<': (r'<', '<'),
-    r'>': (r'>', '>'),
-    r'^': (r'\^', '^'),
-    r'|': (r'\|', '|'),
-    r'?': (r'\?', '?')
-}
-
-
-def punctuator_regex(punctuators):
-    punctuator_regexes = [v[0] for v in punctuators.values()]
-    if PY2:
-        punctuator_regexes.sort(lambda a, b: -cmp(len(a), len(b)))
-    else:
-        punctuator_regexes.sort(key=lambda a: -len(a))
-    return '(%s)' % '|'.join(punctuator_regexes)
-
-
-# Process line-number directives from the preprocessor
-# See http://docs.freebsd.org/info/cpp/cpp.info.Output.html
-DIRECTIVE = r'\#\s+(\d+)\s+"([^"]+)"[ \d]*\n'
-
-
-@TOKEN(DIRECTIVE)
-def t_ANY_directive(t):
-    t.lexer.filename = t.groups[2]
-    t.lexer.lineno = int(t.groups[1])
-    return None
-
-
-@TOKEN(punctuator_regex(punctuators))
-def t_ANY_punctuator(t):
-    t.type = punctuators[t.value][1]
-    return t
-
-
-IDENTIFIER = sub('{L}({L}|{D})*')
-
-
-@TOKEN(IDENTIFIER)
-def t_INITIAL_identifier(t):
-    t.type = 'IDENTIFIER'
-    return t
-
-
-@TOKEN(IDENTIFIER)
-def t_DEFINE_identifier(t):
-    if t.lexer.next_is_define_name:
-        # This identifier is the name of a macro
-        # We need to look ahead and see if this macro takes parameters or not.
-        if t.lexpos + len(t.value) < t.lexer.lexlen and \
-                t.lexer.lexdata[t.lexpos + len(t.value)] == '(':
-
-            t.type = 'PP_DEFINE_MACRO_NAME'
-
-            # Look ahead and read macro parameter list
-            lexdata = t.lexer.lexdata
-            pos = t.lexpos + len(t.value) + 1
-            while lexdata[pos] not in '\n)':
-                pos += 1
-            params = lexdata[t.lexpos + len(t.value) + 1: pos]
-            paramlist = [x.strip() for x in params.split(",") if x.strip()]
-            t.lexer.macro_params = paramlist
-
-        else:
-            t.type = 'PP_DEFINE_NAME'
-
-        t.lexer.next_is_define_name = False
-    elif t.value in t.lexer.macro_params:
-        t.type = 'PP_MACRO_PARAM'
-    else:
-        t.type = 'IDENTIFIER'
-    return t
-
-
-FLOAT_LITERAL = sub(r"(?P<p1>{D}+)?(?P<dp>[.]?)(?P<p2>(?(p1){D}*|{D}+))"
-                    r"(?P<exp>(?:[Ee][+-]?{D}+)?)(?P<suf>{FS}?)(?!\w)")
-
-
-@TOKEN(FLOAT_LITERAL)
-def t_ANY_float(t):
-    t.type = 'PP_NUMBER'
-    m = t.lexer.lexmatch
-
-    p1 = m.group("p1")
-    dp = m.group("dp")
-    p2 = m.group("p2")
-    exp = m.group("exp")
-    suf = m.group("suf")
-
-    if dp or exp or (suf and suf not in ("Ll")):
-        s = m.group(0)
-        if suf:
-            s = s[:-len(suf)]
-        # Attach a prefix so the parser can figure out if should become an
-        # integer, float, or long
-        t.value = "f" + s
-    elif (suf and suf in ("Ll")):
-        t.value = "l" + p1
-    else:
-        t.value = "i" + p1
-
-    return t
-
-
-INT_LITERAL = sub(r"(?P<p1>(?:0x{H}+)|(?:{D}+))(?P<suf>{IS})")
-
-
-@TOKEN(INT_LITERAL)
-def t_ANY_int(t):
-    t.type = 'PP_NUMBER'
-    m = t.lexer.lexmatch
-
-    if "L" in m.group(3) or "l" in m.group(2):
-        prefix = "l"
-    else:
-        prefix = "i"
-
-    g1 = m.group(2)
-    if g1.startswith("0x"):
-        # Convert base from hexadecimal
-        g1 = str(long(g1[2:], 16))
-    elif g1[0] == "0":
-        # Convert base from octal
-        g1 = str(long(g1, 8))
-
-    t.value = prefix + g1
-
-    return t
-
-
-CHARACTER_CONSTANT = sub(r"L?'(\\.|[^\\'])+'")
-
-
-@TOKEN(CHARACTER_CONSTANT)
-def t_ANY_character_constant(t):
-    t.type = 'CHARACTER_CONSTANT'
-    return t
-
-
-STRING_LITERAL = sub(r'L?"(\\.|[^\\"])*"')
-
-
-@TOKEN(STRING_LITERAL)
-def t_ANY_string_literal(t):
-    t.type = 'STRING_LITERAL'
-    t.value = StringLiteral(t.value)
-    return t
-
-
-@TOKEN(r'\(')
-def t_ANY_lparen(t):
-    if t.lexpos == 0 or t.lexer.lexdata[t.lexpos - 1] not in (' \t\f\v\n'):
-        t.type = 'LPAREN'
-    else:
-        t.type = '('
-    return t
-
-
-@TOKEN(r'\n')
-def t_INITIAL_newline(t):
-    t.lexer.lineno += 1
-    return None
-
-
-@TOKEN(r'\#define')
-def t_INITIAL_pp_define(t):
-    t.type = 'PP_DEFINE'
-    t.lexer.begin("DEFINE")
-    t.lexer.next_is_define_name = True
-    t.lexer.macro_params = set()
-    return t
-
-
-@TOKEN(r'\n')
-def t_DEFINE_newline(t):
-    t.type = 'PP_END_DEFINE'
-    t.lexer.begin("INITIAL")
-    t.lexer.lineno += 1
-    del t.lexer.macro_params
-
-    # Damage control in case the token immediately after the #define failed
-    # to handle this
-    t.lexer.next_is_define_name = False
-    return t
-
-
-@TOKEN(r'(\#\#)|(\#)')
-def t_DEFINE_pp_param_op(t):
-    if t.value == '#':
-        t.type = 'PP_STRINGIFY'
-    else:
-        t.type = 'PP_IDENTIFIER_PASTE'
-    return t
-
-
-def t_INITIAL_error(t):
-    t.type = 'OTHER'
-    return t
-
-
-def t_DEFINE_error(t):
-    t.type = 'OTHER'
-    t.value = t.value[0]
-    t.lexer.lexpos += 1  # Skip it if it's an error in a #define
-    return t
-
-
-t_ANY_ignore = ' \t\v\f\r'

+ 0 - 9
python/grass/ctypes/ctypesgencore/printer/defaultheader.py

@@ -1,9 +0,0 @@
-'''Wrapper for %(name)s
-
-Generated with:
-%(argv)s
-
-Do not modify this file.
-'''
-
-__docformat__ = 'restructuredtext'

+ 0 - 362
python/grass/ctypes/ctypesgencore/printer/printer.py

@@ -1,362 +0,0 @@
-#!/usr/bin/env python3
-from __future__ import print_function
-
-
-import os
-import sys
-from . import test  # So we can find the path to local files in the printer package
-import time
-
-import ctypesgencore.libraryloader  # So we can get the path to it
-from ctypesgencore.ctypedescs import *
-from ctypesgencore.descriptions import *
-from ctypesgencore.messages import *
-
-
-def path_to_local_file(name, known_local_module=test):
-    basedir = os.path.dirname(known_local_module.__file__)
-    return os.path.join(basedir, name)
-
-
-class WrapperPrinter:
-
-    def __init__(self, outpath, options, data):
-        status_message("Writing to %s." % outpath)
-
-        self.file = open(outpath, "w")
-        self.options = options
-
-        if self.options.strip_build_path and \
-                self.options.strip_build_path[-1] != os.path.sep:
-            self.options.strip_build_path += os.path.sep
-
-        self.print_header()
-        print(file=self.file)
-
-        self.print_preamble()
-        print(file=self.file)
-
-        self.print_loader()
-        print(file=self.file)
-
-        self.print_group(self.options.libraries,
-                         "libraries", self.print_library)
-        self.print_group(self.options.modules, "modules", self.print_module)
-
-        method_table = {
-            'function': self.print_function,
-            'macro': self.print_macro,
-            'struct': self.print_struct,
-            'struct-body': self.print_struct_members,
-            'typedef': self.print_typedef,
-            'variable': self.print_variable,
-            'enum': self.print_enum,
-            'constant': self.print_constant
-        }
-
-        for kind, desc in data.output_order:
-            if desc.included:
-                method_table[kind](desc)
-                print(file=self.file)
-
-        self.print_group(self.options.inserted_files, "inserted files",
-                         self.insert_file)
-
-    def print_group(self, list, name, function):
-        if list:
-            print("# Begin %s" % name, file=self.file)
-            print(file=self.file)
-            for obj in list:
-                function(obj)
-            print(file=self.file)
-            print("# %d %s" % (len(list), name), file=self.file)
-            print("# End %s" % name, file=self.file)
-        else:
-            print("# No %s" % name, file=self.file)
-        print(file=self.file)
-
-    def srcinfo(self, src):
-        if src is None:
-            print(file=self.file)
-        else:
-            filename, lineno = src
-            if filename in ("<built-in>", "<command line>"):
-                print("# %s" % filename, file=self.file)
-            else:
-                if self.options.strip_build_path and \
-                        filename.startswith(self.options.strip_build_path):
-                    filename = filename[len(self.options.strip_build_path):]
-                print("# %s: %s" % (filename, lineno), file=self.file)
-
-    def template_subs(self):
-        template_subs = {
-            'date': time.ctime(),
-            'argv': ' '.join([x for x in sys.argv if not x.startswith("--strip-build-path")]),
-            'name': os.path.basename(self.options.headers[0])
-        }
-
-        for opt, value in self.options.__dict__.items():
-            if isinstance(value, str):
-                template_subs[opt] = value
-            elif isinstance(value, (list, tuple)):
-                template_subs[opt] = (os.path.sep).join(value)
-            else:
-                template_subs[opt] = repr(value)
-
-        return template_subs
-
-    def print_header(self):
-        template_file = None
-
-        if self.options.header_template:
-            path = self.options.header_template
-            try:
-                template_file = open(path, "r")
-            except IOError:
-                error_message("Cannot load header template from file \"%s\" "
-                              " - using default template." % path, cls='missing-file')
-
-        if not template_file:
-            path = path_to_local_file("defaultheader.py")
-            template_file = open(path, "r")
-
-        template_subs = self.template_subs()
-        self.file.write(template_file.read() % template_subs)
-
-        template_file.close()
-
-    def print_preamble(self):
-        path = path_to_local_file("preamble.py")
-
-        print("# Begin preamble", file=self.file)
-        print(file=self.file)
-        preamble_file = open(path, "r")
-        self.file.write(preamble_file.read())
-        preamble_file.close()
-        print(file=self.file)
-        print("# End preamble", file=self.file)
-
-    def print_loader(self):
-        print("_libs = {}", file=self.file)
-        print("_libdirs = %s" % self.options.compile_libdirs, file=self.file)
-        print(file=self.file)
-        print("# Begin loader", file=self.file)
-        print(file=self.file)
-        path = path_to_local_file("libraryloader.py",
-                                  ctypesgencore.libraryloader)
-        loader_file = open(path, "r")
-        self.file.write(loader_file.read())
-        loader_file.close()
-        print(file=self.file)
-        print("# End loader", file=self.file)
-        print(file=self.file)
-        print("add_library_search_dirs([%s])" %
-              ", ".join([repr(d) for d in self.options.runtime_libdirs]), file=self.file)
-
-    def print_library(self, library):
-        print('_libs["%s"] = load_library("%s")' % (library, library), file=self.file)
-
-    def print_module(self, module):
-        print('from %s import *' % name, file=self.file)
-
-    def print_constant(self, constant):
-        print('%s = %s' %
-              (constant.name, constant.value.py_string(False)), end=' ', file=self.file)
-        self.srcinfo(constant.src)
-
-    def print_typedef(self, typedef):
-        print('%s = %s' %
-              (typedef.name, typedef.ctype.py_string()), end=' ', file=self.file)
-        self.srcinfo(typedef.src)
-
-    def print_struct(self, struct):
-        self.srcinfo(struct.src)
-        base = {'union': 'Union', 'struct': 'Structure'}[struct.variety]
-        print('class %s_%s(%s):' %
-              (struct.variety, struct.tag, base), file=self.file)
-        print('    pass', file=self.file)
-
-    def print_struct_members(self, struct):
-        if struct.opaque:
-            return
-
-        # is this supposed to be packed?
-        if struct.packed:
-            print('{}_{}._pack_ = 1'.format(struct.variety, struct.tag),
-                  file=self.file)
-
-        # handle unnamed fields.
-        unnamed_fields = []
-        names = set([x[0] for x in struct.members])
-        anon_prefix = "unnamed_"
-        n = 1
-        for mi in range(len(struct.members)):
-            mem = list(struct.members[mi])
-            if mem[0] is None:
-                while True:
-                    name = "%s%i" % (anon_prefix, n)
-                    n += 1
-                    if name not in names:
-                        break
-                mem[0] = name
-                names.add(name)
-                if type(mem[1]) is CtypesStruct:
-                    unnamed_fields.append(name)
-                struct.members[mi] = mem
-
-        print('%s_%s.__slots__ = [' % (struct.variety, struct.tag), file=self.file)
-        for name, ctype in struct.members:
-            print("    '%s'," % name, file=self.file)
-        print(']', file=self.file)
-
-        if len(unnamed_fields) > 0:
-            print ('%s_%s._anonymous_ = [' % (struct.variety,
-                                              struct.tag), file=self.file)
-            for name in unnamed_fields:
-                print ("    '%s'," % name, file=self.file)
-            print (']', file=self.file)
-
-        print('%s_%s._fields_ = [' % (struct.variety, struct.tag), file=self.file)
-        for name, ctype in struct.members:
-            if isinstance(ctype, CtypesBitfield):
-                print("    ('%s', %s, %s)," %
-                      (name, ctype.py_string(), ctype.bitfield.py_string(False)), file=self.file)
-            else:
-                print("    ('%s', %s)," % (name, ctype.py_string()), file=self.file)
-        print(']', file=self.file)
-
-    def print_enum(self, enum):
-        print('enum_%s = c_int' % enum.tag, end=' ', file=self.file)
-        self.srcinfo(enum.src)
-        # Values of enumerator are output as constants.
-
-    def print_function(self, function):
-        if function.variadic:
-            self.print_variadic_function(function)
-        else:
-            self.print_fixed_function(function)
-
-    def print_fixed_function(self, function):
-        self.srcinfo(function.src)
-
-        # If we know what library the function lives in, look there.
-        # Otherwise, check all the libraries.
-        if function.source_library:
-            print("if hasattr(_libs[%r], %r):" %
-                  (function.source_library, function.c_name()), file=self.file)
-            print("    %s = _libs[%r].%s" %
-                  (function.py_name(), function.source_library, function.c_name()), file=self.file)
-        else:
-            print("for _lib in six.itervalues(_libs):", file=self.file)
-            print("    if not hasattr(_lib, %r):" % function.c_name(), file=self.file)
-            print("        continue", file=self.file)
-            print("    %s = _lib.%s" %
-                  (function.py_name(), function.c_name()), file=self.file)
-
-        # Argument types
-        print("    %s.argtypes = [%s]" % (function.py_name(),
-                                          ', '.join([a.py_string() for a in function.argtypes])), file=self.file)
-
-        # Return value
-        if function.restype.py_string() == "String":
-            print("    if sizeof(c_int) == sizeof(c_void_p):", file=self.file)
-            print("        %s.restype = ReturnString" %
-                  (function.py_name()), file=self.file)
-            print("    else:", file=self.file)
-            print("        %s.restype = %s" %
-                  (function.py_name(), function.restype.py_string()), file=self.file)
-            print("        %s.errcheck = ReturnString" %
-                  (function.py_name()), file=self.file)
-        else:
-            print("    %s.restype = %s" %
-                  (function.py_name(), function.restype.py_string()), file=self.file)
-            if function.errcheck:
-                print ("    %s.errcheck = %s" %
-                       (function.py_name(), function.errcheck.py_string()), file=self.file)
-
-        if not function.source_library:
-            print("    break", file=self.file)
-
-    def print_variadic_function(self, function):
-        self.srcinfo(function.src)
-        if function.source_library:
-            print("if hasattr(_libs[%r], %r):" %
-                  (function.source_library, function.c_name()), file=self.file)
-            print("    _func = _libs[%r].%s" %
-                  (function.source_library, function.c_name()), file=self.file)
-            print("    _restype = %s" % function.restype.py_string(), file=self.file)
-            print("    _errcheck = %s" % function.errcheck.py_string(), file=self.file)
-            print("    _argtypes = [%s]" %
-                  ', '.join([a.py_string() for a in function.argtypes]), file=self.file)
-            print("    %s = _variadic_function(_func,_restype,_argtypes,_errcheck)" %
-                  function.py_name(), file=self.file)
-        else:
-            print("for _lib in _libs.values():", file=self.file)
-            print("    if hasattr(_lib, %r):" % function.c_name(), file=self.file)
-            print("        _func = _lib.%s" %
-                  (function.c_name()), file=self.file)
-            print("        _restype = %s" % function.restype.py_string(), file=self.file)
-            print("        _errcheck = %s" % function.errcheck.py_string(), file=self.file)
-            print("        _argtypes = [%s]" %
-                  ', '.join([a.py_string() for a in function.argtypes]), file=self.file)
-            print("        %s = _variadic_function(_func,_restype,_argtypes,_errcheck)" %
-                  function.py_name(), file=self.file)
-
-    def print_variable(self, variable):
-        self.srcinfo(variable.src)
-        if variable.source_library:
-            print('try:', file=self.file)
-            print('    %s = (%s).in_dll(_libs[%r], %r)' %
-                  (variable.py_name(),
-                   variable.ctype.py_string(),
-                   variable.source_library,
-                   variable.c_name()), file=self.file)
-            print('except:', file=self.file)
-            print('    pass', file=self.file)
-        else:
-            print("for _lib in _libs.values():", file=self.file)
-            print('    try:', file=self.file)
-            print('        %s = (%s).in_dll(_lib, %r)' %
-                  (variable.py_name(),
-                   variable.ctype.py_string(),
-                   variable.c_name()), file=self.file)
-            print("        break", file=self.file)
-            print('    except:', file=self.file)
-            print('        pass', file=self.file)
-
-    def print_macro(self, macro):
-        if macro.params:
-            self.print_func_macro(macro)
-        else:
-            self.print_simple_macro(macro)
-
-    def print_simple_macro(self, macro):
-        # The macro translator makes heroic efforts but it occasionally fails.
-        # We want to contain the failures as much as possible.
-        # Hence the try statement.
-        self.srcinfo(macro.src)
-        print("try:", file=self.file)
-        print("    %s = %s" % (macro.name, macro.expr.py_string(True)), file=self.file)
-        print("except:", file=self.file)
-        print("    pass", file=self.file)
-
-    def print_func_macro(self, macro):
-        self.srcinfo(macro.src)
-        print("def %s(%s):" %
-              (macro.name, ", ".join(macro.params)), file=self.file)
-        print("    return %s" % macro.expr.py_string(True), file=self.file)
-
-    def insert_file(self, filename):
-        try:
-            inserted_file = open(filename, "r")
-        except IOError:
-            error_message("Cannot open file \"%s\". Skipped it." % filename,
-                          cls='missing-file')
-
-        print("# Begin \"%s\"" % filename, file=self.file)
-        print(file=self.file)
-        self.file.write(inserted_file.read())
-        print(file=self.file)
-        print("# End \"%s\"" % filename, file=self.file)
-
-        inserted_file.close()

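To give a feel for what print_fixed_function() above writes into a generated module, here is a hand-written approximation; it loads libm directly instead of going through load_library() so the sketch is self-contained (POSIX only), and it omits the String/ReturnString restype handling and the multi-library loop.

import ctypes
import ctypes.util

# _libs is the dict the generated module normally fills via load_library().
_libs = {"m": ctypes.CDLL(ctypes.util.find_library("m"))}

if hasattr(_libs["m"], "cos"):
    cos = _libs["m"].cos
    cos.argtypes = [ctypes.c_double]
    cos.restype = ctypes.c_double

print(cos(0.0))  # 1.0
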
+ 0 - 6
python/grass/ctypes/ctypesgencore/printer/test.py

@@ -1,6 +0,0 @@
-"""
-ctypesgencore.printer.printer imports this module so that it can find the path
-to defaulttemplate.py and defaultloader.py.
-"""
-
-pass

+ 0 - 7
python/grass/ctypes/fix.sed

@@ -1,7 +0,0 @@
-#!/usr/bin/sed -f
-/^# End loader$/a\
-from .ctypes_preamble import *\
-from .ctypes_preamble import _variadic_function\
-from .ctypes_loader import *
-/^# Begin preamble$/,/^# End preamble$/d
-/^# Begin loader$/,/^# End loader$/d
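
The removed sed script is terse, so here is roughly the same transformation written out in Python for readability (a sketch of its net effect, not a drop-in replacement): insert the ctypes_preamble/ctypes_loader imports where the inlined loader ended, then strip the inlined preamble and loader blocks.

import re

IMPORTS = (
    "from .ctypes_preamble import *\n"
    "from .ctypes_preamble import _variadic_function\n"
    "from .ctypes_loader import *\n"
)


def fix(text):
    # Append the imports after the "# End loader" marker ...
    text = text.replace("# End loader\n", "# End loader\n" + IMPORTS)
    # ... then drop the inlined preamble and loader blocks themselves.
    text = re.sub(r"# Begin preamble\n.*?# End preamble\n", "", text, flags=re.S)
    text = re.sub(r"# Begin loader\n.*?# End loader\n", "", text, flags=re.S)
    return text


print(fix("# Begin loader\n...\n# End loader\n"))  # only the three imports remain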

+ 0 - 270
python/grass/ctypes/loader.py

@@ -1,270 +0,0 @@
-# ----------------------------------------------------------------------------
-# Copyright (c) 2008 David James
-# Copyright (c) 2006-2008 Alex Holkner
-# All rights reserved.
-#
-# Redistribution and use in source and binary forms, with or without
-# modification, are permitted provided that the following conditions
-# are met:
-#
-#  * Redistributions of source code must retain the above copyright
-#    notice, this list of conditions and the following disclaimer.
-#  * Redistributions in binary form must reproduce the above copyright
-#    notice, this list of conditions and the following disclaimer in
-#    the documentation and/or other materials provided with the
-#    distribution.
-#  * Neither the name of pyglet nor the names of its
-#    contributors may be used to endorse or promote products
-#    derived from this software without specific prior written
-#    permission.
-#
-# THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
-# "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
-# LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS
-# FOR A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE
-# COPYRIGHT OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT,
-# INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING,
-# BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES;
-# LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER
-# CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
-# LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN
-# ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
-# POSSIBILITY OF SUCH DAMAGE.
-# ----------------------------------------------------------------------------
-
-import glob
-import os.path
-import re
-import sys
-
-import ctypes
-import ctypes.util
-
-
-def _environ_path(name):
-    if name in os.environ:
-        return os.environ[name].split(":")
-    else:
-        return []
-
-
-class LibraryLoader(object):
-
-    def __init__(self):
-        self.other_dirs = []
-
-    def load_library(self, libname):
-        """Given the name of a library, load it."""
-        paths = self.getpaths(libname)
-
-        for path in paths:
-            if os.path.exists(path):
-                return self.load(path)
-
-        raise ImportError("%s not found." % libname)
-
-    def load(self, path):
-        """Given a path to a library, load it."""
-        try:
-            # Darwin requires dlopen to be called with mode RTLD_GLOBAL instead
-            # of the default RTLD_LOCAL.  Without this, you end up with
-            # libraries not being loadable, resulting in "Symbol not found"
-            # errors
-            if sys.platform == 'darwin':
-                return ctypes.CDLL(path, ctypes.RTLD_GLOBAL)
-            else:
-                return ctypes.cdll.LoadLibrary(path)
-        except OSError as e:
-            raise ImportError(e)
-
-    def getpaths(self, libname):
-        """Return a list of paths where the library might be found."""
-        if os.path.isabs(libname):
-            yield libname
-
-        else:
-            for path in self.getplatformpaths(libname):
-                yield path
-
-            path = ctypes.util.find_library(libname)
-            if path:
-                yield path
-
-    def getplatformpaths(self, libname):
-        return []
-
-# Darwin (Mac OS X)
-
-
-class DarwinLibraryLoader(LibraryLoader):
-    name_formats = ["lib%s.dylib", "lib%s.so", "lib%s.bundle", "%s.dylib",
-                    "%s.so", "%s.bundle", "%s"]
-
-    def getplatformpaths(self, libname):
-        if os.path.pathsep in libname:
-            names = [libname]
-        else:
-            names = [format % libname for format in self.name_formats]
-
-        for dir in self.getdirs(libname):
-            for name in names:
-                yield os.path.join(dir, name)
-
-    def getdirs(self, libname):
-        '''Implements the dylib search as specified in Apple documentation:
-
-        http://developer.apple.com/documentation/DeveloperTools/Conceptual/
-            DynamicLibraries/Articles/DynamicLibraryUsageGuidelines.html
-
-        Before commencing the standard search, the method first checks
-        the bundle's ``Frameworks`` directory if the application is running
-        within a bundle (OS X .app).
-        '''
-
-        dyld_fallback_library_path = _environ_path("DYLD_FALLBACK_LIBRARY_PATH")
-        if not dyld_fallback_library_path:
-            dyld_fallback_library_path = [os.path.expanduser('~/lib'),
-                                          '/usr/local/lib', '/usr/lib',
-                                          os.path.join(sys.prefix, 'lib')]
-        dyld_fallback_library_path.extend(_environ_path('LD_RUN_PATH'))
-
-        dirs = []
-
-        if '/' in libname:
-            dirs.extend(_environ_path("DYLD_LIBRARY_PATH"))
-        else:
-            dirs.extend(_environ_path("LD_LIBRARY_PATH"))
-            dirs.extend(_environ_path("DYLD_LIBRARY_PATH"))
-
-        dirs.extend(self.other_dirs)
-        dirs.append(".")
-
-        if hasattr(sys, 'frozen') and sys.frozen == 'macosx_app':
-            dirs.append(os.path.join(
-                os.environ['RESOURCEPATH'],
-                '..',
-                'Frameworks'))
-
-        dirs.extend(dyld_fallback_library_path)
-
-        return dirs
-
-# Posix
-
-
-class PosixLibraryLoader(LibraryLoader):
-    _ld_so_cache = None
-
-    def _create_ld_so_cache(self):
-        # Recreate search path followed by ld.so.  This is going to be
-        # slow to build, and incorrect (ld.so uses ld.so.cache, which may
-        # not be up-to-date).  Used only as fallback for distros without
-        # /sbin/ldconfig.
-        #
-        # We assume the DT_RPATH and DT_RUNPATH binary sections are omitted.
-
-        directories = []
-        for name in ("LD_LIBRARY_PATH",
-                     "SHLIB_PATH",  # HPUX
-                     "LIBPATH",  # OS/2, AIX
-                     "LIBRARY_PATH",  # BE/OS
-                     ):
-            if name in os.environ:
-                directories.extend(os.environ[name].split(os.pathsep))
-        directories.extend(self.other_dirs)
-        directories.append(".")
-
-        try:
-            directories.extend([dir.strip() for dir in open('/etc/ld.so.conf')])
-        except IOError:
-            pass
-
-        directories.extend(['/lib', '/usr/lib', '/lib64', '/usr/lib64'])
-
-        cache = {}
-        lib_re = re.compile(r'lib(.*)\.s[ol]')
-        ext_re = re.compile(r'\.s[ol]$')
-        for dir in directories:
-            try:
-                for path in glob.glob("%s/*.s[ol]*" % dir):
-                    file = os.path.basename(path)
-
-                    # Index by filename
-                    if file not in cache:
-                        cache[file] = path
-
-                    # Index by library name
-                    match = lib_re.match(file)
-                    if match:
-                        library = match.group(1)
-                        if library not in cache:
-                            cache[library] = path
-            except OSError:
-                pass
-
-        self._ld_so_cache = cache
-
-    def getplatformpaths(self, libname):
-        if self._ld_so_cache is None:
-            self._create_ld_so_cache()
-
-        result = self._ld_so_cache.get(libname)
-        if result:
-            yield result
-
-        path = ctypes.util.find_library(libname)
-        if path:
-            yield os.path.join("/lib", path)
-
-# Windows
-
-
-class _WindowsLibrary(object):
-
-    def __init__(self, path):
-        self.cdll = ctypes.cdll.LoadLibrary(path)
-        self.windll = ctypes.windll.LoadLibrary(path)
-
-    def __getattr__(self, name):
-        try:
-            return getattr(self.cdll, name)
-        except AttributeError:
-            try:
-                return getattr(self.windll, name)
-            except AttributeError:
-                raise
-
-
-class WindowsLibraryLoader(LibraryLoader):
-    name_formats = ["%s.dll", "lib%s.dll"]
-
-    def load(self, path):
-        return _WindowsLibrary(path)
-
-    def getplatformpaths(self, libname):
-        if os.path.sep not in libname:
-            for name in self.name_formats:
-                path = ctypes.util.find_library(name % libname)
-                if path:
-                    yield path
-
-# Platform switching
-
-# If your value of sys.platform does not appear in this dict, please contact
-# the Ctypesgen maintainers.
-
-loaderclass = {
-    "darwin": DarwinLibraryLoader,
-    "cygwin": WindowsLibraryLoader,
-    "win32": WindowsLibraryLoader
-}
-
-loader = loaderclass.get(sys.platform, PosixLibraryLoader)()
-
-
-def add_library_search_dirs(other_dirs):
-    loader.other_dirs = other_dirs
-
-load_library = loader.load_library
-
-del loaderclass
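
Note that this older copy's add_library_search_dirs() simply rebinds other_dirs, whereas the loader variant earlier in this diff resolves relative entries against the module's own directory before appending them. A small sketch of that normalisation (BASE_DIR stands in for os.path.dirname(__file__)):

import os

BASE_DIR = os.getcwd()  # stand-in for os.path.dirname(__file__)


def normalized_search_dirs(other_dirs):
    out = []
    for d in other_dirs:
        if not os.path.isabs(d):
            # Relative entries are anchored to BASE_DIR before being appended.
            d = os.path.abspath(os.path.join(BASE_DIR, d))
        out.append(d)
    return out


print(normalized_search_dirs(["../lib", "/usr/lib"]))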

+ 12 - 0
python/grass/ctypes/run.py

@@ -0,0 +1,12 @@
+#!/usr/bin/env python3
+
+import sys, os
+
+THIS_DIR = os.path.dirname(__file__)
+# ensure that we can load the ctypesgen library
+sys.path.insert(0, THIS_DIR)
+
+import ctypesgen.main
+
+if __name__ == "__main__":
+    ctypesgen.main.main()
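
A quick way to check that the new entry point picks up the vendored ctypesgen is to run it with --help; the subprocess call below is just such a smoke test (it assumes it is invoked from python/grass/ctypes, where run.py lives).

import subprocess
import sys

# Exit code 0 and a usage message indicate the vendored ctypesgen imported fine.
result = subprocess.run(
    [sys.executable, "run.py", "--help"],
    capture_output=True,
    text=True,
)
print(result.returncode)
lines = (result.stdout or result.stderr).splitlines()
print(lines[0] if lines else "")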

+ 6 - 6
python/grass/pygrass/vector/table.py

@@ -33,7 +33,7 @@ from grass.script.db import db_table_in_vector
 from grass.script.core import warning
 
 from grass.pygrass.vector import sql
-from grass.lib.ctypes_preamble import String
+from grass.lib.ctypes_preamble import ReturnString
 
 
 if sys.version_info.major >= 3:
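
Background on why the swap from String to ReturnString in the hunks below works: ReturnString comes from the generated ctypes preamble imported above and wraps a Python str into the String type that these char* struct fields expect on assignment. The plain-ctypes sketch that follows (with a made-up FieldInfo struct and a hypothetical as_c_string() helper, neither of which exists in GRASS) illustrates the kind of str-to-char* conversion involved.

import ctypes


class FieldInfo(ctypes.Structure):
    # Made-up miniature of the field-info struct that Link wraps below.
    _fields_ = [("name", ctypes.c_char_p)]


def as_c_string(obj):
    # Hypothetical stand-in for ReturnString: make a str assignable to c_char_p.
    return obj.encode("utf-8") if isinstance(obj, str) else obj


fi = FieldInfo()
fi.name = as_c_string("roads")
print(fi.name)  # b'roads'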
@@ -678,7 +678,7 @@ class Link(object):
         return decode(self.c_fieldinfo.contents.name)
 
     def _set_name(self, name):
-        self.c_fieldinfo.contents.name = String(name)
+        self.c_fieldinfo.contents.name = ReturnString(name)
 
     name = property(fget=_get_name, fset=_set_name, doc="Set and obtain name value")
 
@@ -686,7 +686,7 @@ class Link(object):
         return decode(self.c_fieldinfo.contents.table)
 
     def _set_table(self, new_name):
-        self.c_fieldinfo.contents.table = String(new_name)
+        self.c_fieldinfo.contents.table = ReturnString(new_name)
 
     table_name = property(
         fget=_get_table, fset=_set_table, doc="Set and obtain table name value"
@@ -696,7 +696,7 @@ class Link(object):
         return decode(self.c_fieldinfo.contents.key)
 
     def _set_key(self, key):
-        self.c_fieldinfo.contents.key = String(key)
+        self.c_fieldinfo.contents.key = ReturnString(key)
 
     key = property(fget=_get_key, fset=_set_key, doc="Set and obtain cat value")
 
@@ -704,7 +704,7 @@ class Link(object):
         return decode(self.c_fieldinfo.contents.database)
 
     def _set_database(self, database):
-        self.c_fieldinfo.contents.database = String(database)
+        self.c_fieldinfo.contents.database = ReturnString(database)
 
     database = property(
         fget=_get_database, fset=_set_database, doc="Set and obtain database value"
@@ -717,7 +717,7 @@ class Link(object):
         if driver not in ("sqlite", "pg"):
             str_err = "Driver not supported, use: %s." % ", ".join(DRIVERS)
             raise TypeError(str_err)
-        self.c_fieldinfo.contents.driver = String(driver)
+        self.c_fieldinfo.contents.driver = ReturnString(driver)
 
     driver = property(
         fget=_get_driver,