#!/usr/bin/env python3

############################################################################
#
# MODULE:       Builds manual pages
# AUTHOR(S):    Markus Neteler
#               Glynn Clements
#               Martin Landa <landa.martin gmail.com>
# PURPOSE:      Create HTML manual page snippets
# COPYRIGHT:    (C) 2007-2017 by Glynn Clements
#               and the GRASS Development Team
#
#               This program is free software under the GNU General
#               Public License (>=v2). Read the file COPYING that
#               comes with GRASS for details.
#
#############################################################################
import sys
import os
import string
import re
from datetime import datetime
import locale
import json

try:
    # Python 2 import
    from HTMLParser import HTMLParser
except ImportError:
    # Python 3 import
    from html.parser import HTMLParser

try:
    import urlparse
except ImportError:
    import urllib.parse as urlparse

if sys.version_info[0] == 2:
    PY2 = True
else:
    PY2 = False

if not PY2:
    unicode = str
def _get_encoding():
    encoding = locale.getdefaultlocale()[1]
    if not encoding:
        encoding = 'UTF-8'
    return encoding


def decode(bytes_):
    """Decode bytes with default locale and return (unicode) string

    No-op if parameter is not bytes (assumed unicode string).

    :param bytes bytes_: the bytes to decode
    """
    if isinstance(bytes_, unicode):
        return bytes_
    if isinstance(bytes_, bytes):
        enc = _get_encoding()
        return bytes_.decode(enc)
    return unicode(bytes_)
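# For illustration (assuming a UTF-8 locale): decode(b'r.example') returns
# the string 'r.example', a value that is already a (unicode) string is
# returned unchanged, and a non-bytes value such as 42 comes back as '42'.
# The module name used here is made up for the example.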
pgm = sys.argv[1]

src_file = "%s.html" % pgm
tmp_file = "%s.tmp.html" % pgm

trunk_url = "https://github.com/OSGeo/grass/tree/master/"
addons_url = "https://github.com/OSGeo/grass-addons/tree/master/"
header_base = """<!DOCTYPE HTML PUBLIC "-//W3C//DTD HTML 4.01 Transitional//EN">
<html>
<head>
<title>GRASS GIS Manual: ${PGM}</title>
<meta http-equiv="Content-Type" content="text/html; charset=iso-8859-1">
<link rel="stylesheet" href="grassdocs.css" type="text/css">
</head>
<body bgcolor="white">
<div id="container">
<a href="index.html"><img src="grass_logo.png" alt="GRASS logo"></a>
<hr class="header">
"""

header_nopgm = """<h2>${PGM}</h2>
"""

header_pgm = """<h2>NAME</h2>
<em><b>${PGM}</b></em>
"""

header_pgm_desc = """<h2>NAME</h2>
<em><b>${PGM}</b></em> - ${PGM_DESC}
"""

sourcecode = string.Template(
    """<h2>SOURCE CODE</h2>
<p>Available at: <a href="${URL_SOURCE}">${PGM} source code</a> (<a href="${URL_LOG}">history</a>)</p>
"""
)

footer_index = string.Template(
    """<hr class="header">
<p>
<a href="index.html">Main index</a> |
<a href="${INDEXNAME}.html">${INDEXNAMECAP} index</a> |
<a href="topics.html">Topics index</a> |
<a href="keywords.html">Keywords index</a> |
<a href="graphical_index.html">Graphical index</a> |
<a href="full_index.html">Full index</a>
</p>
<p>
&copy; 2003-${YEAR}
<a href="http://grass.osgeo.org">GRASS Development Team</a>,
GRASS GIS ${GRASS_VERSION} Reference Manual
</p>
</div>
</body>
</html>
""")

footer_noindex = string.Template(
    """<hr class="header">
<p>
<a href="index.html">Main index</a> |
<a href="topics.html">Topics index</a> |
<a href="keywords.html">Keywords index</a> |
<a href="graphical_index.html">Graphical index</a> |
<a href="full_index.html">Full index</a>
</p>
<p>
&copy; 2003-${YEAR}
<a href="http://grass.osgeo.org">GRASS Development Team</a>,
GRASS GIS ${GRASS_VERSION} Reference Manual
</p>
</div>
</body>
</html>
""")
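# For illustration: string.Template substitutes ${...} placeholders by name,
# e.g. footer_index.substitute(INDEXNAME='raster', INDEXNAMECAP='Raster',
# YEAR='2021', GRASS_VERSION='7.8') fills in ${INDEXNAME}, ${INDEXNAMECAP},
# ${YEAR} and ${GRASS_VERSION} above; the values in this example are
# arbitrary and only meant to show the mechanism.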
def read_file(name):
    try:
        f = open(name, 'rb')
        s = f.read()
        f.close()
        if PY2:
            return s
        else:
            return decode(s)
    except IOError:
        return ""
def create_toc(src_data):
    class MyHTMLParser(HTMLParser):
        def __init__(self):
            HTMLParser.__init__(self)
            self.reset()
            self.idx = 1
            self.tag_curr = ''
            self.tag_last = ''
            self.process_text = False
            self.data = []
            self.tags_allowed = ('h1', 'h2', 'h3')
            self.tags_ignored = ('img',)
            self.text = ''

        def handle_starttag(self, tag, attrs):
            if tag in self.tags_allowed:
                self.process_text = True
            self.tag_last = self.tag_curr
            self.tag_curr = tag

        def handle_endtag(self, tag):
            if tag in self.tags_allowed:
                self.data.append((tag, '%s_%d' % (tag, self.idx),
                                  self.text))
                self.idx += 1
                self.process_text = False
                self.text = ''

            self.tag_curr = self.tag_last

        def handle_data(self, data):
            if not self.process_text:
                return
            if self.tag_curr in self.tags_allowed or self.tag_curr in self.tags_ignored:
                self.text += data
            else:
                self.text += '<%s>%s</%s>' % (self.tag_curr, data, self.tag_curr)

    # instantiate the parser and feed it some HTML
    parser = MyHTMLParser()
    parser.feed(src_data)

    return parser.data
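# For illustration, feeding create_toc() a page fragment such as
#   '<h2>DESCRIPTION</h2> ... <h3>Flags</h3> ... <h2>SEE ALSO</h2>'
# yields a list of (tag, id, heading text) tuples along the lines of
#   [('h2', 'h2_1', 'DESCRIPTION'), ('h3', 'h3_2', 'Flags'),
#    ('h2', 'h2_3', 'SEE ALSO')]
# (the section titles here are only example headings).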
def escape_href(label):
    # remove html tags
    label = re.sub('<[^<]+?>', '', label)

    # fix &nbsp;
    label = label.replace('&nbsp;', '')

    # fix "
    label = label.replace('"', '')

    # replace space with hyphen + lower
    return label.replace(' ', '-').lower()
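# For illustration: escape_href('SEE ALSO') gives 'see-also', and
# escape_href('<b>Raster&nbsp;maps</b>') gives 'rastermaps' after the tags,
# '&nbsp;' and quotes are stripped (heading strings made up for the example).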
def write_toc(data):
    if not data:
        return

    fd = sys.stdout
    fd.write('<div class="toc">\n')
    fd.write('<h4 class="toc">Table of contents</h4>\n')
    fd.write('<ul class="toc">\n')
    first = True
    has_h2 = False
    in_h3 = False
    indent = 4
    for tag, href, text in data:
        if tag == 'h3' and not in_h3 and has_h2:
            fd.write('\n%s<ul class="toc">\n' % (' ' * indent))
            indent += 4
            in_h3 = True
        elif not first:
            fd.write('</li>\n')

        if tag == 'h2':
            has_h2 = True
            if in_h3:
                indent -= 4
                fd.write('%s</ul></li>\n' % (' ' * indent))
                in_h3 = False

        text = text.replace(u'\xa0', u' ')
        fd.write('%s<li class="toc"><a href="#%s" class="toc">%s</a>' % \
                 (' ' * indent, escape_href(text), text))
        first = False

    fd.write('</li>\n</ul>\n')
    fd.write('</div>\n')
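# For illustration, write_toc() turns the tuples produced by create_toc()
# into a nested list on stdout, roughly:
#   <div class="toc">
#   <h4 class="toc">Table of contents</h4>
#   <ul class="toc">
#       <li class="toc"><a href="#description" class="toc">DESCRIPTION</a></li>
#       ...
#   </ul>
#   </div>
# where the "description" anchor is what escape_href() produces for the
# example heading used above; h3 headings open a nested <ul> under their h2.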
def update_toc(data):
    ret_data = []
    pat = re.compile(r'(<(h[2|3])>)(.+)(</h[2|3]>)')
    idx = 1
    for line in data.splitlines():
        if pat.search(line):
            xline = pat.split(line)
            line = xline[1] + '<a name="%s">' % escape_href(xline[3]) + xline[3] + '</a>' + xline[4]
            idx += 1
        ret_data.append(line)

    return '\n'.join(ret_data)
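# For illustration, update_toc() wraps each <h2>/<h3> heading in an anchor so
# the TOC links above can target it, e.g. the line
#   <h2>SEE ALSO</h2>
# becomes
#   <h2><a name="see-also">SEE ALSO</a></h2>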
def get_addon_path(pgm):
    """Check if pgm is in the addons list and get the addon path

    :param str pgm: module name
    :return tuple: (True, path) if pgm is an addon, otherwise (None, None)
    """
    addon_base = os.getenv('GRASS_ADDON_BASE')
    if addon_base:
        # 'addons_paths.json' is a file created during extension
        # installation; see the get_addons_paths() function in the
        # g.extension.py file
        addons_paths = os.path.join(addon_base, 'addons_paths.json')
        if os.path.exists(addons_paths):
            with open(addons_paths, 'r') as f:
                addons_paths = json.load(f)
            for addon in addons_paths['tree']:
                split_path = addon['path'].split('/')
                root_dir, module_dir = split_path[0], split_path[-1]
                if 'grass7' == root_dir and pgm == module_dir:
                    return True, addon['path']
    return None, None
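# For illustration, with GRASS_ADDON_BASE set and an addons_paths.json entry
# such as {"path": "grass7/raster/r.example", ...} (a made-up addon name),
# get_addon_path('r.example') would return (True, 'grass7/raster/r.example');
# for a module that is not listed it returns (None, None).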
# process header
src_data = read_file(src_file)

name = re.search('(<!-- meta page name:)(.*)(-->)', src_data, re.IGNORECASE)
pgm_desc = None
if name:
    pgm = name.group(2).strip().split('-', 1)[0].strip()
    name_desc = re.search('(<!-- meta page name description:)(.*)(-->)', src_data, re.IGNORECASE)
    if name_desc:
        pgm_desc = name_desc.group(2).strip()

desc = re.search('(<!-- meta page description:)(.*)(-->)', src_data,
                 re.IGNORECASE)
if desc:
    pgm = desc.group(2).strip()
    header_tmpl = string.Template(header_base + header_nopgm)
else:
    if not pgm_desc:
        header_tmpl = string.Template(header_base + header_pgm)
    else:
        header_tmpl = string.Template(header_base + header_pgm_desc)
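# For illustration, the meta comments recognized above are HTML comments
# embedded in the source page, e.g. (made-up values):
#   <!-- meta page name: r.example -->
#   <!-- meta page name description: example raster module -->
#   <!-- meta page description: Example topic -->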
if not re.search('<html>', src_data, re.IGNORECASE):
    tmp_data = read_file(tmp_file)
    if not re.search('<html>', tmp_data, re.IGNORECASE):
        sys.stdout.write(header_tmpl.substitute(PGM=pgm, PGM_DESC=pgm_desc))
    if tmp_data:
        for line in tmp_data.splitlines(True):
            if not re.search('</body>|</html>', line, re.IGNORECASE):
                sys.stdout.write(line)

# create TOC
write_toc(create_toc(src_data))

# process body
sys.stdout.write(update_toc(src_data))

# if </html> is found, suppose a complete html is provided.
# otherwise, generate module class reference:
if re.search('</html>', src_data, re.IGNORECASE):
    sys.exit()
index_names = {
    'd' : 'display',
    'db': 'database',
    'g' : 'general',
    'i' : 'imagery',
    'm' : 'miscellaneous',
    'ps': 'postscript',
    'p' : 'paint',
    'r' : 'raster',
    'r3': 'raster3d',
    's' : 'sites',
    't' : 'temporal',
    'v' : 'vector'
}
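# For illustration, the module prefix (the part before the first dot) selects
# the manual index: a module named 'r.example' falls under 'raster' and
# 'v.example' under 'vector' (the module names here are made up).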
def to_title(name):
    """Convert name of command class/family to form suitable for title"""
    if name == 'raster3d':
        return '3D raster'
    elif name == 'postscript':
        return 'PostScript'
    else:
        return name.capitalize()


index_titles = {}
for key, name in index_names.items():
    index_titles[key] = to_title(name)
# process footer
index = re.search('(<!-- meta page index:)(.*)(-->)', src_data, re.IGNORECASE)
if index:
    index_name = index.group(2).strip()
    if '|' in index_name:
        index_name, index_name_cap = index_name.split('|', 1)
    else:
        index_name_cap = to_title(index_name)
else:
    mod_class = pgm.split('.', 1)[0]
    index_name = index_names.get(mod_class, '')
    index_name_cap = index_titles.get(mod_class, '')
grass_version = os.getenv("VERSION_NUMBER", "unknown")
year = os.getenv("VERSION_DATE")
if not year:
    year = str(datetime.now().year)

# check the names of scripts to assign the right folder
topdir = os.path.abspath(os.getenv("MODULE_TOPDIR"))
curdir = os.path.abspath(os.path.curdir)
if curdir.startswith(topdir):
    source_url = trunk_url
    pgmdir = curdir.replace(topdir, '').lstrip(os.path.sep)
else:
    # addons
    source_url = addons_url
    pgmdir = os.path.sep.join(curdir.split(os.path.sep)[-3:])
url_source = ''
if os.getenv('SOURCE_URL', ''):
    # addons
    for prefix in index_names.keys():
        cwd = os.getcwd()
        idx = cwd.find('{0}{1}.'.format(os.path.sep, prefix))
        if idx > -1:
            pgmname = cwd[idx + 1:]
            classname = index_names[prefix]
            url_source = urlparse.urljoin('{0}{1}/'.format(
                os.environ['SOURCE_URL'], classname),
                pgmname
            )
            break
else:
    url_source = urlparse.urljoin(source_url, pgmdir)

if sys.platform == 'win32':
    url_source = url_source.replace(os.path.sep, '/')
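# For illustration, when SOURCE_URL is not set the URL for a core module is
# composed with urlparse.urljoin(), e.g.
#   urlparse.urljoin('https://github.com/OSGeo/grass/tree/master/',
#                    'raster/r.example')
# gives 'https://github.com/OSGeo/grass/tree/master/raster/r.example'
# (the module path is chosen only as an example).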
if index_name:
    tree = 'grass/tree'
    commits = 'grass/commits'
    is_addon, addon_path = get_addon_path(pgm=pgm)
    if is_addon:
        # Fix gui/wxpython addon url path
        url_source = urlparse.urljoin(
            os.environ['SOURCE_URL'], addon_path.split('/', 1)[1],
        )
        tree = 'grass-addons/tree'
        commits = 'grass-addons/commits'

    sys.stdout.write(sourcecode.substitute(
        URL_SOURCE=url_source, PGM=pgm, URL_LOG=url_source.replace(
            tree, commits)))
    sys.stdout.write(footer_index.substitute(INDEXNAME=index_name,
                                             INDEXNAMECAP=index_name_cap,
                                             YEAR=year,
                                             GRASS_VERSION=grass_version))
else:
    sys.stdout.write(footer_noindex.substitute(YEAR=year,
                                               GRASS_VERSION=grass_version))