From 05289f504d8418d8015b57d131be7fec2c2c5760 Mon Sep 17 00:00:00 2001 From: Jeremy Mack Wright Date: Tue, 12 Jun 2018 11:44:45 -0400 Subject: [PATCH] Got module working with FreeCAD 0.17 running Python 3+ --- CQGui/Command.py | 7 +- Libs/docutils/__init__.py | 75 +- Libs/docutils/core.py | 12 +- Libs/docutils/docutils.conf | 5 + Libs/docutils/frontend.py | 63 +- Libs/docutils/io.py | 35 +- Libs/docutils/languages/fa.py | 61 + Libs/docutils/languages/lt.py | 67 +- Libs/docutils/languages/lv.py | 60 + Libs/docutils/languages/sv.py | 15 +- Libs/docutils/nodes.py | 20 +- Libs/docutils/parsers/rst/__init__.py | 33 +- .../parsers/rst/directives/__init__.py | 20 +- Libs/docutils/parsers/rst/directives/misc.py | 22 +- .../docutils/parsers/rst/directives/tables.py | 64 +- Libs/docutils/parsers/rst/languages/de.py | 28 +- Libs/docutils/parsers/rst/languages/fa.py | 102 + Libs/docutils/parsers/rst/languages/lv.py | 108 ++ Libs/docutils/parsers/rst/languages/sv.py | 107 +- Libs/docutils/parsers/rst/roles.py | 7 +- Libs/docutils/parsers/rst/states.py | 299 +-- Libs/docutils/parsers/rst/tableparser.py | 4 +- Libs/docutils/transforms/frontmatter.py | 6 +- Libs/docutils/transforms/peps.py | 8 +- Libs/docutils/transforms/references.py | 16 +- Libs/docutils/transforms/universal.py | 43 +- Libs/docutils/transforms/writer_aux.py | 2 +- Libs/docutils/utils/__init__.py | 74 +- Libs/docutils/utils/code_analyzer.py | 4 +- Libs/docutils/utils/error_reporting.py | 30 +- Libs/docutils/utils/math/__init__.py | 7 +- Libs/docutils/utils/math/latex2mathml.py | 17 +- Libs/docutils/utils/math/math2html.py | 406 ++-- Libs/docutils/utils/math/tex2mathml_extern.py | 147 ++ Libs/docutils/utils/punctuation_chars.py | 331 +--- Libs/docutils/utils/smartquotes.py | 504 +++-- Libs/docutils/utils/urischemes.py | 4 +- Libs/docutils/writers/__init__.py | 13 +- Libs/docutils/writers/_html_base.py | 1670 +++++++++++++++++ Libs/docutils/writers/docutils_xml.py | 22 +- Libs/docutils/writers/html4css1/__init__.py | 1215 ++---------- Libs/docutils/writers/html4css1/html4css1.css | 28 +- .../writers/html5_polyglot/__init__.py | 214 +++ .../{html4css1 => html5_polyglot}/math.css | 0 .../writers/html5_polyglot/minimal.css | 260 +++ .../docutils/writers/html5_polyglot/plain.css | 288 +++ .../writers/html5_polyglot/template.txt | 8 + Libs/docutils/writers/latex2e/__init__.py | 791 ++++---- Libs/docutils/writers/latex2e/default.tex | 1 - .../writers/latex2e/docutils-05-compat.sty | 738 ++++++++ Libs/docutils/writers/latex2e/xelatex.tex | 7 +- Libs/docutils/writers/manpage.py | 14 +- Libs/docutils/writers/odf_odt/__init__.py | 337 +++- Libs/docutils/writers/pep_html/pep.css | 2 +- Libs/docutils/writers/xetex/__init__.py | 30 +- Libs/pyqode/core/panels/folding.py | 5 +- Libs/pyqode/qt/__init__.py | 2 +- 57 files changed, 5924 insertions(+), 2534 deletions(-) create mode 100644 Libs/docutils/docutils.conf create mode 100644 Libs/docutils/languages/fa.py create mode 100644 Libs/docutils/languages/lv.py create mode 100644 Libs/docutils/parsers/rst/languages/fa.py create mode 100644 Libs/docutils/parsers/rst/languages/lv.py create mode 100644 Libs/docutils/utils/math/tex2mathml_extern.py create mode 100644 Libs/docutils/writers/_html_base.py create mode 100644 Libs/docutils/writers/html5_polyglot/__init__.py rename Libs/docutils/writers/{html4css1 => html5_polyglot}/math.css (100%) create mode 100644 Libs/docutils/writers/html5_polyglot/minimal.css create mode 100644 Libs/docutils/writers/html5_polyglot/plain.css create mode 100644 
Libs/docutils/writers/html5_polyglot/template.txt create mode 100644 Libs/docutils/writers/latex2e/docutils-05-compat.sty diff --git a/CQGui/Command.py b/CQGui/Command.py index f644466..fab7bda 100644 --- a/CQGui/Command.py +++ b/CQGui/Command.py @@ -1,7 +1,6 @@ # -*- coding: utf-8 -*- """Adds all of the commands that are used for the menus of the CadQuery module""" -# (c) 2014-2016 Jeremy Wright Apache 2.0 License - +# (c) 2014-2018 Jeremy Wright Apache 2.0 License import imp, os, sys, tempfile import FreeCAD, FreeCADGui from PySide import QtGui, QtCore @@ -160,7 +159,7 @@ class CadQueryExecuteScript: scriptText = cqCodePane.toPlainText().encode('utf-8') # Check to see if we are executig a CQGI compliant script - if "show_object(" in scriptText or "debug(" in scriptText: + if b"show_object(" in scriptText or b"debug(" in scriptText: FreeCAD.Console.PrintMessage("Executing CQGI-compliant script.\r\n") # A repreentation of the CQ script with all the metadata attached @@ -451,7 +450,7 @@ class CadQueryValidateScript: scriptText = cqCodePane.toPlainText().encode('utf-8') - if ("show_object(" not in scriptText and "# show_object(" in scriptText and "#show_boject(" in scriptText) or ("debug(" not in scriptText and "# debug(" in scriptText and "#debug(" in scriptText): + if (b"show_object(" not in scriptText) and (b"debug(" not in scriptText): FreeCAD.Console.PrintError("Script did not call show_object or debug, no output available. Script must be CQGI compliant to get build output, variable editing and validation.\r\n") return diff --git a/Libs/docutils/__init__.py b/Libs/docutils/__init__.py index 270b883..bc43075 100644 --- a/Libs/docutils/__init__.py +++ b/Libs/docutils/__init__.py @@ -1,4 +1,4 @@ -# $Id: __init__.py 7756 2014-07-06 11:48:05Z grubert $ +# $Id: __init__.py 8147 2017-08-03 09:01:16Z grubert $ # Author: David Goodger # Copyright: This module has been placed in the public domain. @@ -50,22 +50,66 @@ Subpackages: - writers: Format-specific output translators. """ -__docformat__ = 'reStructuredText' - -__version__ = '0.12' -"""``major.minor.micro`` version number. The micro number is bumped for API -changes, for new functionality, and for interim project releases. The minor -number is bumped whenever there is a significant project release. The major -number will be bumped when the project is feature-complete, and perhaps if -there is a major change in the design.""" - -__version_details__ = 'release' -"""Extra version details (e.g. 'snapshot 2005-05-29, r3410', 'repository', -'release'), modified automatically & manually.""" - import sys -class ApplicationError(StandardError): + +__docformat__ = 'reStructuredText' + +__version__ = '0.14' +"""Docutils version identifier (complies with PEP 440):: + + major.minor[.micro][releaselevel[serial]][.dev] + +* The major number will be bumped when the project is feature-complete, and + later if there is a major change in the design or API. +* The minor number is bumped whenever there are new features. +* The micro number is bumped for bug-fix releases. Omitted if micro=0. +* The releaselevel identifier is used for pre-releases, one of 'a' (alpha), + 'b' (beta), or 'rc' (release candidate). Omitted for final releases. +* The serial release number identifies prereleases; omitted if 0. +* The '.dev' suffix indicates active development, not a release, before the + version indicated. + +For version comparison operations, use `__version_info__` +rather than parsing the text of `__version__`. 
+""" + +# workaround for Python < 2.6: +__version_info__ = (0, 14, 0, 'final', 0, True) +# To add in Docutils 0.15, replacing the line above: +""" +from collections import namedtuple +VersionInfo = namedtuple( + 'VersionInfo', 'major minor micro releaselevel serial release') +__version_info__ = VersionInfo( + major=0, + minor=15, + micro=0, + releaselevel='alpha', # development status: + # one of 'alpha', 'beta', 'candidate', 'final' + serial=0, # pre-release number (0 for final releases) + release=False # True for official releases and pre-releases + ) + +Comprehensive version information tuple. Can be used to test for a +minimally required version, e.g. :: + + if __version_info__ >= (0, 13, 0, 'candidate', 2, True) + +or in a self-documenting way like :: + + if __version_info__ >= docutils.VersionInfo( + major=0, minor=13, micro=0, + releaselevel='candidate', serial=2, release=True) +""" + +__version_details__ = '' +"""Optional extra version details (e.g. 'snapshot 2005-05-29, r3410'). +(For development and release status see `__version_info__`.) +""" + + +class ApplicationError(Exception): # Workaround: # In Python < 2.6, unicode() calls `str` on the # arg and therefore, e.g., unicode(StandardError(u'\u234')) fails @@ -74,6 +118,7 @@ class ApplicationError(StandardError): def __unicode__(self): return u', '.join(self.args) + class DataError(ApplicationError): pass diff --git a/Libs/docutils/core.py b/Libs/docutils/core.py index e3f4c9b..9689329 100644 --- a/Libs/docutils/core.py +++ b/Libs/docutils/core.py @@ -1,4 +1,4 @@ -# $Id: core.py 7466 2012-06-25 14:56:51Z milde $ +# $Id: core.py 8126 2017-06-23 09:34:28Z milde $ # Author: David Goodger # Copyright: This module has been placed in the public domain. @@ -218,10 +218,10 @@ class Publisher: self.apply_transforms() output = self.writer.write(self.document, self.destination) self.writer.assemble_parts() - except SystemExit, error: + except SystemExit as error: exit = 1 exit_status = error.code - except Exception, error: + except Exception as error: if not self.settings: # exception too early to report nicely raise if self.settings.traceback: # Propagate exceptions? @@ -279,9 +279,11 @@ class Publisher: print >>self._stderr, ("""\ Exiting due to error. Use "--traceback" to diagnose. Please report errors to . -Include "--traceback" output, Docutils version (%s [%s]), +Include "--traceback" output, Docutils version (%s%s), Python version (%s), your OS type & version, and the -command line used.""" % (__version__, __version_details__, +command line used.""" % (__version__, + docutils.__version_details__ and + ' [%s]'%docutils.__version_details__ or '', sys.version.split()[0])) def report_SystemMessage(self, error): diff --git a/Libs/docutils/docutils.conf b/Libs/docutils/docutils.conf new file mode 100644 index 0000000..cdce8d6 --- /dev/null +++ b/Libs/docutils/docutils.conf @@ -0,0 +1,5 @@ +# This configuration file is to prevent tools/buildhtml.py from +# processing text files in and below this directory. + +[buildhtml application] +prune: . diff --git a/Libs/docutils/frontend.py b/Libs/docutils/frontend.py index f837c62..307a0db 100644 --- a/Libs/docutils/frontend.py +++ b/Libs/docutils/frontend.py @@ -1,4 +1,4 @@ -# $Id: frontend.py 7584 2013-01-01 20:00:21Z milde $ +# $Id: frontend.py 8126 2017-06-23 09:34:28Z milde $ # Author: David Goodger # Copyright: This module has been placed in the public domain. 
@@ -33,14 +33,18 @@ import os import os.path import sys import warnings -import ConfigParser as CP +try: + import ConfigParser as CP +except: + import configparser as CP import codecs import optparse from optparse import SUPPRESS_HELP import docutils import docutils.utils import docutils.nodes -from docutils.utils.error_reporting import locale_encoding, ErrorOutput, ErrorString +from docutils.utils.error_reporting import (locale_encoding, SafeString, + ErrorOutput, ErrorString) def store_multiple(option, opt, value, parser, *args, **kwargs): @@ -61,7 +65,7 @@ def read_config_file(option, opt, value, parser): """ try: new_settings = parser.get_config_file_settings(value) - except ValueError, error: + except ValueError as error: parser.error(error) parser.values.update(new_settings, parser) @@ -205,10 +209,45 @@ def validate_strip_class(setting, value, option_parser, for cls in value: normalized = docutils.nodes.make_id(cls) if cls != normalized: - raise ValueError('invalid class value %r (perhaps %r?)' + raise ValueError('Invalid class value %r (perhaps %r?)' % (cls, normalized)) return value +def validate_smartquotes_locales(setting, value, option_parser, + config_parser=None, config_section=None): + """Check/normalize a comma separated list of smart quote definitions. + + Return a list of (language-tag, quotes) string tuples.""" + + # value is a comma separated string list: + value = validate_comma_separated_list(setting, value, option_parser, + config_parser, config_section) + # validate list elements + lc_quotes = [] + for item in value: + try: + lang, quotes = item.split(':', 1) + except AttributeError: + # this function is called for every option added to `value` + # -> ignore if already a tuple: + lc_quotes.append(item) + continue + except ValueError: + raise ValueError(u'Invalid value "%s".' + ' Format is ":".' + % item.encode('ascii', 'backslashreplace')) + # parse colon separated string list: + quotes = quotes.strip() + multichar_quotes = quotes.split(':') + if len(multichar_quotes) == 4: + quotes = multichar_quotes + elif len(quotes) != 4: + raise ValueError('Invalid value "%s". Please specify 4 quotes\n' + ' (primary open/close; secondary open/close).' + % item.encode('ascii', 'backslashreplace')) + lc_quotes.append((lang,quotes)) + return lc_quotes + def make_paths_absolute(pathdict, keys, base_path=None): """ Interpret filesystem path settings relative to the `base_path` given. 
@@ -310,7 +349,7 @@ class Option(optparse.Option): value = getattr(values, setting) try: new_value = self.validator(setting, value, parser) - except Exception, error: + except Exception as error: raise (optparse.OptionValueError( 'Error in option "%s":\n %s' % (opt, ErrorString(error))), @@ -534,8 +573,10 @@ class OptionParser(optparse.OptionParser, docutils.SettingsSpec): config_section = 'general' - version_template = ('%%prog (Docutils %s [%s], Python %s, on %s)' - % (docutils.__version__, docutils.__version_details__, + version_template = ('%%prog (Docutils %s%s, Python %s, on %s)' + % (docutils.__version__, + docutils.__version_details__ and + ' [%s]'%docutils.__version_details__ or '', sys.version.split()[0], sys.platform)) """Default version message.""" @@ -567,8 +608,8 @@ class OptionParser(optparse.OptionParser, docutils.SettingsSpec): if read_config_files and not self.defaults['_disable_config']: try: config_settings = self.get_standard_config_settings() - except ValueError, error: - self.error(error) + except ValueError as error: + self.error(SafeString(error)) self.set_defaults_from_dict(config_settings.__dict__) def populate_from_components(self, components): @@ -788,7 +829,7 @@ Skipping "%s" configuration file. new_value = option.validator( setting, value, option_parser, config_parser=self, config_section=section) - except Exception, error: + except Exception as error: raise (ValueError( 'Error in config file "%s", section "[%s]":\n' ' %s\n' diff --git a/Libs/docutils/io.py b/Libs/docutils/io.py index 1a14ac0..9ad7c4d 100644 --- a/Libs/docutils/io.py +++ b/Libs/docutils/io.py @@ -1,4 +1,4 @@ -# $Id: io.py 7596 2013-01-25 13:42:17Z milde $ +# $Id: io.py 8129 2017-06-27 14:55:22Z grubert $ # Author: David Goodger # Copyright: This module has been placed in the public domain. @@ -114,7 +114,7 @@ class Input(TransformSpec): self.successful_encoding = enc # Return decoded, removing BOMs. return decoded.replace(u'\ufeff', u'') - except (UnicodeError, LookupError), err: + except (UnicodeError, LookupError) as err: error = err # in Python 3, the is # local to the except clause raise UnicodeError( @@ -122,7 +122,7 @@ class Input(TransformSpec): '%s.\n(%s)' % (', '.join([repr(enc) for enc in encodings]), ErrorString(error))) - coding_slug = re.compile(b("coding[:=]\s*([-\w.]+)")) + coding_slug = re.compile(b(r"coding[:=]\s*([-\w.]+)")) """Encoding declaration pattern.""" byte_order_marks = ((codecs.BOM_UTF8, 'utf-8'), # 'utf-8-sig' new in v2.5 @@ -204,7 +204,7 @@ class FileInput(Input): """ def __init__(self, source=None, source_path=None, encoding=None, error_handler='strict', - autoclose=True, handle_io_errors=None, mode='rU'): + autoclose=True, mode='rU', **kwargs): """ :Parameters: - `source`: either a file-like object (which is read directly), or @@ -214,7 +214,6 @@ class FileInput(Input): - `error_handler`: the encoding error handler to use. - `autoclose`: close automatically after read (except when `sys.stdin` is the source). - - `handle_io_errors`: ignored, deprecated, will be removed. - `mode`: how the file is to be opened (see standard function `open`). The default 'rU' provides universal newline support for text files. 
@@ -222,6 +221,16 @@ class FileInput(Input): Input.__init__(self, source, source_path, encoding, error_handler) self.autoclose = autoclose self._stderr = ErrorOutput() + # deprecation warning + for key in kwargs: + if key == 'handle_io_errors': + sys.stderr.write('deprecation warning: ' + 'io.FileInput() argument `handle_io_errors` ' + 'is ignored since "Docutils 0.10 (2012-12-16)" ' + 'and will soon be removed.') + else: + raise TypeError('__init__() got an unexpected keyword ' + "argument '%s'" % key) if source is None: if source_path: @@ -234,7 +243,7 @@ class FileInput(Input): try: self.source = open(source_path, mode, **kwargs) - except IOError, error: + except IOError as error: raise InputError(error.errno, error.strerror, source_path) else: self.source = sys.stdin @@ -263,7 +272,7 @@ class FileInput(Input): data = b('\n').join(data.splitlines()) + b('\n') else: data = self.source.read() - except (UnicodeError, LookupError), err: # (in Py3k read() decodes) + except (UnicodeError, LookupError) as err: # (in Py3k read() decodes) if not self.encoding and self.source_path: # re-read in binary mode and decode with heuristics b_source = open(self.source_path, 'rb') @@ -353,7 +362,7 @@ class FileOutput(Output): kwargs = {} try: self.destination = open(self.destination_path, self.mode, **kwargs) - except IOError, error: + except IOError as error: raise OutputError(error.errno, error.strerror, self.destination_path) self.opened = True @@ -369,19 +378,19 @@ class FileOutput(Output): if ('b' not in self.mode and sys.version_info < (3,0) or check_encoding(self.destination, self.encoding) is False ): - if sys.version_info >= (3,0) and os.linesep != '\n': - data = data.replace('\n', os.linesep) # fix endings data = self.encode(data) + if sys.version_info >= (3,0) and os.linesep != '\n': + data = data.replace(b('\n'), b(os.linesep)) # fix endings try: # In Python < 2.5, try...except has to be nested in try...finally. try: self.destination.write(data) - except TypeError, e: + except TypeError as e: if sys.version_info >= (3,0) and isinstance(data, bytes): try: self.destination.buffer.write(data) except AttributeError: - if check_encoding(self.destination, + if check_encoding(self.destination, self.encoding) is False: raise ValueError('Encoding of %s (%s) differs \n' ' from specified encoding (%s)' % @@ -389,7 +398,7 @@ class FileOutput(Output): self.destination.encoding, self.encoding)) else: raise e - except (UnicodeError, LookupError), err: + except (UnicodeError, LookupError) as err: raise UnicodeError( 'Unable to encode output data. output-encoding is: ' '%s.\n(%s)' % (self.encoding, ErrorString(err))) diff --git a/Libs/docutils/languages/fa.py b/Libs/docutils/languages/fa.py new file mode 100644 index 0000000..48aa588 --- /dev/null +++ b/Libs/docutils/languages/fa.py @@ -0,0 +1,61 @@ +# -*- coding: utf-8 -*- +# $Id: fa.py 4564 2016-08-10 11:48:42Z +# Author: Shahin +# Copyright: This module has been placed in the public domain. + +# New language mappings are welcome. Before doing a new translation, please +# read . Two files must be +# translated for each language: one in docutils/languages, the other in +# docutils/parsers/rst/languages. + +""" +Persian-language mappings for language-dependent features of Docutils. 
+""" + +__docformat__ = 'reStructuredText' + +labels = { + # fixed: language-dependent + u'author': u'نویسنده', + u'authors': u'نویسندگان', + u'organization': u'سازمان', + u'address': u'آدرس', + u'contact': u'تماس', + u'version': u'نسخه', + u'revision': u'بازبینی', + u'status': u'وضعیت', + u'date': u'تاریخ', + u'copyright': u'کپی‌رایت', + u'dedication': u'تخصیص', + u'abstract': u'چکیده', + u'attention': u'توجه!', + u'caution': u'احتیاط!', + u'danger': u'خطر!', + u'error': u'خطا', + u'hint': u'راهنما', + u'important': u'مهم', + u'note': u'یادداشت', + u'tip': u'نکته', + u'warning': u'اخطار', + u'contents': u'محتوا'} +"""Mapping of node class name to label text.""" + +bibliographic_fields = { + # language-dependent: fixed + u'نویسنده': u'author', + u'نویسندگان': u'authors', + u'سازمان': u'organization', + u'آدرس': u'address', + u'تماس': u'contact', + u'نسخه': u'version', + u'بازبینی': u'revision', + u'وضعیت': u'status', + u'تاریخ': u'date', + u'کپی‌رایت': u'copyright', + u'تخصیص': u'dedication', + u'چکیده': u'abstract'} +"""Persian (lowcased) to canonical name mapping for bibliographic fields.""" + +author_separators = [u'؛', u'،'] +"""List of separator strings for the 'Authors' bibliographic field. Tried in +order.""" diff --git a/Libs/docutils/languages/lt.py b/Libs/docutils/languages/lt.py index 7e1569c..c5c3fdc 100644 --- a/Libs/docutils/languages/lt.py +++ b/Libs/docutils/languages/lt.py @@ -1,5 +1,5 @@ # -*- coding: utf-8 -*- -# $Id: lt.py 7668 2013-06-04 12:46:30Z milde $ +# $Id: lt.py 7911 2015-08-31 08:23:06Z milde $ # Author: Dalius Dobravolskas # Copyright: This module has been placed in the public domain. @@ -9,7 +9,7 @@ # docutils/parsers/rst/languages. """ -English-language mappings for language-dependent features of Docutils. +Lithuanian language mappings for language-dependent features of Docutils. """ __docformat__ = 'reStructuredText' @@ -54,68 +54,7 @@ bibliographic_fields = { 'autoriaus teisės': 'copyright', 'dedikacija': 'dedication', 'santrauka': 'abstract'} -"""English (lowcased) to canonical name mapping for bibliographic fields.""" - -author_separators = [';', ','] -"""List of separator strings for the 'Authors' bibliographic field. Tried in -order.""" -# -*- coding: utf-8 -*- -# $Id: lt.py 7668 2013-06-04 12:46:30Z milde $ -# Author: David Goodger -# Copyright: This module has been placed in the public domain. - -# New language mappings are welcome. Before doing a new translation, please -# read . Two files must be -# translated for each language: one in docutils/languages, the other in -# docutils/parsers/rst/languages. - -""" -English-language mappings for language-dependent features of Docutils. 
-""" - -__docformat__ = 'reStructuredText' - -labels = { - # fixed: language-dependent - 'author': 'Autorius', - 'authors': 'Autoriai', - 'organization': 'Organizacija', - 'address': 'Adresas', - 'contact': 'Kontaktas', - 'version': 'Versija', - 'revision': 'Revizija', - 'status': u'Būsena', - 'date': 'Data', - 'copyright': u'Autoriaus teisės', - 'dedication': 'Dedikacija', - 'abstract': 'Santrauka', - 'attention': u'Dėmesio!', - 'caution': 'Atsargiai!', - 'danger': '!PAVOJINGA!', - 'error': 'Klaida', - 'hint': u'Užuomina', - 'important': 'Svarbu', - 'note': 'Pastaba', - 'tip': 'Patarimas', - 'warning': u'Įspėjimas', - 'contents': 'Turinys'} -"""Mapping of node class name to label text.""" - -bibliographic_fields = { - # language-dependent: fixed - 'autorius': 'author', - 'autoriai': 'authors', - 'organizacija': 'organization', - 'adresas': 'address', - 'kontaktas': 'contact', - 'versija': 'version', - 'revizija': 'revision', - 'būsena': 'status', - 'data': 'date', - 'autoriaus teisės': 'copyright', - 'dedikacija': 'dedication', - 'santrauka': 'abstract'} -"""English (lowcased) to canonical name mapping for bibliographic fields.""" +"""Lithuanian (lowcased) to canonical name mapping for bibliographic fields.""" author_separators = [';', ','] """List of separator strings for the 'Authors' bibliographic field. Tried in diff --git a/Libs/docutils/languages/lv.py b/Libs/docutils/languages/lv.py new file mode 100644 index 0000000..f8125d4 --- /dev/null +++ b/Libs/docutils/languages/lv.py @@ -0,0 +1,60 @@ +# -*- coding: utf-8 -*- +# $Id: lv.py 7975 2016-10-20 20:00:19Z milde $ +# Copyright: This module has been placed in the public domain. + +# New language mappings are welcome. Before doing a new translation, please +# read . Two files must be +# translated for each language: one in docutils/languages, the other in +# docutils/parsers/rst/languages. + +""" +Latvian-language mappings for language-dependent features of Docutils. +""" + +__docformat__ = 'reStructuredText' + +labels = { + # fixed: language-dependent + 'author': 'Autors', + 'authors': 'Autori', + 'organization': 'Organizācija', + 'address': 'Adrese', + 'contact': 'Kontakti', + 'version': 'Versija', + 'revision': 'Revīzija', + 'status': 'Statuss', + 'date': 'Datums', + 'copyright': 'Copyright', + 'dedication': 'Veltījums', + 'abstract': 'Atreferējums', + 'attention': 'Uzmanību!', + 'caution': 'Piesardzību!', + 'danger': '!BĪSTAMI!', + 'error': 'Kļūda', + 'hint': 'Ieteikums', + 'important': 'Svarīgi', + 'note': 'Piezīme', + 'tip': 'Padoms', + 'warning': 'Brīdinājums', + 'contents': 'Saturs'} +"""Mapping of node class name to label text.""" + +bibliographic_fields = { + # language-dependent: fixed + 'autors': 'author', + 'autori': 'authors', + 'organizācija': 'organization', + 'adrese': 'address', + 'kontakti': 'contact', + 'versija': 'version', + 'revīzija': 'revision', + 'statuss': 'status', + 'datums': 'date', + 'copyright': 'copyright', + 'veltījums': 'dedication', + 'atreferējums': 'abstract'} +"""English (lowcased) to canonical name mapping for bibliographic fields.""" + +author_separators = [';', ','] +"""List of separator strings for the 'Authors' bibliographic field. 
Tried in +order.""" diff --git a/Libs/docutils/languages/sv.py b/Libs/docutils/languages/sv.py index b35b7ac..dfc0aeb 100644 --- a/Libs/docutils/languages/sv.py +++ b/Libs/docutils/languages/sv.py @@ -1,4 +1,5 @@ -# $Id: sv.py 4564 2006-05-21 20:44:42Z wiemann $ +# -*- coding: utf-8 -*- +# $Id: sv.py 8006 2016-12-22 23:02:44Z milde $ # Author: Adam Chodorowski # Copyright: This module has been placed in the public domain. @@ -14,8 +15,8 @@ Swedish language mappings for language-dependent features of Docutils. __docformat__ = 'reStructuredText' labels = { - 'author': u'F\u00f6rfattare', - 'authors': u'F\u00f6rfattare', + 'author': u'Författare', + 'authors': u'Författare', 'organization': u'Organisation', 'address': u'Adress', 'contact': u'Kontakt', @@ -27,20 +28,20 @@ labels = { 'dedication': u'Dedikation', 'abstract': u'Sammanfattning', 'attention': u'Observera!', - 'caution': u'Varning!', + 'caution': u'Akta!', # 'Varning' already used for 'warning' 'danger': u'FARA!', 'error': u'Fel', - 'hint': u'V\u00e4gledning', + 'hint': u'Vink', 'important': u'Viktigt', 'note': u'Notera', 'tip': u'Tips', 'warning': u'Varning', - 'contents': u'Inneh\u00e5ll' } + 'contents': u'Innehåll' } """Mapping of node class name to label text.""" bibliographic_fields = { # 'Author' and 'Authors' identical in Swedish; assume the plural: - u'f\u00f6rfattare': 'authors', + u'författare': 'authors', u' n/a': 'author', u'organisation': 'organization', u'adress': 'address', diff --git a/Libs/docutils/nodes.py b/Libs/docutils/nodes.py index 7c172ee..61fde1e 100644 --- a/Libs/docutils/nodes.py +++ b/Libs/docutils/nodes.py @@ -1,4 +1,4 @@ -# $Id: nodes.py 7595 2013-01-21 17:33:56Z milde $ +# $Id: nodes.py 7788 2015-02-16 22:10:52Z milde $ # Author: David Goodger # Maintainer: docutils-develop@lists.sourceforge.net # Copyright: This module has been placed in the public domain. 
@@ -304,10 +304,8 @@ if sys.version_info < (3,): def __repr__(self): return unicode.__repr__(self)[1:] - - else: - reprunicode = unicode + reprunicode = str def ensure_str(s): @@ -533,7 +531,7 @@ class Element(Node): parts = [self.tagname] for name, value in self.attlist(): if value is None: # boolean attribute - parts.append(name) + parts.append('%s="True"' % name) continue if isinstance(value, list): values = [serial_escape('%s' % (v,)) for v in value] @@ -571,7 +569,7 @@ class Element(Node): assert key.step in (None, 1), 'cannot handle slice with stride' return self.children[key.start:key.stop] else: - raise TypeError, ('element index must be an integer, a slice, or ' + raise TypeError('element index must be an integer, a slice, or ' 'an attribute name string') def __setitem__(self, key, item): @@ -586,7 +584,7 @@ class Element(Node): self.setup_child(node) self.children[key.start:key.stop] = item else: - raise TypeError, ('element index must be an integer, a slice, or ' + raise TypeError('element index must be an integer, a slice, or ' 'an attribute name string') def __delitem__(self, key): @@ -598,7 +596,7 @@ class Element(Node): assert key.step in (None, 1), 'cannot handle slice with stride' del self.children[key.start:key.stop] else: - raise TypeError, ('element index must be an integer, a simple ' + raise TypeError('element index must be an integer, a simple ' 'slice, or an attribute name string') def __add__(self, other): @@ -954,7 +952,7 @@ class Element(Node): 'Losing "%s" attribute: %s' % (att, self[att]) self.parent.replace(self, new) - def first_child_matching_class(self, childclass, start=0, end=sys.maxint): + def first_child_matching_class(self, childclass, start=0, end=sys.maxsize): """ Return the index of the first child whose class exactly matches. @@ -974,7 +972,7 @@ class Element(Node): return None def first_child_not_matching_class(self, childclass, start=0, - end=sys.maxint): + end=sys.maxsize): """ Return the index of the first child whose class does *not* match. @@ -1674,7 +1672,7 @@ class system_message(Special, BackLinkable, PreBibliographic, Element): try: Element.__init__(self, '', *children, **attributes) except: - print 'system_message: children=%r' % (children,) + print('system_message: children=%r' % (children,)) raise def astext(self): diff --git a/Libs/docutils/parsers/rst/__init__.py b/Libs/docutils/parsers/rst/__init__.py index ed70e0e..35e6f55 100644 --- a/Libs/docutils/parsers/rst/__init__.py +++ b/Libs/docutils/parsers/rst/__init__.py @@ -1,4 +1,4 @@ -# $Id: __init__.py 7598 2013-01-30 12:39:24Z milde $ +# $Id: __init__.py 8068 2017-05-08 22:10:39Z milde $ # Author: David Goodger # Copyright: This module has been placed in the public domain. 
@@ -101,9 +101,9 @@ class Parser(docutils.parsers.Parser): ('Recognize and link to standalone RFC references (like "RFC 822").', ['--rfc-references'], {'action': 'store_true', 'validator': frontend.validate_boolean}), - ('Base URL for RFC references (default "http://www.faqs.org/rfcs/").', + ('Base URL for RFC references (default "http://tools.ietf.org/html/").', ['--rfc-base-url'], - {'metavar': '', 'default': 'http://www.faqs.org/rfcs/', + {'metavar': '', 'default': 'http://tools.ietf.org/html/', 'validator': frontend.validate_url_trailing_slash}), ('Set number of spaces for tab expansion (default 8).', ['--tab-width'], @@ -141,7 +141,26 @@ class Parser(docutils.parsers.Parser): ('Change straight quotation marks to typographic form: ' 'one of "yes", "no", "alt[ernative]" (default "no").', ['--smart-quotes'], - {'default': False, 'validator': frontend.validate_ternary}), + {'default': False, 'metavar': '', + 'validator': frontend.validate_ternary}), + ('Characters to use as "smart quotes" for . ', + ['--smartquotes-locales'], + {'metavar': '', + 'action': 'append', + 'validator': frontend.validate_smartquotes_locales}), + ('Inline markup recognized at word boundaries only ' + '(adjacent to punctuation or whitespace). ' + 'Force character-level inline markup recognition with ' + '"\\ " (backslash + space). Default.', + ['--word-level-inline-markup'], + {'action': 'store_false', 'dest': 'character_level_inline_markup'}), + ('Inline markup recognized anywhere, regardless of surrounding ' + 'characters. Backslash-escapes must be used to avoid unwanted ' + 'markup recognition. Useful for East Asian languages. ' + 'Experimental.', + ['--character-level-inline-markup'], + {'action': 'store_true', 'default': False, + 'dest': 'character_level_inline_markup'}), )) config_section = 'restructuredtext parser' @@ -249,12 +268,6 @@ class Directive(object): - ``lineno`` is the absolute line number of the first line of the directive. - - ``src`` is the name (or path) of the rst source of the directive. - - - ``srcline`` is the line number of the first line of the directive - in its source. It may differ from ``lineno``, if the main source - includes other sources with the ``.. include::`` directive. - - ``content_offset`` is the line offset of the first line of the content from the beginning of the current input. Used when initiating a nested parse. diff --git a/Libs/docutils/parsers/rst/directives/__init__.py b/Libs/docutils/parsers/rst/directives/__init__.py index 6e843b8..5109058 100644 --- a/Libs/docutils/parsers/rst/directives/__init__.py +++ b/Libs/docutils/parsers/rst/directives/__init__.py @@ -1,4 +1,4 @@ -# $Id: __init__.py 7621 2013-03-04 13:20:49Z milde $ +# $Id: __init__.py 8024 2017-02-06 00:41:48Z goodger $ # Author: David Goodger # Copyright: This module has been placed in the public domain. @@ -13,6 +13,7 @@ import codecs import sys from docutils import nodes +from docutils.utils import split_escaped_whitespace, escape2null, unescape from docutils.parsers.rst.languages import en as _fallback_language_module if sys.version_info < (2,5): from docutils._compat import __import__ @@ -189,7 +190,7 @@ def path(argument): def uri(argument): """ - Return the URI argument with whitespace removed. + Return the URI argument with unescaped whitespace removed. (Directive option conversion function.) Raise ``ValueError`` if no argument is found. 
@@ -197,7 +198,8 @@ def uri(argument): if argument is None: raise ValueError('argument required but none supplied') else: - uri = ''.join(argument.split()) + parts = split_escaped_whitespace(escape2null(argument)) + uri = ' '.join(''.join(unescape(part).split()) for part in parts) return uri def nonnegative_int(argument): @@ -402,3 +404,15 @@ def choice(argument, values): def format_values(values): return '%s, or "%s"' % (', '.join(['"%s"' % s for s in values[:-1]]), values[-1]) + +def value_or(values, other): + """ + The argument can be any of `values` or `argument_type`. + """ + def auto_or_other(argument): + if argument in values: + return argument + else: + return other(argument) + return auto_or_other + diff --git a/Libs/docutils/parsers/rst/directives/misc.py b/Libs/docutils/parsers/rst/directives/misc.py index 643bc70..f843bdc 100644 --- a/Libs/docutils/parsers/rst/directives/misc.py +++ b/Libs/docutils/parsers/rst/directives/misc.py @@ -1,4 +1,4 @@ -# $Id: misc.py 7487 2012-07-22 21:20:28Z milde $ +# $Id: misc.py 7961 2016-07-28 22:02:47Z milde $ # Authors: David Goodger ; Dethe Elza # Copyright: This module has been placed in the public domain. @@ -231,7 +231,7 @@ class Raw(Directive): raise self.severe(u'Problems with "%s" directive URL "%s":\n%s.' % (self.name, self.options['url'], ErrorString(error))) raw_file = io.StringInput(source=raw_text, source_path=source, - encoding=encoding, + encoding=encoding, error_handler=e_handler) try: text = raw_file.read() @@ -477,6 +477,24 @@ class Date(Directive): except UnicodeEncodeError: raise self.warning(u'Cannot encode date format string ' u'with locale encoding "%s".' % locale_encoding) + # @@@ + # Use timestamp from the `SOURCE_DATE_EPOCH`_ environment variable? + # Pro: Docutils-generated documentation + # can easily be part of `reproducible software builds`__ + # + # __ https://reproducible-builds.org/ + # + # Con: Changes the specs, hard to predict behaviour, + # no actual use case! + # + # See also the discussion about \date \time \year in TeX + # http://tug.org/pipermail/tex-k/2016-May/002704.html + # source_date_epoch = os.environ.get('SOURCE_DATE_EPOCH') + # if (source_date_epoch + # and self.state.document.settings.use_source_date_epoch): + # text = time.strftime(format_str, + # time.gmtime(int(source_date_epoch))) + # else: text = time.strftime(format_str) if sys.version_info< (3, 0): # `text` is a byte string that may contain non-ASCII characters: diff --git a/Libs/docutils/parsers/rst/directives/tables.py b/Libs/docutils/parsers/rst/directives/tables.py index 67fda63..f1977e4 100644 --- a/Libs/docutils/parsers/rst/directives/tables.py +++ b/Libs/docutils/parsers/rst/directives/tables.py @@ -1,4 +1,4 @@ -# $Id: tables.py 7747 2014-03-20 10:51:10Z milde $ +# $Id: tables.py 8039 2017-02-28 12:19:20Z milde $ # Authors: David Goodger ; David Priest # Copyright: This module has been placed in the public domain. 
@@ -20,6 +20,10 @@ from docutils.parsers.rst import Directive from docutils.parsers.rst import directives +def align(argument): + return directives.choice(argument, ('left', 'center', 'right')) + + class Table(Directive): """ @@ -29,7 +33,10 @@ class Table(Directive): optional_arguments = 1 final_argument_whitespace = True option_spec = {'class': directives.class_option, - 'name': directives.unchanged} + 'name': directives.unchanged, + 'align': align, + 'widths': directives.value_or(('auto', 'grid'), + directives.positive_int_list)} has_content = True def make_title(self): @@ -38,6 +45,8 @@ class Table(Directive): text_nodes, messages = self.state.inline_text(title_text, self.lineno) title = nodes.title(title_text, '', *text_nodes) + (title.source, + title.line) = self.state_machine.get_source_and_line(self.lineno) else: title = None messages = [] @@ -85,15 +94,19 @@ class Table(Directive): self.block_text, self.block_text), line=self.lineno) raise SystemMessagePropagation(error) + @property + def widths(self): + return self.options.get('widths', '') + def get_column_widths(self, max_cols): - if 'widths' in self.options: - col_widths = self.options['widths'] - if len(col_widths) != max_cols: + if type(self.widths) == list: + if len(self.widths) != max_cols: error = self.state_machine.reporter.error( '"%s" widths do not match the number of columns in table ' '(%s).' % (self.name, max_cols), nodes.literal_block( self.block_text, self.block_text), line=self.lineno) raise SystemMessagePropagation(error) + col_widths = self.widths elif max_cols: col_widths = [100 // max_cols] * max_cols else: @@ -130,6 +143,21 @@ class RSTTable(Table): return [error] table_node = node[0] table_node['classes'] += self.options.get('class', []) + if 'align' in self.options: + table_node['align'] = self.options.get('align') + tgroup = table_node[0] + if type(self.widths) == list: + colspecs = [child for child in tgroup.children + if child.tagname == 'colspec'] + for colspec, col_width in zip(colspecs, self.widths): + colspec['colwidth'] = col_width + # @@@ the colwidths argument for is not part of the + # XML Exchange Table spec (https://www.oasis-open.org/specs/tm9901.htm) + # and hence violates the docutils.dtd. 
+ if self.widths == 'auto': + table_node['classes'] += ['colwidths-auto'] + elif self.widths: # "grid" or list of integers + table_node['classes'] += ['colwidths-given'] self.add_name(table_node) if title: table_node.insert(0, title) @@ -141,12 +169,14 @@ class CSVTable(Table): option_spec = {'header-rows': directives.nonnegative_int, 'stub-columns': directives.nonnegative_int, 'header': directives.unchanged, - 'widths': directives.positive_int_list, + 'widths': directives.value_or(('auto', ), + directives.positive_int_list), 'file': directives.path, 'url': directives.uri, 'encoding': directives.encoding, 'class': directives.class_option, 'name': directives.unchanged, + 'align': align, # field delimiter char 'delim': directives.single_char_or_whitespace_or_unicode, # treat whitespace after delimiter as significant @@ -235,8 +265,10 @@ class CSVTable(Table): return [error] table = (col_widths, table_head, table_body) table_node = self.state.build_table(table, self.content_offset, - stub_columns) + stub_columns, widths=self.widths) table_node['classes'] += self.options.get('class', []) + if 'align' in self.options: + table_node['align'] = self.options.get('align') self.add_name(table_node) if title: table_node.insert(0, title) @@ -356,13 +388,15 @@ class ListTable(Table): Implement tables whose data is encoded as a uniform two-level bullet list. For further ideas, see http://docutils.sf.net/docs/dev/rst/alternatives.html#list-driven-tables - """ + """ option_spec = {'header-rows': directives.nonnegative_int, 'stub-columns': directives.nonnegative_int, - 'widths': directives.positive_int_list, + 'widths': directives.value_or(('auto', ), + directives.positive_int_list), 'class': directives.class_option, - 'name': directives.unchanged} + 'name': directives.unchanged, + 'align': align} def run(self): if not self.content: @@ -385,6 +419,8 @@ class ListTable(Table): return [detail.args[0]] table_node = self.build_table_from_list(table_data, col_widths, header_rows, stub_columns) + if 'align' in self.options: + table_node['align'] = self.options.get('align') table_node['classes'] += self.options.get('class', []) self.add_name(table_node) if title: @@ -432,10 +468,16 @@ class ListTable(Table): def build_table_from_list(self, table_data, col_widths, header_rows, stub_columns): table = nodes.table() + if self.widths == 'auto': + table['classes'] += ['colwidths-auto'] + elif self.widths: # "grid" or list of integers + table['classes'] += ['colwidths-given'] tgroup = nodes.tgroup(cols=len(col_widths)) table += tgroup for col_width in col_widths: - colspec = nodes.colspec(colwidth=col_width) + colspec = nodes.colspec() + if col_width is not None: + colspec.attributes['colwidth'] = col_width if stub_columns: colspec.attributes['stub'] = 1 stub_columns -= 1 diff --git a/Libs/docutils/parsers/rst/languages/de.py b/Libs/docutils/parsers/rst/languages/de.py index a187876..92ea234 100644 --- a/Libs/docutils/parsers/rst/languages/de.py +++ b/Libs/docutils/parsers/rst/languages/de.py @@ -1,5 +1,5 @@ # -*- coding: utf-8 -*- -# $Id: de.py 7223 2011-11-21 16:43:06Z milde $ +# $Id: de.py 8006 2016-12-22 23:02:44Z milde $ # Authors: Engelbert Gruber ; # Lea Wiemann # Copyright: This module has been placed in the public domain. 
@@ -30,13 +30,14 @@ directives = { 'warnung': 'warning', 'ermahnung': 'admonition', 'kasten': 'sidebar', - 'seitenkasten': 'sidebar', + 'seitenkasten': 'sidebar', # kept for backwards compatibiltity + 'seitenleiste': 'sidebar', 'thema': 'topic', - 'zeilen-block': 'line-block', + 'zeilenblock': 'line-block', 'parsed-literal (translation required)': 'parsed-literal', 'rubrik': 'rubric', 'epigraph': 'epigraph', - 'highlights (translation required)': 'highlights', + 'highlights': 'highlights', u'pull-quote': 'pull-quote', # commonly used in German too u'seitenansprache': 'pull-quote', # cf. http://www.typografie.info/2/wiki.php?title=Seitenansprache 'zusammengesetzt': 'compound', @@ -45,7 +46,7 @@ directives = { #'fragen': 'questions', 'tabelle': 'table', 'csv-tabelle': 'csv-table', - 'list-table (translation required)': 'list-table', + 'listentabelle': 'list-table', u'mathe': 'math', u'formel': 'math', 'meta': 'meta', @@ -62,14 +63,14 @@ directives = { 'datum': 'date', 'klasse': 'class', 'rolle': 'role', - u'default-role (translation required)': 'default-role', - u'title (translation required)': 'title', + u'standardrolle': 'default-role', + u'titel': 'title', 'inhalt': 'contents', - 'kapitel-nummerierung': 'sectnum', - 'abschnitts-nummerierung': 'sectnum', - u'linkziel-fußfnoten': 'target-notes', - u'header (translation required)': 'header', - u'footer (translation required)': 'footer', + u'kapitelnummerierung': 'sectnum', + u'abschnittsnummerierung': 'sectnum', + u'linkziel-fußnoten': 'target-notes', + u'kopfzeilen': 'header', + u'fußzeilen': 'footer', #u'fußfnoten': 'footnotes', #'zitate': 'citations', } @@ -86,7 +87,8 @@ roles = { 'titel-referenz': 'title-reference', 'pep-referenz': 'pep-reference', 'rfc-referenz': 'rfc-reference', - 'betonung': 'emphasis', + 'betonung': 'emphasis', # for backwards compatibility + 'betont': 'emphasis', 'fett': 'strong', u'wörtlich': 'literal', u'mathe': 'math', diff --git a/Libs/docutils/parsers/rst/languages/fa.py b/Libs/docutils/parsers/rst/languages/fa.py new file mode 100644 index 0000000..5547759 --- /dev/null +++ b/Libs/docutils/parsers/rst/languages/fa.py @@ -0,0 +1,102 @@ +# -*- coding: utf-8 -*- +# $Id: fa.py 4564 2016-08-10 11:48:42Z +# Author: Shahin +# Copyright: This module has been placed in the public domain. + +# New language mappings are welcome. Before doing a new translation, please +# read . Two files must be +# translated for each language: one in docutils/languages, the other in +# docutils/parsers/rst/languages. + +""" +Persian-language mappings for language-dependent features of +reStructuredText. 
+""" + +__docformat__ = 'reStructuredText' + + +directives = { + # language-dependent: fixed + u'توجه': u'attention', + u'احتیاط': u'caution', + u'کد': u'code', + u'بلوک-کد': u'code', + u'کد-منبع': u'code', + u'خطر': u'danger', + u'خطا': u'error', + u'راهنما': u'hint', + u'مهم': u'important', + u'یادداشت': u'note', + u'نکته': u'tip', + u'اخطار': u'warning', + u'تذکر': u'admonition', + u'نوار-کناری': u'sidebar', + u'موضوع': u'topic', + u'بلوک-خط': u'line-block', + u'تلفظ-پردازش-شده': u'parsed-literal', + u'سر-فصل': u'rubric', + u'کتیبه': u'epigraph', + u'نکات-برجسته': u'highlights', + u'نقل-قول': u'pull-quote', + u'ترکیب': u'compound', + u'ظرف': u'container', + #'questions': u'questions', + u'جدول': u'table', + u'جدول-csv': u'csv-table', + u'جدول-لیست': u'list-table', + #'qa': u'questions', + #'faq': u'questions', + u'متا': u'meta', + u'ریاضی': u'math', + #'imagemap': u'imagemap', + u'تصویر': u'image', + u'شکل': u'figure', + u'شامل': u'include', + u'خام': u'raw', + u'جایگزین': u'replace', + u'یونیکد': u'unicode', + u'تاریخ': u'date', + u'کلاس': u'class', + u'قانون': u'role', + u'قانون-پیش‌فرض': u'default-role', + u'عنوان': u'title', + u'محتوا': u'contents', + u'شماره-فصل': u'sectnum', + u'شماره‌گذاری-فصل': u'sectnum', + u'سرآیند': u'header', + u'پاصفحه': u'footer', + #'footnotes': u'footnotes', + #'citations': u'citations', + u'یادداشت-هدف': u'target-notes', + } +"""Persian name to registered (in directives/__init__.py) directive name +mapping.""" + +roles = { + # language-dependent: fixed + u'مخفف': u'abbreviation', + u'سرنام': u'acronym', + u'کد': u'code', + u'شاخص': u'index', + u'زیرنویس': u'subscript', + u'بالانویس': u'superscript', + u'عنوان': u'title-reference', + u'نیرو': u'pep-reference', + u'rfc-reference (translation required)': u'rfc-reference', + u'تاکید': u'emphasis', + u'قوی': u'strong', + u'لفظی': u'literal', + u'ریاضی': u'math', + u'منبع-نام‌گذاری': u'named-reference', + u'منبع-ناشناس': u'anonymous-reference', + u'منبع-پانویس': u'footnote-reference', + u'منبع-نقل‌فول': u'citation-reference', + u'منبع-جایگزینی': u'substitution-reference', + u'هدف': u'target', + u'منبع-uri': u'uri-reference', + u'uri': u'uri-reference', + u'url': u'uri-reference', + u'خام': u'raw',} +"""Mapping of Persian role names to canonical role names for interpreted text. +""" diff --git a/Libs/docutils/parsers/rst/languages/lv.py b/Libs/docutils/parsers/rst/languages/lv.py new file mode 100644 index 0000000..3f8313c --- /dev/null +++ b/Libs/docutils/parsers/rst/languages/lv.py @@ -0,0 +1,108 @@ +# -*- coding: utf-8 -*- +# $Id: lv.py 7975 2016-10-20 20:00:19Z milde $ +# Copyright: This module has been placed in the public domain. + +# New language mappings are welcome. Before doing a new translation, please +# read . Two files must be +# translated for each language: one in docutils/languages, the other in +# docutils/parsers/rst/languages. + +""" +Latvian-language mappings for language-dependent features of +reStructuredText. 
+""" + +__docformat__ = 'reStructuredText' + + +directives = { + # language-dependent: fixed + 'uzmanību': 'attention', + 'piesardzību': 'caution', + 'kods': 'code', + 'koda-bloks': 'code', + 'pirmkods': 'code', + 'bīstami': 'danger', + 'kļūda': 'error', + 'ieteikums': 'hint', + 'svarīgi': 'important', + 'piezīme': 'note', + 'padoms': 'tip', + 'brīdinājums': 'warning', + 'aizrādījums': 'admonition', + 'sānjosla': 'sidebar', + 'tēma': 'topic', + 'rindu-bloks': 'line-block', + 'parsēts-literālis': 'parsed-literal', + 'rubrika': 'rubric', + 'epigrāfs': 'epigraph', + 'apskats': 'highlights', + 'izvilkuma-citāts': 'pull-quote', + 'savienojums': 'compound', + 'konteiners': 'container', + #'questions': 'questions', + 'tabula': 'table', + 'csv-tabula': 'csv-table', + 'sarakstveida-tabula': 'list-table', + #'qa': 'questions', + #'faq': 'questions', + 'meta': 'meta', + 'matemātika': 'math', + #'imagemap': 'imagemap', + 'attēls': 'image', + 'figūra': 'figure', + 'ietvert': 'include', + 'burtiski': 'raw', + 'aizvieto': 'replace', + 'unicode': 'unicode', + 'datums': 'date', + 'klase': 'class', + 'role': 'role', + 'noklusējuma-role': 'default-role', + 'virsraksts': 'title', + 'saturs': 'contents', + 'numurēt-sekcijas': 'sectnum', + 'galvene': 'header', + 'kājene': 'footer', + #'footnotes': 'footnotes', + #'citations': 'citations', + 'atsauces-apakšā': 'target-notes', + 'restructuredtext-testa-direktīva': 'restructuredtext-test-directive'} +"""English name to registered (in directives/__init__.py) directive name +mapping.""" + +roles = { + # language-dependent: fixed + 'saīsinājums': 'abbreviation', + 'īsi': 'abbreviation', + 'akronīms': 'acronym', + 'kods': 'code', + 'indekss': 'index', + 'i': 'index', + 'apakšraksts': 'subscript', + 'apakšā': 'subscript', + 'augšraksts': 'superscript', + 'augšā': 'superscript', + 'virsraksta-atsauce': 'title-reference', + 'virsraksts': 'title-reference', + 'v': 'title-reference', + 'atsauce-uz-pep': 'pep-reference', + 'pep': 'pep-reference', + 'atsauce-uz-rfc': 'rfc-reference', + 'rfc': 'rfc-reference', + 'izcēlums': 'emphasis', + 'blīvs': 'strong', + 'literālis': 'literal', + 'matemātika': 'math', + 'nosaukta-atsauce': 'named-reference', + 'nenosaukta-atsauce': 'anonymous-reference', + 'kājenes-atsauce': 'footnote-reference', + 'citātā-atsauce': 'citation-reference', + 'aizvietojuma-atsauce': 'substitution-reference', + 'mēr''kis': 'target', + 'atsauce-uz-uri': 'uri-reference', + 'uri': 'uri-reference', + 'url': 'uri-reference', + 'burtiski': 'raw',} +"""Mapping of English role names to canonical role names for interpreted text. +""" diff --git a/Libs/docutils/parsers/rst/languages/sv.py b/Libs/docutils/parsers/rst/languages/sv.py index 7025f5f..01363bd 100644 --- a/Libs/docutils/parsers/rst/languages/sv.py +++ b/Libs/docutils/parsers/rst/languages/sv.py @@ -1,4 +1,5 @@ -# $Id: sv.py 7119 2011-09-02 13:00:23Z milde $ +# -*- coding: utf-8 -*- +# $Id: sv.py 8012 2017-01-03 23:08:19Z milde $ # Author: Adam Chodorowski # Copyright: This module has been placed in the public domain. @@ -13,55 +14,55 @@ Swedish language mappings for language-dependent features of reStructuredText. 
__docformat__ = 'reStructuredText' - directives = { u'observera': 'attention', - u'caution (translation required)': 'caution', - u'code (translation required)': 'code', + u'akta': 'caution', # also 'försiktigt' + u'kod': 'code', u'fara': 'danger', u'fel': 'error', - u'v\u00e4gledning': 'hint', + u'vink': 'hint', # also 'hint' u'viktigt': 'important', u'notera': 'note', u'tips': 'tip', u'varning': 'warning', - u'admonition (translation required)': 'admonition', - u'sidebar (translation required)': 'sidebar', - u'\u00e4mne': 'topic', - u'line-block (translation required)': 'line-block', - u'parsed-literal (translation required)': 'parsed-literal', - u'mellanrubrik': 'rubric', - u'epigraph (translation required)': 'epigraph', - u'highlights (translation required)': 'highlights', + u'anmärkning': 'admonition', # literal 'tillrättavisning', 'förmaning' + u'sidorad': 'sidebar', + u'ämne': 'topic', + u'tema': 'topic', + u'rad-block': 'line-block', + u'parsed-literal (translation required)': 'parsed-literal', # 'tolkad-bokstavlig'? + u'rubrik': 'rubric', + u'epigraf': 'epigraph', + u'höjdpunkter': 'highlights', u'pull-quote (translation required)': 'pull-quote', - u'compound (translation required)': 'compound', - u'container (translation required)': 'container', - # u'fr\u00e5gor': 'questions', + u'sammansatt': 'compound', + u'container': 'container', + # u'frågor': 'questions', # NOTE: A bit long, but recommended by http://www.nada.kth.se/dataterm/: - # u'fr\u00e5gor-och-svar': 'questions', - # u'vanliga-fr\u00e5gor': 'questions', - u'table (translation required)': 'table', - u'csv-table (translation required)': 'csv-table', - u'list-table (translation required)': 'list-table', + # u'frågor-och-svar': 'questions', + # u'vanliga-frågor': 'questions', + u'tabell': 'table', + u'csv-tabell': 'csv-table', + u'list-tabell': 'list-table', u'meta': 'meta', - 'math (translation required)': 'math', + u'matematik': 'math', # u'bildkarta': 'imagemap', # FIXME: Translation might be too literal. u'bild': 'image', u'figur': 'figure', - u'inkludera': 'include', - u'r\u00e5': 'raw', # FIXME: Translation might be too literal. 
- u'ers\u00e4tt': 'replace', + u'inkludera': 'include', + u'rå': 'raw', + u'ersätta': 'replace', u'unicode': 'unicode', u'datum': 'date', - u'class (translation required)': 'class', - u'role (translation required)': 'role', - u'default-role (translation required)': 'default-role', - u'title (translation required)': 'title', - u'inneh\u00e5ll': 'contents', + u'klass': 'class', + u'roll': 'role', + u'standardroll': 'default-role', + u'titel': 'title', + u'innehåll': 'contents', u'sektionsnumrering': 'sectnum', u'target-notes (translation required)': 'target-notes', - u'header (translation required)': 'header', - u'footer (translation required)': 'footer', + u'sidhuvud': 'header', + u'sidfot': 'footer', # u'fotnoter': 'footnotes', # u'citeringar': 'citations', } @@ -69,26 +70,26 @@ directives = { mapping.""" roles = { - u'abbreviation (translation required)': 'abbreviation', - u'acronym (translation required)': 'acronym', - u'code (translation required)': 'code', - u'index (translation required)': 'index', - u'subscript (translation required)': 'subscript', - u'superscript (translation required)': 'superscript', - u'title-reference (translation required)': 'title-reference', - u'pep-reference (translation required)': 'pep-reference', - u'rfc-reference (translation required)': 'rfc-reference', - u'emphasis (translation required)': 'emphasis', - u'strong (translation required)': 'strong', - u'literal (translation required)': 'literal', - 'math (translation required)': 'math', - u'named-reference (translation required)': 'named-reference', - u'anonymous-reference (translation required)': 'anonymous-reference', - u'footnote-reference (translation required)': 'footnote-reference', - u'citation-reference (translation required)': 'citation-reference', - u'substitution-reference (translation required)': 'substitution-reference', - u'target (translation required)': 'target', - u'uri-reference (translation required)': 'uri-reference', - u'r\u00e5': 'raw',} + u'förkortning': 'abbreviation', + u'akronym': 'acronym', + u'kod': 'code', + u'index': 'index', + u'nedsänkt': 'subscript', + u'upphöjd': 'superscript', + u'titel-referens': 'title-reference', + u'pep-referens': 'pep-reference', + u'rfc-referens': 'rfc-reference', + u'betoning': 'emphasis', + u'stark': 'strong', + u'bokstavlig': 'literal', # also 'ordagranna' + u'matematik': 'math', + u'namngiven-referens': 'named-reference', + u'anonym-referens': 'anonymous-reference', + u'fotnot-referens': 'footnote-reference', + u'citat-referens': 'citation-reference', + u'ersättnings-referens': 'substitution-reference', + u'mål': 'target', + u'uri-referens': 'uri-reference', + u'rå': 'raw',} """Mapping of Swedish role names to canonical role names for interpreted text. """ diff --git a/Libs/docutils/parsers/rst/roles.py b/Libs/docutils/parsers/rst/roles.py index b6dca44..7fa8c1f 100644 --- a/Libs/docutils/parsers/rst/roles.py +++ b/Libs/docutils/parsers/rst/roles.py @@ -1,4 +1,4 @@ -# $Id: roles.py 7514 2012-09-14 14:27:12Z milde $ +# $Id: roles.py 7937 2016-05-24 10:48:48Z milde $ # Author: Edward Loper # Copyright: This module has been placed in the public domain. 
@@ -334,7 +334,7 @@ def code_role(role, rawtext, text, lineno, inliner, options={}, content=[]): node = nodes.literal(rawtext, '', classes=classes) - # analyze content and add nodes for every token + # analyse content and add nodes for every token for classes, value in tokens: # print (classes, value) if classes: @@ -351,9 +351,10 @@ code_role.options = {'class': directives.class_option, register_canonical_role('code', code_role) def math_role(role, rawtext, text, lineno, inliner, options={}, content=[]): + set_classes(options) i = rawtext.find('`') text = rawtext.split('`')[1] - node = nodes.math(rawtext, text) + node = nodes.math(rawtext, text, **options) return [node], [] register_canonical_role('math', math_role) diff --git a/Libs/docutils/parsers/rst/states.py b/Libs/docutils/parsers/rst/states.py index 30f48df..fa4c507 100644 --- a/Libs/docutils/parsers/rst/states.py +++ b/Libs/docutils/parsers/rst/states.py @@ -1,4 +1,4 @@ -# $Id: states.py 7640 2013-03-25 20:57:52Z milde $ +# $Id: states.py 8060 2017-04-19 20:00:04Z milde $ # Author: David Goodger # Copyright: This module has been placed in the public domain. @@ -117,6 +117,7 @@ from docutils.parsers.rst import directives, languages, tableparser, roles from docutils.parsers.rst.languages import en as _fallback_language_module from docutils.utils import escape2null, unescape, column_width from docutils.utils import punctuation_chars, roman, urischemes +from docutils.utils import split_escaped_whitespace class MarkupError(DataError): pass class UnknownInterpretedRoleError(DataError): pass @@ -225,7 +226,6 @@ class RSTState(StateWS): # enable the reporter to determine source and source-line if not hasattr(self.reporter, 'get_source_and_line'): self.reporter.get_source_and_line = self.state_machine.get_source_and_line - # print "adding get_source_and_line to reporter", self.state_machine.input_offset def goto_line(self, abs_line_offset): @@ -464,12 +464,144 @@ class Inliner: """ def __init__(self): - self.implicit_dispatch = [(self.patterns.uri, self.standalone_uri),] + self.implicit_dispatch = [] """List of (pattern, bound method) tuples, used by `self.implicit_inline`.""" def init_customizations(self, settings): - """Setting-based customizations; run when parsing begins.""" + # lookahead and look-behind expressions for inline markup rules + if getattr(settings, 'character_level_inline_markup', False): + start_string_prefix = u'(^|(?%s)(?P__?)' % self.simplename, + ('footnotelabel', r'\[', r'(?P\]_)', + [r'[0-9]+', # manually numbered + r'\#(%s)?' % self.simplename, # auto-numbered (w/ label?) + r'\*', # auto-symbol + r'(?P%s)' % self.simplename] # citation reference + ) + ] + ), + ('backquote', # interpreted text or phrase reference + '(?P(:%s:)?)' % self.simplename, # optional role + self.non_whitespace_after, + ['`(?!`)'] # but not literal + ) + ] + ) + self.start_string_prefix = start_string_prefix + self.end_string_suffix = end_string_suffix + self.parts = parts + + self.patterns = Struct( + initial=build_regexp(parts), + emphasis=re.compile(self.non_whitespace_escape_before + + r'(\*)' + end_string_suffix, re.UNICODE), + strong=re.compile(self.non_whitespace_escape_before + + r'(\*\*)' + end_string_suffix, re.UNICODE), + interpreted_or_phrase_ref=re.compile( + r""" + %(non_unescaped_whitespace_escape_before)s + ( + ` + (?P + (?P:%(simplename)s:)? + (?P__?)? 
+ ) + ) + %(end_string_suffix)s + """ % args, re.VERBOSE | re.UNICODE), + embedded_link=re.compile( + r""" + ( + (?:[ \n]+|^) # spaces or beginning of line/string + < # open bracket + %(non_whitespace_after)s + (([^<>]|\x00[<>])+) # anything but unescaped angle brackets + %(non_whitespace_escape_before)s + > # close bracket + ) + $ # end of string + """ % args, re.VERBOSE | re.UNICODE), + literal=re.compile(self.non_whitespace_before + '(``)' + + end_string_suffix, re.UNICODE), + target=re.compile(self.non_whitespace_escape_before + + r'(`)' + end_string_suffix, re.UNICODE), + substitution_ref=re.compile(self.non_whitespace_escape_before + + r'(\|_{0,2})' + + end_string_suffix, re.UNICODE), + email=re.compile(self.email_pattern % args + '$', + re.VERBOSE | re.UNICODE), + uri=re.compile( + (r""" + %(start_string_prefix)s + (?P + (?P # absolute URI + (?P # scheme (http, ftp, mailto) + [a-zA-Z][a-zA-Z0-9.+-]* + ) + : + ( + ( # either: + (//?)? # hierarchical URI + %(uric)s* # URI characters + %(uri_end)s # final URI char + ) + ( # optional query + \?%(uric)s* + %(uri_end)s + )? + ( # optional fragment + \#%(uric)s* + %(uri_end)s + )? + ) + ) + | # *OR* + (?P # email address + """ + self.email_pattern + r""" + ) + ) + %(end_string_suffix)s + """) % args, re.VERBOSE | re.UNICODE), + pep=re.compile( + r""" + %(start_string_prefix)s + ( + (pep-(?P\d+)(.txt)?) # reference to source file + | + (PEP\s+(?P\d+)) # reference by name + ) + %(end_string_suffix)s""" % args, re.VERBOSE | re.UNICODE), + rfc=re.compile( + r""" + %(start_string_prefix)s + (RFC(-|\s+)?(?P\d+)) + %(end_string_suffix)s""" % args, re.VERBOSE | re.UNICODE)) + + self.implicit_dispatch.append((self.patterns.uri, + self.standalone_uri)) if settings.pep_references: self.implicit_dispatch.append((self.patterns.pep, self.pep_reference)) @@ -527,20 +659,11 @@ class Inliner: # Inline object recognition # ------------------------- - # lookahead and look-behind expressions for inline markup rules - start_string_prefix = (u'(^|(?<=\\s|[%s%s]))' % - (punctuation_chars.openers, - punctuation_chars.delimiters)) - end_string_suffix = (u'($|(?=\\s|[\x00%s%s%s]))' % - (punctuation_chars.closing_delimiters, - punctuation_chars.delimiters, - punctuation_chars.closers)) - # print start_string_prefix.encode('utf8') - # TODO: support non-ASCII whitespace in the following 4 patterns? - non_whitespace_before = r'(?%s)(?P__?)' % simplename, - ('footnotelabel', r'\[', r'(?P\]_)', - [r'[0-9]+', # manually numbered - r'\#(%s)?' % simplename, # auto-numbered (w/ label?) - r'\*', # auto-symbol - r'(?P%s)' % simplename] # citation reference - ) - ] - ), - ('backquote', # interpreted text or phrase reference - '(?P(:%s:)?)' % simplename, # optional role - non_whitespace_after, - ['`(?!`)'] # but not literal - ) - ] - ) - patterns = Struct( - initial=build_regexp(parts), - emphasis=re.compile(non_whitespace_escape_before - + r'(\*)' + end_string_suffix, re.UNICODE), - strong=re.compile(non_whitespace_escape_before - + r'(\*\*)' + end_string_suffix, re.UNICODE), - interpreted_or_phrase_ref=re.compile( - r""" - %(non_unescaped_whitespace_escape_before)s - ( - ` - (?P - (?P:%(simplename)s:)? - (?P__?)? - ) - ) - %(end_string_suffix)s - """ % locals(), re.VERBOSE | re.UNICODE), - embedded_link=re.compile( - r""" - ( - (?:[ \n]+|^) # spaces or beginning of line/string - < # open bracket - %(non_whitespace_after)s - ([^<>\x00]+(\x00_)?) 
# anything but angle brackets & nulls - # except escaped trailing low line - %(non_whitespace_before)s - > # close bracket w/o whitespace before - ) - $ # end of string - """ % locals(), re.VERBOSE | re.UNICODE), - literal=re.compile(non_whitespace_before + '(``)' - + end_string_suffix), - target=re.compile(non_whitespace_escape_before - + r'(`)' + end_string_suffix), - substitution_ref=re.compile(non_whitespace_escape_before - + r'(\|_{0,2})' - + end_string_suffix), - email=re.compile(email_pattern % locals() + '$', - re.VERBOSE | re.UNICODE), - uri=re.compile( - (r""" - %(start_string_prefix)s - (?P - (?P # absolute URI - (?P # scheme (http, ftp, mailto) - [a-zA-Z][a-zA-Z0-9.+-]* - ) - : - ( - ( # either: - (//?)? # hierarchical URI - %(uric)s* # URI characters - %(uri_end)s # final URI char - ) - ( # optional query - \?%(uric)s* - %(uri_end)s - )? - ( # optional fragment - \#%(uric)s* - %(uri_end)s - )? - ) - ) - | # *OR* - (?P # email address - """ + email_pattern + r""" - ) - ) - %(end_string_suffix)s - """) % locals(), re.VERBOSE | re.UNICODE), - pep=re.compile( - r""" - %(start_string_prefix)s - ( - (pep-(?P\d+)(.txt)?) # reference to source file - | - (PEP\s+(?P\d+)) # reference by name - ) - %(end_string_suffix)s""" % locals(), re.VERBOSE | re.UNICODE), - rfc=re.compile( - r""" - %(start_string_prefix)s - (RFC(-|\s+)?(?P\d+)) - %(end_string_suffix)s""" % locals(), re.VERBOSE | re.UNICODE)) def quoted_start(self, match): """Test if inline markup start-string is 'quoted'. @@ -787,8 +798,10 @@ class Inliner: match = self.patterns.embedded_link.search(escaped) if match: # embedded or text = unescape(escaped[:match.start(0)]) - aliastext = unescape(match.group(2), restore_backslashes=True) - if aliastext.endswith('_') and not (aliastext.endswith(r'\_') + aliastext = match.group(2) + underscore_escaped = aliastext.endswith('\x00_') + aliastext = unescape(aliastext) + if aliastext.endswith('_') and not (underscore_escaped or self.patterns.uri.match(aliastext)): aliastype = 'name' alias = normalize_name(aliastext[:-1]) @@ -796,7 +809,9 @@ class Inliner: target.indirect_reference_name = aliastext[:-1] else: aliastype = 'uri' - alias = ''.join(aliastext.split()) + alias_parts = split_escaped_whitespace(match.group(2)) + alias = ' '.join(''.join(unescape(part).split()) + for part in alias_parts) alias = self.adjust_uri(alias) if alias.endswith(r'\_'): alias = alias[:-2] + '_' @@ -1230,6 +1245,8 @@ class Body(RSTState): def bullet(self, match, context, next_state): """Bullet list item.""" bulletlist = nodes.bullet_list() + (bulletlist.source, + bulletlist.line) = self.state_machine.get_source_and_line() self.parent += bulletlist bulletlist['bullet'] = match.string[0] i, blank_finish = self.list_item(match.end()) @@ -1460,6 +1477,7 @@ class Body(RSTState): def option_marker(self, match, context, next_state): """Option list item.""" optionlist = nodes.option_list() + (optionlist.source, optionlist.line) = self.state_machine.get_source_and_line() try: listitem, blank_finish = self.option_list_item(match) except MarkupError, error: @@ -1547,6 +1565,9 @@ class Body(RSTState): def doctest(self, match, context, next_state): data = '\n'.join(self.state_machine.get_text_block()) + # TODO: prepend class value ['pycon'] (Python Console) + # parse with `directives.body.CodeBlock` (returns literal-block + # with class "code" and syntax highlight markup). 
self.parent += nodes.doctest_block(data, data) return [], next_state, [] @@ -1747,9 +1768,13 @@ class Body(RSTState): line=startline+offset) return [error] - def build_table(self, tabledata, tableline, stub_columns=0): + def build_table(self, tabledata, tableline, stub_columns=0, widths=None): colwidths, headrows, bodyrows = tabledata table = nodes.table() + if widths == 'auto': + table['classes'] += ['colwidths-auto'] + elif widths: # "grid" or list of integers + table['classes'] += ['colwidths-given'] tgroup = nodes.tgroup(cols=len(colwidths)) table += tgroup for colwidth in colwidths: @@ -1939,8 +1964,10 @@ class Body(RSTState): refname = self.is_reference(reference) if refname: return 'refname', refname - reference = ''.join([''.join(line.split()) for line in block]) - return 'refuri', unescape(reference) + ref_parts = split_escaped_whitespace(' '.join(block)) + reference = ' '.join(''.join(unescape(part).split()) + for part in ref_parts) + return 'refuri', reference def is_reference(self, reference): match = self.explicit.patterns.reference.match( diff --git a/Libs/docutils/parsers/rst/tableparser.py b/Libs/docutils/parsers/rst/tableparser.py index 2cd3879..e19388b 100644 --- a/Libs/docutils/parsers/rst/tableparser.py +++ b/Libs/docutils/parsers/rst/tableparser.py @@ -1,4 +1,4 @@ -# $Id: tableparser.py 7320 2012-01-19 22:33:02Z milde $ +# $Id: tableparser.py 7898 2015-05-29 20:49:28Z milde $ # Author: David Goodger # Copyright: This module has been placed in the public domain. @@ -511,8 +511,8 @@ class SimpleTableParser(TableParser): if i == lastcol and line[end:].strip(): text = line[start:].rstrip() new_end = start + len(text) - columns[i] = (start, new_end) main_start, main_end = self.columns[-1] + columns[i] = (start, max(main_end, new_end)) if new_end > main_end: self.columns[-1] = (main_start, new_end) elif line[end:nextstart].strip(): diff --git a/Libs/docutils/transforms/frontmatter.py b/Libs/docutils/transforms/frontmatter.py index b39ce11..92287da 100644 --- a/Libs/docutils/transforms/frontmatter.py +++ b/Libs/docutils/transforms/frontmatter.py @@ -1,4 +1,4 @@ -# $Id: frontmatter.py 7595 2013-01-21 17:33:56Z milde $ +# $Id: frontmatter.py 8117 2017-06-18 23:38:18Z milde $ # Author: David Goodger, Ueli Schlaepfer # Copyright: This module has been placed in the public domain. @@ -433,6 +433,10 @@ class DocInfo(Transform): and isinstance(field[-1][0], nodes.paragraph): utils.clean_rcs_keywords( field[-1][0], self.rcs_keyword_substitutions) + if normedname not in bibliofields: + classvalue = nodes.make_id(normedname) + if classvalue: + field['classes'].append(classvalue) docinfo.append(field) nodelist = [] if len(docinfo) != 0: diff --git a/Libs/docutils/transforms/peps.py b/Libs/docutils/transforms/peps.py index 821cbcc..94b47c1 100644 --- a/Libs/docutils/transforms/peps.py +++ b/Libs/docutils/transforms/peps.py @@ -1,4 +1,4 @@ -# $Id: peps.py 6433 2010-09-28 08:21:25Z milde $ +# $Id: peps.py 7995 2016-12-10 17:50:59Z milde $ # Author: David Goodger # Copyright: This module has been placed in the public domain. 
@@ -32,8 +32,8 @@ class Headers(Transform): default_priority = 360 pep_url = 'pep-%04d' - pep_cvs_url = ('http://svn.python.org/view/*checkout*' - '/peps/trunk/pep-%04d.txt') + pep_cvs_url = ('http://hg.python.org' + '/peps/file/default/pep-%04d.txt') rcs_keyword_substitutions = ( (re.compile(r'\$' r'RCSfile: (.+),v \$$', re.IGNORECASE), r'\1'), (re.compile(r'\$[a-zA-Z]+: (.+) \$$'), r'\1'),) @@ -113,7 +113,7 @@ class Headers(Transform): elif name in ('replaces', 'replaced-by', 'requires'): newbody = [] space = nodes.Text(' ') - for refpep in re.split(',?\s+', body.astext()): + for refpep in re.split(r',?\s+', body.astext()): pepno = int(refpep) newbody.append(nodes.reference( refpep, refpep, diff --git a/Libs/docutils/transforms/references.py b/Libs/docutils/transforms/references.py index e88460d..f271067 100644 --- a/Libs/docutils/transforms/references.py +++ b/Libs/docutils/transforms/references.py @@ -1,4 +1,4 @@ -# $Id: references.py 7624 2013-03-07 14:10:26Z milde $ +# $Id: references.py 8067 2017-05-04 20:10:03Z milde $ # Author: David Goodger # Copyright: This module has been placed in the public domain. @@ -710,6 +710,7 @@ class Substitutions(Transform): raise CircularSubstitutionDefinitionError else: nested[nested_name].append(key) + nested_ref['ref-origin'] = ref subreflist.append(nested_ref) except CircularSubstitutionDefinitionError: parent = ref.parent @@ -721,9 +722,13 @@ class Substitutions(Transform): line=parent.line, base_node=parent) parent.replace_self(msg) else: + # find original ref substitution which cased this error + ref_origin = ref + while ref_origin.hasattr('ref-origin'): + ref_origin = ref_origin['ref-origin'] msg = self.document.reporter.error( - 'Circular substitution definition referenced: "%s".' - % refname, base_node=ref) + 'Circular substitution definition referenced: ' + '"%s".' % refname, base_node=ref_origin) msgid = self.document.set_id(msg) prb = nodes.problematic( ref.rawsource, ref.rawsource, refid=msgid) @@ -893,7 +898,10 @@ class DanglingReferencesVisitor(nodes.SparseNodeVisitor): msgid = self.document.set_id(msg) prb = nodes.problematic( node.rawsource, node.rawsource, refid=msgid) - prbid = self.document.set_id(prb) + try: + prbid = node['ids'][0] + except IndexError: + prbid = self.document.set_id(prb) msg.add_backref(prbid) node.replace_self(prb) else: diff --git a/Libs/docutils/transforms/universal.py b/Libs/docutils/transforms/universal.py index 3f3b55f..40036c0 100644 --- a/Libs/docutils/transforms/universal.py +++ b/Libs/docutils/transforms/universal.py @@ -1,4 +1,4 @@ -# $Id: universal.py 7668 2013-06-04 12:46:30Z milde $ +# $Id: universal.py 8144 2017-07-26 21:25:08Z milde $ # -*- coding: utf-8 -*- # Authors: David Goodger ; Ueli Schlaepfer; Günter Milde # Maintainer: docutils-develop@lists.sourceforge.net @@ -49,6 +49,10 @@ class Decorations(Transform): def generate_footer(self): # @@@ Text is hard-coded for now. # Should be made dynamic (language-dependent). + # @@@ Use timestamp from the `SOURCE_DATE_EPOCH`_ environment variable + # for the datestamp? 
+ # See https://sourceforge.net/p/docutils/patches/132/ + # and https://reproducible-builds.org/specs/source-date-epoch/ settings = self.document.settings if settings.generator or settings.datestamp or settings.source_link \ or settings.source_url: @@ -204,6 +208,7 @@ class StripClassesAndElements(Transform): if class_value in self.strip_elements: return 1 + class SmartQuotes(Transform): """ @@ -214,6 +219,20 @@ class SmartQuotes(Transform): default_priority = 850 + nodes_to_skip = (nodes.FixedTextElement, nodes.Special) + """Do not apply "smartquotes" to instances of these block-level nodes.""" + + literal_nodes = (nodes.image, nodes.literal, nodes.math, + nodes.raw, nodes.problematic) + """Do not change quotes in instances of these inline nodes.""" + + smartquotes_action = 'qDe' + """Setting to select smartquote transformations. + + The default 'qDe' educates normal quote characters: (", '), + em- and en-dashes (---, --) and ellipses (...). + """ + def __init__(self, document, startnode): Transform.__init__(self, document, startnode=startnode) self.unsupported_languages = set() @@ -226,11 +245,7 @@ class SmartQuotes(Transform): False: 'plain'} for txtnode in txtnodes: nodetype = texttype[isinstance(txtnode.parent, - (nodes.literal, - nodes.math, - nodes.image, - nodes.raw, - nodes.problematic))] + self.literal_nodes)] yield (nodetype, txtnode.astext()) @@ -245,12 +260,15 @@ class SmartQuotes(Transform): # print repr(alternative) document_language = self.document.settings.language_code + lc_smartquotes = self.document.settings.smartquotes_locales + if lc_smartquotes: + smartquotes.smartchars.quotes.update(dict(lc_smartquotes)) # "Educate" quotes in normal text. Handle each block of text # (TextElement node) as a unit to keep context around inline nodes: for node in self.document.traverse(nodes.TextElement): # skip preformatted text blocks and special elements: - if isinstance(node, (nodes.FixedTextElement, nodes.Special)): + if isinstance(node, self.nodes_to_skip): continue # nested TextElements are not "block-level" elements: if isinstance(node.parent, nodes.TextElement): @@ -269,7 +287,7 @@ class SmartQuotes(Transform): lang = lang.replace('-x-altquot', '') else: lang += '-x-altquot' - # drop subtags missing in quotes: + # drop unsupported subtags: for tag in utils.normalize_language_tag(lang): if tag in smartquotes.smartchars.quotes: lang = tag @@ -282,11 +300,12 @@ class SmartQuotes(Transform): lang = '' # Iterator educating quotes in plain text: - # '2': set all, using old school en- and em- dash shortcuts + # (see "utils/smartquotes.py" for the attribute setting) teacher = smartquotes.educate_tokens(self.get_tokens(txtnodes), - attr='2', language=lang) + attr=self.smartquotes_action, language=lang) for txtnode, newtext in zip(txtnodes, teacher): - txtnode.parent.replace(txtnode, nodes.Text(newtext)) + txtnode.parent.replace(txtnode, nodes.Text(newtext, + rawsource=txtnode.rawsource)) - self.unsupported_languages = set() # reset + self.unsupported_languages = set() # reset diff --git a/Libs/docutils/transforms/writer_aux.py b/Libs/docutils/transforms/writer_aux.py index ab26b0b..c5818d9 100644 --- a/Libs/docutils/transforms/writer_aux.py +++ b/Libs/docutils/transforms/writer_aux.py @@ -1,4 +1,4 @@ -# $Id: writer_aux.py 7320 2012-01-19 22:33:02Z milde $ +# $Id: writer_aux.py 7808 2015-02-27 17:03:32Z milde $ # Author: Lea Wiemann # Copyright: This module has been placed in the public domain. 
diff --git a/Libs/docutils/utils/__init__.py b/Libs/docutils/utils/__init__.py index c319217..3c16eb2 100644 --- a/Libs/docutils/utils/__init__.py +++ b/Libs/docutils/utils/__init__.py @@ -1,5 +1,5 @@ # coding: utf-8 -# $Id: __init__.py 7668 2013-06-04 12:46:30Z milde $ +# $Id: __init__.py 8141 2017-07-08 17:05:18Z goodger $ # Author: David Goodger # Copyright: This module has been placed in the public domain. @@ -13,9 +13,10 @@ import sys import os import os.path import re +import itertools import warnings import unicodedata -from docutils import ApplicationError, DataError +from docutils import ApplicationError, DataError, __version_info__ from docutils import nodes import docutils.io from docutils.utils.error_reporting import ErrorOutput, SafeString @@ -325,7 +326,7 @@ def assemble_option_dict(option_list, options_spec): raise DuplicateOptionError('duplicate option "%s"' % name) try: options[name] = convertor(value) - except (ValueError, TypeError), detail: + except (ValueError, TypeError) as detail: raise detail.__class__('(option: "%s"; value: %r)\n%s' % (name, value, ' '.join(detail.args))) return options @@ -575,7 +576,7 @@ def escape2null(text): parts.append('\x00' + text[found+1:found+2]) start = found + 2 # skip character after escape -def unescape(text, restore_backslashes=False): +def unescape(text, restore_backslashes=False, respect_whitespace=False): """ Return a string with nulls removed or restored to backslashes. Backslash-escaped spaces are also removed. @@ -587,6 +588,16 @@ def unescape(text, restore_backslashes=False): text = ''.join(text.split(sep)) return text +def split_escaped_whitespace(text): + """ + Split `text` on escaped whitespace (null+space or null+newline). + Return a list of strings. + """ + strings = text.split('\x00 ') + strings = [string.split('\x00\n') for string in strings] + # flatten list of lists of strings to list of strings: + return list(itertools.chain(*strings)) + def strip_combining_chars(text): if isinstance(text, str) and sys.version_info < (3,0): return text @@ -595,8 +606,10 @@ def strip_combining_chars(text): def find_combining_chars(text): """Return indices of all combining chars in Unicode string `text`. + >>> from docutils.utils import find_combining_chars >>> find_combining_chars(u'A t̆ab̆lĕ') [3, 6, 9] + """ if isinstance(text, str) and sys.version_info < (3,0): return [] @@ -605,8 +618,10 @@ def find_combining_chars(text): def column_indices(text): """Indices of Unicode string `text` when skipping combining characters. + >>> from docutils.utils import column_indices >>> column_indices(u'A t̆ab̆lĕ') [0, 1, 2, 4, 5, 7, 8] + """ # TODO: account for asian wide chars here instead of using dummy # replacements in the tableparser? 
@@ -663,17 +678,21 @@ def normalize_language_tag(tag): Example: + >>> from docutils.utils import normalize_language_tag >>> normalize_language_tag('de_AT-1901') ['de-at-1901', 'de-at', 'de-1901', 'de'] + >>> normalize_language_tag('de-CH-x_altquot') + ['de-ch-x-altquot', 'de-ch', 'de-x-altquot', 'de'] + """ # normalize: - tag = tag.lower().replace('_','-') + tag = tag.lower().replace('-','_') # split (except singletons, which mark the following tag as non-standard): - tag = re.sub(r'-([a-zA-Z0-9])-', r'-\1_', tag) - taglist = [] - subtags = [subtag.replace('_', '-') for subtag in tag.split('-')] + tag = re.sub(r'_([a-zA-Z0-9])_', r'_\1-', tag) + subtags = [subtag for subtag in tag.split('_')] base_tag = [subtags.pop(0)] # find all combinations of subtags + taglist = [] for n in range(len(subtags), 0, -1): for tags in unique_combinations(subtags, n): taglist.append('-'.join(base_tag+tags)) @@ -747,3 +766,42 @@ class DependencyList(object): except AttributeError: output_file = None return '%s(%r, %s)' % (self.__class__.__name__, output_file, self.list) + + +release_level_abbreviations = { + 'alpha': 'a', + 'beta': 'b', + 'candidate': 'rc', + 'final': '',} + +def version_identifier(version_info=None): + # to add in Docutils 0.15: + # version_info is a namedtuple, an instance of Docutils.VersionInfo. + """ + Given a `version_info` tuple (default is docutils.__version_info__), + build & return a version identifier string. + """ + if version_info is None: + version_info = __version_info__ + if version_info[2]: # version_info.micro + micro = '.%s' % version_info[2] + else: + micro = '' + releaselevel = release_level_abbreviations[ + version_info[3]] # version_info.releaselevel + if version_info[4]: # version_info.serial + serial = version_info[4] + else: + serial = '' + if version_info[5]: # version_info.release + dev = '' + else: + dev = '.dev' + version = '%s.%s%s%s%s%s' % ( + version_info[0], # version_info.major + version_info[1], # version_info.minor + micro, + releaselevel, + serial, + dev) + return version diff --git a/Libs/docutils/utils/code_analyzer.py b/Libs/docutils/utils/code_analyzer.py index be23a19..314a506 100644 --- a/Libs/docutils/utils/code_analyzer.py +++ b/Libs/docutils/utils/code_analyzer.py @@ -4,7 +4,7 @@ """Lexical analysis of formal languages (i.e. code) using Pygments.""" # :Author: Georg Brandl; Felix Wiemann; Günter Milde -# :Date: $Date: 2011-12-20 15:14:21 +0100 (Die, 20. Dez 2011) $ +# :Date: $Date: 2015-04-20 16:05:27 +0200 (Mo, 20 Apr 2015) $ # :Copyright: This module has been placed in the public domain. from docutils import ApplicationError @@ -13,7 +13,7 @@ try: from pygments.lexers import get_lexer_by_name from pygments.formatters.html import _get_ttype_class with_pygments = True -except ImportError: +except (ImportError, SyntaxError): # pygments 2.0.1 fails with Py 3.1 and 3.2 with_pygments = False # Filter the following token types from the list of class arguments: diff --git a/Libs/docutils/utils/error_reporting.py b/Libs/docutils/utils/error_reporting.py index 19348bf..8ea7108 100644 --- a/Libs/docutils/utils/error_reporting.py +++ b/Libs/docutils/utils/error_reporting.py @@ -1,7 +1,7 @@ #!/usr/bin/env python # -*- coding: utf-8 -*- -# :Id: $Id: error_reporting.py 7668 2013-06-04 12:46:30Z milde $ +# :Id: $Id: error_reporting.py 8119 2017-06-22 20:59:19Z milde $ # :Copyright: © 2011 Günter Milde. 
# :License: Released under the terms of the `2-Clause BSD license`_, in short: # @@ -44,10 +44,20 @@ try: except ImportError: locale_encoding = None else: - locale_encoding = locale.getlocale()[1] or locale.getdefaultlocale()[1] - # locale.getpreferredencoding([do_setlocale=True|False]) - # has side-effects | might return a wrong guess. - # (cf. Update 1 in http://stackoverflow.com/questions/4082645/using-python-2-xs-locale-module-to-format-numbers-and-currency) + try: + locale_encoding = locale.getlocale()[1] or locale.getdefaultlocale()[1] + # locale.getpreferredencoding([do_setlocale=True|False]) + # has side-effects | might return a wrong guess. + # (cf. Update 1 in http://stackoverflow.com/questions/4082645/using-python-2-xs-locale-module-to-format-numbers-and-currency) + except ValueError as error: # OS X may set UTF-8 without language code + # see http://bugs.python.org/issue18378 + # and https://sourceforge.net/p/docutils/bugs/298/ + if "unknown locale: UTF-8" in error.args: + locale_encoding = "UTF-8" + else: + locale_encoding = None + except: # any other problems determining the locale -> use None + locale_encoding = None try: codecs.lookup(locale_encoding or '') # None -> '' except LookupError: @@ -72,7 +82,7 @@ class SafeString(object): def __str__(self): try: return str(self.data) - except UnicodeEncodeError, err: + except UnicodeEncodeError: if isinstance(self.data, Exception): args = [str(SafeString(arg, self.encoding, self.encoding_errors)) @@ -103,7 +113,7 @@ class SafeString(object): if isinstance(self.data, EnvironmentError): u = u.replace(": u'", ": '") # normalize filename quoting return u - except UnicodeError, error: # catch ..Encode.. and ..Decode.. errors + except UnicodeError as error: # catch ..Encode.. and ..Decode.. errors if isinstance(self.data, EnvironmentError): return u"[Errno %s] %s: '%s'" % (self.data.errno, SafeString(self.data.strerror, self.encoding, @@ -189,7 +199,11 @@ class ErrorOutput(object): self.stream.write(data) except UnicodeEncodeError: self.stream.write(data.encode(self.encoding, self.encoding_errors)) - except TypeError: # in Python 3, stderr expects unicode + except TypeError: + if isinstance(data, unicode): # passed stream may expect bytes + self.stream.write(data.encode(self.encoding, + self.encoding_errors)) + return if self.stream in (sys.stderr, sys.stdout): self.stream.buffer.write(data) # write bytes to raw stream else: diff --git a/Libs/docutils/utils/math/__init__.py b/Libs/docutils/utils/math/__init__.py index 0508e4f..673f93e 100644 --- a/Libs/docutils/utils/math/__init__.py +++ b/Libs/docutils/utils/math/__init__.py @@ -1,4 +1,4 @@ -# :Id: $Id: __init__.py 7218 2011-11-08 17:42:40Z milde $ +# :Id: $Id: __init__.py 7865 2015-04-12 10:06:43Z milde $ # :Author: Guenter Milde. 
# :License: Released under the terms of the `2-Clause BSD license`_, in short: # @@ -17,8 +17,9 @@ It contains various modules for conversion between different math formats :math2html: LaTeX math -> HTML conversion from eLyXer :latex2mathml: LaTeX math -> presentational MathML -:unichar2tex: Unicode character to LaTeX math translation table -:tex2unichar: LaTeX math to Unicode character translation dictionaries +:unichar2tex: Unicode character to LaTeX math translation table +:tex2unichar: LaTeX math to Unicode character translation dictionaries +:tex2mathml_extern: Wrapper for TeX -> MathML command line converters """ # helpers for Docutils math support diff --git a/Libs/docutils/utils/math/latex2mathml.py b/Libs/docutils/utils/math/latex2mathml.py index 7bbdbdd..bcb4877 100644 --- a/Libs/docutils/utils/math/latex2mathml.py +++ b/Libs/docutils/utils/math/latex2mathml.py @@ -1,7 +1,7 @@ #!/usr/bin/env python # -*- coding: utf-8 -*- -# :Id: $Id: latex2mathml.py 7668 2013-06-04 12:46:30Z milde $ +# :Id: $Id: latex2mathml.py 7995 2016-12-10 17:50:59Z milde $ # :Copyright: © 2010 Günter Milde. # Based on rst2mathml.py from the latex_math sandbox project # © 2005 Jens Jørgen Mortensen @@ -151,8 +151,8 @@ mathscr = { } negatables = {'=': u'\u2260', - '\in': u'\u2209', - '\equiv': u'\u2262'} + r'\in': u'\u2209', + r'\equiv': u'\u2262'} # LaTeX to MathML translation stuff: class math: @@ -558,3 +558,14 @@ def handle_keyword(name, node, string): raise SyntaxError(u'Unknown LaTeX command: ' + name) return node, skip + +def tex2mathml(tex_math, inline=True): + """Return string with MathML code corresponding to `tex_math`. + + `inline`=True is for inline math and `inline`=False for displayed math. + """ + + mathml_tree = parse_latex_math(tex_math, inline=inline) + return ''.join(mathml_tree.xml()) + + diff --git a/Libs/docutils/utils/math/math2html.py b/Libs/docutils/utils/math/math2html.py index 2d3149e..1f61e23 100644 --- a/Libs/docutils/utils/math/math2html.py +++ b/Libs/docutils/utils/math/math2html.py @@ -14,7 +14,7 @@ # .. _2-Clause BSD license: http://www.spdx.org/licenses/BSD-2-Clause # Based on eLyXer: convert LyX source files to HTML output. -# http://elyxer.nongnu.org/ +# http://alexfernandez.github.io/elyxer/ # --end-- # Alex 20101110 @@ -112,7 +112,7 @@ class BibStylesConfig(object): u'@conference':u'$authors: “$title”, $journal,{ pp. $pages,} $year.{ URL $url.}{ $note.}', u'@inbook':u'$authors: $title.{{ $publisher,} $year.}{ URL $url.}{ $note.}', u'@incollection':u'$authors: $title{ in $booktitle{ ($editor, ed.)}}.{{ $publisher,} $year.}{ URL $url.}{ $note.}', - u'@inproceedings':u'$authors: “$title”, $journal,{ pp. $pages,} $year.{ URL $url.}{ $note.}', + u'@inproceedings':u'$authors: “$title”, $booktitle,{ pp. 
$pages,} $year.{ URL $url.}{ $note.}', u'@manual':u'$authors: $title.{{ $publisher,} $year.}{ URL $url.}{ $note.}', u'@mastersthesis':u'$authors: $title.{{ $publisher,} $year.}{ URL $url.}{ $note.}', u'@misc':u'$authors: $title.{{ $publisher,}{ $howpublished,} $year.}{ URL $url.}{ $note.}', @@ -245,7 +245,8 @@ class ContainerConfig(object): u'\\begin_inset Quotes':u'QuoteContainer', u'\\begin_inset Tabular':u'Table', u'\\begin_inset Text':u'InsetText', u'\\begin_inset VSpace':u'VerticalSpace', u'\\begin_inset Wrap':u'Wrap', - u'\\begin_inset listings':u'Listing', u'\\begin_inset space':u'Space', + u'\\begin_inset listings':u'Listing', + u'\\begin_inset script':u'ScriptInset', u'\\begin_inset space':u'Space', u'\\begin_layout':u'Layout', u'\\begin_layout Abstract':u'Abstract', u'\\begin_layout Author':u'Author', u'\\begin_layout Bibliography':u'Bibliography', @@ -291,7 +292,7 @@ class EscapeConfig(object): "Configuration class from elyxer.config file" chars = { - u'\n':u'', u' -- ':u' — ', u'\'':u'’', u'---':u'—', u'`':u'‘', + u'\n':u'', u' -- ':u' — ', u' --- ':u' — ', u'\'':u'’', u'`':u'‘', } commands = { @@ -324,21 +325,24 @@ class FormulaConfig(object): alphacommands = { u'\\AA':u'Å', u'\\AE':u'Æ', - u'\\AmS':u'AmS', u'\\DH':u'Ð', - u'\\L':u'Ł', u'\\O':u'Ø', u'\\OE':u'Œ', u'\\TH':u'Þ', u'\\aa':u'å', - u'\\ae':u'æ', u'\\alpha':u'α', u'\\beta':u'β', u'\\delta':u'δ', - u'\\dh':u'ð', u'\\epsilon':u'ϵ', u'\\eta':u'η', u'\\gamma':u'γ', - u'\\i':u'ı', u'\\imath':u'ı', u'\\iota':u'ι', u'\\j':u'ȷ', - u'\\jmath':u'ȷ', u'\\kappa':u'κ', u'\\l':u'ł', u'\\lambda':u'λ', + u'\\AmS':u'AmS', u'\\Angstroem':u'Å', + u'\\DH':u'Ð', u'\\Koppa':u'Ϟ', u'\\L':u'Ł', u'\\Micro':u'µ', u'\\O':u'Ø', + u'\\OE':u'Œ', u'\\Sampi':u'Ϡ', u'\\Stigma':u'Ϛ', u'\\TH':u'Þ', + u'\\aa':u'å', u'\\ae':u'æ', u'\\alpha':u'α', u'\\beta':u'β', + u'\\delta':u'δ', u'\\dh':u'ð', u'\\digamma':u'ϝ', u'\\epsilon':u'ϵ', + u'\\eta':u'η', u'\\eth':u'ð', u'\\gamma':u'γ', u'\\i':u'ı', + u'\\imath':u'ı', u'\\iota':u'ι', u'\\j':u'ȷ', u'\\jmath':u'ȷ', + u'\\kappa':u'κ', u'\\koppa':u'ϟ', u'\\l':u'ł', u'\\lambda':u'λ', u'\\mu':u'μ', u'\\nu':u'ν', u'\\o':u'ø', u'\\oe':u'œ', u'\\omega':u'ω', u'\\phi':u'φ', u'\\pi':u'π', u'\\psi':u'ψ', u'\\rho':u'ρ', - u'\\sigma':u'σ', u'\\ss':u'ß', u'\\tau':u'τ', u'\\textcrh':u'ħ', - u'\\th':u'þ', u'\\theta':u'θ', u'\\upsilon':u'υ', u'\\varDelta':u'∆', + u'\\sampi':u'ϡ', u'\\sigma':u'σ', u'\\ss':u'ß', u'\\stigma':u'ϛ', + u'\\tau':u'τ', u'\\tcohm':u'Ω', u'\\textcrh':u'ħ', u'\\th':u'þ', + u'\\theta':u'θ', u'\\upsilon':u'υ', u'\\varDelta':u'∆', u'\\varGamma':u'Γ', u'\\varLambda':u'Λ', u'\\varOmega':u'Ω', u'\\varPhi':u'Φ', u'\\varPi':u'Π', u'\\varPsi':u'Ψ', u'\\varSigma':u'Σ', u'\\varTheta':u'Θ', u'\\varUpsilon':u'Υ', u'\\varXi':u'Ξ', - u'\\varepsilon':u'ε', u'\\varkappa':u'ϰ', u'\\varphi':u'φ', - u'\\varpi':u'ϖ', u'\\varrho':u'ϱ', u'\\varsigma':u'ς', + u'\\varbeta':u'ϐ', u'\\varepsilon':u'ε', u'\\varkappa':u'ϰ', + u'\\varphi':u'φ', u'\\varpi':u'ϖ', u'\\varrho':u'ϱ', u'\\varsigma':u'ς', u'\\vartheta':u'ϑ', u'\\xi':u'ξ', u'\\zeta':u'ζ', } @@ -376,59 +380,75 @@ class FormulaConfig(object): commands = { u'\\ ':u' ', u'\\!':u'', u'\\#':u'#', u'\\$':u'$', u'\\%':u'%', - u'\\&':u'&', u'\\,':u' ', u'\\:':u' ', u'\\;':u' ', - u'\\APLdownarrowbox':u'⍗', u'\\APLleftarrowbox':u'⍇', + u'\\&':u'&', u'\\,':u' ', u'\\:':u' ', u'\\;':u' ', u'\\AC':u'∿', + u'\\APLcomment':u'⍝', u'\\APLdownarrowbox':u'⍗', u'\\APLinput':u'⍞', + u'\\APLinv':u'⌹', u'\\APLleftarrowbox':u'⍇', u'\\APLlog':u'⍟', u'\\APLrightarrowbox':u'⍈', u'\\APLuparrowbox':u'⍐', 
u'\\Box':u'□', - u'\\Bumpeq':u'≎', u'\\CIRCLE':u'●', u'\\Cap':u'⋒', u'\\CheckedBox':u'☑', - u'\\Circle':u'○', u'\\Coloneqq':u'⩴', u'\\Corresponds':u'≙', - u'\\Cup':u'⋓', u'\\Delta':u'Δ', u'\\Diamond':u'◇', u'\\Downarrow':u'⇓', - u'\\EUR':u'€', u'\\Game':u'⅁', u'\\Gamma':u'Γ', u'\\Im':u'ℑ', - u'\\Join':u'⨝', u'\\LEFTCIRCLE':u'◖', u'\\LEFTcircle':u'◐', - u'\\Lambda':u'Λ', u'\\Leftarrow':u'⇐', u'\\Lleftarrow':u'⇚', - u'\\Longleftarrow':u'⟸', u'\\Longleftrightarrow':u'⟺', - u'\\Longrightarrow':u'⟹', u'\\Lsh':u'↰', u'\\Mapsfrom':u'⇐|', - u'\\Mapsto':u'|⇒', u'\\Omega':u'Ω', u'\\P':u'¶', u'\\Phi':u'Φ', - u'\\Pi':u'Π', u'\\Pr':u'Pr', u'\\Psi':u'Ψ', u'\\RIGHTCIRCLE':u'◗', - u'\\RIGHTcircle':u'◑', u'\\Re':u'ℜ', u'\\Rrightarrow':u'⇛', - u'\\Rsh':u'↱', u'\\S':u'§', u'\\Sigma':u'Σ', u'\\Square':u'☐', - u'\\Subset':u'⋐', u'\\Supset':u'⋑', u'\\Theta':u'Θ', u'\\Uparrow':u'⇑', - u'\\Updownarrow':u'⇕', u'\\Upsilon':u'Υ', u'\\Vdash':u'⊩', - u'\\Vert':u'∥', u'\\Vvdash':u'⊪', u'\\XBox':u'☒', u'\\Xi':u'Ξ', - u'\\Yup':u'⅄', u'\\\\':u'
', u'\\_':u'_', u'\\aleph':u'ℵ', - u'\\amalg':u'∐', u'\\angle':u'∠', u'\\aquarius':u'♒', - u'\\arccos':u'arccos', u'\\arcsin':u'arcsin', u'\\arctan':u'arctan', - u'\\arg':u'arg', u'\\aries':u'♈', u'\\ast':u'∗', u'\\asymp':u'≍', + u'\\Bumpeq':u'≎', u'\\CIRCLE':u'●', u'\\Cap':u'⋒', + u'\\CapitalDifferentialD':u'ⅅ', u'\\CheckedBox':u'☑', u'\\Circle':u'○', + u'\\Coloneqq':u'⩴', u'\\ComplexI':u'ⅈ', u'\\ComplexJ':u'ⅉ', + u'\\Corresponds':u'≙', u'\\Cup':u'⋓', u'\\Delta':u'Δ', u'\\Diamond':u'◇', + u'\\Diamondblack':u'◆', u'\\Diamonddot':u'⟐', u'\\DifferentialD':u'ⅆ', + u'\\Downarrow':u'⇓', u'\\EUR':u'€', u'\\Euler':u'ℇ', + u'\\ExponetialE':u'ⅇ', u'\\Finv':u'Ⅎ', u'\\Game':u'⅁', u'\\Gamma':u'Γ', + u'\\Im':u'ℑ', u'\\Join':u'⨝', u'\\LEFTCIRCLE':u'◖', u'\\LEFTcircle':u'◐', + u'\\LHD':u'◀', u'\\Lambda':u'Λ', u'\\Lbag':u'⟅', u'\\Leftarrow':u'⇐', + u'\\Lleftarrow':u'⇚', u'\\Longleftarrow':u'⟸', + u'\\Longleftrightarrow':u'⟺', u'\\Longrightarrow':u'⟹', u'\\Lparen':u'⦅', + u'\\Lsh':u'↰', u'\\Mapsfrom':u'⇐|', u'\\Mapsto':u'|⇒', u'\\Omega':u'Ω', + u'\\P':u'¶', u'\\Phi':u'Φ', u'\\Pi':u'Π', u'\\Pr':u'Pr', u'\\Psi':u'Ψ', + u'\\Qoppa':u'Ϙ', u'\\RHD':u'▶', u'\\RIGHTCIRCLE':u'◗', + u'\\RIGHTcircle':u'◑', u'\\Rbag':u'⟆', u'\\Re':u'ℜ', u'\\Rparen':u'⦆', + u'\\Rrightarrow':u'⇛', u'\\Rsh':u'↱', u'\\S':u'§', u'\\Sigma':u'Σ', + u'\\Square':u'☐', u'\\Subset':u'⋐', u'\\Sun':u'☉', u'\\Supset':u'⋑', + u'\\Theta':u'Θ', u'\\Uparrow':u'⇑', u'\\Updownarrow':u'⇕', + u'\\Upsilon':u'Υ', u'\\Vdash':u'⊩', u'\\Vert':u'∥', u'\\Vvdash':u'⊪', + u'\\XBox':u'☒', u'\\Xi':u'Ξ', u'\\Yup':u'⅄', u'\\\\':u'
', + u'\\_':u'_', u'\\aleph':u'ℵ', u'\\amalg':u'∐', u'\\anchor':u'⚓', + u'\\angle':u'∠', u'\\aquarius':u'♒', u'\\arccos':u'arccos', + u'\\arcsin':u'arcsin', u'\\arctan':u'arctan', u'\\arg':u'arg', + u'\\aries':u'♈', u'\\arrowbullet':u'➢', u'\\ast':u'∗', u'\\asymp':u'≍', u'\\backepsilon':u'∍', u'\\backprime':u'‵', u'\\backsimeq':u'⋍', - u'\\backslash':u'\\', u'\\barwedge':u'⊼', u'\\because':u'∵', - u'\\beth':u'ℶ', u'\\between':u'≬', u'\\bigcap':u'∩', u'\\bigcirc':u'○', - u'\\bigcup':u'∪', u'\\bigodot':u'⊙', u'\\bigoplus':u'⊕', - u'\\bigotimes':u'⊗', u'\\bigsqcup':u'⊔', u'\\bigstar':u'★', - u'\\bigtriangledown':u'▽', u'\\bigtriangleup':u'△', u'\\biguplus':u'⊎', - u'\\bigvee':u'∨', u'\\bigwedge':u'∧', u'\\blacklozenge':u'⧫', - u'\\blacksmiley':u'☻', u'\\blacksquare':u'■', u'\\blacktriangle':u'▲', - u'\\blacktriangledown':u'▼', u'\\blacktriangleright':u'▶', u'\\bot':u'⊥', - u'\\bowtie':u'⋈', u'\\box':u'▫', u'\\boxdot':u'⊡', u'\\bullet':u'•', + u'\\backslash':u'\\', u'\\ballotx':u'✗', u'\\barwedge':u'⊼', + u'\\because':u'∵', u'\\beth':u'ℶ', u'\\between':u'≬', u'\\bigcap':u'∩', + u'\\bigcirc':u'○', u'\\bigcup':u'∪', u'\\bigodot':u'⊙', + u'\\bigoplus':u'⊕', u'\\bigotimes':u'⊗', u'\\bigsqcup':u'⊔', + u'\\bigstar':u'★', u'\\bigtriangledown':u'▽', u'\\bigtriangleup':u'△', + u'\\biguplus':u'⊎', u'\\bigvee':u'∨', u'\\bigwedge':u'∧', + u'\\biohazard':u'☣', u'\\blacklozenge':u'⧫', u'\\blacksmiley':u'☻', + u'\\blacksquare':u'■', u'\\blacktriangle':u'▲', + u'\\blacktriangledown':u'▼', u'\\blacktriangleleft':u'◂', + u'\\blacktriangleright':u'▶', u'\\blacktriangleup':u'▴', u'\\bot':u'⊥', + u'\\bowtie':u'⋈', u'\\box':u'▫', u'\\boxast':u'⧆', u'\\boxbar':u'◫', + u'\\boxbox':u'⧈', u'\\boxbslash':u'⧅', u'\\boxcircle':u'⧇', + u'\\boxdot':u'⊡', u'\\boxminus':u'⊟', u'\\boxplus':u'⊞', + u'\\boxslash':u'⧄', u'\\boxtimes':u'⊠', u'\\bullet':u'•', u'\\bumpeq':u'≏', u'\\cancer':u'♋', u'\\cap':u'∩', u'\\capricornus':u'♑', - u'\\cdot':u'⋅', u'\\cdots':u'⋯', u'\\centerdot':u'∙', - u'\\checkmark':u'✓', u'\\chi':u'χ', u'\\circ':u'○', u'\\circeq':u'≗', - u'\\circledR':u'®', u'\\circledast':u'⊛', u'\\circledcirc':u'⊚', - u'\\circleddash':u'⊝', u'\\clubsuit':u'♣', u'\\coloneqq':u'≔', + u'\\cat':u'⁀', u'\\cdot':u'⋅', u'\\cdots':u'⋯', u'\\cent':u'¢', + u'\\centerdot':u'∙', u'\\checkmark':u'✓', u'\\chi':u'χ', u'\\circ':u'∘', + u'\\circeq':u'≗', u'\\circlearrowleft':u'↺', u'\\circlearrowright':u'↻', + u'\\circledR':u'®', u'\\circledast':u'⊛', u'\\circledbslash':u'⦸', + u'\\circledcirc':u'⊚', u'\\circleddash':u'⊝', u'\\circledgtr':u'⧁', + u'\\circledless':u'⧀', u'\\clubsuit':u'♣', u'\\colon':u': ', u'\\coloneqq':u'≔', u'\\complement':u'∁', u'\\cong':u'≅', u'\\coprod':u'∐', u'\\copyright':u'©', u'\\cos':u'cos', u'\\cosh':u'cosh', u'\\cot':u'cot', - u'\\coth':u'coth', u'\\csc':u'csc', u'\\cup':u'∪', - u'\\curvearrowleft':u'↶', u'\\curvearrowright':u'↷', u'\\dag':u'†', - u'\\dagger':u'†', u'\\daleth':u'ℸ', u'\\dashleftarrow':u'⇠', - u'\\dashv':u'⊣', u'\\ddag':u'‡', u'\\ddagger':u'‡', u'\\ddots':u'⋱', - u'\\deg':u'deg', u'\\det':u'det', u'\\diagdown':u'╲', u'\\diagup':u'╱', - u'\\diamond':u'◇', u'\\diamondsuit':u'♦', u'\\dim':u'dim', u'\\div':u'÷', - u'\\divideontimes':u'⋇', u'\\dotdiv':u'∸', u'\\doteq':u'≐', - u'\\doteqdot':u'≑', u'\\dotplus':u'∔', u'\\dots':u'…', - u'\\doublebarwedge':u'⌆', u'\\downarrow':u'↓', u'\\downdownarrows':u'⇊', - u'\\downharpoonleft':u'⇃', u'\\downharpoonright':u'⇂', u'\\earth':u'♁', - u'\\ell':u'ℓ', u'\\emptyset':u'∅', u'\\eqcirc':u'≖', u'\\eqcolon':u'≕', - u'\\eqsim':u'≂', u'\\euro':u'€', u'\\exists':u'∃', 
u'\\exp':u'exp', - u'\\fallingdotseq':u'≒', u'\\female':u'♀', u'\\flat':u'♭', - u'\\forall':u'∀', u'\\frown':u'⌢', u'\\frownie':u'☹', u'\\gcd':u'gcd', + u'\\coth':u'coth', u'\\csc':u'csc', u'\\cup':u'∪', u'\\curlyvee':u'⋎', + u'\\curlywedge':u'⋏', u'\\curvearrowleft':u'↶', + u'\\curvearrowright':u'↷', u'\\dag':u'†', u'\\dagger':u'†', + u'\\daleth':u'ℸ', u'\\dashleftarrow':u'⇠', u'\\dashv':u'⊣', + u'\\ddag':u'‡', u'\\ddagger':u'‡', u'\\ddots':u'⋱', u'\\deg':u'deg', + u'\\det':u'det', u'\\diagdown':u'╲', u'\\diagup':u'╱', + u'\\diameter':u'⌀', u'\\diamond':u'◇', u'\\diamondsuit':u'♦', + u'\\dim':u'dim', u'\\div':u'÷', u'\\divideontimes':u'⋇', + u'\\dotdiv':u'∸', u'\\doteq':u'≐', u'\\doteqdot':u'≑', u'\\dotplus':u'∔', + u'\\dots':u'…', u'\\doublebarwedge':u'⌆', u'\\downarrow':u'↓', + u'\\downdownarrows':u'⇊', u'\\downharpoonleft':u'⇃', + u'\\downharpoonright':u'⇂', u'\\dsub':u'⩤', u'\\earth':u'♁', + u'\\eighthnote':u'♪', u'\\ell':u'ℓ', u'\\emptyset':u'∅', + u'\\eqcirc':u'≖', u'\\eqcolon':u'≕', u'\\eqsim':u'≂', u'\\euro':u'€', + u'\\exists':u'∃', u'\\exp':u'exp', u'\\fallingdotseq':u'≒', + u'\\fcmp':u'⨾', u'\\female':u'♀', u'\\flat':u'♭', u'\\forall':u'∀', + u'\\fourth':u'⁗', u'\\frown':u'⌢', u'\\frownie':u'☹', u'\\gcd':u'gcd', u'\\gemini':u'♊', u'\\geq)':u'≥', u'\\geqq':u'≧', u'\\geqslant':u'≥', u'\\gets':u'←', u'\\gg':u'≫', u'\\ggg':u'⋙', u'\\gimel':u'ℷ', u'\\gneqq':u'≩', u'\\gnsim':u'⋧', u'\\gtrdot':u'⋗', u'\\gtreqless':u'⋚', @@ -439,41 +459,44 @@ class FormulaConfig(object): u'\\hslash':u'ℏ', u'\\idotsint':u'∫⋯∫', u'\\iiint':u'', u'\\iint':u'', u'\\imath':u'ı', - u'\\inf':u'inf', u'\\infty':u'∞', u'\\invneg':u'⌐', u'\\jmath':u'ȷ', - u'\\jupiter':u'♃', u'\\ker':u'ker', u'\\land':u'∧', - u'\\landupint':u'', u'\\langle':u'⟨', - u'\\lbrace':u'{', u'\\lbrace)':u'{', u'\\lbrack':u'[', u'\\lceil':u'⌈', - u'\\ldots':u'…', u'\\leadsto':u'⇝', u'\\leftarrow)':u'←', - u'\\leftarrowtail':u'↢', u'\\leftarrowtobar':u'⇤', + u'\\inf':u'inf', u'\\infty':u'∞', u'\\intercal':u'⊺', + u'\\interleave':u'⫴', u'\\invamp':u'⅋', u'\\invneg':u'⌐', + u'\\jmath':u'ȷ', u'\\jupiter':u'♃', u'\\ker':u'ker', u'\\land':u'∧', + u'\\landupint':u'', u'\\lang':u'⟪', + u'\\langle':u'⟨', u'\\lblot':u'⦉', u'\\lbrace':u'{', u'\\lbrace)':u'{', + u'\\lbrack':u'[', u'\\lceil':u'⌈', u'\\ldots':u'…', u'\\leadsto':u'⇝', + u'\\leftarrow)':u'←', u'\\leftarrowtail':u'↢', u'\\leftarrowtobar':u'⇤', u'\\leftharpoondown':u'↽', u'\\leftharpoonup':u'↼', u'\\leftleftarrows':u'⇇', u'\\leftleftharpoons':u'⥢', u'\\leftmoon':u'☾', u'\\leftrightarrow':u'↔', u'\\leftrightarrows':u'⇆', u'\\leftrightharpoons':u'⇋', u'\\leftthreetimes':u'⋋', u'\\leo':u'♌', u'\\leq)':u'≤', u'\\leqq':u'≦', u'\\leqslant':u'≤', u'\\lessdot':u'⋖', u'\\lesseqgtr':u'⋛', u'\\lesseqqgtr':u'⪋', u'\\lessgtr':u'≶', - u'\\lesssim':u'≲', u'\\lfloor':u'⌊', u'\\lg':u'lg', u'\\lhd':u'⊲', - u'\\libra':u'♎', u'\\lightning':u'↯', u'\\liminf':u'liminf', - u'\\limsup':u'limsup', u'\\ll':u'≪', u'\\lll':u'⋘', u'\\ln':u'ln', + u'\\lesssim':u'≲', u'\\lfloor':u'⌊', u'\\lg':u'lg', u'\\lgroup':u'⟮', + u'\\lhd':u'⊲', u'\\libra':u'♎', u'\\lightning':u'↯', u'\\limg':u'⦇', + u'\\liminf':u'liminf', u'\\limsup':u'limsup', u'\\ll':u'≪', + u'\\llbracket':u'⟦', u'\\llcorner':u'⌞', u'\\lll':u'⋘', u'\\ln':u'ln', u'\\lneqq':u'≨', u'\\lnot':u'¬', u'\\lnsim':u'⋦', u'\\log':u'log', u'\\longleftarrow':u'⟵', u'\\longleftrightarrow':u'⟷', u'\\longmapsto':u'⟼', u'\\longrightarrow':u'⟶', u'\\looparrowleft':u'↫', u'\\looparrowright':u'↬', u'\\lor':u'∨', u'\\lozenge':u'◊', - u'\\ltimes':u'⋉', u'\\lyxlock':u'', 
u'\\male':u'♂', u'\\maltese':u'✠', - u'\\mapsfrom':u'↤', u'\\mapsto':u'↦', u'\\mathcircumflex':u'^', - u'\\max':u'max', u'\\measuredangle':u'∡', u'\\mercury':u'☿', - u'\\mho':u'℧', u'\\mid':u'∣', u'\\min':u'min', u'\\models':u'⊨', - u'\\mp':u'∓', u'\\multimap':u'⊸', u'\\nLeftarrow':u'⇍', - u'\\nLeftrightarrow':u'⇎', u'\\nRightarrow':u'⇏', u'\\nVDash':u'⊯', - u'\\nabla':u'∇', u'\\napprox':u'≉', u'\\natural':u'♮', u'\\ncong':u'≇', - u'\\nearrow':u'↗', u'\\neg':u'¬', u'\\neg)':u'¬', u'\\neptune':u'♆', - u'\\nequiv':u'≢', u'\\newline':u'
', u'\\nexists':u'∄', - u'\\ngeqslant':u'≱', u'\\ngtr':u'≯', u'\\ngtrless':u'≹', u'\\ni':u'∋', - u'\\ni)':u'∋', u'\\nleftarrow':u'↚', u'\\nleftrightarrow':u'↮', - u'\\nleqslant':u'≰', u'\\nless':u'≮', u'\\nlessgtr':u'≸', u'\\nmid':u'∤', - u'\\nolimits':u'', u'\\nonumber':u'', u'\\not':u'¬', u'\\not<':u'≮', - u'\\not=':u'≠', u'\\not>':u'≯', u'\\notbackslash':u'⍀', u'\\notin':u'∉', - u'\\notni':u'∌', u'\\notslash':u'⌿', u'\\nparallel':u'∦', - u'\\nprec':u'⊀', u'\\nrightarrow':u'↛', u'\\nsim':u'≁', u'\\nsimeq':u'≄', + u'\\lrcorner':u'⌟', u'\\ltimes':u'⋉', u'\\lyxlock':u'', u'\\male':u'♂', + u'\\maltese':u'✠', u'\\mapsfrom':u'↤', u'\\mapsto':u'↦', + u'\\mathcircumflex':u'^', u'\\max':u'max', u'\\measuredangle':u'∡', + u'\\medbullet':u'⚫', u'\\medcirc':u'⚪', u'\\mercury':u'☿', u'\\mho':u'℧', + u'\\mid':u'∣', u'\\min':u'min', u'\\models':u'⊨', u'\\mp':u'∓', + u'\\multimap':u'⊸', u'\\nLeftarrow':u'⇍', u'\\nLeftrightarrow':u'⇎', + u'\\nRightarrow':u'⇏', u'\\nVDash':u'⊯', u'\\nabla':u'∇', + u'\\napprox':u'≉', u'\\natural':u'♮', u'\\ncong':u'≇', u'\\nearrow':u'↗', + u'\\neg':u'¬', u'\\neg)':u'¬', u'\\neptune':u'♆', u'\\nequiv':u'≢', + u'\\newline':u'
', u'\\nexists':u'∄', u'\\ngeqslant':u'≱', + u'\\ngtr':u'≯', u'\\ngtrless':u'≹', u'\\ni':u'∋', u'\\ni)':u'∋', + u'\\nleftarrow':u'↚', u'\\nleftrightarrow':u'↮', u'\\nleqslant':u'≰', + u'\\nless':u'≮', u'\\nlessgtr':u'≸', u'\\nmid':u'∤', u'\\nolimits':u'', + u'\\nonumber':u'', u'\\not':u'¬', u'\\not<':u'≮', u'\\not=':u'≠', + u'\\not>':u'≯', u'\\notbackslash':u'⍀', u'\\notin':u'∉', u'\\notni':u'∌', + u'\\notslash':u'⌿', u'\\nparallel':u'∦', u'\\nprec':u'⊀', + u'\\nrightarrow':u'↛', u'\\nsim':u'≁', u'\\nsimeq':u'≄', u'\\nsqsubset':u'⊏̸', u'\\nsubseteq':u'⊈', u'\\nsucc':u'⊁', u'\\nsucccurlyeq':u'⋡', u'\\nsupset':u'⊅', u'\\nsupseteq':u'⊉', u'\\ntriangleleft':u'⋪', u'\\ntrianglelefteq':u'⋬', @@ -485,31 +508,40 @@ class FormulaConfig(object): u'\\ointclockwise':u'', u'\\ointctrclockwise':u'', u'\\ominus':u'⊖', u'\\oplus':u'⊕', u'\\oslash':u'⊘', u'\\otimes':u'⊗', - u'\\owns':u'∋', u'\\parallel':u'∥', u'\\partial':u'∂', u'\\perp':u'⊥', - u'\\pisces':u'♓', u'\\pitchfork':u'⋔', u'\\pluto':u'♇', u'\\pm':u'±', - u'\\pointer':u'➪', u'\\pounds':u'£', u'\\prec':u'≺', - u'\\preccurlyeq':u'≼', u'\\preceq':u'≼', u'\\precsim':u'≾', - u'\\prime':u'′', u'\\prompto':u'∝', u'\\qquad':u' ', u'\\quad':u' ', - u'\\quarternote':u'♩', u'\\rangle':u'⟩', u'\\rbrace':u'}', - u'\\rbrace)':u'}', u'\\rbrack':u']', u'\\rceil':u'⌉', u'\\rfloor':u'⌋', - u'\\rhd':u'⊳', u'\\rightarrow)':u'→', u'\\rightarrowtail':u'↣', + u'\\owns':u'∋', u'\\parallel':u'∥', u'\\partial':u'∂', u'\\pencil':u'✎', + u'\\perp':u'⊥', u'\\pisces':u'♓', u'\\pitchfork':u'⋔', u'\\pluto':u'♇', + u'\\pm':u'±', u'\\pointer':u'➪', u'\\pointright':u'☞', u'\\pounds':u'£', + u'\\prec':u'≺', u'\\preccurlyeq':u'≼', u'\\preceq':u'≼', + u'\\precsim':u'≾', u'\\prime':u'′', u'\\prompto':u'∝', u'\\qoppa':u'ϙ', + u'\\qquad':u' ', u'\\quad':u' ', u'\\quarternote':u'♩', + u'\\radiation':u'☢', u'\\rang':u'⟫', u'\\rangle':u'⟩', u'\\rblot':u'⦊', + u'\\rbrace':u'}', u'\\rbrace)':u'}', u'\\rbrack':u']', u'\\rceil':u'⌉', + u'\\recycle':u'♻', u'\\rfloor':u'⌋', u'\\rgroup':u'⟯', u'\\rhd':u'⊳', + u'\\rightangle':u'∟', u'\\rightarrow)':u'→', u'\\rightarrowtail':u'↣', u'\\rightarrowtobar':u'⇥', u'\\rightharpoondown':u'⇁', u'\\rightharpoonup':u'⇀', u'\\rightharpooondown':u'⇁', u'\\rightharpooonup':u'⇀', u'\\rightleftarrows':u'⇄', u'\\rightleftharpoons':u'⇌', u'\\rightmoon':u'☽', u'\\rightrightarrows':u'⇉', u'\\rightrightharpoons':u'⥤', - u'\\rightthreetimes':u'⋌', u'\\risingdotseq':u'≓', u'\\rtimes':u'⋊', + u'\\rightthreetimes':u'⋌', u'\\rimg':u'⦈', u'\\risingdotseq':u'≓', + u'\\rrbracket':u'⟧', u'\\rsub':u'⩥', u'\\rtimes':u'⋊', u'\\sagittarius':u'♐', u'\\saturn':u'♄', u'\\scorpio':u'♏', - u'\\searrow':u'↘', u'\\sec':u'sec', u'\\setminus':u'∖', u'\\sharp':u'♯', - u'\\simeq':u'≃', u'\\sin':u'sin', u'\\sinh':u'sinh', u'\\slash':u'∕', - u'\\smile':u'⌣', u'\\smiley':u'☺', u'\\spadesuit':u'♠', - u'\\sphericalangle':u'∢', u'\\sqcap':u'⊓', u'\\sqcup':u'⊔', - u'\\sqsubset':u'⊏', u'\\sqsubseteq':u'⊑', u'\\sqsupset':u'⊐', - u'\\sqsupseteq':u'⊒', u'\\square':u'□', u'\\star':u'⋆', + u'\\searrow':u'↘', u'\\sec':u'sec', u'\\second':u'″', u'\\setminus':u'∖', + u'\\sharp':u'♯', u'\\simeq':u'≃', u'\\sin':u'sin', u'\\sinh':u'sinh', + u'\\sixteenthnote':u'♬', u'\\skull':u'☠', u'\\slash':u'∕', + u'\\smallsetminus':u'∖', u'\\smalltriangledown':u'▿', + u'\\smalltriangleleft':u'◃', u'\\smalltriangleright':u'▹', + u'\\smalltriangleup':u'▵', u'\\smile':u'⌣', u'\\smiley':u'☺', + u'\\spadesuit':u'♠', u'\\spddot':u'¨', u'\\sphat':u'', + u'\\sphericalangle':u'∢', u'\\spot':u'⦁', u'\\sptilde':u'~', + 
u'\\sqcap':u'⊓', u'\\sqcup':u'⊔', u'\\sqsubset':u'⊏', + u'\\sqsubseteq':u'⊑', u'\\sqsupset':u'⊐', u'\\sqsupseteq':u'⊒', + u'\\square':u'□', u'\\sslash':u'⫽', u'\\star':u'⋆', u'\\steaming':u'☕', u'\\subseteqq':u'⫅', u'\\subsetneqq':u'⫋', u'\\succ':u'≻', u'\\succcurlyeq':u'≽', u'\\succeq':u'≽', u'\\succnsim':u'⋩', u'\\succsim':u'≿', u'\\sun':u'☼', u'\\sup':u'sup', u'\\supseteqq':u'⫆', - u'\\supsetneqq':u'⫌', u'\\surd':u'√', u'\\swarrow':u'↙', u'\\tan':u'tan', + u'\\supsetneqq':u'⫌', u'\\surd':u'√', u'\\swarrow':u'↙', + u'\\swords':u'⚔', u'\\talloblong':u'⫾', u'\\tan':u'tan', u'\\tanh':u'tanh', u'\\taurus':u'♉', u'\\textasciicircum':u'^', u'\\textasciitilde':u'~', u'\\textbackslash':u'\\', u'\\textcopyright':u'©\'', u'\\textdegree':u'°', u'\\textellipsis':u'…', @@ -520,20 +552,21 @@ class FormulaConfig(object): u'\\textregistered':u'®', u'\\textrightarrow':u'→', u'\\textsection':u'§', u'\\texttrademark':u'™', u'\\texttwosuperior':u'²', u'\\textvisiblespace':u' ', - u'\\therefore':u'∴', u'\\top':u'⊤', u'\\triangle':u'△', + u'\\therefore':u'∴', u'\\third':u'‴', u'\\top':u'⊤', u'\\triangle':u'△', u'\\triangleleft':u'⊲', u'\\trianglelefteq':u'⊴', u'\\triangleq':u'≜', u'\\triangleright':u'▷', u'\\trianglerighteq':u'⊵', u'\\twoheadleftarrow':u'↞', u'\\twoheadrightarrow':u'↠', - u'\\twonotes':u'♫', u'\\udot':u'⊍', u'\\unlhd':u'⊴', u'\\unrhd':u'⊵', - u'\\unrhl':u'⊵', u'\\uparrow':u'↑', u'\\updownarrow':u'↕', - u'\\upharpoonleft':u'↿', u'\\upharpoonright':u'↾', u'\\uplus':u'⊎', - u'\\upuparrows':u'⇈', u'\\uranus':u'♅', u'\\vDash':u'⊨', - u'\\varclubsuit':u'♧', u'\\vardiamondsuit':u'♦', u'\\varheartsuit':u'♥', - u'\\varnothing':u'∅', u'\\varspadesuit':u'♤', u'\\vdash':u'⊢', - u'\\vdots':u'⋮', u'\\vee':u'∨', u'\\vee)':u'∨', u'\\veebar':u'⊻', - u'\\vert':u'∣', u'\\virgo':u'♍', u'\\wedge':u'∧', u'\\wedge)':u'∧', - u'\\wp':u'℘', u'\\wr':u'≀', u'\\yen':u'¥', u'\\{':u'{', u'\\|':u'∥', - u'\\}':u'}', + u'\\twonotes':u'♫', u'\\udot':u'⊍', u'\\ulcorner':u'⌜', u'\\unlhd':u'⊴', + u'\\unrhd':u'⊵', u'\\unrhl':u'⊵', u'\\uparrow':u'↑', + u'\\updownarrow':u'↕', u'\\upharpoonleft':u'↿', u'\\upharpoonright':u'↾', + u'\\uplus':u'⊎', u'\\upuparrows':u'⇈', u'\\uranus':u'♅', + u'\\urcorner':u'⌝', u'\\vDash':u'⊨', u'\\varclubsuit':u'♧', + u'\\vardiamondsuit':u'♦', u'\\varheartsuit':u'♥', u'\\varnothing':u'∅', + u'\\varspadesuit':u'♤', u'\\vdash':u'⊢', u'\\vdots':u'⋮', u'\\vee':u'∨', + u'\\vee)':u'∨', u'\\veebar':u'⊻', u'\\vert':u'∣', u'\\virgo':u'♍', + u'\\warning':u'⚠', u'\\wasylozenge':u'⌑', u'\\wedge':u'∧', + u'\\wedge)':u'∧', u'\\wp':u'℘', u'\\wr':u'≀', u'\\yen':u'¥', + u'\\yinyang':u'☯', u'\\{':u'{', u'\\|':u'∥', u'\\}':u'}', } decoratedcommand = { @@ -580,7 +613,9 @@ class FormulaConfig(object): } hybridfunctions = { - + u'\\addcontentsline':[u'{$p!}{$q!}{$r!}',u'f0{}',u'ignored',], + u'\\addtocontents':[u'{$p!}{$q!}',u'f0{}',u'ignored',], + u'\\backmatter':[u'',u'f0{}',u'ignored',], u'\\binom':[u'{$1}{$2}',u'f2{(}f0{f1{$1}f1{$2}}f2{)}',u'span class="binom"',u'span class="binomstack"',u'span class="bigsymbol"',], u'\\boxed':[u'{$1}',u'f0{$1}',u'span class="boxed"',], u'\\cfrac':[u'[$p!]{$1}{$2}',u'f0{f3{(}f1{$1}f3{)/(}f2{$2}f3{)}}',u'span class="fullfraction"',u'span class="numerator align-$p"',u'span class="denominator"',u'span class="ignored"',], @@ -589,15 +624,21 @@ class FormulaConfig(object): u'\\dbinom':[u'{$1}{$2}',u'(f0{f1{f2{$1}}f1{f2{ }}f1{f2{$2}}})',u'span class="binomial"',u'span class="binomrow"',u'span class="binomcell"',], u'\\dfrac':[u'{$1}{$2}',u'f0{f3{(}f1{$1}f3{)/(}f2{$2}f3{)}}',u'span 
class="fullfraction"',u'span class="numerator"',u'span class="denominator"',u'span class="ignored"',], u'\\displaystyle':[u'{$1}',u'f0{$1}',u'span class="displaystyle"',], + u'\\fancyfoot':[u'[$p!]{$q!}',u'f0{}',u'ignored',], + u'\\fancyhead':[u'[$p!]{$q!}',u'f0{}',u'ignored',], u'\\fbox':[u'{$1}',u'f0{$1}',u'span class="fbox"',], u'\\fboxrule':[u'{$p!}',u'f0{}',u'ignored',], u'\\fboxsep':[u'{$p!}',u'f0{}',u'ignored',], u'\\fcolorbox':[u'{$p!}{$q!}{$1}',u'f0{$1}',u'span class="boxed" style="border-color: $p; background: $q;"',], u'\\frac':[u'{$1}{$2}',u'f0{f3{(}f1{$1}f3{)/(}f2{$2}f3{)}}',u'span class="fraction"',u'span class="numerator"',u'span class="denominator"',u'span class="ignored"',], u'\\framebox':[u'[$p!][$q!]{$1}',u'f0{$1}',u'span class="framebox align-$q" style="width: $p;"',], + u'\\frontmatter':[u'',u'f0{}',u'ignored',], u'\\href':[u'[$o]{$u!}{$t!}',u'f0{$t}',u'a href="$u"',], u'\\hspace':[u'{$p!}',u'f0{ }',u'span class="hspace" style="width: $p;"',], u'\\leftroot':[u'{$p!}',u'f0{ }',u'span class="leftroot" style="width: $p;px"',], + u'\\mainmatter':[u'',u'f0{}',u'ignored',], + u'\\markboth':[u'{$p!}{$q!}',u'f0{}',u'ignored',], + u'\\markright':[u'{$p!}',u'f0{}',u'ignored',], u'\\nicefrac':[u'{$1}{$2}',u'f0{f1{$1}⁄f2{$2}}',u'span class="fraction"',u'sup class="numerator"',u'sub class="denominator"',u'span class="ignored"',], u'\\parbox':[u'[$p!]{$w!}{$1}',u'f0{1}',u'div class="Boxed" style="width: $w;"',], u'\\raisebox':[u'{$p!}{$1}',u'f0{$1.font}',u'span class="raisebox" style="vertical-align: $p;"',], @@ -610,6 +651,7 @@ class FormulaConfig(object): u'\\tbinom':[u'{$1}{$2}',u'(f0{f1{f2{$1}}f1{f2{ }}f1{f2{$2}}})',u'span class="binomial"',u'span class="binomrow"',u'span class="binomcell"',], u'\\textcolor':[u'{$p!}{$1}',u'f0{$1}',u'span style="color: $p;"',], u'\\textstyle':[u'{$1}',u'f0{$1}',u'span class="textstyle"',], + u'\\thispagestyle':[u'{$p!}',u'f0{}',u'ignored',], u'\\unit':[u'[$0]{$1}',u'$0f0{$1.font}',u'span class="unit"',], u'\\unitfrac':[u'[$0]{$1}{$2}',u'$0f0{f1{$1.font}⁄f2{$2.font}}',u'span class="fraction"',u'sup class="unit"',u'sub class="unit"',], u'\\uproot':[u'{$p!}',u'f0{ }',u'span class="uproot" style="width: $p;px"',], @@ -627,24 +669,24 @@ class FormulaConfig(object): } limitcommands = { - u'\\int':u'∫', u'\\intop':u'∫', u'\\lim':u'lim', u'\\prod':u'∏', - u'\\smallint':u'∫', u'\\sum':u'∑', + u'\\biginterleave':u'⫼', u'\\bigsqcap':u'⨅', u'\\fint':u'⨏', + u'\\iiiint':u'⨌', u'\\int':u'∫', u'\\intop':u'∫', u'\\lim':u'lim', + u'\\prod':u'∏', u'\\smallint':u'∫', u'\\sqint':u'⨖', u'\\sum':u'∑', + u'\\varointclockwise':u'∲', u'\\varprod':u'⨉', u'\\zcmp':u'⨟', + u'\\zhide':u'⧹', u'\\zpipe':u'⨠', u'\\zproject':u'⨡', } - # TODO: setting for simple enlarged vs. 
piecewise symbols - for key in (u'\\int', u'\\intop', u'\\prod', u'\\sum'): - limitcommands[key] = '%s' % limitcommands[key] misccommands = { u'\\limits':u'LimitPreviousCommand', u'\\newcommand':u'MacroDefinition', u'\\renewcommand':u'MacroDefinition', u'\\setcounter':u'SetCounterFunction', u'\\tag':u'FormulaTag', - u'\\tag*':u'FormulaTag', + u'\\tag*':u'FormulaTag', u'\\today':u'TodayCommand', } modified = { - u'\n':u'', u' ':u'', u'$':u'', u'&':u' ', u'\'':u'’', u'+':u' + ', - u',':u', ', u'-':u' − ', u'/':u' ⁄ ', u'<':u' < ', u'=':u' = ', - u'>':u' > ', u'@':u'', u'~':u'', + u'\n':u'', u' ':u'', u'$':u'', u'&':u' ', u'\'':u'’', u'+':u' + ', + u',':u', ', u'-':u' − ', u'/':u' ⁄ ', u':':u' : ', u'<':u' < ', + u'=':u' = ', u'>':u' > ', u'@':u'', u'~':u'', } onefunctions = { @@ -664,13 +706,58 @@ class FormulaConfig(object): } spacedcommands = { - u'\\Leftrightarrow':u'⇔', u'\\Rightarrow':u'⇒', u'\\approx':u'≈', - u'\\dashrightarrow':u'⇢', u'\\equiv':u'≡', u'\\ge':u'≥', u'\\geq':u'≥', + u'\\Bot':u'⫫', u'\\Doteq':u'≑', u'\\DownArrowBar':u'⤓', + u'\\DownLeftTeeVector':u'⥞', u'\\DownLeftVectorBar':u'⥖', + u'\\DownRightTeeVector':u'⥟', u'\\DownRightVectorBar':u'⥗', + u'\\Equal':u'⩵', u'\\LeftArrowBar':u'⇤', u'\\LeftDownTeeVector':u'⥡', + u'\\LeftDownVectorBar':u'⥙', u'\\LeftTeeVector':u'⥚', + u'\\LeftTriangleBar':u'⧏', u'\\LeftUpTeeVector':u'⥠', + u'\\LeftUpVectorBar':u'⥘', u'\\LeftVectorBar':u'⥒', + u'\\Leftrightarrow':u'⇔', u'\\Longmapsfrom':u'⟽', u'\\Longmapsto':u'⟾', + u'\\MapsDown':u'↧', u'\\MapsUp':u'↥', u'\\Nearrow':u'⇗', + u'\\NestedGreaterGreater':u'⪢', u'\\NestedLessLess':u'⪡', + u'\\NotGreaterLess':u'≹', u'\\NotGreaterTilde':u'≵', + u'\\NotLessTilde':u'≴', u'\\Nwarrow':u'⇖', u'\\Proportion':u'∷', + u'\\RightArrowBar':u'⇥', u'\\RightDownTeeVector':u'⥝', + u'\\RightDownVectorBar':u'⥕', u'\\RightTeeVector':u'⥛', + u'\\RightTriangleBar':u'⧐', u'\\RightUpTeeVector':u'⥜', + u'\\RightUpVectorBar':u'⥔', u'\\RightVectorBar':u'⥓', + u'\\Rightarrow':u'⇒', u'\\Same':u'⩶', u'\\Searrow':u'⇘', + u'\\Swarrow':u'⇙', u'\\Top':u'⫪', u'\\UpArrowBar':u'⤒', u'\\VDash':u'⊫', + u'\\approx':u'≈', u'\\approxeq':u'≊', u'\\backsim':u'∽', u'\\barin':u'⋶', + u'\\barleftharpoon':u'⥫', u'\\barrightharpoon':u'⥭', u'\\bij':u'⤖', + u'\\coloneq':u'≔', u'\\corresponds':u'≙', u'\\curlyeqprec':u'⋞', + u'\\curlyeqsucc':u'⋟', u'\\dashrightarrow':u'⇢', u'\\dlsh':u'↲', + u'\\downdownharpoons':u'⥥', u'\\downuparrows':u'⇵', + u'\\downupharpoons':u'⥯', u'\\drsh':u'↳', u'\\eqslantgtr':u'⪖', + u'\\eqslantless':u'⪕', u'\\equiv':u'≡', u'\\ffun':u'⇻', u'\\finj':u'⤕', + u'\\ge':u'≥', u'\\geq':u'≥', u'\\ggcurly':u'⪼', u'\\gnapprox':u'⪊', + u'\\gneq':u'⪈', u'\\gtrapprox':u'⪆', u'\\hash':u'⋕', u'\\iddots':u'⋰', u'\\implies':u' ⇒ ', u'\\in':u'∈', u'\\le':u'≤', u'\\leftarrow':u'←', - u'\\leq':u'≤', u'\\ne':u'≠', u'\\neq':u'≠', u'\\not\\in':u'∉', - u'\\propto':u'∝', u'\\rightarrow':u'→', u'\\rightsquigarrow':u'⇝', - u'\\sim':u'~', u'\\subset':u'⊂', u'\\subseteq':u'⊆', u'\\supset':u'⊃', - u'\\supseteq':u'⊇', u'\\times':u'×', u'\\to':u'→', + u'\\leftarrowtriangle':u'⇽', u'\\leftbarharpoon':u'⥪', + u'\\leftrightarrowtriangle':u'⇿', u'\\leftrightharpoon':u'⥊', + u'\\leftrightharpoondown':u'⥐', u'\\leftrightharpoonup':u'⥎', + u'\\leftrightsquigarrow':u'↭', u'\\leftslice':u'⪦', + u'\\leftsquigarrow':u'⇜', u'\\leftupdownharpoon':u'⥑', u'\\leq':u'≤', + u'\\lessapprox':u'⪅', u'\\llcurly':u'⪻', u'\\lnapprox':u'⪉', + u'\\lneq':u'⪇', u'\\longmapsfrom':u'⟻', u'\\multimapboth':u'⧟', + u'\\multimapdotbothA':u'⊶', u'\\multimapdotbothB':u'⊷', + 
u'\\multimapinv':u'⟜', u'\\nVdash':u'⊮', u'\\ne':u'≠', u'\\neq':u'≠', + u'\\ngeq':u'≱', u'\\nleq':u'≰', u'\\nni':u'∌', u'\\not\\in':u'∉', + u'\\notasymp':u'≭', u'\\npreceq':u'⋠', u'\\nsqsubseteq':u'⋢', + u'\\nsqsupseteq':u'⋣', u'\\nsubset':u'⊄', u'\\nsucceq':u'⋡', + u'\\pfun':u'⇸', u'\\pinj':u'⤔', u'\\precapprox':u'⪷', u'\\preceqq':u'⪳', + u'\\precnapprox':u'⪹', u'\\precnsim':u'⋨', u'\\propto':u'∝', + u'\\psur':u'⤀', u'\\rightarrow':u'→', u'\\rightarrowtriangle':u'⇾', + u'\\rightbarharpoon':u'⥬', u'\\rightleftharpoon':u'⥋', + u'\\rightslice':u'⪧', u'\\rightsquigarrow':u'⇝', + u'\\rightupdownharpoon':u'⥏', u'\\sim':u'~', u'\\strictfi':u'⥼', + u'\\strictif':u'⥽', u'\\subset':u'⊂', u'\\subseteq':u'⊆', + u'\\subsetneq':u'⊊', u'\\succapprox':u'⪸', u'\\succeqq':u'⪴', + u'\\succnapprox':u'⪺', u'\\supset':u'⊃', u'\\supseteq':u'⊇', + u'\\supsetneq':u'⊋', u'\\times':u'×', u'\\to':u'→', + u'\\updownarrows':u'⇅', u'\\updownharpoons':u'⥮', u'\\upupharpoons':u'⥣', + u'\\vartriangleleft':u'⊲', u'\\vartriangleright':u'⊳', } starts = { @@ -695,7 +782,7 @@ class FormulaConfig(object): unmodified = { - u'characters':[u'.',u'*',u'€',u'(',u')',u'[',u']',u':',u'·',u'!',u';',u'|',u'§',u'"',], + u'characters':[u'.',u'*',u'€',u'(',u')',u'[',u']',u'·',u'!',u';',u'|',u'§',u'"',], } urls = { @@ -706,7 +793,7 @@ class GeneralConfig(object): "Configuration class from elyxer.config file" version = { - u'date':u'2011-06-27', u'lyxformat':u'413', u'number':u'1.2.3', + u'date':u'2015-02-26', u'lyxformat':u'413', u'number':u'1.2.5', } class HeaderConfig(object): @@ -735,6 +822,7 @@ class ImageConfig(object): u'imagemagick':u'convert[ -density $scale][ -define $format:use-cropbox=true] "$input" "$output"', u'inkscape':u'inkscape "$input" --export-png="$output"', + u'lyx':u'lyx -C "$input" "$output"', } cropboxformats = { @@ -860,6 +948,10 @@ class TagConfig(object): u'Comment':u'', u'Greyedout':u'span class="greyedout"', u'Note':u'', } + script = { + u'subscript':u'sub', u'superscript':u'sup', + } + shaped = { u'italic':u'i', u'slanted':u'i', u'smallcaps':u'span class="versalitas"', } @@ -889,7 +981,8 @@ class TranslationConfig(object): languages = { u'american':u'en', u'british':u'en', u'deutsch':u'de', u'dutch':u'nl', - u'english':u'en', u'french':u'fr', u'ngerman':u'de', u'spanish':u'es', + u'english':u'en', u'french':u'fr', u'ngerman':u'de', u'russian':u'ru', + u'spanish':u'es', } @@ -936,7 +1029,7 @@ class CommandLineParser(object): initial = args[0] del args[0] return key, self.readquoted(args, initial) - value = args[0] + value = args[0].decode('utf-8') del args[0] if isinstance(current, list): current.append(value) @@ -945,8 +1038,10 @@ class CommandLineParser(object): def readquoted(self, args, initial): "Read a value between quotes" + Trace.error('Oops') value = initial[1:] while len(args) > 0 and not args[0].endswith('"') and not args[0].startswith('--'): + Trace.error('Appending ' + args[0]) value += ' ' + args[0] del args[0] if len(args) == 0 or args[0].startswith('--'): @@ -983,6 +1078,7 @@ class Options(object): unicode = False iso885915 = False css = [] + favicon = '' title = None directory = None destdirectory = None @@ -1062,6 +1158,8 @@ class Options(object): Options.copyimages = True if Options.css == []: Options.css = ['http://elyxer.nongnu.org/lyx.css'] + if Options.favicon == '': + pass # no default favicon if Options.html: Options.simplemath = True if Options.toc and not Options.tocfor: @@ -1069,6 +1167,8 @@ class Options(object): Options.tocfor = Options.toctarget if Options.nocopy: 
Trace.error('Option --nocopy is deprecated; it is no longer needed') + if Options.jsmath: + Trace.error('Option --jsmath is deprecated; use --mathjax instead') # set in Trace if necessary for param in dir(Trace): if param.endswith('mode'): @@ -1088,6 +1188,7 @@ class Options(object): return Options.marginfoot = False Options.letterfoot = False + Options.hoverfoot = False options = Options.footnotes.split(',') for option in options: footoption = option + 'foot' @@ -1113,7 +1214,8 @@ class Options(object): Trace.error(' Options for HTML output:') Trace.error(' --title "title": set the generated page title') Trace.error(' --css "file.css": use a custom CSS file') - Trace.error(' --embedcss "file.css": embed styles from elyxer.a CSS file into the output') + Trace.error(' --embedcss "file.css": embed styles from a CSS file into the output') + Trace.error(' --favicon "icon.ico": insert the specified favicon in the header.') Trace.error(' --html: output HTML 4.0 instead of the default XHTML') Trace.error(' --unicode: full Unicode output') Trace.error(' --iso885915: output a document with ISO-8859-15 encoding') @@ -1143,8 +1245,8 @@ class Options(object): Trace.error(' --notoclabels: omit the part labels in the TOC, such as Chapter') Trace.error(' --lowmem: do the conversion on the fly (conserve memory)') Trace.error(' --raw: generate HTML without header or footer.') - Trace.error(' --jsmath "URL": use jsMath from elyxer.the given URL to display equations') - Trace.error(' --mathjax "URL": use MathJax from elyxer.the given URL to display equations') + Trace.error(' --mathjax remote: use MathJax remotely to display equations') + Trace.error(' --mathjax "URL": use MathJax from the given URL to display equations') Trace.error(' --googlecharts: use Google Charts to generate formula images') Trace.error(' --template "file": use a template, put everything in ') Trace.error(' --copyright: add a copyright notice at the bottom') @@ -1152,6 +1254,7 @@ class Options(object): Trace.error(' --toc: (deprecated) create a table of contents') Trace.error(' --toctarget "page": (deprecated) generate a TOC for the given page') Trace.error(' --nocopy: (deprecated) maintained for backwards compatibility') + Trace.error(' --jsmath "URL": use jsMath from the given URL to display equations') sys.exit() def showversion(self): @@ -3536,11 +3639,17 @@ class BarredText(TaggedText): return self.output.tag = TagConfig.barred[self.type] -class LangLine(BlackBox): +class LangLine(TaggedText): "A line with language information" def process(self): - self.lang = self.header[1] + "Only generate a span with lang info when the language is recognized." + lang = self.header[1] + if not lang in TranslationConfig.languages: + self.output = ContentsOutput() + return + isolang = TranslationConfig.languages[lang] + self.output = TaggedOutput().settag('span lang="' + isolang + '"', False) class InsetLength(BlackBox): "A length measure inside an inset." @@ -3908,8 +4017,7 @@ class Reference(Link): self.replace('@', partkey and partkey.number) self.replace(u'¶', partkey and partkey.tocentry) if not '$' in self.formatted or not partkey or not partkey.titlecontents: - if '$' in self.formatted: - Trace.error('No title in ' + unicode(partkey)) + # there is a $ left, but it should go away on preprocessing self.contents = [Constant(self.formatted)] return pieces = self.formatted.split('$') @@ -3993,7 +4101,7 @@ class FormulaCommand(FormulaBit): def emptycommand(self, pos): """Check for an empty command: look for command disguised as ending. 
- Special case against '{ \{ \} }' situation.""" + Special case against '{ \\{ \\} }' situation.""" command = '' if not pos.isout(): ending = pos.nextending() @@ -4452,7 +4560,7 @@ class EquationEnvironment(MultiRowFormula): self.parserows(pos) class BeginCommand(CommandBit): - "A \\begin{}...\end command and what it entails (array, cases, aligned)" + "A \\begin{}...\\end command and what it entails (array, cases, aligned)" commandmap = {FormulaConfig.array['begin']:''} @@ -4481,6 +4589,8 @@ class BeginCommand(CommandBit): FormulaCommand.types += [BeginCommand] +import datetime + class CombiningFunction(OneParamFunction): @@ -4697,6 +4807,16 @@ class BracketProcessor(MathsProcessor): command.output = ContentsOutput() command.contents = bracket.getcontents() +class TodayCommand(EmptyCommand): + "Shows today's date." + + commandmap = None + + def parsebit(self, pos): + "Parse a command without parameters" + self.output = FixedOutput() + self.html = [datetime.date.today().strftime('%b %d, %Y')] + FormulaCommand.types += [ DecoratingFunction, CombiningFunction, LimitCommand, BracketCommand, diff --git a/Libs/docutils/utils/math/tex2mathml_extern.py b/Libs/docutils/utils/math/tex2mathml_extern.py new file mode 100644 index 0000000..ccc5593 --- /dev/null +++ b/Libs/docutils/utils/math/tex2mathml_extern.py @@ -0,0 +1,147 @@ +#!/usr/bin/env python +# -*- coding: utf-8 -*- + +# :Id: $Id: tex2mathml_extern.py 7861 2015-04-10 23:48:51Z milde $ +# :Copyright: © 2015 Günter Milde. +# :License: Released under the terms of the `2-Clause BSD license`_, in short: +# +# Copying and distribution of this file, with or without modification, +# are permitted in any medium without royalty provided the copyright +# notice and this notice are preserved. +# This file is offered as-is, without any warranty. +# +# .. _2-Clause BSD license: http://www.spdx.org/licenses/BSD-2-Clause + +# Wrappers for TeX->MathML conversion by external tools +# ===================================================== + +import subprocess + +document_template = r"""\documentclass{article} +\usepackage{amsmath} +\begin{document} +%s +\end{document} +""" + +def latexml(math_code, reporter=None): + """Convert LaTeX math code to MathML with LaTeXML_ + + .. _LaTeXML: http://dlmf.nist.gov/LaTeXML/ + """ + p = subprocess.Popen(['latexml', + '-', # read from stdin + # '--preload=amsmath', + '--inputencoding=utf8', + ], + stdin=subprocess.PIPE, + stdout=subprocess.PIPE, + stderr=subprocess.PIPE, + close_fds=True) + p.stdin.write((document_template % math_code).encode('utf8')) + p.stdin.close() + latexml_code = p.stdout.read() + latexml_err = p.stderr.read().decode('utf8') + if reporter and latexml_err.find('Error') >= 0 or not latexml_code: + reporter.error(latexml_err) + + post_p = subprocess.Popen(['latexmlpost', + '-', + '--nonumbersections', + '--format=xhtml', + # '--linelength=78', # experimental + '--' + ], + stdin=subprocess.PIPE, + stdout=subprocess.PIPE, + stderr=subprocess.PIPE, + close_fds=True) + post_p.stdin.write(latexml_code) + post_p.stdin.close() + result = post_p.stdout.read().decode('utf8') + post_p_err = post_p.stderr.read().decode('utf8') + if reporter and post_p_err.find('Error') >= 0 or not result: + reporter.error(post_p_err) + + # extract MathML code: + start,end = result.find('')+7 + result = result[start:end] + if 'class="ltx_ERROR' in result: + raise SyntaxError(result) + return result + +def ttm(math_code, reporter=None): + """Convert LaTeX math code to MathML with TtM_ + + .. 
_TtM: http://hutchinson.belmont.ma.us/tth/mml/ + """ + p = subprocess.Popen(['ttm', + # '-i', # italic font for equations. Default roman. + '-u', # unicode character encoding. (Default iso-8859-1). + '-r', # output raw MathML (no preamble or postlude) + ], + stdin=subprocess.PIPE, + stdout=subprocess.PIPE, + stderr=subprocess.PIPE, + close_fds=True) + p.stdin.write((document_template % math_code).encode('utf8')) + p.stdin.close() + result = p.stdout.read() + err = p.stderr.read().decode('utf8') + if err.find('**** Unknown') >= 0: + msg = '\n'.join([line for line in err.splitlines() + if line.startswith('****')]) + raise SyntaxError('\nMessage from external converter TtM:\n'+ msg) + if reporter and err.find('**** Error') >= 0 or not result: + reporter.error(err) + start,end = result.find('')+7 + result = result[start:end] + return result + +def blahtexml(math_code, inline=True, reporter=None): + """Convert LaTeX math code to MathML with blahtexml_ + + .. _blahtexml: http://gva.noekeon.org/blahtexml/ + """ + options = ['--mathml', + '--indented', + '--spacing', 'moderate', + '--mathml-encoding', 'raw', + '--other-encoding', 'raw', + '--doctype-xhtml+mathml', + '--annotate-TeX', + ] + if inline: + mathmode_arg = '' + else: + mathmode_arg = 'mode="display"' + options.append('--displaymath') + + p = subprocess.Popen(['blahtexml']+options, + stdin=subprocess.PIPE, + stdout=subprocess.PIPE, + stderr=subprocess.PIPE, + close_fds=True) + p.stdin.write(math_code.encode('utf8')) + p.stdin.close() + result = p.stdout.read().decode('utf8') + err = p.stderr.read().decode('utf8') + + print err + if result.find('') >= 0: + raise SyntaxError('\nMessage from external converter blahtexml:\n' + +result[result.find('')+9:result.find('')]) + if reporter and (err.find('**** Error') >= 0 or not result): + reporter.error(err) + start,end = result.find('')+9, result.find('') + result = ('\n' + '%s\n') % (mathmode_arg, result[start:end]) + return result + +# self-test + +if __name__ == "__main__": + example = ur'\frac{\partial \sin^2(\alpha)}{\partial \vec r} \varpi \, \text{Grüße}' + # print latexml(example).encode('utf8') + # print ttm(example)#.encode('utf8') + print blahtexml(example).encode('utf8') diff --git a/Libs/docutils/utils/punctuation_chars.py b/Libs/docutils/utils/punctuation_chars.py index d1a3997..041cf9c 100644 --- a/Libs/docutils/utils/punctuation_chars.py +++ b/Libs/docutils/utils/punctuation_chars.py @@ -1,6 +1,7 @@ #!/usr/bin/env python # -*- coding: utf-8 -*- -# :Copyright: © 2011 Günter Milde. +# :Id: $Id: punctuation_chars.py 8016 2017-01-17 15:06:17Z milde $ +# :Copyright: © 2011, 2017 Günter Milde. # :License: Released under the terms of the `2-Clause BSD license`_, in short: # # Copying and distribution of this file, with or without modification, @@ -9,29 +10,38 @@ # This file is offered as-is, without any warranty. # # .. _2-Clause BSD license: http://www.spdx.org/licenses/BSD-2-Clause - -# :Id: $Id: punctuation_chars.py 7668 2013-06-04 12:46:30Z milde $ +# +# This file is generated by +# ``docutils/tools/dev/generate_punctuation_chars.py``. +# :: import sys, re import unicodedata -# punctuation characters around inline markup -# =========================================== -# -# This module provides the lists of characters for the implementation of -# the `inline markup recognition rules`_ in the reStructuredText parser -# (states.py) -# -# .. _inline markup recognition rules: -# ../../docs/ref/rst/restructuredtext.html#inline-markup +"""Docutils character category patterns. 
-# Docutils punctuation category sample strings -# -------------------------------------------- -# -# The sample strings are generated by punctuation_samples() and put here -# literal to avoid the time-consuming generation with every Docutils run. -# As the samples are used inside ``[ ]`` in regular expressions, hyphen and -# square brackets are escaped. :: + Patterns for the implementation of the `inline markup recognition rules`_ + in the reStructuredText parser `docutils.parsers.rst.states.py` based + on Unicode character categories. + The patterns are used inside ``[ ]`` in regular expressions. + + Rule (5) requires determination of matching open/close pairs. However, the + pairing of open/close quotes is ambiguous due to different typographic + conventions in different languages. The ``quote_pairs`` function tests + whether two characters form an open/close pair. + + The patterns are generated by + ``docutils/tools/dev/generate_punctuation_chars.py`` to prevent dependence + on the Python version and avoid the time-consuming generation with every + Docutils run. See there for motives and implementation details. + + The category of some characters changed with the development of the + Unicode standard. The current lists are generated with the help of the + "unicodedata" module of Python 2.7.13 (based on Unicode version 5.2.0). + + .. _inline markup recognition rules: + http://docutils.sf.net/docs/ref/rst/restructuredtext.html#inline-markup-recognition-rules +""" openers = (u'"\'(<\\[{\u0f3a\u0f3c\u169b\u2045\u207d\u208d\u2329\u2768' u'\u276a\u276c\u276e\u2770\u2772\u2774\u27c5\u27e6\u27e8\u27ea' @@ -84,272 +94,29 @@ closing_delimiters = u'\\\\.,;!?' # Matching open/close quotes # -------------------------- -# Rule (5) requires determination of matching open/close pairs. However, -# the pairing of open/close quotes is ambigue due to different typographic -# conventions in different languages. - -quote_pairs = {u'\xbb': u'\xbb', # Swedish - u'\u2018': u'\u201a', # Greek - u'\u2019': u'\u2019', # Swedish - u'\u201a': u'\u2018\u2019', # German, Polish - u'\u201c': u'\u201e', # German - u'\u201e': u'\u201c\u201d', - u'\u201d': u'\u201d', # Swedish - u'\u203a': u'\u203a', # Swedish - } +quote_pairs = {# open char: matching closing characters # usage example + u'\xbb': u'\xbb', # » » Swedish + u'\u2018': u'\u201a', # ‘ ‚ Albanian/Greek/Turkish + u'\u2019': u'\u2019', # ’ ’ Swedish + u'\u201a': u'\u2018\u2019', # ‚ ‘ German ‚ ’ Polish + u'\u201c': u'\u201e', # “ „ Albanian/Greek/Turkish + u'\u201e': u'\u201c\u201d', # „ “ German „ ” Polish + u'\u201d': u'\u201d', # ” ” Swedish + u'\u203a': u'\u203a', # › › Swedish + } +"""Additional open/close quote pairs.""" def match_chars(c1, c2): + """Test whether `c1` and `c2` are a matching open/close character pair. + + Matching open/close pairs are at the same position in + `punctuation_chars.openers` and `punctuation_chars.closers`. + The pairing of open/close quotes is ambiguous due to different + typographic conventions in different languages, + so we test for additional matches stored in `quote_pairs`. 
+ """ try: i = openers.index(c1) except ValueError: # c1 not in openers return False - return c2 == closers[i] or c2 in quote_pairs.get(c1, '') - - -# Running this file as a standalone module checks the definitions against a -# re-calculation:: - -if __name__ == '__main__': - - -# Unicode punctuation character categories -# ---------------------------------------- - - unicode_punctuation_categories = { - # 'Pc': 'Connector', # not used in Docutils inline markup recognition - 'Pd': 'Dash', - 'Ps': 'Open', - 'Pe': 'Close', - 'Pi': 'Initial quote', # may behave like Ps or Pe depending on usage - 'Pf': 'Final quote', # may behave like Ps or Pe depending on usage - 'Po': 'Other' - } - """Unicode character categories for punctuation""" - - -# generate character pattern strings -# ================================== - - def unicode_charlists(categories, cp_min=0, cp_max=None): - """Return dictionary of Unicode character lists. - - For each of the `catagories`, an item contains a list with all Unicode - characters with `cp_min` <= code-point <= `cp_max` that belong to - the category. - - The default values check every code-point supported by Python - (`sys.maxint` is 0x10FFFF in a "wide" build and 0xFFFF in a "narrow" - build, i.e. ucs4 and ucs2 respectively). - """ - # Determine highest code point with one of the given categories - # (may shorten the search time considerably if there are many - # categories with not too high characters): - if cp_max is None: - cp_max = max(x for x in xrange(sys.maxunicode+1) - if unicodedata.category(unichr(x)) in categories) - # print cp_max # => 74867 for unicode_punctuation_categories - charlists = {} - for cat in categories: - charlists[cat] = [unichr(x) for x in xrange(cp_min, cp_max+1) - if unicodedata.category(unichr(x)) == cat] - return charlists - - -# Character categories in Docutils -# -------------------------------- - - def punctuation_samples(): - - """Docutils punctuation category sample strings. - - Return list of sample strings for the categories "Open", "Close", - "Delimiters" and "Closing-Delimiters" used in the `inline markup - recognition rules`_. - """ - - # Lists with characters in Unicode punctuation character categories - cp_min = 160 # ASCII chars have special rules for backwards compatibility - ucharlists = unicode_charlists(unicode_punctuation_categories, cp_min) - - # match opening/closing characters - # -------------------------------- - # Rearange the lists to ensure matching characters at the same - # index position. - - # low quotation marks are also used as closers (e.g. in Greek) - # move them to category Pi: - ucharlists['Ps'].remove(u'‚') # 201A SINGLE LOW-9 QUOTATION MARK - ucharlists['Ps'].remove(u'„') # 201E DOUBLE LOW-9 QUOTATION MARK - ucharlists['Pi'] += [u'‚', u'„'] - - ucharlists['Pi'].remove(u'‛') # 201B SINGLE HIGH-REVERSED-9 QUOTATION MARK - ucharlists['Pi'].remove(u'‟') # 201F DOUBLE HIGH-REVERSED-9 QUOTATION MARK - ucharlists['Pf'] += [u'‛', u'‟'] - - # 301F LOW DOUBLE PRIME QUOTATION MARK misses the opening pendant: - ucharlists['Ps'].insert(ucharlists['Pe'].index(u'\u301f'), u'\u301d') - - # print u''.join(ucharlists['Ps']).encode('utf8') - # print u''.join(ucharlists['Pe']).encode('utf8') - # print u''.join(ucharlists['Pi']).encode('utf8') - # print u''.join(ucharlists['Pf']).encode('utf8') - - # The Docutils character categories - # --------------------------------- - # - # The categorization of ASCII chars is non-standard to reduce - # both false positives and need for escaping. 
(see `inline markup - # recognition rules`_) - - # allowed before markup if there is a matching closer - openers = [u'"\'(<\\[{'] - for cat in ('Ps', 'Pi', 'Pf'): - openers.extend(ucharlists[cat]) - - # allowed after markup if there is a matching opener - closers = [u'"\')>\\]}'] - for cat in ('Pe', 'Pf', 'Pi'): - closers.extend(ucharlists[cat]) - - # non-matching, allowed on both sides - delimiters = [u'\\-/:'] - for cat in ('Pd', 'Po'): - delimiters.extend(ucharlists[cat]) - - # non-matching, after markup - closing_delimiters = [r'\\.,;!?'] - - # # Test open/close matching: - # for i in range(min(len(openers),len(closers))): - # print '%4d %s %s' % (i, openers[i].encode('utf8'), - # closers[i].encode('utf8')) - - return [u''.join(chars) for chars in (openers, closers, delimiters, - closing_delimiters)] - - def separate_wide_chars(s): - """Return (s1,s2) with characters above 0xFFFF in s2""" - maxunicode_narrow = 0xFFFF - l1 = [ch for ch in s if ord(ch) <= maxunicode_narrow] - l2 = [ch for ch in s if ord(ch) > maxunicode_narrow] - return ''.join(l1), ''.join(l2) - - def mark_intervals(s): - """Return s with shortcut notation for runs of consecutive characters - - Sort string and replace 'cdef' by 'c-f' and similar. - """ - l =[] - s = [ord(ch) for ch in s] - s.sort() - for n in s: - try: - if l[-1][-1]+1 == n: - l[-1].append(n) - else: - l.append([n]) - except IndexError: - l.append([n]) - - l2 = [] - for i in l: - i = [unichr(n) for n in i] - if len(i) > 2: - i = i[0], u'-', i[-1] - l2.extend(i) - - return ''.join(l2) - - def wrap_string(s, startstring= "(", - endstring = ")", wrap=65): - """Line-wrap a unicode string literal definition.""" - c = len(startstring) - contstring = "'\n" + ' ' * len(startstring) + "u'" - l = [startstring] - for ch in s: - c += 1 - if ch == '\\' and c > wrap: - c = len(startstring) - ch = contstring + ch - l.append(ch) - l.append(endstring) - return ''.join(l) - - -# print results -# ============= - -# (re) create and compare the samples: - - (o, c, d, cd) = punctuation_samples() - o, o_wide = separate_wide_chars(o) - c, c_wide = separate_wide_chars(c) - d, d_wide = separate_wide_chars(d) - d = d[:5] + mark_intervals(d[5:]) - d_wide = mark_intervals(d_wide) - if sys.maxunicode >= 0x10FFFF: # "wide" build - d += d_wide - if o != openers: - print '- openers = ur"""%s"""' % openers.encode('utf8') - print '+ openers = ur"""%s"""' % o.encode('utf8') - if o_wide: - print '+ openers-wide = ur"""%s"""' % o_wide.encode('utf8') - if c != closers: - print '- closers = ur"""%s"""' % closers.encode('utf8') - print '+ closers = ur"""%s"""' % c.encode('utf8') - if c_wide: - print '+ closers-wide = ur"""%s"""' % c_wide.encode('utf8') - if d != delimiters: - print '- delimiters = ur"%s"' % delimiters.encode('utf8') - print '+ delimiters = ur"%s"' % d.encode('utf8') - if cd != closing_delimiters: - print '- closing_delimiters = ur"%s"' % closing_delimiters.encode('utf8') - print '+ closing_delimiters = ur"%s"' % cd.encode('utf8') - # closing_delimiters are all ASCII characters - -# Print literal code to define the character sets: - - # `openers` and `closers` must be verbose and keep order because they are - # also used in `match_chars()`. 
- print wrap_string(repr(o), startstring='openers = (') - print wrap_string(repr(c), startstring='closers = (') - # delimiters: sort and use shortcut for intervals (saves ~150 characters): - print wrap_string(repr(d), startstring='delimiters = (') - # add characters in the upper plane only in a "wide" build: - print 'if sys.maxunicode >= 0x10FFFF: # "wide" build' - print wrap_string(repr(d_wide), startstring=' delimiters += (') - print 'closing_delimiters =', repr(cd) - -# test prints - - # print "wide" Unicode characters: - # ucharlists = unicode_charlists(unicode_punctuation_categories) - # for key in ucharlists: - # if key.endswith('wide'): - # print key, ucharlists[key] - - # print 'openers = ', repr(openers) - # print 'closers = ', repr(closers) - # print 'delimiters = ', repr(delimiters) - # print 'closing_delimiters = ', repr(closing_delimiters) - - # ucharlists = unicode_charlists(unicode_punctuation_categories) - # for cat, chars in ucharlists.items(): - # # print cat, chars - # # compact output (visible with a comprehensive font): - # print (u":%s: %s" % (cat, u''.join(chars))).encode('utf8') - -# verbose print - - # print 'openers:' - # for ch in openers: - # print ch.encode('utf8'), unicodedata.name(ch) - # print 'closers:' - # for ch in closers: - # print ch.encode('utf8'), unicodedata.name(ch) - # print 'delimiters:' - # for ch in delimiters: - # print ch.encode('utf8'), unicodedata.name(ch) - # print 'closing_delimiters:' - # for ch in closing_delimiters: - # print ch.encode('utf8'), unicodedata.name(ch) + return c2 == closers[i] or c2 in quote_pairs.get(c1, u'') diff --git a/Libs/docutils/utils/smartquotes.py b/Libs/docutils/utils/smartquotes.py index 68b1e83..e9dff6c 100644 --- a/Libs/docutils/utils/smartquotes.py +++ b/Libs/docutils/utils/smartquotes.py @@ -1,7 +1,7 @@ #!/usr/bin/python # -*- coding: utf-8 -*- -# :Id: $Id: smartquotes.py 7716 2013-08-21 21:54:57Z milde $ +# :Id: $Id: smartquotes.py 8095 2017-05-30 21:04:18Z milde $ # :Copyright: © 2010 Günter Milde, # original `SmartyPants`_: © 2003 John Gruber # smartypants.py: © 2004, 2007 Chad Miller @@ -17,25 +17,25 @@ r""" -======================== -SmartyPants for Docutils -======================== +========================= +Smart Quotes for Docutils +========================= Synopsis ======== -Smart-quotes for Docutils. +"SmartyPants" is a free web publishing plug-in for Movable Type, Blosxom, and +BBEdit that easily translates plain ASCII punctuation characters into "smart" +typographic punctuation characters. -The original "SmartyPants" is a free web publishing plug-in for Movable Type, -Blosxom, and BBEdit that easily translates plain ASCII punctuation characters -into "smart" typographic punctuation characters. +``smartquotes.py`` is an adaption of "SmartyPants" to Docutils_. -`smartypants.py`, endeavours to be a functional port of -SmartyPants to Python, for use with Pyblosxom_. +* Using Unicode instead of HTML entities for typographic punctuation + characters, it works for any output format that supports Unicode. +* Supports `language specific quote characters`__. + +__ http://en.wikipedia.org/wiki/Non-English_usage_of_quotation_marks -`smartquotes.py` is an adaption of Smartypants to Docutils_. By using Unicode -characters instead of HTML entities for typographic quotes, it works for any -output format that supports Unicode. Authors ======= @@ -43,7 +43,7 @@ Authors `John Gruber`_ did all of the hard work of writing this software in Perl for `Movable Type`_ and almost all of this useful documentation. 
`Chad Miller`_ ported it to Python to use with Pyblosxom_. -Adapted to Docutils_ by Günter Milde +Adapted to Docutils_ by Günter Milde. Additional Credits ================== @@ -160,102 +160,25 @@ appropriate, such as source code or example markup. Backslash Escapes ================= -If you need to use literal straight quotes (or plain hyphens and -periods), SmartyPants accepts the following backslash escape sequences -to force non-smart punctuation. It does so by transforming the escape -sequence into a character: +If you need to use literal straight quotes (or plain hyphens and periods), +`smartquotes` accepts the following backslash escape sequences to force +ASCII-punctuation. Mind, that you need two backslashes as Docutils expands it, +too. -======== ===== ========= -Escape Value Character -======== ===== ========= -``\\\\`` \ \\ -\\" " " -\\' ' ' -\\. . . -\\- - \- -\\` ` \` -======== ===== ========= +======== ========= +Escape Character +======== ========= +``\\`` \\ +``\\"`` \\" +``\\'`` \\' +``\\.`` \\. +``\\-`` \\- +``\\``` \\` +======== ========= This is useful, for example, when you want to use straight quotes as foot and inch marks: 6\\'2\\" tall; a 17\\" iMac. -Options -======= - -For Pyblosxom users, the ``smartypants_attributes`` attribute is where you -specify configuration options. - -Numeric values are the easiest way to configure SmartyPants' behavior: - -"0" - Suppress all transformations. (Do nothing.) -"1" - Performs default SmartyPants transformations: quotes (including - \`\`backticks'' -style), em-dashes, and ellipses. "``--``" (dash dash) - is used to signify an em-dash; there is no support for en-dashes. - -"2" - Same as smarty_pants="1", except that it uses the old-school typewriter - shorthand for dashes: "``--``" (dash dash) for en-dashes, "``---``" - (dash dash dash) - for em-dashes. - -"3" - Same as smarty_pants="2", but inverts the shorthand for dashes: - "``--``" (dash dash) for em-dashes, and "``---``" (dash dash dash) for - en-dashes. - -"-1" - Stupefy mode. Reverses the SmartyPants transformation process, turning - the characters produced by SmartyPants into their ASCII equivalents. - E.g. "“" is turned into a simple double-quote (\"), "—" is - turned into two dashes, etc. - - -The following single-character attribute values can be combined to toggle -individual transformations from within the smarty_pants attribute. For -example, to educate normal quotes and em-dashes, but not ellipses or -\`\`backticks'' -style quotes: - -``py['smartypants_attributes'] = "1"`` - -"q" - Educates normal quote characters: (") and ('). - -"b" - Educates \`\`backticks'' -style double quotes. - -"B" - Educates \`\`backticks'' -style double quotes and \`single' quotes. - -"d" - Educates em-dashes. - -"D" - Educates em-dashes and en-dashes, using old-school typewriter shorthand: - (dash dash) for en-dashes, (dash dash dash) for em-dashes. - -"i" - Educates em-dashes and en-dashes, using inverted old-school typewriter - shorthand: (dash dash) for em-dashes, (dash dash dash) for en-dashes. - -"e" - Educates ellipses. - -"w" - Translates any instance of ``"`` into a normal double-quote character. - This should be of no interest to most people, but of particular interest - to anyone who writes their posts using Dreamweaver, as Dreamweaver - inexplicably uses this entity to represent a literal double-quote - character. 
SmartyPants only educates normal quotes, not entities (because - ordinarily, entities are used for the explicit purpose of representing the - specific character they represent). The "w" option must be used in - conjunction with one (or both) of the other quote options ("q" or "b"). - Thus, if you wish to apply all SmartyPants transformations (quotes, en- - and em-dashes, and ellipses) and also translate ``"`` entities into - regular quotes so SmartyPants can educate them, you should pass the - following to the smarty_pants attribute: - Caveats ======= @@ -274,7 +197,7 @@ If you're the sort of person who just doesn't care, you might well want to continue not caring. Using straight quotes -- and sticking to the 7-bit ASCII character set in general -- is certainly a simpler way to live. -Even if you I *do* care about accurate typography, you still might want to +Even if you *do* care about accurate typography, you still might want to think twice before educating the quote characters in your weblog. One side effect of publishing curly quote characters is that it makes your weblog a bit harder for others to quote from using copy-and-paste. What @@ -300,21 +223,52 @@ Algorithmic Shortcomings ------------------------ One situation in which quotes will get curled the wrong way is when -apostrophes are used at the start of leading contractions. For example: +apostrophes are used at the start of leading contractions. For example:: -``'Twas the night before Christmas.`` + 'Twas the night before Christmas. In the case above, SmartyPants will turn the apostrophe into an opening -single-quote, when in fact it should be a closing one. I don't think -this problem can be solved in the general case -- every word processor -I've tried gets this wrong as well. In such cases, it's best to use the -proper character for closing single-quotes (``’``) by hand. +single-quote, when in fact it should be the `right single quotation mark` +character which is also "the preferred character to use for apostrophe" +(Unicode). I don't think this problem can be solved in the general case -- +every word processor I've tried gets this wrong as well. In such cases, it's +best to use the proper character for closing single-quotes (’) by hand. + +In English, the same character is used for apostrophe and closing single +quote (both plain and "smart" ones). For other locales (French, Italean, +Swiss, ...) "smart" single closing quotes differ from the curly apostrophe. + + .. class:: language-fr + + Il dit : "C'est 'super' !" + +If the apostrophe is used at the end of a word, it cannot be distinguished +from a single quote by the algorithm. Therefore, a text like:: + + .. class:: language-de-CH + + "Er sagt: 'Ich fass' es nicht.'" + +will get a single closing guillemet instead of an apostrophe. + +This can be prevented by use use of the curly apostrophe character (’) in +the source:: + + - "Er sagt: 'Ich fass' es nicht.'" + + "Er sagt: 'Ich fass’ es nicht.'" Version History =============== -1.7 2012-11-19 +1.8: 2017-04-24 + - Command line front-end. + +1.7.1: 2017-03-19 + - Update and extend language-dependent quotes. + - Differentiate apostrophe from single quote. + +1.7: 2012-11-19 - Internationalization: language-dependent quotes. 1.6.1: 2012-11-06 @@ -358,10 +312,72 @@ Version History - Initial release """ +options = r""" +Options +======= + +Numeric values are the easiest way to configure SmartyPants' behavior: + +:0: Suppress all transformations. (Do nothing.) 
+ +:1: Performs default SmartyPants transformations: quotes (including + \`\`backticks'' -style), em-dashes, and ellipses. "``--``" (dash dash) + is used to signify an em-dash; there is no support for en-dashes + +:2: Same as smarty_pants="1", except that it uses the old-school typewriter + shorthand for dashes: "``--``" (dash dash) for en-dashes, "``---``" + (dash dash dash) + for em-dashes. + +:3: Same as smarty_pants="2", but inverts the shorthand for dashes: + "``--``" (dash dash) for em-dashes, and "``---``" (dash dash dash) for + en-dashes. + +:-1: Stupefy mode. Reverses the SmartyPants transformation process, turning + the characters produced by SmartyPants into their ASCII equivalents. + E.g. the LEFT DOUBLE QUOTATION MARK (“) is turned into a simple + double-quote (\"), "—" is turned into two dashes, etc. + + +The following single-character attribute values can be combined to toggle +individual transformations from within the smarty_pants attribute. For +example, ``"1"`` is equivalent to ``"qBde"``. + +:q: Educates normal quote characters: (") and ('). + +:b: Educates \`\`backticks'' -style double quotes. + +:B: Educates \`\`backticks'' -style double quotes and \`single' quotes. + +:d: Educates em-dashes. + +:D: Educates em-dashes and en-dashes, using old-school typewriter shorthand: + (dash dash) for en-dashes, (dash dash dash) for em-dashes. + +:i: Educates em-dashes and en-dashes, using inverted old-school typewriter + shorthand: (dash dash) for em-dashes, (dash dash dash) for en-dashes. + +:e: Educates ellipses. + +:w: Translates any instance of ``"`` into a normal double-quote character. + This should be of no interest to most people, but of particular interest + to anyone who writes their posts using Dreamweaver, as Dreamweaver + inexplicably uses this entity to represent a literal double-quote + character. SmartyPants only educates normal quotes, not entities (because + ordinarily, entities are used for the explicit purpose of representing the + specific character they represent). The "w" option must be used in + conjunction with one (or both) of the other quote options ("q" or "b"). + Thus, if you wish to apply all SmartyPants transformations (quotes, en- + and em-dashes, and ellipses) and also translate ``"`` entities into + regular quotes so SmartyPants can educate them, you should pass the + following to the smarty_pants attribute: +""" + + default_smartypants_attr = "1" -import re +import re, sys class smartchars(object): """Smart quotes and dashes @@ -370,75 +386,116 @@ class smartchars(object): endash = u'–' # "–" EN DASH emdash = u'—' # "—" EM DASH ellipsis = u'…' # "…" HORIZONTAL ELLIPSIS + apostrophe = u'’' # "’" RIGHT SINGLE QUOTATION MARK # quote characters (language-specific, set in __init__()) + # [1] http://en.wikipedia.org/wiki/Non-English_usage_of_quotation_marks + # [2] http://de.wikipedia.org/wiki/Anf%C3%BChrungszeichen#Andere_Sprachen + # [3] https://fr.wikipedia.org/wiki/Guillemet + # [4] http://typographisme.net/post/Les-espaces-typographiques-et-le-web + # [5] http://www.btb.termiumplus.gc.ca/tpv2guides/guides/redac/index-fra.html + # [6] https://en.wikipedia.org/wiki/Hebrew_punctuation#Quotation_marks + # [7] http://www.tustep.uni-tuebingen.de/bi/bi00/bi001t1-anfuehrung.pdf + # [8] http://www.korrekturavdelingen.no/anforselstegn.htm + # [9] Typografisk håndbok. Oslo: Spartacus. 2000. s. 67. ISBN 8243001530. 
+ # [10] http://www.typografi.org/sitat/sitatart.html # - # English smart quotes (open primary, close primary, open secondary, close - # secondary) are: - # opquote = u'“' # "“" LEFT DOUBLE QUOTATION MARK - # cpquote = u'”' # "”" RIGHT DOUBLE QUOTATION MARK - # osquote = u'‘' # "‘" LEFT SINGLE QUOTATION MARK - # csquote = u'’' # "’" RIGHT SINGLE QUOTATION MARK - # For other languages see: - # http://en.wikipedia.org/wiki/Non-English_usage_of_quotation_marks - # http://de.wikipedia.org/wiki/Anf%C3%BChrungszeichen#Andere_Sprachen + # TODO: configuration option, e.g.:: + # + # smartquote-locales: nl: „“’’, # apostrophe for ``'s Gravenhage`` + # nr: se, # alias + # fr: « : »:‹ : ›, # :-separated list with NBSPs quotes = {'af': u'“”‘’', 'af-x-altquot': u'„”‚’', + 'bg': u'„“‚‘', # Bulgarian, https://bg.wikipedia.org/wiki/Кавички 'ca': u'«»“”', 'ca-x-altquot': u'“”‘’', 'cs': u'„“‚‘', 'cs-x-altquot': u'»«›‹', - 'da': u'»«‘’', + 'da': u'»«›‹', 'da-x-altquot': u'„“‚‘', + # 'da-x-altquot2': u'””’’', 'de': u'„“‚‘', 'de-x-altquot': u'»«›‹', - 'de-CH': u'«»‹›', + 'de-ch': u'«»‹›', 'el': u'«»“”', 'en': u'“”‘’', - 'en-UK': u'‘’“”', + 'en-uk-x-altquot': u'‘’“”', # Attention: " → ‘ and ' → “ ! 'eo': u'“”‘’', 'es': u'«»“”', - 'et': u'„“‚‘', # no secondary quote listed in - 'et-x-altquot': u'»«›‹', # the sources above (wikipedia.org) - 'eu': u'«»‹›', 'es-x-altquot': u'“”‘’', + 'et': u'„“‚‘', # no secondary quote listed in + 'et-x-altquot': u'«»‹›', # the sources above (wikipedia.org) + 'eu': u'«»‹›', 'fi': u'””’’', - 'fi-x-altquot': u'»»’’', - 'fr': (u'« ', u' »', u'‹ ', u' ›'), # with narrow no-break space - 'fr-x-altquot': u'«»‹›', # for use with manually set spaces - # 'fr-x-altquot': (u'“ ', u' ”', u'‘ ', u' ’'), # rarely used - 'fr-CH': u'«»‹›', + 'fi-x-altquot': u'»»››', + 'fr': (u'« ', u' »', u'“', u'”'), # full no-break space + 'fr-x-altquot': (u'« ', u' »', u'“', u'”'), # narrow no-break space + 'fr-ch': u'«»‹›', + 'fr-ch-x-altquot': (u'« ', u' »', u'‹ ', u' ›'), # narrow no-break space, http://typoguide.ch/ 'gl': u'«»“”', - 'he': u'”“»«', - 'he-x-altquot': u'„”‚’', + 'he': u'”“»«', # Hebrew is RTL, test position: + 'he-x-altquot': u'„”‚’', # low quotation marks are opening. 
+ # 'he-x-altquot': u'“„‘‚', # RTL: low quotation marks opening + 'hr': u'„”‘’', # http://hrvatska-tipografija.com/polunavodnici/ + 'hr-x-altquot': u'»«›‹', + 'hsb': u'„“‚‘', + 'hsb-x-altquot':u'»«›‹', + 'hu': u'„”«»', + 'is': u'„“‚‘', 'it': u'«»“”', - 'it-CH': u'«»‹›', + 'it-ch': u'«»‹›', 'it-x-altquot': u'“”‘’', + # 'it-x-altquot2': u'“„‘‚', # [7] in headlines 'ja': u'「」『』', 'lt': u'„“‚‘', + 'lv': u'„“‚‘', + 'mk': u'„“‚‘', # Macedonian, https://mk.wikipedia.org/wiki/Правопис_и_правоговор_на_македонскиот_јазик 'nl': u'“”‘’', 'nl-x-altquot': u'„”‚’', + # 'nl-x-altquot2': u'””’’', + 'nb': u'«»’’', # Norsk bokmål (canonical form 'no') + 'nn': u'«»’’', # Nynorsk [10] + 'nn-x-altquot': u'«»‘’', # [8], [10] + # 'nn-x-altquot2': u'«»«»', # [9], [10 + # 'nn-x-altquot3': u'„“‚‘', # [10] + 'no': u'«»’’', # Norsk bokmål [10] + 'no-x-altquot': u'«»‘’', # [8], [10] + # 'no-x-altquot2': u'«»«»', # [9], [10 + # 'no-x-altquot3': u'„“‚‘', # [10] 'pl': u'„”«»', - 'pl-x-altquot': u'«»“”', + 'pl-x-altquot': u'«»‚’', + # 'pl-x-altquot2': u'„”‚’', # https://pl.wikipedia.org/wiki/Cudzys%C5%82%C3%B3w 'pt': u'«»“”', - 'pt-BR': u'“”‘’', + 'pt-br': u'“”‘’', 'ro': u'„”«»', - 'ro-x-altquot': u'«»„”', 'ru': u'«»„“', - 'sk': u'„“‚‘', + 'sh': u'„”‚’', # Serbo-Croatian + 'sh-x-altquot': u'»«›‹', + 'sk': u'„“‚‘', # Slovak 'sk-x-altquot': u'»«›‹', - 'sv': u'„“‚‘', - 'sv-x-altquot': u'»«›‹', - 'zh-CN': u'“”‘’', - 'it': u'«»“”', - 'zh-TW': u'「」『』', + 'sl': u'„“‚‘', # Slovenian + 'sl-x-altquot': u'»«›‹', + 'sq': u'«»‹›', # Albanian + 'sq-x-altquot': u'“„‘‚', + 'sr': u'„”’’', + 'sr-x-altquot': u'»«›‹', + 'sv': u'””’’', + 'sv-x-altquot': u'»»››', + 'tr': u'“”‘’', + 'tr-x-altquot': u'«»‹›', + # 'tr-x-altquot2': u'“„‘‚', # [7] antiquated? + 'uk': u'«»„“', + 'uk-x-altquot': u'„“‚‘', + 'zh-cn': u'“”‘’', + 'zh-tw': u'「」『』', } def __init__(self, language='en'): self.language = language try: (self.opquote, self.cpquote, - self.osquote, self.csquote) = self.quotes[language] + self.osquote, self.csquote) = self.quotes[language.lower()] except KeyError: self.opquote, self.cpquote, self.osquote, self.csquote = u'""\'\'' @@ -476,9 +533,8 @@ def educate_tokens(text_tokens, attr=default_smartypants_attr, language='en'): do_ellipses = False do_stupefy = False - if attr == "0": # Do nothing. - yield text - elif attr == "1": # Do everything, turn all options on. + # if attr == "0": # pass tokens unchanged (see below). + if attr == "1": # Do everything, turn all options on. do_quotes = True do_backticks = True do_dashes = 1 @@ -550,7 +606,10 @@ def educate_tokens(text_tokens, attr=default_smartypants_attr, language='en'): text = educateSingleBackticks(text, language) if do_quotes: - text = educateQuotes(prev_token_last_char+text, language)[1:] + # Replace plain quotes in context to prevent converstion to + # 2-character sequence in French. + context = prev_token_last_char.replace('"',';').replace("'",';') + text = educateQuotes(context+text, language)[1:] if do_stupefy: text = stupefyEntities(text, language) @@ -591,7 +650,8 @@ def educateQuotes(text, language='en'): text = re.sub(r"""'"(?=\w)""", smart.osquote+smart.opquote, text) # Special case for decade abbreviations (the '80s): - text = re.sub(r"""\b'(?=\d{2}s)""", smart.csquote, text) + if language.startswith('en'): # TODO similar cases in other languages? 
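A small sketch of the language lookup shown above: tags are lower-cased before the dictionary lookup, so region variants such as ``de-CH`` resolve, and unknown tags fall back to plain ASCII quotes (the ``x-klingon`` tag is invented for the example)::

    from docutils.utils.smartquotes import smartchars

    swiss = smartchars('de-CH')        # matches the 'de-ch' entry: « » ‹ ›
    opening, closing = swiss.opquote, swiss.cpquote
    unknown = smartchars('x-klingon')  # KeyError -> straight '"' and "'" quotes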
+ text = re.sub(r"""'(?=\d{2}s)""", smart.apostrophe, text, re.UNICODE) close_class = r"""[^\ \t\r\n\[\{\(\-]""" dec_dashes = r"""–|—""" @@ -608,21 +668,31 @@ def educateQuotes(text, language='en'): ) ' # the quote (?=\w) # followed by a word character - """ % (dec_dashes,), re.VERBOSE) + """ % (dec_dashes,), re.VERBOSE | re.UNICODE) text = opening_single_quotes_regex.sub(r'\1'+smart.osquote, text) + # In many locales, single closing quotes are different from apostrophe: + if smart.csquote != smart.apostrophe: + apostrophe_regex = re.compile(r"(?<=(\w|\d))'(?=\w)", re.UNICODE) + text = apostrophe_regex.sub(smart.apostrophe, text) + # TODO: keep track of quoting level to recognize apostrophe in, e.g., + # "Ich fass' es nicht." + closing_single_quotes_regex = re.compile(r""" (%s) ' - (?!\s | s\b | \d) - """ % (close_class,), re.VERBOSE) + (?!\s | # whitespace + s\b | + \d # digits ('80s) + ) + """ % (close_class,), re.VERBOSE | re.UNICODE) text = closing_single_quotes_regex.sub(r'\1'+smart.csquote, text) closing_single_quotes_regex = re.compile(r""" (%s) ' (\s | s\b) - """ % (close_class,), re.VERBOSE) + """ % (close_class,), re.VERBOSE | re.UNICODE) text = closing_single_quotes_regex.sub(r'\1%s\2' % smart.csquote, text) # Any remaining single quotes should be opening ones: @@ -855,52 +925,98 @@ def tokenize(text): if __name__ == "__main__": - import locale - + import itertools try: - locale.setlocale(locale.LC_ALL, '') + import locale # module missing in Jython + locale.setlocale(locale.LC_ALL, '') # set to user defaults + defaultlanguage = locale.getdefaultlocale()[0] except: - pass + defaultlanguage = 'en' - from docutils.core import publish_string - docstring_html = publish_string(__doc__, writer_name='html') - - print docstring_html + # Normalize and drop unsupported subtags: + defaultlanguage = defaultlanguage.lower().replace('-','_') + # split (except singletons, which mark the following tag as non-standard): + defaultlanguage = re.sub(r'_([a-zA-Z0-9])_', r'_\1-', defaultlanguage) + _subtags = [subtag for subtag in defaultlanguage.split('_')] + _basetag = _subtags.pop(0) + # find all combinations of subtags + for n in range(len(_subtags), 0, -1): + for tags in itertools.combinations(_subtags, n): + _tag = '-'.join((_basetag,)+tags) + if _tag in smartchars.quotes: + defaultlanguage = _tag + break + else: + if _basetag in smartchars.quotes: + defaultlanguage = _basetag + else: + defaultlanguage = 'en' - # Unit test output goes out stderr. 
- import unittest - sp = smartyPants + import argparse + parser = argparse.ArgumentParser( + description='Filter stdin making ASCII punctuation "smart".') + # parser.add_argument("text", help="text to be acted on") + parser.add_argument("-a", "--action", default="1", + help="what to do with the input (see --actionhelp)") + parser.add_argument("-e", "--encoding", default="utf8", + help="text encoding") + parser.add_argument("-l", "--language", default=defaultlanguage, + help="text language (BCP47 tag), Default: %s"%defaultlanguage) + parser.add_argument("-q", "--alternative-quotes", action="store_true", + help="use alternative quote style") + parser.add_argument("--doc", action="store_true", + help="print documentation") + parser.add_argument("--actionhelp", action="store_true", + help="list available actions") + parser.add_argument("--stylehelp", action="store_true", + help="list available quote styles") + parser.add_argument("--test", action="store_true", + help="perform short self-test") + args = parser.parse_args() - class TestSmartypantsAllAttributes(unittest.TestCase): - # the default attribute is "1", which means "all". + if args.doc: + print (__doc__) + elif args.actionhelp: + print(options) + elif args.stylehelp: + print() + print("Available styles (primary open/close, secondary open/close)") + print("language tag quotes") + print("============ ======") + for key in sorted(smartchars.quotes.keys()): + print("%-14s %s" % (key, smartchars.quotes[key])) + elif args.test: + # Unit test output goes to stderr. + import unittest - def test_dates(self): - self.assertEqual(sp("1440-80's"), u"1440-80’s") - self.assertEqual(sp("1440-'80s"), u"1440-‘80s") - self.assertEqual(sp("1440---'80s"), u"1440–‘80s") - self.assertEqual(sp("1960s"), "1960s") # no effect. - self.assertEqual(sp("1960's"), u"1960’s") - self.assertEqual(sp("one two '60s"), u"one two ‘60s") - self.assertEqual(sp("'60s"), u"‘60s") + class TestSmartypantsAllAttributes(unittest.TestCase): + # the default attribute is "1", which means "all". + def test_dates(self): + self.assertEqual(smartyPants("1440-80's"), u"1440-80’s") + self.assertEqual(smartyPants("1440-'80s"), u"1440-’80s") + self.assertEqual(smartyPants("1440---'80s"), u"1440–’80s") + self.assertEqual(smartyPants("1960's"), u"1960’s") + self.assertEqual(smartyPants("one two '60s"), u"one two ’60s") + self.assertEqual(smartyPants("'60s"), u"’60s") - def test_ordinal_numbers(self): - self.assertEqual(sp("21st century"), "21st century") # no effect. - self.assertEqual(sp("3rd"), "3rd") # no effect. 
+ def test_educated_quotes(self): + self.assertEqual(smartyPants('"Isn\'t this fun?"'), u'“Isn’t this fun?”') - def test_educated_quotes(self): - self.assertEqual(sp('''"Isn't this fun?"'''), u'“Isn’t this fun?”') + def test_html_tags(self): + text = 'more' + self.assertEqual(smartyPants(text), text) - def test_html_tags(self): - text = 'more' - self.assertEqual(sp(text), text) + suite = unittest.TestLoader().loadTestsFromTestCase( + TestSmartypantsAllAttributes) + unittest.TextTestRunner().run(suite) - unittest.main() - - - - -__author__ = "Chad Miller " -__version__ = "1.5_1.6: Fri, 27 Jul 2007 07:06:40 -0400" -__url__ = "http://wiki.chad.org/SmartyPantsPy" -__description__ = "Smart-quotes, smart-ellipses, and smart-dashes for weblog entries in pyblosxom" + else: + if args.alternative_quotes: + if '-x-altquot' in args.language: + args.language = args.language.replace('-x-altquot', '') + else: + args.language += '-x-altquot' + text = sys.stdin.read().decode(args.encoding) + print(smartyPants(text, attr=args.action, + language=args.language).encode(args.encoding)) diff --git a/Libs/docutils/utils/urischemes.py b/Libs/docutils/utils/urischemes.py index 53d76eb..253bc5f 100644 --- a/Libs/docutils/utils/urischemes.py +++ b/Libs/docutils/utils/urischemes.py @@ -1,4 +1,4 @@ -# $Id: urischemes.py 7464 2012-06-25 13:16:03Z milde $ +# $Id: urischemes.py 7922 2015-09-22 15:28:09Z milde $ # Author: David Goodger # Copyright: This module has been placed in the public domain. @@ -113,7 +113,7 @@ schemes = { 'tel': ('a connection to a terminal that handles normal voice ' 'telephone calls, a voice mailbox or another voice messaging ' 'system or a service that can be operated using DTMF tones; ' - 'RFC 2806.'), + 'RFC 3966.'), 'telephone': 'telephone', 'telnet': 'Reference to interactive sessions; RFC 4248', 'tftp': 'Trivial File Transfer Protocol; RFC 3617', diff --git a/Libs/docutils/writers/__init__.py b/Libs/docutils/writers/__init__.py index 5e254e1..3208c8a 100644 --- a/Libs/docutils/writers/__init__.py +++ b/Libs/docutils/writers/__init__.py @@ -1,4 +1,4 @@ -# $Id: __init__.py 7648 2013-04-18 07:36:22Z milde $ +# $Id: __init__.py 7969 2016-08-18 21:40:00Z milde $ # Author: David Goodger # Copyright: This module has been placed in the public domain. @@ -120,13 +120,18 @@ class UnfilteredWriter(Writer): _writer_aliases = { - 'html': 'html4css1', + 'html': 'html4css1', # may change to html5 some day + 'html4': 'html4css1', + 'html5': 'html5_polyglot', 'latex': 'latex2e', 'pprint': 'pseudoxml', 'pformat': 'pseudoxml', 'pdf': 'rlpdf', - 'xml': 'docutils_xml', - 's5': 's5_html'} + 's5': 's5_html', + 'xelatex': 'xetex', + 'xhtml': 'html5_polyglot', + 'xhtml10': 'html4css1', + 'xml': 'docutils_xml'} def get_writer_class(writer_name): """Return the Writer class from the `writer_name` module.""" diff --git a/Libs/docutils/writers/_html_base.py b/Libs/docutils/writers/_html_base.py new file mode 100644 index 0000000..f92ddc1 --- /dev/null +++ b/Libs/docutils/writers/_html_base.py @@ -0,0 +1,1670 @@ +#!/usr/bin/env python +# -*- coding: utf-8 -*- +# :Author: David Goodger, Günter Milde +# Based on the html4css1 writer by David Goodger. 
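With the extended writer alias table above, the high-level docutils API can select the new HTML5 writer by its short name; a minimal sketch, assuming the vendored ``docutils.core`` is importable::

    from docutils.core import publish_string

    # 'html5' and 'xhtml' resolve to html5_polyglot,
    # 'html' still maps to html4css1 for backwards compatibility
    html = publish_string(u'*hello*', writer_name='html5')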
+# :Maintainer: docutils-develop@lists.sourceforge.net +# :Revision: $Revision: 8118 $ +# :Date: $Date: 2005-06-28$ +# :Copyright: © 2016 David Goodger, Günter Milde +# :License: Released under the terms of the `2-Clause BSD license`_, in short: +# +# Copying and distribution of this file, with or without modification, +# are permitted in any medium without royalty provided the copyright +# notice and this notice are preserved. +# This file is offered as-is, without any warranty. +# +# .. _2-Clause BSD license: http://www.spdx.org/licenses/BSD-2-Clause + +"""common definitions for Docutils HTML writers""" + +import sys +import os.path +import re +import urllib + +try: # check for the Python Imaging Library + import PIL.Image +except ImportError: + try: # sometimes PIL modules are put in PYTHONPATH's root + import Image + class PIL(object): pass # dummy wrapper + PIL.Image = Image + except ImportError: + PIL = None + +import docutils +from docutils import nodes, utils, writers, languages, io +from docutils.utils.error_reporting import SafeString +from docutils.transforms import writer_aux +from docutils.utils.math import (unichar2tex, pick_math_environment, + math2html, latex2mathml, tex2mathml_extern) + + +class Writer(writers.Writer): + + supported = ('html', 'xhtml') # update in subclass + """Formats this writer supports.""" + + # default_stylesheets = [] # set in subclass! + # default_stylesheet_dirs = ['.'] # set in subclass! + default_template = 'template.txt' + # default_template_path = ... # set in subclass! + # settings_spec = ... # set in subclass! + + settings_defaults = {'output_encoding_error_handler': 'xmlcharrefreplace'} + + # config_section = ... # set in subclass! + config_section_dependencies = ['writers', 'html writers'] + + visitor_attributes = ( + 'head_prefix', 'head', 'stylesheet', 'body_prefix', + 'body_pre_docinfo', 'docinfo', 'body', 'body_suffix', + 'title', 'subtitle', 'header', 'footer', 'meta', 'fragment', + 'html_prolog', 'html_head', 'html_title', 'html_subtitle', + 'html_body') + + def get_transforms(self): + return writers.Writer.get_transforms(self) + [writer_aux.Admonitions] + + def translate(self): + self.visitor = visitor = self.translator_class(self.document) + self.document.walkabout(visitor) + for attr in self.visitor_attributes: + setattr(self, attr, getattr(visitor, attr)) + self.output = self.apply_template() + + def apply_template(self): + template_file = open(self.document.settings.template, 'rb') + template = unicode(template_file.read(), 'utf-8') + template_file.close() + subs = self.interpolation_dict() + return template % subs + + def interpolation_dict(self): + subs = {} + settings = self.document.settings + for attr in self.visitor_attributes: + subs[attr] = ''.join(getattr(self, attr)).rstrip('\n') + subs['encoding'] = settings.output_encoding + subs['version'] = docutils.__version__ + return subs + + def assemble_parts(self): + writers.Writer.assemble_parts(self) + for part in self.visitor_attributes: + self.parts[part] = ''.join(getattr(self, part)) + + +class HTMLTranslator(nodes.NodeVisitor): + + """ + Generic Docutils to HTML translator. + + See the `html4css1` and `html5_polyglot` writers for full featured + HTML writers. + + .. IMPORTANT:: + The `visit_*` and `depart_*` methods use a + heterogeneous stack, `self.context`. + When subclassing, make sure to be consistent in its use! + + Examples for robust coding: + + a) Override both `visit_*` and `depart_*` methods, don't call the + parent functions. 
+ + b) Extend both and unconditionally call the parent functions:: + + def visit_example(self, node): + if foo: + self.body.append('<div class="foo">')
+ html4css1.HTMLTranslator.visit_example(self, node) + + def depart_example(self, node): + html4css1.HTMLTranslator.depart_example(self, node) + if foo: + self.body.append('</div>')
+ + c) Extend both, calling the parent functions under the same + conditions:: + + def visit_example(self, node): + if foo: + self.body.append('<div class="foo">\n')
+ else: # call the parent method + _html_base.HTMLTranslator.visit_example(self, node) + + def depart_example(self, node): + if foo: + self.body.append('</div>\n')
+ else: # call the parent method + _html_base.HTMLTranslator.depart_example(self, node) + + d) Extend one method (call the parent), but don't otherwise use the + `self.context` stack::
Make sure that the pops correspond to the pushes.""" + + self.topic_classes = [] # TODO: replace with self_in_contents + self.colspecs = [] + self.compact_p = True + self.compact_simple = False + self.compact_field_list = False + self.in_docinfo = False + self.in_sidebar = False + self.in_footnote_list = False + self.title = [] + self.subtitle = [] + self.header = [] + self.footer = [] + self.html_head = [self.content_type] # charset not interpolated + self.html_title = [] + self.html_subtitle = [] + self.html_body = [] + self.in_document_title = 0 # len(self.body) or 0 + self.in_mailto = False + self.author_in_authors = False # for html4css1 + self.math_header = [] + + def astext(self): + return ''.join(self.head_prefix + self.head + + self.stylesheet + self.body_prefix + + self.body_pre_docinfo + self.docinfo + + self.body + self.body_suffix) + + def encode(self, text): + """Encode special characters in `text` & return.""" + # Use only named entities known in both XML and HTML + # other characters are automatically encoded "by number" if required. + # @@@ A codec to do these and all other HTML entities would be nice. + text = unicode(text) + return text.translate(self.special_characters) + + def cloak_mailto(self, uri): + """Try to hide a mailto: URL from harvesters.""" + # Encode "@" using a URL octet reference (see RFC 1738). + # Further cloaking with HTML entities will be done in the + # `attval` function. + return uri.replace('@', '%40') + + def cloak_email(self, addr): + """Try to hide the link text of a email link from harversters.""" + # Surround at-signs and periods with tags. ("@" has + # already been encoded to "@" by the `encode` method.) + addr = addr.replace('@', '@') + addr = addr.replace('.', '.') + return addr + + def attval(self, text, + whitespace=re.compile('[\n\r\t\v\f]')): + """Cleanse, HTML encode, and return attribute value text.""" + encoded = self.encode(whitespace.sub(' ', text)) + if self.in_mailto and self.settings.cloak_email_addresses: + # Cloak at-signs ("%40") and periods with HTML entities. + encoded = encoded.replace('%40', '%40') + encoded = encoded.replace('.', '.') + return encoded + + def stylesheet_call(self, path): + """Return code to reference or embed stylesheet file `path`""" + if self.settings.embed_stylesheet: + try: + content = io.FileInput(source_path=path, + encoding='utf-8').read() + self.settings.record_dependencies.add(path) + except IOError, err: + msg = u"Cannot embed stylesheet '%s': %s." % ( + path, SafeString(err.strerror)) + self.document.reporter.error(msg) + return '<--- %s --->\n' % msg + return self.embedded_stylesheet % content + # else link to style file: + if self.settings.stylesheet_path: + # adapt path relative to output (cf. config.html#stylesheet-path) + path = utils.relative_path(self.settings._destination, path) + return self.stylesheet_link % self.encode(path) + + def starttag(self, node, tagname, suffix='\n', empty=False, **attributes): + """ + Construct and return a start tag given a node (id & class attributes + are extracted), tag name, and optional attributes. 
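As a concrete illustration of pattern (b) from the subclassing docstring above, a minimal sketch; the class name and the wrapping ``div`` are invented for the example::

    from docutils.writers import _html_base

    class WrappedParagraphTranslator(_html_base.HTMLTranslator):

        def visit_paragraph(self, node):
            self.body.append('<div class="wrap">')
            _html_base.HTMLTranslator.visit_paragraph(self, node)

        def depart_paragraph(self, node):
            _html_base.HTMLTranslator.depart_paragraph(self, node)
            self.body.append('</div>')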
+ """ + tagname = tagname.lower() + prefix = [] + atts = {} + ids = [] + for (name, value) in attributes.items(): + atts[name.lower()] = value + classes = [] + languages = [] + # unify class arguments and move language specification + for cls in node.get('classes', []) + atts.pop('class', '').split() : + if cls.startswith('language-'): + languages.append(cls[9:]) + elif cls.strip() and cls not in classes: + classes.append(cls) + if languages: + # attribute name is 'lang' in XHTML 1.0 but 'xml:lang' in 1.1 + atts[self.lang_attribute] = languages[0] + if classes: + atts['class'] = ' '.join(classes) + assert 'id' not in atts + ids.extend(node.get('ids', [])) + if 'ids' in atts: + ids.extend(atts['ids']) + del atts['ids'] + if ids: + atts['id'] = ids[0] + for id in ids[1:]: + # Add empty "span" elements for additional IDs. Note + # that we cannot use empty "a" elements because there + # may be targets inside of references, but nested "a" + # elements aren't allowed in XHTML (even if they do + # not all have a "href" attribute). + if empty or isinstance(node, + (nodes.bullet_list, nodes.docinfo, + nodes.definition_list, nodes.enumerated_list, + nodes.field_list, nodes.option_list, + nodes.table)): + # Insert target right in front of element. + prefix.append('' % id) + else: + # Non-empty tag. Place the auxiliary tag + # *inside* the element, as the first child. + suffix += '' % id + attlist = atts.items() + attlist.sort() + parts = [tagname] + for name, value in attlist: + # value=None was used for boolean attributes without + # value, but this isn't supported by XHTML. + assert value is not None + if isinstance(value, list): + values = [unicode(v) for v in value] + parts.append('%s="%s"' % (name.lower(), + self.attval(' '.join(values)))) + else: + parts.append('%s="%s"' % (name.lower(), + self.attval(unicode(value)))) + if empty: + infix = ' /' + else: + infix = '' + return ''.join(prefix) + '<%s%s>' % (' '.join(parts), infix) + suffix + + def emptytag(self, node, tagname, suffix='\n', **attributes): + """Construct and return an XML-compatible empty tag.""" + return self.starttag(node, tagname, suffix, empty=True, **attributes) + + def set_class_on_child(self, node, class_, index=0): + """ + Set class `class_` on the visible child no. index of `node`. + Do nothing if node has fewer children than `index`. 
+ """ + children = [n for n in node if not isinstance(n, nodes.Invisible)] + try: + child = children[index] + except IndexError: + return + child['classes'].append(class_) + + def visit_Text(self, node): + text = node.astext() + encoded = self.encode(text) + if self.in_mailto and self.settings.cloak_email_addresses: + encoded = self.cloak_email(encoded) + self.body.append(encoded) + + def depart_Text(self, node): + pass + + def visit_abbreviation(self, node): + # @@@ implementation incomplete ("title" attribute) + self.body.append(self.starttag(node, 'abbr', '')) + + def depart_abbreviation(self, node): + self.body.append('') + + def visit_acronym(self, node): + # @@@ implementation incomplete ("title" attribute) + self.body.append(self.starttag(node, 'acronym', '')) + + def depart_acronym(self, node): + self.body.append('') + + def visit_address(self, node): + self.visit_docinfo_item(node, 'address', meta=False) + self.body.append(self.starttag(node, 'pre', + suffix= '', CLASS='address')) + + def depart_address(self, node): + self.body.append('\n\n') + self.depart_docinfo_item() + + def visit_admonition(self, node): + node['classes'].insert(0, 'admonition') + self.body.append(self.starttag(node, 'div')) + + def depart_admonition(self, node=None): + self.body.append('\n') + + attribution_formats = {'dash': (u'\u2014', ''), + 'parentheses': ('(', ')'), + 'parens': ('(', ')'), + 'none': ('', '')} + + def visit_attribution(self, node): + prefix, suffix = self.attribution_formats[self.settings.attribution] + self.context.append(suffix) + self.body.append( + self.starttag(node, 'p', prefix, CLASS='attribution')) + + def depart_attribution(self, node): + self.body.append(self.context.pop() + '
</p>
\n') + + def visit_author(self, node): + if not(isinstance(node.parent, nodes.authors)): + self.visit_docinfo_item(node, 'author') + self.body.append('
<p>
') + + def depart_author(self, node): + self.body.append('
</p>
') + if isinstance(node.parent, nodes.authors): + self.body.append('\n') + else: + self.depart_docinfo_item() + + def visit_authors(self, node): + self.visit_docinfo_item(node, 'authors') + + def depart_authors(self, node): + self.depart_docinfo_item() + + def visit_block_quote(self, node): + self.body.append(self.starttag(node, 'blockquote')) + + def depart_block_quote(self, node): + self.body.append('\n') + + def check_simple_list(self, node): + """Check for a simple list that can be rendered compactly.""" + visitor = SimpleListChecker(self.document) + try: + node.walk(visitor) + except nodes.NodeFound: + return False + else: + return True + + # Compact lists + # ------------ + # Include definition lists and field lists (in addition to ordered + # and unordered lists) in the test if a list is "simple" (cf. the + # html4css1.HTMLTranslator docstring and the SimpleListChecker class at + # the end of this file). + + def is_compactable(self, node): + # print "is_compactable %s ?" % node.__class__, + # explicite class arguments have precedence + if 'compact' in node['classes']: + return True + if 'open' in node['classes']: + return False + # check config setting: + if (isinstance(node, (nodes.field_list, nodes.definition_list)) + and not self.settings.compact_field_lists): + # print "`compact-field-lists` is False" + return False + if (isinstance(node, (nodes.enumerated_list, nodes.bullet_list)) + and not self.settings.compact_lists): + # print "`compact-lists` is False" + return False + # more special cases: + if (self.topic_classes == ['contents']): # TODO: self.in_contents + return True + # check the list items: + return self.check_simple_list(node) + + def visit_bullet_list(self, node): + atts = {} + old_compact_simple = self.compact_simple + self.context.append((self.compact_simple, self.compact_p)) + self.compact_p = None + self.compact_simple = self.is_compactable(node) + if self.compact_simple and not old_compact_simple: + atts['class'] = 'simple' + self.body.append(self.starttag(node, 'ul', **atts)) + + def depart_bullet_list(self, node): + self.compact_simple, self.compact_p = self.context.pop() + self.body.append('\n') + + def visit_caption(self, node): + self.body.append(self.starttag(node, 'p', '', CLASS='caption')) + + def depart_caption(self, node): + self.body.append('
</p>
\n') + + # citations + # --------- + # Use definition list instead of table for bibliographic references. + # Join adjacent citation entries. + + def visit_citation(self, node): + if not self.in_footnote_list: + self.body.append('
<dl class="citation">\n') + self.in_footnote_list = True + + def depart_citation(self, node): + self.body.append('</dd>\n') + if not isinstance(node.next_node(descend=False, siblings=True), + nodes.citation): + self.body.append('</dl>
\n') + self.in_footnote_list = False + + def visit_citation_reference(self, node): + href = '#' + if 'refid' in node: + href += node['refid'] + elif 'refname' in node: + href += self.document.nameids[node['refname']] + # else: # TODO system message (or already in the transform)? + # 'Citation reference missing.' + self.body.append(self.starttag( + node, 'a', '[', CLASS='citation-reference', href=href)) + + def depart_citation_reference(self, node): + self.body.append(']</a>') + + # classifier + # ---------- + # don't insert classifier-delimiter here (done by CSS) + + def visit_classifier(self, node): + self.body.append(self.starttag(node, 'span', '', CLASS='classifier')) + + def depart_classifier(self, node): + self.body.append('</span>
') + + def visit_colspec(self, node): + self.colspecs.append(node) + # "stubs" list is an attribute of the tgroup element: + node.parent.stubs.append(node.attributes.get('stub')) + + def depart_colspec(self, node): + # write out when all colspecs are processed + if isinstance(node.next_node(descend=False, siblings=True), + nodes.colspec): + return + if 'colwidths-auto' in node.parent.parent['classes'] or ( + 'colwidths-auto' in self.settings.table_style and + ('colwidths-given' not in node.parent.parent['classes'])): + return + total_width = sum(node['colwidth'] for node in self.colspecs) + self.body.append(self.starttag(node, 'colgroup')) + for node in self.colspecs: + colwidth = int(node['colwidth'] * 100.0 / total_width + 0.5) + self.body.append(self.emptytag(node, 'col', + style='width: %i%%' % colwidth)) + self.body.append('\n') + + def visit_comment(self, node, + sub=re.compile('-(?=-)').sub): + """Escape double-dashes in comment text.""" + self.body.append('\n' % sub('- ', node.astext())) + # Content already processed: + raise nodes.SkipNode + + def visit_compound(self, node): + self.body.append(self.starttag(node, 'div', CLASS='compound')) + if len(node) > 1: + node[0]['classes'].append('compound-first') + node[-1]['classes'].append('compound-last') + for child in node[1:-1]: + child['classes'].append('compound-middle') + + def depart_compound(self, node): + self.body.append('\n') + + def visit_container(self, node): + self.body.append(self.starttag(node, 'div', CLASS='docutils container')) + + def depart_container(self, node): + self.body.append('\n') + + def visit_contact(self, node): + self.visit_docinfo_item(node, 'contact', meta=False) + + def depart_contact(self, node): + self.depart_docinfo_item() + + def visit_copyright(self, node): + self.visit_docinfo_item(node, 'copyright') + + def depart_copyright(self, node): + self.depart_docinfo_item() + + def visit_date(self, node): + self.visit_docinfo_item(node, 'date') + + def depart_date(self, node): + self.depart_docinfo_item() + + def visit_decoration(self, node): + pass + + def depart_decoration(self, node): + pass + + def visit_definition(self, node): + self.body.append('\n') + self.body.append(self.starttag(node, 'dd', '')) + + def depart_definition(self, node): + self.body.append('\n') + + def visit_definition_list(self, node): + classes = node.setdefault('classes', []) + if self.is_compactable(node): + classes.append('simple') + self.body.append(self.starttag(node, 'dl')) + + def depart_definition_list(self, node): + self.body.append('\n') + + def visit_definition_list_item(self, node): + # pass class arguments, ids and names to definition term: + node.children[0]['classes'] = ( + node.get('classes', []) + node.children[0].get('classes', [])) + node.children[0]['ids'] = ( + node.get('ids', []) + node.children[0].get('ids', [])) + node.children[0]['names'] = ( + node.get('names', []) + node.children[0].get('names', [])) + + def depart_definition_list_item(self, node): + pass + + def visit_description(self, node): + self.body.append(self.starttag(node, 'dd', '')) + + def depart_description(self, node): + self.body.append('\n') + + def visit_docinfo(self, node): + classes = 'docinfo' + if (self.is_compactable(node)): + classes += ' simple' + self.body.append(self.starttag(node, 'dl', CLASS=classes)) + + def depart_docinfo(self, node): + self.body.append('\n') + + def visit_docinfo_item(self, node, name, meta=True): + if meta: + meta_tag = '\n' \ + % (name, self.attval(node.astext())) + self.add_meta(meta_tag) + 
self.body.append('<dt class="%s">%s</dt>
\n' + % (name, self.language.labels[name])) + self.body.append(self.starttag(node, 'dd', '', CLASS=name)) + + def depart_docinfo_item(self): + self.body.append('\n') + + def visit_doctest_block(self, node): + self.body.append(self.starttag(node, 'pre', suffix='', + CLASS='code python doctest')) + + def depart_doctest_block(self, node): + self.body.append('\n\n') + + def visit_document(self, node): + title = (node.get('title', '') or os.path.basename(node['source']) + or 'docutils document without title') + self.head.append('%s\n' % self.encode(title)) + + def depart_document(self, node): + self.head_prefix.extend([self.doctype, + self.head_prefix_template % + {'lang': self.settings.language_code}]) + self.html_prolog.append(self.doctype) + self.meta.insert(0, self.content_type % self.settings.output_encoding) + self.head.insert(0, self.content_type % self.settings.output_encoding) + if self.math_header: + if self.math_output == 'mathjax': + self.head.extend(self.math_header) + else: + self.stylesheet.extend(self.math_header) + # skip content-type meta tag with interpolated charset value: + self.html_head.extend(self.head[1:]) + self.body_prefix.append(self.starttag(node, 'div', CLASS='document')) + self.body_suffix.insert(0, '\n') + self.fragment.extend(self.body) # self.fragment is the "naked" body + self.html_body.extend(self.body_prefix[1:] + self.body_pre_docinfo + + self.docinfo + self.body + + self.body_suffix[:-1]) + assert not self.context, 'len(context) = %s' % len(self.context) + + def visit_emphasis(self, node): + self.body.append(self.starttag(node, 'em', '')) + + def depart_emphasis(self, node): + self.body.append('') + + def visit_entry(self, node): + atts = {'class': []} + if isinstance(node.parent.parent, nodes.thead): + atts['class'].append('head') + if node.parent.parent.parent.stubs[node.parent.column]: + # "stubs" list is an attribute of the tgroup element + atts['class'].append('stub') + if atts['class']: + tagname = 'th' + atts['class'] = ' '.join(atts['class']) + else: + tagname = 'td' + del atts['class'] + node.parent.column += 1 + if 'morerows' in node: + atts['rowspan'] = node['morerows'] + 1 + if 'morecols' in node: + atts['colspan'] = node['morecols'] + 1 + node.parent.column += node['morecols'] + self.body.append(self.starttag(node, tagname, '', **atts)) + self.context.append('\n' % tagname.lower()) + # TODO: why does the html4css1 writer insert an NBSP into empty cells? 
+ # if len(node) == 0: # empty cell + # self.body.append(' ') # no-break space + + def depart_entry(self, node): + self.body.append(self.context.pop()) + + def visit_enumerated_list(self, node): + atts = {} + if 'start' in node: + atts['start'] = node['start'] + if 'enumtype' in node: + atts['class'] = node['enumtype'] + if self.is_compactable(node): + atts['class'] = (atts.get('class', '') + ' simple').strip() + self.body.append(self.starttag(node, 'ol', **atts)) + + def depart_enumerated_list(self, node): + self.body.append('\n') + + def visit_field_list(self, node): + # Keep simple paragraphs in the field_body to enable CSS + # rule to start body on new line if the label is too long + classes = 'field-list' + if (self.is_compactable(node)): + classes += ' simple' + self.body.append(self.starttag(node, 'dl', CLASS=classes)) + + def depart_field_list(self, node): + self.body.append('\n') + + def visit_field(self, node): + pass + + def depart_field(self, node): + pass + + # as field is ignored, pass class arguments to field-name and field-body: + + def visit_field_name(self, node): + self.body.append(self.starttag(node, 'dt', '', + CLASS=''.join(node.parent['classes']))) + + def depart_field_name(self, node): + self.body.append('\n') + + def visit_field_body(self, node): + self.body.append(self.starttag(node, 'dd', '', + CLASS=''.join(node.parent['classes']))) + # prevent misalignment of following content if the field is empty: + if not node.children: + self.body.append('
<p></p>
') + + def depart_field_body(self, node): + self.body.append('\n') + + def visit_figure(self, node): + atts = {'class': 'figure'} + if node.get('width'): + atts['style'] = 'width: %s' % node['width'] + if node.get('align'): + atts['class'] += " align-" + node['align'] + self.body.append(self.starttag(node, 'div', **atts)) + + def depart_figure(self, node): + self.body.append('\n') + + # use HTML 5