changeset 6112:4716268c34e3

merge branches
author Thomas Waldmann <tw AT waldmann-edv DOT de>
date Wed, 07 Sep 2016 04:31:59 +0200
parents 1fdd537e9d83 (diff) cefd695e7572 (current diff)
children a0ec7f89be84
files MoinMoin/action/AttachFile.py MoinMoin/theme/__init__.py
diffstat 339 files changed, 74896 insertions(+), 45672 deletions(-)
--- a/MoinMoin/PageEditor.py	Wed Sep 07 03:05:27 2016 +0200
+++ b/MoinMoin/PageEditor.py	Wed Sep 07 04:31:59 2016 +0200
@@ -25,7 +25,6 @@
 from MoinMoin.widget.dialog import Status
 from MoinMoin.logfile import editlog, eventlog
 from MoinMoin.mail.sendmail import encodeSpamSafeEmail
-from MoinMoin.support.python_compatibility import set
 from MoinMoin.util import filesys, timefuncs, web
 from MoinMoin.util.abuse import log_attempt
 from MoinMoin.events import PageDeletedEvent, PageRenamedEvent, PageCopiedEvent, PageRevertedEvent
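
This hunk, and several below it, drop the set / frozenset / sorted imports from MoinMoin.support.python_compatibility; all three have been real built-ins since Python 2.4, so the names resolve without any import. A quick interpreter check:

    >>> sorted(set('banana'))
    ['a', 'b', 'n']
    >>> frozenset([1, 2]) <= set([1, 2, 3])
    True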
--- a/MoinMoin/_tests/maketestwiki.py	Wed Sep 07 03:05:27 2016 +0200
+++ b/MoinMoin/_tests/maketestwiki.py	Wed Sep 07 04:31:59 2016 +0200
@@ -11,12 +11,11 @@
 """
 
 import os, sys, shutil, errno
+import tarfile
 
 filename = globals().get("__file__") or sys.argv[0]
 moinpath = os.path.abspath(os.path.join(os.path.dirname(filename), os.pardir, os.pardir))
 
-from MoinMoin.support import tarfile
-
 WIKI = os.path.abspath(os.path.join(moinpath, 'tests', 'wiki'))
 SHARE = os.path.abspath(os.path.join(moinpath, 'wiki'))
 
--- a/MoinMoin/_tests/test_wsgiapp.py	Wed Sep 07 03:05:27 2016 +0200
+++ b/MoinMoin/_tests/test_wsgiapp.py	Wed Sep 07 04:31:59 2016 +0200
@@ -26,7 +26,7 @@
                 output = ''.join(appiter)
                 print output
                 assert status[:3] == '200'
-                assert ('Content-Type', 'text/html; charset=utf-8') in headers
+                assert ('Content-Type', 'text/html; charset=utf-8') in list(headers)
                 for needle in (DOC_TYPE, page):
                     assert needle in output
             yield _test_
--- a/MoinMoin/action/AttachFile.py	Wed Sep 07 03:05:27 2016 +0200
+++ b/MoinMoin/action/AttachFile.py	Wed Sep 07 04:31:59 2016 +0200
@@ -29,6 +29,7 @@
 
 import os, time, zipfile, errno, datetime
 from StringIO import StringIO
+import tarfile
 
 from werkzeug import http_date
 
@@ -44,7 +45,6 @@
 from MoinMoin.util import filesys, timefuncs
 from MoinMoin.security.textcha import TextCha
 from MoinMoin.events import FileAttachedEvent, FileRemovedEvent, send_event
-from MoinMoin.support import tarfile
 
 action_name = __name__.split('.')[-1]
 
--- a/MoinMoin/action/LikePages.py	Wed Sep 07 03:05:27 2016 +0200
+++ b/MoinMoin/action/LikePages.py	Wed Sep 07 04:31:59 2016 +0200
@@ -12,9 +12,9 @@
 """
 
 import re
+import difflib
 
 from MoinMoin import config, wikiutil
-from MoinMoin.support import difflib
 from MoinMoin.Page import Page
 
 
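LikePages now relies on the standard difflib instead of the bundled copy (that copy is removed further down). difflib.get_close_matches, which this kind of similar-page matching is usually built on, behaves like this:

    >>> import difflib
    >>> difflib.get_close_matches('appel', ['ape', 'apple', 'peach', 'puppy'])
    ['apple', 'ape']
    >>> difflib.SequenceMatcher(None, 'FrontPage', 'FrontPague').ratio() > 0.8
    True
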
--- a/MoinMoin/action/SyncPages.py	Wed Sep 07 03:05:27 2016 +0200
+++ b/MoinMoin/action/SyncPages.py	Wed Sep 07 04:31:59 2016 +0200
@@ -19,7 +19,6 @@
 from MoinMoin.Page import Page
 from MoinMoin.wikisync import TagStore, UnsupportedWikiException, SyncPage, NotAllowedException
 from MoinMoin.wikisync import MoinLocalWiki, MoinRemoteWiki, UP, DOWN, BOTH, MIMETYPE_MOIN
-from MoinMoin.support.python_compatibility import set
 from MoinMoin.util.bdiff import decompress, patch, compress, textdiff
 from MoinMoin.util import diff3, rpc_aggregator
 
--- a/MoinMoin/action/__init__.py	Wed Sep 07 03:05:27 2016 +0200
+++ b/MoinMoin/action/__init__.py	Wed Sep 07 04:31:59 2016 +0200
@@ -32,7 +32,6 @@
 from MoinMoin.util import pysupport
 from MoinMoin import config, wikiutil
 from MoinMoin.Page import Page
-from MoinMoin.support.python_compatibility import set
 
 # create a list of extension actions from the package directory
 modules = pysupport.getPackageModules(__file__)
--- a/MoinMoin/action/backup.py	Wed Sep 07 03:05:27 2016 +0200
+++ b/MoinMoin/action/backup.py	Wed Sep 07 04:31:59 2016 +0200
@@ -17,9 +17,9 @@
 """
 
 import os, re, time
+import tarfile
 
 from MoinMoin import wikiutil
-from MoinMoin.support import tarfile
 
 
 def addFiles(path, tar, exclude_func):
--- a/MoinMoin/action/cache.py	Wed Sep 07 03:05:27 2016 +0200
+++ b/MoinMoin/action/cache.py	Wed Sep 07 04:31:59 2016 +0200
@@ -28,6 +28,7 @@
 """
 
 from datetime import datetime
+import hmac
 
 from MoinMoin import log
 logging = log.getLogger(__name__)
@@ -39,7 +40,6 @@
 from MoinMoin import config, caching
 from MoinMoin.util import filesys
 from MoinMoin.action import AttachFile
-from MoinMoin.support.python_compatibility import hmac_new
 
 action_name = __name__.split('.')[-1]
 
@@ -99,7 +99,7 @@
         raise AssertionError('cache_key called with unsupported parameters')
 
     hmac_data = hmac_data.encode('utf-8')
-    key = hmac_new(secret, hmac_data).hexdigest()
+    key = hmac.new(secret, hmac_data).hexdigest()
     return key
 
 
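hmac_new from the compatibility module becomes hmac.new from the standard library. With no digestmod argument, Python 2's hmac module defaults to MD5, so the computed cache keys stay the same. A minimal sketch with made-up key material:

    import hmac

    secret = 'site-specific secret'
    hmac_data = u'itemname|mtime'.encode('utf-8')
    key = hmac.new(secret, hmac_data).hexdigest()  # MD5 by default on Python 2
    print key
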
--- a/MoinMoin/action/fullsearch.py	Wed Sep 07 03:05:27 2016 +0200
+++ b/MoinMoin/action/fullsearch.py	Wed Sep 07 04:31:59 2016 +0200
@@ -11,7 +11,7 @@
 import re, time
 from MoinMoin.Page import Page
 from MoinMoin import wikiutil
-from parsedatetime.parsedatetime import Calendar
+from parsedatetime import Calendar
 from MoinMoin.web.utils import check_surge_protect
 
 def checkTitleSearch(request):
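
Calendar is now imported from the parsedatetime package directly; the parsedatetime.parsedatetime submodule path was the layout of older releases of that library. A rough usage sketch, assuming a reasonably recent parsedatetime is installed:

    from parsedatetime import Calendar

    cal = Calendar()
    # parse() returns a time tuple plus a flag describing what was recognized
    timetuple, parsed = cal.parse('2 weeks ago')
    print parsed, timetuple[:3]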
--- a/MoinMoin/action/info.py	Wed Sep 07 03:05:27 2016 +0200
+++ b/MoinMoin/action/info.py	Wed Sep 07 04:31:59 2016 +0200
@@ -8,6 +8,7 @@
                 2006-2008 MoinMoin:ThomasWaldmann
     @license: GNU GPL, see COPYING for details.
 """
+import hashlib
 
 from MoinMoin import config, wikiutil, action
 from MoinMoin.Page import Page
@@ -33,8 +34,7 @@
                       f.text(_("Page size: %d") % page.size()),
                       f.paragraph(0))
 
-        from MoinMoin.support.python_compatibility import hash_new
-        digest = hash_new('sha1', page.get_raw_body().encode(config.charset)).hexdigest().upper()
+        digest = hashlib.new('sha1', page.get_raw_body().encode(config.charset)).hexdigest().upper()
         request.write(f.paragraph(1),
                       f.rawHTML('%(label)s <tt>%(value)s</tt>' % {
                           'label': _("SHA digest of this page's content is:"),
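
hash_new('sha1', ...) becomes hashlib.new('sha1', ...); the resulting hexdigest is identical, so the displayed SHA digest of a page does not change. A doctest-style check with a made-up page body:

    >>> import hashlib
    >>> digest = hashlib.new('sha1', u'page body'.encode('utf-8')).hexdigest().upper()
    >>> len(digest)
    40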
--- a/MoinMoin/action/serveopenid.py	Wed Sep 07 03:05:27 2016 +0200
+++ b/MoinMoin/action/serveopenid.py	Wed Sep 07 04:31:59 2016 +0200
@@ -8,7 +8,6 @@
     @license: GNU GPL, see COPYING for details.
 """
 
-from MoinMoin.support.python_compatibility import rsplit
 from MoinMoin.util.moinoid import MoinOpenIDStore, strbase64
 from MoinMoin import wikiutil
 from openid.consumer.discover import OPENID_1_0_TYPE, \
@@ -117,7 +116,7 @@
 
         # we can very well split on the last slash since usernames
         # must not contain slashes
-        base, received_name = rsplit(identity, '/', 1)
+        base, received_name = identity.rsplit('/', 1)
         check_name = received_name
 
         if received_name == '':
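
The rsplit helper is replaced by the built-in str.rsplit method (available since Python 2.4). On a made-up identity URL, splitting on the last slash looks like this:

    >>> identity = 'http://wiki.example.org/id/JohnDoe'
    >>> base, received_name = identity.rsplit('/', 1)
    >>> base
    'http://wiki.example.org/id'
    >>> received_name
    'JohnDoe'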
--- a/MoinMoin/config/_tests/test_multiconfig.py	Wed Sep 07 03:05:27 2016 +0200
+++ b/MoinMoin/config/_tests/test_multiconfig.py	Wed Sep 07 04:31:59 2016 +0200
@@ -7,7 +7,6 @@
 """
 
 import py
-from MoinMoin.support.python_compatibility import set
 
 
 class TestPasswordChecker:
--- a/MoinMoin/config/multiconfig.py	Wed Sep 07 03:05:27 2016 +0200
+++ b/MoinMoin/config/multiconfig.py	Wed Sep 07 04:31:59 2016 +0200
@@ -8,6 +8,7 @@
     @license: GNU GPL, see COPYING for details.
 """
 
+import hashlib
 import re
 import os
 import sys
@@ -27,7 +28,6 @@
 import MoinMoin.web.session
 from MoinMoin.packages import packLine
 from MoinMoin.security import AccessControlList
-from MoinMoin.support.python_compatibility import set
 
 _url_re_cache = None
 _farmconfig_mtime = None
@@ -626,7 +626,6 @@
         plugin packages as "moin_plugin_<sha1(path)>.plugin".
         """
         import imp
-        from MoinMoin.support.python_compatibility import hash_new
 
         plugin_dirs = [self.plugin_dir] + self.plugin_dirs
         self._plugin_modules = []
@@ -636,7 +635,7 @@
             imp.acquire_lock()
             try:
                 for pdir in plugin_dirs:
-                    csum = 'p_%s' % hash_new('sha1', pdir).hexdigest()
+                    csum = 'p_%s' % hashlib.new('sha1', pdir).hexdigest()
                     modname = '%s.%s' % (self.siteid, csum)
                     # If the module is not loaded, try to load it
                     if not modname in sys.modules:
--- a/MoinMoin/conftest.py	Wed Sep 07 03:05:27 2016 +0200
+++ b/MoinMoin/conftest.py	Wed Sep 07 04:31:59 2016 +0200
@@ -29,7 +29,6 @@
 moindir = rootdir.join("..")
 sys.path.insert(0, str(moindir))
 
-from MoinMoin.support.python_compatibility import set
 from MoinMoin.web.request import TestRequest, Client
 from MoinMoin.wsgiapp import Application, init
 from MoinMoin._tests import maketestwiki, wikiconfig
--- a/MoinMoin/events/emailnotify.py	Wed Sep 07 03:05:27 2016 +0200
+++ b/MoinMoin/events/emailnotify.py	Wed Sep 07 04:31:59 2016 +0200
@@ -12,7 +12,6 @@
 from MoinMoin import user
 from MoinMoin.Page import Page
 from MoinMoin.mail import sendmail
-from MoinMoin.support.python_compatibility import set
 from MoinMoin.user import User, superusers
 from MoinMoin.action.AttachFile import getAttachUrl
 
--- a/MoinMoin/events/jabbernotify.py	Wed Sep 07 03:05:27 2016 +0200
+++ b/MoinMoin/events/jabbernotify.py	Wed Sep 07 04:31:59 2016 +0200
@@ -15,7 +15,6 @@
 
 from MoinMoin.Page import Page
 from MoinMoin.user import User, superusers
-from MoinMoin.support.python_compatibility import set
 from MoinMoin.action.AttachFile import getAttachUrl
 
 import MoinMoin.events.notification as notification
--- a/MoinMoin/formatter/text_html.py	Wed Sep 07 03:05:27 2016 +0200
+++ b/MoinMoin/formatter/text_html.py	Wed Sep 07 04:31:59 2016 +0200
@@ -14,7 +14,6 @@
 from MoinMoin import wikiutil, i18n
 from MoinMoin.Page import Page
 from MoinMoin.action import AttachFile
-from MoinMoin.support.python_compatibility import set
 
 # insert IDs into output wherever they occur
 # warning: breaks toggle line numbers javascript
--- a/MoinMoin/macro/AdvancedSearch.py	Wed Sep 07 03:05:27 2016 +0200
+++ b/MoinMoin/macro/AdvancedSearch.py	Wed Sep 07 04:31:59 2016 +0200
@@ -13,7 +13,6 @@
 from MoinMoin.i18n import languages
 from MoinMoin.widget import html
 from MoinMoin.util.web import makeSelection
-from MoinMoin.support.python_compatibility import sorted
 import mimetypes
 
 Dependencies = ['pages']
--- a/MoinMoin/macro/FootNote.py	Wed Sep 07 03:05:27 2016 +0200
+++ b/MoinMoin/macro/FootNote.py	Wed Sep 07 04:31:59 2016 +0200
@@ -9,10 +9,10 @@
                 2007 Johannes Berg
     @license: GNU GPL, see COPYING for details.
 """
+import hashlib
 
 from MoinMoin import config, wikiutil
 from MoinMoin.parser.text_moin_wiki import Parser as WikiParser
-from MoinMoin.support.python_compatibility import hash_new
 
 Dependencies = ["time"] # footnote macro cannot be cached
 
@@ -33,7 +33,7 @@
         idx = request.footnote_ctr
         request.footnote_ctr += 1
 
-        shahex = hash_new('sha1', args.encode(config.charset)).hexdigest()
+        shahex = hashlib.new('sha1', args.encode(config.charset)).hexdigest()
         backlink_id = "fndef-%s-%d" % (shahex, idx)
         fwdlink_id = "fnref-%s" % shahex
 
--- a/MoinMoin/mail/_tests/test_sendmail.py	Wed Sep 07 03:05:27 2016 +0200
+++ b/MoinMoin/mail/_tests/test_sendmail.py	Wed Sep 07 04:31:59 2016 +0200
@@ -6,8 +6,8 @@
     @license: GNU GPL, see COPYING for details.
 """
 
-from email.Charset import Charset, QP
-from email.Header import Header
+from email.charset import Charset, QP
+from email.header import Header
 from MoinMoin.mail import sendmail
 from MoinMoin import config
 
--- a/MoinMoin/mail/mailimport.py	Wed Sep 07 03:05:27 2016 +0200
+++ b/MoinMoin/mail/mailimport.py	Wed Sep 07 04:31:59 2016 +0200
@@ -11,14 +11,13 @@
 
 import sys, re, time
 import email
-from email.Utils import getaddresses, parsedate_tz, mktime_tz
+from email.utils import getaddresses, parsedate_tz, mktime_tz
+from email.header import decode_header
 
 from MoinMoin import wikiutil, user
 from MoinMoin.action.AttachFile import add_attachment, AttachmentAlreadyExists
 from MoinMoin.Page import Page
 from MoinMoin.PageEditor import PageEditor
-# python, at least up to 2.4, ships a broken parser for headers
-from MoinMoin.support.HeaderFixed import decode_header
 
 infile = sys.stdin
 
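The HeaderFixed workaround module (deleted further down) is no longer needed; mail import now uses email.header.decode_header and email.utils from the standard library. A small sketch with a made-up encoded header and address:

    >>> from email.header import decode_header
    >>> decode_header('=?utf-8?q?p=C3=A4ge_changed?=')
    [('p\xc3\xa4ge changed', 'utf-8')]
    >>> from email.utils import getaddresses
    >>> getaddresses(['John Doe <john@example.org>'])
    [('John Doe', 'john@example.org')]
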
--- a/MoinMoin/mail/sendmail.py	Wed Sep 07 03:05:27 2016 +0200
+++ b/MoinMoin/mail/sendmail.py	Wed Sep 07 04:31:59 2016 +0200
@@ -8,7 +8,7 @@
 """
 
 import os, re
-from email.Header import Header
+from email.header import Header
 
 from MoinMoin import log
 logging = log.getLogger(__name__)
@@ -68,9 +68,9 @@
     @return: (is_ok, Description of error or OK message)
     """
     import smtplib, socket
-    from email.Message import Message
-    from email.Charset import Charset, QP
-    from email.Utils import formatdate, make_msgid
+    from email.message import Message
+    from email.charset import Charset, QP
+    from email.utils import formatdate, make_msgid
 
     _ = request.getText
     cfg = request.cfg
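
The capitalized email.Header / email.Charset / email.Utils names are the old pre-Python-2.5 aliases; the lowercase module names used now are the canonical ones in the email 4.0 package (and the only ones left in Python 3). A minimal header-encoding sketch with a made-up subject line:

    from email.charset import Charset, QP
    from email.header import Header

    charset = Charset('utf-8')
    charset.header_encoding = QP
    subject = Header(u'[TestWiki] P\xe4ge changed', charset)
    print str(subject)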
--- a/MoinMoin/parser/_ParserBase.py	Wed Sep 07 03:05:27 2016 +0200
+++ b/MoinMoin/parser/_ParserBase.py	Wed Sep 07 04:31:59 2016 +0200
@@ -24,13 +24,13 @@
 
 """
 
+import hashlib
 import re
 
 from MoinMoin import log
 logging = log.getLogger(__name__)
 
 from MoinMoin import config, wikiutil
-from MoinMoin.support.python_compatibility import hash_new
 from MoinMoin.parser import parse_start_step
 
 
@@ -220,7 +220,7 @@
 
         result = [] # collects output
 
-        self._code_id = hash_new('sha1', self.raw.encode(config.charset)).hexdigest()
+        self._code_id = hashlib.new('sha1', self.raw.encode(config.charset)).hexdigest()
         result.append(formatter.code_area(1, self._code_id, self.parsername, self.show_nums, self.num_start, self.num_step))
 
         self.lastpos = 0
--- a/MoinMoin/parser/_tests/test_text_moin_wiki.py	Wed Sep 07 03:05:27 2016 +0200
+++ b/MoinMoin/parser/_tests/test_text_moin_wiki.py	Wed Sep 07 04:31:59 2016 +0200
@@ -338,7 +338,7 @@
         self.do(test)
 
     def do(self, test):
-        expected = r'&lt;tablewidth="80"&gt;'
+        expected = r'&lt;tablewidth=&quot;80&quot;&gt;'
         result = self.parse(test)
         assert re.search(expected, result)
 
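The updated expectation reflects that double quotes in the rendered output are now escaped as &quot; rather than left verbatim. For reference, the stdlib cgi.escape with quote=True produces exactly this form (MoinMoin's formatter uses its own escaping helper, so this is only an illustration):

    >>> import cgi
    >>> cgi.escape('<tablewidth="80">', quote=True)
    '&lt;tablewidth=&quot;80&quot;&gt;'
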
--- a/MoinMoin/parser/highlight.py	Wed Sep 07 03:05:27 2016 +0200
+++ b/MoinMoin/parser/highlight.py	Wed Sep 07 04:31:59 2016 +0200
@@ -5,7 +5,7 @@
     @copyright: 2008 Radomir Dopieralski <moindev@sheep.art.pl>
     @license: GNU GPL, see COPYING for details.
 """
-
+import hashlib
 import re
 
 import pygments
@@ -16,7 +16,6 @@
 
 from MoinMoin import config, wikiutil
 from MoinMoin.parser import parse_start_step
-from MoinMoin.support.python_compatibility import hash_new
 from MoinMoin.Page import Page
 
 Dependencies = ['user'] # the "Toggle line numbers link" depends on user's language
@@ -161,7 +160,7 @@
             fmt.result.append(formatter.line_anchordef(lineno))
 
         fmt.result.append(formatter.div(1, css_class="highlight %s" % self.syntax))
-        self._code_id = hash_new('sha1', self.raw.encode(config.charset)).hexdigest()
+        self._code_id = hashlib.new('sha1', self.raw.encode(config.charset)).hexdigest()
         msg = None
         if self.filename is not None:
             try:
--- a/MoinMoin/parser/text_moin_wiki.py	Wed Sep 07 03:05:27 2016 +0200
+++ b/MoinMoin/parser/text_moin_wiki.py	Wed Sep 07 04:31:59 2016 +0200
@@ -15,7 +15,6 @@
 
 from MoinMoin import config, wikiutil, macro
 from MoinMoin.Page import Page
-from MoinMoin.support.python_compatibility import set
 
 Dependencies = ['user'] # {{{#!wiki comment ... }}} has different output depending on the user's profile settings
 
--- a/MoinMoin/script/maint/mkpagepacks.py	Wed Sep 07 03:05:27 2016 +0200
+++ b/MoinMoin/script/maint/mkpagepacks.py	Wed Sep 07 04:31:59 2016 +0200
@@ -13,7 +13,6 @@
 import time
 from datetime import datetime
 
-from MoinMoin.support.python_compatibility import set
 from MoinMoin import wikiutil
 from MoinMoin.action.AttachFile import _get_files
 from MoinMoin.Page import Page
--- a/MoinMoin/script/migration/1089999.py	Wed Sep 07 03:05:27 2016 +0200
+++ b/MoinMoin/script/migration/1089999.py	Wed Sep 07 04:31:59 2016 +0200
@@ -14,8 +14,8 @@
 """
 
 import os, errno
+import tarfile
 
-from MoinMoin.support import tarfile
 from MoinMoin.action.AttachFile import getAttachDir
 
 
--- a/MoinMoin/script/migration/_conv160_wiki.py	Wed Sep 07 03:05:27 2016 +0200
+++ b/MoinMoin/script/migration/_conv160_wiki.py	Wed Sep 07 04:31:59 2016 +0200
@@ -33,7 +33,6 @@
 from MoinMoin import config, wikiutil, macro
 from MoinMoin.action import AttachFile
 from MoinMoin.Page import Page
-from MoinMoin.support.python_compatibility import rsplit
 
 from text_moin158_wiki import Parser
 
@@ -177,7 +176,7 @@
         return new_name
 
     def _replace_target(self, target):
-        target_and_anchor = rsplit(target, '#', 1)
+        target_and_anchor = target.rsplit('#', 1)
         if len(target_and_anchor) > 1:
             target, anchor = target_and_anchor
             target = self._replace(('PAGE', target))
--- a/MoinMoin/script/migration/_conv160a_wiki.py	Wed Sep 07 03:05:27 2016 +0200
+++ b/MoinMoin/script/migration/_conv160a_wiki.py	Wed Sep 07 04:31:59 2016 +0200
@@ -21,7 +21,6 @@
 from MoinMoin import config, macro, wikiutil
 from MoinMoin.action import AttachFile
 from MoinMoin.Page import Page
-from MoinMoin.support.python_compatibility import rsplit
 
 import wikiutil160a
 from text_moin160a_wiki import Parser
@@ -168,7 +167,7 @@
         return new_name
 
     def _replace_target(self, target):
-        target_and_anchor = rsplit(target, '#', 1)
+        target_and_anchor = target.rsplit('#', 1)
         if len(target_and_anchor) > 1:
             target, anchor = target_and_anchor
             target = self._replace(('PAGE', target))
--- a/MoinMoin/script/migration/text_moin158_wiki.py	Wed Sep 07 03:05:27 2016 +0200
+++ b/MoinMoin/script/migration/text_moin158_wiki.py	Wed Sep 07 04:31:59 2016 +0200
@@ -7,6 +7,7 @@
 """
 
 import os, re
+import hashlib
 from MoinMoin import config, wikiutil
 from MoinMoin import macro as wikimacro
 from MoinMoin.Page import Page
@@ -748,8 +749,6 @@
 
     def _heading_repl(self, word):
         """Handle section headings."""
-        from MoinMoin.support.python_compatibility import hash_new
-
         h = word.strip()
         level = 1
         while h[level:level+1] == '=':
@@ -768,7 +767,7 @@
         if self.titles[pntt] > 1:
             unique_id = '-%d' % self.titles[pntt]
         result = self._closeP()
-        result += self.formatter.heading(1, depth, id="head-"+hash_new('sha1', pntt.encode(config.charset)).hexdigest()+unique_id)
+        result += self.formatter.heading(1, depth, id="head-"+hashlib.new('sha1', pntt.encode(config.charset)).hexdigest()+unique_id)
 
         return (result + self.formatter.text(title_text) +
                 self.formatter.heading(0, depth))
--- a/MoinMoin/script/migration/text_moin160a_wiki.py	Wed Sep 07 03:05:27 2016 +0200
+++ b/MoinMoin/script/migration/text_moin160a_wiki.py	Wed Sep 07 04:31:59 2016 +0200
@@ -6,7 +6,7 @@
                 2006 by MoinMoin:ThomasWaldmann
     @license: GNU GPL, see COPYING for details.
 """
-
+import hashlib
 import re
 
 import wikiutil160a as wikiutil
@@ -767,8 +767,6 @@
 
     def _heading_repl(self, word):
         """Handle section headings."""
-        from MoinMoin.support.python_compatibility import hash_new
-
         h = word.strip()
         level = 1
         while h[level:level+1] == '=':
@@ -786,7 +784,7 @@
         if self.titles[pntt] > 1:
             unique_id = '-%d' % self.titles[pntt]
         result = self._closeP()
-        result += self.formatter.heading(1, depth, id="head-"+hash_new('sha1', pntt.encode(config.charset)).hexdigest()+unique_id)
+        result += self.formatter.heading(1, depth, id="head-"+hashlib.new('sha1', pntt.encode(config.charset)).hexdigest()+unique_id)
 
         return (result + self.formatter.text(title_text) +
                 self.formatter.heading(0, depth))
--- a/MoinMoin/script/migration/wikiutil160a.py	Wed Sep 07 03:05:27 2016 +0200
+++ b/MoinMoin/script/migration/wikiutil160a.py	Wed Sep 07 04:31:59 2016 +0200
@@ -9,6 +9,7 @@
 
 import cgi
 import codecs
+import hashlib
 import os
 import re
 import time
@@ -1578,9 +1579,8 @@
 
 def createTicket(request, tm=None):
     """Create a ticket using a site-specific secret (the config)"""
-    from MoinMoin.support.python_compatibility import hash_new
     ticket = tm or "%010x" % time.time()
-    digest = hash_new('sha1', ticket)
+    digest = hashlib.new('sha1', ticket)
 
     varnames = ['data_dir', 'data_underlay_dir', 'language_default',
                 'mail_smarthost', 'mail_from', 'page_front_page',
--- a/MoinMoin/security/antispam.py	Wed Sep 07 03:05:27 2016 +0200
+++ b/MoinMoin/security/antispam.py	Wed Sep 07 04:31:59 2016 +0200
@@ -11,7 +11,6 @@
 from MoinMoin import log
 logging = log.getLogger(__name__)
 
-from MoinMoin.support.python_compatibility import frozenset
 from MoinMoin.security import Permissions
 from MoinMoin import caching, wikiutil
 
--- a/MoinMoin/security/textcha.py	Wed Sep 07 03:05:27 2016 +0200
+++ b/MoinMoin/security/textcha.py	Wed Sep 07 04:31:59 2016 +0200
@@ -19,7 +19,7 @@
     @copyright: 2007 by MoinMoin:ThomasWaldmann
     @license: GNU GPL, see COPYING for details.
 """
-
+import hmac
 import re
 import random
 
@@ -30,7 +30,6 @@
 
 from MoinMoin import wikiutil
 from werkzeug.security import safe_str_cmp as safe_str_equal
-from MoinMoin.support.python_compatibility import hmac_new
 
 SHA1_LEN = 40 # length of hexdigest
 TIMESTAMP_LEN = 10 # length of timestamp
@@ -85,7 +84,7 @@
 
     def _compute_signature(self, question, timestamp):
         signature = u"%s%d" % (question, timestamp)
-        return hmac_new(self.secret, signature.encode('utf-8')).hexdigest()
+        return hmac.new(self.secret, signature.encode('utf-8')).hexdigest()
 
     def _init_qa(self, question=None):
         """ Initialize the question / answer.
--- a/MoinMoin/support/HeaderFixed.py	Wed Sep 07 03:05:27 2016 +0200
+++ /dev/null	Thu Jan 01 00:00:00 1970 +0000
@@ -1,77 +0,0 @@
-# copied from email.Header because the original is broken
-
-# Copyright (C) 2002-2004 Python Software Foundation
-# Author: Ben Gertzfield, Barry Warsaw
-# Contact: email-sig@python.org
-
-import sys, binascii
-
-from email.Header import ecre
-
-import email.quopriMIME
-import email.base64MIME
-from email.Errors import HeaderParseError
-from email.Charset import Charset
-
-SPACE = ' '
-
-if sys.version_info[:3] < (2, 9, 0): # insert the version number
-                                     # of a fixed python here
-
-    def decode_header(header):
-        """Decode a message header value without converting charset.
-    
-        Returns a list of (decoded_string, charset) pairs containing each of the
-        decoded parts of the header.  Charset is None for non-encoded parts of the
-        header, otherwise a lower-case string containing the name of the character
-        set specified in the encoded string.
-    
-        An email.Errors.HeaderParseError may be raised when certain decoding error
-        occurs (e.g. a base64 decoding exception).
-        """
-        # If no encoding, just return the header
-        header = str(header)
-        if not ecre.search(header):
-            return [(header, None)]
-        decoded = []
-        dec = ''
-        for line in header.splitlines():
-            # This line might not have an encoding in it
-            if not ecre.search(line):
-                decoded.append((line, None))
-                continue
-            parts = ecre.split(line)
-            while parts:
-                unenc = parts.pop(0).rstrip()
-                if unenc:
-                    # Should we continue a long line?
-                    if decoded and decoded[-1][1] is None:
-                        decoded[-1] = (decoded[-1][0] + SPACE + unenc, None)
-                    else:
-                        decoded.append((unenc, None))
-                if parts:
-                    charset, encoding = [s.lower() for s in parts[0:2]]
-                    encoded = parts[2]
-                    dec = None
-                    if encoding == 'q':
-                        dec = email.quopriMIME.header_decode(encoded)
-                    elif encoding == 'b':
-                        try:
-                            dec = email.base64MIME.decode(encoded)
-                        except binascii.Error:
-                            # Turn this into a higher level exception.  BAW: Right
-                            # now we throw the lower level exception away but
-                            # when/if we get exception chaining, we'll preserve it.
-                            raise HeaderParseError
-                    if dec is None:
-                        dec = encoded
-    
-                    if decoded and decoded[-1][1] == charset:
-                        decoded[-1] = (decoded[-1][0] + dec, decoded[-1][1])
-                    else:
-                        decoded.append((dec, charset))
-                del parts[0:3]
-        return decoded
-
-else:
-    from email.Header import decode_header
--- a/MoinMoin/support/difflib.py	Wed Sep 07 03:05:27 2016 +0200
+++ /dev/null	Thu Jan 01 00:00:00 1970 +0000
@@ -1,2029 +0,0 @@
-#! /usr/bin/env python
-# Python 2.4.3 (maybe other versions, too) has a broken difflib, sometimes
-# raising a "maximum recursion depth exceeded in cmp" exception.
-# This is taken from python.org SVN repo revision 54230 with patches
-# 36160 and 34415 reversed for python2.3 compatibility.
-# Also, startswith(tuple) [2.5] was changed to multiple startswith(elem).
-
-"""
-Module difflib -- helpers for computing deltas between objects.
-
-Function get_close_matches(word, possibilities, n=3, cutoff=0.6):
-    Use SequenceMatcher to return list of the best "good enough" matches.
-
-Function context_diff(a, b):
-    For two lists of strings, return a delta in context diff format.
-
-Function ndiff(a, b):
-    Return a delta: the difference between `a` and `b` (lists of strings).
-
-Function restore(delta, which):
-    Return one of the two sequences that generated an ndiff delta.
-
-Function unified_diff(a, b):
-    For two lists of strings, return a delta in unified diff format.
-
-Class SequenceMatcher:
-    A flexible class for comparing pairs of sequences of any type.
-
-Class Differ:
-    For producing human-readable deltas from sequences of lines of text.
-
-Class HtmlDiff:
-    For producing HTML side by side comparison with change highlights.
-"""
-
-__all__ = ['get_close_matches', 'ndiff', 'restore', 'SequenceMatcher',
-           'Differ','IS_CHARACTER_JUNK', 'IS_LINE_JUNK', 'context_diff',
-           'unified_diff', 'HtmlDiff']
-
-def _calculate_ratio(matches, length):
-    if length:
-        return 2.0 * matches / length
-    return 1.0
-
-class SequenceMatcher:
-
-    """
-    SequenceMatcher is a flexible class for comparing pairs of sequences of
-    any type, so long as the sequence elements are hashable.  The basic
-    algorithm predates, and is a little fancier than, an algorithm
-    published in the late 1980's by Ratcliff and Obershelp under the
-    hyperbolic name "gestalt pattern matching".  The basic idea is to find
-    the longest contiguous matching subsequence that contains no "junk"
-    elements (R-O doesn't address junk).  The same idea is then applied
-    recursively to the pieces of the sequences to the left and to the right
-    of the matching subsequence.  This does not yield minimal edit
-    sequences, but does tend to yield matches that "look right" to people.
-
-    SequenceMatcher tries to compute a "human-friendly diff" between two
-    sequences.  Unlike e.g. UNIX(tm) diff, the fundamental notion is the
-    longest *contiguous* & junk-free matching subsequence.  That's what
-    catches peoples' eyes.  The Windows(tm) windiff has another interesting
-    notion, pairing up elements that appear uniquely in each sequence.
-    That, and the method here, appear to yield more intuitive difference
-    reports than does diff.  This method appears to be the least vulnerable
-    to synching up on blocks of "junk lines", though (like blank lines in
-    ordinary text files, or maybe "<P>" lines in HTML files).  That may be
-    because this is the only method of the 3 that has a *concept* of
-    "junk" <wink>.
-
-    Example, comparing two strings, and considering blanks to be "junk":
-
-    >>> s = SequenceMatcher(lambda x: x == " ",
-    ...                     "private Thread currentThread;",
-    ...                     "private volatile Thread currentThread;")
-    >>>
-
-    .ratio() returns a float in [0, 1], measuring the "similarity" of the
-    sequences.  As a rule of thumb, a .ratio() value over 0.6 means the
-    sequences are close matches:
-
-    >>> print round(s.ratio(), 3)
-    0.866
-    >>>
-
-    If you're only interested in where the sequences match,
-    .get_matching_blocks() is handy:
-
-    >>> for block in s.get_matching_blocks():
-    ...     print "a[%d] and b[%d] match for %d elements" % block
-    a[0] and b[0] match for 8 elements
-    a[8] and b[17] match for 21 elements
-    a[29] and b[38] match for 0 elements
-
-    Note that the last tuple returned by .get_matching_blocks() is always a
-    dummy, (len(a), len(b), 0), and this is the only case in which the last
-    tuple element (number of elements matched) is 0.
-
-    If you want to know how to change the first sequence into the second,
-    use .get_opcodes():
-
-    >>> for opcode in s.get_opcodes():
-    ...     print "%6s a[%d:%d] b[%d:%d]" % opcode
-     equal a[0:8] b[0:8]
-    insert a[8:8] b[8:17]
-     equal a[8:29] b[17:38]
-
-    See the Differ class for a fancy human-friendly file differencer, which
-    uses SequenceMatcher both to compare sequences of lines, and to compare
-    sequences of characters within similar (near-matching) lines.
-
-    See also function get_close_matches() in this module, which shows how
-    simple code building on SequenceMatcher can be used to do useful work.
-
-    Timing:  Basic R-O is cubic time worst case and quadratic time expected
-    case.  SequenceMatcher is quadratic time for the worst case and has
-    expected-case behavior dependent in a complicated way on how many
-    elements the sequences have in common; best case time is linear.
-
-    Methods:
-
-    __init__(isjunk=None, a='', b='')
-        Construct a SequenceMatcher.
-
-    set_seqs(a, b)
-        Set the two sequences to be compared.
-
-    set_seq1(a)
-        Set the first sequence to be compared.
-
-    set_seq2(b)
-        Set the second sequence to be compared.
-
-    find_longest_match(alo, ahi, blo, bhi)
-        Find longest matching block in a[alo:ahi] and b[blo:bhi].
-
-    get_matching_blocks()
-        Return list of triples describing matching subsequences.
-
-    get_opcodes()
-        Return list of 5-tuples describing how to turn a into b.
-
-    ratio()
-        Return a measure of the sequences' similarity (float in [0,1]).
-
-    quick_ratio()
-        Return an upper bound on .ratio() relatively quickly.
-
-    real_quick_ratio()
-        Return an upper bound on ratio() very quickly.
-    """
-
-    def __init__(self, isjunk=None, a='', b=''):
-        """Construct a SequenceMatcher.
-
-        Optional arg isjunk is None (the default), or a one-argument
-        function that takes a sequence element and returns true iff the
-        element is junk.  None is equivalent to passing "lambda x: 0", i.e.
-        no elements are considered to be junk.  For example, pass
-            lambda x: x in " \\t"
-        if you're comparing lines as sequences of characters, and don't
-        want to synch up on blanks or hard tabs.
-
-        Optional arg a is the first of two sequences to be compared.  By
-        default, an empty string.  The elements of a must be hashable.  See
-        also .set_seqs() and .set_seq1().
-
-        Optional arg b is the second of two sequences to be compared.  By
-        default, an empty string.  The elements of b must be hashable. See
-        also .set_seqs() and .set_seq2().
-        """
-
-        # Members:
-        # a
-        #      first sequence
-        # b
-        #      second sequence; differences are computed as "what do
-        #      we need to do to 'a' to change it into 'b'?"
-        # b2j
-        #      for x in b, b2j[x] is a list of the indices (into b)
-        #      at which x appears; junk elements do not appear
-        # fullbcount
-        #      for x in b, fullbcount[x] == the number of times x
-        #      appears in b; only materialized if really needed (used
-        #      only for computing quick_ratio())
-        # matching_blocks
-        #      a list of (i, j, k) triples, where a[i:i+k] == b[j:j+k];
-        #      ascending & non-overlapping in i and in j; terminated by
-        #      a dummy (len(a), len(b), 0) sentinel
-        # opcodes
-        #      a list of (tag, i1, i2, j1, j2) tuples, where tag is
-        #      one of
-        #          'replace'   a[i1:i2] should be replaced by b[j1:j2]
-        #          'delete'    a[i1:i2] should be deleted
-        #          'insert'    b[j1:j2] should be inserted
-        #          'equal'     a[i1:i2] == b[j1:j2]
-        # isjunk
-        #      a user-supplied function taking a sequence element and
-        #      returning true iff the element is "junk" -- this has
-        #      subtle but helpful effects on the algorithm, which I'll
-        #      get around to writing up someday <0.9 wink>.
-        #      DON'T USE!  Only __chain_b uses this.  Use isbjunk.
-        # isbjunk
-        #      for x in b, isbjunk(x) == isjunk(x) but much faster;
-        #      it's really the has_key method of a hidden dict.
-        #      DOES NOT WORK for x in a!
-        # isbpopular
-        #      for x in b, isbpopular(x) is true iff b is reasonably long
-        #      (at least 200 elements) and x accounts for more than 1% of
-        #      its elements.  DOES NOT WORK for x in a!
-
-        self.isjunk = isjunk
-        self.a = self.b = None
-        self.set_seqs(a, b)
-
-    def set_seqs(self, a, b):
-        """Set the two sequences to be compared.
-
-        >>> s = SequenceMatcher()
-        >>> s.set_seqs("abcd", "bcde")
-        >>> s.ratio()
-        0.75
-        """
-
-        self.set_seq1(a)
-        self.set_seq2(b)
-
-    def set_seq1(self, a):
-        """Set the first sequence to be compared.
-
-        The second sequence to be compared is not changed.
-
-        >>> s = SequenceMatcher(None, "abcd", "bcde")
-        >>> s.ratio()
-        0.75
-        >>> s.set_seq1("bcde")
-        >>> s.ratio()
-        1.0
-        >>>
-
-        SequenceMatcher computes and caches detailed information about the
-        second sequence, so if you want to compare one sequence S against
-        many sequences, use .set_seq2(S) once and call .set_seq1(x)
-        repeatedly for each of the other sequences.
-
-        See also set_seqs() and set_seq2().
-        """
-
-        if a is self.a:
-            return
-        self.a = a
-        self.matching_blocks = self.opcodes = None
-
-    def set_seq2(self, b):
-        """Set the second sequence to be compared.
-
-        The first sequence to be compared is not changed.
-
-        >>> s = SequenceMatcher(None, "abcd", "bcde")
-        >>> s.ratio()
-        0.75
-        >>> s.set_seq2("abcd")
-        >>> s.ratio()
-        1.0
-        >>>
-
-        SequenceMatcher computes and caches detailed information about the
-        second sequence, so if you want to compare one sequence S against
-        many sequences, use .set_seq2(S) once and call .set_seq1(x)
-        repeatedly for each of the other sequences.
-
-        See also set_seqs() and set_seq1().
-        """
-
-        if b is self.b:
-            return
-        self.b = b
-        self.matching_blocks = self.opcodes = None
-        self.fullbcount = None
-        self.__chain_b()
-
-    # For each element x in b, set b2j[x] to a list of the indices in
-    # b where x appears; the indices are in increasing order; note that
-    # the number of times x appears in b is len(b2j[x]) ...
-    # when self.isjunk is defined, junk elements don't show up in this
-    # map at all, which stops the central find_longest_match method
-    # from starting any matching block at a junk element ...
-    # also creates the fast isbjunk function ...
-    # b2j also does not contain entries for "popular" elements, meaning
-    # elements that account for more than 1% of the total elements, and
-    # when the sequence is reasonably large (>= 200 elements); this can
-    # be viewed as an adaptive notion of semi-junk, and yields an enormous
-    # speedup when, e.g., comparing program files with hundreds of
-    # instances of "return NULL;" ...
-    # note that this is only called when b changes; so for cross-product
-    # kinds of matches, it's best to call set_seq2 once, then set_seq1
-    # repeatedly
-
-    def __chain_b(self):
-        # Because isjunk is a user-defined (not C) function, and we test
-        # for junk a LOT, it's important to minimize the number of calls.
-        # Before the tricks described here, __chain_b was by far the most
-        # time-consuming routine in the whole module!  If anyone sees
-        # Jim Roskind, thank him again for profile.py -- I never would
-        # have guessed that.
-        # The first trick is to build b2j ignoring the possibility
-        # of junk.  I.e., we don't call isjunk at all yet.  Throwing
-        # out the junk later is much cheaper than building b2j "right"
-        # from the start.
-        b = self.b
-        n = len(b)
-        self.b2j = b2j = {}
-        populardict = {}
-        for i, elt in enumerate(b):
-            if elt in b2j:
-                indices = b2j[elt]
-                if n >= 200 and len(indices) * 100 > n:
-                    populardict[elt] = 1
-                    del indices[:]
-                else:
-                    indices.append(i)
-            else:
-                b2j[elt] = [i]
-
-        # Purge leftover indices for popular elements.
-        for elt in populardict:
-            del b2j[elt]
-
-        # Now b2j.keys() contains elements uniquely, and especially when
-        # the sequence is a string, that's usually a good deal smaller
-        # than len(string).  The difference is the number of isjunk calls
-        # saved.
-        isjunk = self.isjunk
-        junkdict = {}
-        if isjunk:
-            for d in populardict, b2j:
-                for elt in d.keys():
-                    if isjunk(elt):
-                        junkdict[elt] = 1
-                        del d[elt]
-
-        # Now for x in b, isjunk(x) == x in junkdict, but the
-        # latter is much faster.  Note too that while there may be a
-        # lot of junk in the sequence, the number of *unique* junk
-        # elements is probably small.  So the memory burden of keeping
-        # this dict alive is likely trivial compared to the size of b2j.
-        self.isbjunk = junkdict.has_key
-        self.isbpopular = populardict.has_key
-
-    def find_longest_match(self, alo, ahi, blo, bhi):
-        """Find longest matching block in a[alo:ahi] and b[blo:bhi].
-
-        If isjunk is not defined:
-
-        Return (i,j,k) such that a[i:i+k] is equal to b[j:j+k], where
-            alo <= i <= i+k <= ahi
-            blo <= j <= j+k <= bhi
-        and for all (i',j',k') meeting those conditions,
-            k >= k'
-            i <= i'
-            and if i == i', j <= j'
-
-        In other words, of all maximal matching blocks, return one that
-        starts earliest in a, and of all those maximal matching blocks that
-        start earliest in a, return the one that starts earliest in b.
-
-        >>> s = SequenceMatcher(None, " abcd", "abcd abcd")
-        >>> s.find_longest_match(0, 5, 0, 9)
-        (0, 4, 5)
-
-        If isjunk is defined, first the longest matching block is
-        determined as above, but with the additional restriction that no
-        junk element appears in the block.  Then that block is extended as
-        far as possible by matching (only) junk elements on both sides.  So
-        the resulting block never matches on junk except as identical junk
-        happens to be adjacent to an "interesting" match.
-
-        Here's the same example as before, but considering blanks to be
-        junk.  That prevents " abcd" from matching the " abcd" at the tail
-        end of the second sequence directly.  Instead only the "abcd" can
-        match, and matches the leftmost "abcd" in the second sequence:
-
-        >>> s = SequenceMatcher(lambda x: x==" ", " abcd", "abcd abcd")
-        >>> s.find_longest_match(0, 5, 0, 9)
-        (1, 0, 4)
-
-        If no blocks match, return (alo, blo, 0).
-
-        >>> s = SequenceMatcher(None, "ab", "c")
-        >>> s.find_longest_match(0, 2, 0, 1)
-        (0, 0, 0)
-        """
-
-        # CAUTION:  stripping common prefix or suffix would be incorrect.
-        # E.g.,
-        #    ab
-        #    acab
-        # Longest matching block is "ab", but if common prefix is
-        # stripped, it's "a" (tied with "b").  UNIX(tm) diff does so
-        # strip, so ends up claiming that ab is changed to acab by
-        # inserting "ca" in the middle.  That's minimal but unintuitive:
-        # "it's obvious" that someone inserted "ac" at the front.
-        # Windiff ends up at the same place as diff, but by pairing up
-        # the unique 'b's and then matching the first two 'a's.
-
-        a, b, b2j, isbjunk = self.a, self.b, self.b2j, self.isbjunk
-        besti, bestj, bestsize = alo, blo, 0
-        # find longest junk-free match
-        # during an iteration of the loop, j2len[j] = length of longest
-        # junk-free match ending with a[i-1] and b[j]
-        j2len = {}
-        nothing = []
-        for i in xrange(alo, ahi):
-            # look at all instances of a[i] in b; note that because
-            # b2j has no junk keys, the loop is skipped if a[i] is junk
-            j2lenget = j2len.get
-            newj2len = {}
-            for j in b2j.get(a[i], nothing):
-                # a[i] matches b[j]
-                if j < blo:
-                    continue
-                if j >= bhi:
-                    break
-                k = newj2len[j] = j2lenget(j-1, 0) + 1
-                if k > bestsize:
-                    besti, bestj, bestsize = i-k+1, j-k+1, k
-            j2len = newj2len
-
-        # Extend the best by non-junk elements on each end.  In particular,
-        # "popular" non-junk elements aren't in b2j, which greatly speeds
-        # the inner loop above, but also means "the best" match so far
-        # doesn't contain any junk *or* popular non-junk elements.
-        while besti > alo and bestj > blo and \
-              not isbjunk(b[bestj-1]) and \
-              a[besti-1] == b[bestj-1]:
-            besti, bestj, bestsize = besti-1, bestj-1, bestsize+1
-        while besti+bestsize < ahi and bestj+bestsize < bhi and \
-              not isbjunk(b[bestj+bestsize]) and \
-              a[besti+bestsize] == b[bestj+bestsize]:
-            bestsize += 1
-
-        # Now that we have a wholly interesting match (albeit possibly
-        # empty!), we may as well suck up the matching junk on each
-        # side of it too.  Can't think of a good reason not to, and it
-        # saves post-processing the (possibly considerable) expense of
-        # figuring out what to do with it.  In the case of an empty
-        # interesting match, this is clearly the right thing to do,
-        # because no other kind of match is possible in the regions.
-        while besti > alo and bestj > blo and \
-              isbjunk(b[bestj-1]) and \
-              a[besti-1] == b[bestj-1]:
-            besti, bestj, bestsize = besti-1, bestj-1, bestsize+1
-        while besti+bestsize < ahi and bestj+bestsize < bhi and \
-              isbjunk(b[bestj+bestsize]) and \
-              a[besti+bestsize] == b[bestj+bestsize]:
-            bestsize = bestsize + 1
-
-        return besti, bestj, bestsize
-
-    def get_matching_blocks(self):
-        """Return list of triples describing matching subsequences.
-
-        Each triple is of the form (i, j, n), and means that
-        a[i:i+n] == b[j:j+n].  The triples are monotonically increasing in
-        i and in j.  New in Python 2.5, it's also guaranteed that if
-        (i, j, n) and (i', j', n') are adjacent triples in the list, and
-        the second is not the last triple in the list, then i+n != i' or
-        j+n != j'.  IOW, adjacent triples never describe adjacent equal
-        blocks.
-
-        The last triple is a dummy, (len(a), len(b), 0), and is the only
-        triple with n==0.
-
-        >>> s = SequenceMatcher(None, "abxcd", "abcd")
-        >>> s.get_matching_blocks()
-        [(0, 0, 2), (3, 2, 2), (5, 4, 0)]
-        """
-
-        if self.matching_blocks is not None:
-            return self.matching_blocks
-        la, lb = len(self.a), len(self.b)
-
-        # This is most naturally expressed as a recursive algorithm, but
-        # at least one user bumped into extreme use cases that exceeded
-        # the recursion limit on their box.  So, now we maintain a list
-        # ('queue`) of blocks we still need to look at, and append partial
-        # results to `matching_blocks` in a loop; the matches are sorted
-        # at the end.
-        queue = [(0, la, 0, lb)]
-        matching_blocks = []
-        while queue:
-            alo, ahi, blo, bhi = queue.pop()
-            i, j, k = x = self.find_longest_match(alo, ahi, blo, bhi)
-            # a[alo:i] vs b[blo:j] unknown
-            # a[i:i+k] same as b[j:j+k]
-            # a[i+k:ahi] vs b[j+k:bhi] unknown
-            if k:   # if k is 0, there was no matching block
-                matching_blocks.append(x)
-                if alo < i and blo < j:
-                    queue.append((alo, i, blo, j))
-                if i+k < ahi and j+k < bhi:
-                    queue.append((i+k, ahi, j+k, bhi))
-        matching_blocks.sort()
-
-        # It's possible that we have adjacent equal blocks in the
-        # matching_blocks list now.  Starting with 2.5, this code was added
-        # to collapse them.
-        i1 = j1 = k1 = 0
-        non_adjacent = []
-        for i2, j2, k2 in matching_blocks:
-            # Is this block adjacent to i1, j1, k1?
-            if i1 + k1 == i2 and j1 + k1 == j2:
-                # Yes, so collapse them -- this just increases the length of
-                # the first block by the length of the second, and the first
-                # block so lengthened remains the block to compare against.
-                k1 += k2
-            else:
-                # Not adjacent.  Remember the first block (k1==0 means it's
-                # the dummy we started with), and make the second block the
-                # new block to compare against.
-                if k1:
-                    non_adjacent.append((i1, j1, k1))
-                i1, j1, k1 = i2, j2, k2
-        if k1:
-            non_adjacent.append((i1, j1, k1))
-
-        non_adjacent.append( (la, lb, 0) )
-        self.matching_blocks = non_adjacent
-        return self.matching_blocks
-
-    def get_opcodes(self):
-        """Return list of 5-tuples describing how to turn a into b.
-
-        Each tuple is of the form (tag, i1, i2, j1, j2).  The first tuple
-        has i1 == j1 == 0, and remaining tuples have i1 == the i2 from the
-        tuple preceding it, and likewise for j1 == the previous j2.
-
-        The tags are strings, with these meanings:
-
-        'replace':  a[i1:i2] should be replaced by b[j1:j2]
-        'delete':   a[i1:i2] should be deleted.
-                    Note that j1==j2 in this case.
-        'insert':   b[j1:j2] should be inserted at a[i1:i1].
-                    Note that i1==i2 in this case.
-        'equal':    a[i1:i2] == b[j1:j2]
-
-        >>> a = "qabxcd"
-        >>> b = "abycdf"
-        >>> s = SequenceMatcher(None, a, b)
-        >>> for tag, i1, i2, j1, j2 in s.get_opcodes():
-        ...    print ("%7s a[%d:%d] (%s) b[%d:%d] (%s)" %
-        ...           (tag, i1, i2, a[i1:i2], j1, j2, b[j1:j2]))
-         delete a[0:1] (q) b[0:0] ()
-          equal a[1:3] (ab) b[0:2] (ab)
-        replace a[3:4] (x) b[2:3] (y)
-          equal a[4:6] (cd) b[3:5] (cd)
-         insert a[6:6] () b[5:6] (f)
-        """
-
-        if self.opcodes is not None:
-            return self.opcodes
-        i = j = 0
-        self.opcodes = answer = []
-        for ai, bj, size in self.get_matching_blocks():
-            # invariant:  we've pumped out correct diffs to change
-            # a[:i] into b[:j], and the next matching block is
-            # a[ai:ai+size] == b[bj:bj+size].  So we need to pump
-            # out a diff to change a[i:ai] into b[j:bj], pump out
-            # the matching block, and move (i,j) beyond the match
-            tag = ''
-            if i < ai and j < bj:
-                tag = 'replace'
-            elif i < ai:
-                tag = 'delete'
-            elif j < bj:
-                tag = 'insert'
-            if tag:
-                answer.append( (tag, i, ai, j, bj) )
-            i, j = ai+size, bj+size
-            # the list of matching blocks is terminated by a
-            # sentinel with size 0
-            if size:
-                answer.append( ('equal', ai, i, bj, j) )
-        return answer
-
-    def get_grouped_opcodes(self, n=3):
-        """ Isolate change clusters by eliminating ranges with no changes.
-
-        Return a generator of groups with upto n lines of context.
-        Each group is in the same format as returned by get_opcodes().
-
-        >>> from pprint import pprint
-        >>> a = map(str, range(1,40))
-        >>> b = a[:]
-        >>> b[8:8] = ['i']     # Make an insertion
-        >>> b[20] += 'x'       # Make a replacement
-        >>> b[23:28] = []      # Make a deletion
-        >>> b[30] += 'y'       # Make another replacement
-        >>> pprint(list(SequenceMatcher(None,a,b).get_grouped_opcodes()))
-        [[('equal', 5, 8, 5, 8), ('insert', 8, 8, 8, 9), ('equal', 8, 11, 9, 12)],
-         [('equal', 16, 19, 17, 20),
-          ('replace', 19, 20, 20, 21),
-          ('equal', 20, 22, 21, 23),
-          ('delete', 22, 27, 23, 23),
-          ('equal', 27, 30, 23, 26)],
-         [('equal', 31, 34, 27, 30),
-          ('replace', 34, 35, 30, 31),
-          ('equal', 35, 38, 31, 34)]]
-        """
-
-        codes = self.get_opcodes()
-        if not codes:
-            codes = [("equal", 0, 1, 0, 1)]
-        # Fixup leading and trailing groups if they show no changes.
-        if codes[0][0] == 'equal':
-            tag, i1, i2, j1, j2 = codes[0]
-            codes[0] = tag, max(i1, i2-n), i2, max(j1, j2-n), j2
-        if codes[-1][0] == 'equal':
-            tag, i1, i2, j1, j2 = codes[-1]
-            codes[-1] = tag, i1, min(i2, i1+n), j1, min(j2, j1+n)
-
-        nn = n + n
-        group = []
-        for tag, i1, i2, j1, j2 in codes:
-            # End the current group and start a new one whenever
-            # there is a large range with no changes.
-            if tag == 'equal' and i2-i1 > nn:
-                group.append((tag, i1, min(i2, i1+n), j1, min(j2, j1+n)))
-                yield group
-                group = []
-                i1, j1 = max(i1, i2-n), max(j1, j2-n)
-            group.append((tag, i1, i2, j1 ,j2))
-        if group and not (len(group)==1 and group[0][0] == 'equal'):
-            yield group
-
-    def ratio(self):
-        """Return a measure of the sequences' similarity (float in [0,1]).
-
-        Where T is the total number of elements in both sequences, and
-        M is the number of matches, this is 2.0*M / T.
-        Note that this is 1 if the sequences are identical, and 0 if
-        they have nothing in common.
-
-        .ratio() is expensive to compute if you haven't already computed
-        .get_matching_blocks() or .get_opcodes(), in which case you may
-        want to try .quick_ratio() or .real_quick_ratio() first to get an
-        upper bound.
-
-        >>> s = SequenceMatcher(None, "abcd", "bcde")
-        >>> s.ratio()
-        0.75
-        >>> s.quick_ratio()
-        0.75
-        >>> s.real_quick_ratio()
-        1.0
-        """
-
-        matches = reduce(lambda sum, triple: sum + triple[-1],
-                         self.get_matching_blocks(), 0)
-        return _calculate_ratio(matches, len(self.a) + len(self.b))
-
-    def quick_ratio(self):
-        """Return an upper bound on ratio() relatively quickly.
-
-        This isn't defined beyond that it is an upper bound on .ratio(), and
-        is faster to compute.
-        """
-
-        # viewing a and b as multisets, set matches to the cardinality
-        # of their intersection; this counts the number of matches
-        # without regard to order, so is clearly an upper bound
-        if self.fullbcount is None:
-            self.fullbcount = fullbcount = {}
-            for elt in self.b:
-                fullbcount[elt] = fullbcount.get(elt, 0) + 1
-        fullbcount = self.fullbcount
-        # avail[x] is the number of times x appears in 'b' less the
-        # number of times we've seen it in 'a' so far ... kinda
-        avail = {}
-        availhas, matches = avail.has_key, 0
-        for elt in self.a:
-            if availhas(elt):
-                numb = avail[elt]
-            else:
-                numb = fullbcount.get(elt, 0)
-            avail[elt] = numb - 1
-            if numb > 0:
-                matches = matches + 1
-        return _calculate_ratio(matches, len(self.a) + len(self.b))
-
-    def real_quick_ratio(self):
-        """Return an upper bound on ratio() very quickly.
-
-        This isn't defined beyond that it is an upper bound on .ratio(), and
-        is faster to compute than either .ratio() or .quick_ratio().
-        """
-
-        la, lb = len(self.a), len(self.b)
-        # can't have more matches than the number of elements in the
-        # shorter sequence
-        return _calculate_ratio(min(la, lb), la + lb)
-
-def get_close_matches(word, possibilities, n=3, cutoff=0.6):
-    """Use SequenceMatcher to return list of the best "good enough" matches.
-
-    word is a sequence for which close matches are desired (typically a
-    string).
-
-    possibilities is a list of sequences against which to match word
-    (typically a list of strings).
-
-    Optional arg n (default 3) is the maximum number of close matches to
-    return.  n must be > 0.
-
-    Optional arg cutoff (default 0.6) is a float in [0, 1].  Possibilities
-    that don't score at least that similar to word are ignored.
-
-    The best (no more than n) matches among the possibilities are returned
-    in a list, sorted by similarity score, most similar first.
-
-    >>> get_close_matches("appel", ["ape", "apple", "peach", "puppy"])
-    ['apple', 'ape']
-    >>> import keyword as _keyword
-    >>> get_close_matches("wheel", _keyword.kwlist)
-    ['while']
-    >>> get_close_matches("apple", _keyword.kwlist)
-    []
-    >>> get_close_matches("accept", _keyword.kwlist)
-    ['except']
-    """
-
-    if not n >  0:
-        raise ValueError("n must be > 0: %r" % (n,))
-    if not 0.0 <= cutoff <= 1.0:
-        raise ValueError("cutoff must be in [0.0, 1.0]: %r" % (cutoff,))
-    result = []
-    s = SequenceMatcher()
-    s.set_seq2(word)
-    for x in possibilities:
-        s.set_seq1(x)
-        if s.real_quick_ratio() >= cutoff and \
-           s.quick_ratio() >= cutoff and \
-           s.ratio() >= cutoff:
-            result.append((s.ratio(), x))
-
-    # Sort by score.    
-    result.sort()   
-    # Retain only the best n.   
-    result = result[-n:]    
-    # Move best-scorer to head of list.     
-    result.reverse()    
-    # Strip scores.     
-    return [x for score, x in result]
-
-def _count_leading(line, ch):
-    """
-    Return number of `ch` characters at the start of `line`.
-
-    Example:
-
-    >>> _count_leading('   abc', ' ')
-    3
-    """
-
-    i, n = 0, len(line)
-    while i < n and line[i] == ch:
-        i += 1
-    return i
-
-class Differ:
-    r"""
-    Differ is a class for comparing sequences of lines of text, and
-    producing human-readable differences or deltas.  Differ uses
-    SequenceMatcher both to compare sequences of lines, and to compare
-    sequences of characters within similar (near-matching) lines.
-
-    Each line of a Differ delta begins with a two-letter code:
-
-        '- '    line unique to sequence 1
-        '+ '    line unique to sequence 2
-        '  '    line common to both sequences
-        '? '    line not present in either input sequence
-
-    Lines beginning with '? ' attempt to guide the eye to intraline
-    differences, and were not present in either input sequence.  These lines
-    can be confusing if the sequences contain tab characters.
-
-    Note that Differ makes no claim to produce a *minimal* diff.  To the
-    contrary, minimal diffs are often counter-intuitive, because they synch
-    up anywhere possible, sometimes accidental matches 100 pages apart.
-    Restricting synch points to contiguous matches preserves some notion of
-    locality, at the occasional cost of producing a longer diff.
-
-    Example: Comparing two texts.
-
-    First we set up the texts, sequences of individual single-line strings
-    ending with newlines (such sequences can also be obtained from the
-    `readlines()` method of file-like objects):
-
-    >>> text1 = '''  1. Beautiful is better than ugly.
-    ...   2. Explicit is better than implicit.
-    ...   3. Simple is better than complex.
-    ...   4. Complex is better than complicated.
-    ... '''.splitlines(1)
-    >>> len(text1)
-    4
-    >>> text1[0][-1]
-    '\n'
-    >>> text2 = '''  1. Beautiful is better than ugly.
-    ...   3.   Simple is better than complex.
-    ...   4. Complicated is better than complex.
-    ...   5. Flat is better than nested.
-    ... '''.splitlines(1)
-
-    Next we instantiate a Differ object:
-
-    >>> d = Differ()
-
-    Note that when instantiating a Differ object we may pass functions to
-    filter out line and character 'junk'.  See Differ.__init__ for details.
-
-    Finally, we compare the two:
-
-    >>> result = list(d.compare(text1, text2))
-
-    'result' is a list of strings, so let's pretty-print it:
-
-    >>> from pprint import pprint as _pprint
-    >>> _pprint(result)
-    ['    1. Beautiful is better than ugly.\n',
-     '-   2. Explicit is better than implicit.\n',
-     '-   3. Simple is better than complex.\n',
-     '+   3.   Simple is better than complex.\n',
-     '?     ++\n',
-     '-   4. Complex is better than complicated.\n',
-     '?            ^                     ---- ^\n',
-     '+   4. Complicated is better than complex.\n',
-     '?           ++++ ^                      ^\n',
-     '+   5. Flat is better than nested.\n']
-
-    As a single multi-line string it looks like this:
-
-    >>> print ''.join(result),
-        1. Beautiful is better than ugly.
-    -   2. Explicit is better than implicit.
-    -   3. Simple is better than complex.
-    +   3.   Simple is better than complex.
-    ?     ++
-    -   4. Complex is better than complicated.
-    ?            ^                     ---- ^
-    +   4. Complicated is better than complex.
-    ?           ++++ ^                      ^
-    +   5. Flat is better than nested.
-
-    Methods:
-
-    __init__(linejunk=None, charjunk=None)
-        Construct a text differencer, with optional filters.
-
-    compare(a, b)
-        Compare two sequences of lines; generate the resulting delta.
-    """
-
-    def __init__(self, linejunk=None, charjunk=None):
-        """
-        Construct a text differencer, with optional filters.
-
-        The two optional keyword parameters are for filter functions:
-
-        - `linejunk`: A function that should accept a single string argument,
-          and return true iff the string is junk. The module-level function
-          `IS_LINE_JUNK` may be used to filter out lines without visible
-          characters, except for at most one splat ('#').  It is recommended
-          to leave linejunk None; as of Python 2.3, the underlying
-          SequenceMatcher class has grown an adaptive notion of "noise" lines
-          that's better than any static definition the author has ever been
-          able to craft.
-
-        - `charjunk`: A function that should accept a string of length 1. The
-          module-level function `IS_CHARACTER_JUNK` may be used to filter out
-          whitespace characters (a blank or tab; **note**: bad idea to include
-          newline in this!).  Use of IS_CHARACTER_JUNK is recommended.
-        """
-
-        self.linejunk = linejunk
-        self.charjunk = charjunk
-
-    def compare(self, a, b):
-        r"""
-        Compare two sequences of lines; generate the resulting delta.
-
-        Each sequence must contain individual single-line strings ending with
-        newlines. Such sequences can be obtained from the `readlines()` method
-        of file-like objects.  The delta generated also consists of newline-
-        terminated strings, ready to be printed as-is via the writelines()
-        method of a file-like object.
-
-        Example:
-
-        >>> print ''.join(Differ().compare('one\ntwo\nthree\n'.splitlines(1),
-        ...                                'ore\ntree\nemu\n'.splitlines(1))),
-        - one
-        ?  ^
-        + ore
-        ?  ^
-        - two
-        - three
-        ?  -
-        + tree
-        + emu
-        """
-
-        cruncher = SequenceMatcher(self.linejunk, a, b)
-        for tag, alo, ahi, blo, bhi in cruncher.get_opcodes():
-            if tag == 'replace':
-                g = self._fancy_replace(a, alo, ahi, b, blo, bhi)
-            elif tag == 'delete':
-                g = self._dump('-', a, alo, ahi)
-            elif tag == 'insert':
-                g = self._dump('+', b, blo, bhi)
-            elif tag == 'equal':
-                g = self._dump(' ', a, alo, ahi)
-            else:
-                raise ValueError, 'unknown tag %r' % (tag,)
-
-            for line in g:
-                yield line
-
-    def _dump(self, tag, x, lo, hi):
-        """Generate comparison results for a same-tagged range."""
-        for i in xrange(lo, hi):
-            yield '%s %s' % (tag, x[i])
-
-    def _plain_replace(self, a, alo, ahi, b, blo, bhi):
-        assert alo < ahi and blo < bhi
-        # dump the shorter block first -- reduces the burden on short-term
-        # memory if the blocks are of very different sizes
-        if bhi - blo < ahi - alo:
-            first  = self._dump('+', b, blo, bhi)
-            second = self._dump('-', a, alo, ahi)
-        else:
-            first  = self._dump('-', a, alo, ahi)
-            second = self._dump('+', b, blo, bhi)
-
-        for g in first, second:
-            for line in g:
-                yield line
-
-    def _fancy_replace(self, a, alo, ahi, b, blo, bhi):
-        r"""
-        When replacing one block of lines with another, search the blocks
-        for *similar* lines; the best-matching pair (if any) is used as a
-        synch point, and intraline difference marking is done on the
-        similar pair. Lots of work, but often worth it.
-
-        Example:
-
-        >>> d = Differ()
-        >>> results = d._fancy_replace(['abcDefghiJkl\n'], 0, 1,
-        ...                            ['abcdefGhijkl\n'], 0, 1)
-        >>> print ''.join(results),
-        - abcDefghiJkl
-        ?    ^  ^  ^
-        + abcdefGhijkl
-        ?    ^  ^  ^
-        """
-
-        # don't synch up unless the lines have a similarity score of at
-        # least cutoff; best_ratio tracks the best score seen so far
-        best_ratio, cutoff = 0.74, 0.75
-        cruncher = SequenceMatcher(self.charjunk)
-        eqi, eqj = None, None   # 1st indices of equal lines (if any)
-
-        # search for the pair that matches best without being identical
-        # (identical lines must be junk lines, & we don't want to synch up
-        # on junk -- unless we have to)
-        for j in xrange(blo, bhi):
-            bj = b[j]
-            cruncher.set_seq2(bj)
-            for i in xrange(alo, ahi):
-                ai = a[i]
-                if ai == bj:
-                    if eqi is None:
-                        eqi, eqj = i, j
-                    continue
-                cruncher.set_seq1(ai)
-                # computing similarity is expensive, so use the quick
-                # upper bounds first -- have seen this speed up messy
-                # compares by a factor of 3.
-                # note that ratio() is only expensive to compute the first
-                # time it's called on a sequence pair; the expensive part
-                # of the computation is cached by cruncher
-                if cruncher.real_quick_ratio() > best_ratio and \
-                      cruncher.quick_ratio() > best_ratio and \
-                      cruncher.ratio() > best_ratio:
-                    best_ratio, best_i, best_j = cruncher.ratio(), i, j
-        if best_ratio < cutoff:
-            # no non-identical "pretty close" pair
-            if eqi is None:
-                # no identical pair either -- treat it as a straight replace
-                for line in self._plain_replace(a, alo, ahi, b, blo, bhi):
-                    yield line
-                return
-            # no close pair, but an identical pair -- synch up on that
-            best_i, best_j, best_ratio = eqi, eqj, 1.0
-        else:
-            # there's a close pair, so forget the identical pair (if any)
-            eqi = None
-
-        # a[best_i] very similar to b[best_j]; eqi is None iff they're not
-        # identical
-
-        # pump out diffs from before the synch point
-        for line in self._fancy_helper(a, alo, best_i, b, blo, best_j):
-            yield line
-
-        # do intraline marking on the synch pair
-        aelt, belt = a[best_i], b[best_j]
-        if eqi is None:
-            # pump out a '-', '?', '+', '?' quad for the synched lines
-            atags = btags = ""
-            cruncher.set_seqs(aelt, belt)
-            for tag, ai1, ai2, bj1, bj2 in cruncher.get_opcodes():
-                la, lb = ai2 - ai1, bj2 - bj1
-                if tag == 'replace':
-                    atags += '^' * la
-                    btags += '^' * lb
-                elif tag == 'delete':
-                    atags += '-' * la
-                elif tag == 'insert':
-                    btags += '+' * lb
-                elif tag == 'equal':
-                    atags += ' ' * la
-                    btags += ' ' * lb
-                else:
-                    raise ValueError, 'unknown tag %r' % (tag,)
-            for line in self._qformat(aelt, belt, atags, btags):
-                yield line
-        else:
-            # the synch pair is identical
-            yield '  ' + aelt
-
-        # pump out diffs from after the synch point
-        for line in self._fancy_helper(a, best_i+1, ahi, b, best_j+1, bhi):
-            yield line
-
-    def _fancy_helper(self, a, alo, ahi, b, blo, bhi):
-        g = []
-        if alo < ahi:
-            if blo < bhi:
-                g = self._fancy_replace(a, alo, ahi, b, blo, bhi)
-            else:
-                g = self._dump('-', a, alo, ahi)
-        elif blo < bhi:
-            g = self._dump('+', b, blo, bhi)
-
-        for line in g:
-            yield line
-
-    def _qformat(self, aline, bline, atags, btags):
-        r"""
-        Format "?" output and deal with leading tabs.
-
-        Example:
-
-        >>> d = Differ()
-        >>> results = d._qformat('\tabcDefghiJkl\n', '\t\tabcdefGhijkl\n',
-        ...                      '  ^ ^  ^      ', '+  ^ ^  ^      ')
-        >>> for line in results: print repr(line)
-        ...
-        '- \tabcDefghiJkl\n'
-        '? \t ^ ^  ^\n'
-        '+ \t\tabcdefGhijkl\n'
-        '? \t  ^ ^  ^\n'
-        """
-
-        # Can hurt, but will probably help most of the time.
-        common = min(_count_leading(aline, "\t"),
-                     _count_leading(bline, "\t"))
-        common = min(common, _count_leading(atags[:common], " "))
-        atags = atags[common:].rstrip()
-        btags = btags[common:].rstrip()
-
-        yield "- " + aline
-        if atags:
-            yield "? %s%s\n" % ("\t" * common, atags)
-
-        yield "+ " + bline
-        if btags:
-            yield "? %s%s\n" % ("\t" * common, btags)
-
-# With respect to junk, an earlier version of ndiff simply refused to
-# *start* a match with a junk element.  The result was cases like this:
-#     before: private Thread currentThread;
-#     after:  private volatile Thread currentThread;
-# If you consider whitespace to be junk, the longest contiguous match
-# not starting with junk is "e Thread currentThread".  So ndiff reported
-# that "e volatil" was inserted between the 't' and the 'e' in "private".
-# While an accurate view, to people that's absurd.  The current version
-# looks for matching blocks that are entirely junk-free, then extends the
-# longest one of those as far as possible but only with matching junk.
-# So now "currentThread" is matched, then extended to suck up the
-# preceding blank; then "private" is matched, and extended to suck up the
-# following blank; then "Thread" is matched; and finally ndiff reports
-# that "volatile " was inserted before "Thread".  The only quibble
-# remaining is that perhaps it was really the case that " volatile"
-# was inserted after "private".  I can live with that <wink>.
-
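The behaviour described in the comment above can be reproduced with ndiff(); an illustrative doctest-style sketch (the '?' guide line marks "volatile " as inserted before "Thread"; the exact spacing is indicative):

    >>> print ''.join(ndiff(['private Thread currentThread;\n'],
    ...                     ['private volatile Thread currentThread;\n'])),
    - private Thread currentThread;
    + private volatile Thread currentThread;
    ?         +++++++++
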
-import re
-
-def IS_LINE_JUNK(line, pat=re.compile(r"\s*#?\s*$").match):
-    r"""
-    Return True for ignorable line: iff `line` is blank or contains a single '#'.
-
-    Examples:
-
-    >>> IS_LINE_JUNK('\n')
-    True
-    >>> IS_LINE_JUNK('  #   \n')
-    True
-    >>> IS_LINE_JUNK('hello\n')
-    False
-    """
-
-    return pat(line) is not None
-
-def IS_CHARACTER_JUNK(ch, ws=" \t"):
-    r"""
-    Return True for ignorable character: iff `ch` is a space or tab.
-
-    Examples:
-
-    >>> IS_CHARACTER_JUNK(' ')
-    True
-    >>> IS_CHARACTER_JUNK('\t')
-    True
-    >>> IS_CHARACTER_JUNK('\n')
-    False
-    >>> IS_CHARACTER_JUNK('x')
-    False
-    """
-
-    return ch in ws
-
-
-def unified_diff(a, b, fromfile='', tofile='', fromfiledate='',
-                 tofiledate='', n=3, lineterm='\n'):
-    r"""
-    Compare two sequences of lines; generate the delta as a unified diff.
-
-    Unified diffs are a compact way of showing line changes and a few
-    lines of context.  The number of context lines is set by 'n' which
-    defaults to three.
-
-    By default, the diff control lines (those with ---, +++, or @@) are
-    created with a trailing newline.  This is helpful so that inputs
-    created from file.readlines() result in diffs that are suitable for
-    file.writelines() since both the inputs and outputs have trailing
-    newlines.
-
-    For inputs that do not have trailing newlines, set the lineterm
-    argument to "" so that the output will be uniformly newline free.
-
-    The unidiff format normally has a header for filenames and modification
-    times.  Any or all of these may be specified using strings for
-    'fromfile', 'tofile', 'fromfiledate', and 'tofiledate'.  The modification
-    times are normally expressed in the format returned by time.ctime().
-
-    Example:
-
-    >>> for line in unified_diff('one two three four'.split(),
-    ...             'zero one tree four'.split(), 'Original', 'Current',
-    ...             'Sat Jan 26 23:30:50 1991', 'Fri Jun 06 10:20:52 2003',
-    ...             lineterm=''):
-    ...     print line
-    --- Original Sat Jan 26 23:30:50 1991
-    +++ Current Fri Jun 06 10:20:52 2003
-    @@ -1,4 +1,4 @@
-    +zero
-     one
-    -two
-    -three
-    +tree
-     four
-    """
-
-    started = False
-    for group in SequenceMatcher(None,a,b).get_grouped_opcodes(n):
-        if not started:
-            yield '--- %s %s%s' % (fromfile, fromfiledate, lineterm)
-            yield '+++ %s %s%s' % (tofile, tofiledate, lineterm)
-            started = True
-        i1, i2, j1, j2 = group[0][1], group[-1][2], group[0][3], group[-1][4]
-        yield "@@ -%d,%d +%d,%d @@%s" % (i1+1, i2-i1, j1+1, j2-j1, lineterm)
-        for tag, i1, i2, j1, j2 in group:
-            if tag == 'equal':
-                for line in a[i1:i2]:
-                    yield ' ' + line
-                continue
-            if tag == 'replace' or tag == 'delete':
-                for line in a[i1:i2]:
-                    yield '-' + line
-            if tag == 'replace' or tag == 'insert':
-                for line in b[j1:j2]:
-                    yield '+' + line
-
-# See http://www.unix.org/single_unix_specification/
-def context_diff(a, b, fromfile='', tofile='',
-                 fromfiledate='', tofiledate='', n=3, lineterm='\n'):
-    r"""
-    Compare two sequences of lines; generate the delta as a context diff.
-
-    Context diffs are a compact way of showing line changes and a few
-    lines of context.  The number of context lines is set by 'n' which
-    defaults to three.
-
-    By default, the diff control lines (those with *** or ---) are
-    created with a trailing newline.  This is helpful so that inputs
-    created from file.readlines() result in diffs that are suitable for
-    file.writelines() since both the inputs and outputs have trailing
-    newlines.
-
-    For inputs that do not have trailing newlines, set the lineterm
-    argument to "" so that the output will be uniformly newline free.
-
-    The context diff format normally has a header for filenames and
-    modification times.  Any or all of these may be specified using
-    strings for 'fromfile', 'tofile', 'fromfiledate', and 'tofiledate'.
-    The modification times are normally expressed in the format returned
-    by time.ctime().  If not specified, the strings default to blanks.
-
-    Example:
-
-    >>> print ''.join(context_diff('one\ntwo\nthree\nfour\n'.splitlines(1),
-    ...       'zero\none\ntree\nfour\n'.splitlines(1), 'Original', 'Current',
-    ...       'Sat Jan 26 23:30:50 1991', 'Fri Jun 06 10:22:46 2003')),
-    *** Original Sat Jan 26 23:30:50 1991
-    --- Current Fri Jun 06 10:22:46 2003
-    ***************
-    *** 1,4 ****
-      one
-    ! two
-    ! three
-      four
-    --- 1,4 ----
-    + zero
-      one
-    ! tree
-      four
-    """
-
-    started = False
-    prefixmap = {'insert':'+ ', 'delete':'- ', 'replace':'! ', 'equal':'  '}
-    for group in SequenceMatcher(None,a,b).get_grouped_opcodes(n):
-        if not started:
-            yield '*** %s %s%s' % (fromfile, fromfiledate, lineterm)
-            yield '--- %s %s%s' % (tofile, tofiledate, lineterm)
-            started = True
-
-        yield '***************%s' % (lineterm,)
-        if group[-1][2] - group[0][1] >= 2:
-            yield '*** %d,%d ****%s' % (group[0][1]+1, group[-1][2], lineterm)
-        else:
-            yield '*** %d ****%s' % (group[-1][2], lineterm)
-        visiblechanges = [e for e in group if e[0] in ('replace', 'delete')]
-        if visiblechanges:
-            for tag, i1, i2, _, _ in group:
-                if tag != 'insert':
-                    for line in a[i1:i2]:
-                        yield prefixmap[tag] + line
-
-        if group[-1][4] - group[0][3] >= 2:
-            yield '--- %d,%d ----%s' % (group[0][3]+1, group[-1][4], lineterm)
-        else:
-            yield '--- %d ----%s' % (group[-1][4], lineterm)
-        visiblechanges = [e for e in group if e[0] in ('replace', 'insert')]
-        if visiblechanges:
-            for tag, _, _, j1, j2 in group:
-                if tag != 'delete':
-                    for line in b[j1:j2]:
-                        yield prefixmap[tag] + line
-
-def ndiff(a, b, linejunk=None, charjunk=IS_CHARACTER_JUNK):
-    r"""
-    Compare `a` and `b` (lists of strings); return a `Differ`-style delta.
-
-    Optional keyword parameters `linejunk` and `charjunk` are for filter
-    functions (or None):
-
-    - linejunk: A function that should accept a single string argument, and
-      return true iff the string is junk.  The default is None, and is
-      recommended; as of Python 2.3, an adaptive notion of "noise" lines is
-      used that does a good job on its own.
-
-    - charjunk: A function that should accept a string of length 1. The
-      default is module-level function IS_CHARACTER_JUNK, which filters out
-      whitespace characters (a blank or tab; note: bad idea to include newline
-      in this!).
-
-    Tools/scripts/ndiff.py is a command-line front-end to this function.
-
-    Example:
-
-    >>> diff = ndiff('one\ntwo\nthree\n'.splitlines(1),
-    ...              'ore\ntree\nemu\n'.splitlines(1))
-    >>> print ''.join(diff),
-    - one
-    ?  ^
-    + ore
-    ?  ^
-    - two
-    - three
-    ?  -
-    + tree
-    + emu
-    """
-    return Differ(linejunk, charjunk).compare(a, b)
-
-def _mdiff(fromlines, tolines, context=None, linejunk=None,
-           charjunk=IS_CHARACTER_JUNK):
-    r"""Returns generator yielding marked up from/to side by side differences.
-
-    Arguments:
-    fromlines -- list of text lines to be compared to tolines
-    tolines -- list of text lines to be compared to fromlines
-    context -- number of context lines to display on each side of difference,
-               if None, all from/to text lines will be generated.
-    linejunk -- passed on to ndiff (see ndiff documentation)
-    charjunk -- passed on to ndiff (see ndiff documentation)
-
-    This function returns an iterator which yields a tuple:
-    (from line tuple, to line tuple, boolean flag)
-
-    from/to line tuple -- (line num, line text)
-        line num -- integer or None (to indicate a context separation)
-        line text -- original line text with following markers inserted:
-            '\0+' -- marks start of added text
-            '\0-' -- marks start of deleted text
-            '\0^' -- marks start of changed text
-            '\1' -- marks end of added/deleted/changed text
-
-    boolean flag -- None indicates context separation, True indicates
-        either "from" or "to" line contains a change, otherwise False.
-
-    This function/iterator was originally developed to generate side by side
-    file differences for making HTML pages (see HtmlDiff class for example
-    usage).
-
-    Note, this function utilizes the ndiff function to generate the side by
-    side difference markup.  Optional ndiff arguments may be passed to this
-    function and they in turn will be passed to ndiff.
-    """
-    import re
-
-    # regular expression for finding intraline change indices
-    change_re = re.compile('(\++|\-+|\^+)')
-
-    # create the difference iterator to generate the differences
-    diff_lines_iterator = ndiff(fromlines,tolines,linejunk,charjunk)
-
-    def _make_line(lines, format_key, side, num_lines=[0,0]):
-        """Returns line of text with user's change markup and line formatting.
-
-        lines -- list of lines from the ndiff generator to produce a line of
-                 text from.  When producing the line of text to return, the
-                 lines used are removed from this list.
-        format_key -- '+' return first line in list with "add" markup around
-                          the entire line.
-                      '-' return first line in list with "delete" markup around
-                          the entire line.
-                      '?' return first line in list with add/delete/change
-                          intraline markup (indices obtained from second line)
-                      None return first line in list with no markup
-        side -- index into the num_lines list (0=from,1=to)
-        num_lines -- from/to current line number.  This is NOT intended to be a
-                     passed parameter.  It is present as a keyword argument to
-                     maintain memory of the current line numbers between calls
-                     of this function.
-
-        Note, this function is purposefully not defined at the module scope so
-        that data it needs from its parent function (within whose context it
-        is defined) does not need to be of module scope.
-        """
-        num_lines[side] += 1
-        # Handle case where no user markup is to be added, just return line of
-        # text with user's line format to allow for usage of the line number.
-        if format_key is None:
-            return (num_lines[side],lines.pop(0)[2:])
-        # Handle case of intraline changes
-        if format_key == '?':
-            text, markers = lines.pop(0), lines.pop(0)
-            # find intraline changes (store change type and indices in tuples)
-            sub_info = []
-            def record_sub_info(match_object,sub_info=sub_info):
-                sub_info.append([match_object.group(1)[0],match_object.span()])
-                return match_object.group(1)
-            change_re.sub(record_sub_info,markers)
-            # process each tuple inserting our special marks that won't be
-            # noticed by an xml/html escaper.
-            for key,(begin,end) in sub_info[::-1]:
-                text = text[0:begin]+'\0'+key+text[begin:end]+'\1'+text[end:]
-            text = text[2:]
-        # Handle case of add/delete entire line
-        else:
-            text = lines.pop(0)[2:]
-            # if line of text is just a newline, insert a space so there is
-            # something for the user to highlight and see.
-            if not text:
-                text = ' '
-            # insert marks that won't be noticed by an xml/html escaper.
-            text = '\0' + format_key + text + '\1'
-        # Return line of text, first allow user's line formatter to do its
-        # thing (such as adding the line number) then replace the special
-        # marks with the user's change markup.
-        return (num_lines[side],text)
-
-    def _line_iterator():
-        """Yields from/to lines of text with a change indication.
-
-        This function is an iterator.  It itself pulls lines from a
-        differencing iterator, processes them and yields them.  When it can
-        it yields both a "from" and a "to" line, otherwise it will yield one
-        or the other.  In addition to yielding the lines of from/to text, a
-        boolean flag is yielded to indicate if the text line(s) have
-        differences in them.
-
-        Note, this function is purposefully not defined at the module scope so
-        that data it needs from its parent function (within whose context it
-        is defined) does not need to be of module scope.
-        """
-        lines = []
-        num_blanks_pending, num_blanks_to_yield = 0, 0
-        while True:
-            # Load up next 4 lines so we can look ahead, create strings which
-            # are a concatenation of the first character of each of the 4 lines
-            # so we can do some very readable comparisons.
-            while len(lines) < 4:
-                try:
-                    lines.append(diff_lines_iterator.next())
-                except StopIteration:
-                    lines.append('X')
-            s = ''.join([line[0] for line in lines])
-            if s.startswith('X'):
-                # When no more lines, pump out any remaining blank lines so the
-                # corresponding add/delete lines get a matching blank line so
-                # all line pairs get yielded at the next level.
-                num_blanks_to_yield = num_blanks_pending
-            elif s.startswith('-?+?'):
-                # simple intraline change
-                yield _make_line(lines,'?',0), _make_line(lines,'?',1), True
-                continue
-            elif s.startswith('--++'):
-                # in delete block, add block coming: we do NOT want to get
-                # caught up on blank lines yet, just process the delete line
-                num_blanks_pending -= 1
-                yield _make_line(lines,'-',0), None, True
-                continue
-            elif s.startswith('--?+') or \
-                 s.startswith('--+') or \
-                 s.startswith('- '):
-                # in delete block and see an intraline change or unchanged line
-                # coming: yield the delete line and then blanks
-                from_line,to_line = _make_line(lines,'-',0), None
-                num_blanks_to_yield,num_blanks_pending = num_blanks_pending-1,0
-            elif s.startswith('-+?'):
-                # intraline change
-                yield _make_line(lines,None,0), _make_line(lines,'?',1), True
-                continue
-            elif s.startswith('-?+'):
-                # intraline change
-                yield _make_line(lines,'?',0), _make_line(lines,None,1), True
-                continue
-            elif s.startswith('-'):
-                # delete FROM line
-                num_blanks_pending -= 1
-                yield _make_line(lines,'-',0), None, True
-                continue
-            elif s.startswith('+--'):
-                # in add block, delete block coming: we do NOT want to get
-                # caught up on blank lines yet, just process the add line
-                num_blanks_pending += 1
-                yield None, _make_line(lines,'+',1), True
-                continue
-            elif s.startswith('+ ') or \
-                 s.startswith('+-'):
-                # will be leaving an add block: yield blanks then add line
-                from_line, to_line = None, _make_line(lines,'+',1)
-                num_blanks_to_yield,num_blanks_pending = num_blanks_pending+1,0
-            elif s.startswith('+'):
-                # inside an add block, yield the add line
-                num_blanks_pending += 1
-                yield None, _make_line(lines,'+',1), True
-                continue
-            elif s.startswith(' '):
-                # unchanged text, yield it to both sides
-                yield _make_line(lines[:],None,0),_make_line(lines,None,1),False
-                continue
-            # Catch up on the blank lines so when we yield the next from/to
-            # pair, they are lined up.
-            while(num_blanks_to_yield < 0):
-                num_blanks_to_yield += 1
-                yield None,('','\n'),True
-            while(num_blanks_to_yield > 0):
-                num_blanks_to_yield -= 1
-                yield ('','\n'),None,True
-            if s.startswith('X'):
-                raise StopIteration
-            else:
-                yield from_line,to_line,True
-
-    def _line_pair_iterator():
-        """Yields from/to lines of text with a change indication.
-
-        This function is an iterator.  It itself pulls lines from the line
-        iterator.  Its difference from that iterator is that this function
-        always yields a pair of from/to text lines (with the change
-        indication).  If necessary it will collect single from/to lines
-        until it has a matching from/to pair to yield.
-
-        Note, this function is purposefully not defined at the module scope so
-        that data it needs from its parent function (within whose context it
-        is defined) does not need to be of module scope.
-        """
-        line_iterator = _line_iterator()
-        fromlines,tolines=[],[]
-        while True:
-            # Collecting lines of text until we have a from/to pair
-            while (len(fromlines)==0 or len(tolines)==0):
-                from_line, to_line, found_diff =line_iterator.next()
-                if from_line is not None:
-                    fromlines.append((from_line,found_diff))
-                if to_line is not None:
-                    tolines.append((to_line,found_diff))
-            # Once we have a pair, remove them from the collection and yield it
-            from_line, fromDiff = fromlines.pop(0)
-            to_line, to_diff = tolines.pop(0)
-            yield (from_line,to_line,fromDiff or to_diff)
-
-    # Handle case where user does not want context differencing, just yield
-    # them up without doing anything else with them.
-    line_pair_iterator = _line_pair_iterator()
-    if context is None:
-        while True:
-            yield line_pair_iterator.next()
-    # Handle case where user wants context differencing.  We must do some
-    # storage of lines until we know for sure that they are to be yielded.
-    else:
-        context += 1
-        lines_to_write = 0
-        while True:
-            # Store lines up until we find a difference, note use of a
-            # circular queue because we only need to keep around what
-            # we need for context.
-            index, contextLines = 0, [None]*(context)
-            found_diff = False
-            while(found_diff is False):
-                from_line, to_line, found_diff = line_pair_iterator.next()
-                i = index % context
-                contextLines[i] = (from_line, to_line, found_diff)
-                index += 1
-            # Yield lines that we have collected so far, but first yield
-            # the user's separator.
-            if index > context:
-                yield None, None, None
-                lines_to_write = context
-            else:
-                lines_to_write = index
-                index = 0
-            while(lines_to_write):
-                i = index % context
-                index += 1
-                yield contextLines[i]
-                lines_to_write -= 1
-            # Now yield the context lines after the change
-            lines_to_write = context-1
-            while(lines_to_write):
-                from_line, to_line, found_diff = line_pair_iterator.next()
-                # If another change within the context, extend the context
-                if found_diff:
-                    lines_to_write = context-1
-                else:
-                    lines_to_write -= 1
-                yield from_line, to_line, found_diff
-
-
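The '\0^'/'\1' style markers documented in the _mdiff docstring can be seen directly in its output; a small illustrative sketch:

    >>> results = list(_mdiff('one\ntwo\n'.splitlines(1),
    ...                       'one\ntoo\n'.splitlines(1)))
    >>> results[0][2], results[1][2]
    (False, True)
    >>> results[1][0]
    (2, 't\x00^w\x01o\n')
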
-_file_template = """
-<!DOCTYPE html PUBLIC "-//W3C//DTD XHTML 1.0 Transitional//EN"
-          "http://www.w3.org/TR/xhtml1/DTD/xhtml1-transitional.dtd">
-
-<html>
-
-<head>
-    <meta http-equiv="Content-Type"
-          content="text/html; charset=ISO-8859-1" />
-    <title></title>
-    <style type="text/css">%(styles)s
-    </style>
-</head>
-
-<body>
-    %(table)s%(legend)s
-</body>
-
-</html>"""
-
-_styles = """
-        table.diff {font-family:Courier; border:medium;}
-        .diff_header {background-color:#e0e0e0}
-        td.diff_header {text-align:right}
-        .diff_next {background-color:#c0c0c0}
-        .diff_add {background-color:#aaffaa}
-        .diff_chg {background-color:#ffff77}
-        .diff_sub {background-color:#ffaaaa}"""
-
-_table_template = """
-    <table class="diff" id="difflib_chg_%(prefix)s_top"
-           cellspacing="0" cellpadding="0" rules="groups" >
-        <colgroup></colgroup> <colgroup></colgroup> <colgroup></colgroup>
-        <colgroup></colgroup> <colgroup></colgroup> <colgroup></colgroup>
-        %(header_row)s
-        <tbody>
-%(data_rows)s        </tbody>
-    </table>"""
-
-_legend = """
-    <table class="diff" summary="Legends">
-        <tr> <th colspan="2"> Legends </th> </tr>
-        <tr> <td> <table border="" summary="Colors">
-                      <tr><th> Colors </th> </tr>
-                      <tr><td class="diff_add">&nbsp;Added&nbsp;</td></tr>
-                      <tr><td class="diff_chg">Changed</td> </tr>
-                      <tr><td class="diff_sub">Deleted</td> </tr>
-                  </table></td>
-             <td> <table border="" summary="Links">
-                      <tr><th colspan="2"> Links </th> </tr>
-                      <tr><td>(f)irst change</td> </tr>
-                      <tr><td>(n)ext change</td> </tr>
-                      <tr><td>(t)op</td> </tr>
-                  </table></td> </tr>
-    </table>"""
-
-class HtmlDiff(object):
-    """For producing HTML side by side comparison with change highlights.
-
-    This class can be used to create an HTML table (or a complete HTML file
-    containing the table) showing a side by side, line by line comparison
-    of text with inter-line and intra-line change highlights.  The table can
-    be generated in either full or contextual difference mode.
-
-    The following methods are provided for HTML generation:
-
-    make_table -- generates HTML for a single side by side table
-    make_file -- generates complete HTML file with a single side by side table
-
-    See tools/scripts/diff.py for an example usage of this class.
-    """
-
-    _file_template = _file_template
-    _styles = _styles
-    _table_template = _table_template
-    _legend = _legend
-    _default_prefix = 0
-
-    def __init__(self,tabsize=8,wrapcolumn=None,linejunk=None,
-                 charjunk=IS_CHARACTER_JUNK):
-        """HtmlDiff instance initializer
-
-        Arguments:
-        tabsize -- tab stop spacing, defaults to 8.
-        wrapcolumn -- column number where lines are broken and wrapped,
-            defaults to None where lines are not wrapped.
-        linejunk,charjunk -- keyword arguments passed into ndiff() (used by
-            HtmlDiff() to generate the side by side HTML differences).  See
-            ndiff() documentation for argument default values and descriptions.
-        """
-        self._tabsize = tabsize
-        self._wrapcolumn = wrapcolumn
-        self._linejunk = linejunk
-        self._charjunk = charjunk
-
-    def make_file(self,fromlines,tolines,fromdesc='',todesc='',context=False,
-                  numlines=5):
-        """Returns HTML file of side by side comparison with change highlights
-
-        Arguments:
-        fromlines -- list of "from" lines
-        tolines -- list of "to" lines
-        fromdesc -- "from" file column header string
-        todesc -- "to" file column header string
-        context -- set to True for contextual differences (defaults to False
-            which shows full differences).
-        numlines -- number of context lines.  When context is set True,
-            controls number of lines displayed before and after the change.
-            When context is False, controls the number of lines to place
-            the "next" link anchors before the next change (so click of
-            "next" link jumps to just before the change).
-        """
-
-        return self._file_template % dict(
-            styles = self._styles,
-            legend = self._legend,
-            table = self.make_table(fromlines,tolines,fromdesc,todesc,
-                                    context=context,numlines=numlines))
-
-    def _tab_newline_replace(self,fromlines,tolines):
-        """Returns from/to line lists with tabs expanded and newlines removed.
-
-        Instead of tab characters being replaced by the number of spaces
-        needed to fill in to the next tab stop, this function will fill
-        the space with tab characters.  This is done so that the difference
-        algorithms can identify changes in a file when tabs are replaced by
-        spaces and vice versa.  At the end of the HTML generation, the tab
-        characters will be replaced with a nonbreakable space.
-        """
-        def expand_tabs(line):
-            # hide real spaces
-            line = line.replace(' ','\0')
-            # expand tabs into spaces
-            line = line.expandtabs(self._tabsize)
-            # replace spaces from expanded tabs back into tab characters
-            # (we'll replace them with markup after we do differencing)
-            line = line.replace(' ','\t')
-            return line.replace('\0',' ').rstrip('\n')
-        fromlines = [expand_tabs(line) for line in fromlines]
-        tolines = [expand_tabs(line) for line in tolines]
-        return fromlines,tolines
-
-    def _split_line(self,data_list,line_num,text):
-        """Builds list of text lines by splitting text lines at wrap point
-
-        This function will determine if the input text line needs to be
-        wrapped (split) into separate lines.  If so, the first wrap point
-        will be determined and the first line appended to the output
-        text line list.  This function is used recursively to handle
-        the second part of the split line to further split it.
-        """
-        # if blank line or context separator, just add it to the output list
-        if not line_num:
-            data_list.append((line_num,text))
-            return
-
-        # if line text doesn't need wrapping, just add it to the output list
-        size = len(text)
-        max = self._wrapcolumn
-        if (size <= max) or ((size -(text.count('\0')*3)) <= max):
-            data_list.append((line_num,text))
-            return
-
-        # scan text looking for the wrap point, keeping track if the wrap
-        # point is inside markers
-        i = 0
-        n = 0
-        mark = ''
-        while n < max and i < size:
-            if text[i] == '\0':
-                i += 1
-                mark = text[i]
-                i += 1
-            elif text[i] == '\1':
-                i += 1
-                mark = ''
-            else:
-                i += 1
-                n += 1
-
-        # wrap point is inside text, break it up into separate lines
-        line1 = text[:i]
-        line2 = text[i:]
-
-        # if wrap point is inside markers, place end marker at end of first
-        # line and start marker at beginning of second line because each
-        # line will have its own table tag markup around it.
-        if mark:
-            line1 = line1 + '\1'
-            line2 = '\0' + mark + line2
-
-        # tack on first line onto the output list
-        data_list.append((line_num,line1))
-
-        # use this routine again to wrap the remaining text
-        self._split_line(data_list,'>',line2)
-
-    def _line_wrapper(self,diffs):
-        """Returns iterator that splits (wraps) mdiff text lines"""
-
-        # pull from/to data and flags from mdiff iterator
-        for fromdata,todata,flag in diffs:
-            # check for context separators and pass them through
-            if flag is None:
-                yield fromdata,todata,flag
-                continue
-            (fromline,fromtext),(toline,totext) = fromdata,todata
-            # for each from/to line split it at the wrap column to form
-            # list of text lines.
-            fromlist,tolist = [],[]
-            self._split_line(fromlist,fromline,fromtext)
-            self._split_line(tolist,toline,totext)
-            # yield from/to line in pairs inserting blank lines as
-            # necessary when one side has more wrapped lines
-            while fromlist or tolist:
-                if fromlist:
-                    fromdata = fromlist.pop(0)
-                else:
-                    fromdata = ('',' ')
-                if tolist:
-                    todata = tolist.pop(0)
-                else:
-                    todata = ('',' ')
-                yield fromdata,todata,flag
-
-    def _collect_lines(self,diffs):
-        """Collects mdiff output into separate lists
-
-        Before storing the mdiff from/to data into a list, it is converted
-        into a single line of text with HTML markup.
-        """
-
-        fromlist,tolist,flaglist = [],[],[]
-        # pull from/to data and flags from mdiff style iterator
-        for fromdata,todata,flag in diffs:
-            try:
-                # store HTML markup of the lines into the lists
-                fromlist.append(self._format_line(0,flag,*fromdata))
-                tolist.append(self._format_line(1,flag,*todata))
-            except TypeError:
-                # exceptions occur for lines where context separators go
-                fromlist.append(None)
-                tolist.append(None)
-            flaglist.append(flag)
-        return fromlist,tolist,flaglist
-
-    def _format_line(self,side,flag,linenum,text):
-        """Returns HTML markup of "from" / "to" text lines
-
-        side -- 0 or 1 indicating "from" or "to" text
-        flag -- indicates if difference on line
-        linenum -- line number (used for line number column)
-        text -- line text to be marked up
-        """
-        try:
-            linenum = '%d' % linenum
-            id = ' id="%s%s"' % (self._prefix[side],linenum)
-        except TypeError:
-            # handle blank lines where linenum is '>' or ''
-            id = ''
-        # replace those things that would get confused with HTML symbols
-        text=text.replace("&","&amp;").replace(">","&gt;").replace("<","&lt;")
-
-        # make spaces non-breakable so they don't get compressed or line wrapped
-        text = text.replace(' ','&nbsp;').rstrip()
-
-        return '<td class="diff_header"%s>%s</td><td nowrap="nowrap">%s</td>' \
-               % (id,linenum,text)
-
-    def _make_prefix(self):
-        """Create unique anchor prefixes"""
-
-        # Generate a unique anchor prefix so multiple tables
-        # can exist on the same HTML page without conflicts.
-        fromprefix = "from%d_" % HtmlDiff._default_prefix
-        toprefix = "to%d_" % HtmlDiff._default_prefix
-        HtmlDiff._default_prefix += 1
-        # store prefixes so line format method has access
-        self._prefix = [fromprefix,toprefix]
-
-    def _convert_flags(self,fromlist,tolist,flaglist,context,numlines):
-        """Makes list of "next" links"""
-
-        # all anchor names will be generated using the unique "to" prefix
-        toprefix = self._prefix[1]
-
-        # process change flags, generating middle column of next anchors/links
-        next_id = ['']*len(flaglist)
-        next_href = ['']*len(flaglist)
-        num_chg, in_change = 0, False
-        last = 0
-        for i,flag in enumerate(flaglist):
-            if flag:
-                if not in_change:
-                    in_change = True
-                    last = i
-                    # at the beginning of a change, drop an anchor a few lines
-                    # (the context lines) before the change for the previous
-                    # link
-                    i = max([0,i-numlines])
-                    next_id[i] = ' id="difflib_chg_%s_%d"' % (toprefix,num_chg)
-                    # at the beginning of a change, drop a link to the next
-                    # change
-                    num_chg += 1
-                    next_href[last] = '<a href="#difflib_chg_%s_%d">n</a>' % (
-                         toprefix,num_chg)
-            else:
-                in_change = False
-        # check for cases where there is no content to avoid exceptions
-        if not flaglist:
-            flaglist = [False]
-            next_id = ['']
-            next_href = ['']
-            last = 0
-            if context:
-                fromlist = ['<td></td><td>&nbsp;No Differences Found&nbsp;</td>']
-                tolist = fromlist
-            else:
-                fromlist = tolist = ['<td></td><td>&nbsp;Empty File&nbsp;</td>']
-        # if not a change on first line, drop a link
-        if not flaglist[0]:
-            next_href[0] = '<a href="#difflib_chg_%s_0">f</a>' % toprefix
-        # redo the last link to link to the top
-        next_href[last] = '<a href="#difflib_chg_%s_top">t</a>' % (toprefix)
-
-        return fromlist,tolist,flaglist,next_href,next_id
-
-    def make_table(self,fromlines,tolines,fromdesc='',todesc='',context=False,
-                   numlines=5):
-        """Returns HTML table of side by side comparison with change highlights
-
-        Arguments:
-        fromlines -- list of "from" lines
-        tolines -- list of "to" lines
-        fromdesc -- "from" file column header string
-        todesc -- "to" file column header string
-        context -- set to True for contextual differences (defaults to False
-            which shows full differences).
-        numlines -- number of context lines.  When context is set True,
-            controls number of lines displayed before and after the change.
-            When context is False, controls the number of lines to place
-            the "next" link anchors before the next change (so click of
-            "next" link jumps to just before the change).
-        """
-
-        # make unique anchor prefixes so that multiple tables may exist
-        # on the same page without conflict.
-        self._make_prefix()
-
-        # change tabs to spaces before it gets more difficult after we insert
-        # markup
-        fromlines,tolines = self._tab_newline_replace(fromlines,tolines)
-
-        # create diffs iterator which generates side by side from/to data
-        if context:
-            context_lines = numlines
-        else:
-            context_lines = None
-        diffs = _mdiff(fromlines,tolines,context_lines,linejunk=self._linejunk,
-                      charjunk=self._charjunk)
-
-        # set up iterator to wrap lines that exceed desired width
-        if self._wrapcolumn:
-            diffs = self._line_wrapper(diffs)
-
-        # collect up from/to lines and flags into lists (also format the lines)
-        fromlist,tolist,flaglist = self._collect_lines(diffs)
-
-        # process change flags, generating middle column of next anchors/links
-        fromlist,tolist,flaglist,next_href,next_id = self._convert_flags(
-            fromlist,tolist,flaglist,context,numlines)
-
-        s = []
-        fmt = '            <tr><td class="diff_next"%s>%s</td>%s' + \
-              '<td class="diff_next">%s</td>%s</tr>\n'
-        for i in range(len(flaglist)):
-            if flaglist[i] is None:
-                # mdiff yields None on separator lines; skip the bogus ones
-                # generated for the first line
-                if i > 0:
-                    s.append('        </tbody>        \n        <tbody>\n')
-            else:
-                s.append( fmt % (next_id[i],next_href[i],fromlist[i],
-                                           next_href[i],tolist[i]))
-        if fromdesc or todesc:
-            header_row = '<thead><tr>%s%s%s%s</tr></thead>' % (
-                '<th class="diff_next"><br /></th>',
-                '<th colspan="2" class="diff_header">%s</th>' % fromdesc,
-                '<th class="diff_next"><br /></th>',
-                '<th colspan="2" class="diff_header">%s</th>' % todesc)
-        else:
-            header_row = ''
-
-        table = self._table_template % dict(
-            data_rows=''.join(s),
-            header_row=header_row,
-            prefix=self._prefix[1])
-
-        return table.replace('\0+','<span class="diff_add">'). \
-                     replace('\0-','<span class="diff_sub">'). \
-                     replace('\0^','<span class="diff_chg">'). \
-                     replace('\1','</span>'). \
-                     replace('\t','&nbsp;')
-
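Typical use of HtmlDiff is a single make_file() or make_table() call; a minimal sketch (the description strings and wrapcolumn value are arbitrary):

    >>> hd = HtmlDiff(wrapcolumn=70)
    >>> page = hd.make_file('one\ntwo\n'.splitlines(1),
    ...                     'one\ntoo\n'.splitlines(1),
    ...                     fromdesc='before', todesc='after')
    >>> '<table class="diff"' in page
    True
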
-del re
-
-def restore(delta, which):
-    r"""
-    Generate one of the two sequences that generated a delta.
-
-    Given a `delta` produced by `Differ.compare()` or `ndiff()`, extract
-    lines originating from file 1 or 2 (parameter `which`), stripping off line
-    prefixes.
-
-    Examples:
-
-    >>> diff = ndiff('one\ntwo\nthree\n'.splitlines(1),
-    ...              'ore\ntree\nemu\n'.splitlines(1))
-    >>> diff = list(diff)
-    >>> print ''.join(restore(diff, 1)),
-    one
-    two
-    three
-    >>> print ''.join(restore(diff, 2)),
-    ore
-    tree
-    emu
-    """
-    try:
-        tag = {1: "- ", 2: "+ "}[int(which)]
-    except KeyError:
-        raise ValueError, ('unknown delta choice (must be 1 or 2): %r'
-                           % which)
-    prefixes = ("  ", tag)
-    for line in delta:
-        if line[:2] in prefixes:
-            yield line[2:]
-
-def _test():
-    import doctest, difflib
-    return doctest.testmod(difflib)
-
-if __name__ == "__main__":
-    _test()
--- a/MoinMoin/support/parsedatetime/__init__.py	Wed Sep 07 03:05:27 2016 +0200
+++ b/MoinMoin/support/parsedatetime/__init__.py	Wed Sep 07 04:31:59 2016 +0200
@@ -1,28 +1,2782 @@
-"""
-parsedatetime.py contains the C{Calendar} class where the C{parse()}
-method can be found.
-
-parsedatetime_consts.py contains the C{Constants} class that builds the
-various regex values using locale information if available.
+# -*- coding: utf-8 -*-
+#
+# vim: sw=2 ts=2 sts=2
+#
+# Copyright 2004-2016 Mike Taylor
+#
+# Licensed under the Apache License, Version 2.0 (the "License");
+# you may not use this file except in compliance with the License.
+# You may obtain a copy of the License at
+#
+#     http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
+
+"""parsedatetime
+
+Parse human-readable date/time text.
+
+Requires Python 2.6 or later
 """
 
-version = '0.8.7'
-author  = 'Mike Taylor and Darshana Chhajed'
-license = """
-Copyright (c) 2004-2008 Mike Taylor
-Copyright (c) 2006-2008 Darshana Chhajed
-All rights reserved.
-
-Licensed under the Apache License, Version 2.0 (the "License");
-you may not use this file except in compliance with the License.
-You may obtain a copy of the License at
-
-   http://www.apache.org/licenses/LICENSE-2.0
-
-Unless required by applicable law or agreed to in writing, software
-distributed under the License is distributed on an "AS IS" BASIS,
-WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
-See the License for the specific language governing permissions and
-limitations under the License.
-"""
-
+from __future__ import with_statement, absolute_import, unicode_literals
+
+import re
+import time
+import logging
+import warnings
+import datetime
+import calendar
+import contextlib
+import email.utils
+
+from .pdt_locales import (locales as _locales,
+                          get_icu, load_locale)
+from .context import pdtContext, pdtContextStack
+from .warns import pdt20DeprecationWarning
+
+
+__author__ = 'Mike Taylor'
+__email__ = 'bear@bear.im'
+__copyright__ = 'Copyright (c) 2016 Mike Taylor'
+__license__ = 'Apache License 2.0'
+__version__ = '2.1'
+__url__ = 'https://github.com/bear/parsedatetime'
+__download_url__ = 'https://pypi.python.org/pypi/parsedatetime'
+__description__ = 'Parse human-readable date/time text.'
+
+# as a library, do *not* setup logging
+# see docs.python.org/2/howto/logging.html#configuring-logging-for-a-library
+# Set default logging handler to avoid "No handler found" warnings.
+
+try:  # Python 2.7+
+    from logging import NullHandler
+except ImportError:
+    class NullHandler(logging.Handler):
+
+        def emit(self, record):
+            pass
+
+log = logging.getLogger(__name__)
+log.addHandler(NullHandler())
+
+debug = False
+
+pdtLocales = dict([(x, load_locale(x)) for x in _locales])
+
+
+# Copied from feedparser.py
+# Universal Feedparser
+# Copyright (c) 2002-2006, Mark Pilgrim, All rights reserved.
+# Originally a def inside of _parse_date_w3dtf()
+def _extract_date(m):
+    year = int(m.group('year'))
+    if year < 100:
+        year = 100 * int(time.gmtime()[0] / 100) + int(year)
+    if year < 1000:
+        return 0, 0, 0
+    julian = m.group('julian')
+    if julian:
+        julian = int(julian)
+        month = julian / 30 + 1
+        day = julian % 30 + 1
+        jday = None
+        while jday != julian:
+            t = time.mktime((year, month, day, 0, 0, 0, 0, 0, 0))
+            jday = time.gmtime(t)[-2]
+            diff = abs(jday - julian)
+            if jday > julian:
+                if diff < day:
+                    day = day - diff
+                else:
+                    month = month - 1
+                    day = 31
+            elif jday < julian:
+                if day + diff < 28:
+                    day = day + diff
+                else:
+                    month = month + 1
+        return year, month, day
+    month = m.group('month')
+    day = 1
+    if month is None:
+        month = 1
+    else:
+        month = int(month)
+        day = m.group('day')
+        if day:
+            day = int(day)
+        else:
+            day = 1
+    return year, month, day
+
+
+# Copied from feedparser.py
+# Universal Feedparser
+# Copyright (c) 2002-2006, Mark Pilgrim, All rights reserved.
+# Originally a def inside of _parse_date_w3dtf()
+def _extract_time(m):
+    if not m:
+        return 0, 0, 0
+    hours = m.group('hours')
+    if not hours:
+        return 0, 0, 0
+    hours = int(hours)
+    minutes = int(m.group('minutes'))
+    seconds = m.group('seconds')
+    if seconds:
+        seconds = seconds.replace(',', '.').split('.', 1)[0]
+        seconds = int(seconds)
+    else:
+        seconds = 0
+    return hours, minutes, seconds
+
+
+def _pop_time_accuracy(m, ctx):
+    if not m:
+        return
+    if m.group('hours'):
+        ctx.updateAccuracy(ctx.ACU_HOUR)
+    if m.group('minutes'):
+        ctx.updateAccuracy(ctx.ACU_MIN)
+    if m.group('seconds'):
+        ctx.updateAccuracy(ctx.ACU_SEC)
+
+
+# Copied from feedparser.py
+# Universal Feedparser
+# Copyright (c) 2002-2006, Mark Pilgrim, All rights reserved.
+# Modified to return a tuple instead of mktime
+#
+# Original comment:
+#   W3DTF-style date parsing adapted from PyXML xml.utils.iso8601, written by
+#   Drake and licensed under the Python license.  Removed all range checking
+#   for month, day, hour, minute, and second, since mktime will normalize
+#   these later
+def __closure_parse_date_w3dtf():
+    # the __extract_date and __extract_time methods were
+    # copied-out so they could be used by my code --bear
+    def __extract_tzd(m):
+        '''Return the Time Zone Designator as an offset in seconds from UTC.'''
+        if not m:
+            return 0
+        tzd = m.group('tzd')
+        if not tzd:
+            return 0
+        if tzd == 'Z':
+            return 0
+        hours = int(m.group('tzdhours'))
+        minutes = m.group('tzdminutes')
+        if minutes:
+            minutes = int(minutes)
+        else:
+            minutes = 0
+        offset = (hours * 60 + minutes) * 60
+        if tzd[0] == '+':
+            return -offset
+        return offset
+
+    def _parse_date_w3dtf(dateString):
+        m = __datetime_rx.match(dateString)
+        if m is None or m.group() != dateString:
+            return
+        return _extract_date(m) + _extract_time(m) + (0, 0, 0)
+
+    __date_re = (r'(?P<year>\d\d\d\d)'
+                 r'(?:(?P<dsep>-|)'
+                 r'(?:(?P<julian>\d\d\d)'
+                 r'|(?P<month>\d\d)(?:(?P=dsep)(?P<day>\d\d))?))?')
+    __tzd_re = r'(?P<tzd>[-+](?P<tzdhours>\d\d)(?::?(?P<tzdminutes>\d\d))|Z)'
+    # __tzd_rx = re.compile(__tzd_re)
+    __time_re = (r'(?P<hours>\d\d)(?P<tsep>:|)(?P<minutes>\d\d)'
+                 r'(?:(?P=tsep)(?P<seconds>\d\d(?:[.,]\d+)?))?' +
+                 __tzd_re)
+    __datetime_re = '%s(?:T%s)?' % (__date_re, __time_re)
+    __datetime_rx = re.compile(__datetime_re)
+
+    return _parse_date_w3dtf
+
+
+_parse_date_w3dtf = __closure_parse_date_w3dtf()
+del __closure_parse_date_w3dtf
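+
+# Rough usage sketch for the closure above (illustrative only): a W3C-DTF
+# string such as '2016-09-07T04:31:59+02:00' is returned as the 9-tuple
+# (2016, 9, 7, 4, 31, 59, 0, 0, 0); the timezone designator is matched by
+# __tzd_re but not applied in this modified copy.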
+
+_monthnames = set([
+    'jan', 'feb', 'mar', 'apr', 'may', 'jun', 'jul',
+    'aug', 'sep', 'oct', 'nov', 'dec',
+    'january', 'february', 'march', 'april', 'may', 'june', 'july',
+    'august', 'september', 'october', 'november', 'december'])
+_daynames = set(['mon', 'tue', 'wed', 'thu', 'fri', 'sat', 'sun'])
+
+
+# Copied from feedparser.py
+# Universal Feedparser
+# Copyright (c) 2002-2006, Mark Pilgrim, All rights reserved.
+# Modified to return a tuple instead of mktime
+def _parse_date_rfc822(dateString):
+    '''Parse an RFC822, RFC1123, RFC2822, or asctime-style date'''
+    data = dateString.split()
+    if data[0][-1] in (',', '.') or data[0].lower() in _daynames:
+        del data[0]
+    if len(data) == 4:
+        s = data[3]
+        s = s.split('+', 1)
+        if len(s) == 2:
+            data[3:] = s
+        else:
+            data.append('')
+        dateString = " ".join(data)
+    if len(data) < 5:
+        dateString += ' 00:00:00 GMT'
+    return email.utils.parsedate_tz(dateString)
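+
+# Illustrative example (not from the copied feedparser code): an RFC 2822
+# string such as 'Wed, 07 Sep 2016 04:31:59 +0200' is handed to
+# email.utils.parsedate_tz(), which returns a 10-tuple whose last element
+# is the UTC offset in seconds (7200 here).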
+
+
+# rfc822.py defines several time zones, but we define some extra ones.
+# 'ET' is equivalent to 'EST', etc.
+# _additional_timezones = {'AT': -400, 'ET': -500,
+#                          'CT': -600, 'MT': -700,
+#                          'PT': -800}
+# email.utils._timezones.update(_additional_timezones)
+
+VERSION_FLAG_STYLE = 1
+VERSION_CONTEXT_STYLE = 2
+
+
+class Calendar(object):
+
+    """
+    A collection of routines to input, parse and manipulate dates and times.
+    The text can either be 'normal' date values or it can be human readable.
+    """
+
+    def __init__(self, constants=None, version=VERSION_FLAG_STYLE):
+        """
+        Default constructor for the L{Calendar} class.
+
+        @type  constants: object
+        @param constants: Instance of the class L{Constants}
+        @type  version:   integer
+        @param version:   Default style version of the current Calendar
+                          instance. Valid values are 1 (L{VERSION_FLAG_STYLE})
+                          or 2 (L{VERSION_CONTEXT_STYLE}). See L{parse()}.
+
+        @rtype:  object
+        @return: L{Calendar} instance
+        """
+        # if a constants reference is not included, use default
+        if constants is None:
+            self.ptc = Constants()
+        else:
+            self.ptc = constants
+
+        self.version = version
+        if version == VERSION_FLAG_STYLE:
+            warnings.warn(
+                'Flag style will be deprecated in parsedatetime 2.0. '
+                'Instead use the context style by instantiating `Calendar()` '
+                'with argument `version=parsedatetime.VERSION_CONTEXT_STYLE`.',
+                pdt20DeprecationWarning)
+        self._ctxStack = pdtContextStack()
+
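+    # Typical usage, as a rough sketch (assumes this module is importable as
+    # `parsedatetime` and that the default English Constants are in effect):
+    #   >>> import parsedatetime as pdt
+    #   >>> cal = pdt.Calendar(version=pdt.VERSION_CONTEXT_STYLE)
+    #   >>> timetuple, ctx = cal.parse('tomorrow at noon')
+    #   >>> ctx.hasDateOrTime
+    #   True
+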
+    @contextlib.contextmanager
+    def context(self):
+        ctx = pdtContext()
+        self._ctxStack.push(ctx)
+        yield ctx
+        ctx = self._ctxStack.pop()
+        if not self._ctxStack.isEmpty():
+            self.currentContext.update(ctx)
+
+    @property
+    def currentContext(self):
+        return self._ctxStack.last()
+
+    def _convertUnitAsWords(self, unitText):
+        """
+        Converts text units into their number value.
+
+        @type  unitText: string
+        @param unitText: number text to convert
+
+        @rtype:  integer
+        @return: numerical value of unitText
+        """
+        word_list, a, b = re.split(r"[,\s-]+", unitText), 0, 0
+        for word in word_list:
+            x = self.ptc.small.get(word)
+            if x is not None:
+                a += x
+            elif word == "hundred":
+                a *= 100
+            else:
+                x = self.ptc.magnitude.get(word)
+                if x is not None:
+                    b += a * x
+                    a = 0
+                elif word in self.ptc.ignore:
+                    pass
+                else:
+                    raise Exception("Unknown number: " + word)
+        return a + b
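+
+    # Rough illustration (assumes the default English Constants, where
+    # ptc.small maps words such as 'one' and 'twenty' to 1 and 20, and
+    # ptc.magnitude maps e.g. 'thousand' to 1000):
+    #   'two hundred'        -> 200
+    #   'one hundred twenty' -> 120
+    # A word that is neither known nor ignored raises Exception.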
+
+    def _buildTime(self, source, quantity, modifier, units):
+        """
+        Take C{quantity}, C{modifier} and C{units} strings and convert them
+        into values. After converting, calculate the time and return the
+        adjusted sourceTime.
+
+        @type  source:   time
+        @param source:   time to use as the base (or source)
+        @type  quantity: string
+        @param quantity: quantity string
+        @type  modifier: string
+        @param modifier: how quantity and units modify the source time
+        @type  units:    string
+        @param units:    unit of the quantity (e.g. hours, days, months, etc.)
+
+        @rtype:  struct_time
+        @return: C{struct_time} of the calculated time
+        """
+        ctx = self.currentContext
+        debug and log.debug('_buildTime: [%s][%s][%s]',
+                            quantity, modifier, units)
+
+        if source is None:
+            source = time.localtime()
+
+        if quantity is None:
+            quantity = ''
+        else:
+            quantity = quantity.strip()
+
+        qty = self._quantityToReal(quantity)
+
+        if modifier in self.ptc.Modifiers:
+            qty = qty * self.ptc.Modifiers[modifier]
+
+            if units is None or units == '':
+                units = 'dy'
+
+        # plurals are handled by the regexes (could be a bug though)
+
+        (yr, mth, dy, hr, mn, sec, _, _, _) = source
+
+        start = datetime.datetime(yr, mth, dy, hr, mn, sec)
+        target = start
+        # realunit = next((key for key, values in self.ptc.units.items()
+        #                  if any(imap(units.__contains__, values))), None)
+        realunit = units
+        for key, values in self.ptc.units.items():
+            if units in values:
+                realunit = key
+                break
+
+        debug and log.debug('units %s --> realunit %s (qty=%s)',
+                            units, realunit, qty)
+
+        try:
+            if realunit in ('years', 'months'):
+                target = self.inc(start, **{realunit[:-1]: qty})
+            elif realunit in ('days', 'hours', 'minutes', 'seconds', 'weeks'):
+                delta = datetime.timedelta(**{realunit: qty})
+                target = start + delta
+        except OverflowError:
+            # OverflowError is raised when target.year is larger than 9999
+            pass
+        else:
+            ctx.updateAccuracy(realunit)
+
+        return target.timetuple()
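+
+    # Behaviour sketch (illustrative values only): with quantity='3',
+    # modifier='' and units='days', realunit resolves to 'days' through
+    # ptc.units and the returned time is sourceTime advanced by a
+    # three-day timedelta; 'years' and 'months' go through self.inc().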
+
+    def parseDate(self, dateString, sourceTime=None):
+        """
+        Parse short-form date strings::
+
+            '05/28/2006' or '04.21'
+
+        @type  dateString: string
+        @param dateString: text to convert to a C{datetime}
+        @type  sourceTime:     struct_time
+        @param sourceTime:     C{struct_time} value to use as the base
+
+        @rtype:  struct_time
+        @return: calculated C{struct_time} value of dateString
+        """
+        if sourceTime is None:
+            yr, mth, dy, hr, mn, sec, wd, yd, isdst = time.localtime()
+        else:
+            yr, mth, dy, hr, mn, sec, wd, yd, isdst = sourceTime
+
+        # values pulled from the regexes will be stored here and later
+        # assigned to mth, dy, yr based on information from the locale
+        # -1 is used as the marker value because we want zero values
+        # to be passed thru so they can be flagged as errors later
+        v1 = -1
+        v2 = -1
+        v3 = -1
+        accuracy = []
+
+        s = dateString
+        m = self.ptc.CRE_DATE2.search(s)
+        if m is not None:
+            index = m.start()
+            v1 = int(s[:index])
+            s = s[index + 1:]
+
+        m = self.ptc.CRE_DATE2.search(s)
+        if m is not None:
+            index = m.start()
+            v2 = int(s[:index])
+            v3 = int(s[index + 1:])
+        else:
+            v2 = int(s.strip())
+
+        v = [v1, v2, v3]
+        d = {'m': mth, 'd': dy, 'y': yr}
+
+        # yyyy/mm/dd format
+        dp_order = self.ptc.dp_order if v1 <= 31 else ['y', 'm', 'd']
+
+        for i in range(0, 3):
+            n = v[i]
+            c = dp_order[i]
+            if n >= 0:
+                d[c] = n
+                accuracy.append({'m': pdtContext.ACU_MONTH,
+                                 'd': pdtContext.ACU_DAY,
+                                 'y': pdtContext.ACU_YEAR}[c])
+
+        # if the year is not specified and the date has already
+        # passed, increment the year
+        if v3 == -1 and ((mth > d['m']) or (mth == d['m'] and dy > d['d'])):
+            yr = d['y'] + self.ptc.YearParseStyle
+        else:
+            yr = d['y']
+
+        mth = d['m']
+        dy = d['d']
+
+        # birthday epoch constraint
+        if yr < self.ptc.BirthdayEpoch:
+            yr += 2000
+        elif yr < 100:
+            yr += 1900
+
+        daysInCurrentMonth = self.ptc.daysInMonth(mth, yr)
+        debug and log.debug('parseDate: %s %s %s %s',
+                            yr, mth, dy, daysInCurrentMonth)
+
+        with self.context() as ctx:
+            if mth > 0 and mth <= 12 and dy > 0 and \
+                    dy <= daysInCurrentMonth:
+                sourceTime = (yr, mth, dy, hr, mn, sec, wd, yd, isdst)
+                ctx.updateAccuracy(*accuracy)
+            else:
+                # return current time if date string is invalid
+                sourceTime = time.localtime()
+
+        return sourceTime
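+
+    # Illustrative call (assumes an English locale where ptc.dp_order is
+    # ['m', 'd', 'y']): parseDate('05/28/2006') fills month=5, day=28 and
+    # year=2006 and returns the matching struct_time, while an impossible
+    # value such as '14/99/2006' falls back to time.localtime().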
+
+    def parseDateText(self, dateString, sourceTime=None):
+        """
+        Parse long-form date strings::
+
+            'May 31st, 2006'
+            'Jan 1st'
+            'July 2006'
+
+        @type  dateString: string
+        @param dateString: text to convert to a datetime
+        @type  sourceTime:     struct_time
+        @param sourceTime:     C{struct_time} value to use as the base
+
+        @rtype:  struct_time
+        @return: calculated C{struct_time} value of dateString
+        """
+        if sourceTime is None:
+            yr, mth, dy, hr, mn, sec, wd, yd, isdst = time.localtime()
+        else:
+            yr, mth, dy, hr, mn, sec, wd, yd, isdst = sourceTime
+
+        currentMth = mth
+        currentDy = dy
+        accuracy = []
+
+        debug and log.debug('parseDateText currentMth %s currentDy %s',
+                            mth, dy)
+
+        s = dateString.lower()
+        m = self.ptc.CRE_DATE3.search(s)
+        mth = m.group('mthname')
+        mth = self.ptc.MonthOffsets[mth]
+        accuracy.append('month')
+
+        if m.group('day') is not None:
+            dy = int(m.group('day'))
+            accuracy.append('day')
+        else:
+            dy = 1
+
+        if m.group('year') is not None:
+            yr = int(m.group('year'))
+            accuracy.append('year')
+
+            # birthday epoch constraint
+            if yr < self.ptc.BirthdayEpoch:
+                yr += 2000
+            elif yr < 100:
+                yr += 1900
+
+        elif (mth < currentMth) or (mth == currentMth and dy < currentDy):
+            # if that day and month have already passed in this year,
+            # then increment the year by 1
+            yr += self.ptc.YearParseStyle
+
+        with self.context() as ctx:
+            if dy > 0 and dy <= self.ptc.daysInMonth(mth, yr):
+                sourceTime = (yr, mth, dy, hr, mn, sec, wd, yd, isdst)
+                ctx.updateAccuracy(*accuracy)
+            else:
+                # Return current time if date string is invalid
+                sourceTime = time.localtime()
+
+        debug and log.debug('parseDateText returned '
+                            'mth %d dy %d yr %d sourceTime %s',
+                            mth, dy, yr, sourceTime)
+
+        return sourceTime
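+
+    # Illustrative behaviour (assuming the default YearParseStyle of 1):
+    # parseDateText('Jan 1st') with a sourceTime in September resolves to
+    # January 1st of the following year, while 'May 31st, 2006' keeps the
+    # explicit year and records month, day and year accuracy.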
+
+    def evalRanges(self, datetimeString, sourceTime=None):
+        """
+        Evaluate the C{datetimeString} text and determine if
+        it represents a date or time range.
+
+        @type  datetimeString: string
+        @param datetimeString: datetime text to evaluate
+        @type  sourceTime:     struct_time
+        @param sourceTime:     C{struct_time} value to use as the base
+
+        @rtype:  tuple
+        @return: tuple of: start datetime, end datetime and the invalid flag
+        """
+        rangeFlag = retFlag = 0
+        startStr = endStr = ''
+
+        s = datetimeString.strip().lower()
+
+        if self.ptc.rangeSep in s:
+            s = s.replace(self.ptc.rangeSep, ' %s ' % self.ptc.rangeSep)
+            s = s.replace('  ', ' ')
+
+        for cre, rflag in [(self.ptc.CRE_TIMERNG1, 1),
+                           (self.ptc.CRE_TIMERNG2, 2),
+                           (self.ptc.CRE_TIMERNG4, 7),
+                           (self.ptc.CRE_TIMERNG3, 3),
+                           (self.ptc.CRE_DATERNG1, 4),
+                           (self.ptc.CRE_DATERNG2, 5),
+                           (self.ptc.CRE_DATERNG3, 6)]:
+            m = cre.search(s)
+            if m is not None:
+                rangeFlag = rflag
+                break
+
+        debug and log.debug('evalRanges: rangeFlag = %s [%s]', rangeFlag, s)
+
+        if m is not None:
+            if (m.group() != s):
+                # capture remaining string
+                parseStr = m.group()
+                chunk1 = s[:m.start()]
+                chunk2 = s[m.end():]
+                s = '%s %s' % (chunk1, chunk2)
+
+                sourceTime, ctx = self.parse(s, sourceTime,
+                                             VERSION_CONTEXT_STYLE)
+
+                if not ctx.hasDateOrTime:
+                    sourceTime = None
+            else:
+                parseStr = s
+
+        if rangeFlag in (1, 2):
+            m = re.search(self.ptc.rangeSep, parseStr)
+            startStr = parseStr[:m.start()]
+            endStr = parseStr[m.start() + 1:]
+            retFlag = 2
+
+        elif rangeFlag in (3, 7):
+            m = re.search(self.ptc.rangeSep, parseStr)
+            # capturing the meridian from the end time
+            if self.ptc.usesMeridian:
+                ampm = re.search(self.ptc.am[0], parseStr)
+
+                # appending the meridian to the start time
+                if ampm is not None:
+                    startStr = parseStr[:m.start()] + self.ptc.meridian[0]
+                else:
+                    startStr = parseStr[:m.start()] + self.ptc.meridian[1]
+            else:
+                startStr = parseStr[:m.start()]
+
+            endStr = parseStr[m.start() + 1:]
+            retFlag = 2
+
+        elif rangeFlag == 4:
+            m = re.search(self.ptc.rangeSep, parseStr)
+            startStr = parseStr[:m.start()]
+            endStr = parseStr[m.start() + 1:]
+            retFlag = 1
+
+        elif rangeFlag == 5:
+            m = re.search(self.ptc.rangeSep, parseStr)
+            endStr = parseStr[m.start() + 1:]
+
+            # capturing the year from the end date
+            date = self.ptc.CRE_DATE3.search(endStr)
+            endYear = date.group('year')
+
+            # appending the year to the start date if the start date
+            # does not have year information and the end date does.
+            # e.g. "Aug 21 - Sep 4, 2007"
+            if endYear is not None:
+                startStr = (parseStr[:m.start()]).strip()
+                date = self.ptc.CRE_DATE3.search(startStr)
+                startYear = date.group('year')
+
+                if startYear is None:
+                    startStr = startStr + ', ' + endYear
+            else:
+                startStr = parseStr[:m.start()]
+
+            retFlag = 1
+
+        elif rangeFlag == 6:
+            m = re.search(self.ptc.rangeSep, parseStr)
+
+            startStr = parseStr[:m.start()]
+
+            # capturing the month from the start date
+            mth = self.ptc.CRE_DATE3.search(startStr)
+            mth = mth.group('mthname')
+
+            # appending the month name to the end date
+            endStr = mth + parseStr[(m.start() + 1):]
+
+            retFlag = 1
+
+        else:
+            # if range is not found
+            startDT = endDT = time.localtime()
+
+        if retFlag:
+            startDT, sctx = self.parse(startStr, sourceTime,
+                                       VERSION_CONTEXT_STYLE)
+            endDT, ectx = self.parse(endStr, sourceTime,
+                                     VERSION_CONTEXT_STYLE)
+
+            if not sctx.hasDateOrTime or not ectx.hasDateOrTime:
+                retFlag = 0
+
+        return startDT, endDT, retFlag
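+
+    # Illustration of the intent (values assumed, not exhaustive): for
+    # 'Aug 21 - Sep 4, 2007' the date-range branch copies the trailing year
+    # onto the start date, both halves parse as 2007 and the method returns
+    # (startDT, endDT, 1); text with no recognisable range returns flag 0.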
+
+    def _CalculateDOWDelta(self, wd, wkdy, offset, style, currentDayStyle):
+        """
+        Based on the C{style} and C{currentDayStyle} determine what
+        day-of-week value is to be returned.
+
+        @type  wd:              integer
+        @param wd:              day-of-week value for the current day
+        @type  wkdy:            integer
+        @param wkdy:            day-of-week value for the parsed day
+        @type  offset:          integer
+        @param offset:          offset direction for any modifiers (-1, 0, 1)
+        @type  style:           integer
+        @param style:           normally the value
+                                set in C{Constants.DOWParseStyle}
+        @type  currentDayStyle: integer
+        @param currentDayStyle: normally the value
+                                set in C{Constants.CurrentDOWParseStyle}
+
+        @rtype:  integer
+        @return: calculated day-of-week
+        """
+        diffBase = wkdy - wd
+        origOffset = offset
+
+        if offset == 2:
+            # no modifier is present.
+            # i.e. string to be parsed is just DOW
+            if wkdy * style > wd * style or \
+                    currentDayStyle and wkdy == wd:
+                # wkdy located in current week
+                offset = 0
+            elif style in (-1, 1):
+                # wkdy located in last (-1) or next (1) week
+                offset = style
+            else:
+                # invalid style, or should raise error?
+                offset = 0
+
+        # offset = -1 means last week
+        # offset = 0 means current week
+        # offset = 1 means next week
+        diff = diffBase + 7 * offset
+        if style == 1 and diff < -7:
+            diff += 7
+        elif style == -1 and diff > 7:
+            diff -= 7
+
+        debug and log.debug("wd %s, wkdy %s, offset %d, "
+                            "style %d, currentDayStyle %d",
+                            wd, wkdy, origOffset, style, currentDayStyle)
+
+        return diff
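+
+    # Worked example (weekday numbering follows struct_time, Monday == 0):
+    # with wd=2 (Wednesday), wkdy=4 (Friday), offset=2 (no modifier) and
+    # style=1, Friday still lies in the current week, so offset becomes 0
+    # and the method returns diffBase = 4 - 2 = 2 days.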
+
+    def _quantityToReal(self, quantity):
+        """
+        Convert a quantity, either spelled-out or numeric, to a float
+
+        @type    quantity: string
+        @param   quantity: quantity to parse to float
+        @rtype:  float
+        @return: the quantity as a float, defaulting to 0.0
+        """
+        if not quantity:
+            return 1.0
+
+        try:
+            return float(quantity.replace(',', '.'))
+        except ValueError:
+            pass
+
+        try:
+            return float(self.ptc.numbers[quantity])
+        except KeyError:
+            pass
+
+        return 0.0
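+
+    # Quick illustration of the fallbacks above: '' -> 1.0 (an omitted
+    # quantity means "one"), '3,5' -> 3.5 (decimal comma accepted), a word
+    # found in ptc.numbers (e.g. 'five' in the English locale) -> 5.0, and
+    # anything unrecognised -> 0.0.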
+
+    def _evalModifier(self, modifier, chunk1, chunk2, sourceTime):
+        """
+        Evaluate the C{modifier} string and following text (passed in
+        as C{chunk1} and C{chunk2}) and if they match any known modifiers
+        calculate the delta and apply it to C{sourceTime}.
+
+        @type  modifier:   string
+        @param modifier:   modifier text to apply to sourceTime
+        @type  chunk1:     string
+        @param chunk1:     text chunk that preceded modifier (if any)
+        @type  chunk2:     string
+        @param chunk2:     text chunk that followed modifier (if any)
+        @type  sourceTime: struct_time
+        @param sourceTime: C{struct_time} value to use as the base
+
+        @rtype:  tuple
+        @return: tuple of: remaining text and the modified sourceTime
+        """
+        ctx = self.currentContext
+        offset = self.ptc.Modifiers[modifier]
+
+        if sourceTime is not None:
+            (yr, mth, dy, hr, mn, sec, wd, yd, isdst) = sourceTime
+        else:
+            (yr, mth, dy, hr, mn, sec, wd, yd, isdst) = time.localtime()
+
+        if self.ptc.StartTimeFromSourceTime:
+            startHour = hr
+            startMinute = mn
+            startSecond = sec
+        else:
+            startHour = 9
+            startMinute = 0
+            startSecond = 0
+
+        # capture the units after the modifier and the remaining
+        # string after the unit
+        m = self.ptc.CRE_REMAINING.search(chunk2)
+        if m is not None:
+            index = m.start() + 1
+            unit = chunk2[:m.start()]
+            chunk2 = chunk2[index:]
+        else:
+            unit = chunk2
+            chunk2 = ''
+
+        debug and log.debug("modifier [%s] chunk1 [%s] "
+                            "chunk2 [%s] unit [%s]",
+                            modifier, chunk1, chunk2, unit)
+
+        if unit in self.ptc.units['months']:
+            currentDaysInMonth = self.ptc.daysInMonth(mth, yr)
+            if offset == 0:
+                dy = currentDaysInMonth
+                sourceTime = (yr, mth, dy, startHour, startMinute,
+                              startSecond, wd, yd, isdst)
+            elif offset == 2:
+                # if day is the last day of the month, calculate the last day
+                # of the next month
+                if dy == currentDaysInMonth:
+                    dy = self.ptc.daysInMonth(mth + 1, yr)
+
+                start = datetime.datetime(yr, mth, dy, startHour,
+                                          startMinute, startSecond)
+                target = self.inc(start, month=1)
+                sourceTime = target.timetuple()
+            else:
+                start = datetime.datetime(yr, mth, 1, startHour,
+                                          startMinute, startSecond)
+                target = self.inc(start, month=offset)
+                sourceTime = target.timetuple()
+            ctx.updateAccuracy(ctx.ACU_MONTH)
+
+        elif unit in self.ptc.units['weeks']:
+            if offset == 0:
+                start = datetime.datetime(yr, mth, dy, 17, 0, 0)
+                target = start + datetime.timedelta(days=(4 - wd))
+                sourceTime = target.timetuple()
+            elif offset == 2:
+                start = datetime.datetime(yr, mth, dy, startHour,
+                                          startMinute, startSecond)
+                target = start + datetime.timedelta(days=7)
+                sourceTime = target.timetuple()
+            else:
+                start = datetime.datetime(yr, mth, dy, startHour,
+                                          startMinute, startSecond)
+                target = start + offset * datetime.timedelta(weeks=1)
+                sourceTime = target.timetuple()
+            ctx.updateAccuracy(ctx.ACU_WEEK)
+
+        elif unit in self.ptc.units['days']:
+            if offset == 0:
+                sourceTime = (yr, mth, dy, 17, 0, 0, wd, yd, isdst)
+                ctx.updateAccuracy(ctx.ACU_HALFDAY)
+            elif offset == 2:
+                start = datetime.datetime(yr, mth, dy, hr, mn, sec)
+                target = start + datetime.timedelta(days=1)
+                sourceTime = target.timetuple()
+            else:
+                start = datetime.datetime(yr, mth, dy, startHour,
+                                          startMinute, startSecond)
+                target = start + datetime.timedelta(days=offset)
+                sourceTime = target.timetuple()
+            ctx.updateAccuracy(ctx.ACU_DAY)
+
+        elif unit in self.ptc.units['hours']:
+            if offset == 0:
+                sourceTime = (yr, mth, dy, hr, 0, 0, wd, yd, isdst)
+            else:
+                start = datetime.datetime(yr, mth, dy, hr, 0, 0)
+                target = start + datetime.timedelta(hours=offset)
+                sourceTime = target.timetuple()
+            ctx.updateAccuracy(ctx.ACU_HOUR)
+
+        elif unit in self.ptc.units['years']:
+            if offset == 0:
+                sourceTime = (yr, 12, 31, hr, mn, sec, wd, yd, isdst)
+            elif offset == 2:
+                sourceTime = (yr + 1, mth, dy, hr, mn, sec, wd, yd, isdst)
+            else:
+                sourceTime = (yr + offset, 1, 1, startHour, startMinute,
+                              startSecond, wd, yd, isdst)
+            ctx.updateAccuracy(ctx.ACU_YEAR)
+
+        elif modifier == 'eom':
+            dy = self.ptc.daysInMonth(mth, yr)
+            sourceTime = (yr, mth, dy, startHour, startMinute,
+                          startSecond, wd, yd, isdst)
+            ctx.updateAccuracy(ctx.ACU_DAY)
+
+        elif modifier == 'eoy':
+            mth = 12
+            dy = self.ptc.daysInMonth(mth, yr)
+            sourceTime = (yr, mth, dy, startHour, startMinute,
+                          startSecond, wd, yd, isdst)
+            ctx.updateAccuracy(ctx.ACU_MONTH)
+
+        elif self.ptc.CRE_WEEKDAY.match(unit):
+            m = self.ptc.CRE_WEEKDAY.match(unit)
+            debug and log.debug('CRE_WEEKDAY matched')
+            wkdy = m.group()
+
+            if modifier == 'eod':
+                ctx.updateAccuracy(ctx.ACU_HOUR)
+                # Calculate the upcoming weekday
+                sourceTime, subctx = self.parse(wkdy, sourceTime,
+                                                VERSION_CONTEXT_STYLE)
+                sTime = self.ptc.getSource(modifier, sourceTime)
+                if sTime is not None:
+                    sourceTime = sTime
+                    ctx.updateAccuracy(ctx.ACU_HALFDAY)
+            else:
+                # unless one of these modifiers is being applied to the
+                # day-of-week, we want to start with target as the day
+                # in the current week.
+                dowOffset = offset
+                if modifier not in ['next', 'last', 'prior', 'previous']:
+                    dowOffset = 0
+
+                wkdy = self.ptc.WeekdayOffsets[wkdy]
+                diff = self._CalculateDOWDelta(
+                    wd, wkdy, dowOffset, self.ptc.DOWParseStyle,
+                    self.ptc.CurrentDOWParseStyle)
+                start = datetime.datetime(yr, mth, dy, startHour,
+                                          startMinute, startSecond)
+                target = start + datetime.timedelta(days=diff)
+
+                if chunk1 != '':
+                    # consider "one day before thursday": we need to parse chunk1 ("one day")
+                    # and apply according to the offset ("before"), rather than allowing the
+                    # remaining parse step to apply "one day" without the offset direction.
+                    t, subctx = self.parse(chunk1, sourceTime, VERSION_CONTEXT_STYLE)
+                    if subctx.hasDateOrTime:
+                        delta = time.mktime(t) - time.mktime(sourceTime)
+                        target = start + datetime.timedelta(days=diff) + datetime.timedelta(seconds=delta * offset)
+                        chunk1 = ''
+
+                sourceTime = target.timetuple()
+            ctx.updateAccuracy(ctx.ACU_DAY)
+
+        elif self.ptc.CRE_TIME.match(unit):
+            m = self.ptc.CRE_TIME.match(unit)
+            debug and log.debug('CRE_TIME matched')
+            (yr, mth, dy, hr, mn, sec, wd, yd, isdst), subctx = \
+                self.parse(unit, None, VERSION_CONTEXT_STYLE)
+
+            start = datetime.datetime(yr, mth, dy, hr, mn, sec)
+            target = start + datetime.timedelta(days=offset)
+            sourceTime = target.timetuple()
+
+        else:
+            # check if the remaining text is parsable and if so,
+            # use it as the base time for the modifier source time
+
+            debug and log.debug('check for modifications '
+                                'to source time [%s] [%s]',
+                                chunk1, unit)
+
+            unit = unit.strip()
+            if unit:
+                s = '%s %s' % (unit, chunk2)
+                t, subctx = self.parse(s, sourceTime, VERSION_CONTEXT_STYLE)
+
+                if subctx.hasDate:  # working with dates
+                    u = unit.lower()
+                    if u in self.ptc.Months or \
+                            u in self.ptc.shortMonths:
+                        yr, mth, dy, hr, mn, sec, wd, yd, isdst = t
+                        start = datetime.datetime(
+                            yr, mth, dy, hr, mn, sec)
+                        t = self.inc(start, year=offset).timetuple()
+                    elif u in self.ptc.Weekdays:
+                        t = t + datetime.timedelta(weeks=offset)
+
+                if subctx.hasDateOrTime:
+                    sourceTime = t
+                    chunk2 = ''
+
+            chunk1 = chunk1.strip()
+
+            # if the word after the modifier is a number, the string is most
+            # likely something like "next 4 hrs", so we have to combine the
+            # units with the rest of the string
+            if chunk1:
+                try:
+                    m = list(self.ptc.CRE_NUMBER.finditer(chunk1))[-1]
+                except IndexError:
+                    pass
+                else:
+                    qty = None
+                    debug and log.debug('CRE_NUMBER matched')
+                    qty = self._quantityToReal(m.group()) * offset
+                    chunk1 = '%s%s%s' % (chunk1[:m.start()],
+                                         qty, chunk1[m.end():])
+                t, subctx = self.parse(chunk1, sourceTime,
+                                       VERSION_CONTEXT_STYLE)
+
+                chunk1 = ''
+
+                if subctx.hasDateOrTime:
+                    sourceTime = t
+
+            debug and log.debug('looking for modifier %s', modifier)
+            sTime = self.ptc.getSource(modifier, sourceTime)
+            if sTime is not None:
+                debug and log.debug('modifier found in sources')
+                sourceTime = sTime
+                ctx.updateAccuracy(ctx.ACU_HALFDAY)
+
+        debug and log.debug('returning chunk = "%s %s" and sourceTime = %s',
+                            chunk1, chunk2, sourceTime)
+
+        return '%s %s' % (chunk1, chunk2), sourceTime
+
+    def _evalDT(self, datetimeString, sourceTime):
+        """
+        Calculate the datetime from a known format like RFC822 or W3CDTF
+
+        Examples handled::
+            RFC822, W3CDTF formatted dates
+            HH:MM[:SS][ am/pm]
+            MM/DD/YYYY
+            DD MMMM YYYY
+
+        @type  datetimeString: string
+        @param datetimeString: text to try and parse as more "traditional"
+                               date/time text
+        @type  sourceTime:     struct_time
+        @param sourceTime:     C{struct_time} value to use as the base
+
+        @rtype:  struct_time
+        @return: calculated C{struct_time} value or current C{struct_time}
+                 if not parsed
+        """
+        ctx = self.currentContext
+        s = datetimeString.strip()
+
+        # Given string date is a RFC822 date
+        if sourceTime is None:
+            sourceTime = _parse_date_rfc822(s)
+            debug and log.debug(
+                'attempt to parse as rfc822 - %s', str(sourceTime))
+
+            if sourceTime is not None:
+                (yr, mth, dy, hr, mn, sec, wd, yd, isdst, _) = sourceTime
+                ctx.updateAccuracy(ctx.ACU_YEAR, ctx.ACU_MONTH, ctx.ACU_DAY)
+
+                if hr != 0 and mn != 0 and sec != 0:
+                    ctx.updateAccuracy(ctx.ACU_HOUR, ctx.ACU_MIN, ctx.ACU_SEC)
+
+                sourceTime = (yr, mth, dy, hr, mn, sec, wd, yd, isdst)
+
+        # Given string date is a W3CDTF date
+        if sourceTime is None:
+            sourceTime = _parse_date_w3dtf(s)
+
+            if sourceTime is not None:
+                ctx.updateAccuracy(ctx.ACU_YEAR, ctx.ACU_MONTH, ctx.ACU_DAY,
+                                   ctx.ACU_HOUR, ctx.ACU_MIN, ctx.ACU_SEC)
+
+        if sourceTime is None:
+            sourceTime = time.localtime()
+
+        return sourceTime
+
+    def _evalUnits(self, datetimeString, sourceTime):
+        """
+        Evaluate text passed by L{_partialParseUnits()}
+        """
+        s = datetimeString.strip()
+        sourceTime = self._evalDT(datetimeString, sourceTime)
+
+        # Given string is a time string with units like "5 hrs 30 min"
+        modifier = ''  # TODO
+
+        m = self.ptc.CRE_UNITS.search(s)
+        if m is not None:
+            units = m.group('units')
+            quantity = s[:m.start('units')]
+
+        sourceTime = self._buildTime(sourceTime, quantity, modifier, units)
+        return sourceTime
+
+    def _evalQUnits(self, datetimeString, sourceTime):
+        """
+        Evaluate text passed by L{_partialParseQUnits()}
+        """
+        s = datetimeString.strip()
+        sourceTime = self._evalDT(datetimeString, sourceTime)
+
+        # Given string is a time string with single char units like "5 h 30 m"
+        modifier = ''  # TODO
+
+        m = self.ptc.CRE_QUNITS.search(s)
+        if m is not None:
+            units = m.group('qunits')
+            quantity = s[:m.start('qunits')]
+
+        sourceTime = self._buildTime(sourceTime, quantity, modifier, units)
+        return sourceTime
+
+    def _evalDateStr(self, datetimeString, sourceTime):
+        """
+        Evaluate text passed by L{_partialParseDateStr()}
+        """
+        s = datetimeString.strip()
+        sourceTime = self._evalDT(datetimeString, sourceTime)
+
+        # Given string is in the format  "May 23rd, 2005"
+        debug and log.debug('checking for MMM DD YYYY')
+        return self.parseDateText(s, sourceTime)
+
+    def _evalDateStd(self, datetimeString, sourceTime):
+        """
+        Evaluate text passed by L{_partialParseDateStd()}
+        """
+        s = datetimeString.strip()
+        sourceTime = self._evalDT(datetimeString, sourceTime)
+
+        # Given string is in the format 07/21/2006
+        return self.parseDate(s, sourceTime)
+
+    def _evalDayStr(self, datetimeString, sourceTime):
+        """
+        Evaluate text passed by L{_partialParseDaystr()}
+        """
+        s = datetimeString.strip()
+        sourceTime = self._evalDT(datetimeString, sourceTime)
+
+        # Given string is a natural language date string like today, tomorrow..
+        (yr, mth, dy, hr, mn, sec, wd, yd, isdst) = sourceTime
+
+        try:
+            offset = self.ptc.dayOffsets[s]
+        except KeyError:
+            offset = 0
+
+        if self.ptc.StartTimeFromSourceTime:
+            startHour = hr
+            startMinute = mn
+            startSecond = sec
+        else:
+            startHour = 9
+            startMinute = 0
+            startSecond = 0
+
+        self.currentContext.updateAccuracy(pdtContext.ACU_DAY)
+        start = datetime.datetime(yr, mth, dy, startHour,
+                                  startMinute, startSecond)
+        target = start + datetime.timedelta(days=offset)
+        return target.timetuple()
+
+    def _evalWeekday(self, datetimeString, sourceTime):
+        """
+        Evaluate text passed by L{_partialParseWeekday()}
+        """
+        s = datetimeString.strip()
+        sourceTime = self._evalDT(datetimeString, sourceTime)
+
+        # Given string is a weekday
+        yr, mth, dy, hr, mn, sec, wd, yd, isdst = sourceTime
+
+        start = datetime.datetime(yr, mth, dy, hr, mn, sec)
+        wkdy = self.ptc.WeekdayOffsets[s]
+
+        qty = self._CalculateDOWDelta(wd, wkdy, 2,
+                                      self.ptc.DOWParseStyle,
+                                      self.ptc.CurrentDOWParseStyle)
+
+        self.currentContext.updateAccuracy(pdtContext.ACU_DAY)
+        target = start + datetime.timedelta(days=qty)
+        return target.timetuple()
+
+    def _evalTimeStr(self, datetimeString, sourceTime):
+        """
+        Evaluate text passed by L{_partialParseTimeStr()}
+        """
+        s = datetimeString.strip()
+        sourceTime = self._evalDT(datetimeString, sourceTime)
+
+        if s in self.ptc.re_values['now']:
+            self.currentContext.updateAccuracy(pdtContext.ACU_NOW)
+        else:
+            # Given string is a natural language time string like
+            # lunch, midnight, etc
+            sTime = self.ptc.getSource(s, sourceTime)
+            if sTime:
+                sourceTime = sTime
+            self.currentContext.updateAccuracy(pdtContext.ACU_HALFDAY)
+
+        return sourceTime
+
+    def _evalMeridian(self, datetimeString, sourceTime):
+        """
+        Evaluate text passed by L{_partialParseMeridian()}
+        """
+        s = datetimeString.strip()
+        sourceTime = self._evalDT(datetimeString, sourceTime)
+
+        # Given string is in the format HH:MM(:SS)(am/pm)
+        yr, mth, dy, hr, mn, sec, wd, yd, isdst = sourceTime
+
+        m = self.ptc.CRE_TIMEHMS2.search(s)
+        if m is not None:
+            dt = s[:m.start('meridian')].strip()
+            if len(dt) <= 2:
+                hr = int(dt)
+                mn = 0
+                sec = 0
+            else:
+                hr, mn, sec = _extract_time(m)
+
+            if hr == 24:
+                hr = 0
+
+            meridian = m.group('meridian').lower()
+
+            # if 'am' found and hour is 12 - force hour to 0 (midnight)
+            if (meridian in self.ptc.am) and hr == 12:
+                hr = 0
+
+            # if 'pm' found and hour < 12, add 12 to shift to evening
+            if (meridian in self.ptc.pm) and hr < 12:
+                hr += 12
+
+        # time validation
+        if hr < 24 and mn < 60 and sec < 60:
+            sourceTime = (yr, mth, dy, hr, mn, sec, wd, yd, isdst)
+            _pop_time_accuracy(m, self.currentContext)
+
+        return sourceTime
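+
+    # Illustration of the meridian handling above: '12am' maps hour 12 to 0
+    # (midnight), '12pm' stays 12, '6pm' becomes 18, and values failing the
+    # hr/mn/sec range check leave sourceTime unchanged.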
+
+    def _evalTimeStd(self, datetimeString, sourceTime):
+        """
+        Evaluate text passed by L{_partialParseTimeStd()}
+        """
+        s = datetimeString.strip()
+        sourceTime = self._evalDT(datetimeString, sourceTime)
+
+        # Given string is in the format HH:MM(:SS)
+        yr, mth, dy, hr, mn, sec, wd, yd, isdst = sourceTime
+
+        m = self.ptc.CRE_TIMEHMS.search(s)
+        if m is not None:
+            hr, mn, sec = _extract_time(m)
+        if hr == 24:
+            hr = 0
+
+        # time validation
+        if hr < 24 and mn < 60 and sec < 60:
+            sourceTime = (yr, mth, dy, hr, mn, sec, wd, yd, isdst)
+            _pop_time_accuracy(m, self.currentContext)
+
+        return sourceTime
+
+    def _UnitsTrapped(self, s, m, key):
+        # check if a day suffix got trapped by a unit match
+        # for example Dec 31st would match for 31s (aka 31 seconds)
+        # Dec 31st
+        #     ^ ^
+        #     | +-- m.start('units')
+        #     |     and also m2.start('suffix')
+        #     +---- m.start('qty')
+        #           and also m2.start('day')
+        m2 = self.ptc.CRE_DAY2.search(s)
+        if m2 is not None:
+            t = '%s%s' % (m2.group('day'), m.group(key))
+            if m.start(key) == m2.start('suffix') and \
+                    m.start('qty') == m2.start('day') and \
+                    m.group('qty') == t:
+                return True
+            else:
+                return False
+        else:
+            return False
+
+    def _partialParseModifier(self, s, sourceTime):
+        """
+        Test if the given C{s} matches CRE_MODIFIER; used by L{parse()}.
+
+        @type  s:          string
+        @param s:          date/time text to evaluate
+        @type  sourceTime: struct_time
+        @param sourceTime: C{struct_time} value to use as the base
+
+        @rtype:  tuple
+        @return: tuple of the remaining date/time text, the updated
+                 C{sourceTime} and a boolean indicating whether C{s} matched
+
+        """
+        parseStr = None
+        chunk1 = chunk2 = ''
+
+        # Modifier like next/prev/from/after/prior..
+        m = self.ptc.CRE_MODIFIER.search(s)
+        if m is not None:
+            if m.group() != s:
+                # capture remaining string
+                parseStr = m.group()
+                chunk1 = s[:m.start()].strip()
+                chunk2 = s[m.end():].strip()
+            else:
+                parseStr = s
+
+        if parseStr:
+            debug and log.debug('found (modifier) [%s][%s][%s]',
+                                parseStr, chunk1, chunk2)
+            s, sourceTime = self._evalModifier(parseStr, chunk1,
+                                               chunk2, sourceTime)
+
+        return s, sourceTime, bool(parseStr)
+
+    def _partialParseUnits(self, s, sourceTime):
+        """
+        Test if the given C{s} matches CRE_UNITS; used by L{parse()}.
+
+        @type  s:          string
+        @param s:          date/time text to evaluate
+        @type  sourceTime: struct_time
+        @param sourceTime: C{struct_time} value to use as the base
+
+        @rtype:  tuple
+        @return: tuple of the remaining date/time text, the updated
+                 C{sourceTime} and a boolean indicating whether C{s} matched
+
+        """
+        parseStr = None
+        chunk1 = chunk2 = ''
+
+        # Quantity + Units
+        m = self.ptc.CRE_UNITS.search(s)
+        if m is not None:
+            debug and log.debug('CRE_UNITS matched')
+            if self._UnitsTrapped(s, m, 'units'):
+                debug and log.debug('day suffix trapped by unit match')
+            else:
+                if (m.group('qty') != s):
+                    # capture remaining string
+                    parseStr = m.group('qty')
+                    chunk1 = s[:m.start('qty')].strip()
+                    chunk2 = s[m.end('qty'):].strip()
+
+                    if chunk1[-1:] == '-':
+                        parseStr = '-%s' % parseStr
+                        chunk1 = chunk1[:-1]
+
+                    s = '%s %s' % (chunk1, chunk2)
+                else:
+                    parseStr = s
+                    s = ''
+
+        if parseStr:
+            debug and log.debug('found (units) [%s][%s][%s]',
+                                parseStr, chunk1, chunk2)
+            sourceTime = self._evalUnits(parseStr, sourceTime)
+
+        return s, sourceTime, bool(parseStr)
+
+    def _partialParseQUnits(self, s, sourceTime):
+        """
+        Test if the given C{s} matches CRE_QUNITS; used by L{parse()}.
+
+        @type  s:          string
+        @param s:          date/time text to evaluate
+        @type  sourceTime: struct_time
+        @param sourceTime: C{struct_time} value to use as the base
+
+        @rtype:  tuple
+        @return: tuple of the remaining date/time text, the updated
+                 C{sourceTime} and a boolean indicating whether C{s} matched
+
+        """
+        parseStr = None
+        chunk1 = chunk2 = ''
+
+        # Quantity + Units
+        m = self.ptc.CRE_QUNITS.search(s)
+        if m is not None:
+            debug and log.debug('CRE_QUNITS matched')
+            if self._UnitsTrapped(s, m, 'qunits'):
+                debug and log.debug(
+                    'day suffix trapped by qunit match')
+            else:
+                if (m.group('qty') != s):
+                    # capture remaining string
+                    parseStr = m.group('qty')
+                    chunk1 = s[:m.start('qty')].strip()
+                    chunk2 = s[m.end('qty'):].strip()
+
+                    if chunk1[-1:] == '-':
+                        parseStr = '-%s' % parseStr
+                        chunk1 = chunk1[:-1]
+
+                    s = '%s %s' % (chunk1, chunk2)
+                else:
+                    parseStr = s
+                    s = ''
+
+        if parseStr:
+            debug and log.debug('found (qunits) [%s][%s][%s]',
+                                parseStr, chunk1, chunk2)
+            sourceTime = self._evalQUnits(parseStr, sourceTime)
+
+        return s, sourceTime, bool(parseStr)
+
+    def _partialParseDateStr(self, s, sourceTime):
+        """
+        Test if the given C{s} matches CRE_DATE3; used by L{parse()}.
+
+        @type  s:          string
+        @param s:          date/time text to evaluate
+        @type  sourceTime: struct_time
+        @param sourceTime: C{struct_time} value to use as the base
+
+        @rtype:  tuple
+        @return: tuple of the remaining date/time text, the updated
+                 C{sourceTime} and a boolean indicating whether C{s} matched
+
+        """
+        parseStr = None
+        chunk1 = chunk2 = ''
+
+        m = self.ptc.CRE_DATE3.search(s)
+        # NO LONGER NEEDED, THE REGEXP HANDLED MTHNAME NOW
+        # for match in self.ptc.CRE_DATE3.finditer(s):
+        # to prevent "HH:MM(:SS) time strings" expressions from
+        # triggering this regex, we check whether the month field
+        # exists in the searched expression; if it doesn't exist,
+        # the date field is not valid
+        #     if match.group('mthname'):
+        #         m = self.ptc.CRE_DATE3.search(s, match.start())
+        #         valid_date = True
+        #         break
+
+        # String date format
+        if m is not None:
+
+            if (m.group('date') != s):
+                # capture remaining string
+                mStart = m.start('date')
+                mEnd = m.end('date')
+
+                # we need to check that anything following the parsed
+                # date is a time expression because it is often picked
+                # up as a valid year if the hour is 2 digits
+                fTime = False
+                mm = self.ptc.CRE_TIMEHMS2.search(s)
+                # "February 24th 1PM" doesn't get caught
+                # "February 24th 12PM" does
+                mYear = m.group('year')
+                if mm is not None and mYear is not None:
+                    fTime = True
+                else:
+                    # "February 24th 12:00"
+                    mm = self.ptc.CRE_TIMEHMS.search(s)
+                    if mm is not None and mYear is None:
+                        fTime = True
+                if fTime:
+                    hoursStart = mm.start('hours')
+
+                    if hoursStart < m.end('year'):
+                        mEnd = hoursStart
+
+                parseStr = s[mStart:mEnd]
+                chunk1 = s[:mStart]
+                chunk2 = s[mEnd:]
+
+                s = '%s %s' % (chunk1, chunk2)
+            else:
+                parseStr = s
+                s = ''
+
+        if parseStr:
+            debug and log.debug(
+                'found (date3) [%s][%s][%s]', parseStr, chunk1, chunk2)
+            sourceTime = self._evalDateStr(parseStr, sourceTime)
+
+        return s, sourceTime, bool(parseStr)
+
+    def _partialParseDateStd(self, s, sourceTime):
+        """
+        Test if the given C{s} matches CRE_DATE; used by L{parse()}.
+
+        @type  s:          string
+        @param s:          date/time text to evaluate
+        @type  sourceTime: struct_time
+        @param sourceTime: C{struct_time} value to use as the base
+
+        @rtype:  tuple
+        @return: tuple of the remaining date/time text, the updated
+                 C{sourceTime} and a boolean indicating whether C{s} matched
+
+        """
+        parseStr = None
+        chunk1 = chunk2 = ''
+
+        # Standard date format
+        m = self.ptc.CRE_DATE.search(s)
+        if m is not None:
+
+            if (m.group('date') != s):
+                # capture remaining string
+                parseStr = m.group('date')
+                chunk1 = s[:m.start('date')]
+                chunk2 = s[m.end('date'):]
+                s = '%s %s' % (chunk1, chunk2)
+            else:
+                parseStr = s
+                s = ''
+
+        if parseStr:
+            debug and log.debug(
+                'found (date) [%s][%s][%s]', parseStr, chunk1, chunk2)
+            sourceTime = self._evalDateStd(parseStr, sourceTime)
+
+        return s, sourceTime, bool(parseStr)
+
+    def _partialParseDayStr(self, s, sourceTime):
+        """
+        Test if the given C{s} matches CRE_DAY; used by L{parse()}.
+
+        @type  s:          string
+        @param s:          date/time text to evaluate
+        @type  sourceTime: struct_time
+        @param sourceTime: C{struct_time} value to use as the base
+
+        @rtype:  tuple
+        @return: tuple of the remaining date/time text, the updated
+                 C{sourceTime} and a boolean indicating whether C{s} matched
+
+        """
+        parseStr = None
+        chunk1 = chunk2 = ''
+
+        # Natural language day strings
+        m = self.ptc.CRE_DAY.search(s)
+        if m is not None:
+
+            if (m.group() != s):
+                # capture remaining string
+                parseStr = m.group()
+                chunk1 = s[:m.start()]
+                chunk2 = s[m.end():]
+                s = '%s %s' % (chunk1, chunk2)
+            else:
+                parseStr = s
+                s = ''
+
+        if parseStr:
+            debug and log.debug(
+                'found (day) [%s][%s][%s]', parseStr, chunk1, chunk2)
+            sourceTime = self._evalDayStr(parseStr, sourceTime)
+
+        return s, sourceTime, bool(parseStr)
+
+    def _partialParseWeekday(self, s, sourceTime):
+        """
+        Test if the given C{s} matches CRE_WEEKDAY; used by L{parse()}.
+
+        @type  s:          string
+        @param s:          date/time text to evaluate
+        @type  sourceTime: struct_time
+        @param sourceTime: C{struct_time} value to use as the base
+
+        @rtype:  tuple
+        @return: tuple of the remaining date/time text, the updated
+                 C{sourceTime} and a boolean indicating whether C{s} matched
+
+        """
+        parseStr = None
+        chunk1 = chunk2 = ''
+
+        # Weekday
+        m = self.ptc.CRE_WEEKDAY.search(s)
+        if m is not None:
+            gv = m.group()
+            if s not in self.ptc.dayOffsets:
+
+                if (gv != s):
+                    # capture remaining string
+                    parseStr = gv
+                    chunk1 = s[:m.start()]
+                    chunk2 = s[m.end():]
+                    s = '%s %s' % (chunk1, chunk2)
+                else:
+                    parseStr = s
+                    s = ''
+
+        if parseStr:
+            debug and log.debug(
+                'found (weekday) [%s][%s][%s]', parseStr, chunk1, chunk2)
+            sourceTime = self._evalWeekday(parseStr, sourceTime)
+
+        return s, sourceTime, bool(parseStr)
+
+    def _partialParseTimeStr(self, s, sourceTime):
+        """
+        Test if the given C{s} matches CRE_TIME; used by L{parse()}.
+
+        @type  s:          string
+        @param s:          date/time text to evaluate
+        @type  sourceTime: struct_time
+        @param sourceTime: C{struct_time} value to use as the base
+
+        @rtype:  tuple
+        @return: tuple of the remaining date/time text, the updated
+                 C{sourceTime} and a boolean indicating whether C{s} matched
+
+        """
+        parseStr = None
+        chunk1 = chunk2 = ''
+
+        # Natural language time strings
+        m = self.ptc.CRE_TIME.search(s)
+        if m is not None or s in self.ptc.re_values['now']:
+
+            if (m and m.group() != s):
+                # capture remaining string
+                parseStr = m.group()
+                chunk1 = s[:m.start()]
+                chunk2 = s[m.end():]
+                s = '%s %s' % (chunk1, chunk2)
+            else:
+                parseStr = s
+                s = ''
+
+        if parseStr:
+            debug and log.debug(
+                'found (time) [%s][%s][%s]', parseStr, chunk1, chunk2)
+            sourceTime = self._evalTimeStr(parseStr, sourceTime)
+
+        return s, sourceTime, bool(parseStr)
+
+    def _partialParseMeridian(self, s, sourceTime):
+        """
+        Test if the given C{s} matches CRE_TIMEHMS2; used by L{parse()}.
+
+        @type  s:          string
+        @param s:          date/time text to evaluate
+        @type  sourceTime: struct_time
+        @param sourceTime: C{struct_time} value to use as the base
+
+        @rtype:  tuple
+        @return: tuple of the remaining date/time text, the updated
+                 C{sourceTime} and a boolean indicating whether C{s} matched
+
+        """
+        parseStr = None
+        chunk1 = chunk2 = ''
+
+        # HH:MM(:SS) am/pm time strings
+        m = self.ptc.CRE_TIMEHMS2.search(s)
+        if m is not None:
+
+            if m.group('minutes') is not None:
+                if m.group('seconds') is not None:
+                    parseStr = '%s:%s:%s' % (m.group('hours'),
+                                             m.group('minutes'),
+                                             m.group('seconds'))
+                else:
+                    parseStr = '%s:%s' % (m.group('hours'),
+                                          m.group('minutes'))
+            else:
+                parseStr = m.group('hours')
+            parseStr += ' ' + m.group('meridian')
+
+            chunk1 = s[:m.start()]
+            chunk2 = s[m.end():]
+
+            s = '%s %s' % (chunk1, chunk2)
+
+        if parseStr:
+            debug and log.debug('found (meridian) [%s][%s][%s]',
+                                parseStr, chunk1, chunk2)
+            sourceTime = self._evalMeridian(parseStr, sourceTime)
+
+        return s, sourceTime, bool(parseStr)
+
+    def _partialParseTimeStd(self, s, sourceTime):
+        """
+        Test if the given C{s} matches CRE_TIMEHMS; used by L{parse()}.
+
+        @type  s:          string
+        @param s:          date/time text to evaluate
+        @type  sourceTime: struct_time
+        @param sourceTime: C{struct_time} value to use as the base
+
+        @rtype:  tuple
+        @return: tuple of the remaining date/time text, the updated
+                 C{sourceTime} and a boolean indicating whether C{s} matched
+
+        """
+        parseStr = None
+        chunk1 = chunk2 = ''
+
+        # HH:MM(:SS) time strings
+        m = self.ptc.CRE_TIMEHMS.search(s)
+        if m is not None:
+
+            if m.group('seconds') is not None:
+                parseStr = '%s:%s:%s' % (m.group('hours'),
+                                         m.group('minutes'),
+                                         m.group('seconds'))
+                chunk1 = s[:m.start('hours')]
+                chunk2 = s[m.end('seconds'):]
+            else:
+                parseStr = '%s:%s' % (m.group('hours'),
+                                      m.group('minutes'))
+                chunk1 = s[:m.start('hours')]
+                chunk2 = s[m.end('minutes'):]
+
+            s = '%s %s' % (chunk1, chunk2)
+
+        if parseStr:
+            debug and log.debug(
+                'found (hms) [%s][%s][%s]', parseStr, chunk1, chunk2)
+            sourceTime = self._evalTimeStd(parseStr, sourceTime)
+
+        return s, sourceTime, bool(parseStr)
+
+    def parseDT(self, datetimeString, sourceTime=None,
+                tzinfo=None, version=None):
+        """
+        C{datetimeString} is interpreted as in C{parse()}.  C{sourceTime} has
+        the same meaning as in C{parse()}, but also accepts datetime objects.
+        C{tzinfo} accepts a tzinfo object; using pytz is advisable.
+
+
+        @type  datetimeString: string
+        @param datetimeString: date/time text to evaluate
+        @type  sourceTime:     struct_time, datetime, date, time
+        @param sourceTime:     time value to use as the base
+        @type  tzinfo:         tzinfo
+        @param tzinfo:         Timezone to apply to generated datetime objs.
+        @type  version:        integer
+        @param version:        style version; defaults to the version value
+                               of the L{Calendar} instance
+
+        @rtype:  tuple
+        @return: tuple of the parsed C{datetime} object (localized with
+                 C{tzinfo}) and the result flag/context
+
+        see C{parse()} for return code details.
+        """
+        # if sourceTime has a timetuple method, use that; otherwise just pass
+        # the value through to parse() and assume the caller knows what they
+        # are doing.
+        sourceTime = getattr(sourceTime, 'timetuple', (lambda: sourceTime))()
+        # pytz is strongly recommended.  Use localize() if the tzinfo object
+        # provides it, otherwise fall back to datetime.replace(); note that
+        # None is a valid tzinfo value in the fallback case.
+        localize = getattr(
+            tzinfo,
+            'localize',
+            (lambda dt: dt.replace(tzinfo=tzinfo)),  # ugly hack is ugly :(
+        )
+
+        # Punt
+        time_struct, ret_code = self.parse(
+            datetimeString,
+            sourceTime=sourceTime,
+            version=version)
+
+        # Comments from the GitHub issues indicate that this method should
+        # keep the same return signature as the parse() method it punts to,
+        # except that it returns datetime objects instead of time_structs.
+        dt = localize(datetime.datetime(*time_struct[:6]))
+        return dt, ret_code
+
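+    # A minimal usage sketch for parseDT(), assuming this module is importable
+    # as `parsedatetime` and that pytz is installed (both are assumptions of
+    # this example, not requirements of the method itself):
+    #
+    #   import pytz
+    #   import parsedatetime
+    #   cal = parsedatetime.Calendar()
+    #   dt, result = cal.parseDT('tomorrow at noon',
+    #                            tzinfo=pytz.timezone('US/Eastern'))
+    #   # dt should be a timezone-aware datetime; with the default flag-style
+    #   # version, result should be 3 (parsed as a datetime)
+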
+    def parse(self, datetimeString, sourceTime=None, version=None):
+        """
+        Splits the given C{datetimeString} into tokens, finds the regex
+        patterns that match and then calculates a C{struct_time} value from
+        the chunks.
+
+        If C{sourceTime} is given then the C{struct_time} value will be
+        calculated from that value, otherwise from the current date/time.
+
+        If the C{datetimeString} is parsed and a date/time value is found, then::
+
+            If C{version} equals L{VERSION_FLAG_STYLE}, the second item of
+            the returned tuple will be a flag to let you know what kind of
+            C{struct_time} value is being returned::
+
+                0 = not parsed at all
+                1 = parsed as a C{date}
+                2 = parsed as a C{time}
+                3 = parsed as a C{datetime}
+
+            If C{version} equals L{VERSION_CONTEXT_STYLE}, the second value
+            will be an instance of L{pdtContext}
+
+        @type  datetimeString: string
+        @param datetimeString: date/time text to evaluate
+        @type  sourceTime:     struct_time
+        @param sourceTime:     C{struct_time} value to use as the base
+        @type  version:        integer
+        @param version:        style version; defaults to the version value
+                               of the L{Calendar} instance
+
+        @rtype:  tuple
+        @return: tuple of: modified C{sourceTime} and the result flag/context
+        """
+        debug and log.debug('parse()')
+
+        datetimeString = re.sub(r'(\w)\.(\s)', r'\1\2', datetimeString)
+        datetimeString = re.sub(r'(\w)[\'"](\s|$)', r'\1 \2', datetimeString)
+        datetimeString = re.sub(r'(\s|^)[\'"](\w)', r'\1 \2', datetimeString)
+
+        if sourceTime:
+            if isinstance(sourceTime, datetime.datetime):
+                debug and log.debug('coercing datetime to timetuple')
+                sourceTime = sourceTime.timetuple()
+            else:
+                if not isinstance(sourceTime, time.struct_time) and \
+                        not isinstance(sourceTime, tuple):
+                    raise ValueError('sourceTime is not a struct_time')
+        else:
+            sourceTime = time.localtime()
+
+        with self.context() as ctx:
+            s = datetimeString.lower().strip()
+            debug and log.debug('remainedString (before parsing): [%s]', s)
+
+            while s:
+                for parseMeth in (self._partialParseModifier,
+                                  self._partialParseUnits,
+                                  self._partialParseQUnits,
+                                  self._partialParseDateStr,
+                                  self._partialParseDateStd,
+                                  self._partialParseDayStr,
+                                  self._partialParseWeekday,
+                                  self._partialParseTimeStr,
+                                  self._partialParseMeridian,
+                                  self._partialParseTimeStd):
+                    retS, retTime, matched = parseMeth(s, sourceTime)
+                    if matched:
+                        s, sourceTime = retS.strip(), retTime
+                        break
+                else:
+                    # nothing matched
+                    s = ''
+
+                debug and log.debug('hasDate: [%s], hasTime: [%s]',
+                                    ctx.hasDate, ctx.hasTime)
+                debug and log.debug('remainedString: [%s]', s)
+
+            # String is not parsed at all
+            if sourceTime is None:
+                debug and log.debug('not parsed [%s]', str(sourceTime))
+                sourceTime = time.localtime()
+
+        if not isinstance(sourceTime, time.struct_time):
+            sourceTime = time.struct_time(sourceTime)
+
+        version = self.version if version is None else version
+        if version == VERSION_CONTEXT_STYLE:
+            return sourceTime, ctx
+        else:
+            return sourceTime, ctx.dateTimeFlag
+
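+    # A minimal sketch of parse(), assuming the package import name
+    # `parsedatetime` and that VERSION_CONTEXT_STYLE is exposed at package
+    # level, as it is in this module:
+    #
+    #   import parsedatetime
+    #   cal = parsedatetime.Calendar()
+    #   timeStruct, flag = cal.parse('next friday at 5pm')
+    #   # flag should be 3 (date and time found); 0 would mean nothing parsed
+    #
+    #   timeStruct, ctx = cal.parse('next friday at 5pm',
+    #                               version=parsedatetime.VERSION_CONTEXT_STYLE)
+    #   # ctx.hasDate and ctx.hasTime should both be True
+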
+    def inc(self, source, month=None, year=None):
+        """
+        Takes the given C{source} date, or current date if none is
+        passed, and increments it according to the values passed in
+        by month and/or year.
+
+        This routine is needed because Python's C{timedelta()} function
+        does not allow for month or year increments.
+
+        @type  source: struct_time
+        @param source: C{struct_time} value to increment
+        @type  month:  float or integer
+        @param month:  optional number of months to increment
+        @type  year:   float or integer
+        @param year:   optional number of years to increment
+
+        @rtype:  datetime
+        @return: C{source} incremented by the number of months and/or years
+        """
+        yr = source.year
+        mth = source.month
+        dy = source.day
+
+        try:
+            month = float(month)
+        except (TypeError, ValueError):
+            month = 0
+
+        try:
+            year = float(year)
+        except (TypeError, ValueError):
+            year = 0
+        finally:
+            month += year * 12
+            year = 0
+
+        subMi = 0.0
+        maxDay = 0
+        if month:
+            mi = int(month)
+            subMi = month - mi
+
+            y = int(mi / 12.0)
+            m = mi - y * 12
+
+            mth = mth + m
+            if mth < 1:  # cross start-of-year?
+                y -= 1  # yes - decrement year
+                mth += 12  # and fix month
+            elif mth > 12:  # cross end-of-year?
+                y += 1  # yes - increment year
+                mth -= 12  # and fix month
+
+            yr += y
+
+            # if the day ends up past the last day of
+            # the new month, set it to the last day
+            maxDay = self.ptc.daysInMonth(mth, yr)
+            if dy > maxDay:
+                dy = maxDay
+
+        if yr > datetime.MAXYEAR or yr < datetime.MINYEAR:
+            raise OverflowError('year is out of range')
+
+        d = source.replace(year=yr, month=mth, day=dy)
+        if subMi:
+            d += datetime.timedelta(days=subMi * maxDay)
+        return source + (d - source)
+
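+    # A small sketch of inc(), which covers the month/year arithmetic that
+    # datetime.timedelta cannot express (package import name assumed to be
+    # `parsedatetime`):
+    #
+    #   import datetime
+    #   import parsedatetime
+    #   cal = parsedatetime.Calendar()
+    #   cal.inc(datetime.datetime(2016, 1, 31), month=1)
+    #   # the day is clamped to the last day of the target month, so this
+    #   # should yield datetime.datetime(2016, 2, 29, 0, 0)
+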
+    def nlp(self, inputString, sourceTime=None, version=None):
+        """Utilizes parse() after making judgements about what datetime
+        information belongs together.
+
+        It makes logical groupings based on proximity and returns a parsed
+        datetime for each matched grouping of datetime text, along with
+        location info within the given inputString.
+
+        @type  inputString: string
+        @param inputString: natural language text to evaluate
+        @type  sourceTime:  struct_time
+        @param sourceTime:  C{struct_time} value to use as the base
+        @type  version:     integer
+        @param version:     style version; defaults to the version value
+                            of the L{Calendar} instance
+
+        @rtype:  tuple or None
+        @return: tuple of tuples in the format (parsed_datetime as
+                 datetime.datetime, flags as int, start_pos as int,
+                 end_pos as int, matched_text as string) or None if there
+                 were no matches
+        """
+
+        orig_inputstring = inputString
+
+        # replace periods at the end of sentences with spaces, as opposed to
+        # removing them altogether, in order to retain relative positions
+        # (identified by alpha, period, space).  This is required for some of
+        # the regex patterns to match.
+        inputString = re.sub(r'(\w)(\.)(\s)', r'\1 \3', inputString).lower()
+        inputString = re.sub(r'(\w)(\'|")(\s|$)', r'\1 \3', inputString)
+        inputString = re.sub(r'(\s|^)(\'|")(\w)', r'\1 \3', inputString)
+
+        startpos = 0  # the start position in the inputString during the loop
+
+        # list of lists in format:
+        # [startpos, endpos, matchedstring, flags, type]
+        matches = []
+
+        while startpos < len(inputString):
+
+            # empty match
+            leftmost_match = [0, 0, None, 0, None]
+
+            # Modifier like next\prev..
+            m = self.ptc.CRE_MODIFIER.search(inputString[startpos:])
+            if m is not None:
+                if leftmost_match[1] == 0 or \
+                        leftmost_match[0] > m.start() + startpos:
+                    leftmost_match[0] = m.start() + startpos
+                    leftmost_match[1] = m.end() + startpos
+                    leftmost_match[2] = m.group()
+                    leftmost_match[3] = 0
+                    leftmost_match[4] = 'modifier'
+
+            # Quantity + Units
+            m = self.ptc.CRE_UNITS.search(inputString[startpos:])
+            if m is not None:
+                debug and log.debug('CRE_UNITS matched')
+                if self._UnitsTrapped(inputString[startpos:], m, 'units'):
+                    debug and log.debug('day suffix trapped by unit match')
+                else:
+
+                    if leftmost_match[1] == 0 or \
+                            leftmost_match[0] > m.start('qty') + startpos:
+                        leftmost_match[0] = m.start('qty') + startpos
+                        leftmost_match[1] = m.end('qty') + startpos
+                        leftmost_match[2] = m.group('qty')
+                        leftmost_match[3] = 3
+                        leftmost_match[4] = 'units'
+
+                        if m.start('qty') > 0 and \
+                                inputString[m.start('qty') - 1] == '-':
+                            leftmost_match[0] = leftmost_match[0] - 1
+                            leftmost_match[2] = '-' + leftmost_match[2]
+
+            # Quantity + Units
+            m = self.ptc.CRE_QUNITS.search(inputString[startpos:])
+            if m is not None:
+                debug and log.debug('CRE_QUNITS matched')
+                if self._UnitsTrapped(inputString[startpos:], m, 'qunits'):
+                    debug and log.debug('day suffix trapped by qunit match')
+                else:
+                    if leftmost_match[1] == 0 or \
+                            leftmost_match[0] > m.start('qty') + startpos:
+                        leftmost_match[0] = m.start('qty') + startpos
+                        leftmost_match[1] = m.end('qty') + startpos
+                        leftmost_match[2] = m.group('qty')
+                        leftmost_match[3] = 3
+                        leftmost_match[4] = 'qunits'
+
+                        if m.start('qty') > 0 and \
+                                inputString[m.start('qty') - 1] == '-':
+                            leftmost_match[0] = leftmost_match[0] - 1
+                            leftmost_match[2] = '-' + leftmost_match[2]
+
+            m = self.ptc.CRE_DATE3.search(inputString[startpos:])
+            # NO LONGER NEEDED, THE REGEXP HANDLES MTHNAME NOW
+            # for match in self.ptc.CRE_DATE3.finditer(inputString[startpos:]):
+            # to prevent "HH:MM(:SS) time strings" expressions from
+            # triggering this regex, we checks if the month field exists
+            # in the searched expression, if it doesn't exist, the date
+            # field is not valid
+            #     if match.group('mthname'):
+            #         m = self.ptc.CRE_DATE3.search(inputString[startpos:],
+            #                                       match.start())
+            #         break
+
+            # String date format
+            if m is not None:
+                if leftmost_match[1] == 0 or \
+                        leftmost_match[0] > m.start('date') + startpos:
+                    leftmost_match[0] = m.start('date') + startpos
+                    leftmost_match[1] = m.end('date') + startpos
+                    leftmost_match[2] = m.group('date')
+                    leftmost_match[3] = 1
+                    leftmost_match[4] = 'dateStr'
+
+            # Standard date format
+            m = self.ptc.CRE_DATE.search(inputString[startpos:])
+            if m is not None:
+                if leftmost_match[1] == 0 or \
+                        leftmost_match[0] > m.start('date') + startpos:
+                    leftmost_match[0] = m.start('date') + startpos
+                    leftmost_match[1] = m.end('date') + startpos
+                    leftmost_match[2] = m.group('date')
+                    leftmost_match[3] = 1
+                    leftmost_match[4] = 'dateStd'
+
+            # Natural language day strings
+            m = self.ptc.CRE_DAY.search(inputString[startpos:])
+            if m is not None:
+                if leftmost_match[1] == 0 or \
+                        leftmost_match[0] > m.start() + startpos:
+                    leftmost_match[0] = m.start() + startpos
+                    leftmost_match[1] = m.end() + startpos
+                    leftmost_match[2] = m.group()
+                    leftmost_match[3] = 1
+                    leftmost_match[4] = 'dayStr'
+
+            # Weekday
+            m = self.ptc.CRE_WEEKDAY.search(inputString[startpos:])
+            if m is not None:
+                if inputString[startpos:] not in self.ptc.dayOffsets:
+                    if leftmost_match[1] == 0 or \
+                            leftmost_match[0] > m.start() + startpos:
+                        leftmost_match[0] = m.start() + startpos
+                        leftmost_match[1] = m.end() + startpos
+                        leftmost_match[2] = m.group()
+                        leftmost_match[3] = 1
+                        leftmost_match[4] = 'weekdy'
+
+            # Natural language time strings
+            m = self.ptc.CRE_TIME.search(inputString[startpos:])
+            if m is not None:
+                if leftmost_match[1] == 0 or \
+                        leftmost_match[0] > m.start() + startpos:
+                    leftmost_match[0] = m.start() + startpos
+                    leftmost_match[1] = m.end() + startpos
+                    leftmost_match[2] = m.group()
+                    leftmost_match[3] = 2
+                    leftmost_match[4] = 'timeStr'
+
+            # HH:MM(:SS) am/pm time strings
+            m = self.ptc.CRE_TIMEHMS2.search(inputString[startpos:])
+            if m is not None:
+                if leftmost_match[1] == 0 or \
+                        leftmost_match[0] > m.start('hours') + startpos:
+                    leftmost_match[0] = m.start('hours') + startpos
+                    leftmost_match[1] = m.end('meridian') + startpos
+                    leftmost_match[2] = inputString[leftmost_match[0]:
+                                                    leftmost_match[1]]
+                    leftmost_match[3] = 2
+                    leftmost_match[4] = 'meridian'
+
+            # HH:MM(:SS) time strings
+            m = self.ptc.CRE_TIMEHMS.search(inputString[startpos:])
+            if m is not None:
+                if leftmost_match[1] == 0 or \
+                        leftmost_match[0] > m.start('hours') + startpos:
+                    leftmost_match[0] = m.start('hours') + startpos
+                    if m.group('seconds') is not None:
+                        leftmost_match[1] = m.end('seconds') + startpos
+                    else:
+                        leftmost_match[1] = m.end('minutes') + startpos
+                    leftmost_match[2] = inputString[leftmost_match[0]:
+                                                    leftmost_match[1]]
+                    leftmost_match[3] = 2
+                    leftmost_match[4] = 'timeStd'
+
+            # Units only; must be preceded by a modifier
+            if len(matches) > 0 and matches[-1][3] == 0:
+                m = self.ptc.CRE_UNITS_ONLY.search(inputString[startpos:])
+                # Ensure that any match is immediately preceded by the
+                # modifier. "Next is the word 'month'" should not parse as a
+                # date while "next month" should
+                if m is not None and \
+                        inputString[startpos:startpos +
+                                    m.start()].strip() == '':
+                    debug and log.debug('CRE_UNITS_ONLY matched [%s]',
+                                        m.group())
+                    if leftmost_match[1] == 0 or \
+                            leftmost_match[0] > m.start() + startpos:
+                        leftmost_match[0] = m.start() + startpos
+                        leftmost_match[1] = m.end() + startpos
+                        leftmost_match[2] = m.group()
+                        leftmost_match[3] = 3
+                        leftmost_match[4] = 'unitsOnly'
+
+            # set the start position to the end pos of the leftmost match
+            startpos = leftmost_match[1]
+
+            # nothing was detected
+            # so break out of the loop
+            if startpos == 0:
+                startpos = len(inputString)
+            else:
+                if leftmost_match[3] > 0:
+                    m = self.ptc.CRE_NLP_PREFIX.search(
+                        inputString[:leftmost_match[0]] +
+                        ' ' + str(leftmost_match[3]))
+                    if m is not None:
+                        leftmost_match[0] = m.start('nlp_prefix')
+                        leftmost_match[2] = inputString[leftmost_match[0]:
+                                                        leftmost_match[1]]
+                matches.append(leftmost_match)
+
+        # find matches in proximity with one another and
+        # return all the parsed values
+        proximity_matches = []
+        if len(matches) > 1:
+            combined = ''
+            from_match_index = 0
+            date = matches[0][3] == 1
+            time = matches[0][3] == 2
+            units = matches[0][3] == 3
+            for i in range(1, len(matches)):
+
+                # test proximity (are there characters between matches?)
+                endofprevious = matches[i - 1][1]
+                begofcurrent = matches[i][0]
+                if orig_inputstring[endofprevious:
+                                    begofcurrent].lower().strip() != '':
+                    # this one isn't in proximity, but maybe
+                    # we have enough to make a datetime
+                    # TODO: make sure the combination of
+                    # formats (modifier, dateStd, etc) makes logical sense
+                    # before parsing together
+                    if date or time or units:
+                        combined = orig_inputstring[matches[from_match_index]
+                                                    [0]:matches[i - 1][1]]
+                        parsed_datetime, flags = self.parse(combined,
+                                                            sourceTime,
+                                                            version)
+                        proximity_matches.append((
+                            datetime.datetime(*parsed_datetime[:6]),
+                            flags,
+                            matches[from_match_index][0],
+                            matches[i - 1][1],
+                            combined))
+                    # not in proximity, reset starting from current
+                    from_match_index = i
+                    date = matches[i][3] == 1
+                    time = matches[i][3] == 2
+                    units = matches[i][3] == 3
+                    continue
+                else:
+                    if matches[i][3] == 1:
+                        date = True
+                    if matches[i][3] == 2:
+                        time = True
+                    if matches[i][3] == 3:
+                        units = True
+
+            # check the last grouping; we may have enough to make a datetime
+            if date or time or units:
+                combined = orig_inputstring[matches[from_match_index][0]:
+                                            matches[len(matches) - 1][1]]
+                parsed_datetime, flags = self.parse(combined, sourceTime,
+                                                    version)
+                proximity_matches.append((
+                    datetime.datetime(*parsed_datetime[:6]),
+                    flags,
+                    matches[from_match_index][0],
+                    matches[len(matches) - 1][1],
+                    combined))
+
+        elif len(matches) == 0:
+            return None
+        else:
+            if matches[0][3] == 0:  # not enough info to parse
+                return None
+            else:
+                combined = orig_inputstring[matches[0][0]:matches[0][1]]
+                parsed_datetime, flags = self.parse(matches[0][2], sourceTime,
+                                                    version)
+                proximity_matches.append((
+                    datetime.datetime(*parsed_datetime[:6]),
+                    flags,
+                    matches[0][0],
+                    matches[0][1],
+                    combined))
+
+        return tuple(proximity_matches)
+
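+# A minimal sketch of Calendar.nlp(), assuming the package import name
+# `parsedatetime`:
+#
+#   import parsedatetime
+#   cal = parsedatetime.Calendar()
+#   results = cal.nlp("Let's meet tomorrow at 9am or on June 5th.")
+#   for dt, flag, start, end, matched in results or ():
+#       print matched, dt
+#   # each tuple carries the parsed datetime, the flag (1=date, 2=time,
+#   # 3=datetime), and the start/end positions of the matched text
+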
+
+def _initSymbols(ptc):
+    """
+    Initialize symbols and single character constants.
+    """
+    # build am and pm lists to contain
+    # original case, lowercase, first-char and dotted
+    # versions of the meridian text
+    ptc.am = ['', '']
+    ptc.pm = ['', '']
+    for idx, xm in enumerate(ptc.locale.meridian[:2]):
+        # 0: am
+        # 1: pm
+        target = ['am', 'pm'][idx]
+        setattr(ptc, target, [xm])
+        target = getattr(ptc, target)
+        if xm:
+            lxm = xm.lower()
+            target.extend((xm[0], '{0}.{1}.'.format(*xm),
+                           lxm, lxm[0], '{0}.{1}.'.format(*lxm)))
+
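+# For example, with a locale whose meridian text is ['AM', 'PM'], the loop
+# above should leave ptc.am == ['AM', 'A', 'A.M.', 'am', 'a', 'a.m.'] and the
+# corresponding 'PM' variants in ptc.pm.
+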
+
+class Constants(object):
+
+    """
+    Default set of constants for parsedatetime.
+
+    If PyICU is present, then the class will first try to get PyICU
+    to return a locale specified by C{localeID}.  If either C{localeID} is
+    None or if the locale does not exist within PyICU, then each of the
+    locales defined in C{fallbackLocales} is tried in order.
+
+    If PyICU is not present or none of the specified locales can be used,
+    then the class will initialize itself to the en_US locale.
+
+    If PyICU is not present or not requested, only the locales defined by
+    C{pdtLocales} will be searched.
+    """
+
+    def __init__(self, localeID=None, usePyICU=True,
+                 fallbackLocales=['en_US']):
+        self.localeID = localeID
+        self.fallbackLocales = fallbackLocales[:]
+
+        if 'en_US' not in self.fallbackLocales:
+            self.fallbackLocales.append('en_US')
+
+        # define non-locale specific constants
+        self.locale = None
+        self.usePyICU = usePyICU
+
+        # starting cache of leap years
+        # daysInMonth will add to this if during
+        # runtime it gets a request for a year not found
+        self._leapYears = list(range(1904, 2097, 4))
+
+        self.Second = 1
+        self.Minute = 60  # 60 * self.Second
+        self.Hour = 3600  # 60 * self.Minute
+        self.Day = 86400  # 24 * self.Hour
+        self.Week = 604800  # 7   * self.Day
+        self.Month = 2592000  # 30  * self.Day
+        self.Year = 31536000  # 365 * self.Day
+
+        self._DaysInMonthList = (31, 28, 31, 30, 31, 30,
+                                 31, 31, 30, 31, 30, 31)
+        self.rangeSep = '-'
+        self.BirthdayEpoch = 50
+
+        # When True, the starting time for all relative calculations will come
+        # from the given sourceTime; otherwise it will be 9am
+
+        self.StartTimeFromSourceTime = False
+
+        # YearParseStyle controls how we parse "Jun 12", i.e. dates that do
+        # not have a year present.  The default is to compare the date given
+        # to the current date, and if prior, then assume the next year.
+        # Setting this to 0 will prevent that.
+
+        self.YearParseStyle = 1
+
+        # DOWParseStyle controls how we parse "Tuesday"
+        # If the current day was Thursday and the text to parse is "Tuesday"
+        # then the following table shows how each style would be returned
+        # -1, 0, +1
+        #
+        # Current day marked as ***
+        #
+        #          Sun Mon Tue Wed Thu Fri Sat
+        # week -1
+        # current         -1,0     ***
+        # week +1          +1
+        #
+        # If the current day was Monday and the text to parse is "Tuesday"
+        # then the following table shows how each style would be returned
+        # -1, 0, +1
+        #
+        #          Sun Mon Tue Wed Thu Fri Sat
+        # week -1           -1
+        # current      *** 0,+1
+        # week +1
+
+        self.DOWParseStyle = 1
+
+        # CurrentDOWParseStyle controls how we parse "Friday"
+        # If the current day was Friday and the text to parse is "Friday"
+        # then the following table shows how each style would be returned
+        # True/False. This also depends on DOWParseStyle.
+        #
+        # Current day marked as ***
+        #
+        # DOWParseStyle = 0
+        #          Sun Mon Tue Wed Thu Fri Sat
+        # week -1
+        # current                      T,F
+        # week +1
+        #
+        # DOWParseStyle = -1
+        #          Sun Mon Tue Wed Thu Fri Sat
+        # week -1                       F
+        # current                       T
+        # week +1
+        #
+        # DOWParseStyle = +1
+        #
+        #          Sun Mon Tue Wed Thu Fri Sat
+        # week -1
+        # current                       T
+        # week +1                       F
+
+        self.CurrentDOWParseStyle = False
+
+        if self.usePyICU:
+            self.locale = get_icu(self.localeID)
+
+            if self.locale.icu is None:
+                self.usePyICU = False
+                self.locale = None
+
+        if self.locale is None:
+            if self.localeID not in pdtLocales:
+                for localeId in range(0, len(self.fallbackLocales)):
+                    self.localeID = self.fallbackLocales[localeId]
+                    if self.localeID in pdtLocales:
+                        break
+
+            self.locale = pdtLocales[self.localeID]
+
+        if self.locale is not None:
+
+            def _getLocaleDataAdjusted(localeData):
+                """
+                If localeData is defined as ["mon|mnd", 'tu|tues'...] then this
+                function splits those definitions on |
+                """
+                adjusted = []
+                for d in localeData:
+                    if '|' in d:
+                        adjusted += d.split("|")
+                    else:
+                        adjusted.append(d)
+                return adjusted
+
+            mths = _getLocaleDataAdjusted(self.locale.Months)
+            smths = _getLocaleDataAdjusted(self.locale.shortMonths)
+            swds = _getLocaleDataAdjusted(self.locale.shortWeekdays)
+            wds = _getLocaleDataAdjusted(self.locale.Weekdays)
+
+            re_join = lambda g: '|'.join(re.escape(i) for i in g)
+
+            # escape any regex special characters that may be found
+            self.locale.re_values['months'] = re_join(mths)
+            self.locale.re_values['shortmonths'] = re_join(smths)
+            self.locale.re_values['days'] = re_join(wds)
+            self.locale.re_values['shortdays'] = re_join(swds)
+            self.locale.re_values['dayoffsets'] = \
+                re_join(self.locale.dayOffsets)
+            self.locale.re_values['numbers'] = \
+                re_join(self.locale.numbers)
+            self.locale.re_values['decimal_mark'] = \
+                re.escape(self.locale.decimal_mark)
+
+            units = [unit for units in self.locale.units.values()
+                     for unit in units]  # flatten
+            units.sort(key=len, reverse=True)  # longest first
+            self.locale.re_values['units'] = re_join(units)
+            self.locale.re_values['modifiers'] = re_join(self.locale.Modifiers)
+            self.locale.re_values['sources'] = re_join(self.locale.re_sources)
+
+            # For distinguishing numeric dates from times, look for timeSep
+            # and meridian, if specified in the locale
+            self.locale.re_values['timecomponents'] = \
+                re_join(self.locale.timeSep + self.locale.meridian)
+
+            # build weekday offsets - yes, it assumes the Weekday and
+            # shortWeekday lists are in the same order and Mon..Sun
+            # (Python style)
+            def _buildOffsets(offsetDict, localeData, indexStart):
+                o = indexStart
+                for key in localeData:
+                    if '|' in key:
+                        for k in key.split('|'):
+                            offsetDict[k] = o
+                    else:
+                        offsetDict[key] = o
+                    o += 1
+
+            _buildOffsets(self.locale.WeekdayOffsets,
+                          self.locale.Weekdays, 0)
+            _buildOffsets(self.locale.WeekdayOffsets,
+                          self.locale.shortWeekdays, 0)
+
+            # build month offsets - yes, it assumes the Months and shortMonths
+            # lists are in the same order and Jan..Dec
+            _buildOffsets(self.locale.MonthOffsets,
+                          self.locale.Months, 1)
+            _buildOffsets(self.locale.MonthOffsets,
+                          self.locale.shortMonths, 1)
+
+        _initSymbols(self)
+
+        # TODO: add code to parse the date formats and build the regexes up
+        # from sub-parts, find all hard-coded uses of date/time separators
+
+        # not being used in code, but kept in case others are manually
+        # utilizing this regex for their own purposes
+        self.RE_DATE4 = r'''(?P<date>
+                                (
+                                    (
+                                        (?P<day>\d\d?)
+                                        (?P<suffix>{daysuffix})?
+                                        (,)?
+                                        (\s)?
+                                    )
+                                    (?P<mthname>
+                                        \b({months}|{shortmonths})\b
+                                    )\s?
+                                    (?P<year>\d\d
+                                        (\d\d)?
+                                    )?
+                                )
+                            )'''.format(**self.locale.re_values)
+
+        # Still not completely sure of the behavior of this regex, or whether
+        # it would be better to consume all possible irrelevant characters
+        # before the option groups (but within the {1,3} repetition group)
+        # rather than inside each option group, as it currently does.
+        # For now, all previously passing tests still pass, including the fix
+        # for the bug of matching a 4-digit year as ddyy when the day is
+        # absent from the string.
+        self.RE_DATE3 = r'''(?P<date>
+                                (?:
+                                    (?:^|\s)
+                                    (?P<mthname>
+                                        {months}|{shortmonths}
+                                    )\b
+                                    |
+                                    (?:^|\s)
+                                    (?P<day>[1-9]|[012]\d|3[01])
+                                    (?P<suffix>{daysuffix}|)\b
+                                    (?!\s*(?:{timecomponents}))
+                                    |
+                                    ,?\s
+                                    (?P<year>\d\d(?:\d\d|))\b
+                                    (?!\s*(?:{timecomponents}))
+                                ){{1,3}}
+                                (?(mthname)|$-^)
+                            )'''.format(**self.locale.re_values)
+
+        # not being used in code, but kept in case others are manually
+        # utilizing this regex for their own purposes
+        self.RE_MONTH = r'''(\s|^)
+                            (?P<month>
+                                (
+                                    (?P<mthname>
+                                        \b({months}|{shortmonths})\b
+                                    )
+                                    (\s?
+                                        (?P<year>(\d{{4}}))
+                                    )?
+                                )
+                            )
+                            (?=\s|$|[^\w])'''.format(**self.locale.re_values)
+
+        self.RE_WEEKDAY = r'''\b
+                              (?:
+                                  {days}|{shortdays}
+                              )
+                              \b'''.format(**self.locale.re_values)
+
+        self.RE_NUMBER = (r'(\b(?:{numbers})\b|\d+(?:{decimal_mark}\d+|))'
+                          .format(**self.locale.re_values))
+
+        self.RE_SPECIAL = (r'(?P<special>^[{specials}]+)\s+'
+                           .format(**self.locale.re_values))
+
+        self.RE_UNITS_ONLY = (r'''\b({units})\b'''
+                              .format(**self.locale.re_values))
+
+        self.RE_UNITS = r'''\b(?P<qty>
+                                -?
+                                (?:\d+(?:{decimal_mark}\d+|)|(?:{numbers})\b)\s*
+                                (?P<units>{units})
+                            )\b'''.format(**self.locale.re_values)
+
+        self.RE_QUNITS = r'''\b(?P<qty>
+                                 -?
+                                 (?:\d+(?:{decimal_mark}\d+|)|(?:{numbers})s)\s?
+                                 (?P<qunits>{qunits})
+                             )\b'''.format(**self.locale.re_values)
+
+        self.RE_MODIFIER = r'''\b(?:
+                                   {modifiers}
+                               )\b'''.format(**self.locale.re_values)
+
+        self.RE_TIMEHMS = r'''([\s(\["'-]|^)
+                              (?P<hours>\d\d?)
+                              (?P<tsep>{timeseparator}|)
+                              (?P<minutes>\d\d)
+                              (?:(?P=tsep)
+                                  (?P<seconds>\d\d
+                                      (?:[\.,]\d+)?
+                                  )
+                              )?\b'''.format(**self.locale.re_values)
+
+        self.RE_TIMEHMS2 = r'''([\s(\["'-]|^)
+                               (?P<hours>\d\d?)
+                               (?:
+                                   (?P<tsep>{timeseparator}|)
+                                   (?P<minutes>\d\d?)
+                                   (?:(?P=tsep)
+                                       (?P<seconds>\d\d?
+                                           (?:[\.,]\d+)?
+                                       )
+                                   )?
+                               )?'''.format(**self.locale.re_values)
+
+        # 1, 2, and 3 here refer to the type of match: date, time, or units
+        self.RE_NLP_PREFIX = r'''\b(?P<nlp_prefix>
+                                  (on)
+                                  (\s)+1
+                                  |
+                                  (at|in)
+                                  (\s)+2
+                                  |
+                                  (in)
+                                  (\s)+3
+                                 )'''
+
+        if 'meridian' in self.locale.re_values:
+            self.RE_TIMEHMS2 += (r'\s?(?P<meridian>{meridian})\b'
+                                 .format(**self.locale.re_values))
+        else:
+            self.RE_TIMEHMS2 += r'\b'
+
+        # Always support common . and - separators
+        dateSeps = ''.join(re.escape(s)
+                           for s in self.locale.dateSep + ['-', '.'])
+
+        self.RE_DATE = r'''([\s(\["'-]|^)
+                           (?P<date>
+                                \d\d?[{0}]\d\d?(?:[{0}]\d\d(?:\d\d)?)?
+                                |
+                                \d{{4}}[{0}]\d\d?[{0}]\d\d?
+                            )
+                           \b'''.format(dateSeps)
+
+        self.RE_DATE2 = r'[{0}]'.format(dateSeps)
+
+        assert 'dayoffsets' in self.locale.re_values
+
+        self.RE_DAY = r'''\b
+                          (?:
+                              {dayoffsets}
+                          )
+                          \b'''.format(**self.locale.re_values)
+
+        self.RE_DAY2 = r'''(?P<day>\d\d?)
+                           (?P<suffix>{daysuffix})?
+                       '''.format(**self.locale.re_values)
+
+        self.RE_TIME = r'''\b
+                           (?:
+                               {sources}
+                           )
+                           \b'''.format(**self.locale.re_values)
+
+        self.RE_REMAINING = r'\s+'
+
+        # Regex for date/time ranges
+        self.RE_RTIMEHMS = r'''(\s?|^)
+                               (\d\d?){timeseparator}
+                               (\d\d)
+                               ({timeseparator}(\d\d))?
+                               (\s?|$)'''.format(**self.locale.re_values)
+
+        self.RE_RTIMEHMS2 = (r'''(\s?|^)
+                                 (\d\d?)
+                                 ({timeseparator}(\d\d?))?
+                                 ({timeseparator}(\d\d?))?'''
+                             .format(**self.locale.re_values))
+
+        if 'meridian' in self.locale.re_values:
+            self.RE_RTIMEHMS2 += (r'\s?({meridian})'
+                                  .format(**self.locale.re_values))
+
+        self.RE_RDATE = r'(\d+([%s]\d+)+)' % dateSeps
+        self.RE_RDATE3 = r'''(
+                                (
+                                    (
+                                        \b({months})\b
+                                    )\s?
+                                    (
+                                        (\d\d?)
+                                        (\s?|{daysuffix}|$)+
+                                    )?
+                                    (,\s?\d{{4}})?
+                                )
+                            )'''.format(**self.locale.re_values)
+
+        # "06/07/06 - 08/09/06"
+        self.DATERNG1 = (r'{0}\s?{rangeseparator}\s?{0}'
+                         .format(self.RE_RDATE, **self.locale.re_values))
+
+        # "march 31 - june 1st, 2006"
+        self.DATERNG2 = (r'{0}\s?{rangeseparator}\s?{0}'
+                         .format(self.RE_RDATE3, **self.locale.re_values))
+
+        # "march 1rd -13th"
+        self.DATERNG3 = (r'{0}\s?{rangeseparator}\s?(\d\d?)\s?(rd|st|nd|th)?'
+                         .format(self.RE_RDATE3, **self.locale.re_values))
+
+        # "4:00:55 pm - 5:90:44 am", '4p-5p'
+        self.TIMERNG1 = (r'{0}\s?{rangeseparator}\s?{0}'
+                         .format(self.RE_RTIMEHMS2, **self.locale.re_values))
+
+        self.TIMERNG2 = (r'{0}\s?{rangeseparator}\s?{0}'
+                         .format(self.RE_RTIMEHMS, **self.locale.re_values))
+
+        # "4-5pm "
+        self.TIMERNG3 = (r'\d\d?\s?{rangeseparator}\s?{0}'
+                         .format(self.RE_RTIMEHMS2, **self.locale.re_values))
+
+        # "4:30-5pm "
+        self.TIMERNG4 = (r'{0}\s?{rangeseparator}\s?{1}'
+                         .format(self.RE_RTIMEHMS, self.RE_RTIMEHMS2,
+                                 **self.locale.re_values))
+
+        self.re_option = re.IGNORECASE + re.VERBOSE
+        self.cre_source = {'CRE_SPECIAL': self.RE_SPECIAL,
+                           'CRE_NUMBER': self.RE_NUMBER,
+                           'CRE_UNITS': self.RE_UNITS,
+                           'CRE_UNITS_ONLY': self.RE_UNITS_ONLY,
+                           'CRE_QUNITS': self.RE_QUNITS,
+                           'CRE_MODIFIER': self.RE_MODIFIER,
+                           'CRE_TIMEHMS': self.RE_TIMEHMS,
+                           'CRE_TIMEHMS2': self.RE_TIMEHMS2,
+                           'CRE_DATE': self.RE_DATE,
+                           'CRE_DATE2': self.RE_DATE2,
+                           'CRE_DATE3': self.RE_DATE3,
+                           'CRE_DATE4': self.RE_DATE4,
+                           'CRE_MONTH': self.RE_MONTH,
+                           'CRE_WEEKDAY': self.RE_WEEKDAY,
+                           'CRE_DAY': self.RE_DAY,
+                           'CRE_DAY2': self.RE_DAY2,
+                           'CRE_TIME': self.RE_TIME,
+                           'CRE_REMAINING': self.RE_REMAINING,
+                           'CRE_RTIMEHMS': self.RE_RTIMEHMS,
+                           'CRE_RTIMEHMS2': self.RE_RTIMEHMS2,
+                           'CRE_RDATE': self.RE_RDATE,
+                           'CRE_RDATE3': self.RE_RDATE3,
+                           'CRE_TIMERNG1': self.TIMERNG1,
+                           'CRE_TIMERNG2': self.TIMERNG2,
+                           'CRE_TIMERNG3': self.TIMERNG3,
+                           'CRE_TIMERNG4': self.TIMERNG4,
+                           'CRE_DATERNG1': self.DATERNG1,
+                           'CRE_DATERNG2': self.DATERNG2,
+                           'CRE_DATERNG3': self.DATERNG3,
+                           'CRE_NLP_PREFIX': self.RE_NLP_PREFIX}
+        self.cre_keys = set(self.cre_source.keys())
+
+    def __getattr__(self, name):
+        if name in self.cre_keys:
+            value = re.compile(self.cre_source[name], self.re_option)
+            setattr(self, name, value)
+            return value
+        elif name in self.locale.locale_keys:
+            return getattr(self.locale, name)
+        else:
+            raise AttributeError(name)
+
+    def daysInMonth(self, month, year):
+        """
+        Take the given month (1-12) and year (4-digit) and return the
+        number of days in that month, adjusting for leap years as needed
+        """
+        result = None
+        debug and log.debug('daysInMonth(%s, %s)', month, year)
+        if month > 0 and month <= 12:
+            result = self._DaysInMonthList[month - 1]
+
+            if month == 2:
+                if year in self._leapYears:
+                    result += 1
+                else:
+                    if calendar.isleap(year):
+                        self._leapYears.append(year)
+                        result += 1
+
+        return result
+
+    def getSource(self, sourceKey, sourceTime=None):
+        """
+        Return a date/time tuple based on the given source key
+        and the corresponding entry found in self.re_sources.
+
+        The current time (or C{sourceTime}, if given) supplies the default
+        values; any fields specified for the source key in self.re_sources
+        override them, and the resulting time tuple is returned.
+        """
+        if sourceKey not in self.re_sources:
+            return None
+
+        if sourceTime is None:
+            (yr, mth, dy, hr, mn, sec, wd, yd, isdst) = time.localtime()
+        else:
+            (yr, mth, dy, hr, mn, sec, wd, yd, isdst) = sourceTime
+
+        defaults = {'yr': yr, 'mth': mth, 'dy': dy,
+                    'hr': hr, 'mn': mn, 'sec': sec}
+
+        source = self.re_sources[sourceKey]
+
+        values = {}
+
+        for key, default in defaults.items():
+            values[key] = source.get(key, default)
+
+        return (values['yr'], values['mth'], values['dy'],
+                values['hr'], values['mn'], values['sec'],
+                wd, yd, isdst)
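+# A minimal sketch of configuring Constants directly, assuming the package
+# import name `parsedatetime` and that Calendar still accepts a `constants`
+# argument as in the previous version of this module:
+#
+#   import parsedatetime
+#   c = parsedatetime.Constants(localeID='en_US', usePyICU=False)
+#   c.DOWParseStyle = -1          # favor the past occurrence of a bare weekday
+#   c.StartTimeFromSourceTime = True
+#   cal = parsedatetime.Calendar(constants=c)
+#   c.daysInMonth(2, 2016)        # should return 29 (2016 is a leap year)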
--- /dev/null	Thu Jan 01 00:00:00 1970 +0000
+++ b/MoinMoin/support/parsedatetime/context.py	Wed Sep 07 04:31:59 2016 +0200
@@ -0,0 +1,187 @@
+# -*- coding: utf-8 -*-
+"""
+parsedatetime/context.py
+
+Context related classes
+
+"""
+
+from threading import local
+
+
+class pdtContextStack(object):
+    """
+    A thread-safe stack to store context(s)
+
+    Internally used by L{Calendar} object
+    """
+
+    def __init__(self):
+        self.__local = local()
+
+    @property
+    def __stack(self):
+        if not hasattr(self.__local, 'stack'):
+            self.__local.stack = []
+        return self.__local.stack
+
+    def push(self, ctx):
+        self.__stack.append(ctx)
+
+    def pop(self):
+        try:
+            return self.__stack.pop()
+        except IndexError:
+            return None
+
+    def last(self):
+        try:
+            return self.__stack[-1]
+        except IndexError:
+            raise RuntimeError('context stack is empty')
+
+    def isEmpty(self):
+        return not self.__stack
+
+
+class pdtContext(object):
+    """
+    Context containing the accuracy flags detected by L{Calendar.parse()}
+
+    The accuracy flag is a bitwise-OR combination of:
+
+        ACU_YEAR - "next year", "2014"
+        ACU_MONTH - "March", "July 2014"
+        ACU_WEEK - "last week", "next 3 weeks"
+        ACU_DAY - "tomorrow", "July 4th 2014"
+        ACU_HALFDAY - "morning", "tonight"
+        ACU_HOUR - "18:00", "next hour"
+        ACU_MIN - "18:32", "next 10 minutes"
+        ACU_SEC - "18:32:55"
+        ACU_NOW - "now"
+
+    """
+
+    __slots__ = ('accuracy',)
+
+    ACU_YEAR = 2 ** 0
+    ACU_MONTH = 2 ** 1
+    ACU_WEEK = 2 ** 2
+    ACU_DAY = 2 ** 3
+    ACU_HALFDAY = 2 ** 4
+    ACU_HOUR = 2 ** 5
+    ACU_MIN = 2 ** 6
+    ACU_SEC = 2 ** 7
+    ACU_NOW = 2 ** 8
+
+    ACU_DATE = ACU_YEAR | ACU_MONTH | ACU_WEEK | ACU_DAY
+    ACU_TIME = ACU_HALFDAY | ACU_HOUR | ACU_MIN | ACU_SEC | ACU_NOW
+
+    _ACCURACY_MAPPING = [
+        (ACU_YEAR, 'year'),
+        (ACU_MONTH, 'month'),
+        (ACU_WEEK, 'week'),
+        (ACU_DAY, 'day'),
+        (ACU_HALFDAY, 'halfday'),
+        (ACU_HOUR, 'hour'),
+        (ACU_MIN, 'min'),
+        (ACU_SEC, 'sec'),
+        (ACU_NOW, 'now')]
+
+    _ACCURACY_REVERSE_MAPPING = {
+        'year': ACU_YEAR,
+        'years': ACU_YEAR,
+        'month': ACU_MONTH,
+        'months': ACU_MONTH,
+        'week': ACU_WEEK,
+        'weeks': ACU_WEEK,
+        'day': ACU_DAY,
+        'days': ACU_DAY,
+        'halfday': ACU_HALFDAY,
+        'morning': ACU_HALFDAY,
+        'afternoon': ACU_HALFDAY,
+        'evening': ACU_HALFDAY,
+        'night': ACU_HALFDAY,
+        'tonight': ACU_HALFDAY,
+        'midnight': ACU_HALFDAY,
+        'hour': ACU_HOUR,
+        'hours': ACU_HOUR,
+        'min': ACU_MIN,
+        'minute': ACU_MIN,
+        'mins': ACU_MIN,
+        'minutes': ACU_MIN,
+        'sec': ACU_SEC,
+        'second': ACU_SEC,
+        'secs': ACU_SEC,
+        'seconds': ACU_SEC,
+        'now': ACU_NOW}
+
+    def __init__(self, accuracy=0):
+        """
+        Default constructor of L{pdtContext} class.
+
+        @type  accuracy: integer
+        @param accuracy: Accuracy flag
+
+        @rtype:  object
+        @return: L{pdtContext} instance
+        """
+        self.accuracy = accuracy
+
+    def updateAccuracy(self, *accuracy):
+        """
+        Updates current accuracy flag
+        """
+        for acc in accuracy:
+            if not isinstance(acc, int):
+                acc = self._ACCURACY_REVERSE_MAPPING[acc]
+            self.accuracy |= acc
+
+    def update(self, context):
+        """
+        Uses another L{pdtContext} instance to update the current one
+        """
+        self.updateAccuracy(context.accuracy)
+
+    @property
+    def hasDate(self):
+        """
+        Returns True if current context is accurate to date
+        """
+        return bool(self.accuracy & self.ACU_DATE)
+
+    @property
+    def hasTime(self):
+        """
+        Returns True if current context is accurate to time
+        """
+        return bool(self.accuracy & self.ACU_TIME)
+
+    @property
+    def dateTimeFlag(self):
+        """
+        Returns the old date/time flag code
+        """
+        return int(self.hasDate and 1) | int(self.hasTime and 2)
+
+    @property
+    def hasDateOrTime(self):
+        """
+        Returns True if current context is accurate to date/time
+        """
+        return bool(self.accuracy)
+
+    def __repr__(self):
+        accuracy_repr = []
+        for acc, name in self._ACCURACY_MAPPING:
+            if acc & self.accuracy:
+                accuracy_repr.append('pdtContext.ACU_%s' % name.upper())
+        if accuracy_repr:
+            accuracy_repr = 'accuracy=' + ' | '.join(accuracy_repr)
+        else:
+            accuracy_repr = ''
+
+        return 'pdtContext(%s)' % accuracy_repr
+
+    def __eq__(self, ctx):
+        return self.accuracy == ctx.accuracy
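+
+# A minimal sketch of pdtContext usage, assuming the names defined above:
+#
+#   ctx = pdtContext()
+#   ctx.updateAccuracy(pdtContext.ACU_DAY, 'hour')
+#   ctx.hasDate, ctx.hasTime, ctx.dateTimeFlag
+#   # should give (True, True, 3); Calendar.parse() returns such an object as
+#   # the second item when called with VERSION_CONTEXT_STYLE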
--- a/MoinMoin/support/parsedatetime/parsedatetime.py	Wed Sep 07 03:05:27 2016 +0200
+++ b/MoinMoin/support/parsedatetime/parsedatetime.py	Wed Sep 07 04:31:59 2016 +0200
@@ -1,1541 +1,2 @@
-#!/usr/bin/env python
-
-"""
-Parse human-readable date/time text.
-"""
-
-__license__ = """
-Copyright (c) 2004-2008 Mike Taylor
-Copyright (c) 2006-2008 Darshana Chhajed
-All rights reserved.
-
-Licensed under the Apache License, Version 2.0 (the "License");
-you may not use this file except in compliance with the License.
-You may obtain a copy of the License at
-
-   http://www.apache.org/licenses/LICENSE-2.0
-
-Unless required by applicable law or agreed to in writing, software
-distributed under the License is distributed on an "AS IS" BASIS,
-WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
-See the License for the specific language governing permissions and
-limitations under the License.
-"""
-
-_debug = False
-
-
-import re
-import time
-import datetime
-import rfc822
-import parsedatetime_consts
-
-
-# Copied from feedparser.py
-# Universal Feedparser
-# Copyright (c) 2002-2006, Mark Pilgrim, All rights reserved.
-# Originally a def inside of _parse_date_w3dtf()
-def _extract_date(m):
-    year = int(m.group('year'))
-    if year < 100:
-        year = 100 * int(time.gmtime()[0] / 100) + int(year)
-    if year < 1000:
-        return 0, 0, 0
-    julian = m.group('julian')
-    if julian:
-        julian = int(julian)
-        month = julian / 30 + 1
-        day = julian % 30 + 1
-        jday = None
-        while jday != julian:
-            t = time.mktime((year, month, day, 0, 0, 0, 0, 0, 0))
-            jday = time.gmtime(t)[-2]
-            diff = abs(jday - julian)
-            if jday > julian:
-                if diff < day:
-                    day = day - diff
-                else:
-                    month = month - 1
-                    day = 31
-            elif jday < julian:
-                if day + diff < 28:
-                    day = day + diff
-                else:
-                    month = month + 1
-        return year, month, day
-    month = m.group('month')
-    day = 1
-    if month is None:
-        month = 1
-    else:
-        month = int(month)
-        day = m.group('day')
-        if day:
-            day = int(day)
-        else:
-            day = 1
-    return year, month, day
-
-# Copied from feedparser.py
-# Universal Feedparser
-# Copyright (c) 2002-2006, Mark Pilgrim, All rights reserved.
-# Originally a def inside of _parse_date_w3dtf()
-def _extract_time(m):
-    if not m:
-        return 0, 0, 0
-    hours = m.group('hours')
-    if not hours:
-        return 0, 0, 0
-    hours = int(hours)
-    minutes = int(m.group('minutes'))
-    seconds = m.group('seconds')
-    if seconds:
-        seconds = int(seconds)
-    else:
-        seconds = 0
-    return hours, minutes, seconds
-
-
-# Copied from feedparser.py
-# Universal Feedparser
-# Copyright (c) 2002-2006, Mark Pilgrim, All rights reserved.
-# Modified to return a tuple instead of mktime
-#
-# Original comment:
-#   W3DTF-style date parsing adapted from PyXML xml.utils.iso8601, written by
-#   Drake and licensed under the Python license.  Removed all range checking
-#   for month, day, hour, minute, and second, since mktime will normalize
-#   these later
-def _parse_date_w3dtf(dateString):
-    # the __extract_date and __extract_time methods were
-    # copied-out so they could be used by my code --bear
-    def __extract_tzd(m):
-        '''Return the Time Zone Designator as an offset in seconds from UTC.'''
-        if not m:
-            return 0
-        tzd = m.group('tzd')
-        if not tzd:
-            return 0
-        if tzd == 'Z':
-            return 0
-        hours = int(m.group('tzdhours'))
-        minutes = m.group('tzdminutes')
-        if minutes:
-            minutes = int(minutes)
-        else:
-            minutes = 0
-        offset = (hours*60 + minutes) * 60
-        if tzd[0] == '+':
-            return -offset
-        return offset
-
-    __date_re = ('(?P<year>\d\d\d\d)'
-                 '(?:(?P<dsep>-|)'
-                 '(?:(?P<julian>\d\d\d)'
-                 '|(?P<month>\d\d)(?:(?P=dsep)(?P<day>\d\d))?))?')
-    __tzd_re = '(?P<tzd>[-+](?P<tzdhours>\d\d)(?::?(?P<tzdminutes>\d\d))|Z)'
-    __tzd_rx = re.compile(__tzd_re)
-    __time_re = ('(?P<hours>\d\d)(?P<tsep>:|)(?P<minutes>\d\d)'
-                 '(?:(?P=tsep)(?P<seconds>\d\d(?:[.,]\d+)?))?'
-                 + __tzd_re)
-    __datetime_re = '%s(?:T%s)?' % (__date_re, __time_re)
-    __datetime_rx = re.compile(__datetime_re)
-    m = __datetime_rx.match(dateString)
-    if (m is None) or (m.group() != dateString): return
-    return _extract_date(m) + _extract_time(m) + (0, 0, 0)
-
-
-# Copied from feedparser.py
-# Universal Feedparser
-# Copyright (c) 2002-2006, Mark Pilgrim, All rights reserved.
-# Modified to return a tuple instead of mktime
-#
-def _parse_date_rfc822(dateString):
-    '''Parse an RFC822, RFC1123, RFC2822, or asctime-style date'''
-    data = dateString.split()
-    if data[0][-1] in (',', '.') or data[0].lower() in rfc822._daynames:
-        del data[0]
-    if len(data) == 4:
-        s = data[3]
-        i = s.find('+')
-        if i > 0:
-            data[3:] = [s[:i], s[i+1:]]
-        else:
-            data.append('')
-        dateString = " ".join(data)
-    if len(data) < 5:
-        dateString += ' 00:00:00 GMT'
-    return rfc822.parsedate_tz(dateString)
-
-# rfc822.py defines several time zones, but we define some extra ones.
-# 'ET' is equivalent to 'EST', etc.
-_additional_timezones = {'AT': -400, 'ET': -500,
-                         'CT': -600, 'MT': -700,
-                         'PT': -800}
-rfc822._timezones.update(_additional_timezones)
-
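For context, a minimal check (not from this changeset, assuming a Python 2 environment with the rfc822 module): parsedate_tz() yields a 10-item tuple whose last item is the UTC offset in seconds, and the _timezones update above lets symbolic names such as 'ET' resolve as well:

    import rfc822

    parsed = rfc822.parsedate_tz('Fri, 21 Jul 2006 05:50:00 -0400')
    print(parsed[-1])   # -14400, i.e. the -0400 offset expressed in seconds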
-
-class Calendar:
-    """
-    A collection of routines to input, parse and manipulate dates and times.
-    The input text can be either 'normal' date values or human-readable phrases.
-    """
-
-    def __init__(self, constants=None):
-        """
-        Default constructor for the L{Calendar} class.
-
-        @type  constants: object
-        @param constants: Instance of the class L{parsedatetime_consts.Constants}
-
-        @rtype:  object
-        @return: L{Calendar} instance
-        """
-          # if a constants reference is not included, use default
-        if constants is None:
-            self.ptc = parsedatetime_consts.Constants()
-        else:
-            self.ptc = constants
-
-        self.weekdyFlag    = False  # monday/tuesday/...
-        self.dateStdFlag   = False  # 07/21/06
-        self.dateStrFlag   = False  # July 21st, 2006
-        self.timeStdFlag   = False  # 5:50 
-        self.meridianFlag  = False  # am/pm
-        self.dayStrFlag    = False  # tomorrow/yesterday/today/..
-        self.timeStrFlag   = False  # lunch/noon/breakfast/...
-        self.modifierFlag  = False  # after/before/prev/next/..
-        self.modifier2Flag = False  # after/before/prev/next/..
-        self.unitsFlag     = False  # hrs/weeks/yrs/min/..
-        self.qunitsFlag    = False  # h/m/t/d..
-
-        self.timeFlag      = 0
-        self.dateFlag      = 0
-
-
-    def _convertUnitAsWords(self, unitText):
-        """
-        Converts text units into their number value
-
-        Five = 5
-        Twenty Five = 25
-        Two hundred twenty five = 225
-        Two thousand and twenty five = 2025
-        Two thousand twenty five = 2025
-
-        @type  unitText: string
-        @param unitText: number text to convert
-
-        @rtype:  integer
-        @return: numerical value of unitText
-        """
-        # TODO: implement this
-        pass
-
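Since _convertUnitAsWords() is only a TODO stub, here is a rough sketch of the conversion its docstring describes (the helper name and the limited vocabulary below are assumptions, not library code):

    _UNITS  = {'one': 1, 'two': 2, 'three': 3, 'four': 4, 'five': 5,
               'six': 6, 'seven': 7, 'eight': 8, 'nine': 9, 'ten': 10,
               'twenty': 20, 'thirty': 30, 'forty': 40, 'fifty': 50}
    _SCALES = {'hundred': 100, 'thousand': 1000}

    def words_to_number(text):
        # hypothetical helper: accumulate small numbers, apply scale words
        total = current = 0
        for word in text.lower().replace('-', ' ').split():
            if word == 'and':
                continue
            if word in _UNITS:
                current += _UNITS[word]
            elif word == 'hundred':
                current *= _SCALES[word]
            elif word == 'thousand':
                total += current * _SCALES[word]
                current = 0
        return total + current

    print(words_to_number('Two hundred twenty five'))       # 225
    print(words_to_number('Two thousand and twenty five'))  # 2025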
-
-    def _buildTime(self, source, quantity, modifier, units):
-        """
-        Take the C{quantity}, C{modifier} and C{units} strings and convert them into values.
-        After converting, calculate the time and return the adjusted sourceTime.
-
-        @type  source:   time
-        @param source:   time to use as the base (or source)
-        @type  quantity: string
-        @param quantity: quantity string
-        @type  modifier: string
-        @param modifier: how quantity and units modify the source time
-        @type  units:    string
-        @param units:    unit of the quantity (i.e. hours, days, months, etc)
-
-        @rtype:  struct_time
-        @return: C{struct_time} of the calculated time
-        """
-        if _debug:
-            print '_buildTime: [%s][%s][%s]' % (quantity, modifier, units)
-
-        if source is None:
-            source = time.localtime()
-
-        if quantity is None:
-            quantity = ''
-        else:
-            quantity = quantity.strip()
-
-        if len(quantity) == 0:
-            qty = 1
-        else:
-            try:
-                qty = int(quantity)
-            except ValueError:
-                qty = 0
-
-        if modifier in self.ptc.Modifiers:
-            qty = qty * self.ptc.Modifiers[modifier]
-
-            if units is None or units == '':
-                units = 'dy'
-
-        # plurals are handled by the regexes (this could be a bug, though)
-
-        (yr, mth, dy, hr, mn, sec, _, _, _) = source
-
-        start  = datetime.datetime(yr, mth, dy, hr, mn, sec)
-        target = start
-
-        if units.startswith('y'):
-            target        = self.inc(start, year=qty)
-            self.dateFlag = 1
-        elif units.endswith('th') or units.endswith('ths'):
-            target        = self.inc(start, month=qty)
-            self.dateFlag = 1
-        else:
-            if units.startswith('d'):
-                target        = start + datetime.timedelta(days=qty)
-                self.dateFlag = 1
-            elif units.startswith('h'):
-                target        = start + datetime.timedelta(hours=qty)
-                self.timeFlag = 2
-            elif units.startswith('m'):
-                target        = start + datetime.timedelta(minutes=qty)
-                self.timeFlag = 2
-            elif units.startswith('s'):
-                target        = start + datetime.timedelta(seconds=qty)
-                self.timeFlag = 2
-            elif units.startswith('w'):
-                target        = start + datetime.timedelta(weeks=qty)
-                self.dateFlag = 1
-
-        return target.timetuple()
-
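Once quantity and units are resolved, the branch above is plain timedelta arithmetic; a minimal sketch (not from this changeset) for a quantity of 5 with unit 'hrs':

    import datetime

    start = datetime.datetime(2006, 7, 21, 9, 0, 0)
    print((start + datetime.timedelta(hours=5)).timetuple()[:6])
    # (2006, 7, 21, 14, 0, 0)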
-
-    def parseDate(self, dateString):
-        """
-        Parse short-form date strings::
-
-            '05/28/2006' or '04.21'
-
-        @type  dateString: string
-        @param dateString: text to convert to a C{datetime}
-
-        @rtype:  struct_time
-        @return: calculated C{struct_time} value of dateString
-        """
-        yr, mth, dy, hr, mn, sec, wd, yd, isdst = time.localtime()
-
-        # values pulled from the regexes will be stored here and later
-        # assigned to mth, dy, yr based on information from the locale
-        # -1 is used as the marker value because we want zero values
-        # to be passed through so they can be flagged as errors later
-        v1 = -1
-        v2 = -1
-        v3 = -1
-
-        s = dateString
-        m = self.ptc.CRE_DATE2.search(s)
-        if m is not None:
-            index = m.start()
-            v1    = int(s[:index])
-            s     = s[index + 1:]
-
-        m = self.ptc.CRE_DATE2.search(s)
-        if m is not None:
-            index = m.start()
-            v2    = int(s[:index])
-            v3    = int(s[index + 1:])
-        else:
-            v2 = int(s.strip())
-
-        v = [ v1, v2, v3 ]
-        d = { 'm': mth, 'd': dy, 'y': yr }
-
-        for i in range(0, 3):
-            n = v[i]
-            c = self.ptc.dp_order[i]
-            if n >= 0:
-                d[c] = n
-
-        # if the year is not specified and the date has already
-        # passed, increment the year
-        if v3 == -1 and ((mth > d['m']) or (mth == d['m'] and dy > d['d'])):
-            yr = d['y'] + 1
-        else:
-            yr  = d['y']
-
-        mth = d['m']
-        dy  = d['d']
-
-        # birthday epoch constraint
-        if yr < self.ptc.BirthdayEpoch:
-            yr += 2000
-        elif yr < 100:
-            yr += 1900
-
-        if _debug:
-            print 'parseDate: ', yr, mth, dy, self.ptc.daysInMonth(mth, yr)
-
-        if (mth > 0 and mth <= 12) and \
-           (dy > 0 and dy <= self.ptc.daysInMonth(mth, yr)):
-            sourceTime = (yr, mth, dy, hr, mn, sec, wd, yd, isdst)
-        else:
-            self.dateFlag = 0
-            self.timeFlag = 0
-            sourceTime    = time.localtime() # return current time if date
-                                             # string is invalid
-
-        return sourceTime
-
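A minimal sketch (not library code) of the dp_order step used by parseDate() above; the illustrative values assume the en_US order defined in the locale constants further below:

    values   = [5, 28, 2006]              # numbers pulled from '05/28/2006'
    dp_order = ['m', 'd', 'y']            # en_US: month, day, year
    slots    = dict(zip(dp_order, values))
    print((slots['y'], slots['m'], slots['d']))   # (2006, 5, 28)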
-
-    def parseDateText(self, dateString):
-        """
-        Parse long-form date strings::
-
-            'May 31st, 2006'
-            'Jan 1st'
-            'July 2006'
-
-        @type  dateString: string
-        @param dateString: text to convert to a datetime
-
-        @rtype:  struct_time
-        @return: calculated C{struct_time} value of dateString
-        """
-        yr, mth, dy, hr, mn, sec, wd, yd, isdst = time.localtime()
-
-        currentMth = mth
-        currentDy  = dy
-
-        s   = dateString.lower()
-        m   = self.ptc.CRE_DATE3.search(s)
-        mth = m.group('mthname')
-        mth = self.ptc.MonthOffsets[mth]
-
-        if m.group('day') !=  None:
-            dy = int(m.group('day'))
-        else:
-            dy = 1
-
-        if m.group('year') !=  None:
-            yr = int(m.group('year'))
-
-            # birthday epoch constraint
-            if yr < self.ptc.BirthdayEpoch:
-                yr += 2000
-            elif yr < 100:
-                yr += 1900
-
-        elif (mth < currentMth) or (mth == currentMth and dy < currentDy):
-            # if that day and month have already passed in this year,
-            # then increment the year by 1
-            yr += 1
-
-        if dy > 0 and dy <= self.ptc.daysInMonth(mth, yr):
-            sourceTime = (yr, mth, dy, hr, mn, sec, wd, yd, isdst)
-        else:
-            # Return current time if date string is invalid
-            self.dateFlag = 0
-            self.timeFlag = 0
-            sourceTime    = time.localtime()
-
-        return sourceTime
-
-
-    def evalRanges(self, datetimeString, sourceTime=None):
-        """
-        Evaluate the C{datetimeString} text and determine if
-        it represents a date or time range.
-
-        @type  datetimeString: string
-        @param datetimeString: datetime text to evaluate
-        @type  sourceTime:     struct_time
-        @param sourceTime:     C{struct_time} value to use as the base
-
-        @rtype:  tuple
-        @return: tuple of: start datetime, end datetime and the range-type flag
-        """
-        startTime = ''
-        endTime   = ''
-        startDate = ''
-        endDate   = ''
-        rangeFlag = 0
-
-        s = datetimeString.strip().lower()
-
-        if self.ptc.rangeSep in s:
-            s = s.replace(self.ptc.rangeSep, ' %s ' % self.ptc.rangeSep)
-            s = s.replace('  ', ' ')
-
-        m = self.ptc.CRE_TIMERNG1.search(s)
-        if m is not None:
-            rangeFlag = 1
-        else:
-            m = self.ptc.CRE_TIMERNG2.search(s)
-            if m is not None:
-                rangeFlag = 2
-            else:
-                m = self.ptc.CRE_TIMERNG4.search(s)
-                if m is not None:
-                    rangeFlag = 7
-                else:
-                    m = self.ptc.CRE_TIMERNG3.search(s)
-                    if m is not None:
-                        rangeFlag = 3
-                    else:
-                        m = self.ptc.CRE_DATERNG1.search(s)
-                        if m is not None:
-                            rangeFlag = 4
-                        else:
-                            m = self.ptc.CRE_DATERNG2.search(s)
-                            if m is not None:
-                                rangeFlag = 5
-                            else:
-                                m = self.ptc.CRE_DATERNG3.search(s)
-                                if m is not None:
-                                    rangeFlag = 6
-
-        if _debug:
-            print 'evalRanges: rangeFlag =', rangeFlag, '[%s]' % s
-
-        if m is not None:
-            if (m.group() != s):
-                # capture remaining string
-                parseStr = m.group()
-                chunk1   = s[:m.start()]
-                chunk2   = s[m.end():]
-                s        = '%s %s' % (chunk1, chunk2)
-                flag     = 1
-
-                sourceTime, flag = self.parse(s, sourceTime)
-
-                if flag == 0:
-                    sourceTime = None
-            else:
-                parseStr = s
-
-        if rangeFlag == 1:
-            m                = re.search(self.ptc.rangeSep, parseStr)
-            startTime, sflag = self.parse((parseStr[:m.start()]),       sourceTime)
-            endTime, eflag   = self.parse((parseStr[(m.start() + 1):]), sourceTime)
-
-            if (eflag != 0)  and (sflag != 0):
-                return (startTime, endTime, 2)
-
-        elif rangeFlag == 2:
-            m                = re.search(self.ptc.rangeSep, parseStr)
-            startTime, sflag = self.parse((parseStr[:m.start()]),       sourceTime)
-            endTime, eflag   = self.parse((parseStr[(m.start() + 1):]), sourceTime)
-
-            if (eflag != 0)  and (sflag != 0):
-                return (startTime, endTime, 2)
-
-        elif rangeFlag == 3 or rangeFlag == 7:
-            m = re.search(self.ptc.rangeSep, parseStr)
-            # capturing the meridian from the end time
-            if self.ptc.usesMeridian:
-                ampm = re.search(self.ptc.am[0], parseStr)
-
-                # appending the meridian to the start time
-                if ampm is not None:
-                    startTime, sflag = self.parse((parseStr[:m.start()] + self.ptc.meridian[0]), sourceTime)
-                else:
-                    startTime, sflag = self.parse((parseStr[:m.start()] + self.ptc.meridian[1]), sourceTime)
-            else:
-                startTime, sflag = self.parse((parseStr[:m.start()]), sourceTime)
-
-            endTime, eflag = self.parse(parseStr[(m.start() + 1):], sourceTime)
-
-            if (eflag != 0)  and (sflag != 0):
-                return (startTime, endTime, 2)
-
-        elif rangeFlag == 4:
-            m                = re.search(self.ptc.rangeSep, parseStr)
-            startDate, sflag = self.parse((parseStr[:m.start()]),       sourceTime)
-            endDate, eflag   = self.parse((parseStr[(m.start() + 1):]), sourceTime)
-
-            if (eflag != 0)  and (sflag != 0):
-                return (startDate, endDate, 1)
-
-        elif rangeFlag == 5:
-            m       = re.search(self.ptc.rangeSep, parseStr)
-            endDate = parseStr[(m.start() + 1):]
-
-            # capturing the year from the end date
-            date    = self.ptc.CRE_DATE3.search(endDate)
-            endYear = date.group('year')
-
-            # appending the year to the start date if the start date
-            # does not have year information and the end date does.
-            # eg : "Aug 21 - Sep 4, 2007"
-            if endYear is not None:
-                startDate = (parseStr[:m.start()]).strip()
-                date      = self.ptc.CRE_DATE3.search(startDate)
-                startYear = date.group('year')
-
-                if startYear is None:
-                    startDate = startDate + ', ' + endYear
-            else:
-                startDate = parseStr[:m.start()]
-
-            startDate, sflag = self.parse(startDate, sourceTime)
-            endDate, eflag   = self.parse(endDate, sourceTime)
-
-            if (eflag != 0)  and (sflag != 0):
-                return (startDate, endDate, 1)
-
-        elif rangeFlag == 6:
-            m = re.search(self.ptc.rangeSep, parseStr)
-
-            startDate = parseStr[:m.start()]
-
-            # capturing the month from the start date
-            mth = self.ptc.CRE_DATE3.search(startDate)
-            mth = mth.group('mthname')
-
-            # appending the month name to the end date
-            endDate = mth + parseStr[(m.start() + 1):]
-
-            startDate, sflag = self.parse(startDate, sourceTime)
-            endDate, eflag   = self.parse(endDate, sourceTime)
-
-            if (eflag != 0)  and (sflag != 0):
-                return (startDate, endDate, 1)
-        else:
-            # if range is not found
-            sourceTime = time.localtime()
-
-            return (sourceTime, sourceTime, 0)
-
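A hedged usage sketch for evalRanges() (the top-level import path is an assumption; older releases expose the class as parsedatetime.parsedatetime.Calendar instead):

    from parsedatetime import Calendar   # assumption: top-level export
    cal = Calendar()
    start, end, rtype = cal.evalRanges('Aug 21 - Sep 4, 2007')
    print(rtype)   # 1 = date range, 2 = time range, 0 = no range found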
-
-    def _CalculateDOWDelta(self, wd, wkdy, offset, style, currentDayStyle):
-        """
-        Based on the C{style} and C{currentDayStyle} determine what
-        day-of-week value is to be returned.
-
-        @type  wd:              integer
-        @param wd:              day-of-week value for the current day
-        @type  wkdy:            integer
-        @param wkdy:            day-of-week value for the parsed day
-        @type  offset:          integer
-        @param offset:          offset direction for any modifiers (-1, 0, 1)
-        @type  style:           integer
-        @param style:           normally the value set in C{Constants.DOWParseStyle}
-        @type  currentDayStyle: integer
-        @param currentDayStyle: normally the value set in C{Constants.CurrentDOWParseStyle}
-
-        @rtype:  integer
-        @return: calculated day-of-week
-        """
-        if offset == 1:
-            # modifier is indicating future week eg: "next".
-            # DOW is calculated as DOW of next week
-            diff = 7 - wd + wkdy
-
-        elif offset == -1:
-            # modifier is indicating past week eg: "last","previous"
-            # DOW is calculated as DOW of previous week
-            diff = wkdy - wd - 7
-
-        elif offset == 0:
-            # modifier is indicating current week eg: "this"
-            # DOW is calculated as DOW of this week
-            diff = wkdy - wd
-
-        elif offset == 2:
-            # no modifier is present.
-            # i.e. string to be parsed is just DOW
-            if style == 1:
-                # next occurrence of the DOW is calculated
-                if currentDayStyle == True:
-                    if wkdy >= wd:
-                        diff = wkdy - wd
-                    else:
-                        diff = 7 - wd + wkdy
-                else:
-                    if wkdy > wd:
-                        diff = wkdy - wd
-                    else:
-                        diff = 7 - wd + wkdy
-
-            elif style == -1:
-                # last occurrence of the DOW is calculated
-                if currentDayStyle == True:
-                    if wkdy <= wd:
-                        diff = wkdy - wd
-                    else:
-                        diff = wkdy - wd - 7
-                else:
-                    if wkdy < wd:
-                        diff = wkdy - wd
-                    else:
-                        diff = wkdy - wd - 7
-            else:
-                # occurrence of the DOW in the current week is calculated
-                diff = wkdy - wd
-
-        if _debug:
-            print "wd %s, wkdy %s, offset %d, style %d\n" % (wd, wkdy, offset, style)
-
-        return diff
-
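A worked example of the arithmetic above, assuming Monday == 0, today a Wednesday (wd = 2) and a parsed 'friday' (wkdy = 4):

    wd, wkdy = 2, 4
    print(7 - wd + wkdy)   # offset  1 ('next'):  9 days ahead, next week's Friday
    print(wkdy - wd - 7)   # offset -1 ('last'): -5 days, the previous Friday
    print(wkdy - wd)       # offset  0 ('this'):  2 days ahead, this week's Friday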
-
-    def _evalModifier(self, modifier, chunk1, chunk2, sourceTime):
-        """
-        Evaluate the C{modifier} string and following text (passed in
-        as C{chunk1} and C{chunk2}) and if they match any known modifiers
-        calculate the delta and apply it to C{sourceTime}.
-
-        @type  modifier:   string
-        @param modifier:   modifier text to apply to sourceTime
-        @type  chunk1:     string
-        @param chunk1:     first text chunk that followed modifier (if any)
-        @type  chunk2:     string
-        @param chunk2:     second text chunk that followed modifier (if any)
-        @type  sourceTime: struct_time
-        @param sourceTime: C{struct_time} value to use as the base
-
-        @rtype:  tuple
-        @return: tuple of: remaining text and the modified sourceTime
-        """
-        offset = self.ptc.Modifiers[modifier]
-
-        if sourceTime is not None:
-            (yr, mth, dy, hr, mn, sec, wd, yd, isdst) = sourceTime
-        else:
-            (yr, mth, dy, hr, mn, sec, wd, yd, isdst) = time.localtime()
-
-        # capture the units after the modifier and the remaining
-        # string after the unit
-        m = self.ptc.CRE_REMAINING.search(chunk2)
-        if m is not None:
-            index  = m.start() + 1
-            unit   = chunk2[:m.start()]
-            chunk2 = chunk2[index:]
-        else:
-            unit   = chunk2
-            chunk2 = ''
-
-        flag = False
-
-        if unit == 'month' or \
-           unit == 'mth' or \
-           unit == 'm':
-            if offset == 0:
-                dy         = self.ptc.daysInMonth(mth, yr)
-                sourceTime = (yr, mth, dy, 9, 0, 0, wd, yd, isdst)
-            elif offset == 2:
-                # if day is the last day of the month, calculate the last day
-                # of the next month
-                if dy == self.ptc.daysInMonth(mth, yr):
-                    dy = self.ptc.daysInMonth(mth + 1, yr)
-
-                start      = datetime.datetime(yr, mth, dy, 9, 0, 0)
-                target     = self.inc(start, month=1)
-                sourceTime = target.timetuple()
-            else:
-                start      = datetime.datetime(yr, mth, 1, 9, 0, 0)
-                target     = self.inc(start, month=offset)
-                sourceTime = target.timetuple()
-
-            flag = True
-            self.dateFlag = 1
-
-        if unit == 'week' or \
-             unit == 'wk' or \
-             unit == 'w':
-            if offset == 0:
-                start      = datetime.datetime(yr, mth, dy, 17, 0, 0)
-                target     = start + datetime.timedelta(days=(4 - wd))
-                sourceTime = target.timetuple()
-            elif offset == 2:
-                start      = datetime.datetime(yr, mth, dy, 9, 0, 0)
-                target     = start + datetime.timedelta(days=7)
-                sourceTime = target.timetuple()
-            else:
-                return self._evalModifier(modifier, chunk1, "monday " + chunk2, sourceTime)
-
-            flag          = True
-            self.dateFlag = 1
-
-        if unit == 'day' or \
-            unit == 'dy' or \
-            unit == 'd':
-            if offset == 0:
-                sourceTime    = (yr, mth, dy, 17, 0, 0, wd, yd, isdst)
-                self.timeFlag = 2
-            elif offset == 2:
-                start      = datetime.datetime(yr, mth, dy, hr, mn, sec)
-                target     = start + datetime.timedelta(days=1)
-                sourceTime = target.timetuple()
-            else:
-                start      = datetime.datetime(yr, mth, dy, 9, 0, 0)
-                target     = start + datetime.timedelta(days=offset)
-                sourceTime = target.timetuple()
-
-            flag          = True
-            self.dateFlag = 1
-
-        if unit == 'hour' or \
-           unit == 'hr':
-            if offset == 0:
-                sourceTime = (yr, mth, dy, hr, 0, 0, wd, yd, isdst)
-            else:
-                start      = datetime.datetime(yr, mth, dy, hr, 0, 0)
-                target     = start + datetime.timedelta(hours=offset)
-                sourceTime = target.timetuple()
-
-            flag          = True
-            self.timeFlag = 2
-
-        if unit == 'year' or \
-             unit == 'yr' or \
-             unit == 'y':
-            if offset == 0:
-                sourceTime = (yr, 12, 31, hr, mn, sec, wd, yd, isdst)
-            elif offset == 2:
-                sourceTime = (yr + 1, mth, dy, hr, mn, sec, wd, yd, isdst)
-            else:
-                sourceTime = (yr + offset, 1, 1, 9, 0, 0, wd, yd, isdst)
-
-            flag          = True
-            self.dateFlag = 1
-
-        if flag == False:
-            m = self.ptc.CRE_WEEKDAY.match(unit)
-            if m is not None:
-                wkdy          = m.group()
-                self.dateFlag = 1
-
-                if modifier == 'eod':
-                    # Calculate the upcoming weekday
-                    self.modifierFlag = False
-                    (sourceTime, _)   = self.parse(wkdy, sourceTime)
-                    sources           = self.ptc.buildSources(sourceTime)
-                    self.timeFlag     = 2
-
-                    if modifier in sources:
-                        sourceTime = sources[modifier]
-
-                else:
-                    wkdy       = self.ptc.WeekdayOffsets[wkdy]
-                    diff       = self._CalculateDOWDelta(wd, wkdy, offset,
-                                                         self.ptc.DOWParseStyle,
-                                                         self.ptc.CurrentDOWParseStyle)
-                    start      = datetime.datetime(yr, mth, dy, 9, 0, 0)
-                    target     = start + datetime.timedelta(days=diff)
-                    sourceTime = target.timetuple()
-
-                flag          = True
-                self.dateFlag = 1
-
-        if not flag:
-            m = self.ptc.CRE_TIME.match(unit)
-            if m is not None:
-                self.modifierFlag = False
-                (yr, mth, dy, hr, mn, sec, wd, yd, isdst), _ = self.parse(unit)
-
-                start      = datetime.datetime(yr, mth, dy, hr, mn, sec)
-                target     = start + datetime.timedelta(days=offset)
-                sourceTime = target.timetuple()
-                flag       = True
-            else:
-                self.modifierFlag = False
-
-                # check if the remaining text is parsable and if so,
-                # use it as the base time for the modifier source time
-                t, flag2 = self.parse('%s %s' % (chunk1, unit), sourceTime)
-
-                if flag2 != 0:
-                    sourceTime = t
-
-                sources = self.ptc.buildSources(sourceTime)
-
-                if modifier in sources:
-                    sourceTime    = sources[modifier]
-                    flag          = True
-                    self.timeFlag = 2
-
-        # if the word after "next" is a number, the string is most likely
-        # something like "next 4 hrs", in which case we have to combine the
-        # units with the rest of the string
-        if not flag:
-            if offset < 0:
-                # if offset is negative, the unit has to be made negative
-                unit = '-%s' % unit
-
-            chunk2 = '%s %s' % (unit, chunk2)
-
-        self.modifierFlag = False
-
-        #return '%s %s' % (chunk1, chunk2), sourceTime
-        return '%s' % chunk2, sourceTime
-
-    def _evalModifier2(self, modifier, chunk1 , chunk2, sourceTime):
-        """
-        Evaluate the C{modifier} string and following text (passed in
-        as C{chunk1} and C{chunk2}) and if they match any known modifiers
-        calculate the delta and apply it to C{sourceTime}.
-
-        @type  modifier:   string
-        @param modifier:   modifier text to apply to C{sourceTime}
-        @type  chunk1:     string
-        @param chunk1:     first text chunk that followed modifier (if any)
-        @type  chunk2:     string
-        @param chunk2:     second text chunk that followed modifier (if any)
-        @type  sourceTime: struct_time
-        @param sourceTime: C{struct_time} value to use as the base
-
-        @rtype:  tuple
-        @return: tuple of: remaining text and the modified sourceTime
-        """
-        offset = self.ptc.Modifiers[modifier]
-        digit  = r'\d+'
-
-        self.modifier2Flag = False
-
-        # If the string after the negative modifier starts with digits,
-        # then it is likely that the string is similar to ' before 3 days'
-        # or 'evening prior to 3 days'.
-        # In this case, the total time is calculated by subtracting '3 days'
-        # from the current date.
-        # So, we have to identify the quantity and negate it before parsing
-        # the string.
-        # This is not required for strings not starting with digits since the
-        # string is enough to calculate the sourceTime
-        if chunk2 != '':
-            if offset < 0:
-                m = re.match(digit, chunk2.strip())
-                if m is not None:
-                    qty    = int(m.group()) * -1
-                    chunk2 = chunk2[m.end():]
-                    chunk2 = '%d%s' % (qty, chunk2)
-
-            sourceTime, flag1 = self.parse(chunk2, sourceTime)
-            if flag1 == 0:
-                flag1 = True
-            else:
-                flag1 = False
-            flag2 = False
-        else:
-            flag1 = False
-
-        if chunk1 != '':
-            if offset < 0:
-                m = re.search(digit, chunk1.strip())
-                if m is not None:
-                    qty    = int(m.group()) * -1
-                    chunk1 = chunk1[m.end():]
-                    chunk1 = '%d%s' % (qty, chunk1)
-
-            tempDateFlag       = self.dateFlag
-            tempTimeFlag       = self.timeFlag
-            sourceTime2, flag2 = self.parse(chunk1, sourceTime)
-        else:
-            return sourceTime, (flag1 and flag2)
-
-        # if chunk1 is not a datetime but chunk2 is, do not use the datetime
-        # value returned by parsing chunk1
-        if not (flag1 == False and flag2 == 0):
-            sourceTime = sourceTime2
-        else:
-            self.timeFlag = tempTimeFlag
-            self.dateFlag = tempDateFlag
-
-        return sourceTime, (flag1 and flag2)
-
-
-    def _evalString(self, datetimeString, sourceTime=None):
-        """
-        Calculate the datetime based on flags set by the L{parse()} routine
-
-        Examples handled::
-            RFC822, W3CDTF formatted dates
-            HH:MM[:SS][ am/pm]
-            MM/DD/YYYY
-            DD MMMM YYYY
-
-        @type  datetimeString: string
-        @param datetimeString: text to try and parse as more "traditional"
-                               date/time text
-        @type  sourceTime:     struct_time
-        @param sourceTime:     C{struct_time} value to use as the base
-
-        @rtype:  struct_time
-        @return: calculated C{struct_time} value or current C{struct_time}
-                 if not parsed
-        """
-        s   = datetimeString.strip()
-        now = time.localtime()
-
-        # Given string date is a RFC822 date
-        if sourceTime is None:
-            sourceTime = _parse_date_rfc822(s)
-
-            if sourceTime is not None:
-                (yr, mth, dy, hr, mn, sec, wd, yd, isdst, _) = sourceTime
-                self.dateFlag = 1
-
-                if (hr != 0) and (mn != 0) and (sec != 0):
-                    self.timeFlag = 2
-
-                sourceTime = (yr, mth, dy, hr, mn, sec, wd, yd, isdst)
-
-        # Given string date is a W3CDTF date
-        if sourceTime is None:
-            sourceTime = _parse_date_w3dtf(s)
-
-            if sourceTime is not None:
-                self.dateFlag = 1
-                self.timeFlag = 2
-
-        if sourceTime is None:
-            s = s.lower()
-
-        # Given string is in the format HH:MM(:SS)(am/pm)
-        if self.meridianFlag:
-            if sourceTime is None:
-                (yr, mth, dy, hr, mn, sec, wd, yd, isdst) = now
-            else:
-                (yr, mth, dy, hr, mn, sec, wd, yd, isdst) = sourceTime
-
-            m = self.ptc.CRE_TIMEHMS2.search(s)
-            if m is not None:
-                dt = s[:m.start('meridian')].strip()
-                if len(dt) <= 2:
-                    hr  = int(dt)
-                    mn  = 0
-                    sec = 0
-                else:
-                    hr, mn, sec = _extract_time(m)
-
-                if hr == 24:
-                    hr = 0
-
-                sourceTime = (yr, mth, dy, hr, mn, sec, wd, yd, isdst)
-                meridian   = m.group('meridian').lower()
-
-                  # if 'am' found and hour is 12 - force hour to 0 (midnight)
-                if (meridian in self.ptc.am) and hr == 12:
-                    sourceTime = (yr, mth, dy, 0, mn, sec, wd, yd, isdst)
-
-                  # if 'pm' found and hour < 12, add 12 to shift to evening
-                if (meridian in self.ptc.pm) and hr < 12:
-                    sourceTime = (yr, mth, dy, hr + 12, mn, sec, wd, yd, isdst)
-
-              # invalid time
-            if hr > 24 or mn > 59 or sec > 59:
-                sourceTime    = now
-                self.dateFlag = 0
-                self.timeFlag = 0
-
-            self.meridianFlag = False
-
-          # Given string is in the format HH:MM(:SS)
-        if self.timeStdFlag:
-            if sourceTime is None:
-                (yr, mth, dy, hr, mn, sec, wd, yd, isdst) = now
-            else:
-                (yr, mth, dy, hr, mn, sec, wd, yd, isdst) = sourceTime
-
-            m = self.ptc.CRE_TIMEHMS.search(s)
-            if m is not None:
-                hr, mn, sec = _extract_time(m)
-            if hr == 24:
-                hr = 0
-
-            if hr > 24 or mn > 59 or sec > 59:
-                # invalid time
-                sourceTime    = now
-                self.dateFlag = 0
-                self.timeFlag = 0
-            else:
-                sourceTime = (yr, mth, dy, hr, mn, sec, wd, yd, isdst)
-
-            self.timeStdFlag = False
-
-        # Given string is in the format 07/21/2006
-        if self.dateStdFlag:
-            sourceTime       = self.parseDate(s)
-            self.dateStdFlag = False
-
-        # Given string is in the format  "May 23rd, 2005"
-        if self.dateStrFlag:
-            sourceTime       = self.parseDateText(s)
-            self.dateStrFlag = False
-
-        # Given string is a weekday
-        if self.weekdyFlag:
-            (yr, mth, dy, hr, mn, sec, wd, yd, isdst) = now
-
-            start = datetime.datetime(yr, mth, dy, hr, mn, sec)
-            wkdy  = self.ptc.WeekdayOffsets[s]
-
-            if wkdy > wd:
-                qty = self._CalculateDOWDelta(wd, wkdy, 2,
-                                              self.ptc.DOWParseStyle,
-                                              self.ptc.CurrentDOWParseStyle)
-            else:
-                qty = self._CalculateDOWDelta(wd, wkdy, 2,
-                                              self.ptc.DOWParseStyle,
-                                              self.ptc.CurrentDOWParseStyle)
-
-            target = start + datetime.timedelta(days=qty)
-            wd     = wkdy
-
-            sourceTime      = target.timetuple()
-            self.weekdyFlag = False
-
-        # Given string is a natural language time string like
-        # lunch, midnight, etc
-        if self.timeStrFlag:
-            if s in self.ptc.re_values['now']:
-                sourceTime = now
-            else:
-                sources = self.ptc.buildSources(sourceTime)
-
-                if s in sources:
-                    sourceTime = sources[s]
-                else:
-                    sourceTime    = now
-                    self.dateFlag = 0
-                    self.timeFlag = 0
-
-            self.timeStrFlag = False
-
-        # Given string is a natural language date string like today, tomorrow..
-        if self.dayStrFlag:
-            if sourceTime is None:
-                sourceTime = now
-
-            (yr, mth, dy, hr, mn, sec, wd, yd, isdst) = sourceTime
-
-            if s in self.ptc.dayOffsets:
-                offset = self.ptc.dayOffsets[s]
-            else:
-                offset = 0
-
-            start      = datetime.datetime(yr, mth, dy, 9, 0, 0)
-            target     = start + datetime.timedelta(days=offset)
-            sourceTime = target.timetuple()
-
-            self.dayStrFlag = False
-
-        # Given string is a time string with units like "5 hrs 30 min"
-        if self.unitsFlag:
-            modifier = ''  # TODO
-
-            if sourceTime is None:
-                sourceTime = now
-
-            m = self.ptc.CRE_UNITS.search(s)
-            if m is not None:
-                units    = m.group('units')
-                quantity = s[:m.start('units')]
-
-            sourceTime     = self._buildTime(sourceTime, quantity, modifier, units)
-            self.unitsFlag = False
-
-        # Given string is a time string with single char units like "5 h 30 m"
-        if self.qunitsFlag:
-            modifier = ''  # TODO
-
-            if sourceTime is None:
-                sourceTime = now
-
-            m = self.ptc.CRE_QUNITS.search(s)
-            if m is not None:
-                units    = m.group('qunits')
-                quantity = s[:m.start('qunits')]
-
-            sourceTime      = self._buildTime(sourceTime, quantity, modifier, units)
-            self.qunitsFlag = False
-
-          # Given string does not match anything
-        if sourceTime is None:
-            sourceTime    = now
-            self.dateFlag = 0
-            self.timeFlag = 0
-
-        return sourceTime
-
-
-    def parse(self, datetimeString, sourceTime=None):
-        """
-        Splits the given C{datetimeString} into tokens, finds the regex
-        patterns that match and then calculates a C{struct_time} value from
-        the chunks.
-
-        If C{sourceTime} is given then the C{struct_time} value will be
-        calculated from that value, otherwise from the current date/time.
-
-        If the C{datetimeString} is parsed and date/time value found then
-        the second item of the returned tuple will be a flag to let you know
-        what kind of C{struct_time} value is being returned::
-
-            0 = not parsed at all
-            1 = parsed as a C{date}
-            2 = parsed as a C{time}
-            3 = parsed as a C{datetime}
-
-        @type  datetimeString: string
-        @param datetimeString: date/time text to evaluate
-        @type  sourceTime:     struct_time
-        @param sourceTime:     C{struct_time} value to use as the base
-
-        @rtype:  tuple
-        @return: tuple of: modified C{sourceTime} and the result flag
-        """
-
-        if sourceTime:
-            if isinstance(sourceTime, datetime.datetime):
-                if _debug:
-                    print 'coercing datetime to timetuple'
-                sourceTime = sourceTime.timetuple()
-            else:
-                if not isinstance(sourceTime, time.struct_time) and \
-                   not isinstance(sourceTime, tuple):
-                    raise Exception('sourceTime is not a struct_time')
-
-        s         = datetimeString.strip().lower()
-        parseStr  = ''
-        totalTime = sourceTime
-
-        if s == '' :
-            if sourceTime is not None:
-                return (sourceTime, self.dateFlag + self.timeFlag)
-            else:
-                return (time.localtime(), 0)
-
-        self.timeFlag = 0
-        self.dateFlag = 0
-
-        while len(s) > 0:
-            flag   = False
-            chunk1 = ''
-            chunk2 = ''
-
-            if _debug:
-                print 'parse (top of loop): [%s][%s]' % (s, parseStr)
-
-            if parseStr == '':
-                # Modifier like next\prev..
-                m = self.ptc.CRE_MODIFIER.search(s)
-                if m is not None:
-                    self.modifierFlag = True
-                    if (m.group('modifier') != s):
-                        # capture remaining string
-                        parseStr = m.group('modifier')
-                        chunk1   = s[:m.start('modifier')].strip()
-                        chunk2   = s[m.end('modifier'):].strip()
-                        flag     = True
-                    else:
-                        parseStr = s
-
-            if parseStr == '':
-                # Modifier like from\after\prior..
-                m = self.ptc.CRE_MODIFIER2.search(s)
-                if m is not None:
-                    self.modifier2Flag = True
-                    if (m.group('modifier') != s):
-                        # capture remaining string
-                        parseStr = m.group('modifier')
-                        chunk1   = s[:m.start('modifier')].strip()
-                        chunk2   = s[m.end('modifier'):].strip()
-                        flag     = True
-                    else:
-                        parseStr = s
-
-            if parseStr == '':
-                valid_date = False
-                for match in self.ptc.CRE_DATE3.finditer(s):
-                    # to prevent "HH:MM(:SS)" time strings from triggering this
-                    # regex, we check whether the month field exists in the matched
-                    # expression; if it does not, the date field is not valid
-                    if match.group('mthname'):
-                        m = self.ptc.CRE_DATE3.search(s, match.start())
-                        valid_date = True
-                        break
-
-                # String date format
-                if valid_date:
-                    self.dateStrFlag = True
-                    self.dateFlag    = 1
-                    if (m.group('date') != s):
-                        # capture remaining string
-                        parseStr = m.group('date')
-                        chunk1   = s[:m.start('date')]
-                        chunk2   = s[m.end('date'):]
-                        s        = '%s %s' % (chunk1, chunk2)
-                        flag     = True
-                    else:
-                        parseStr = s
-
-            if parseStr == '':
-                # Standard date format
-                m = self.ptc.CRE_DATE.search(s)
-                if m is not None:
-                    self.dateStdFlag = True
-                    self.dateFlag    = 1
-                    if (m.group('date') != s):
-                        # capture remaining string
-                        parseStr = m.group('date')
-                        chunk1   = s[:m.start('date')]
-                        chunk2   = s[m.end('date'):]
-                        s        = '%s %s' % (chunk1, chunk2)
-                        flag     = True
-                    else:
-                        parseStr = s
-
-            if parseStr == '':
-                # Natural language day strings
-                m = self.ptc.CRE_DAY.search(s)
-                if m is not None:
-                    self.dayStrFlag = True
-                    self.dateFlag   = 1
-                    if (m.group('day') != s):
-                        # capture remaining string
-                        parseStr = m.group('day')
-                        chunk1   = s[:m.start('day')]
-                        chunk2   = s[m.end('day'):]
-                        s        = '%s %s' % (chunk1, chunk2)
-                        flag     = True
-                    else:
-                        parseStr = s
-
-            if parseStr == '':
-                # Quantity + Units
-                m = self.ptc.CRE_UNITS.search(s)
-                if m is not None:
-                    self.unitsFlag = True
-                    if (m.group('qty') != s):
-                        # capture remaining string
-                        parseStr = m.group('qty')
-                        chunk1   = s[:m.start('qty')].strip()
-                        chunk2   = s[m.end('qty'):].strip()
-
-                        if chunk1[-1:] == '-':
-                            parseStr = '-%s' % parseStr
-                            chunk1   = chunk1[:-1]
-
-                        s    = '%s %s' % (chunk1, chunk2)
-                        flag = True
-                    else:
-                        parseStr = s
-
-            if parseStr == '':
-                # Quantity + Units
-                m = self.ptc.CRE_QUNITS.search(s)
-                if m is not None:
-                    self.qunitsFlag = True
-
-                    if (m.group('qty') != s):
-                        # capture remaining string
-                        parseStr = m.group('qty')
-                        chunk1   = s[:m.start('qty')].strip()
-                        chunk2   = s[m.end('qty'):].strip()
-
-                        if chunk1[-1:] == '-':
-                            parseStr = '-%s' % parseStr
-                            chunk1   = chunk1[:-1]
-
-                        s    = '%s %s' % (chunk1, chunk2)
-                        flag = True
-                    else:
-                        parseStr = s 
-
-            if parseStr == '':
-                # Weekday
-                m = self.ptc.CRE_WEEKDAY.search(s)
-                if m is not None:
-                    gv = m.group('weekday')
-                    if s not in self.ptc.dayOffsets:
-                        self.weekdyFlag = True
-                        self.dateFlag   = 1
-                        if (gv != s):
-                            # capture remaining string
-                            parseStr = gv
-                            chunk1   = s[:m.start('weekday')]
-                            chunk2   = s[m.end('weekday'):]
-                            s        = '%s %s' % (chunk1, chunk2)
-                            flag     = True
-                        else:
-                            parseStr = s
-
-            if parseStr == '':
-                # Natural language time strings
-                m = self.ptc.CRE_TIME.search(s)
-                if m is not None:
-                    self.timeStrFlag = True
-                    self.timeFlag    = 2
-                    if (m.group('time') != s):
-                        # capture remaining string
-                        parseStr = m.group('time')
-                        chunk1   = s[:m.start('time')]
-                        chunk2   = s[m.end('time'):]
-                        s        = '%s %s' % (chunk1, chunk2)
-                        flag     = True
-                    else:
-                        parseStr = s
-
-            if parseStr == '':
-                # HH:MM(:SS) am/pm time strings
-                m = self.ptc.CRE_TIMEHMS2.search(s)
-                if m is not None:
-                    self.meridianFlag = True
-                    self.timeFlag     = 2
-                    if m.group('minutes') is not None:
-                        if m.group('seconds') is not None:
-                            parseStr = '%s:%s:%s %s' % (m.group('hours'),
-                                                        m.group('minutes'),
-                                                        m.group('seconds'),
-                                                        m.group('meridian'))
-                        else:
-                            parseStr = '%s:%s %s' % (m.group('hours'),
-                                                     m.group('minutes'),
-                                                     m.group('meridian'))
-                    else:
-                        parseStr = '%s %s' % (m.group('hours'),
-                                              m.group('meridian'))
-
-                    chunk1 = s[:m.start('hours')]
-                    chunk2 = s[m.end('meridian'):]
-
-                    s    = '%s %s' % (chunk1, chunk2)
-                    flag = True
-
-            if parseStr == '':
-                # HH:MM(:SS) time strings
-                m = self.ptc.CRE_TIMEHMS.search(s)
-                if m is not None:
-                    self.timeStdFlag = True
-                    self.timeFlag    = 2
-                    if m.group('seconds') is not None:
-                        parseStr = '%s:%s:%s' % (m.group('hours'),
-                                                 m.group('minutes'),
-                                                 m.group('seconds'))
-                        chunk1   = s[:m.start('hours')]
-                        chunk2   = s[m.end('seconds'):]
-                    else:
-                        parseStr = '%s:%s' % (m.group('hours'),
-                                              m.group('minutes'))
-                        chunk1   = s[:m.start('hours')]
-                        chunk2   = s[m.end('minutes'):]
-
-                    s    = '%s %s' % (chunk1, chunk2)
-                    flag = True
-
-            # if string does not match any regex, empty string to
-            # come out of the while loop
-            if not flag:
-                s = ''
-
-            if _debug:
-                print 'parse (bottom) [%s][%s][%s][%s]' % (s, parseStr, chunk1, chunk2)
-                print 'weekday %s, dateStd %s, dateStr %s, time %s, timeStr %s, meridian %s' % \
-                       (self.weekdyFlag, self.dateStdFlag, self.dateStrFlag, self.timeStdFlag, self.timeStrFlag, self.meridianFlag)
-                print 'dayStr %s, modifier %s, modifier2 %s, units %s, qunits %s' % \
-                       (self.dayStrFlag, self.modifierFlag, self.modifier2Flag, self.unitsFlag, self.qunitsFlag)
-
-            # evaluate the matched string
-            if parseStr != '':
-                if self.modifierFlag == True:
-                    t, totalTime = self._evalModifier(parseStr, chunk1, chunk2, totalTime)
-                    # t is the unparsed part of the chunks.
-                    # If it is not date/time, return current
-                    # totalTime as it is; else return the output
-                    # after parsing t.
-                    if (t != '') and (t != None):
-                        tempDateFlag       = self.dateFlag
-                        tempTimeFlag       = self.timeFlag
-                        (totalTime2, flag) = self.parse(t, totalTime)
-
-                        if flag == 0 and totalTime is not None:
-                            self.timeFlag = tempTimeFlag
-                            self.dateFlag = tempDateFlag
-
-                            return (totalTime, self.dateFlag + self.timeFlag)
-                        else:
-                            return (totalTime2, self.dateFlag + self.timeFlag)
-
-                elif self.modifier2Flag == True:
-                    totalTime, invalidFlag = self._evalModifier2(parseStr, chunk1, chunk2, totalTime)
-
-                    if invalidFlag == True:
-                        self.dateFlag = 0
-                        self.timeFlag = 0
-
-                else:
-                    totalTime = self._evalString(parseStr, totalTime)
-                    parseStr  = ''
-
-        # String is not parsed at all
-        if totalTime is None or totalTime == sourceTime:
-            totalTime     = time.localtime()
-            self.dateFlag = 0
-            self.timeFlag = 0
-
-        return (totalTime, self.dateFlag + self.timeFlag)
-
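A hedged usage sketch of the parse() entry point above (the import path is an assumption and may differ between the bundled copy removed here and the standalone package):

    from parsedatetime import Calendar   # assumption: top-level export
    cal = Calendar()
    timetuple, flag = cal.parse('tomorrow at 5pm')
    print(flag)   # 3: both a date ('tomorrow') and a time ('5pm') were found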
-
-    def inc(self, source, month=None, year=None):
-        """
-        Takes the given C{source} date, or current date if none is
-        passed, and increments it according to the values passed in
-        by month and/or year.
-
-        This routine is needed because Python's C{timedelta()} function
-        does not allow for month or year increments.
-
-        @type  source: datetime
-        @param source: C{datetime} value to increment
-        @type  month:  integer
-        @param month:  optional number of months to increment
-        @type  year:   integer
-        @param year:   optional number of years to increment
-
-        @rtype:  datetime
-        @return: C{source} incremented by the number of months and/or years
-        """
-        yr  = source.year
-        mth = source.month
-        dy  = source.day
-
-        if year:
-            try:
-                yi = int(year)
-            except ValueError:
-                yi = 0
-
-            yr += yi
-
-        if month:
-            try:
-                mi = int(month)
-            except ValueError:
-                mi = 0
-
-            m = abs(mi)
-            y = m / 12      # how many years are in month increment
-            m = m % 12      # get remaining months
-
-            if mi < 0:
-                mth = mth - m           # sub months from start month
-                if mth < 1:             # cross start-of-year?
-                    y   -= 1            #   yes - decrement year
-                    mth += 12           #         and fix month
-            else:
-                mth = mth + m           # add months to start month
-                if mth > 12:            # cross end-of-year?
-                    y   += 1            #   yes - increment year
-                    mth -= 12           #         and fix month
-
-            yr += y
-
-            # if the day ends up past the last day of
-            # the new month, set it to the last day
-            if dy > self.ptc.daysInMonth(mth, yr):
-                dy = self.ptc.daysInMonth(mth, yr)
-
-        d = source.replace(year=yr, month=mth, day=dy)
-
-        return source + (d - source)
-
+# Backward compatibility fix.
+from . import *
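A standard-library sketch (not from this changeset) of the month arithmetic in inc() above, including the end-of-month clamp; the helper name is an assumption:

    import calendar
    import datetime

    def add_months(d, months):
        # carry whole years, wrap the month, then clamp the day to the new month
        m     = d.month - 1 + months
        year  = d.year + m // 12
        month = m % 12 + 1
        day   = min(d.day, calendar.monthrange(year, month)[1])
        return d.replace(year=year, month=month, day=day)

    print(add_months(datetime.datetime(2006, 1, 31), 1))   # 2006-02-28 00:00:00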
--- a/MoinMoin/support/parsedatetime/parsedatetime_consts.py	Wed Sep 07 03:05:27 2016 +0200
+++ /dev/null	Thu Jan 01 00:00:00 1970 +0000
@@ -1,1119 +0,0 @@
-#!/usr/bin/env python
-
-"""
-parsedatetime constants and helper functions to determine
-regex values from Locale information if present.
-
-Also contains the internal Locale classes to give some sane
-defaults if PyICU is not found.
-"""
-
-__license__ = """
-Copyright (c) 2004-2008 Mike Taylor
-Copyright (c) 2006-2008 Darshana Chhajed
-Copyright (c)      2007 Bernd Zeimetz <bzed@debian.org>
-All rights reserved.
-
-Licensed under the Apache License, Version 2.0 (the "License");
-you may not use this file except in compliance with the License.
-You may obtain a copy of the License at
-
-   http://www.apache.org/licenses/LICENSE-2.0
-
-Unless required by applicable law or agreed to in writing, software
-distributed under the License is distributed on an "AS IS" BASIS,
-WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
-See the License for the specific language governing permissions and
-limitations under the License.
-"""
-
-try:
-    import PyICU as pyicu
-except:
-    pyicu = None
-
-
-import datetime
-import calendar
-import time
-import re
-
-
-class pdtLocale_en:
-    """
-    en_US Locale constants
-
-    This class will be used to initialize L{Constants} if PyICU is not located.
-
-    Defined as class variables are the lists and strings needed by parsedatetime
-    to evaluate strings for USA
-    """
-
-    localeID      = 'en_US'   # don't use a unicode string
-    dateSep       = [ u'/', u'.' ]
-    timeSep       = [ u':' ]
-    meridian      = [ u'AM', u'PM' ]
-    usesMeridian  = True
-    uses24        = False
-
-    Weekdays      = [ u'monday', u'tuesday', u'wednesday',
-                      u'thursday', u'friday', u'saturday', u'sunday',
-                    ]
-    shortWeekdays = [ u'mon', u'tues', u'wed',
-                      u'thu', u'fri', u'sat', u'sun',
-                    ]
-    Months        = [ u'january', u'february', u'march',
-                      u'april',   u'may',      u'june',
-                      u'july',    u'august',   u'september',
-                      u'october', u'november', u'december',
-                    ]
-    shortMonths   = [ u'jan', u'feb', u'mar',
-                      u'apr', u'may', u'jun',
-                      u'jul', u'aug', u'sep',
-                      u'oct', u'nov', u'dec',
-                    ]
-    dateFormats   = { 'full':   'EEEE, MMMM d, yyyy',
-                      'long':   'MMMM d, yyyy',
-                      'medium': 'MMM d, yyyy',
-                      'short':  'M/d/yy',
-                    }
-    timeFormats   = { 'full':   'h:mm:ss a z',
-                      'long':   'h:mm:ss a z',
-                      'medium': 'h:mm:ss a',
-                      'short':  'h:mm a',
-                    }
-
-    dp_order = [ u'm', u'd', u'y' ]
-
-      # this will be added to re_consts later
-    units = { 'seconds': [ 'second', 'sec' ],
-              'minutes': [ 'minute', 'min' ],
-              'hours':   [ 'hour',   'hr'  ],
-              'days':    [ 'day',    'dy'  ],
-              'weeks':   [ 'week',   'wk'  ],
-              'months':  [ 'month',  'mth' ],
-              'years':   [ 'year',   'yr'  ],
-            }
-
-      # text constants to be used by regex's later
-    re_consts     = { 'specials':       'in|on|of|at',
-                      'timeseperator':  ':',
-                      'rangeseperator': '-',
-                      'daysuffix':      'rd|st|nd|th',
-                      'meridian':       'am|pm|a.m.|p.m.|a|p',
-                      'qunits':         'h|m|s|d|w|m|y',
-                      'now':            [ 'now' ],
-                    }
-
-      # Used to adjust the returned date before/after the source
-    modifiers = { 'from':       1,
-                  'before':    -1,
-                  'after':      1,
-                  'ago':       -1,
-                  'prior':     -1,
-                  'prev':      -1,
-                  'last':      -1,
-                  'next':       1,
-                  'previous':  -1,
-                  'in a':       2,
-                  'end of':     0,
-                  'eod':        0,
-                  'eo':         0
-                }
-
-    dayoffsets = { 'tomorrow':   1,
-                   'today':      0,
-                   'yesterday': -1,
-                 }
-
-      # special days and/or times, e.g. lunch, noon, evening
-      # each element in the dictionary is a dictionary that is used
-      # to fill in any value to be replaced - the current date/time will
-      # already have been populated by the method buildSources
-    re_sources    = { 'noon':      { 'hr': 12, 'mn': 0, 'sec': 0 },
-                      'lunch':     { 'hr': 12, 'mn': 0, 'sec': 0 },
-                      'morning':   { 'hr':  6, 'mn': 0, 'sec': 0 },
-                      'breakfast': { 'hr':  8, 'mn': 0, 'sec': 0 },
-                      'dinner':    { 'hr': 19, 'mn': 0, 'sec': 0 },
-                      'evening':   { 'hr': 18, 'mn': 0, 'sec': 0 },
-                      'midnight':  { 'hr':  0, 'mn': 0, 'sec': 0 },
-                      'night':     { 'hr': 21, 'mn': 0, 'sec': 0 },
-                      'tonight':   { 'hr': 21, 'mn': 0, 'sec': 0 },
-                      'eod':       { 'hr': 17, 'mn': 0, 'sec': 0 },
-                    }
-
-
-class pdtLocale_au:
-    """
-    en_AU Locale constants
-
-    This class will be used to initialize L{Constants} if PyICU is not located.
-
-    Defined as class variables are the lists and strings needed by parsedatetime
-    to evaluate strings for Australia
-    """
-
-    localeID      = 'en_AU'   # don't use a unicode string
-    dateSep       = [ u'-', u'/' ]
-    timeSep       = [ u':' ]
-    meridian      = [ u'AM', u'PM' ]
-    usesMeridian  = True
-    uses24        = False
-
-    Weekdays      = [ u'monday', u'tuesday', u'wednesday',
-                      u'thursday', u'friday', u'saturday', u'sunday',
-                    ]
-    shortWeekdays = [ u'mon', u'tues', u'wed',
-                      u'thu', u'fri', u'sat', u'sun',
-                    ]
-    Months        = [ u'january', u'february', u'march',
-                      u'april',   u'may',      u'june',
-                      u'july',    u'august',   u'september',
-                      u'october', u'november', u'december',
-                    ]
-    shortMonths   = [ u'jan', u'feb', u'mar',
-                      u'apr', u'may', u'jun',
-                      u'jul', u'aug', u'sep',
-                      u'oct', u'nov', u'dec',
-                    ]
-    dateFormats   = { 'full':   'EEEE, d MMMM yyyy',
-                      'long':   'd MMMM yyyy',
-                      'medium': 'dd/MM/yyyy',
-                      'short':  'd/MM/yy',
-                    }
-    timeFormats   = { 'full':   'h:mm:ss a z',
-                      'long':   'h:mm:ss a',
-                      'medium': 'h:mm:ss a',
-                      'short':  'h:mm a',
-                    }
-
-    dp_order = [ u'd', u'm', u'y' ]
-
-      # this will be added to re_consts later
-    units = { 'seconds': [ 'second', 'sec' ],
-              'minutes': [ 'minute', 'min' ],
-              'hours':   [ 'hour',   'hr'  ],
-              'days':    [ 'day',    'dy'  ],
-              'weeks':   [ 'week',   'wk'  ],
-              'months':  [ 'month',  'mth' ],
-              'years':   [ 'year',   'yr'  ],
-            }
-
-      # text constants to be used by regexes later
-    re_consts     = { 'specials':       'in|on|of|at',
-                      'timeseperator':  ':',
-                      'rangeseperator': '-',
-                      'daysuffix':      'rd|st|nd|th',
-                      'meridian':       'am|pm|a.m.|p.m.|a|p',
-                      'qunits':         'h|m|s|d|w|m|y',
-                      'now':            [ 'now' ],
-                    }
-
-      # Used to adjust the returned date before/after the source
-    modifiers = { 'from':       1,
-                  'before':    -1,
-                  'after':      1,
-                  'ago':        1,
-                  'prior':     -1,
-                  'prev':      -1,
-                  'last':      -1,
-                  'next':       1,
-                  'previous':  -1,
-                  'in a':       2,
-                  'end of':     0,
-                  'eo':         0,
-                }
-
-    dayoffsets = { 'tomorrow':   1,
-                   'today':      0,
-                   'yesterday': -1,
-                 }
-
-      # special days and/or times, e.g. lunch, noon, evening
-      # each element in the dictionary is a dictionary that is used
-      # to fill in any value to be replaced - the current date/time will
-      # already have been populated by the method buildSources
-    re_sources    = { 'noon':      { 'hr': 12, 'mn': 0, 'sec': 0 },
-                      'lunch':     { 'hr': 12, 'mn': 0, 'sec': 0 },
-                      'morning':   { 'hr':  6, 'mn': 0, 'sec': 0 },
-                      'breakfast': { 'hr':  8, 'mn': 0, 'sec': 0 },
-                      'dinner':    { 'hr': 19, 'mn': 0, 'sec': 0 },
-                      'evening':   { 'hr': 18, 'mn': 0, 'sec': 0 },
-                      'midnight':  { 'hr':  0, 'mn': 0, 'sec': 0 },
-                      'night':     { 'hr': 21, 'mn': 0, 'sec': 0 },
-                      'tonight':   { 'hr': 21, 'mn': 0, 'sec': 0 },
-                      'eod':       { 'hr': 17, 'mn': 0, 'sec': 0 },
-                    }
-
-
-class pdtLocale_es:
-    """
-    es Locale constants
-
-    This class will be used to initialize L{Constants} if PyICU is not located.
-
-    Defined as class variables are the lists and strings needed by parsedatetime
-    to evaluate strings in Spanish
-
-    Note that I don't speak Spanish so many of the items below are still in English
-    """
-
-    localeID      = 'es'   # don't use a unicode string
-    dateSep       = [ u'/' ]
-    timeSep       = [ u':' ]
-    meridian      = []
-    usesMeridian  = False
-    uses24        = True
-
-    Weekdays      = [ u'lunes', u'martes', u'mi\xe9rcoles',
-                      u'jueves', u'viernes', u's\xe1bado', u'domingo',
-                    ]
-    shortWeekdays = [ u'lun', u'mar', u'mi\xe9',
-                      u'jue', u'vie', u's\xe1b', u'dom',
-                    ]
-    Months        = [ u'enero', u'febrero', u'marzo',
-                      u'abril', u'mayo', u'junio',
-                      u'julio', u'agosto', u'septiembre',
-                      u'octubre', u'noviembre', u'diciembre'
-                    ]
-    shortMonths   = [ u'ene', u'feb', u'mar',
-                      u'abr', u'may', u'jun',
-                      u'jul', u'ago', u'sep',
-                      u'oct', u'nov', u'dic'
-                    ]
-    dateFormats   = { 'full':   "EEEE d' de 'MMMM' de 'yyyy",
-                      'long':   "d' de 'MMMM' de 'yyyy",
-                      'medium': "dd-MMM-yy",
-                      'short':  "d/MM/yy",
-                    }
-    timeFormats   = { 'full':   "HH'H'mm' 'ss z",
-                      'long':   "HH:mm:ss z",
-                      'medium': "HH:mm:ss",
-                      'short':  "HH:mm",
-                    }
-
-    dp_order = [ u'd', u'm', u'y' ]
-
-      # this will be added to re_consts later
-    units = { 'seconds': [ 'second', 'sec' ],
-              'minutes': [ 'minute', 'min' ],
-              'hours':   [ 'hour',   'hr'  ],
-              'days':    [ 'day',    'dy'  ],
-              'weeks':   [ 'week',   'wk'  ],
-              'months':  [ 'month',  'mth' ],
-              'years':   [ 'year',   'yr'  ],
-            }
-
-      # text constants to be used by regexes later
-    re_consts     = { 'specials':       'in|on|of|at',
-                      'timeseperator':  timeSep,
-                      'dateseperator':  dateSep,
-                      'rangeseperator': '-',
-                      'daysuffix':      'rd|st|nd|th',
-                      'qunits':         'h|m|s|d|w|m|y',
-                      'now':            [ 'now' ],
-                    }
-
-      # Used to adjust the returned date before/after the source
-    modifiers = { 'from':      1,
-                  'before':   -1,
-                  'after':     1,
-                  'ago':       1,
-                  'prior':    -1,
-                  'prev':     -1,
-                  'last':     -1,
-                  'next':      1,
-                  'previous': -1,
-                  'in a':      2,
-                  'end of':    0,
-                  'eo':        0,
-                }
-
-    dayoffsets = { 'tomorrow':   1,
-                   'today':      0,
-                   'yesterday': -1,
-                 }
-
-      # special days and/or times, e.g. lunch, noon, evening
-      # each element in the dictionary is a dictionary that is used
-      # to fill in any value to be replaced - the current date/time will
-      # already have been populated by the method buildSources
-    re_sources    = { 'noon':      { 'hr': 12, 'mn': 0, 'sec': 0 },
-                      'lunch':     { 'hr': 12, 'mn': 0, 'sec': 0 },
-                      'morning':   { 'hr':  6, 'mn': 0, 'sec': 0 },
-                      'breakfast': { 'hr':  8, 'mn': 0, 'sec': 0 },
-                      'dinner':    { 'hr': 19, 'mn': 0, 'sec': 0 },
-                      'evening':   { 'hr': 18, 'mn': 0, 'sec': 0 },
-                      'midnight':  { 'hr':  0, 'mn': 0, 'sec': 0 },
-                      'night':     { 'hr': 21, 'mn': 0, 'sec': 0 },
-                      'tonight':   { 'hr': 21, 'mn': 0, 'sec': 0 },
-                      'eod':       { 'hr': 17, 'mn': 0, 'sec': 0 },
-                    }
-
-
-class pdtLocale_de:
-    """
-    de_DE Locale constants
-
-    This class will be used to initialize L{Constants} if PyICU is not located.
-
-    Contributed by Debian parsedatetime package maintainer Bernd Zeimetz <bzed@debian.org>
-
-    Defined as class variables are the lists and strings needed by parsedatetime
-    to evaluate strings for German
-    """
-
-    localeID      = 'de_DE'   # don't use a unicode string
-    dateSep       = [ u'.' ]
-    timeSep       = [ u':' ]
-    meridian      = [ ]
-    usesMeridian  = False
-    uses24        = True
-
-    Weekdays      = [ u'montag', u'dienstag', u'mittwoch',
-                      u'donnerstag', u'freitag', u'samstag', u'sonntag',
-                    ]
-    shortWeekdays = [ u'mo', u'di', u'mi',
-                      u'do', u'fr', u'sa', u'so',
-                    ]
-    Months        = [ u'januar',  u'februar',  u'm\xe4rz',
-                      u'april',   u'mai',      u'juni',
-                      u'juli',    u'august',   u'september',
-                      u'oktober', u'november', u'dezember',
-                    ]
-    shortMonths   = [ u'jan', u'feb', u'mrz',
-                      u'apr', u'mai', u'jun',
-                      u'jul', u'aug', u'sep',
-                      u'okt', u'nov', u'dez',
-                    ]
-    dateFormats   = { 'full':   u'EEEE, d. MMMM yyyy',
-                      'long':   u'd. MMMM yyyy',
-                      'medium': u'dd.MM.yyyy',
-                      'short':  u'dd.MM.yy'
-                    }
-
-    timeFormats   = { 'full':   u'HH:mm:ss v',
-                      'long':   u'HH:mm:ss z',
-                      'medium': u'HH:mm:ss',
-                      'short':  u'HH:mm'
-                    }
-
-    dp_order = [ u'd', u'm', u'y' ]
-
-      # this will be added to re_consts later
-    units = { 'seconds': [ 'sekunden', 'sek',  's' ],
-              'minutes': [ 'minuten',  'min' , 'm' ],
-              'hours':   [ 'stunden',  'std',  'h' ],
-              'days':    [ 'tage',     't' ],
-              'weeks':   [ 'wochen',   'w' ],
-              'months':  [ 'monate' ], #the short version would be a capital M,
-                                       #as I understand it we can't distinguish
-                                       #between m for minutes and M for months.
-              'years':   [ 'jahre',    'j' ],
-            }
-
-      # text constants to be used by regexes later
-    re_consts     = { 'specials':       'am|dem|der|im|in|den|zum',
-                      'timeseperator':  ':',
-                      'rangeseperator': '-',
-                      'daysuffix':      '',
-                      'qunits':         'h|m|s|t|w|m|j',
-                      'now':            [ 'jetzt' ],
-                    }
-
-      # Used to adjust the returned date before/after the source
-      # still looking for insight on how to translate all of them to German.
-    modifiers = { u'from':         1,
-                  u'before':      -1,
-                  u'after':        1,
-                  u'vergangener': -1,
-                  u'vorheriger':  -1,
-                  u'prev':        -1,
-                  u'letzter':     -1,
-                  u'n\xe4chster':  1,
-                  u'dieser':       0,
-                  u'previous':    -1,
-                  u'in a':         2,
-                  u'end of':       0,
-                  u'eod':          0,
-                  u'eo':           0,
-                }
-
-     #morgen/abermorgen does not work, see http://code.google.com/p/parsedatetime/issues/detail?id=19
-    dayoffsets = { u'morgen':        1,
-                   u'heute':         0,
-                   u'gestern':      -1,
-                   u'vorgestern':   -2,
-                   u'\xfcbermorgen': 2,
-                 }
-
-      # special days and/or times, e.g. lunch, noon, evening
-      # each element in the dictionary is a dictionary that is used
-      # to fill in any value to be replaced - the current date/time will
-      # already have been populated by the method buildSources
-    re_sources    = { u'mittag':      { 'hr': 12, 'mn': 0, 'sec': 0 },
-                      u'mittags':     { 'hr': 12, 'mn': 0, 'sec': 0 },
-                      u'mittagessen': { 'hr': 12, 'mn': 0, 'sec': 0 },
-                      u'morgen':      { 'hr':  6, 'mn': 0, 'sec': 0 },
-                      u'morgens':     { 'hr':  6, 'mn': 0, 'sec': 0 },
-                      u'fr\xfchst\xfcck': { 'hr':  8, 'mn': 0, 'sec': 0 },
-                      u'abendessen':  { 'hr': 19, 'mn': 0, 'sec': 0 },
-                      u'abend':       { 'hr': 18, 'mn': 0, 'sec': 0 },
-                      u'abends':      { 'hr': 18, 'mn': 0, 'sec': 0 },
-                      u'mitternacht': { 'hr':  0, 'mn': 0, 'sec': 0 },
-                      u'nacht':       { 'hr': 21, 'mn': 0, 'sec': 0 },
-                      u'nachts':      { 'hr': 21, 'mn': 0, 'sec': 0 },
-                      u'heute abend': { 'hr': 21, 'mn': 0, 'sec': 0 },
-                      u'heute nacht': { 'hr': 21, 'mn': 0, 'sec': 0 },
-                      u'feierabend':  { 'hr': 17, 'mn': 0, 'sec': 0 },
-                    }
-
-
-pdtLocales = { 'en_US': pdtLocale_en,
-               'en_AU': pdtLocale_au,
-               'es_ES': pdtLocale_es,
-               'de_DE': pdtLocale_de,
-             }
-
-
-def _initLocale(ptc):
-    """
-    Helper function to initialize the different lists and strings
-    from either PyICU or one of the internal pdt Locales and store
-    them into ptc.
-    """
-
-    def lcase(x):
-        return x.lower()
-
-    if pyicu and ptc.usePyICU:
-        ptc.icuLocale = None
-
-        if ptc.localeID is not None:
-            ptc.icuLocale = pyicu.Locale(ptc.localeID)
-
-        if ptc.icuLocale is None:
-            for id in range(0, len(ptc.fallbackLocales)):
-                ptc.localeID  = ptc.fallbackLocales[id]
-                ptc.icuLocale = pyicu.Locale(ptc.localeID)
-
-                if ptc.icuLocale is not None:
-                    break
-
-        ptc.icuSymbols = pyicu.DateFormatSymbols(ptc.icuLocale)
-
-          # grab ICU list of weekdays, skipping first entry which
-          # is always blank
-        wd  = map(lcase, ptc.icuSymbols.getWeekdays()[1:])
-        swd = map(lcase, ptc.icuSymbols.getShortWeekdays()[1:])
-
-          # store them in our list with Monday first (ICU puts Sunday first)
-        ptc.Weekdays      = wd[1:] + wd[0:1]
-        ptc.shortWeekdays = swd[1:] + swd[0:1]
-        ptc.Months        = map(lcase, ptc.icuSymbols.getMonths())
-        ptc.shortMonths   = map(lcase, ptc.icuSymbols.getShortMonths())
-
-          # not quite sure how to init this, so for now
-          # set it to None so that the en_US defaults will be used
-        ptc.re_consts   = None
-        ptc.icu_df      = { 'full':   pyicu.DateFormat.createDateInstance(pyicu.DateFormat.kFull,   ptc.icuLocale),
-                            'long':   pyicu.DateFormat.createDateInstance(pyicu.DateFormat.kLong,   ptc.icuLocale),
-                            'medium': pyicu.DateFormat.createDateInstance(pyicu.DateFormat.kMedium, ptc.icuLocale),
-                            'short':  pyicu.DateFormat.createDateInstance(pyicu.DateFormat.kShort,  ptc.icuLocale),
-                          }
-        ptc.icu_tf      = { 'full':   pyicu.DateFormat.createTimeInstance(pyicu.DateFormat.kFull,   ptc.icuLocale),
-                            'long':   pyicu.DateFormat.createTimeInstance(pyicu.DateFormat.kLong,   ptc.icuLocale),
-                            'medium': pyicu.DateFormat.createTimeInstance(pyicu.DateFormat.kMedium, ptc.icuLocale),
-                            'short':  pyicu.DateFormat.createTimeInstance(pyicu.DateFormat.kShort,  ptc.icuLocale),
-                          }
-        ptc.dateFormats = { 'full':   ptc.icu_df['full'].toPattern(),
-                            'long':   ptc.icu_df['long'].toPattern(),
-                            'medium': ptc.icu_df['medium'].toPattern(),
-                            'short':  ptc.icu_df['short'].toPattern(),
-                          }
-        ptc.timeFormats = { 'full':   ptc.icu_tf['full'].toPattern(),
-                            'long':   ptc.icu_tf['long'].toPattern(),
-                            'medium': ptc.icu_tf['medium'].toPattern(),
-                            'short':  ptc.icu_tf['short'].toPattern(),
-                          }
-    else:
-        if not ptc.localeID in pdtLocales:
-            for id in range(0, len(ptc.fallbackLocales)):
-                ptc.localeID  = ptc.fallbackLocales[id]
-
-                if ptc.localeID in pdtLocales:
-                    break
-
-        ptc.locale   = pdtLocales[ptc.localeID]
-        ptc.usePyICU = False
-
-        ptc.Weekdays      = ptc.locale.Weekdays
-        ptc.shortWeekdays = ptc.locale.shortWeekdays
-        ptc.Months        = ptc.locale.Months
-        ptc.shortMonths   = ptc.locale.shortMonths
-        ptc.dateFormats   = ptc.locale.dateFormats
-        ptc.timeFormats   = ptc.locale.timeFormats
-
-      # these values are used to set up the various bits
-      # of the regex values used to parse
-      #
-      # check if a local set of constants has been
-      # provided; if not, use en_US as the default
-    if ptc.localeID in pdtLocales:
-        ptc.re_sources = pdtLocales[ptc.localeID].re_sources
-        ptc.re_values  = pdtLocales[ptc.localeID].re_consts
-
-        units = pdtLocales[ptc.localeID].units
-
-        ptc.Modifiers  = pdtLocales[ptc.localeID].modifiers
-        ptc.dayOffsets = pdtLocales[ptc.localeID].dayoffsets
-
-          # for now, pull over any missing keys from the US set
-        for key in pdtLocales['en_US'].re_consts:
-            if not key in ptc.re_values:
-                ptc.re_values[key] = pdtLocales['en_US'].re_consts[key]
-    else:
-        ptc.re_sources = pdtLocales['en_US'].re_sources
-        ptc.re_values  = pdtLocales['en_US'].re_consts
-        ptc.Modifiers  = pdtLocales['en_US'].modifiers
-        ptc.dayOffsets = pdtLocales['en_US'].dayoffsets
-        units          = pdtLocales['en_US'].units
-
-      # escape any regex special characters that may be found
-    wd   = tuple(map(re.escape, ptc.Weekdays))
-    swd  = tuple(map(re.escape, ptc.shortWeekdays))
-    mth  = tuple(map(re.escape, ptc.Months))
-    smth = tuple(map(re.escape, ptc.shortMonths))
-
-    ptc.re_values['months']      = '%s|%s|%s|%s|%s|%s|%s|%s|%s|%s|%s|%s' % mth
-    ptc.re_values['shortmonths'] = '%s|%s|%s|%s|%s|%s|%s|%s|%s|%s|%s|%s' % smth
-    ptc.re_values['days']        = '%s|%s|%s|%s|%s|%s|%s' % wd
-    ptc.re_values['shortdays']   = '%s|%s|%s|%s|%s|%s|%s' % swd
-
-    l = []
-    for unit in units:
-        l.append('|'.join(units[unit]))
-
-    ptc.re_values['units'] = '|'.join(l)
-    ptc.Units              = ptc.re_values['units'].split('|')
-
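# A standalone sketch of the escaping/joining _initLocale performs above, using
# trimmed lists (variable names here are illustrative; the real code feeds the
# full locale tables through the same steps):
import re

months = ['january', 'february', 'march']
months_alt = '|'.join(map(re.escape, months))             # 'january|february|march'

units = {'hours': ['hour', 'hr'], 'days': ['day', 'dy']}
units_alt = '|'.join('|'.join(units[u]) for u in units)   # e.g. 'hour|hr|day|dy'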
-
-def _initSymbols(ptc):
-    """
-    Helper function to initialize the single character constants
-    and other symbols needed.
-    """
-    ptc.timeSep  = [ u':' ]
-    ptc.dateSep  = [ u'/' ]
-    ptc.meridian = [ u'AM', u'PM' ]
-
-    ptc.usesMeridian = True
-    ptc.uses24       = False
-
-    if pyicu and ptc.usePyICU:
-        am = u''
-        pm = u''
-        ts = ''
-
-        # ICU doesn't seem to directly provide the
-        # date or time separator - so we have to
-        # figure it out
-        o = ptc.icu_tf['short']
-        s = ptc.timeFormats['short']
-
-        ptc.usesMeridian = u'a' in s
-        ptc.uses24       = u'H' in s
-
-        # '11:45 AM' or '11:45'
-        s = o.format(datetime.datetime(2003, 10, 30, 11, 45))
-
-        # ': AM' or ':'
-        s = s.replace('11', '').replace('45', '')
-
-        if len(s) > 0:
-            ts = s[0]
-
-        if ptc.usesMeridian:
-            # '11:45 PM' or '23:45'
-            am = s[1:].strip()
-            s  = o.format(datetime.datetime(2003, 10, 30, 23, 45))
-
-            if ptc.uses24:
-                s = s.replace('23', '')
-            else:
-                s = s.replace('11', '')
-
-            # 'PM' or ''
-            pm = s.replace('45', '').replace(ts, '').strip()
-
-        ptc.timeSep  = [ ts ]
-        ptc.meridian = [ am, pm ]
-
-        o = ptc.icu_df['short']
-        s = o.format(datetime.datetime(2003, 10, 30, 11, 45))
-        s = s.replace('10', '').replace('30', '').replace('03', '').replace('2003', '')
-
-        if len(s) > 0:
-            ds = s[0]
-        else:
-            ds = '/'
-
-        ptc.dateSep = [ ds ]
-        s           = ptc.dateFormats['short']
-        l           = s.lower().split(ds)
-        dp_order    = []
-
-        for s in l:
-            if len(s) > 0:
-                dp_order.append(s[:1])
-
-        ptc.dp_order = dp_order
-    else:
-        ptc.timeSep      = ptc.locale.timeSep
-        ptc.dateSep      = ptc.locale.dateSep
-        ptc.meridian     = ptc.locale.meridian
-        ptc.usesMeridian = ptc.locale.usesMeridian
-        ptc.uses24       = ptc.locale.uses24
-        ptc.dp_order     = ptc.locale.dp_order
-
-      # build am and pm lists to contain
-      # original case, lowercase and first-char
-      # versions of the meridian text
-
-    if len(ptc.meridian) > 0:
-        am     = ptc.meridian[0]
-        ptc.am = [ am ]
-
-        if len(am) > 0:
-            ptc.am.append(am[0])
-            am = am.lower()
-            ptc.am.append(am)
-            ptc.am.append(am[0])
-    else:
-        am     = ''
-        ptc.am = [ '', '' ]
-
-    if len(ptc.meridian) > 1:
-        pm     = ptc.meridian[1]
-        ptc.pm = [ pm ]
-
-        if len(pm) > 0:
-            ptc.pm.append(pm[0])
-            pm = pm.lower()
-            ptc.pm.append(pm)
-            ptc.pm.append(pm[0])
-    else:
-        pm     = ''
-        ptc.pm = [ '', '' ]
-
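# A standalone illustration of the separator/meridian discovery above; it is
# just string surgery on a sample time formatted by ICU (the literal values
# here are illustrative):
s = '11:45 AM'                                # what icu_tf['short'] might produce
s = s.replace('11', '').replace('45', '')     # ': AM'
ts = s[0]                                     # ':'  -> becomes timeSep
am = s[1:].strip()                            # 'AM' -> first meridian entry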
-
-def _initPatterns(ptc):
-    """
-    Helper function to take the different localized bits from ptc and
-    create the regex strings.
-    """
-    # TODO add code to parse the date formats and build the regexes up from sub-parts
-    # TODO find all hard-coded uses of date/time seperators
-
-    ptc.RE_DATE4     = r'''(?P<date>(((?P<day>\d\d?)(?P<suffix>%(daysuffix)s)?(,)?(\s)?)
-                                      (?P<mthname>(%(months)s|%(shortmonths)s))\s?
-                                      (?P<year>\d\d(\d\d)?)?
-                                    )
-                           )''' % ptc.re_values
-
-    # I refactored DATE3 to fix Issue 16 http://code.google.com/p/parsedatetime/issues/detail?id=16
-    # I suspect the final line was for a trailing time - but testing shows it's not needed
-    # ptc.RE_DATE3     = r'''(?P<date>((?P<mthname>(%(months)s|%(shortmonths)s))\s?
-    #                                  ((?P<day>\d\d?)(\s?|%(daysuffix)s|$)+)?
-    #                                  (,\s?(?P<year>\d\d(\d\d)?))?))
-    #                        (\s?|$|[^0-9a-zA-Z])''' % ptc.re_values
-    ptc.RE_DATE3     = r'''(?P<date>(
-                                     (((?P<mthname>(%(months)s|%(shortmonths)s))|
-                                     ((?P<day>\d\d?)(?P<suffix>%(daysuffix)s)?))(\s)?){1,2}
-                                     ((,)?(\s)?(?P<year>\d\d(\d\d)?))?
-                                    )
-                           )''' % ptc.re_values
-    ptc.RE_MONTH     = r'''(\s?|^)
-                           (?P<month>(
-                                      (?P<mthname>(%(months)s|%(shortmonths)s))
-                                      (\s?(?P<year>(\d\d\d\d)))?
-                                     ))
-                           (\s?|$|[^0-9a-zA-Z])''' % ptc.re_values
-    ptc.RE_WEEKDAY   = r'''(\s?|^)
-                           (?P<weekday>(%(days)s|%(shortdays)s))
-                           (\s?|$|[^0-9a-zA-Z])''' % ptc.re_values
-
-    ptc.RE_SPECIAL   = r'(?P<special>^[%(specials)s]+)\s+' % ptc.re_values
-    ptc.RE_UNITS     = r'''(?P<qty>(-?\d+\s*
-                                    (?P<units>((%(units)s)s?))
-                                   ))''' % ptc.re_values
-    ptc.RE_QUNITS    = r'''(?P<qty>(-?\d+\s?
-                                    (?P<qunits>%(qunits)s)
-                                    (\s?|,|$)
-                                   ))''' % ptc.re_values
-    ptc.RE_MODIFIER  = r'''(\s?|^)
-                           (?P<modifier>
-                            (previous|prev|last|next|eod|eo|(end\sof)|(in\sa)))''' % ptc.re_values
-    ptc.RE_MODIFIER2 = r'''(\s?|^)
-                           (?P<modifier>
-                            (from|before|after|ago|prior))
-                           (\s?|$|[^0-9a-zA-Z])''' % ptc.re_values
-    ptc.RE_TIMEHMS   = r'''(\s?|^)
-                           (?P<hours>\d\d?)
-                           (?P<tsep>%(timeseperator)s|)
-                           (?P<minutes>\d\d)
-                           (?:(?P=tsep)(?P<seconds>\d\d(?:[.,]\d+)?))?''' % ptc.re_values
-    ptc.RE_TIMEHMS2  = r'''(?P<hours>(\d\d?))
-                           ((?P<tsep>%(timeseperator)s|)
-                            (?P<minutes>(\d\d?))
-                            (?:(?P=tsep)
-                               (?P<seconds>\d\d?
-                                (?:[.,]\d+)?))?)?''' % ptc.re_values
-
-    if 'meridian' in ptc.re_values:
-        ptc.RE_TIMEHMS2 += r'\s?(?P<meridian>(%(meridian)s))' % ptc.re_values
-
-    dateSeps = ''.join(ptc.dateSep) + '.'
-
-    ptc.RE_DATE      = r'''(\s?|^)
-                           (?P<date>(\d\d?[%s]\d\d?([%s]\d\d(\d\d)?)?))
-                           (\s?|$|[^0-9a-zA-Z])''' % (dateSeps, dateSeps)
-    ptc.RE_DATE2     = r'[%s]' % dateSeps
-    ptc.RE_DAY       = r'''(\s?|^)
-                           (?P<day>(today|tomorrow|yesterday))
-                           (\s?|$|[^0-9a-zA-Z])''' % ptc.re_values
-    ptc.RE_DAY2      = r'''(?P<day>\d\d?)|(?P<suffix>%(daysuffix)s)
-                        ''' % ptc.re_values
-    ptc.RE_TIME      = r'''(\s?|^)
-                           (?P<time>(morning|breakfast|noon|lunch|evening|midnight|tonight|dinner|night|now))
-                           (\s?|$|[^0-9a-zA-Z])''' % ptc.re_values
-    ptc.RE_REMAINING = r'\s+'
-
-    # Regex for date/time ranges
-    ptc.RE_RTIMEHMS  = r'''(\s?|^)
-                           (\d\d?)%(timeseperator)s
-                           (\d\d)
-                           (%(timeseperator)s(\d\d))?
-                           (\s?|$)''' % ptc.re_values
-    ptc.RE_RTIMEHMS2 = r'''(\s?|^)
-                           (\d\d?)
-                           (%(timeseperator)s(\d\d?))?
-                           (%(timeseperator)s(\d\d?))?''' % ptc.re_values
-
-    if 'meridian' in ptc.re_values:
-        ptc.RE_RTIMEHMS2 += r'\s?(%(meridian)s)' % ptc.re_values
-
-    ptc.RE_RDATE  = r'(\d+([%s]\d+)+)' % dateSeps
-    ptc.RE_RDATE3 = r'''((((%(months)s))\s?
-                         ((\d\d?)
-                          (\s?|%(daysuffix)s|$)+)?
-                         (,\s?\d\d\d\d)?))''' % ptc.re_values
-
-    # "06/07/06 - 08/09/06"
-    ptc.DATERNG1 = ptc.RE_RDATE + r'\s?%(rangeseperator)s\s?' + ptc.RE_RDATE
-    ptc.DATERNG1 = ptc.DATERNG1 % ptc.re_values
-
-    # "march 31 - june 1st, 2006"
-    ptc.DATERNG2 = ptc.RE_RDATE3 + r'\s?%(rangeseperator)s\s?' + ptc.RE_RDATE3
-    ptc.DATERNG2 = ptc.DATERNG2 % ptc.re_values
-
-    # "march 1rd -13th"
-    ptc.DATERNG3 = ptc.RE_RDATE3 + r'\s?%(rangeseperator)s\s?(\d\d?)\s?(rd|st|nd|th)?'
-    ptc.DATERNG3 = ptc.DATERNG3 % ptc.re_values
-
-    # "4:00:55 pm - 5:90:44 am", '4p-5p'
-    ptc.TIMERNG1 = ptc.RE_RTIMEHMS2 + r'\s?%(rangeseperator)s\s?' + ptc.RE_RTIMEHMS2
-    ptc.TIMERNG1 = ptc.TIMERNG1 % ptc.re_values
-
-    # "4:00 - 5:90 ", "4:55:55-3:44:55"
-    ptc.TIMERNG2 = ptc.RE_RTIMEHMS + r'\s?%(rangeseperator)s\s?' + ptc.RE_RTIMEHMS
-    ptc.TIMERNG2 = ptc.TIMERNG2 % ptc.re_values
-
-    # "4-5pm "
-    ptc.TIMERNG3 = r'\d\d?\s?%(rangeseperator)s\s?' + ptc.RE_RTIMEHMS2
-    ptc.TIMERNG3 = ptc.TIMERNG3 % ptc.re_values
-
-    # "4:30-5pm "
-    ptc.TIMERNG4 = ptc.RE_RTIMEHMS + r'\s?%(rangeseperator)s\s?' + ptc.RE_RTIMEHMS2
-    ptc.TIMERNG4 = ptc.TIMERNG4 % ptc.re_values
-
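# The RE_* strings above are plain %-templates expanded against ptc.re_values
# and compiled later with re.VERBOSE; a self-contained example using RE_UNITS
# (the trimmed re_values dict is illustrative):
import re

re_values = {'units': 'hour|hr|minute|min|second|sec'}
RE_UNITS = r'''(?P<qty>(-?\d+\s*
                        (?P<units>((%(units)s)s?))
                       ))''' % re_values

m = re.search(RE_UNITS, '5 minutes', re.IGNORECASE + re.VERBOSE)
assert m.group('units') == 'minutes'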
-
-def _initConstants(ptc):
-    """
-    Create localized versions of the units, week and month names
-    """
-      # build weekday offsets - yes, it assumes the Weekday and shortWeekday
-      # lists are in the same order and Mon..Sun (Python style)
-    ptc.WeekdayOffsets = {}
-
-    o = 0
-    for key in ptc.Weekdays:
-        ptc.WeekdayOffsets[key] = o
-        o += 1
-    o = 0
-    for key in ptc.shortWeekdays:
-        ptc.WeekdayOffsets[key] = o
-        o += 1
-
-      # build month offsets - yes, it assumes the Months and shortMonths
-      # lists are in the same order and Jan..Dec
-    ptc.MonthOffsets = {}
-
-    o = 1
-    for key in ptc.Months:
-        ptc.MonthOffsets[key] = o
-        o += 1
-    o = 1
-    for key in ptc.shortMonths:
-        ptc.MonthOffsets[key] = o
-        o += 1
-
-    # ptc.DaySuffixes = ptc.re_consts['daysuffix'].split('|')
-
-
-class Constants:
-    """
-    Default set of constants for parsedatetime.
-
-    If PyICU is present, then the class will first try to get PyICU
-    to return a locale specified by C{localeID}.  If either C{localeID} is
-    None or if the locale does not exist within PyICU, then each of the
-    locales defined in C{fallbackLocales} is tried in order.
-
-    If PyICU is not present or none of the specified locales can be used,
-    then the class will initialize itself to the en_US locale.
-
-    If PyICU is not present or not requested, only the locales defined by
-    C{pdtLocales} will be searched.
-    """
-    def __init__(self, localeID=None, usePyICU=True, fallbackLocales=['en_US']):
-        self.localeID        = localeID
-        self.fallbackLocales = fallbackLocales
-
-        if 'en_US' not in self.fallbackLocales:
-            self.fallbackLocales.append('en_US')
-
-          # define non-locale specific constants
-
-        self.locale   = None
-        self.usePyICU = usePyICU
-
-        # starting cache of leap years
-        # daysInMonth will add to this if during
-        # runtime it gets a request for a year not found
-        self._leapYears = [ 1904, 1908, 1912, 1916, 1920, 1924, 1928, 1932, 1936, 1940, 1944,
-                            1948, 1952, 1956, 1960, 1964, 1968, 1972, 1976, 1980, 1984, 1988,
-                            1992, 1996, 2000, 2004, 2008, 2012, 2016, 2020, 2024, 2028, 2032,
-                            2036, 2040, 2044, 2048, 2052, 2056, 2060, 2064, 2068, 2072, 2076,
-                            2080, 2084, 2088, 2092, 2096 ]
-
-        self.Second =   1
-        self.Minute =  60 * self.Second
-        self.Hour   =  60 * self.Minute
-        self.Day    =  24 * self.Hour
-        self.Week   =   7 * self.Day
-        self.Month  =  30 * self.Day
-        self.Year   = 365 * self.Day
-
-        self.rangeSep = u'-'
-
-        self._DaysInMonthList = (31, 28, 31, 30, 31, 30, 31, 31, 30, 31, 30, 31)
-
-        self.BirthdayEpoch = 50
-
-        # DOWParseStyle controls how we parse "Tuesday"
-        # If the current day was Thursday and the text to parse is "Tuesday"
-        # then the following table shows how each style would be returned
-        # -1, 0, +1
-        #
-        # Current day marked as ***
-        #
-        #          Sun Mon Tue Wed Thu Fri Sat
-        # week -1
-        # current         -1,0     ***
-        # week +1          +1
-        #
-        # If the current day was Monday and the text to parse is "Tuesday"
-        # then the following table shows how each style would be returned
-        # -1, 0, +1
-        #
-        #          Sun Mon Tue Wed Thu Fri Sat
-        # week -1           -1
-        # current      *** 0,+1
-        # week +1
-
-        self.DOWParseStyle = 1
-
-        # CurrentDOWParseStyle controls how we parse "Friday"
-        # If the current day was Friday and the text to parse is "Friday"
-        # then the following table shows how each style would be returned
-        # True/False. This also depends on DOWParseStyle.
-        #
-        # Current day marked as ***
-        #
-        # DOWParseStyle = 0
-        #          Sun Mon Tue Wed Thu Fri Sat
-        # week -1
-        # current                      T,F
-        # week +1
-        #
-        # DOWParseStyle = -1
-        #          Sun Mon Tue Wed Thu Fri Sat
-        # week -1                       F
-        # current                       T
-        # week +1
-        #
-        # DOWParseStyle = +1
-        #
-        #          Sun Mon Tue Wed Thu Fri Sat
-        # week -1
-        # current                       T
-        # week +1                       F
-
-        self.CurrentDOWParseStyle = False
-
-        # initialize attributes to empty values to ensure
-        # they are defined
-        self.re_sources     = None
-        self.re_values      = None
-        self.Modifiers      = None
-        self.dayOffsets     = None
-        self.WeekdayOffsets = None
-        self.MonthOffsets   = None
-        self.dateSep        = None
-        self.timeSep        = None
-        self.am             = None
-        self.pm             = None
-        self.meridian       = None
-        self.usesMeridian   = None
-        self.uses24         = None
-        self.dp_order       = None
-
-        self.RE_DATE4     = r''
-        self.RE_DATE3     = r''
-        self.RE_MONTH     = r''
-        self.RE_WEEKDAY   = r''
-        self.RE_SPECIAL   = r''
-        self.RE_UNITS     = r''
-        self.RE_QUNITS    = r''
-        self.RE_MODIFIER  = r''
-        self.RE_MODIFIER2 = r''
-        self.RE_TIMEHMS   = r''
-        self.RE_TIMEHMS2  = r''
-        self.RE_DATE      = r''
-        self.RE_DATE2     = r''
-        self.RE_DAY       = r''
-        self.RE_DAY2      = r''
-        self.RE_TIME      = r''
-        self.RE_REMAINING = r''
-        self.RE_RTIMEHMS  = r''
-        self.RE_RTIMEHMS2 = r''
-        self.RE_RDATE     = r''
-        self.RE_RDATE3    = r''
-        self.DATERNG1     = r''
-        self.DATERNG2     = r''
-        self.DATERNG3     = r''
-        self.TIMERNG1     = r''
-        self.TIMERNG2     = r''
-        self.TIMERNG3     = r''
-        self.TIMERNG4     = r''
-
-        _initLocale(self)
-        _initConstants(self)
-        _initSymbols(self)
-        _initPatterns(self)
-
-        self.re_option = re.IGNORECASE + re.VERBOSE
-        self.cre_source = { 'CRE_SPECIAL':   self.RE_SPECIAL,
-                            'CRE_UNITS':     self.RE_UNITS,
-                            'CRE_QUNITS':    self.RE_QUNITS,
-                            'CRE_MODIFIER':  self.RE_MODIFIER,
-                            'CRE_MODIFIER2': self.RE_MODIFIER2,
-                            'CRE_TIMEHMS':   self.RE_TIMEHMS,
-                            'CRE_TIMEHMS2':  self.RE_TIMEHMS2,
-                            'CRE_DATE':      self.RE_DATE,
-                            'CRE_DATE2':     self.RE_DATE2,
-                            'CRE_DATE3':     self.RE_DATE3,
-                            'CRE_DATE4':     self.RE_DATE4,
-                            'CRE_MONTH':     self.RE_MONTH,
-                            'CRE_WEEKDAY':   self.RE_WEEKDAY,
-                            'CRE_DAY':       self.RE_DAY,
-                            'CRE_DAY2':      self.RE_DAY2,
-                            'CRE_TIME':      self.RE_TIME,
-                            'CRE_REMAINING': self.RE_REMAINING,
-                            'CRE_RTIMEHMS':  self.RE_RTIMEHMS,
-                            'CRE_RTIMEHMS2': self.RE_RTIMEHMS2,
-                            'CRE_RDATE':     self.RE_RDATE,
-                            'CRE_RDATE3':    self.RE_RDATE3,
-                            'CRE_TIMERNG1':  self.TIMERNG1,
-                            'CRE_TIMERNG2':  self.TIMERNG2,
-                            'CRE_TIMERNG3':  self.TIMERNG3,
-                            'CRE_TIMERNG4':  self.TIMERNG4,
-                            'CRE_DATERNG1':  self.DATERNG1,
-                            'CRE_DATERNG2':  self.DATERNG2,
-                            'CRE_DATERNG3':  self.DATERNG3,
-                          }
-        self.cre_keys = self.cre_source.keys()
-
-
-    def __getattr__(self, name):
-        if name in self.cre_keys:
-            value = re.compile(self.cre_source[name], self.re_option)
-            setattr(self, name, value)
-            return value
-        else:
-            raise AttributeError, name
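
# __getattr__ above compiles each CRE_* pattern only on first access and then
# caches the compiled object on the instance; the same idea in miniature
# (class and attribute names here are illustrative, not part of parsedatetime):
import re

class LazyPatterns(object):
    cre_source = {'CRE_UNITS': r'-?\d+\s*(hour|minute|second)s?'}
    re_option = re.IGNORECASE + re.VERBOSE

    def __getattr__(self, name):          # only called when normal lookup fails
        if name in self.cre_source:
            value = re.compile(self.cre_source[name], self.re_option)
            setattr(self, name, value)    # cache: later lookups bypass __getattr__
            return value
        raise AttributeError(name)

p = LazyPatterns()
assert p.CRE_UNITS.search('in 5 hours') is not None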
-
-    def daysInMonth(self, month, year):
-        """
-        Take the given month (1-12) and a given year (4-digit) and return
-        the number of days in that month, adjusting for leap years as needed
-        """
-        result = None
-
-        if month > 0 and month <= 12:
-            result = self._DaysInMonthList[month - 1]
-
-            if month == 2:
-                if year in self._leapYears:
-                    result += 1
-                else:
-                    if calendar.isleap(year):
-                        self._leapYears.append(year)
-                        result += 1
-
-        return result
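
# Worked examples of the leap-year handling above (a standalone equivalent;
# the helper is illustrative and mirrors the logic, minus the _leapYears cache):
import calendar

DAYS = (31, 28, 31, 30, 31, 30, 31, 31, 30, 31, 30, 31)

def days_in_month(month, year):
    result = DAYS[month - 1]
    if month == 2 and calendar.isleap(year):
        result += 1
    return result

assert days_in_month(2, 2000) == 29   # divisible by 400 -> leap
assert days_in_month(2, 1900) == 28   # divisible by 100 but not 400 -> not leap
assert days_in_month(9, 2016) == 30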
-
-    def buildSources(self, sourceTime=None):
-        """
-        Return a dictionary of date/time tuples based on the keys
-        found in self.re_sources.
-
-        The current time is used as the default; any fields specified
-        for an item in self.re_sources are inserted into that value,
-        and the generated dictionary is returned.
-        """
-        if sourceTime is None:
-            (yr, mth, dy, hr, mn, sec, wd, yd, isdst) = time.localtime()
-        else:
-            (yr, mth, dy, hr, mn, sec, wd, yd, isdst) = sourceTime
-
-        sources  = {}
-        defaults = { 'yr': yr, 'mth': mth, 'dy':  dy,
-                     'hr': hr, 'mn':  mn,  'sec': sec, }
-
-        for item in self.re_sources:
-            values = {}
-            source = self.re_sources[item]
-
-            for key in defaults.keys():
-                if key in source:
-                    values[key] = source[key]
-                else:
-                    values[key] = defaults[key]
-
-            sources[item] = ( values['yr'], values['mth'], values['dy'],
-                              values['hr'], values['mn'], values['sec'], wd, yd, isdst )
-
-        return sources
-
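For reference, buildSources() above substitutes whatever fields an re_sources
entry provides (always hour/minute/second in the tables shown) into the current
local time. A minimal standalone sketch of the effect (variable names here are
illustrative, not the real API):

import time

re_sources = {'noon': {'hr': 12, 'mn': 0, 'sec': 0},
              'eod':  {'hr': 17, 'mn': 0, 'sec': 0}}

yr, mth, dy, hr, mn, sec, wd, yd, isdst = time.localtime()
sources = {}
for name, repl in re_sources.items():
    sources[name] = (yr, mth, dy, repl['hr'], repl['mn'], repl['sec'], wd, yd, isdst)

# sources['noon'] is today's date with the time pinned to 12:00:00,
# sources['eod'] is the same date at 17:00:00
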
--- /dev/null	Thu Jan 01 00:00:00 1970 +0000
+++ b/MoinMoin/support/parsedatetime/pdt_locales/__init__.py	Wed Sep 07 04:31:59 2016 +0200
@@ -0,0 +1,30 @@
+# -*- encoding: utf-8 -*-
+
+"""
+pdt_locales
+
+All of the included locale classes shipped with pdt.
+"""
+
+from __future__ import absolute_import
+from .icu import get_icu
+
+locales = ['de_DE', 'en_AU', 'en_US', 'es', 'nl_NL', 'pt_BR', 'ru_RU']
+
+__locale_caches = {}
+
+__all__ = ['get_icu', 'load_locale']
+
+
+def load_locale(locale, icu=False):
+    """
+    Return the constants module for the given locale name.
+    :param locale: locale identifier, e.g. 'en_US'
+    :return: the imported locale module
+    """
+    if locale not in locales:
+        raise NotImplementedError("The locale '%s' is not supported" % locale)
+    if locale not in __locale_caches:
+        mod = __import__(__name__, fromlist=[locale], level=0)
+        __locale_caches[locale] = getattr(mod, locale)
+    return __locale_caches[locale]
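
A minimal usage sketch for the load_locale helper above. It assumes the package
is importable as parsedatetime.pdt_locales (inside this tree it is vendored
under MoinMoin.support.parsedatetime); 'de_DE' is one of the ids in locales:

from parsedatetime.pdt_locales import load_locale

de = load_locale('de_DE')          # returns the de_DE module; results are cached
assert de.uses24 is True

try:
    load_locale('xx_XX')           # unknown locale ids raise NotImplementedError
except NotImplementedError:
    pass
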
--- /dev/null	Thu Jan 01 00:00:00 1970 +0000
+++ b/MoinMoin/support/parsedatetime/pdt_locales/base.py	Wed Sep 07 04:31:59 2016 +0200
@@ -0,0 +1,199 @@
+from __future__ import unicode_literals
+
+locale_keys = set([
+    'MonthOffsets', 'Months', 'WeekdayOffsets', 'Weekdays',
+    'dateFormats', 'dateSep', 'dayOffsets', 'dp_order',
+    'localeID', 'meridian', 'Modifiers', 're_sources', 're_values',
+    'shortMonths', 'shortWeekdays', 'timeFormats', 'timeSep', 'units',
+    'uses24', 'usesMeridian', 'numbers', 'decimal_mark', 'small',
+    'magnitude', 'ignore'])
+
+localeID = None
+
+dateSep = ['/', '.']
+timeSep = [':']
+meridian = ['AM', 'PM']
+usesMeridian = True
+uses24 = True
+WeekdayOffsets = {}
+MonthOffsets = {}
+
+# always lowercase any lookup values - helper code expects that
+Weekdays = [
+    'monday', 'tuesday', 'wednesday', 'thursday',
+    'friday', 'saturday', 'sunday',
+]
+
+shortWeekdays = [
+    'mon', 'tues|tue', 'wed', 'thu', 'fri', 'sat', 'sun',
+]
+
+Months = [
+    'january', 'february', 'march', 'april', 'may', 'june', 'july',
+    'august', 'september', 'october', 'november', 'december',
+]
+
+shortMonths = [
+    'jan', 'feb', 'mar', 'apr', 'may', 'jun',
+    'jul', 'aug', 'sep', 'oct', 'nov', 'dec',
+]
+
+# use the same formats as ICU by default
+dateFormats = {
+    'full': 'EEEE, MMMM d, yyyy',
+    'long': 'MMMM d, yyyy',
+    'medium': 'MMM d, yyyy',
+    'short': 'M/d/yy'
+}
+
+timeFormats = {
+    'full': 'h:mm:ss a z',
+    'long': 'h:mm:ss a z',
+    'medium': 'h:mm:ss a',
+    'short': 'h:mm a',
+}
+
+dp_order = ['m', 'd', 'y']
+
+# Used to parse expressions like "in 5 hours"
+numbers = {
+    'zero': 0,
+    'one': 1,
+    'a': 1,
+    'an': 1,
+    'two': 2,
+    'three': 3,
+    'four': 4,
+    'five': 5,
+    'six': 6,
+    'seven': 7,
+    'eight': 8,
+    'nine': 9,
+    'ten': 10,
+    'eleven': 11,
+    'thirteen': 13,
+    'fourteen': 14,
+    'fifteen': 15,
+    'sixteen': 16,
+    'seventeen': 17,
+    'eighteen': 18,
+    'nineteen': 19,
+    'twenty': 20,
+}
+
+decimal_mark = '.'
+
+
+# this will be added to re_values later
+units = {
+    'seconds': ['second', 'seconds', 'sec', 's'],
+    'minutes': ['minute', 'minutes', 'min', 'm'],
+    'hours': ['hour', 'hours', 'hr', 'h'],
+    'days': ['day', 'days', 'dy', 'd'],
+    'weeks': ['week', 'weeks', 'wk', 'w'],
+    'months': ['month', 'months', 'mth'],
+    'years': ['year', 'years', 'yr', 'y'],
+}
+
+
+# text constants to be used by later regular expressions
+re_values = {
+    'specials': 'in|on|of|at',
+    'timeseparator': ':',
+    'rangeseparator': '-',
+    'daysuffix': 'rd|st|nd|th',
+    'meridian': 'am|pm|a.m.|p.m.|a|p',
+    'qunits': 'h|m|s|d|w|y',
+    'now': ['now'],
+}
+
+# Used to adjust the returned date before/after the source
+Modifiers = {
+    'from': 1,
+    'before': -1,
+    'after': 1,
+    'ago': -1,
+    'prior': -1,
+    'prev': -1,
+    'last': -1,
+    'next': 1,
+    'previous': -1,
+    'end of': 0,
+    'this': 0,
+    'eod': 1,
+    'eom': 1,
+    'eoy': 1,
+}
+
+dayOffsets = {
+    'tomorrow': 1,
+    'today': 0,
+    'yesterday': -1,
+}
+
+# special days and/or times, e.g. lunch, noon, evening
+# each element in the dictionary is a dictionary that is used
+# to fill in any value to be replaced - the current date/time will
+# already have been populated by the method buildSources
+re_sources = {
+    'noon': {'hr': 12, 'mn': 0, 'sec': 0},
+    'afternoon': {'hr': 13, 'mn': 0, 'sec': 0},
+    'lunch': {'hr': 12, 'mn': 0, 'sec': 0},
+    'morning': {'hr': 6, 'mn': 0, 'sec': 0},
+    'breakfast': {'hr': 8, 'mn': 0, 'sec': 0},
+    'dinner': {'hr': 19, 'mn': 0, 'sec': 0},
+    'evening': {'hr': 18, 'mn': 0, 'sec': 0},
+    'midnight': {'hr': 0, 'mn': 0, 'sec': 0},
+    'night': {'hr': 21, 'mn': 0, 'sec': 0},
+    'tonight': {'hr': 21, 'mn': 0, 'sec': 0},
+    'eod': {'hr': 17, 'mn': 0, 'sec': 0},
+}
+
+small = {
+    'zero': 0,
+    'one': 1,
+    'a': 1,
+    'an': 1,
+    'two': 2,
+    'three': 3,
+    'four': 4,
+    'five': 5,
+    'six': 6,
+    'seven': 7,
+    'eight': 8,
+    'nine': 9,
+    'ten': 10,
+    'eleven': 11,
+    'twelve': 12,
+    'thirteen': 13,
+    'fourteen': 14,
+    'fifteen': 15,
+    'sixteen': 16,
+    'seventeen': 17,
+    'eighteen': 18,
+    'nineteen': 19,
+    'twenty': 20,
+    'thirty': 30,
+    'forty': 40,
+    'fifty': 50,
+    'sixty': 60,
+    'seventy': 70,
+    'eighty': 80,
+    'ninety': 90
+}
+
+magnitude = {
+    'thousand': 1000,
+    'million': 1000000,
+    'billion': 1000000000,
+    'trillion': 1000000000000,
+    'quadrillion': 1000000000000000,
+    'quintillion': 1000000000000000000,
+    'sextillion': 1000000000000000000000,
+    'septillion': 1000000000000000000000000,
+    'octillion': 1000000000000000000000000000,
+    'nonillion': 1000000000000000000000000000000,
+    'decillion': 1000000000000000000000000000000000,
+}
+
+ignore = ('and', ',')
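
A design note on the new layout: every concrete locale module (see de_DE.py
below) does "from .base import *" and therefore shares the objects defined in
this file, which is why de_DE.py copies re_values before updating it. A minimal
illustration of why the copy matters (dict contents trimmed for brevity):

base_re_values = {'specials': 'in|on|of|at', 'now': ['now']}

de_re_values = base_re_values.copy()        # copy first ...
de_re_values.update({'now': ['jetzt']})     # ... then override locally

assert base_re_values['now'] == ['now']     # the shared defaults stay untouched
assert de_re_values['now'] == ['jetzt']
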
--- /dev/null	Thu Jan 01 00:00:00 1970 +0000
+++ b/MoinMoin/support/parsedatetime/pdt_locales/de_DE.py	Wed Sep 07 04:31:59 2016 +0200
@@ -0,0 +1,118 @@
+# -*- coding: utf-8 -*-
+from __future__ import unicode_literals
+from .base import *  # noqa
+
+# don't use a unicode string
+localeID = 'de_DE'
+dateSep = ['.']
+timeSep = [':']
+meridian = []
+usesMeridian = False
+uses24 = True
+decimal_mark = ','
+
+Weekdays = [
+    'montag', 'dienstag', 'mittwoch',
+    'donnerstag', 'freitag', 'samstag', 'sonntag',
+]
+shortWeekdays = ['mo', 'di', 'mi', 'do', 'fr', 'sa', 'so']
+Months = [
+    'januar', 'februar', 'märz',
+    'april', 'mai', 'juni',
+    'juli', 'august', 'september',
+    'oktober', 'november', 'dezember',
+]
+shortMonths = [
+    'jan', 'feb', 'mrz', 'apr', 'mai', 'jun',
+    'jul', 'aug', 'sep', 'okt', 'nov', 'dez',
+]
+
+dateFormats = {
+    'full': 'EEEE, d. MMMM yyyy',
+    'long': 'd. MMMM yyyy',
+    'medium': 'dd.MM.yyyy',
+    'short': 'dd.MM.yy',
+}
+
+timeFormats = {
+    'full': 'HH:mm:ss v',
+    'long': 'HH:mm:ss z',
+    'medium': 'HH:mm:ss',
+    'short': 'HH:mm',
+}
+
+dp_order = ['d', 'm', 'y']
+
+# the short version would be a capital M,
+# as I understand it we can't distinguish
+# between m for minutes and M for months.
+units = {
+    'seconds': ['sekunden', 'sek', 's'],
+    'minutes': ['minuten', 'min', 'm'],
+    'hours': ['stunden', 'std', 'h'],
+    'days': ['tag', 'tage', 't'],
+    'weeks': ['wochen', 'w'],
+    'months': ['monat', 'monate'],
+    'years': ['jahr', 'jahre', 'j'],
+}
+
+re_values = re_values.copy()
+re_values.update({
+    'specials': 'am|dem|der|im|in|den|zum',
+    'timeseparator': ':',
+    'rangeseparator': '-',
+    'daysuffix': '',
+    'qunits': 'h|m|s|t|w|m|j',
+    'now': ['jetzt'],
+})
+
+# Used to adjust the returned date before/after the source
+# still looking for insight on how to translate all of them to German.
+Modifiers = {
+    'from': 1,
+    'before': -1,
+    'after': 1,
+    'vergangener': -1,
+    'vorheriger': -1,
+    'prev': -1,
+    'letzter': -1,
+    'nächster': 1,
+    'dieser': 0,
+    'previous': -1,
+    'in a': 2,
+    'end of': 0,
+    'eod': 0,
+    'eo': 0,
+}
+
+# morgen/abermorgen does not work, see
+# http://code.google.com/p/parsedatetime/issues/detail?id=19
+dayOffsets = {
+    'morgen': 1,
+    'heute': 0,
+    'gestern': -1,
+    'vorgestern': -2,
+    'übermorgen': 2,
+}