changeset 1510:0d89219ff974

merged xapian branch
author Thomas Waldmann <tw AT waldmann-edv DOT de>
date Sun, 27 Aug 2006 15:29:01 +0200
parents 88c25ce3d813 (current diff) 6cd9bcede8cf (diff)
children 490733f328cf
files MoinMoin/action/AttachFile.py MoinMoin/config/multiconfig.py MoinMoin/search/builtin.py
diffstat 24 files changed, 4355 insertions(+), 414 deletions(-)
--- a/MoinMoin/PageEditor.py	Sun Aug 27 14:48:06 2006 +0200
+++ b/MoinMoin/PageEditor.py	Sun Aug 27 15:29:01 2006 +0200
@@ -488,7 +488,15 @@
             self.error = None
             # Save page text with a comment about the old name
             savetext = u"## page was renamed from %s\n%s" % (self.page_name, savetext)
-            newpage.saveText(savetext, 0, comment=comment)
+            newpage.saveText(savetext, 0, comment=comment, index=0)
+
+            if self.request.cfg.xapian_search:
+                from MoinMoin.search.Xapian import Index
+                index = Index(self.request)
+                if index.exists():
+                    index.remove_item(self.page_name, now=0)
+                    index.update_page(newpagename)
+
             return True, None
         except OSError, err:
             # Try to understand what happened. Maybe its better to check
@@ -517,10 +525,18 @@
         try:
             # First save a final backup copy of the current page
             # (recreating the page allows access to the backups again)
-            msg = self.saveText(u"deleted\n", 0, comment=comment or u'')
+            msg = self.saveText(u"deleted\n", 0, comment=comment or u'',
+                    index=0)
             msg = msg.replace(
                 _("Thank you for your changes. Your attention to detail is appreciated."),
                 _('Page "%s" was successfully deleted!') % (self.page_name,))
+            
+            if self.request.cfg.xapian_search:
+                from MoinMoin.search.Xapian import Index
+                index = Index(self.request)
+                if index.exists():
+                    index.remove_item(self.page_name)
+
             # Then really delete it
             try:
                 os.remove(self._text_filename())
@@ -915,6 +931,7 @@
         @keyword extra: extra info field (e.g. for SAVE/REVERT with revno)
         @keyword comment: comment field (when preview is true)
         @keyword action: action for editlog (default: SAVE)
+        @keyword index: set to 0 if the caller handles indexing itself (default: 1)
         @rtype: unicode
         @return: error msg
         """
@@ -981,7 +998,7 @@
                 action != "SAVE/REVERT"):
                 msg = _("You can't change ACLs on this page since you have no admin rights on it!")
                 raise self.NoAdmin, msg
-            
+        
         # save only if no error occurred (msg is empty)
         if not msg:
             # set success msg
@@ -1002,13 +1019,11 @@
             if self.request.cfg.mail_enabled:
                 msg = msg + self._notifySubscribers(comment, trivial)
           
-            if self.request.cfg.xapian_search:
+            if kw.get('index', 1) and self.request.cfg.xapian_search:
                 from MoinMoin.search.Xapian import Index
                 index = Index(self.request)
-                # When we have automatic index building, we can add to
-                # the queue even if the index is missing.
                 if index.exists():
-                    index.update_page(self)
+                    index.update_page(self.page_name)
 
         # remove lock (forcibly if we were allowed to break it by the UI)
         # !!! this is a little fishy, since the lock owner might not notice
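
A minimal sketch of the deferred-indexing pattern the two hunks above introduce (the helper name is made up; Index, exists(), remove_item() and update_page() are the APIs added further down in MoinMoin/search/builtin.py and MoinMoin/search/Xapian.py):

    from MoinMoin.search.Xapian import Index

    def reindex_after_rename(request, oldname, newname):
        # saveText() was called with index=0, so nothing was queued yet
        if not request.cfg.xapian_search:
            return
        index = Index(request)
        if index.exists():
            # queue removal of all revisions of the old name (now=0: do not
            # process yet), then index the new name, which also flushes the
            # remove queue in a background thread
            index.remove_item(oldname, now=0)
            index.update_page(newname)
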
--- a/MoinMoin/action/AttachFile.py	Sun Aug 27 14:48:06 2006 +0200
+++ b/MoinMoin/action/AttachFile.py	Sun Aug 27 15:29:01 2006 +0200
@@ -192,6 +192,12 @@
 
         _addLogEntry(request, 'ATTNEW', pagename, target)
 
+        if request.cfg.xapian_search:
+            from MoinMoin.search.Xapian import Index
+            index = Index(request)
+            if index.exists():
+                index.update_page(pagename)
+
         return target
 
 
@@ -639,6 +645,12 @@
     os.remove(fpath)
     _addLogEntry(request, 'ATTDEL', pagename, filename)
 
+    if request.cfg.xapian_search:
+        from MoinMoin.search.Xapian import Index
+        index = Index(request)
+        if index.exists():
+            index.remove_item(pagename, filename)
+
     upload_form(pagename, request, msg=_("Attachment '%(filename)s' deleted.") % {'filename': filename})
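
The attachment hooks map onto the same index API; a short sketch (the helper name and the deleted flag are illustrative, request/pagename/filename are as in the surrounding AttachFile.py code):

    from MoinMoin.search.Xapian import Index

    def attachment_changed(request, pagename, filename, deleted=False):
        if not request.cfg.xapian_search:
            return
        index = Index(request)
        if index.exists():
            if deleted:
                # ATTDEL: drop only this attachment's document from the index
                index.remove_item(pagename, filename)
            else:
                # ATTNEW: reindex the page, which also indexes its attachments
                index.update_page(pagename)
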
 
 
--- a/MoinMoin/action/fullsearch.py	Sun Aug 27 14:48:06 2006 +0200
+++ b/MoinMoin/action/fullsearch.py	Sun Aug 27 15:29:01 2006 +0200
@@ -8,10 +8,11 @@
     @license: GNU GPL, see COPYING for details.
 """
 
-import re
+import re, time
 from MoinMoin.Page import Page
 from MoinMoin import wikiutil
-
+from MoinMoin.support.parsedatetime.parsedatetime import Calendar
+from MoinMoin.widget import html
 
 def isTitleSearch(request):
     """ Return True for title search, False for full text search 
@@ -19,7 +20,7 @@
     When used in FullSearch macro, we have 'titlesearch' parameter with
     '0' or '1'. In standard search, we have either 'titlesearch' or
     'fullsearch' with localized string. If both missing, default to
-    True (might happen with Safari).
+    True (might happen with Safari) if this isn't an advanced search.
     """
     try:
         return int(request.form['titlesearch'][0])
@@ -29,12 +30,28 @@
         return 'fullsearch' not in request.form and \
                 not isAdvancedSearch(request)
 
+
 def isAdvancedSearch(request):
+    """ Return True if advanced search is requested """
     try:
         return int(request.form['advancedsearch'][0])
     except KeyError:
         return False
 
+
+def searchHints(f, hints):
+    """ Return a paragraph showing hints for a search
+
+    @param f: the formatter to use
+    @param hints: list of hints (as strings) to show
+    """
+    return ''.join([
+        f.paragraph(1, attr={'class': 'searchhint'}),
+        html.BR().join(hints),
+        f.paragraph(0),
+    ])
+
+
 def execute(pagename, request, fieldname='value', titlesearch=0):
     _ = request.getText
     titlesearch = isTitleSearch(request)
@@ -54,9 +71,11 @@
     case = int(request.form.get('case', [0])[0])
     regex = int(request.form.get('regex', [0])[0]) # no interface currently
     hitsFrom = int(request.form.get('from', [0])[0])
+    mtime = None
+    msg = ''
+    historysearch = 0
 
-    max_context = 1 # only show first `max_context` contexts XXX still unused
-
+    # if advanced search is enabled we construct our own search query
     if advancedsearch:
         and_terms = request.form.get('and_terms', [''])[0].strip()
         or_terms = request.form.get('or_terms', [''])[0].strip()
@@ -64,25 +83,58 @@
         #xor_terms = request.form.get('xor_terms', [''])[0].strip()
         categories = request.form.get('categories', [''])[0].strip()
         timeframe = request.form.get('time', [''])[0].strip()
-        language = request.form.get('language',
-                [request.cfg.language_default])[0]
+        language = request.form.get('language', [''])[0]
         mimetype = request.form.get('mimetype', [0])[0]
-        includeunderlay = request.form.get('includeunderlay', [0])[0]
-        onlysystempages = request.form.get('onlysystempages', [0])[0]
+        excludeunderlay = request.form.get('excludeunderlay', [0])[0]
+        nosystemitems = request.form.get('nosystemitems', [0])[0]
+        historysearch = request.form.get('historysearch', [0])[0]
+
         mtime = request.form.get('mtime', [''])[0]
-        
+        if mtime:
+            mtime_parsed = None
+
+            # get mtime from known date/time formats
+            for fmt in (request.user.datetime_fmt,
+                    request.cfg.datetime_fmt, request.user.date_fmt,
+                    request.cfg.date_fmt):
+                try:
+                    mtime_parsed = time.strptime(mtime, fmt)
+                except ValueError:
+                    continue
+                else:
+                    break
+
+            if mtime_parsed:
+                mtime = time.mktime(mtime_parsed)
+            else:
+                # didn't work, let's try parsedatetime
+                cal = Calendar()
+                mtime_parsed = cal.parse(mtime)
+
+                if mtime_parsed[1] == 0 and mtime_parsed[0] <= time.localtime():
+                    mtime = time.mktime(mtime_parsed[0])
+
+            # show info
+            if mtime_parsed:
+                mtime_msg = _("(!) Only pages changed since '''%s''' are being "
+                        "displayed!") % request.user.getFormattedDateTime(mtime)
+            else:
+                mtime_msg = _('/!\\ The modification date you entered was not '
+                        'recognized and is therefore not considered for the '
+                        'search results!')
+        else:
+            mtime_msg = None
+
         word_re = re.compile(r'(\"[\w\s]+"|\w+)')
         needle = ''
         if language:
             needle += 'language:%s ' % language
         if mimetype:
             needle += 'mimetype:%s ' % mimetype
-        if not includeunderlay:
+        if excludeunderlay:
             needle += '-domain:underlay '
-        if onlysystempages:
-            needle += 'domain:system '
-        if mtime:
-            needle += 'lastmodifiedsince:%s ' % mtime
+        if nosystemitems:
+            needle += '-domain:system '
         if categories:
             needle += '(%s) ' % ' or '.join(['category:%s' % cat
                 for cat in word_re.findall(categories)])
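
For reference, the advanced form is translated into the same prefix syntax the QueryParser already understands; with illustrative values (language 'en', mimetype 'text/plain', both domain checkboxes set, category 'Homepage') the needle built above looks like this, before the and/or/not term fields are appended:

    needle = u'language:en mimetype:text/plain -domain:underlay -domain:system (category:Homepage) '
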
@@ -94,31 +146,39 @@
             needle += '(%s) ' % ' or '.join(word_re.findall(or_terms))
 
     # check for sensible search term
-    striped = needle.strip()
-    if len(striped) == 0:
+    stripped = needle.strip()
+    if len(stripped) == 0:
         err = _('Please use a more selective search term instead '
                 'of {{{"%s"}}}') % needle
-        request.emit_http_headers()
         Page(request, pagename).send_page(request, msg=err)
         return
-    needle = striped
+    needle = stripped
 
     # Setup for type of search
     if titlesearch:
         title = _('Title Search: "%s"')
         sort = 'page_name'
     else:
-        title = _('Full Text Search: "%s"')
+        if advancedsearch:
+            title = _('Advanced Search: "%s"')
+        else:
+            title = _('Full Text Search: "%s"')
         sort = 'weight'
 
     # search the pages
     from MoinMoin.search import searchPages, QueryParser
-    query = QueryParser(case=case, regex=regex,
-            titlesearch=titlesearch).parse_query(needle)
-    results = searchPages(request, query, sort)
+    try:
+        query = QueryParser(case=case, regex=regex,
+                titlesearch=titlesearch).parse_query(needle)
+        results = searchPages(request, query, sort, mtime, historysearch)
+    except ValueError: # catch errors in the search query
+        err = _('Your search query {{{"%s"}}} is invalid. Please refer to '
+                'HelpOnSearching for more information.') % needle
+        Page(request, pagename).send_page(request, msg=err)
+        return
 
     # directly show a single hit
-    # XXX won't work with attachment search
+    # Note: can't work with attachment search
     # improve if we have one...
     if len(results.hits) == 1:
         page = results.hits[0]
@@ -127,38 +187,75 @@
             url = page.url(request, querystr={'highlight': query.highlight_re()}, escape=0, relative=False)
             request.http_redirect(url)
             return
+    elif not results.hits: # no hits?
+        f = request.formatter
+        querydict = wikiutil.parseQueryString(request.query_string)
+        querydict.update({'titlesearch': 0})
+
+        err = _('Your search query {{{"%s"}}} didn\'t return any results. '
+                'Please change some terms and refer to HelpOnSearching for '
+                'more information.%s') % (needle,
+                    titlesearch and ''.join([
+                        '<br>',
+                        _('(!) Consider performing a'), ' ',
+                        f.url(1, href=request.page.url(request, querydict,
+                            escape=0, relative=False)),
+                        _('full-text search with your search terms'),
+                        f.url(0), '.',
+                    ]) or '')
+        Page(request, pagename).send_page(request, msg=err)
+        return
 
     request.emit_http_headers()
 
     # This action generate data using the user language
     request.setContentLanguage(request.lang)
 
-    request.theme.send_title(title % needle, form=request.form, pagename=pagename)
+    request.theme.send_title(title % needle, form=request.form,
+            pagename=pagename, msg=msg)
 
     # Start content (important for RTL support)
     request.write(request.formatter.startContent("content"))
 
-    # Did we get any hits?
-    if results.hits:
-        # First search stats
-        request.write(results.stats(request, request.formatter, hitsFrom))
+    # Hints
+    f = request.formatter
+    hints = []
 
-        # Then search results
-        info = not titlesearch
-        if context:
-            output = results.pageListWithContext(request, request.formatter,
-                    info=info, context=context, hitsFrom=hitsFrom)
-        else:
-            output = results.pageList(request, request.formatter, info=info,
-                    hitsFrom=hitsFrom)
-        request.write(output)
+    if titlesearch:
+        querydict = wikiutil.parseQueryString(request.query_string)
+        querydict.update({'titlesearch': 0})
+
+        hints.append(''.join([
+            _('(!) You\'re conducting a title search so your search '
+                'results might not contain all information available for '
+                'your search query in this wiki.'),
+            ' ',
+            f.url(1, href=request.page.url(request, querydict, escape=0,
+                relative=False)),
+            f.text(_('Click here to perform a full-text search with your '
+                'search terms!')),
+            f.url(0),
+        ]))
+
+    if advancedsearch and mtime_msg:
+        hints.append(mtime_msg)
+
+    if hints:
+        request.write(searchHints(f, hints))
+
+    # Search stats
+    request.write(results.stats(request, request.formatter, hitsFrom))
+
+    # Then search results
+    info = not titlesearch
+    if context:
+        output = results.pageListWithContext(request, request.formatter,
+                info=info, context=context, hitsFrom=hitsFrom)
     else:
-        f = request.formatter
-        request.write(''.join([
-            f.heading(1, 3),
-            f.text(_('Your search query didn\'t return any results.')),
-            f.heading(0, 3),
-        ]))
+        output = results.pageList(request, request.formatter, info=info,
+                hitsFrom=hitsFrom)
+
+    request.write(output)
 
     request.write(request.formatter.endContent())
     request.theme.send_footer(pagename)
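
Condensed, the date handling added above works as in the following sketch (not part of the patch; it mirrors the hunk's treatment of the bundled parsedatetime, where a second return value of 0 together with a non-future date counts as a successful parse):

    import time
    from MoinMoin.support.parsedatetime.parsedatetime import Calendar

    def parse_mtime(request, mtime):
        # 1) known strftime-style formats: user settings first, wiki defaults second
        for fmt in (request.user.datetime_fmt, request.cfg.datetime_fmt,
                    request.user.date_fmt, request.cfg.date_fmt):
            try:
                return time.mktime(time.strptime(mtime, fmt))
            except ValueError:
                continue
        # 2) natural language input such as "last 2 weeks"
        parsed = Calendar().parse(mtime)
        if parsed[1] == 0 and parsed[0] <= time.localtime():
            return time.mktime(parsed[0])
        return None    # caller shows the "not recognized" hint
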
--- a/MoinMoin/config/multiconfig.py	Sun Aug 27 14:48:06 2006 +0200
+++ b/MoinMoin/config/multiconfig.py	Sun Aug 27 15:29:01 2006 +0200
@@ -301,6 +301,7 @@
     xapian_search = False # disabled until xapian is finished
     xapian_index_dir = None
     xapian_stemming = True
+    xapian_index_history = True
     search_results_per_page = 10
 
     mail_login = None # or "user pwd" if you need to use SMTP AUTH
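
To use the new option, a wikiconfig would look roughly like this (a sketch assuming the usual DefaultConfig subclassing; paths and values are illustrative, defaults are the ones shown above):

    from MoinMoin.config.multiconfig import DefaultConfig

    class Config(DefaultConfig):
        sitename = u'My Wiki'
        xapian_search = True                         # enable the Xapian index
        xapian_index_dir = '/var/cache/moin/xapian'  # None keeps it in the wiki's cache_dir
        xapian_stemming = True
        xapian_index_history = True                  # index every page revision (history search)
        search_results_per_page = 10
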
--- a/MoinMoin/macro/AdvancedSearch.py	Sun Aug 27 14:48:06 2006 +0200
+++ b/MoinMoin/macro/AdvancedSearch.py	Sun Aug 27 15:29:01 2006 +0200
@@ -1,43 +1,53 @@
 # -*- coding: iso-8859-1 -*-
-'''
+"""
     MoinMoin - AdvancedSearch Macro
 
     [[AdvancedSearch]]
         displays advanced search dialog.
-
-    MAYBE:
-    [[AdvancedSearch(Help)]]
-        embed results of an advanced search (use more parameters...)
-'''
+"""
 
 from MoinMoin import config, wikiutil, search
 from MoinMoin.i18n import languages
+from MoinMoin.support import sorted
+from MoinMoin.widget import html
+from MoinMoin.util.web import makeSelection
 
 import mimetypes
 
 Dependencies = ['pages']
 
-try:
-    sorted
-except NameError:
-    def sorted(l, *args, **kw):
-        l = l[:]
-        l.sort(*args, **kw)
-        return l
+
+def form_get(request, name, default=''):
+    """ Fetches a form field
+    
+    @param request: current request
+    @param name: name of the field
+    @keyword default: value if not present (default: '')
+    """
+    return request.form.get(name, [default])[0]
+
 
 def advanced_ui(macro):
+    """ Returns the code for the advanced search user interface
+
+    @param macro: current macro instance
+    """
     _ = macro._
     f = macro.formatter
+    request = macro.request
+
+    disabledIfMoinSearch = not request.cfg.xapian_search and \
+            ' disabled="disabled"' or ''
 
     search_boxes = ''.join([
         f.table_row(1),
         f.table_cell(1, attrs={'rowspan': '6', 'class': 'searchfor'}),
-        f.text(_('Search for pages')),
+        f.text(_('Search for items')),
         f.table_cell(0),
         ''.join([''.join([
             f.table_row(1),
             f.table_cell(1),
-            f.text(_(txt)),
+            f.text(txt),
             f.table_cell(0),
             f.table_cell(1),
             f.rawHTML(input_field),
@@ -45,39 +55,45 @@
             f.table_row(0),
         ]) for txt, input_field in (
             (_('containing all the following terms'),
-                '<input type="text" name="and_terms" size="30">'),
+                '<input type="text" name="and_terms" size="30" value="%s">'
+                % (form_get(request, 'and_terms') or form_get(request, 'value'))),
             (_('containing one or more of the following terms'),
-                '<input type="text" name="or_terms" size="30">'),
+                '<input type="text" name="or_terms" size="30" value="%s">'
+                % form_get(request, 'or_terms')),
             (_('not containing the following terms'),
-                '<input type="text" name="not_terms" size="30">'),
+                '<input type="text" name="not_terms" size="30" value="%s">'
+                % form_get(request, 'not_terms')),
             #('containing only one of the following terms',
-            #    '<input type="text" name="xor_terms" size="30">'),
+            #    '<input type="text" name="xor_terms" size="30" value="%s">'
+            #    % form_get(request, 'xor_terms')),
             # TODO: dropdown-box?
             (_('belonging to one of the following categories'),
-                '<input type="text" name="categories" size="30">'),
-            (_('last modified since (XXX)'),
-                '<input type="text" name="mtime" size="30" value="">'),
+                '<input type="text" name="categories" size="30" value="%s">'
+                % form_get(request, 'categories')),
+            (_('last modified since (e.g. last 2 weeks)'),
+                '<input type="text" name="mtime" size="30" value="%s">'
+                % form_get(request, 'mtime')),
         )])
     ])
 
+    # language selection
+    searchedlang = form_get(request, 'language')
     langs = dict([(lang, lmeta['x-language-in-english'])
-        for lang, lmeta in sorted(languages.items())])
-    lang_dropdown = ''.join([
-        u'<select name="language" size="1">',
-        u'<option value="" selected>%s</option>' % _('any language'),
-        ''.join(['<option value="%s">%s</option>' % lt for lt in
-            langs.items()]),
-        u'</select>',
-    ])
+        for lang, lmeta in languages.items()])
+    userlang = macro.request.lang
+    lang_select = makeSelection('language',
+            [('', _('any language')), (userlang, langs[userlang])] +
+                sorted(langs.items(), key=lambda i: i[1]),
+            searchedlang)
 
-    ft_dropdown = ''.join([
-        u'<select name="mimetype" size="1">',
-        u'<option value="" selected>%s</option>' % _('any type'),
-        ''.join(['<option value="%s">%s</option>' % (m[1], '*%s - %s' % m)
-            for m in sorted(mimetypes.types_map.items())]),
-        u'</select>',
-    ])
+    # mimetype selection
+    mimetype = form_get(request, 'mimetype')
+    mt_select = makeSelection('mimetype',
+            [(m[1], '*%s - %s' % m) for m in sorted(mimetypes.types_map.items())],
+            mimetype)
 
+
+    # misc search options (checkboxes)
     search_options = ''.join([
         ''.join([
             f.table_row(1),
@@ -85,24 +101,39 @@
             txt[0],
             f.table_cell(0),
             f.table_cell(1, colspan=2),
-            txt[1],
+            unicode(txt[1]),
+            txt[2],
             f.table_cell(0),
             f.table_row(0),
             ]) for txt in (
-                (_('Language'), lang_dropdown),
-                (_('File Type'), ft_dropdown),
-                ('', '<input type="checkbox" name="titlesearch" value="1">%s</input>' %
-                _('Search only in titles')),
-                ('', '<input type="checkbox" name="case" value="1">%s</input>' %
-                _('Case-sensitive search')),
-                ('', '<input type="checkbox" name="includeunderlay" value="1" checked>%s'
-                    '</input>' % _('Include underlay')),
-                ('', '<input type="checkbox" name="onlysystempages" value="1">%s'
-                    '</input>' % _('Only system pages')),
+                (_('Language'), unicode(lang_select), ''),
+                (_('File Type'), unicode(mt_select), ''),
+                ('', html.INPUT(type='checkbox', name='titlesearch',
+                    value='1', checked=form_get(request, 'titlesearch'),
+                    id='titlesearch'),
+                    '<label for="titlesearch">%s</label>' % _('Search only in titles')),
+                ('', html.INPUT(type='checkbox', name='case', value='1',
+                    checked=form_get(request, 'case'), id='case'),
+                    '<label for="case">%s</label>' % _('Case-sensitive search')),
+                ('', html.INPUT(type='checkbox', name='excludeunderlay',
+                    value='1', checked=form_get(request, 'excludeunderlay'),
+                    id='excludeunderlay'),
+                    '<label for="excludeunderlay">%s</label>' % _('Exclude underlay')),
+                ('', html.INPUT(type='checkbox', name='nosystemitems',
+                    value='1', checked=form_get(request, 'nosystemitems'),
+                    id='nosystempages'),
+                    '<label for="nosystempages">%s</label>' % _('No system items')),
+                ('', html.INPUT(type='checkbox', name='historysearch',
+                    value='1', checked=form_get(request, 'historysearch'),
+                    disabled=(not request.cfg.xapian_search or
+                        not request.cfg.xapian_index_history),
+                    id='historysearch'),
+                    '<label for="historysearch">%s</label>' % _('Search in all page revisions'))
             )
     ])
-    
-    html = [
+
+    # the dialogue
+    return f.rawHTML('\n'.join([
         u'<form method="get" action="">',
         u'<div>',
         u'<input type="hidden" name="action" value="fullsearch">',
@@ -118,19 +149,10 @@
         f.table(0),
         u'</div>',
         u'</form>',
-    ]
-
-    return f.rawHTML('\n'.join(html))
+    ]))
 
 
 def execute(macro, needle):
-    request = macro.request
-    _ = request.getText
+    # for now, just show the advanced ui
+    return advanced_ui(macro)
 
-    # no args given
-    if needle is None:
-        return advanced_ui(macro)
-
-    return macro.formatter.rawHTML('wooza!')
-        
-
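
The form_get() helper defined at the top of this file is what keeps the dialogue populated between searches; a short sketch of how the values flow back in (field names as used above):

    # request.form maps each field name to a list of values;
    # form_get() unwraps the first one, with '' as fallback
    and_terms = form_get(request, 'and_terms') or form_get(request, 'value')
    case = form_get(request, 'case')     # '' when unchecked, '1' when checked --
                                         # fed straight into html.INPUT(checked=...)
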
--- a/MoinMoin/macro/FullSearch.py	Sun Aug 27 14:48:06 2006 +0200
+++ b/MoinMoin/macro/FullSearch.py	Sun Aug 27 15:29:01 2006 +0200
@@ -105,8 +105,7 @@
     needle = needle.strip()
 
     # Search the pages and return the results
-    results = search.searchPages(request, needle)
-    results.sortByPagename()
+    results = search.searchPages(request, needle, sort='page_name')
 
     return results.pageList(request, macro.formatter, paging=False)
 
--- a/MoinMoin/macro/SystemInfo.py	Sun Aug 27 14:48:06 2006 +0200
+++ b/MoinMoin/macro/SystemInfo.py	Sun Aug 27 15:29:01 2006 +0200
@@ -126,7 +126,13 @@
                     _('N/A'))
         xapRow += ', %s, %s' % (available, mtime)
 
+        import xapian
+        xapVersion = xapian.xapian_version_string()
+    else:
+        xapVersion = _('not installed')
+
     row(_('Xapian search'), xapRow)
+    row(_('Xapian Version'), xapVersion)
     row(_('Xapian stemming'), xapState[request.cfg.xapian_stemming])
 
     row(_('Active threads'), t_count or _('N/A'))
--- a/MoinMoin/macro/__init__.py	Sun Aug 27 14:48:06 2006 +0200
+++ b/MoinMoin/macro/__init__.py	Sun Aug 27 15:29:01 2006 +0200
@@ -280,8 +280,7 @@
         # Return a title search for needle, sorted by name.
         # XXX: what's with literal?
         results = search.searchPages(self.request, needle,
-                titlesearch=1, case=case)
-        results.sortByPagename()
+                titlesearch=1, case=case, sort='page_name')
         return results.pageList(self.request, self.formatter, paging=False)
 
     def _macro_InterWiki(self, args):
--- a/MoinMoin/search/Xapian.py	Sun Aug 27 14:48:06 2006 +0200
+++ b/MoinMoin/search/Xapian.py	Sun Aug 27 15:29:01 2006 +0200
@@ -1,6 +1,6 @@
 # -*- coding: iso-8859-1 -*-
 """
-    MoinMoin - xapian indexing search engine
+    MoinMoin - xapian search engine
 
     @copyright: 2006 MoinMoin:ThomasWaldmann,
                 2006 MoinMoin:FranzPletz
@@ -27,8 +27,13 @@
 except ImportError:
     Stemmer = None
 
+
 class UnicodeQuery(xapian.Query):
+    """ Xapian query object which automatically encodes unicode strings """
     def __init__(self, *args, **kwargs):
+        """
+        @keyword encoding: specify the encoding manually (default: value of config.charset)
+        """
         self.encoding = kwargs.get('encoding', config.charset)
 
         nargs = []
@@ -47,9 +52,21 @@
 ##############################################################################
 
 def getWikiAnalyzerFactory(request=None, language='en'):
+    """ Returns a WikiAnalyzer instance
+
+    @keyword request: current request object
+    @keyword language: stemming language iso code, defaults to 'en'
+    """
     return (lambda: WikiAnalyzer(request, language))
 
 class WikiAnalyzer:
+    """ A text analyzer for wiki syntax
+    
+    The purpose of this class is to analyze texts/pages in wiki syntax
+    and yield single terms for xapwrap to feed into the xapian
+    database.
+    """
+
     singleword = r"[%(u)s][%(l)s]+" % {
                      'u': config.chars_upper,
                      'l': config.chars_lower,
@@ -69,22 +86,31 @@
 
     dot_re = re.compile(r"[-_/,.]")
     mail_re = re.compile(r"[-_/,.]|(@)")
-    
+
     # XXX limit stuff above to xapdoc.MAX_KEY_LEN
     # WORD_RE = re.compile('\\w{1,%i}' % MAX_KEY_LEN, re.U)
 
     def __init__(self, request=None, language=None):
+        """
+        @param request: current request
+        @param language: if given, the language in which to stem words
+        """
         if request and request.cfg.xapian_stemming and language:
             self.stemmer = Stemmer(language)
         else:
             self.stemmer = None
 
     def raw_tokenize(self, value):
+        """ Yield a stream of lower cased raw words from a string.
+
+        @param value: string to split, must be a unicode object or a list of
+                      unicode objects
+        """
         def enc(uc):
-            """ 'encode' unicode results into whatever xapian / xapwrap wants """
+            """ 'encode' unicode results into whatever xapian wants """
             lower = uc.lower()
             return lower
-            
+
         if isinstance(value, list): # used for page links
             for v in value:
                 yield (enc(v), 0)
@@ -121,13 +147,16 @@
                             yield (enc(sm.group()), m.start() + sm.start())
 
     def tokenize(self, value, flat_stemming=True):
-        """Yield a stream of lower cased raw and stemmed (optional) words from a string.
-           value must be an UNICODE object or a list of unicode objects
+        """ Yield a stream of lower cased raw and stemmed words from a string.
+
+        @param value: string to split, must be a unicode object or a list of
+                      unicode objects
+        @keyword flat_stemming: whether to yield stemmed terms automatically
+                                with the natural forms (True) or
+                                yield both at once as a tuple (False)
         """
         for word, pos in self.raw_tokenize(value):
             if flat_stemming:
-                # XXX: should we really use a prefix for that?
-                # Index.prefixMap['raw'] + i
                 yield (word, pos)
                 if self.stemmer:
                     yield (self.stemmer.stemWord(word), pos)
@@ -140,6 +169,7 @@
 #############################################################################
 
 class Index(BaseIndex):
+    """ A Xapian index """
     indexValueMap = {
         # mapping the value names we can easily fetch from the index to
         # integers required by xapian. 0 and 1 are reserved by xapwrap!
@@ -147,45 +177,61 @@
         'attachment': 3,
         'mtime': 4,
         'wikiname': 5,
+        'revision': 6,
     }
     prefixMap = {
         # http://svn.xapian.org/*checkout*/trunk/xapian-applications/omega/docs/termprefixes.txt
         'author': 'A',
-        'date':   'D', # numeric format: YYYYMMDD or "latest" - e.g. D20050224 or Dlatest
-                       #G   newsGroup (or similar entity - e.g. a web forum name)
+        'date': 'D',              # numeric format: YYYYMMDD or "latest" - e.g. D20050224 or Dlatest
+                                  #G   newsGroup (or similar entity - e.g. a web forum name)
         'hostname': 'H',
         'keyword': 'K',
-        'lang': 'L',   # ISO Language code
-                       #M   Month (numeric format: YYYYMM)
-                       #N   ISO couNtry code (or domaiN name)
-                       #P   Pathname
-                       #Q   uniQue id
-        'raw':  'R',   # Raw (i.e. unstemmed) term
-        'title': 'S',  # Subject (or title)
+        'lang': 'L',              # ISO Language code
+                                  #M   Month (numeric format: YYYYMM)
+                                  #N   ISO couNtry code (or domaiN name)
+                                  #P   Pathname
+                                  #Q   uniQue id
+        'raw': 'R',               # Raw (i.e. unstemmed) term
+        'title': 'S',             # Subject (or title)
         'mimetype': 'T',
-        'url': 'U',    # full URL of indexed document - if the resulting term would be > 240
-                       # characters, a hashing scheme is used to prevent overflowing
-                       # the Xapian term length limit (see omindex for how to do this).
-                       #W   "weak" (approximately 10 day intervals, taken as YYYYMMD from
-                       #  the D term, and changing the last digit to a '2' if it's a '3')
-                       #X   longer prefix for user-defined use
-        'linkto': 'XLINKTO', # this document links to that document
+        'url': 'U',               # full URL of indexed document - if the resulting term would be > 240
+                                  # characters, a hashing scheme is used to prevent overflowing
+                                  # the Xapian term length limit (see omindex for how to do this).
+                                  #W   "weak" (approximately 10 day intervals, taken as YYYYMMD from
+                                  #  the D term, and changing the last digit to a '2' if it's a '3')
+                                  #X   longer prefix for user-defined use
+        'linkto': 'XLINKTO',      # this document links to that document
         'stem_lang': 'XSTEMLANG', # ISO Language code this document was stemmed in
-        'category': 'XCAT', # category this document belongs to
-        'full_title': 'XFT', # full title (for regex)
-        'domain': 'XDOMAIN', # standard or underlay
-        'revision': 'XREV', # revision of page
-                       #Y   year (four digits)
+        'category': 'XCAT',       # category this document belongs to
+        'fulltitle': 'XFT',       # full title
+        'domain': 'XDOMAIN',      # standard or underlay
+        'revision': 'XREV',       # revision of page
+                                  #Y   year (four digits)
     }
 
     def __init__(self, request):
+        self._check_version()
         BaseIndex.__init__(self, request)
 
         # Check if we should and can stem words
         if request.cfg.xapian_stemming and not Stemmer:
             request.cfg.xapian_stemming = False
 
+    def _check_version(self):
+        """ Checks if the correct version of Xapian is installed """
+        # every version greater than or equal to 0.9.6 is allowed for now
+        # Note: fails if crossing the 10.x barrier
+        if xapian.xapian_version_string() >= '0.9.6':
+            return
+
+        from MoinMoin.error import ConfigurationError
+        raise ConfigurationError('MoinMoin needs at least Xapian version '
+                '0.9.6 to work correctly. Either disable Xapian '
+                'completely in your wikiconfig or upgrade your Xapian '
+                'installation!')
+
     def _main_dir(self):
+        """ Get the directory of the xapian index """
         if self.request.cfg.xapian_index_dir:
             return os.path.join(self.request.cfg.xapian_index_dir,
                     self.request.cfg.siteid)
@@ -196,8 +242,13 @@
         """ Check if the Xapian index exists """
         return BaseIndex.exists(self) and os.listdir(self.dir)
 
-    def _search(self, query, sort=None):
-        """ read lock must be acquired """
+    def _search(self, query, sort='weight', historysearch=0):
+        """ Perform the search using xapian (read-lock acquired)
+        
+        @param query: the search query object
+        @keyword sort: the sorting of the results (default: 'weight')
+        @keyword historysearch: whether to search in all page revisions (default: 0)
+        """
         while True:
             try:
                 searcher, timestamp = self.request.cfg.xapian_searchers.pop()
@@ -210,33 +261,50 @@
                 searcher.configure(self.prefixMap, self.indexValueMap)
                 timestamp = self.mtime()
                 break
-        
+
         kw = {}
         if sort == 'weight':
             # XXX: we need real weight here, like _moinSearch
             # (TradWeight in xapian)
             kw['sortByRelevence'] = True
+            kw['sortKey'] = 'revision'
         if sort == 'page_name':
             kw['sortKey'] = 'pagename'
 
         hits = searcher.search(query, valuesWanted=['pagename',
-            'attachment', 'mtime', 'wikiname'], **kw)
+            'attachment', 'mtime', 'wikiname', 'revision'], **kw)
         self.request.cfg.xapian_searchers.append((searcher, timestamp))
         return hits
-    
+
     def _do_queued_updates(self, request, amount=5):
         """ Assumes that the write lock is acquired """
         self.touch()
         writer = xapidx.Index(self.dir, True)
         writer.configure(self.prefixMap, self.indexValueMap)
-        pages = self.queue.pages()[:amount]
+
+        # do all page updates
+        pages = self.update_queue.pages()[:amount]
         for name in pages:
             p = Page(request, name)
-            self._index_page(writer, p, mode='update')
-            self.queue.remove([name])
+            if request.cfg.xapian_index_history:
+                for rev in p.getRevList():
+                    self._index_page(writer, Page(request, name, rev=rev), mode='update')
+            else:
+                self._index_page(writer, p, mode='update')
+            self.update_queue.remove([name])
+
+        # do page/attachment removals
+        items = self.remove_queue.pages()[:amount]
+        for item in items:
+            _item = item.split('//')
+            p = Page(request, _item[0])
+            self._remove_item(writer, p, _item[1])
+            self.remove_queue.remove([item])
+
         writer.close()
 
     def allterms(self):
+        """ Fetches all terms in the Xapian index """
         db = xapidx.ExceptionTranslater.openIndex(True, self.dir)
         i = db.allterms_begin()
         while i != db.allterms_end():
@@ -244,6 +312,11 @@
             i.next()
 
     def termpositions(self, uid, term):
+        """ Fetches all positions of a term in a document
+        
+        @param uid: document id of the item in the xapian index
+        @param term: the term as a string
+        """
         db = xapidx.ExceptionTranslater.openIndex(True, self.dir)
         pos = db.positionlist_begin(uid, term)
         while pos != db.positionlist_end(uid, term):
@@ -283,13 +356,15 @@
                 xpname = xapdoc.SortKey('pagename', fs_rootpage)
                 xattachment = xapdoc.SortKey('attachment', filename) # XXX we should treat files like real pages, not attachments
                 xmtime = xapdoc.SortKey('mtime', mtime)
+                xrev = xapdoc.SortKey('revision', '0')
                 title = " ".join(os.path.join(fs_rootpage, filename).split("/"))
                 xtitle = xapdoc.Keyword('title', title)
                 xmimetype = xapdoc.TextField('mimetype', mimetype, True)
                 xcontent = xapdoc.TextField('content', file_content)
                 doc = xapdoc.Document(textFields=(xcontent, xmimetype, ),
                                       keywords=(xtitle, xitemid, ),
-                                      sortFields=(xpname, xattachment, xmtime, xwname, ),
+                                      sortFields=(xpname, xattachment,
+                                          xmtime, xwname, xrev, ),
                                      )
                 doc.analyzerFactory = getWikiAnalyzerFactory()
                 if mode == 'update':
@@ -303,11 +378,17 @@
             pass
 
     def _get_languages(self, page):
+        """ Get language of a page and the language to stem it in
+
+        @param page: the page instance
+        """
         body = page.get_raw_body()
         default_lang = page.request.cfg.language_default
 
         lang = ''
 
+        # if we should stem, check if we have a stemmer for the
+        # language available
         if page.request.cfg.xapian_stemming:
             for line in body.split('\n'):
                 if line.startswith('#language'):
@@ -322,7 +403,7 @@
                         return (lang, lang)
                 elif not line.startswith('#'):
                     break
-        
+
         if not lang:
             # no lang found at all.. fallback to default language
             lang = default_lang
@@ -331,6 +412,11 @@
         return (lang, default_lang)
 
     def _get_categories(self, page):
+        """ Get all categories the page belongs to, using the old
+            regular expression based matching
+
+        @param page: the page instance
+        """
         body = page.get_raw_body()
 
         prev, next = (0, 1)
@@ -347,6 +433,10 @@
                 for cat in re.findall(r'Category([^\s]+)', body[pos:])]
 
     def _get_domains(self, page):
+        """ Returns a generator with all the domains the page belongs to
+
+        @param page: the page instance
+        """
         if page.isUnderlayPage():
             yield 'underlay'
         if page.isStandardPage():
@@ -356,18 +446,18 @@
 
     def _index_page(self, writer, page, mode='update'):
         """ Index a page - assumes that the write lock is acquired
-            @arg writer: the index writer object
-            @arg page: a page object
-            @arg mode: 'add' = just add, no checks
-                       'update' = check if already in index and update if needed (mtime)
-            
+
+        @arg writer: the index writer object
+        @arg page: a page object
+        @arg mode: 'add' = just add, no checks
+                   'update' = check if already in index and update if needed (mtime)
         """
         request = page.request
         wikiname = request.cfg.interwikiname or "Self"
         pagename = page.page_name
         mtime = page.mtime_usecs()
-        itemid = "%s:%s" % (wikiname, pagename)
         revision = str(page.get_real_rev())
+        itemid = "%s:%s:%s" % (wikiname, pagename, revision)
         author = page.last_edit(request)['editor']
         # XXX: Hack until we get proper metadata
         language, stem_language = self._get_languages(page)
@@ -394,15 +484,16 @@
             updated = True
         if debug: request.log("%s %r" % (pagename, updated))
         if updated:
-            xwname = xapdoc.SortKey('wikiname', request.cfg.interwikiname or "Self")
+            xwname = xapdoc.SortKey('wikiname', wikiname)
             xpname = xapdoc.SortKey('pagename', pagename)
             xattachment = xapdoc.SortKey('attachment', '') # this is a real page, not an attachment
-            xmtime = xapdoc.SortKey('mtime', mtime)
+            xmtime = xapdoc.SortKey('mtime', str(mtime))
+            xrev = xapdoc.SortKey('revision', revision)
             xtitle = xapdoc.TextField('title', pagename, True) # prefixed
             xkeywords = [xapdoc.Keyword('itemid', itemid),
                     xapdoc.Keyword('lang', language),
                     xapdoc.Keyword('stem_lang', stem_language),
-                    xapdoc.Keyword('full_title', pagename.lower()),
+                    xapdoc.Keyword('fulltitle', pagename),
                     xapdoc.Keyword('revision', revision),
                     xapdoc.Keyword('author', author),
                 ]
@@ -415,7 +506,8 @@
             xcontent = xapdoc.TextField('content', page.get_raw_body())
             doc = xapdoc.Document(textFields=(xcontent, xtitle),
                                   keywords=xkeywords,
-                                  sortFields=(xpname, xattachment, xmtime, xwname, ),
+                                  sortFields=(xpname, xattachment,
+                                      xmtime, xwname, xrev),
                                  )
             doc.analyzerFactory = getWikiAnalyzerFactory(request,
                     stem_language)
@@ -433,7 +525,7 @@
         attachments = AttachFile._get_files(request, pagename)
         for att in attachments:
             filename = AttachFile.getFilename(request, pagename, att)
-            att_itemid = "%s//%s" % (itemid, att)
+            att_itemid = "%s:%s//%s" % (wikiname, pagename, att)
             mtime = wikiutil.timestamp2version(os.path.getmtime(filename))
             if mode == 'update':
                 query = xapidx.RawQuery(xapdoc.makePairForWrite('itemid', att_itemid))
@@ -456,18 +548,24 @@
                 xpname = xapdoc.SortKey('pagename', pagename)
                 xattachment = xapdoc.SortKey('attachment', att) # this is an attachment, store its filename
                 xmtime = xapdoc.SortKey('mtime', mtime)
+                xrev = xapdoc.SortKey('revision', '0')
                 xtitle = xapdoc.Keyword('title', '%s/%s' % (pagename, att))
                 xlanguage = xapdoc.Keyword('lang', language)
                 xstem_language = xapdoc.Keyword('stem_lang', stem_language)
                 mimetype, att_content = self.contentfilter(filename)
                 xmimetype = xapdoc.Keyword('mimetype', mimetype)
                 xcontent = xapdoc.TextField('content', att_content)
-                doc = xapdoc.Document(textFields=(xcontent, ),
-                                      keywords=(xatt_itemid, xtitle,
-                                          xlanguage, xstem_language,
-                                          xmimetype, ),
+                xtitle_txt = xapdoc.TextField('title',
+                        '%s/%s' % (pagename, att), True)
+                xfulltitle = xapdoc.Keyword('fulltitle', pagename)
+                xdomains = [xapdoc.Keyword('domain', domain)
+                        for domain in domains]
+                doc = xapdoc.Document(textFields=(xcontent, xtitle_txt),
+                                      keywords=xdomains + [xatt_itemid,
+                                          xtitle, xlanguage, xstem_language,
+                                          xmimetype, xfulltitle, ],
                                       sortFields=(xpname, xattachment, xmtime,
-                                          xwname, ),
+                                          xwname, xrev, ),
                                      )
                 doc.analyzerFactory = getWikiAnalyzerFactory(request,
                         stem_language)
@@ -480,6 +578,33 @@
                     id = writer.index(doc)
         #writer.flush()
 
+    def _remove_item(self, writer, page, attachment=None):
+        request = page.request
+        wikiname = request.cfg.interwikiname or 'Self'
+        pagename = page.page_name
+
+        if not attachment:
+            # Remove all revisions and attachments from the index
+            query = xapidx.RawQuery(xapidx.makePairForWrite(
+                self.prefixMap['fulltitle'], pagename))
+            enq, mset, docs = writer.search(query, valuesWanted=['pagename',
+                'attachment', ])
+            for doc in docs:
+                writer.delete_document(doc['uid'])
+                request.log('%s removed from xapian index' %
+                        doc['values']['pagename'])
+        else:
+            # Only remove a single attachment
+            query = xapidx.RawQuery(xapidx.makePairForWrite('itemid',
+                "%s:%s//%s" % (wikiname, pagename, attachment)))
+            enq, mset, docs = writer.search(query, valuesWanted=['pagename',
+                'attachment', ])
+            if docs:
+                doc = docs[0] # there should be only one
+                writer.delete_document(doc['uid'])
+                request.log('attachment %s from %s removed from index' %
+                    (doc['values']['attachment'], doc['values']['pagename']))
+
     def _index_pages(self, request, files=None, mode='update'):
         """ Index all pages (and all given files)
         
@@ -490,6 +615,11 @@
 
         When called in a new thread, lock is acquired before the call,
         and this method must release it when it finishes or fails.
+
+        @param request: the current request
+        @keyword files: an optional list of files to index
+        @keyword mode: how to index the files, either 'add', 'update' or
+                       'rebuild'
         """
 
         # rebuilding the DB: delete it and add everything
@@ -506,7 +636,13 @@
             request.log("indexing all (%d) pages..." % len(pages))
             for pagename in pages:
                 p = Page(request, pagename)
-                self._index_page(writer, p, mode)
+                if request.cfg.xapian_index_history:
+                    for rev in p.getRevList():
+                        self._index_page(writer,
+                                Page(request, pagename, rev=rev),
+                                mode)
+                else:
+                    self._index_page(writer, p, mode)
             if files:
                 request.log("indexing all files...")
                 for fname in files:
@@ -516,33 +652,3 @@
         finally:
             writer.__del__()
 
-def run_query(query, db):
-    enquire = xapian.Enquire(db)
-    parser = xapian.QueryParser()
-    query = parser.parse_query(query, xapian.QueryParser.FLAG_WILDCARD)
-    print query.get_description()
-    enquire.set_query(query)
-    return enquire.get_mset(0, 10)
-
-def run(request):
-    pass
-    #print "Begin"
-    #db = xapian.WritableDatabase(xapian.open('test.db',
-    #                                         xapian.DB_CREATE_OR_OPEN))
-    #
-    # index_data(db) ???
-    #del db
-    #mset = run_query(sys.argv[1], db)
-    #print mset.get_matches_estimated()
-    #iterator = mset.begin()
-    #while iterator != mset.end():
-    #    print iterator.get_document().get_data()
-    #    iterator.next()
-    #for i in xrange(1,170):
-    #    doc = db.get_document(i)
-    #    print doc.get_data()
-
-if __name__ == '__main__':
-    run()
-
-
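
The identifier formats this file now settles on, collected in one place (the helper names are made up; the format strings match the itemid and queue entries used in the hunks above):

    def page_itemid(wikiname, pagename, revision):
        # one Xapian document per page revision
        return "%s:%s:%s" % (wikiname, pagename, revision)

    def attachment_itemid(wikiname, pagename, attachment):
        # attachments are unversioned; their revision sort key is '0'
        return "%s:%s//%s" % (wikiname, pagename, attachment)

    def remove_queue_entry(pagename, attachment=None):
        # an empty attachment part means: remove the page and all its revisions
        return '%s//%s' % (pagename, attachment or '')
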
--- a/MoinMoin/search/__init__.py	Sun Aug 27 14:48:06 2006 +0200
+++ b/MoinMoin/search/__init__.py	Sun Aug 27 15:29:01 2006 +0200
@@ -13,15 +13,22 @@
 from MoinMoin.search.queryparser import QueryParser
 from MoinMoin.search.builtin import Search
 
-def searchPages(request, query, sort='weight', **kw):
+def searchPages(request, query, sort='weight', mtime=None, historysearch=None, **kw):
     """ Search the text of all pages for query.
     
     @param request: current request
     @param query: the expression (string or query objects) we want to search for
+    @keyword sort: sorting of the search results, either 'weight' or 'page_name'
+    @keyword mtime: only items modified since mtime
+    @keyword historysearch: include older revisions of items in search
+    @keyword titlesearch: treat all terms as title searches (passed to qp)
+    @keyword case: do case sensitive search (passed to qp)
+    @keyword regex: treat all terms as regular expression (passed to qp)
     @rtype: SearchResults instance
     @return: search results
     """
     if isinstance(query, str) or isinstance(query, unicode):
         query = QueryParser(**kw).parse_query(query)
-    return Search(request, query, sort).run()
+    return Search(request, query, sort, mtime=mtime,
+            historysearch=historysearch).run()
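
A usage sketch of the extended signature (assumes a request object and formatter are available, e.g. inside an action; the search term and time window are illustrative):

    import time
    from MoinMoin import search

    # best matches first, only items changed in the last 24 hours,
    # including older page revisions
    results = search.searchPages(request, u'linux', sort='weight',
                                 mtime=time.time() - 86400, historysearch=1)
    output = results.pageList(request, request.formatter, info=1, hitsFrom=0)
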
 
--- a/MoinMoin/search/builtin.py	Sun Aug 27 14:48:06 2006 +0200
+++ b/MoinMoin/search/builtin.py	Sun Aug 27 15:29:01 2006 +0200
@@ -1,6 +1,6 @@
 # -*- coding: iso-8859-1 -*-
 """
-    MoinMoin - search engine
+    MoinMoin - search engine internals
     
     @copyright: 2005 MoinMoin:FlorianFesti,
                 2005 MoinMoin:NirSoffer,
@@ -22,16 +22,26 @@
 ##############################################################################
 
 class UpdateQueue:
-    def __init__(self, file, lock_dir):
-        self.file = file
+    """ Represents a locked page queue on the disk """
+
+    def __init__(self, f, lock_dir):
+        """
+        @param f: file to write to
+        @param lock_dir: directory to save the lock files
+        """
+        self.file = f
         self.writeLock = lock.WriteLock(lock_dir, timeout=10.0)
         self.readLock = lock.ReadLock(lock_dir, timeout=10.0)
 
     def exists(self):
+        """ Checks if the queue exists on the filesystem """
         return os.path.exists(self.file)
 
     def append(self, pagename):
-        """ Append a page to queue """
+        """ Append a page to queue
+        
+        @param pagename: string to save
+        """
         if not self.writeLock.acquire(60.0):
             request.log("can't add %r to xapian update queue: can't lock queue" %
                         pagename)
@@ -59,6 +69,8 @@
         
         When the queue is empty, the queue file is removed, so exists()
         can tell if there is something waiting in the queue.
+
+        @param pages: list of pagenames to remove
         """
         if self.writeLock.acquire(30.0):
             try:
@@ -80,12 +92,18 @@
     # Private -------------------------------------------------------
 
     def _decode(self, data):
-        """ Decode queue data """
+        """ Decode queue data
+        
+        @param data: the data to decode
+        """
         pages = data.splitlines()
         return self._filterDuplicates(pages)
 
     def _filterDuplicates(self, pages):
-        """ Filter duplicates in page list, keeping the order """
+        """ Filter duplicates in page list, keeping the order
+        
+        @param pages: list of pages to filter
+        """
         unique = []
         seen = {}
         for name in pages:
@@ -115,6 +133,8 @@
         """ Write pages to queue file
         
         Requires queue write locking.
+
+        @param pages: list of pages to write
         """
         # XXX use tmpfile/move for atomic replace on real operating systems
         data = '\n'.join(pages) + '\n'
@@ -135,11 +155,17 @@
             if err.errno != errno.ENOENT:
                 raise
 
+
 class BaseIndex:
+    """ Represents a search engine index """
+
     class LockedException(Exception):
         pass
 
     def __init__(self, request):
+        """
+        @param request: current request
+        """
         self.request = request
         cache_dir = request.cfg.cache_dir
         main_dir = self._main_dir()
@@ -148,11 +174,12 @@
             os.makedirs(self.dir)
         self.sig_file = os.path.join(main_dir, 'complete')
         lock_dir = os.path.join(main_dir, 'index-lock')
-        self.lock = lock.WriteLock(lock_dir,
-                                   timeout=3600.0, readlocktimeout=60.0)
+        self.lock = lock.WriteLock(lock_dir, timeout=3600.0, readlocktimeout=60.0)
         #self.read_lock = lock.ReadLock(lock_dir, timeout=3600.0)
-        self.queue = UpdateQueue(os.path.join(main_dir, 'update-queue'),
-                                 os.path.join(main_dir, 'update-queue-lock'))
+        self.update_queue = UpdateQueue(os.path.join(main_dir, 'update-queue'),
+                                os.path.join(main_dir, 'update-queue-lock'))
+        self.remove_queue = UpdateQueue(os.path.join(main_dir, 'remove-queue'),
+                                os.path.join(main_dir, 'remove-queue-lock'))
 
         # Disabled until we have a sane way to build the index with a
         # queue in small steps.
@@ -163,37 +190,65 @@
         raise NotImplemented('...')
 
     def exists(self):
-        """ Check if index exists """        
+        """ Check if index exists """
         return os.path.exists(self.sig_file)
-                
+
     def mtime(self):
+        """ Modification time of the index """
         return os.path.getmtime(self.dir)
 
     def touch(self):
+        """ Touch the index """
         os.utime(self.dir, None)
-    
+
     def _search(self, query):
+        """ Actually perform the search (read-lock acquired)
+
+        @param query: the search query object tree
+        """
         raise NotImplemented('...')
 
-    def search(self, query, *args, **kw):
+    def search(self, query, **kw):
+        """ Search for items in the index
+        
+        @param query: the search query object to pass to the index
+        """
         #if not self.read_lock.acquire(1.0):
         #    raise self.LockedException
         #try:
-        hits = self._search(query, *args, **kw)
+        hits = self._search(query, **kw)
         #finally:
         #    self.read_lock.release()
         return hits
 
-    def update_page(self, page):
-        self.queue.append(page.page_name)
-        self._do_queued_updates_InNewThread()
+    def update_page(self, pagename, now=1):
+        """ Update a single page in the index
+
+        @param pagename: the name of the page to update
+        @keyword now: do all updates now (default: 1)
+        """
+        self.update_queue.append(pagename)
+        if now:
+            self._do_queued_updates_InNewThread()
+
+    def remove_item(self, pagename, attachment=None, now=1):
+        """ Removes a page and all its revisions or a single attachment
+
+        @param pagename: name of the page to be removed
+        @keyword attachment: optional, only remove this attachment of the page
+        @keyword now: do all updates now (default: 1)
+        """
+        self.remove_queue.append('%s//%s' % (pagename, attachment or ''))
+        if now:
+            self._do_queued_updates_InNewThread()
 
     def indexPages(self, files=None, mode='update'):
         """ Index all pages (and files, if given)
         
         Can be called only from a script. To index pages during a user
         request, use indexPagesInNewThread.
-        @arg files: iterator or list of files to index additionally
+        @keyword files: iterator or list of files to index additionally
+        @keyword mode: set the mode of indexing the pages, either 'update', 'add' or 'rebuild'
         """
         if not self.lock.acquire(1.0):
             self.request.log("can't index: can't acquire lock")
@@ -221,7 +276,7 @@
         from threading import Thread
         indexThread = Thread(target=self._index_pages, args=(files, mode))
         indexThread.setDaemon(True)
-        
+
         # Join the index thread after current request finish, prevent
         # Apache CGI from killing the process.
         def joinDecorator(finish):
@@ -243,6 +298,20 @@
 
         When called in a new thread, lock is acquired before the call,
         and this method must release it when it finishes or fails.
+
+        @param request: current request
+        @keyword files: iterator or list of files to index additionally
+        @keyword mode: set the mode of indexing the pages, either 'update',
+        'add' or 'rebuild'
+        """
+        raise NotImplemented('...')
+
+    def _remove_item(self, writer, page, attachment=None):
+        """ Remove a page and all its revisions from the index or just
+            an attachment of that page
+
+        @param writer: the index writer object to use
+        @param page: the page to remove
+        @keyword attachment: optionally, just remove this attachment
         """
         raise NotImplemented('...')
 
@@ -268,7 +337,7 @@
                     target=lockedDecorator(self._do_queued_updates),
                     args=(self._indexingRequest(self.request),))
             indexThread.setDaemon(True)
-            
+
             # Join the index thread after current request finish, prevent
             # Apache CGI from killing the process.
             def joinDecorator(finish):
@@ -276,7 +345,7 @@
                     finish()
                     indexThread.join()
                 return func
-                
+
             self.request.finish = joinDecorator(self.request.finish)
             indexThread.start()
         except:
@@ -284,13 +353,22 @@
             raise
 
     def _do_queued_updates(self, request, amount=5):
+        """ Perform updates in the queues (read-lock acquired)
+        
+        @param request: the current request
+        @keyword amount: how many updates to perform at once (default: 5)
+        """
         raise NotImplemented('...')
 
     def optimize(self):
+        """ Optimize the index if possible """
         raise NotImplemented('...')
 
     def contentfilter(self, filename):
-        """ Get a filter for content of filename and return unicode content. """
+        """ Get a filter for content of filename and return unicode content.
+        
+        @param filename: name of the file
+        """
         request = self.request
         mt = wikiutil.MimeType(filename=filename)
         for modulename in mt.module_name():
@@ -311,22 +389,21 @@
             request.log("Filter %s threw error '%s' for file %s" % (modulename, str(err), filename))
         return mt.mime_type(), data
 
-    def test(self, request):
-        raise NotImplemented('...')
-
     def _indexingRequest(self, request):
         """ Return a new request that can be used for index building.
         
         This request uses a security policy that lets the current user
         read any page. Without this policy some pages will not render,
-        which will create broken pagelinks index.        
+        which will create a broken pagelinks index.
+
+        @param request: current request
         """
         from MoinMoin.request.CLI import Request
         from MoinMoin.security import Permissions
         request = Request(request.url)
         class SecurityPolicy(Permissions):
             def read(*args, **kw):
-                return True        
+                return True
         request.user.may = SecurityPolicy(request.user)
         return request
 
@@ -346,17 +423,28 @@
         finally:
             f.close()
 
+
 ##############################################################################
 ### Searching
 ##############################################################################
 
 class Search:
     """ A search run """
-    
-    def __init__(self, request, query, sort='weight'):
+
+    def __init__(self, request, query, sort='weight', mtime=None,
+            historysearch=0):
+        """
+        @param request: current request
+        @param query: search query objects tree
+        @keyword sort: the sorting of the results (default: 'weight')
+        @keyword mtime: only show items newer than this timestamp (default: None)
+        @keyword historysearch: whether to show old revisions of a page (default: 0)
+        """
         self.request = request
         self.query = query
         self.sort = sort
+        self.mtime = mtime
+        self.historysearch = historysearch
         self.filtered = False
         self.fs_rootpage = "FS" # XXX FS hardcoded
 
@@ -367,38 +455,44 @@
             hits = self._xapianSearch()
         else:
             hits = self._moinSearch()
-            
+
         # important - filter deleted pages or pages the user may not read!
         if not self.filtered:
             hits = self._filter(hits)
 
-        # when xapian was used, we won't need to sort manually
-        if self.request.cfg.xapian_search:
+        # when xapian was used, we can estimate the number of matches
+        # Note: hits can't be estimated by xapian with historysearch enabled
+        if not self.request.cfg.xapian_index_history and \
+                self.request.cfg.xapian_search:
             self.sort = None
             mset = self._xapianMset
             estimated_hits = (
-                (mset.get_matches_estimated() == mset.get_matches_upper_bound() and
-                    mset.get_matches_estimated() == mset.get_matches_lower_bound()) and
-                '' or 'about',
+                (mset.get_matches_estimated() == mset.get_matches_upper_bound()
+                    and
+                 mset.get_matches_estimated() == mset.get_matches_lower_bound())
+                and '' or 'about',
                 mset.get_matches_estimated())
         else:
             estimated_hits = None
 
         return getSearchResults(self.request, self.query, hits, start,
                 self.sort, estimated_hits)
-        
 
     # ----------------------------------------------------------------
     # Private!
 
     def _xapianIndex(request):
+        """ Get the xapian index if possible
+
+        @param request: current request
+        """
         try:
             from MoinMoin.search.Xapian import Index
             index = Index(request)
         except ImportError:
-            index = None
+            return None
 
-        if index and index.exists():
+        if index.exists():
             return index
 
     _xapianIndex = staticmethod(_xapianIndex)
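The and/or chain that builds estimated_hits in run() above is the pre-Python-2.5 substitute for a conditional expression: the pair is ('', count) when Xapian reports an exact count (estimate equal to both bounds) and ('about', count) otherwise. A small equivalent sketch, assuming the same Xapian MSet accessors used above:

    def estimate_hits(mset):
        """Return ('', n) for an exact hit count, ('about', n) for an estimate."""
        estimated = mset.get_matches_estimated()
        exact = (estimated == mset.get_matches_upper_bound() and
                 estimated == mset.get_matches_lower_bound())
        if exact:
            return '', estimated
        return 'about', estimated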
@@ -407,32 +501,35 @@
         """ Search using Xapian
         
         Get a list of pages using fast xapian search and
-        return moin search in those pages.
+        then run moin search over those pages if needed.
         """
         clock = self.request.clock
         pages = None
         index = self._xapianIndex(self.request)
+
         if index and self.query.xapian_wanted():
             clock.start('_xapianSearch')
             try:
                 from MoinMoin.support import xapwrap
+
                 clock.start('_xapianQuery')
                 query = self.query.xapian_term(self.request, index.allterms)
                 self.request.log("xapianSearch: query = %r" %
                         query.get_description())
                 query = xapwrap.index.QObjQuery(query)
-                enq, mset, hits = index.search(query, sort=self.sort)
+                enq, mset, hits = index.search(query, sort=self.sort,
+                        historysearch=self.historysearch)
                 clock.stop('_xapianQuery')
+
                 #self.request.log("xapianSearch: finds: %r" % hits)
                 def dict_decode(d):
                     """ decode dict values to unicode """
                     for k, v in d.items():
                         d[k] = d[k].decode(config.charset)
                     return d
-                #pages = [{'uid': hit['uid'], 'values': dict_decode(hit['values'])}
-                #        for hit in hits]
                 pages = [dict_decode(hit['values']) for hit in hits]
                 self.request.log("xapianSearch: finds pages: %r" % pages)
+
                 self._xapianEnquire = enq
                 self._xapianMset = mset
                 self._xapianIndex = index
@@ -442,6 +539,7 @@
             #    pages = []
 
             try:
+                # xapian handled the full query
                 if not self.query.xapian_need_postproc():
                     clock.start('_xapianProcess')
                     try:
@@ -453,22 +551,31 @@
         else:
             # we didn't use xapian in this request
             self.request.cfg.xapian_search = 0
-        
+
+        # some postprocessing by _moinSearch is required
         return self._moinSearch(pages)
 
     def _xapianMatchDecider(self, term, pos):
-        if term[0] == 'S':      # TitleMatch
+        """ Returns correct Match object for a Xapian match
+        
+        @param term: the term as string
+        @param pos: starting position of the match
+        """
+        if term[0] == 'S': # TitleMatch
             return TitleMatch(start=pos, end=pos+len(term)-1)
-        else:                   # TextMatch (incl. headers)
+        else: # TextMatch (incl. headers)
             return TextMatch(start=pos, end=pos+len(term))
+
+    def _xapianMatch(self, uid, page=None):
+        """ Get all relevant Xapian matches per document id
         
-    def _xapianMatch(self, page, uid):
-        """ Get all relevant Xapian matches per document id """
+        @param uid: the id of the document in the xapian index
+        @keyword page: unused here, kept for the same call signature as _moinMatch
+        """
         positions = {}
         term = self._xapianEnquire.get_matching_terms_begin(uid)
         while term != self._xapianEnquire.get_matching_terms_end(uid):
             term_name = term.get_term()
-            for pos in self._xapianIndex.termpositions(uid,term.get_term()):
+            for pos in self._xapianIndex.termpositions(uid, term.get_term()):
                 if pos not in positions or \
                         len(positions[pos]) < len(term_name):
                     positions[pos] = term_name
@@ -477,7 +584,7 @@
             in positions.iteritems()]
 
         if not matches:
-            return [Match()]    # dummy for metadata, we got a match!
+            return [Match()] # dummy for metadata, we got a match!
 
         return matches
 
@@ -486,6 +593,8 @@
         
         Return list of tuples (page, match). The list may contain
         deleted pages or pages the user may not read.
+
+        @keyword pages: optional list of pages to search in
         """
         self.request.clock.start('_moinSearch')
         from MoinMoin.Page import Page
@@ -496,14 +605,18 @@
         hits = self._getHits(pages, self._moinMatch)
         self.request.clock.stop('_moinSearch')
         return hits
-    
-    def _moinMatch(self, page, uid):
-        """ Just kick off regular moinSearch """
+
+    def _moinMatch(self, page, uid=None):
+        """ Get all matches from regular moinSearch
+        
+        @param page: the current page instance
+        @keyword uid: unused here, kept for the same call signature as _xapianMatch
+        """
         return self.query.search(page)
 
     def _getHits(self, pages, matchSearchFunction):
         """ Get the hit tuples in pages through matchSearchFunction """
         hits = []
+        revisionCache = {}
         fs_rootpage = self.fs_rootpage
         for hit in pages:
             if 'values' in hit:
@@ -516,8 +629,17 @@
             wikiname = valuedict['wikiname']
             pagename = valuedict['pagename']
             attachment = valuedict['attachment']
+
+            if 'revision' in valuedict and valuedict['revision']:
+                revision = int(valuedict['revision'])
+            else:
+                revision = 0
+
             if wikiname in (self.request.cfg.interwikiname, 'Self'): # THIS wiki
-                page = Page(self.request, pagename)
+                page = Page(self.request, pagename, rev=revision)
+                if not self.historysearch and revision and \
+                        page.getRevList()[0] != revision:
+                    continue
                 if attachment:
                     if pagename == fs_rootpage: # not really an attachment
                         page = Page(self.request, "%s/%s" % (fs_rootpage, attachment))
@@ -525,11 +647,17 @@
                     else:
                         hits.append((wikiname, page, attachment, None))
                 else:
-                    matches = matchSearchFunction(page, uid)
+                    matches = matchSearchFunction(page=page, uid=uid)
                     if matches:
+                        if not self.historysearch and \
+                                pagename in revisionCache and \
+                                revisionCache[pagename][0] < revision:
+                            hits.remove(revisionCache[pagename][1])
+                            del revisionCache[pagename]
                         hits.append((wikiname, page, attachment, matches))
+                        revisionCache[pagename] = (revision, hits[-1])
             else: # other wiki
-                hits.append((wikiname, pagename, attachment, None))
+                hits.append((wikiname, pagename, attachment, None, revision))
         return hits
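revisionCache above collapses several indexed revisions of the same page into a single hit when historysearch is off: if a newer revision of an already-collected page turns up, the older hit is removed before the new one is appended. The same keep-only-the-newest idea as a standalone sketch, with made-up names:

    def keep_newest(hits):
        """hits: iterable of (pagename, revision, hit); keep one hit per page,
        preferring the highest revision number."""
        newest = {}  # pagename -> (revision, hit)
        for pagename, revision, hit in hits:
            if pagename not in newest or revision > newest[pagename][0]:
                newest[pagename] = (revision, hit)
        return [hit for (revision, hit) in newest.values()]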
 
     def _getPageList(self):
@@ -540,22 +668,27 @@
         will happen later on the hits, which is faster with current
         slow storage.
         """
-        filter = self.query.pageFilter()
-        if filter:
+        filter_ = self.query.pageFilter()
+        if filter_:
             # There is no need to filter the results again.
             self.filtered = True
-            return self.request.rootpage.getPageList(filter=filter)
+            return self.request.rootpage.getPageList(filter=filter_)
         else:
             return self.request.rootpage.getPageList(user='', exists=0)
+
+    def _filter(self, hits):
+        """ Filter out deleted or acl protected pages
         
-    def _filter(self, hits):
-        """ Filter out deleted or acl protected pages """
+        @param hits: list of hits
+        """
         userMayRead = self.request.user.may.read
         fs_rootpage = self.fs_rootpage + "/"
         thiswiki = (self.request.cfg.interwikiname, 'Self')
-        filtered = [(wikiname, page, attachment, match) for wikiname, page, attachment, match in hits
-                    if not wikiname in thiswiki or
+        filtered = [(wikiname, page, attachment, match)
+                for wikiname, page, attachment, match in hits
+                    if (not wikiname in thiswiki or
                        page.exists() and userMayRead(page.page_name) or
-                       page.page_name.startswith(fs_rootpage)]
+                       page.page_name.startswith(fs_rootpage)) and
+                       (not self.mtime or self.mtime <= page.mtime_usecs()/1000000)]
         return filtered
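The extra clause at the end of _filter compares self.mtime, a timestamp in seconds, with page.mtime_usecs(), which is in microseconds, hence the division by 1000000. Pulled out into a helper (the name is made up), the predicate reads:

    def new_enough(page, mtime):
        """True if no mtime limit is set or the page was modified at or after
        `mtime` (given in seconds; mtime_usecs() returns microseconds)."""
        return not mtime or mtime <= page.mtime_usecs() / 1000000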
 
--- a/MoinMoin/search/queryparser.py	Sun Aug 27 14:48:06 2006 +0200
+++ b/MoinMoin/search/queryparser.py	Sun Aug 27 15:29:01 2006 +0200
@@ -1,6 +1,6 @@
 # -*- coding: iso-8859-1 -*-
 """
-    MoinMoin - search engine query parser
+    MoinMoin - search query parser
     
     @copyright: 2005 MoinMoin:FlorianFesti,
                 2005 MoinMoin:NirSoffer,
@@ -11,7 +11,7 @@
 """
 
 import re
-from MoinMoin import config
+from MoinMoin import config, wikiutil
 from MoinMoin.search.results import Match, TitleMatch, TextMatch
 
 try:
@@ -26,7 +26,7 @@
 
 class BaseExpression:
     """ Base class for all search terms """
-    
+
     def __init__(self):
         self.negated = 0
 
@@ -35,7 +35,7 @@
 
     def negate(self):
         """ Negate the result of this term """
-        self.negated = 1 
+        self.negated = 1
 
     def pageFilter(self):
         """ Return a page filtering function
@@ -57,17 +57,16 @@
         This Base class returns a dummy Match() if the term is negated, else None.
         """
         if self.negated:
-            # XXX why?
             return [Match()]
         else:
             return None
-    
+
     def costs(self):
         """ Return estimated time to calculate this term
         
         Number is relative to other terms and has no real unit.
         It allows doing the fast searches first.
-        """ 
+        """
         return 0
 
     def highlight_re(self):
@@ -94,6 +93,9 @@
             self.search_re = re.compile(pattern, flags)
             self.pattern = pattern
 
+    def xapian_wanted(self):
+        return False
+
 
 class AndExpression(BaseExpression):
     """ A term connecting several sub terms with a logical AND """
@@ -114,7 +116,7 @@
 
     def subterms(self):
         return self._subterms
-    
+
     def costs(self):
         return self._costs
 
@@ -144,7 +146,7 @@
                         return False
                 return True
             return filter
-        
+
         return None
 
     def sortByCost(self):
@@ -168,7 +170,7 @@
         for s in self._subterms:
             highlight_re = s.highlight_re()
             if highlight_re: result.append(highlight_re)
-            
+
         return '|'.join(result)
 
     def xapian_wanted(self):
@@ -203,7 +205,7 @@
         if not not_terms:
             # no, just return query for not negated terms
             return t1
-        
+
         # yes, link not negated and negated terms' query with an AND_NOT query
         if len(not_terms) == 1:
             t2 = Query(not_terms[0])
@@ -215,16 +217,18 @@
 
 class OrExpression(AndExpression):
     """ A term connecting several sub terms with a logical OR """
-    
+
     operator = ' or '
 
     def search(self, page):
-        """ Search page with terms, cheap terms first
+        """ Search page with terms
+        
+        @param page: the page instance
+        """
 
-        XXX Do we have any reason to sort here? we are not breaking out
-        of the search in any case.
-        """
-        self.sortByCost()
+        # XXX Do we have any reason to sort here? we are not breaking out
+        # of the search in any case.
+        #self.sortByCost()
         matches = []
         for term in self._subterms:
             result = term.search(page)
@@ -243,7 +247,7 @@
     Both page content and the page title are searched, using an
     additional TitleSearch term.
     """
-    
+
     def __init__(self, pattern, use_re=False, case=False):
         """ Init a text search
 
@@ -257,10 +261,10 @@
         self.case = case
         self._build_re(self._pattern, use_re=use_re, case=case)
         self.titlesearch = TitleSearch(self._pattern, use_re=use_re, case=case)
-        
+
     def costs(self):
         return 10000
-    
+
     def __unicode__(self):
         neg = self.negated and '-' or ''
         return u'%s"%s"' % (neg, unicode(self._pattern))
@@ -287,11 +291,12 @@
                     continue
 
                 post = 0
-                for c in body[match.end():]:
-                    if c in config.chars_lower:
-                        post += 1
-                    else:
-                        break
+                # XXX only do this for stemmed words. how?
+                #for c in body[match.end():]:
+                #    if c in config.chars_lower:
+                #        post += 1
+                #    else:
+                #        break
 
                 matches.append(TextMatch(start=match.start(),
                         end=match.end()+post))
@@ -317,14 +322,12 @@
     def xapian_term(self, request, allterms):
         if self.use_re:
             # basic regex matching per term
-            terms = [term for term in allterms() if
-                    self.search_re.match(term)]
+            terms = [term for term in allterms() if self.search_re.match(term)]
             if not terms:
                 return Query()
             queries = [Query(Query.OP_OR, terms)]
         else:
-            analyzer = Xapian.WikiAnalyzer(request=request,
-                    language=request.cfg.language_default)
+            analyzer = Xapian.WikiAnalyzer(request=request, language=request.cfg.language_default)
             terms = self._pattern.split()
 
             # all parsed wikiwords, AND'ed
@@ -346,8 +349,7 @@
             if not self.case and stemmed:
                 new_pat = ' '.join(stemmed)
                 self._pattern = new_pat
-                self._build_re(new_pat, use_re=False, case=self.case,
-                        stemmed=True)
+                self._build_re(new_pat, use_re=False, case=self.case, stemmed=True)
 
         # titlesearch OR parsed wikiwords
         return Query(Query.OP_OR,
@@ -369,7 +371,7 @@
         self.use_re = use_re
         self.case = case
         self._build_re(self._pattern, use_re=use_re, case=case)
-        
+
     def costs(self):
         return 100
 
@@ -389,7 +391,7 @@
                 return False
             return True
         return filter
-            
+
     def search(self, page):
         # Get matches in page name
         matches = []
@@ -402,17 +404,18 @@
                     continue
 
                 post = 0
-                for c in page.page_name[match.end():]:
-                    if c in config.chars_lower:
-                        post += 1
-                    else:
-                        break
+                # XXX only do this for stemmed words. how?
+                #for c in page.page_name[match.end():]:
+                #    if c in config.chars_lower:
+                #        post += 1
+                #    else:
+                #        break
 
                 matches.append(TitleMatch(start=match.start(),
                         end=match.end()+post))
             else:
                 matches.append(TitleMatch(re_match=match))
-        
+
         if ((self.negated and matches) or
             (not self.negated and not matches)):
             return None
@@ -422,7 +425,7 @@
             return []
 
     def xapian_wanted(self):
-        return True             # only easy regexps possible
+        return True # only easy regexps possible
 
     def xapian_need_postproc(self):
         return self.case
@@ -448,7 +451,7 @@
             terms = self._pattern.split()
             terms = [[w for w, pos in analyzer.raw_tokenize(t)] for t in terms]
 
-            # all parsed wikiwords, AND'ed
+            # all parsed wikiwords, ANDed
             queries = []
             stemmed = []
             for t in terms:
@@ -475,8 +478,7 @@
             if not self.case and stemmed:
                 new_pat = ' '.join(stemmed)
                 self._pattern = new_pat
-                self._build_re(new_pat, use_re=False, case=self.case,
-                        stemmed=True)
+                self._build_re(new_pat, use_re=False, case=self.case, stemmed=True)
 
         return Query(Query.OP_AND, queries)
 
@@ -510,7 +512,7 @@
         else:
             self.pattern = pattern
             self.static = True
-        
+
     def costs(self):
         return 5000 # cheaper than a TextSearch
 
@@ -524,9 +526,8 @@
     def search(self, page):
         # Get matches in page name
         matches = []
+        Found = True
 
-        Found = True
-        
         for link in page.getPageLinks(page.request):
             if ((self.static and self.pattern == link) or
                 (not self.static and self.search_re.match(link))):
@@ -539,7 +540,7 @@
             results = self.textsearch.search(page)
             if results:
                 matches.extend(results)
-            else: #This happens e.g. for pages that use navigation macros
+            else: # This happens e.g. for pages that use navigation macros
                 matches.append(TextMatch(0, 0))
 
         # Decide what to do with the results.
@@ -552,7 +553,7 @@
             return []
 
     def xapian_wanted(self):
-        return True             # only easy regexps possible
+        return True # only easy regexps possible
 
     def xapian_need_postproc(self):
         return self.case
@@ -608,17 +609,25 @@
         return ""
 
     def search(self, page):
-        # We just use (and trust ;)) xapian for this.. deactivated for _moinSearch
-        if not self.xapian_called:
+        match = False
+        body = page.getPageHeader()
+
+        if re.findall('#language %s' % self.pattern, body):
+            match = True
+
+        # Decide what to do with the results.
+        if self.negated and match:
+            return None
+        elif match or (self.negated and not match):
+            return [Match()]
+        else:
             return []
-        else:
-            return [Match()]
 
     def xapian_wanted(self):
-        return True             # only easy regexps possible
+        return True # only easy regexps possible
 
     def xapian_need_postproc(self):
-        return False            # case-sensitivity would make no sense
+        return False # case-sensitivity would make no sense
 
     def xapian_term(self, request, allterms):
         self.xapian_called = True
@@ -667,7 +676,7 @@
         return u'(Category%s)' % self._pattern
 
     def xapian_wanted(self):
-        return True             # only easy regexps possible
+        return True # only easy regexps possible
 
     def xapian_need_postproc(self):
         return self.case
@@ -709,7 +718,7 @@
         self._pattern = pattern.lower()
         self.negated = 0
         self.use_re = use_re
-        self.case = False       # not case-sensitive!
+        self.case = False # not case-sensitive!
         self.xapian_called = False
         self._build_re(self._pattern, use_re=use_re, case=case)
 
@@ -724,17 +733,13 @@
         return ""
 
     def search(self, page):
-        # We just use (and trust ;)) xapian for this.. deactivated for _moinSearch
-        if not self.xapian_called:
-            return []
-        else:
-            return [Match()]
+        return None
 
     def xapian_wanted(self):
-        return True             # only easy regexps possible
+        return True # only easy regexps possible
 
     def xapian_need_postproc(self):
-        return False            # case-sensitivity would make no sense
+        return False # case-sensitivity would make no sense
 
     def xapian_term(self, request, allterms):
         self.xapian_called = True
@@ -773,7 +778,7 @@
         self._pattern = pattern.lower()
         self.negated = 0
         self.use_re = use_re
-        self.case = False       # not case-sensitive!
+        self.case = False # not case-sensitive!
         self.xapian_called = False
         self._build_re(self._pattern, use_re=use_re, case=case)
 
@@ -788,17 +793,29 @@
         return ""
 
     def search(self, page):
-        # We just use (and trust ;)) xapian for this.. deactivated for _moinSearch
-        if not self.xapian_called:
+        checks = {'underlay': page.isUnderlayPage,
+                  'standard': page.isStandardPage,
+                  'system': lambda: wikiutil.isSystemPage(page.request, page.page_name),
+                 }
+
+        try:
+            match = checks[self.pattern]()
+        except KeyError:
+            match = False
+
+        # Decide what to do with the results.
+        if self.negated and match:
+            return None
+        elif match or (self.negated and not match):
+            return [Match()]
+        else:
             return []
-        else:
-            return [Match()]
 
     def xapian_wanted(self):
-        return True             # only easy regexps possible
+        return True # only easy regexps possible
 
     def xapian_need_postproc(self):
-        return False            # case-sensitivity would make no sense
+        return False # case-sensitivity would make no sense
 
     def xapian_term(self, request, allterms):
         self.xapian_called = True
@@ -863,7 +880,7 @@
             if q:
                 result.append(q)
         return result
-            
+
     def _and_expression(self):
         result = None
         while not result and self._query:
@@ -878,13 +895,13 @@
             result.append(term)
             term = self._single_term()
         return result
-                                
+
     def _single_term(self):
         regex = (r'(?P<NEG>-?)\s*(' +              # leading '-'
                  r'(?P<OPS>\(|\)|(or\b(?!$)))|' +  # or, (, )
                  r'(?P<MOD>(\w+:)*)' +
                  r'(?P<TERM>("[^"]+")|' +
-                 r"('[^']+')|(\S+)))")             # search word itself
+                 r"('[^']+')|([^\s\)]+)))")        # search word itself
         self._query = self._query.strip()
         match = re.match(regex, self._query, re.U)
         if not match:
@@ -931,7 +948,7 @@
             elif "domain".startswith(m):
                 domain = True
 
-        # oh, let's better call xapian if we encouter this nasty regexp ;)
+        # oh, let's better call xapian if we encounter this nasty regexp ;)
         if not category:
             cat_re = re.compile(r'----\(-\*\)\(\\r\)\?\\n\)\(\.\*\)Category(.*)\\b', re.U)
             cat_match = cat_re.search(text)
@@ -967,4 +984,3 @@
                 text.startswith("'") and text.endswith("'"))
 
 
-
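The only functional change in _single_term above is the last TERM alternative: a bare search word is now matched with [^\s\)]+ instead of \S+, so a closing parenthesis is no longer swallowed into the word. A small demonstration with the same regular expression (repeated here only for illustration):

    import re

    regex = (r'(?P<NEG>-?)\s*(' +              # leading '-'
             r'(?P<OPS>\(|\)|(or\b(?!$)))|' +  # or, (, )
             r'(?P<MOD>(\w+:)*)' +
             r'(?P<TERM>("[^"]+")|' +
             r"('[^']+')|([^\s\)]+)))")        # search word itself

    m = re.match(regex, u'-title:Foo)', re.U)
    # m.group('NEG') == u'-', m.group('MOD') == u'title:', m.group('TERM') == u'Foo'
    # with the old \S+ pattern the TERM group would have been u'Foo)'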
--- a/MoinMoin/search/results.py	Sun Aug 27 14:48:06 2006 +0200
+++ b/MoinMoin/search/results.py	Sun Aug 27 15:29:01 2006 +0200
@@ -1,6 +1,6 @@
 # -*- coding: iso-8859-1 -*-
 """
-    MoinMoin - search engine
+    MoinMoin - search results processing
     
     @copyright: 2005 MoinMoin:FlorianFesti,
                 2005 MoinMoin:NirSoffer,
@@ -25,7 +25,7 @@
     """
     # Default match weight
     _weight = 1.0
-    
+
     def __init__(self, start=0, end=0, re_match=None):
         self.re_match = re_match
         if not re_match:
@@ -42,7 +42,7 @@
                  self.start == other.start and
                  self.end == other.end)
         return equal
-        
+
     def __ne__(self, other):
         return not self.__eq__(other)
 
@@ -64,7 +64,7 @@
 
     # object properties
     start = property(_get_start)
-    end   = property(_get_end)
+    end = property(_get_end)
 
 
 class TextMatch(Match):
@@ -75,7 +75,7 @@
 class TitleMatch(Match):
     """ Represents a match in the page title
     
-    Has more weight as a match in the page content.
+    Has more weight than a match in the page content.
     """
     # Matches in titles are much more important in wikis. This setting
     # seems to make all pages that have matches in the title to appear
@@ -139,19 +139,16 @@
         else:
             matches = self._matches
 
-        # Filter by type and sort by sort using fast schwartzian
-        # transform.
+        # Filter by type and sort by the 'sort' criterion using a fast schwartzian transform.
         if sort == 'start':
-            tmp = [(match.start, match) for match in matches
-                   if instance(match, type)]
+            tmp = [(match.start, match) for match in matches if isinstance(match, type)]
         else:
-            tmp = [(match.weight(), match) for match in matches
-                   if instance(match, type)]
+            tmp = [(match.weight(), match) for match in matches if isinstance(match, type)]
         tmp.sort()
         if sort == 'weight':
             tmp.reverse()
         matches = [item[1] for item in tmp]
-        
+
         return matches
 
     def _unique_matches(self, type=Match):
@@ -164,10 +161,8 @@
         @rtype: list
         @return: list of matches of type, sorted by match.start
         """
-        # Filter by type and sort by match.start using fast schwartzian
-        # transform.
-        tmp = [(match.start, match) for match in self._matches
-               if isinstance(match, type)]
+        # Filter by type and sort by match.start using fast schwartzian transform.
+        tmp = [(match.start, match) for match in self._matches if isinstance(match, type)]
         tmp.sort()
 
         if not len(tmp):
@@ -183,11 +178,11 @@
             matches.append(item[1])
 
         return matches
-    
+
 
 class FoundAttachment(FoundPage):
-    """ Represent an attachment in search results """
-    
+    """ Represents an attachment in search results """
+
     def __init__(self, page_name, attachment, matches=None, page=None):
         self.page_name = page_name
         self.attachment = attachment
@@ -207,8 +202,8 @@
 
 
 class FoundRemote(FoundPage):
-    """ Represent an attachment in search results """
-    
+    """ Represents a page or attachment of a remote wiki in search results """
+
     def __init__(self, wikiname, page_name, attachment, matches=None, page=None):
         self.wikiname = wikiname
         self.page_name = page_name
@@ -243,7 +238,7 @@
     by name and then by rank.
     """
     # Public functions --------------------------------------------------
-    
+
     def __init__(self, query, hits, pages, elapsed, sort, estimated_hits):
         self.query = query # the query
         self.hits = hits # hits list
@@ -263,13 +258,13 @@
         tmp.sort()
         tmp.reverse()
         self.hits = [item[2] for item in tmp]
-        
+
     def _sortByPagename(self):
         """ Sorts a list of found pages alphabetical by page name """
         tmp = [(hit.page_name, hit) for hit in self.hits]
         tmp.sort()
         self.hits = [item[1] for item in tmp]
-        
+
     def stats(self, request, formatter, hitsFrom):
         """ Return search statistics, formatted with formatter
 
@@ -331,8 +326,7 @@
         # Add pages formatted as list
         if self.hits:
             write(list(1))
-            
-            # XXX: Do some xapian magic here
+
             if paging:
                 hitsTo = hitsFrom + request.cfg.search_results_per_page
                 displayHits = self.hits[hitsFrom:hitsTo]
@@ -346,10 +340,14 @@
                         'do': 'get',
                         'target': page.attachment,
                     }
+                elif page.page.rev and page.page.rev != page.page.getRevList()[0]:
+                    querydict = {
+                        'rev': page.page.rev,
+                    }
                 else:
                     querydict = None
                 querystr = self.querystring(querydict)
-            
+
                 matchInfo = ''
                 if info:
                     matchInfo = self.formatInfo(f, page)
@@ -365,7 +363,7 @@
                 write(''.join(item))
             write(list(0))
             if paging:
-                write(self.formatPrevNextPageLinks(hitsFrom=hitsFrom,
+                write(self.formatPageLinks(hitsFrom=hitsFrom,
                     hitsPerPage=request.cfg.search_results_per_page,
                     hitsNum=len(self.hits)))
 
@@ -397,12 +395,11 @@
 
         if paging and len(self.hits) <= request.cfg.search_results_per_page:
             paging = False
-        
+
         # Add pages formatted as definition list
         if self.hits:
             write(f.definition_list(1))
 
-            # XXX: Do some xapian magic here
             if paging:
                 hitsTo = hitsFrom+request.cfg.search_results_per_page
                 displayHits = self.hits[hitsFrom:hitsTo]
@@ -425,7 +422,12 @@
                     querydict = None
                 else:
                     fmt_context = self.formatContext(page, context, maxlines)
-                    querydict = None
+                    if page.page.rev and page.page.rev != page.page.getRevList()[0]:
+                        querydict = {
+                            'rev': page.page.rev,
+                        }
+                    else:
+                        querydict = None
                 querystr = self.querystring(querydict)
                 item = [
                     f.definition_term(1),
@@ -442,17 +444,17 @@
                 write(''.join(item))
             write(f.definition_list(0))
             if paging:
-                write(self.formatPrevNextPageLinks(hitsFrom=hitsFrom,
+                write(self.formatPageLinks(hitsFrom=hitsFrom,
                     hitsPerPage=request.cfg.search_results_per_page,
                     hitsNum=len(self.hits)))
-        
+
         return self.getvalue()
 
     # Private -----------------------------------------------------------
 
     # These methods are not meant to be used by clients and may change
     # without notice.
-    
+
     def formatContext(self, page, context, maxlines):
         """ Format search context for each matched page
 
@@ -465,7 +467,7 @@
         last = len(body) - 1
         lineCount = 0
         output = []
-        
+
         # Get unique text matches sorted by match.start, try to ignore
         # matches in page header, and show the first maxlines matches.
         # TODO: when we implement weight algorithm for text matches, we
@@ -477,7 +479,7 @@
         # Format context
         while i < len(matches) and lineCount < maxlines:
             match = matches[i]
-            
+
             # Get context range for this match
             start, end = self.contextRange(context, match, start, last)
 
@@ -487,7 +489,7 @@
             # same match again on a separate line.
 
             output.append(f.text(u'...'))
-            
+
             # Get the index of the first match completely within the
             # context.
             for j in xrange(0, len(matches)):
@@ -531,9 +533,9 @@
                 # This is a page with no text, only header, for example,
                 # a redirect page.
                 output = f.text(page.page.getPageHeader(length=context))
-        
+
         return output
-        
+
     def firstInterestingMatch(self, page, matches):
         """ Return the first interesting match
 
@@ -578,7 +580,7 @@
         if cstart < start:
             cend += start - cstart
             cstart = start
-            
+
         # But if end is after last, give back context to start
         if cend > last:
             cstart -= cend - last
@@ -600,7 +602,7 @@
         """
         # Get unique title matches sorted by match.start
         matches = page.get_matches(unique=1, sort='start', type=TitleMatch)
-        
+
         # Format
         pagename = page.page_name
         f = self.formatter
@@ -618,7 +620,7 @@
         # Add text after match
         if start < len(pagename):
             output.append(f.text(pagename[start:]))
-        
+
         if page.attachment: # show the attachment that matched
             output.extend([
                     " ",
@@ -640,7 +642,7 @@
         @param location: current location in text
         @rtype: unicode
         @return: formatted match or empty string
-        """        
+        """
         start = max(location, match.start)
         if start < match.end:
             f = self.formatter
@@ -652,7 +654,7 @@
             return ''.join(output)
         return ''
 
-    def formatPrevNextPageLinks(self, hitsFrom, hitsPerPage, hitsNum):
+    def formatPageLinks(self, hitsFrom, hitsPerPage, hitsNum):
         """ Format previous and next page links in page
 
         @param hitsFrom: current position in the hits
@@ -667,31 +669,33 @@
         querydict = wikiutil.parseQueryString(self.request.query_string)
         def page_url(n):
             querydict.update({'from': n * hitsPerPage})
-            return self.request.page.url(self.request, querydict, escape=0)
-        
+            return self.request.page.url(self.request, querydict,
+                    escape=0, relative=False)
+
         pages = float(hitsNum) / hitsPerPage
         if pages - int(pages) > 0.0:
             pages = int(pages) + 1
         cur_page = hitsFrom / hitsPerPage
-        l = []
+
+        textlinks = []
 
         # previous page available
         if cur_page > 0:
-            l.append(''.join([
+            textlinks.append(''.join([
                 f.url(1, href=page_url(cur_page-1)),
                 f.text(_('Previous')),
                 f.url(0)
             ]))
         else:
-            l.append('')
+            textlinks.append('')
 
         # list of pages to be shown
         page_range = range(*(
-            cur_page - 4 < 0 and
-                (0, pages >= 10 and 10 or pages) or
-                (cur_page - 4, cur_page + 6 >= pages and
+            cur_page - 5 < 0 and
+                (0, pages > 10 and 10 or pages) or
+                (cur_page - 5, cur_page + 6 > pages and
                     pages or cur_page + 6)))
-        l.extend([''.join([
+        textlinks.extend([''.join([
                 i != cur_page and f.url(1, href=page_url(i)) or '',
                 f.text(str(i+1)),
                 i != cur_page and f.url(0) or '',
@@ -699,34 +703,45 @@
 
         # next page available
         if cur_page < pages-1:
-            l.append(''.join([
+            textlinks.append(''.join([
                 f.url(1, href=page_url(cur_page+1)),
                 f.text(_('Next')),
                 f.url(0)
             ]))
         else:
-            l.append('')
+            textlinks.append('')
 
         return ''.join([
             f.table(1, attrs={'tableclass': 'searchpages'}),
             f.table_row(1),
                 f.table_cell(1),
                 # textlinks
-                (f.table_cell(0) + f.table_cell(1)).join(l),
+                (f.table_cell(0) + f.table_cell(1)).join(textlinks),
                 f.table_cell(0),
             f.table_row(0),
             f.table(0),
         ])
 
     def formatHitInfoBar(self, page):
+        """ Returns the code for the information below a search hit
+
+        @param page: the page instance
+        """
         f = self.formatter
         _ = self.request.getText
+        request = self.request
+
+        rev = page.page.get_real_rev()
+        if rev is None:
+            rev = 0
+
         return ''.join([
             f.paragraph(1, attr={'class': 'searchhitinfobar'}),
             f.text('%.1fk - ' % (page.page.size()/1024.0)),
-            f.text('rev: %d %s- ' % (page.page.get_real_rev(),
-                not page.page.rev and '(%s) ' % _('current') or '')),
-            f.text('last modified: %(time)s' % page.page.lastEditInfo()),
+            f.text('rev: %d %s- ' % (rev,
+                rev == page.page.getRevList()[0] and
+                '(%s) ' % _('current') or '')),
+            f.text('last modified: %s' % page.page.mtime_printable(request)),
             # XXX: proper metadata
             #f.text('lang: %s - ' % page.page.language),
             #f.url(1, href='#'),
@@ -736,15 +751,23 @@
         ])
 
     def querystring(self, querydict=None):
-        """ Return query string, used in the page link """
+        """ Return query string, used in the page link
+        
+        @keyword querydict: use these parameters (default: None)
+        """
         if querydict is None:
-            querydict = {'highlight': self.query.highlight_re()}
+            querydict = {}
+        if 'action' not in querydict or querydict['action'] == 'AttachFile':
+            querydict.update({'highlight': self.query.highlight_re()})
         querystr = wikiutil.makeQueryString(querydict)
-        #querystr = wikiutil.escape(querystr)
         return querystr
 
     def formatInfo(self, formatter, page):
-        """ Return formatted match info """
+        """ Return formatted match info
+        
+        @param formatter: the formatter instance to use
+        @param page: the current page instance
+        """
         template = u' . . . %s %s'
         template = u"%s%s%s" % (formatter.span(1, css_class="info"),
                                 template,
@@ -771,6 +794,9 @@
 
         Each request might need different translations or other user
         preferences.
+
+        @param request: current request
+        @param formatter: the formatter instance to use
         """
         self.buffer = StringIO.StringIO()
         self.formatter = formatter
@@ -781,6 +807,15 @@
 
 
 def getSearchResults(request, query, hits, start, sort, estimated_hits):
+    """ Return a SearchResults object with the specified properties
+
+    @param request: current request
+    @param query: the search query object tree
+    @param hits: list of hits
+    @param start: position to start showing the hits
+    @param sort: sorting of the results, either 'weight' or 'page_name'
+    @param estimated_hits: if set, a tuple of ('' or 'about', estimated number of hits)
+    """
     result_hits = []
     for wikiname, page, attachment, match in hits:
         if wikiname in (request.cfg.interwikiname, 'Self'): # a local match
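formatPageLinks (renamed from formatPrevNextPageLinks) now also emits numbered links for a window of result pages around the current one; pages is the total number of result pages, i.e. hitsNum divided by hitsPerPage rounded up. The nested and/or expression that builds page_range is equivalent to this clearer sketch:

    def page_window(cur_page, pages):
        """Result-page numbers to show as links: near the start, the first
        ten pages; otherwise five before through five after the current
        page, clamped to the available range."""
        if cur_page < 5:
            start, end = 0, min(10, pages)
        else:
            start, end = cur_page - 5, min(pages, cur_page + 6)
        return range(start, end)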
--- a/MoinMoin/support/__init__.py	Sun Aug 27 14:48:06 2006 +0200
+++ b/MoinMoin/support/__init__.py	Sun Aug 27 15:29:01 2006 +0200
@@ -10,3 +10,16 @@
     @copyright: 2001-2004 by Jürgen Hermann <jh@web.de>
     @license: GNU GPL, see COPYING for details.
 """
+
+try:
+    sorted = sorted
+except NameError:
+    def sorted(l, *args, **kw):
+        l = l[:]
+        # Python 2.3's list.sort() only takes a comparison function as positional arg
+        if 'cmp' in kw:
+            args = (kw['cmp'], )
+
+        l.sort(*args)
+        return l
+
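The block above backfills the sorted() builtin for Python 2.3, where it does not exist (it appeared in 2.4) and list.sort() only accepts a comparison function positionally. A quick usage sketch of what the shim supports:

    from MoinMoin.support import sorted

    print sorted([3, 1, 2])                 # [1, 2, 3]
    print sorted(['b', 'A'],
                 cmp=lambda a, b: cmp(a.lower(), b.lower()))   # ['A', 'b']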
--- /dev/null	Thu Jan 01 00:00:00 1970 +0000
+++ b/MoinMoin/support/parsedatetime/__init__.py	Sun Aug 27 15:29:01 2006 +0200
@@ -0,0 +1,17 @@
+version = '0.7'
+author  = 'Mike Taylor <http://code-bear.com>'
+license = """Copyright (c) 2004-2006 Mike Taylor, All rights reserved.
+
+Licensed under the Apache License, Version 2.0 (the "License");
+you may not use this file except in compliance with the License.
+You may obtain a copy of the License at
+
+   http://www.apache.org/licenses/LICENSE-2.0
+
+Unless required by applicable law or agreed to in writing, software
+distributed under the License is distributed on an "AS IS" BASIS,
+WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+See the License for the specific language governing permissions and
+limitations under the License.
+"""
+
--- /dev/null	Thu Jan 01 00:00:00 1970 +0000
+++ b/MoinMoin/support/parsedatetime/parsedatetime.py	Sun Aug 27 15:29:01 2006 +0200
@@ -0,0 +1,1285 @@
+#!/usr/bin/env python
+
+"""
+Parse human-readable date/time text.
+"""
+
+__license__ = """Copyright (c) 2004-2006 Mike Taylor, All rights reserved.
+
+Licensed under the Apache License, Version 2.0 (the "License");
+you may not use this file except in compliance with the License.
+You may obtain a copy of the License at
+
+   http://www.apache.org/licenses/LICENSE-2.0
+
+Unless required by applicable law or agreed to in writing, software
+distributed under the License is distributed on an "AS IS" BASIS,
+WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+See the License for the specific language governing permissions and
+limitations under the License.
+"""
+__author__       = 'Mike Taylor <http://code-bear.com>'
+__contributors__ = ['Darshana Chhajed <mailto://darshana@osafoundation.org>',
+                   ]
+
+_debug = False
+
+
+import string, re, time
+import datetime, calendar, rfc822
+import parsedatetime_consts
+
+
+# Copied from feedparser.py
+# Universal Feedparser, Copyright (c) 2002-2006, Mark Pilgrim, All rights reserved.
+# Originally a def inside of _parse_date_w3dtf()
+def _extract_date(m):
+    year = int(m.group('year'))
+    if year < 100:
+        year = 100 * int(time.gmtime()[0] / 100) + int(year)
+    if year < 1000:
+        return 0, 0, 0
+    julian = m.group('julian')
+    if julian:
+        julian = int(julian)
+        month = julian / 30 + 1
+        day = julian % 30 + 1
+        jday = None
+        while jday != julian:
+            t = time.mktime((year, month, day, 0, 0, 0, 0, 0, 0))
+            jday = time.gmtime(t)[-2]
+            diff = abs(jday - julian)
+            if jday > julian:
+                if diff < day:
+                    day = day - diff
+                else:
+                    month = month - 1
+                    day = 31
+            elif jday < julian:
+                if day + diff < 28:
+                    day = day + diff
+                else:
+                    month = month + 1
+        return year, month, day
+    month = m.group('month')
+    day = 1
+    if month is None:
+        month = 1
+    else:
+        month = int(month)
+        day = m.group('day')
+        if day:
+            day = int(day)
+        else:
+            day = 1
+    return year, month, day
+
+# Copied from feedparser.py 
+# Universal Feedparser, Copyright (c) 2002-2006, Mark Pilgrim, All rights reserved.
+# Originally a def inside of _parse_date_w3dtf()
+def _extract_time(m):
+    if not m:
+        return 0, 0, 0
+    hours = m.group('hours')
+    if not hours:
+        return 0, 0, 0
+    hours = int(hours)
+    minutes = int(m.group('minutes'))
+    seconds = m.group('seconds')
+    if seconds:
+        seconds = int(seconds)
+    else:
+        seconds = 0
+    return hours, minutes, seconds
+
+
+# Copied from feedparser.py
+# Universal Feedparser, Copyright (c) 2002-2006, Mark Pilgrim, All rights reserved.
+# Modified to return a tuple instead of mktime
+#
+# Original comment:
+#       W3DTF-style date parsing adapted from PyXML xml.utils.iso8601, written by
+#       Drake and licensed under the Python license.  Removed all range checking
+#       for month, day, hour, minute, and second, since mktime will normalize
+#       these later
+def _parse_date_w3dtf(dateString):
+    # the __extract_date and __extract_time methods were
+    # copied-out so they could be used by my code --bear
+    def __extract_tzd(m):
+        '''Return the Time Zone Designator as an offset in seconds from UTC.'''
+        if not m:
+            return 0
+        tzd = m.group('tzd')
+        if not tzd:
+            return 0
+        if tzd == 'Z':
+            return 0
+        hours = int(m.group('tzdhours'))
+        minutes = m.group('tzdminutes')
+        if minutes:
+            minutes = int(minutes)
+        else:
+            minutes = 0
+        offset = (hours*60 + minutes) * 60
+        if tzd[0] == '+':
+            return -offset
+        return offset
+
+    __date_re = ('(?P<year>\d\d\d\d)'
+                 '(?:(?P<dsep>-|)'
+                 '(?:(?P<julian>\d\d\d)'
+                 '|(?P<month>\d\d)(?:(?P=dsep)(?P<day>\d\d))?))?')
+    __tzd_re = '(?P<tzd>[-+](?P<tzdhours>\d\d)(?::?(?P<tzdminutes>\d\d))|Z)'
+    __tzd_rx = re.compile(__tzd_re)
+    __time_re = ('(?P<hours>\d\d)(?P<tsep>:|)(?P<minutes>\d\d)'
+                 '(?:(?P=tsep)(?P<seconds>\d\d(?:[.,]\d+)?))?'
+                 + __tzd_re)
+    __datetime_re = '%s(?:T%s)?' % (__date_re, __time_re)
+    __datetime_rx = re.compile(__datetime_re)
+    m = __datetime_rx.match(dateString)
+    if (m is None) or (m.group() != dateString): return
+    return _extract_date(m) + _extract_time(m) + (0, 0, 0)
+
+
+# Copied from feedparser.py
+# Universal Feedparser, Copyright (c) 2002-2006, Mark Pilgrim, All rights reserved.
+# Modified to return a tuple instead of mktime
+#
+def _parse_date_rfc822(dateString):
+    '''Parse an RFC822, RFC1123, RFC2822, or asctime-style date'''
+    data = dateString.split()
+    if data[0][-1] in (',', '.') or data[0].lower() in rfc822._daynames:
+        del data[0]
+    if len(data) == 4:
+        s = data[3]
+        i = s.find('+')
+        if i > 0:
+            data[3:] = [s[:i], s[i+1:]]
+        else:
+            data.append('')
+        dateString = " ".join(data)
+    if len(data) < 5:
+        dateString += ' 00:00:00 GMT'
+    return rfc822.parsedate_tz(dateString)
+
+# rfc822.py defines several time zones, but we define some extra ones.
+# 'ET' is equivalent to 'EST', etc.
+_additional_timezones = {'AT': -400, 'ET': -500, 'CT': -600, 'MT': -700, 'PT': -800}
+rfc822._timezones.update(_additional_timezones)
+
+
+class Calendar:
+    """
+    A collection of routines to input, parse and manipulate dates and times.
+    The text can either be 'normal' date values or it can be human readable.
+    """
+
+    def __init__(self, constants=None):
+        """
+        Default constructor for the Calendar class.
+
+        @type  constants: object
+        @param constants: Instance of the class L{CalendarConstants}
+
+        @rtype:  object
+        @return: Calendar instance
+        """
+          # if a constants reference is not included, use default
+        if constants is None:
+            self.ptc = parsedatetime_consts.Constants()
+        else:
+            self.ptc = constants
+
+        self.CRE_SPECIAL   = re.compile(self.ptc.RE_SPECIAL,   re.IGNORECASE)
+        self.CRE_UNITS     = re.compile(self.ptc.RE_UNITS,     re.IGNORECASE)
+        self.CRE_QUNITS    = re.compile(self.ptc.RE_QUNITS,    re.IGNORECASE)
+        self.CRE_MODIFIER  = re.compile(self.ptc.RE_MODIFIER,  re.IGNORECASE)
+        self.CRE_MODIFIER2 = re.compile(self.ptc.RE_MODIFIER2, re.IGNORECASE)
+        self.CRE_TIMEHMS   = re.compile(self.ptc.RE_TIMEHMS,   re.IGNORECASE)
+        self.CRE_TIMEHMS2  = re.compile(self.ptc.RE_TIMEHMS2,  re.IGNORECASE)
+        self.CRE_DATE      = re.compile(self.ptc.RE_DATE,      re.IGNORECASE)
+        self.CRE_DATE2     = re.compile(self.ptc.RE_DATE2,     re.IGNORECASE)
+        self.CRE_DATE3     = re.compile(self.ptc.RE_DATE3,     re.IGNORECASE)
+        self.CRE_MONTH     = re.compile(self.ptc.RE_MONTH,     re.IGNORECASE)
+        self.CRE_WEEKDAY   = re.compile(self.ptc.RE_WEEKDAY,   re.IGNORECASE)
+        self.CRE_DAY       = re.compile(self.ptc.RE_DAY,       re.IGNORECASE)
+        self.CRE_TIME      = re.compile(self.ptc.RE_TIME,      re.IGNORECASE)
+        self.CRE_REMAINING = re.compile(self.ptc.RE_REMAINING, re.IGNORECASE)
+
+        #regex for date/time ranges
+        self.CRE_RTIMEHMS  = re.compile(self.ptc.RE_RTIMEHMS,  re.IGNORECASE)
+        self.CRE_RTIMEHMS2 = re.compile(self.ptc.RE_RTIMEHMS2, re.IGNORECASE)
+        self.CRE_RDATE     = re.compile(self.ptc.RE_RDATE,     re.IGNORECASE)
+        self.CRE_RDATE3    = re.compile(self.ptc.RE_RDATE3,    re.IGNORECASE)
+
+        self.CRE_TIMERNG1  = re.compile(self.ptc.TIMERNG1, re.IGNORECASE)
+        self.CRE_TIMERNG2  = re.compile(self.ptc.TIMERNG2, re.IGNORECASE)
+        self.CRE_TIMERNG3  = re.compile(self.ptc.TIMERNG3, re.IGNORECASE)
+        self.CRE_DATERNG1  = re.compile(self.ptc.DATERNG1, re.IGNORECASE)
+        self.CRE_DATERNG2  = re.compile(self.ptc.DATERNG2, re.IGNORECASE)
+        self.CRE_DATERNG3  = re.compile(self.ptc.DATERNG3, re.IGNORECASE)
+
+        self.invalidFlag   = False  # Is set if the datetime string entered cannot be parsed at all
+        self.weekdyFlag    = False  # monday/tuesday/...
+        self.dateStdFlag   = False  # 07/21/06
+        self.dateStrFlag   = False  # July 21st, 2006
+        self.timeFlag      = False  # 5:50 
+        self.meridianFlag  = False  # am/pm
+        self.dayStrFlag    = False  # tomorrow/yesterday/today/..
+        self.timeStrFlag   = False  # lunch/noon/breakfast/...
+        self.modifierFlag  = False  # after/before/prev/next/..
+        self.modifier2Flag = False  # after/before/prev/next/..
+        self.unitsFlag     = False  # hrs/weeks/yrs/min/..
+        self.qunitsFlag    = False  # h/m/t/d..
+
+
+    def _convertUnitAsWords(self, unitText):
+        """
+        Converts text units into their number value
+
+        Five = 5
+        Twenty Five = 25
+        Two hundred twenty five = 225
+        Two thousand and twenty five = 2025
+        Two thousand twenty five = 2025
+
+        @type  unitText: string
+        @param unitText: number string
+
+        @rtype:  integer
+        @return: numerical value of unitText
+        """
+        # TODO: implement this
+        pass
+
+
+    def _buildTime(self, source, quantity, modifier, units):
+        """
+        Take quantity, modifier and unit strings and convert them into values.
+        Then calculate the time and return the adjusted sourceTime
+
+        @type  source:   time
+        @param source:   time to use as the base (or source)
+        @type  quantity: string
+        @param quantity: quantity string
+        @type  modifier: string
+        @param modifier: how quantity and units modify the source time
+        @type  units:    string
+        @param units:    unit of the quantity (i.e. hours, days, months, etc)
+
+        @rtype:  timetuple
+        @return: timetuple of the calculated time
+        """
+        if _debug:
+            print '_buildTime: [%s][%s][%s]' % (quantity, modifier, units)
+
+        if source is None:
+            source = time.localtime()
+
+        if quantity is None:
+            quantity = ''
+        else:
+            quantity = string.strip(quantity)
+
+        if len(quantity) == 0:
+            qty = 1
+        else:
+            try:
+                qty = int(quantity)
+            except ValueError:
+                qty = 0
+
+        if modifier in self.ptc.Modifiers:
+            qty = qty * self.ptc.Modifiers[modifier]
+
+            if units is None or units == '':
+                units = 'dy'
+
+        # plurals are handled by the regexes (this could be a source of bugs, though)
+
+        (yr, mth, dy, hr, mn, sec, wd, yd, isdst) = source
+
+        start  = datetime.datetime(yr, mth, dy, hr, mn, sec)
+        target = start
+
+        if units.startswith('y'):
+            target = self.inc(start, year=qty)
+        elif units.endswith('th') or units.endswith('ths'):
+            target = self.inc(start, month=qty)
+        else:
+            if units.startswith('d'):
+                target = start + datetime.timedelta(days=qty)
+            elif units.startswith('h'):
+                target = start + datetime.timedelta(hours=qty)
+            elif units.startswith('m'):
+                target = start + datetime.timedelta(minutes=qty)
+            elif units.startswith('s'):
+                target = start + datetime.timedelta(seconds=qty)
+            elif units.startswith('w'):
+                target = start + datetime.timedelta(weeks=qty)
+
+        if target != start:
+            self.invalidFlag = False
+
+        return target.timetuple()
+
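+    # Illustrative sketch (not part of the upstream parsedatetime source);
+    # `cal` is assumed to be an instance of the parser class these methods
+    # belong to:
+    #
+    #   cal._buildTime(time.localtime(), '5', '', 'days')
+    #   # -> time tuple 5 days after the given source time (timedelta(days=5))
+    #
+    #   cal._buildTime(time.localtime(), '2', '', 'hr')
+    #   # -> time tuple 2 hours after the given source time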
+
+    def parseDate(self, dateString):
+        """
+        Parses strings like 05/28/2006 or 04.21
+
+        @type  dateString: string
+        @param dateString: text to convert to a datetime
+
+        @rtype:  datetime
+        @return: calculated datetime value of dateString
+        """
+        yr, mth, dy, hr, mn, sec, wd, yd, isdst = time.localtime()
+
+        s = dateString
+        m = self.CRE_DATE2.search(s)
+        if m is not None:
+            index = m.start()
+            mth   = int(s[:index])
+            s     = s[index + 1:]
+
+        m = self.CRE_DATE2.search(s)
+        if m is not None:
+            index = m.start()
+            dy    = int(s[:index])
+            yr    = int(s[index + 1:])
+            # TODO should this have a birthday epoch constraint?
+            if yr < 99:
+                yr += 2000
+        else:
+            dy = int(string.strip(s))
+
+        if (mth > 0 and mth <= 12) and (dy > 0 and dy <= self.ptc.DaysInMonthList[mth - 1]):
+            sourceTime = (yr, mth, dy, hr, mn, sec, wd, yd, isdst)
+        else:
+            self.invalidFlag = True
+            sourceTime       = time.localtime()  # return current time if the date string is invalid
+
+        return sourceTime
+
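+    # Usage sketch (illustrative only, not part of the upstream source;
+    # `cal` is an instance of the parser class these methods belong to):
+    #
+    #   cal.parseDate('07/21/06')
+    #   # -> time tuple for 2006-07-21; two-digit years are mapped to 2000+,
+    #   #    and the time-of-day fields keep the current local time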
+
+    def parseDateText(self, dateString):
+        """
+        Parses strings like "May 31st, 2006" or "Jan 1st" or "July 2006"
+
+        @type  dateString: string
+        @param dateString: text to convert to a datetime
+
+        @rtype:  datetime
+        @return: calculated datetime value of dateString
+        """
+        yr, mth, dy, hr, mn, sec, wd, yd, isdst = time.localtime()
+
+        currentMth = mth
+        currentDy  = dy
+
+        s   = dateString.lower()
+        m   = self.CRE_DATE3.search(s)
+        mth = m.group('mthname')
+        mth = self.ptc.MonthOffsets[mth]
+
+        if m.group('day') is not None:
+            dy = int(m.group('day'))
+        else:
+            dy = 1
+
+        if m.group('year') is not None:
+            yr = int(m.group('year'))
+        elif (mth < currentMth) or (mth == currentMth and dy < currentDy):
+            # if that day and month have already passed in this year,
+            # then increment the year by 1
+            yr += 1
+
+        if dy > 0 and dy <= self.ptc.DaysInMonthList[mth - 1]:
+            sourceTime = (yr, mth, dy, 9, 0, 0, wd, yd, isdst)
+        else:
+            # Return current time if the date string is invalid
+            self.invalidFlag = True
+            sourceTime       = time.localtime()
+
+        return sourceTime
+
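+    # Usage sketch (illustrative only, not part of the upstream source):
+    #
+    #   cal.parseDateText('August 25, 2006')
+    #   # -> time tuple for 2006-08-25 09:00:00
+    #
+    #   cal.parseDateText('Jan 1st')
+    #   # -> January 1st at 09:00:00, of next year if that date has
+    #   #    already passed this year, otherwise of the current year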
+
+    def evalRanges(self, datetimeString, sourceTime=None):
+        """
+        Evaluates the strings with time or date ranges
+
+        @type  datetimeString: string
+        @param datetimeString: datetime text to evaluate
+        @type  sourceTime:     datetime
+        @param sourceTime:     datetime value to use as the base
+
+        @rtype:  tuple
+        @return: tuple of the start datetime, end datetime and the invalid flag
+        """
+        startTime = ''
+        endTime   = ''
+        startDate = ''
+        endDate   = ''
+        rangeFlag = 0
+
+        s = string.strip(datetimeString.lower())
+
+        m = self.CRE_TIMERNG1.search(s)
+        if m is not None:
+            rangeFlag = 1
+        else:
+            m = self.CRE_TIMERNG2.search(s)
+            if m is not None:
+                rangeFlag = 2
+            else:
+                m = self.CRE_TIMERNG3.search(s)
+                if m is not None:
+                    rangeFlag = 3
+                else:
+                    m = self.CRE_DATERNG1.search(s)
+                    if m is not None:
+                        rangeFlag = 4
+                    else:
+                        m = self.CRE_DATERNG2.search(s)
+                        if m is not None:
+                            rangeFlag = 5
+                        else:
+                            m = self.CRE_DATERNG3.search(s)
+                            if m is not None:
+                                rangeFlag = 6
+
+        if _debug:
+            print 'evalRanges: rangeFlag =', rangeFlag, '[%s]' % s
+
+        if m is not None:
+            if (m.group() != s):
+                # capture remaining string
+                parseStr = m.group()
+                chunk1   = s[:m.start()]
+                chunk2   = s[m.end():]
+                s        = '%s %s' % (chunk1, chunk2)
+                flag     = 1
+
+                sourceTime, flag = self.parse(s, sourceTime)
+
+                if flag:
+                    sourceTime = None
+            else:
+                parseStr = s
+
+        if rangeFlag == 1:
+            # FIXME hardcoded separator
+            m                = re.search('-', parseStr)
+            startTime, sflag = self.parse((parseStr[:m.start()]),       sourceTime)
+            endTime, eflag   = self.parse((parseStr[(m.start() + 1):]), sourceTime)
+
+            if eflag is False and sflag is False:
+                return (startTime, endTime, False)
+
+        elif rangeFlag == 2:
+            # FIXME hardcoded separator
+            m                = re.search('-', parseStr)
+            startTime, sflag = self.parse((parseStr[:m.start()]), sourceTime)
+            endTime, eflag   = self.parse((parseStr[(m.start() + 1):]), sourceTime)
+
+            if eflag is False and sflag is False:
+                return (startTime, endTime, False)
+
+        elif rangeFlag == 3:
+            # FIXME hardcoded separator
+            m = re.search('-', parseStr)
+
+            # capturing the meridian from the end time
+            # FIXME hardcoded meridian
+            if self.ptc.usesMeridian:
+                ampm = re.search('a', parseStr)
+
+                # appending the meridian to the start time
+                if ampm is not None:
+                    startTime, sflag = self.parse((parseStr[:m.start()] + self.ptc.meridian[0]), sourceTime)
+                else:
+                    startTime, sflag = self.parse((parseStr[:m.start()] + self.ptc.meridian[1]), sourceTime)
+            else:
+                startTime, sflag = self.parse((parseStr[:m.start()]), sourceTime)
+
+            endTime, eflag = self.parse(parseStr[(m.start() + 1):], sourceTime)
+
+            if eflag is False and sflag is False:
+                return (startTime, endTime, False)
+
+        elif rangeFlag == 4:
+            # FIXME hardcoded separator
+            m                = re.search('-', parseStr)
+            startDate, sflag = self.parse((parseStr[:m.start()]),       sourceTime)
+            endDate, eflag   = self.parse((parseStr[(m.start() + 1):]), sourceTime)
+
+            if eflag is False and sflag is False:
+                return (startDate, endDate, False)
+
+        elif rangeFlag == 5:
+            # FIXME hardcoded separator
+            m       = re.search('-', parseStr)
+            endDate = parseStr[(m.start() + 1):]
+
+            # capturing the year from the end date
+            date    = self.CRE_DATE3.search(endDate)
+            endYear = date.group('year')
+
+            # appending the year to the start date if the start date
+            # does not have year information and the end date does.
+            # eg : "Aug 21 - Sep 4, 2007"
+            if endYear is not None:
+                startDate = parseStr[:m.start()]
+                date      = self.CRE_DATE3.search(startDate)
+                startYear = date.group('year')
+
+                if startYear is None:
+                    startDate += endYear
+            else:
+                startDate = parseStr[:m.start()]
+
+            startDate, sflag = self.parse(startDate, sourceTime)
+            endDate, eflag   = self.parse(endDate, sourceTime)
+
+            if eflag is False and sflag is False:
+                return (startDate, endDate, False)
+
+        elif rangeFlag == 6:
+            # FIXME hardcoded separator
+            m = re.search('-', parseStr)
+
+            startDate = parseStr[:m.start()]
+
+            # capturing the month from the start date
+            mth = self.CRE_DATE3.search(startDate)
+            mth = mth.group('mthname')
+
+            # appending the month name to the end date
+            endDate = mth + parseStr[(m.start() + 1):]
+
+            startDate, sflag = self.parse(startDate, sourceTime)
+            endDate, eflag   = self.parse(endDate, sourceTime)
+
+            if eflag is False and sflag is False:
+                return (startDate, endDate, False)
+        else:
+            # if range is not found
+            sourceTime = time.localtime()
+
+            return (sourceTime, sourceTime, True)
+
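+    # Usage sketch (illustrative only, not part of the upstream source):
+    #
+    #   cal.evalRanges('4pm - 6pm')
+    #   # -> (start, end, False) where start is today at 16:00:00 and
+    #   #    end is today at 18:00:00, both as time tuples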
+
+    def _evalModifier(self, modifier, chunk1, chunk2, sourceTime):
+        """
+        Evaluate the modifier string and following text (passed in
+        as chunk1 and chunk2) and if they match any known modifiers
+        calculate the delta and apply it to sourceTime
+
+        @type  modifier:   string
+        @param modifier:   modifier text to apply to sourceTime
+        @type  chunk1:     string
+        @param chunk1:     first text chunk that followed modifier (if any)
+        @type  chunk2:     string
+        @param chunk2:     second text chunk that followed modifier (if any)
+        @type  sourceTime: datetime
+        @param sourceTime: datetime value to use as the base
+
+        @rtype:  tuple
+        @return: tuple of any remaining text and the modified sourceTime
+        """
+        offset = self.ptc.Modifiers[modifier]
+
+        if sourceTime is not None:
+            (yr, mth, dy, hr, mn, sec, wd, yd, isdst) = sourceTime
+        else:
+            (yr, mth, dy, hr, mn, sec, wd, yd, isdst) = time.localtime()
+
+        # capture the units after the modifier and the remaining string after the unit
+        m = self.CRE_REMAINING.search(chunk2)
+        if m is not None:
+            index  = m.start() + 1
+            unit   = chunk2[:m.start()]
+            chunk2 = chunk2[index:]
+        else:
+            unit   = chunk2
+            chunk2 = ''
+
+        flag = False
+
+        if unit == 'month' or \
+           unit == 'mth':
+            if offset == 0:
+                dy         = self.ptc.DaysInMonthList[mth - 1]
+                sourceTime = (yr, mth, dy, 9, 0, 0, wd, yd, isdst)
+            elif offset == 2:
+                # if day is the last day of the month, calculate the last day of the next month
+                if dy == self.ptc.DaysInMonthList[mth - 1]:
+                    dy = self.ptc.DaysInMonthList[mth]
+
+                start      = datetime.datetime(yr, mth, dy, 9, 0, 0)
+                target     = self.inc(start, month=1)
+                sourceTime = target.timetuple()
+            else:
+                start      = datetime.datetime(yr, mth, 1, 9, 0, 0)
+                target     = self.inc(start, month=offset)
+                sourceTime = target.timetuple()
+
+            flag = True
+
+        if unit == 'week' or \
+             unit == 'wk' or \
+             unit == 'w':
+            if offset == 0:
+                start      = datetime.datetime(yr, mth, dy, 17, 0, 0)
+                target     = start + datetime.timedelta(days=(4 - wd))
+                sourceTime = target.timetuple()
+            elif offset == 2:
+                start      = datetime.datetime(yr, mth, dy, 9, 0, 0)
+                target     = start + datetime.timedelta(days=7)
+                sourceTime = target.timetuple()
+            else:
+                return self._evalModifier(modifier, chunk1, "monday " + chunk2, sourceTime)
+
+            flag = True
+
+        if unit == 'day' or \
+            unit == 'dy' or \
+            unit == 'd':
+            if offset == 0:
+                sourceTime = (yr, mth, dy, 17, 0, 0, wd, yd, isdst)
+            elif offset == 2:
+                start      = datetime.datetime(yr, mth, dy, hr, mn, sec)
+                target     = start + datetime.timedelta(days=1)
+                sourceTime = target.timetuple()
+            else:
+                start      = datetime.datetime(yr, mth, dy, 9, 0, 0)
+                target     = start + datetime.timedelta(days=offset)
+                sourceTime = target.timetuple()
+
+            flag = True
+
+        if unit == 'hour' or \
+           unit == 'hr':
+            if offset == 0:
+                sourceTime = (yr, mth, dy, hr, 0, 0, wd, yd, isdst)
+            else:
+                start      = datetime.datetime(yr, mth, dy, hr, 0, 0)
+                target     = start + datetime.timedelta(hours=offset)
+                sourceTime = target.timetuple()
+
+            flag = True
+
+        if unit == 'year' or \
+             unit == 'yr' or \
+             unit == 'y':
+            if offset == 0:
+                sourceTime = (yr, 12, 31, hr, mn, sec, wd, yd, isdst)
+            elif offset == 2:
+                sourceTime = (yr + 1, mth, dy, hr, mn, sec, wd, yd, isdst)
+            else:
+                sourceTime = (yr + offset, 1, 1, 9, 0, 0, wd, yd, isdst)
+
+            flag = True
+
+        if not flag:
+            m = self.CRE_WEEKDAY.match(unit)
+            if m is not None:
+                wkdy = m.group()
+                wkdy = self.ptc.WeekdayOffsets[wkdy]
+
+                if offset == 0:
+                    diff       = wkdy - wd
+                    start      = datetime.datetime(yr, mth, dy, 9, 0, 0)
+                    target     = start + datetime.timedelta(days=diff)
+                    sourceTime = target.timetuple()
+                else:
+                    diff       = wkdy - wd
+                    start      = datetime.datetime(yr, mth, dy, 9, 0, 0)
+                    target     = start + datetime.timedelta(days=diff + 7 * offset)
+                    sourceTime = target.timetuple()
+
+                flag = True
+
+        if not flag:
+            m = self.CRE_TIME.match(unit)
+            if m is not None:
+                (yr, mth, dy, hr, mn, sec, wd, yd, isdst), self.invalidFlag = self.parse(unit)
+                start      = datetime.datetime(yr, mth, dy, hr, mn, sec)
+                target     = start + datetime.timedelta(days=offset)
+                sourceTime = target.timetuple()
+
+                flag              = True
+                self.modifierFlag = False
+
+        # if the word after next is a number, the string is likely
+        # to be something like "next 4 hrs" for which we have to
+        # combine the units with the rest of the string
+        if not flag:
+            if offset < 0:
+                # if offset is negative, the unit has to be made negative
+                unit = '-%s' % unit
+
+            chunk2 = '%s %s' % (unit, chunk2)
+
+        self.modifierFlag = False
+
+        return '%s %s' % (chunk1, chunk2), sourceTime
+
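+    # Illustrative sketch (not part of the upstream source): this method is
+    # what lets parse() handle modifier phrases, e.g.
+    #
+    #   cal.parse('eo month')
+    #   # -> last day of the current month at 09:00 (offset 0, 'month' unit)
+    #
+    #   cal.parse('last month')
+    #   # -> the 1st of the previous month at 09:00 (offset -1, 'month' unit)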
+
+    def _evalModifier2(self, modifier, chunk1, chunk2, sourceTime):
+        """
+        Evaluate the modifier string and following text (passed in
+        as chunk1 and chunk2) and if they match any known modifiers
+        calculate the delta and apply it to sourceTime
+
+        @type  modifier:   string
+        @param modifier:   modifier text to apply to sourceTime
+        @type  chunk1:     string
+        @param chunk1:     first text chunk that followed modifier (if any)
+        @type  chunk2:     string
+        @param chunk2:     second text chunk that followed modifier (if any)
+        @type  sourceTime: datetime
+        @param sourceTime: datetime value to use as the base
+
+        @rtype:  tuple
+        @return: tuple of any remaining text and the modified sourceTime
+        """
+        offset = self.ptc.Modifiers[modifier]
+        digit  = r'\d+'
+
+        if sourceTime is not None:
+            (yr, mth, dy, hr, mn, sec, wd, yd, isdst) = sourceTime
+        else:
+            (yr, mth, dy, hr, mn, sec, wd, yd, isdst) = time.localtime()
+
+        self.modifier2Flag = False
+
+        # If the string after the negative modifier starts with
+        # digits, then it is likely that the string is similar to
+        # " before 3 days" or 'evening prior to 3 days'.
+        # In this case, the total time is calculated by subtracting
+        # '3 days' from the current date.
+        # So, we have to identify the quantity and negate it before
+        # parsing the string.
+        # This is not required for strings not starting with digits
+        # since the string is enough to calculate the sourceTime
+        if offset < 0:
+            m = re.match(digit, string.strip(chunk2))
+            if m is not None:
+                qty    = int(m.group()) * -1
+                chunk2 = chunk2[m.end():]
+                chunk2 = '%d%s' % (qty, chunk2)
+
+        sourceTime, flag = self.parse(chunk2, sourceTime)
+
+        if chunk1 != '':
+            if offset < 0:
+                m = re.match(digit, string.strip(chunk1))
+                if m is not None:
+                    qty    = int(m.group()) * -1
+                    chunk1 = chunk1[m.end():]
+                    chunk1 = '%d%s' % (qty, chunk1)
+
+            sourceTime, flag = self.parse(chunk1, sourceTime)
+
+        return '', sourceTime
+
+
+    def _evalString(self, datetimeString, sourceTime=None):
+        """
+        Calculate the datetime based on flags set by the L{parse()} routine
+
+        Examples handled::
+            RFC822, W3CDTF formatted dates
+            HH:MM[:SS][ am/pm]
+            MM/DD/YYYY
+            DD MMMM YYYY
+
+        @type  datetimeString: string
+        @param datetimeString: text to try and parse as more "traditional" date/time text
+        @type  sourceTime:     datetime
+        @param sourceTime:     datetime value to use as the base
+
+        @rtype:  datetime
+        @return: calculated datetime value or current datetime if not parsed
+        """
+        s   = string.strip(datetimeString)
+        now = time.localtime()
+
+          # Given string date is an RFC822 date
+        if sourceTime is None:
+            sourceTime = _parse_date_rfc822(s)
+
+          # Given string date is a W3CDTF date
+        if sourceTime is None:
+            sourceTime = _parse_date_w3dtf(s)
+
+        if sourceTime is None:
+            s = s.lower()
+
+          # Given string is in the format HH:MM(:SS)(am/pm)
+        if self.meridianFlag:
+            if sourceTime is None:
+                (yr, mth, dy, hr, mn, sec, wd, yd, isdst) = now
+            else:
+                (yr, mth, dy, hr, mn, sec, wd, yd, isdst) = sourceTime
+
+            m = self.CRE_TIMEHMS2.search(s)
+            if m is not None:
+                dt = s[:m.start('meridian')].strip()
+                if len(dt) <= 2:
+                    hr  = int(dt)
+                    mn  = 0
+                    sec = 0
+                else:
+                    hr, mn, sec = _extract_time(m)
+
+                if hr == 24:
+                    hr = 0
+
+                sourceTime = (yr, mth, dy, hr, mn, sec, wd, yd, isdst)
+                meridian   = m.group('meridian').lower()
+
+                  # if 'am' found and hour is 12 - force hour to 0 (midnight)
+                if (meridian in self.ptc.am) and hr == 12:
+                    sourceTime = (yr, mth, dy, 0, mn, sec, wd, yd, isdst)
+
+                  # if 'pm' found and hour < 12, add 12 to shift to evening
+                if (meridian in self.ptc.pm) and hr < 12:
+                    sourceTime = (yr, mth, dy, hr + 12, mn, sec, wd, yd, isdst)
+
+              # invalid time
+            if hr > 24 or mn > 59 or sec > 59:
+                sourceTime       = now
+                self.invalidFlag = True
+
+            self.meridianFlag = False
+
+          # Given string is in the format HH:MM(:SS)
+        if self.timeFlag:
+            if sourceTime is None:
+                (yr, mth, dy, hr, mn, sec, wd, yd, isdst) = now
+            else:
+                (yr, mth, dy, hr, mn, sec, wd, yd, isdst) = sourceTime
+
+            m = self.CRE_TIMEHMS.search(s)
+            if m is not None:
+                hr, mn, sec = _extract_time(m)
+            if hr == 24:
+                hr = 0
+
+            if hr > 24 or mn > 59 or sec > 59:
+                # invalid time
+                sourceTime       = now
+                self.invalidFlag = True
+            else:
+                sourceTime = (yr, mth, dy, hr, mn, sec, wd, yd, isdst)
+
+            self.timeFlag = False
+
+          # Given string is in the format 07/21/2006
+        if self.dateStdFlag:
+            sourceTime       = self.parseDate(s)
+            self.dateStdFlag = False
+
+          # Given string is in the format  "May 23rd, 2005"
+        if self.dateStrFlag:
+            sourceTime       = self.parseDateText(s)
+            self.dateStrFlag = False
+
+          # Given string is a weekday
+        if self.weekdyFlag:
+            (yr, mth, dy, hr, mn, sec, wd, yd, isdst) = now
+
+            start = datetime.datetime(yr, mth, dy, hr, mn, sec)
+            wkDy  = self.ptc.WeekdayOffsets[s]
+
+            if wkDy > wd:
+                qty    = wkDy - wd
+                target = start + datetime.timedelta(days=qty)
+                wd     = wkDy
+            else:
+                qty    = 6 - wd + wkDy + 1
+                target = start + datetime.timedelta(days=qty)
+                wd     = wkDy
+
+            sourceTime      = target.timetuple()
+            self.weekdyFlag = False
+
+          # Given string is a natural language time string like lunch, midnight, etc
+        if self.timeStrFlag:
+            if s in self.ptc.re_values['now']:
+                sourceTime = now
+            else:
+                sources = self.ptc.buildSources(now)
+
+                if s in sources:
+                    sourceTime = sources[s]
+                else:
+                    sourceTime       = now
+                    self.invalidFlag = True
+
+            self.timeStrFlag = False
+
+          # Given string is a natural language date string like today, tomorrow..
+        if self.dayStrFlag:
+            if sourceTime is None:
+                sourceTime = now
+
+            (yr, mth, dy, hr, mn, sec, wd, yd, isdst) = sourceTime
+
+            if s in self.ptc.dayOffsets:
+                offset = self.ptc.dayOffsets[s]
+            else:
+                offset = 0
+
+            start      = datetime.datetime(yr, mth, dy, 9, 0, 0)
+            target     = start + datetime.timedelta(days=offset)
+            sourceTime = target.timetuple()
+
+            self.dayStrFlag = False
+
+          # Given string is a time string with units like "5 hrs 30 min"
+        if self.unitsFlag:
+            modifier = ''  # TODO
+
+            if sourceTime is None:
+                sourceTime = now
+
+            m = self.CRE_UNITS.search(s)
+            if m is not None:
+                units    = m.group('units')
+                quantity = s[:m.start('units')]
+
+            sourceTime     = self._buildTime(sourceTime, quantity, modifier, units)
+            self.unitsFlag = False
+
+          # Given string is a time string with single char units like "5 h 30 m"
+        if self.qunitsFlag:
+            modifier = ''  # TODO
+
+            if sourceTime is None:
+                sourceTime = now
+
+            m = self.CRE_QUNITS.search(s)
+            if m is not None:
+                units    = m.group('qunits')
+                quantity = s[:m.start('qunits')]
+
+            sourceTime      = self._buildTime(sourceTime, quantity, modifier, units)
+            self.qunitsFlag = False
+
+          # Given string does not match anything
+        if sourceTime is None:
+            sourceTime       = now
+            self.invalidFlag = True
+
+        return sourceTime
+
+
+    def parse(self, datetimeString, sourceTime=None):
+        """
+        Splits the L{datetimeString} into tokens, finds the regex patterns
+        that match and then calculates a datetime value from the chunks
+
+        If L{sourceTime} is given then the datetime value will be calculated
+        from that datetime, otherwise from the current datetime.
+
+        @type  datetimeString: string
+        @param datetimeString: datetime text to evaluate
+        @type  sourceTime:     datetime
+        @param sourceTime:     datetime value to use as the base
+
+        @rtype:  tuple
+        @return: tuple of any remaining text and the modified sourceTime
+        """
+        s         = string.strip(datetimeString.lower())
+        dateStr   = ''
+        parseStr  = ''
+        totalTime = sourceTime
+
+        self.invalidFlag = False
+
+        if s == '':
+            if sourceTime is not None:
+                return (sourceTime, False)
+            else:
+                return (time.localtime(), True)
+
+        while len(s) > 0:
+            flag   = False
+            chunk1 = ''
+            chunk2 = ''
+
+            if _debug:
+                print 'parse (top of loop): [%s][%s]' % (s, parseStr)
+
+            if parseStr == '':
+                # Modifier like next/prev/..
+                m = self.CRE_MODIFIER.search(s)
+                if m is not None:
+                    self.modifierFlag = True
+                    if (m.group('modifier') != s):
+                        # capture remaining string
+                        parseStr = m.group('modifier')
+                        chunk1   = string.strip(s[:m.start('modifier')])
+                        chunk2   = string.strip(s[m.end('modifier'):])
+                        flag     = True
+                    else:
+                        parseStr = s
+
+            if parseStr == '':
+                # Modifier like from/after/prior/..
+                m = self.CRE_MODIFIER2.search(s)
+                if m is not None:
+                    self.modifier2Flag = True
+                    if (m.group('modifier') != s):
+                        # capture remaining string
+                        parseStr = m.group('modifier')
+                        chunk1   = string.strip(s[:m.start('modifier')])
+                        chunk2   = string.strip(s[m.end('modifier'):])
+                        flag     = True
+                    else:
+                        parseStr = s
+
+            if parseStr == '':
+                # String date format
+                m = self.CRE_DATE3.search(s)
+                if m is not None:
+                    self.dateStrFlag = True
+                    if (m.group('date') != s):
+                        # capture remaining string
+                        parseStr = m.group('date')
+                        chunk1   = s[:m.start('date')]
+                        chunk2   = s[m.end('date'):]
+                        s        = '%s %s' % (chunk1, chunk2)
+                        flag     = True
+                    else:
+                        parseStr = s
+
+            if parseStr == '':
+                # Standard date format
+                m = self.CRE_DATE.search(s)
+                if m is not None:
+                    self.dateStdFlag = True
+                    if (m.group('date') != s):
+                        # capture remaining string
+                        parseStr = m.group('date')
+                        chunk1   = s[:m.start('date')]
+                        chunk2   = s[m.end('date'):]
+                        s        = '%s %s' % (chunk1, chunk2)
+                        flag     = True
+                    else:
+                        parseStr = s
+
+            if parseStr == '':
+                # Natural language day strings
+                m = self.CRE_DAY.search(s)
+                if m is not None:
+                    self.dayStrFlag = True
+                    if (m.group('day') != s):
+                        # capture remaining string
+                        parseStr = m.group('day')
+                        chunk1   = s[:m.start('day')]
+                        chunk2   = s[m.end('day'):]
+                        s        = '%s %s' % (chunk1, chunk2)
+                        flag     = True
+                    else:
+                        parseStr = s
+
+            if parseStr == '':
+                # Quantity + Units
+                m = self.CRE_UNITS.search(s)
+                if m is not None:
+                    self.unitsFlag = True
+                    if (m.group('qty') != s):
+                        # capture remaining string
+                        parseStr = m.group('qty')
+                        chunk1   = s[:m.start('qty')].strip()
+                        chunk2   = s[m.end('qty'):].strip()
+
+                        if chunk1[-1:] == '-':
+                            parseStr = '-%s' % parseStr
+                            chunk1   = chunk1[:-1]
+
+                        s    = '%s %s' % (chunk1, chunk2)
+                        flag = True
+                    else:
+                        parseStr = s
+
+            if parseStr == '':
+                # Quantity + single-character Units
+                m = self.CRE_QUNITS.search(s)
+                if m is not None:
+                    self.qunitsFlag = True
+                    if (m.group('qty') != s):
+                        # capture remaining string
+                        parseStr = m.group('qty')
+                        chunk1   = s[:m.start('qty')].strip()
+                        chunk2   = s[m.end('qty'):].strip()
+
+                        if chunk1[-1:] == '-':
+                            parseStr = '-%s' % parseStr
+                            chunk1   = chunk1[:-1]
+
+                        s    = '%s %s' % (chunk1, chunk2)
+                        flag = True
+                    else:
+                        parseStr = s 
+
+            if parseStr == '':
+                # Weekday
+                m = self.CRE_WEEKDAY.search(s)
+                if m is not None:
+                    self.weekdyFlag = True
+                    if (m.group('weekday') != s):
+                        # capture remaining string
+                        parseStr = m.group()
+                        chunk1   = s[:m.start('weekday')]
+                        chunk2   = s[m.end('weekday'):]
+                        s        = '%s %s' % (chunk1, chunk2)
+                        flag     = True
+                    else:
+                        parseStr = s
+
+            if parseStr == '':
+                # Natural language time strings
+                m = self.CRE_TIME.search(s)
+                if m is not None:
+                    self.timeStrFlag = True
+                    if (m.group('time') != s):
+                        # capture remaining string
+                        parseStr = m.group('time')
+                        chunk1   = s[:m.start('time')]
+                        chunk2   = s[m.end('time'):]
+                        s        = '%s %s' % (chunk1, chunk2)
+                        flag     = True
+                    else:
+                        parseStr = s
+
+            if parseStr == '':
+                # HH:MM(:SS) am/pm time strings
+                m = self.CRE_TIMEHMS2.search(s)
+                if m is not None:
+                    self.meridianFlag = True
+                    if m.group('minutes') is not None:
+                        if m.group('seconds') is not None:
+                            parseStr = '%s:%s:%s %s' % (m.group('hours'), m.group('minutes'), m.group('seconds'), m.group('meridian'))
+                        else:
+                            parseStr = '%s:%s %s' % (m.group('hours'), m.group('minutes'), m.group('meridian'))
+                    else:
+                        parseStr = '%s %s' % (m.group('hours'), m.group('meridian'))
+
+                    chunk1 = s[:m.start('hours')]
+                    chunk2 = s[m.end('meridian'):]
+
+                    s    = '%s %s' % (chunk1, chunk2)
+                    flag = True
+
+            if parseStr == '':
+                # HH:MM(:SS) time strings
+                m = self.CRE_TIMEHMS.search(s)
+                if m is not None:
+                    self.timeFlag = True
+                    if m.group('seconds') is not None:
+                        parseStr = '%s:%s:%s' % (m.group('hours'), m.group('minutes'), m.group('seconds'))
+                        chunk1   = s[:m.start('hours')]
+                        chunk2   = s[m.end('seconds'):]
+                    else:
+                        parseStr = '%s:%s' % (m.group('hours'), m.group('minutes'))
+                        chunk1   = s[:m.start('hours')]
+                        chunk2   = s[m.end('minutes'):]
+
+                    s    = '%s %s' % (chunk1, chunk2)
+                    flag = True
+
+            # if the string did not match any regex, clear it so we drop out of the while loop
+            if not flag:
+                s = ''
+
+            if _debug:
+                print 'parse (bottom) [%s][%s][%s][%s]' % (s, parseStr, chunk1, chunk2)
+                print 'invalid %s, weekday %s, dateStd %s, dateStr %s, time %s, timeStr %s, meridian %s' % \
+                       (self.invalidFlag, self.weekdyFlag, self.dateStdFlag, self.dateStrFlag, self.timeFlag, self.timeStrFlag, self.meridianFlag)
+                print 'dayStr %s, modifier %s, modifier2 %s, units %s, qunits %s' % \
+                       (self.dayStrFlag, self.modifierFlag, self.modifier2Flag, self.unitsFlag, self.qunitsFlag)
+
+            # evaluate the matched string
+            if parseStr != '':
+                if self.modifierFlag:
+                    t, totalTime = self._evalModifier(parseStr, chunk1, chunk2, totalTime)
+
+                    return self.parse(t, totalTime)
+
+                elif self.modifier2Flag:
+                    s, totalTime = self._evalModifier2(parseStr, chunk1, chunk2, totalTime)
+                else:
+                    totalTime = self._evalString(parseStr, totalTime)
+                    parseStr  = ''
+
+        # String is not parsed at all
+        if totalTime is None or totalTime == sourceTime:
+            totalTime        = time.localtime()
+            self.invalidFlag = True
+
+        return (totalTime, self.invalidFlag)
+
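+    # Usage sketch (illustrative only, not part of the upstream source):
+    #
+    #   result, invalid = cal.parse('tomorrow at 4pm')
+    #   # result is a time tuple for tomorrow at 16:00:00 and invalid is
+    #   # False, because the whole string was understood
+    #
+    #   result, invalid = cal.parse('gibberish')
+    #   # result falls back to time.localtime() and invalid is True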
+
+    def inc(self, source, month=None, year=None):
+        """
+        Takes the given date, or current date if none is passed, and
+        increments it according to the values passed in by month
+        and/or year.
+
+        This routine is needed because the timedelta() routine does
+        not allow for month or year increments.
+
+        @type  source: datetime
+        @param source: datetime value to increment
+        @type  month:  integer
+        @param month:  optional number of months to increment
+        @type  year:   integer
+        @param year:   optional number of years to increment
+
+        @rtype:  datetime
+        @return: L{source} incremented by the number of months and/or years
+        """
+        yr  = source.year
+        mth = source.month
+
+        if year:
+            try:
+                yi = int(year)
+            except ValueError:
+                yi = 0
+
+            yr += yi
+
+        if month:
+            try:
+                mi = int(month)
+            except ValueError:
+                mi = 0
+
+            m = abs(mi)
+            y = m / 12      # how many years are in month increment
+            m = m % 12      # get remaining months
+
+            if mi < 0:
+                mth = mth - m           # sub months from start month
+                if mth < 1:             # cross start-of-year?
+                    y   -= 1            #   yes - decrement year
+                    mth += 12           #         and fix month
+            else:
+                mth = mth + m           # add months to start month
+                if mth > 12:            # cross end-of-year?
+                    y   += 1            #   yes - increment year
+                    mth -= 12           #         and fix month
+
+            yr += y
+
+        d = source.replace(year=yr, month=mth)
+
+        return source + (d - source)
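+
+    # Illustrative sketch (not part of the upstream source): unlike a plain
+    # datetime.timedelta, inc() does calendar-aware month/year arithmetic,
+    # e.g.
+    #
+    #   cal.inc(datetime.datetime(2006, 12, 15), month=1)
+    #   # -> datetime.datetime(2007, 1, 15, 0, 0), crossing the year boundary
+    #
+    #   cal.inc(datetime.datetime(2006, 2, 28), year=1)
+    #   # -> datetime.datetime(2007, 2, 28, 0, 0)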
+
--- /dev/null	Thu Jan 01 00:00:00 1970 +0000
+++ b/MoinMoin/support/parsedatetime/parsedatetime_consts.py	Sun Aug 27 15:29:01 2006 +0200
@@ -0,0 +1,577 @@
+#!/usr/bin/env python
+
+"""
+The Constants class defines all constants used by parsedatetime.py.
+"""
+
+__license__ = """Copyright (c) 2004-2006 Mike Taylor, All rights reserved.
+
+Licensed under the Apache License, Version 2.0 (the "License");
+you may not use this file except in compliance with the License.
+You may obtain a copy of the License at
+
+   http://www.apache.org/licenses/LICENSE-2.0
+
+Unless required by applicable law or agreed to in writing, software
+distributed under the License is distributed on an "AS IS" BASIS,
+WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+See the License for the specific language governing permissions and
+limitations under the License.
+"""
+__author__       = 'Mike Taylor <http://code-bear.com>'
+__contributors__ = [ 'Darshana Chhajed <mailto://darshana@osafoundation.org>',
+                   ]
+
+
+try:
+    import PyICU as pyicu
+except ImportError:
+    pyicu = None
+
+
+import string
+import datetime, time
+
+
+class pdtLocale_en:
+    """
+    en_US Locale constants
+
+    This class will be used to initialize C{Constants} if PyICU is not located.
+
+    Defined as class variables are the lists and strings needed by parsedatetime
+    to evaluate strings in English (US)
+    """
+
+    localeID      = 'en_US'   # don't use a unicode string
+    dateSep       = u'/'
+    timeSep       = u':'
+    meridian      = [ u'AM', u'PM' ]
+    usesMeridian  = True
+    uses24        = False
+
+    Weekdays      = [ u'sunday', u'monday', u'tuesday',
+                      u'wednesday', u'thursday', u'friday', u'saturday',
+                    ]
+    shortWeekdays = [ u'sun', u'mon', u'tues',
+                      u'wed', u'thu', u'fri', u'sat',
+                    ]
+    Months        = [ u'january', u'february', u'march',
+                      u'april',   u'may',      u'june',
+                      u'july',    u'august',   u'september',
+                      u'october', u'november', u'december',
+                    ]
+    shortMonths   = [ u'jan', u'feb', u'mar',
+                      u'apr', u'may', u'jun',
+                      u'jul', u'aug', u'sep',
+                      u'oct', u'nov', u'dec',
+                    ]
+    dateFormats   = { 'full':   'EEEE, MMMM d, yyyy',
+                      'long':   'MMMM d, yyyy',
+                      'medium': 'MMM d, yyyy',
+                      'short':  'M/d/yy',
+                    }
+    timeFormats   = { 'full':   'h:mm:ss a z',
+                      'long':   'h:mm:ss a z',
+                      'medium': 'h:mm:ss a',
+                      'short':  'h:mm a',
+                    }
+
+      # this will be added to re_consts later
+    units = { 'seconds': [ 'second', 'sec' ],
+              'minutes': [ 'minute', 'min' ],
+              'hours':   [ 'hour',   'hr'  ],
+              'days':    [ 'day',    'dy'  ],
+              'weeks':   [ 'week',   'wk'  ],
+              'months':  [ 'month',  'mth' ],
+              'years':   [ 'year',   'yr'  ],
+            }
+
+      # text constants to be used by regexes later
+    re_consts     = { 'specials':      'in|on|of|at',
+                      'timeseperator': ':',
+                      'daysuffix':     'rd|st|nd|th',
+                      'meridian':      'am|pm|a.m.|p.m.|a|p',
+                      'qunits':        'h|m|s|d|w|m|y',
+                      'now':           [ 'now' ],
+                    }
+
+      # Used to adjust the returned date before/after the source
+    modifiers = { 'from':       1,
+                  'before':    -1,
+                  'after':      1,
+                  'ago':        1,
+                  'prior':     -1,
+                  'prev':      -1,
+                  'last':      -1,
+                  'next':       1,
+                  'this':       0,
+                  'previous':  -1,
+                  'in a':       2,
+                  'end of':     0,
+                  'eo':         0,
+                }
+
+    dayoffsets = { 'tomorrow':   1,
+                   'today':      0,
+                   'yesterday': -1,
+                 }
+
+      # special days and/or times, e.g. lunch, noon, evening
+      # each element in the dictionary is a dictionary that is used
+      # to fill in any value to be replaced - the current date/time will
+      # already have been populated by the method buildSources
+    re_sources    = { 'noon':      { 'hr': 12, 'mn': 0, 'sec': 0 },
+                      'lunch':     { 'hr': 12, 'mn': 0, 'sec': 0 },
+                      'morning':   { 'hr':  6, 'mn': 0, 'sec': 0 },
+                      'breakfast': { 'hr':  8, 'mn': 0, 'sec': 0 },
+                      'dinner':    { 'hr': 19, 'mn': 0, 'sec': 0 },
+                      'evening':   { 'hr': 18, 'mn': 0, 'sec': 0 },
+                      'midnight':  { 'hr':  0, 'mn': 0, 'sec': 0 },
+                      'night':     { 'hr': 21, 'mn': 0, 'sec': 0 },
+                      'tonight':   { 'hr': 21, 'mn': 0, 'sec': 0 },
+                    }
+
+
+class pdtLocale_es:
+    """
+    es Locale constants
+
+    This class will be used to initialize C{Constants} if PyICU is not located.
+
+    Defined as class variables are the lists and strings needed by parsedatetime
+    to evaluate strings in Spanish
+
+    Note that I don't speak Spanish so many of the items below are still in English
+    """
+
+    localeID      = 'es'   # don't use a unicode string
+    dateSep       = u'/'
+    timeSep       = u':'
+    meridian      = []
+    usesMeridian  = False
+    uses24        = True
+
+    Weekdays      = [ u'domingo', u'lunes', u'martes',
+                      u'mi\xe9rcoles', u'jueves', u'viernes', u's\xe1bado',
+                    ]
+    shortWeekdays = [ u'dom', u'lun', u'mar',
+                      u'mi\xe9', u'jue', u'vie', u's\xe1b',
+                    ]
+    Months        = [ u'enero', u'febrero', u'marzo',
+                      u'abril', u'mayo', u'junio',
+                      u'julio', u'agosto', u'septiembre',
+                      u'octubre', u'noviembre', u'diciembre'
+                    ]
+    shortMonths   = [ u'ene', u'feb', u'mar',
+                      u'abr', u'may', u'jun',
+                      u'jul', u'ago', u'sep',
+                      u'oct', u'nov', u'dic'
+                    ]
+    dateFormats   = { 'full':   "EEEE d' de 'MMMM' de 'yyyy",
+                      'long':   "d' de 'MMMM' de 'yyyy",
+                      'medium': "dd-MMM-yy",
+                      'short':  "d/MM/yy",
+                    }
+    timeFormats   = { 'full':   "HH'H'mm' 'ss z",
+                      'long':   "HH:mm:ss z",
+                      'medium': "HH:mm:ss",
+                      'short':  "HH:mm",
+                    }
+
+      # this will be added to re_consts later
+    units = { 'seconds': [ 'second', 'sec' ],
+              'minutes': [ 'minute', 'min' ],
+              'hours':   [ 'hour',   'hr'  ],
+              'days':    [ 'day',    'dy'  ],
+              'weeks':   [ 'week',   'wk'  ],
+              'months':  [ 'month',  'mth' ],
+              'years':   [ 'year',   'yr'  ],
+            }
+
+      # text constants to be used by regexes later
+    re_consts     = { 'specials':      'in|on|of|at',
+                      'timeseperator': timeSep,
+                      'dateseperator': dateSep,
+                      'daysuffix':     'rd|st|nd|th',
+                      'qunits':        'h|m|s|d|w|m|y',
+                      'now':           [ 'now' ],
+                    }
+
+      # Used to adjust the returned date before/after the source
+    modifiers = { 'from':       1,
+                  'before':    -1,
+                  'after':      1,
+                  'ago':        1,
+                  'prior':     -1,
+                  'prev':      -1,
+                  'last':      -1,
+                  'next':       1,
+                  'this':       0,
+                  'previous':  -1,
+                  'in a':       2,
+                  'end of':     0,
+                  'eo':         0,
+                }
+
+    dayoffsets = { 'tomorrow':   1,
+                   'today':      0,
+                   'yesterday': -1,
+                 }
+
+      # special days and/or times, e.g. lunch, noon, evening
+      # each element in the dictionary is a dictionary that is used
+      # to fill in any value to be replaced - the current date/time will
+      # already have been populated by the method buildSources
+    re_sources    = { 'noon':      { 'hr': 12,  'mn': 0, 'sec': 0 },
+                      'lunch':     { 'hr': 12,  'mn': 0, 'sec': 0 },
+                      'morning':   { 'hr':  6,  'mn': 0, 'sec': 0 },
+                      'breakfast': { 'hr':  8,  'mn': 0, 'sec': 0 },
+                      'dinner':    { 'hr': 19,  'mn': 0, 'sec': 0 },
+                      'evening':   { 'hr': 18,  'mn': 0, 'sec': 0 },
+                      'midnight':  { 'hr':  0,  'mn': 0, 'sec': 0 },
+                      'night':     { 'hr': 21,  'mn': 0, 'sec': 0 },
+                      'tonight':   { 'hr': 21,  'mn': 0, 'sec': 0 },
+                    }
+
+
+pdtLocales = { 'en_US': pdtLocale_en,
+               'es':    pdtLocale_es,
+             }
+
+
+def _initLocale(ptc):
+    """
+    Helper function to initialize the different lists and strings
+    from either PyICU or one of the locale pdt Locales and store
+    them into ptc.
+    """
+    if pyicu and ptc.usePyICU:
+        ptc.icuLocale = pyicu.Locale(ptc.localeID)
+
+        if not ptc.icuLocale:
+            ptc.icuLocale = pyicu.Locale('en_US')
+
+        ptc.icuSymbols    = pyicu.DateFormatSymbols(ptc.icuLocale)
+
+        ptc.Weekdays      = map(string.lower, ptc.icuSymbols.getWeekdays()[1:])
+        ptc.shortWeekdays = map(string.lower, ptc.icuSymbols.getShortWeekdays()[1:])
+        ptc.Months        = map(string.lower, ptc.icuSymbols.getMonths())
+        ptc.shortMonths   = map(string.lower, ptc.icuSymbols.getShortMonths())
+
+          # not quite sure how to init this, so set it to None for now;
+          # it will then fall back to the en_US defaults
+        ptc.re_consts     = None
+
+        ptc.icu_df        = { 'full':   pyicu.DateFormat.createDateInstance(pyicu.DateFormat.kFull,   ptc.icuLocale),
+                              'long':   pyicu.DateFormat.createDateInstance(pyicu.DateFormat.kLong,   ptc.icuLocale),
+                              'medium': pyicu.DateFormat.createDateInstance(pyicu.DateFormat.kMedium, ptc.icuLocale),
+                              'short':  pyicu.DateFormat.createDateInstance(pyicu.DateFormat.kShort,  ptc.icuLocale),
+                            }
+        ptc.icu_tf        = { 'full':   pyicu.DateFormat.createTimeInstance(pyicu.DateFormat.kFull,   ptc.icuLocale),
+                              'long':   pyicu.DateFormat.createTimeInstance(pyicu.DateFormat.kLong,   ptc.icuLocale),
+                              'medium': pyicu.DateFormat.createTimeInstance(pyicu.DateFormat.kMedium, ptc.icuLocale),
+                              'short':  pyicu.DateFormat.createTimeInstance(pyicu.DateFormat.kShort,  ptc.icuLocale),
+                            }
+
+        ptc.dateFormats   = { 'full':   ptc.icu_df['full'].toPattern(),
+                              'long':   ptc.icu_df['long'].toPattern(),
+                              'medium': ptc.icu_df['medium'].toPattern(),
+                              'short':  ptc.icu_df['short'].toPattern(),
+                            }
+        ptc.timeFormats   = { 'full':   ptc.icu_tf['full'].toPattern(),
+                              'long':   ptc.icu_tf['long'].toPattern(),
+                              'medium': ptc.icu_tf['medium'].toPattern(),
+                              'short':  ptc.icu_tf['short'].toPattern(),
+                            }
+    else:
+        if not ptc.localeID in pdtLocales:
+            ptc.localeID = 'en_US'
+
+        ptc.locale = pdtLocales[ptc.localeID]
+
+        ptc.Weekdays      = ptc.locale.Weekdays
+        ptc.shortWeekdays = ptc.locale.shortWeekdays
+        ptc.Months        = ptc.locale.Months
+        ptc.shortMonths   = ptc.locale.shortMonths
+        ptc.dateFormats   = ptc.locale.dateFormats
+        ptc.timeFormats   = ptc.locale.timeFormats
+
+
+      # these values are used to set up the various bits
+      # of the regex values used to parse
+      #
+      # check if a locale-specific set of constants has been
+      # provided; if not, use en_US as the default
+    if ptc.localeID in pdtLocales:
+        ptc.re_sources = pdtLocales[ptc.localeID].re_sources
+        ptc.re_values  = pdtLocales[ptc.localeID].re_consts
+
+        units = pdtLocales[ptc.localeID].units
+
+        ptc.Modifiers  = pdtLocales[ptc.localeID].modifiers
+        ptc.dayOffsets = pdtLocales[ptc.localeID].dayoffsets
+
+          # for now, pull over any missing keys from the US set
+        for key in pdtLocales['en_US'].re_consts:
+            if not key in ptc.re_values:
+                ptc.re_values[key] = pdtLocales['en_US'].re_consts[key]
+    else:
+        ptc.re_sources = pdtLocales['en_US'].re_sources
+        ptc.re_values  = pdtLocales['en_US'].re_consts
+        ptc.Modifiers  = pdtLocales['en_US'].modifiers
+        ptc.dayOffsets = pdtLocales['en_US'].dayoffsets
+        units          = pdtLocales['en_US'].units
+
+    ptc.re_values['months']      = '%s|%s|%s|%s|%s|%s|%s|%s|%s|%s|%s|%s' % tuple(ptc.Months)
+    ptc.re_values['shortmonths'] = '%s|%s|%s|%s|%s|%s|%s|%s|%s|%s|%s|%s' % tuple(ptc.shortMonths)
+    ptc.re_values['days']        = '%s|%s|%s|%s|%s|%s|%s' % tuple(ptc.Weekdays)
+    ptc.re_values['shortdays']   = '%s|%s|%s|%s|%s|%s|%s' % tuple(ptc.shortWeekdays)
+
+    l = []
+    for unit in units:
+        l.append('|'.join(units[unit]))
+
+    ptc.re_values['units'] = '|'.join(l)
+    ptc.Units              = ptc.re_values['units'].split('|')
+
+
+def _initSymbols(ptc):
+    """
+    Helper function to initialize the single character constants
+    and other symbols needed.
+    """
+    ptc.timeSep  = u':'
+    ptc.dateSep  = u'/'
+    ptc.meridian = [ u'AM', u'PM' ]
+
+    ptc.usesMeridian = True
+    ptc.uses24       = False
+
+    if pyicu:
+        am = u''
+        pm = u''
+
+          # ICU doesn't seem to directly provide the
+          # date or time separator - so we have to
+          # figure it out
+
+        p = pyicu.FieldPosition(pyicu.DateFormat.AM_PM_FIELD)
+        o = ptc.icu_tf['short']
+
+        s = ptc.timeFormats['short']
+
+        ptc.usesMeridian = u'a' in s
+        ptc.uses24       = u'H' in s
+
+        s = o.format(datetime.datetime(2003, 10, 30, 11, 45))       # '11:45 AM' or '11:45'
+
+        s = s.replace('11', '').replace('45', '')                   # ': AM' or ':'
+
+        if len(s) > 0:
+            ptc.timeSep = s[0]
+
+        if ptc.usesMeridian:
+            am = s[1:].strip()                                      # 'AM'
+
+            s = o.format(datetime.datetime(2003, 10, 30, 23, 45))   # '23:45 AM' or '23:45'
+
+            if ptc.uses24:
+                s = s.replace('23', '')
+            else:
+                s = s.replace('11', '')
+
+            pm = s.replace('45', '').replace(ptc.timeSep, '').strip()  # 'PM' or ''
+
+        ptc.meridian = [ am, pm ]
+
+    else:
+        ptc.timeSep      = ptc.locale.timeSep
+        ptc.dateSep      = ptc.locale.dateSep
+        ptc.meridian     = ptc.locale.meridian
+        ptc.usesMeridian = ptc.locale.usesMeridian
+        ptc.uses24       = ptc.locale.uses24
+
+      # build am and pm lists to contain
+      # original case, lowercase and first-char
+      # versions of the meridian text
+
+    if len(ptc.meridian) > 0:
+        am     = ptc.meridian[0]
+        ptc.am = [ am ]
+
+        if len(am) > 0:
+            ptc.am.append(am[0])
+            am = am.lower()
+            ptc.am.append(am)
+            ptc.am.append(am[0])
+    else:
+        am     = ''
+        ptc.am = [ '', '' ]
+
+    if len(ptc.meridian) > 1:
+        pm     = ptc.meridian[1]
+        ptc.pm = [ pm ]
+
+        if len(pm) > 0:
+            ptc.pm.append(pm[0])
+            pm = pm.lower()
+            ptc.pm.append(pm)
+            ptc.pm.append(pm[0])
+    else:
+        pm     = ''
+        ptc.pm = [ '', '' ]
+
+
+def _initPatterns(ptc):
+    """
+    Helper function to take the different localized bits from ptc and
+    create the regex strings.
+    """
+    # TODO add code to parse the date formats and build the regexes up from sub-parts
+    # TODO find all hard-coded uses of date/time separators
+
+    ptc.RE_DATE3     = r'(?P<date>((?P<mthname>(%(months)s|%(shortmonths)s))\s?((?P<day>\d\d?)(\s|%(daysuffix)s|,|$)+)?(?P<year>\d\d\d\d)?))' % ptc.re_values
+    ptc.RE_MONTH     = r'(?P<month>((?P<mthname>(%(months)s|%(shortmonths)s))(\s?(?P<year>(\d\d\d\d)))?))' % ptc.re_values
+    ptc.RE_WEEKDAY   = r'(?P<weekday>(%(days)s|%(shortdays)s))' % ptc.re_values
+
+    ptc.RE_SPECIAL   = r'(?P<special>^[%(specials)s]+)\s+' % ptc.re_values
+    ptc.RE_UNITS     = r'(?P<qty>(-?\d+\s*(?P<units>((%(units)s)s?))))' % ptc.re_values
+    ptc.RE_QUNITS    = r'(?P<qty>(-?\d+\s?(?P<qunits>%(qunits)s)(\s|,|$)))' % ptc.re_values
+    ptc.RE_MODIFIER  = r'(?P<modifier>(previous|prev|last|next|this|eo|(end\sof)|(in\sa)))' % ptc.re_values
+    ptc.RE_MODIFIER2 = r'(?P<modifier>(from|before|after|ago|prior))' % ptc.re_values
+    ptc.RE_TIMEHMS   = r'(?P<hours>\d\d?)(?P<tsep>%(timeseperator)s|)(?P<minutes>\d\d)(?:(?P=tsep)(?P<seconds>\d\d(?:[.,]\d+)?))?' % ptc.re_values
+
+    ptc.RE_TIMEHMS2  = r'(?P<hours>(\d\d?))((?P<tsep>%(timeseperator)s|)(?P<minutes>(\d\d?))(?:(?P=tsep)(?P<seconds>\d\d?(?:[.,]\d+)?))?)?' % ptc.re_values
+
+    if 'meridian' in ptc.re_values:
+        ptc.RE_TIMEHMS2 += r'\s?(?P<meridian>(%(meridian)s))' % ptc.re_values
+
+    ptc.RE_DATE      = r'(?P<date>\d+([/.\\]\d+)+)'
+    ptc.RE_DATE2     = r'[/.\\]'
+    ptc.RE_DAY       = r'(?P<day>(today|tomorrow|yesterday))' % ptc.re_values
+    ptc.RE_TIME      = r'\s*(?P<time>(morning|breakfast|noon|lunch|evening|midnight|tonight|dinner|night|now))' % ptc.re_values
+    ptc.RE_REMAINING = r'\s+'
+
+      # Regex for date/time ranges
+
+    ptc.RE_RTIMEHMS  = r'(\d\d?)%(timeseperator)s(\d\d)(%(timeseperator)s(\d\d))?' % ptc.re_values
+
+    ptc.RE_RTIMEHMS2 = r'(\d\d?)(%(timeseperator)s(\d\d?))?(%(timeseperator)s(\d\d?))?' % ptc.re_values
+
+    if 'meridian' in ptc.re_values:
+        ptc.RE_RTIMEHMS2 += r'\s?(%(meridian)s)' % ptc.re_values
+
+    ptc.RE_RDATE     = r'(\d+([/.\\]\d+)+)'
+    ptc.RE_RDATE3    = r'((((%(months)s))\s?((\d\d?)(\s|%(daysuffix)s|,|$)+)?(\d\d\d\d)?))' % ptc.re_values
+    ptc.DATERNG1     = ptc.RE_RDATE     + r'\s?-\s?' + ptc.RE_RDATE     # "06/07/06 - 08/09/06"
+    ptc.DATERNG2     = ptc.RE_RDATE3    + r'\s?-\s?' + ptc.RE_RDATE3    # "march 31 - june 1st, 2006"
+    ptc.DATERNG3     = ptc.RE_RDATE3    + r'\s?' + r'-' + r'\s?(\d\d?)\s?(rd|st|nd|th)?' % ptc.re_values # "march 1st - 13th"
+    ptc.TIMERNG1     = ptc.RE_RTIMEHMS2 + r'\s?-\s?'+ ptc.RE_RTIMEHMS2  # "4:00:55 pm - 5:90:44 am",'4p-5p'
+    ptc.TIMERNG2     = ptc.RE_RTIMEHMS  + r'\s?-\s?'+ ptc.RE_RTIMEHMS   # "4:00 - 5:90 ","4:55:55-3:44:55"
+    ptc.TIMERNG3     = r'\d\d?\s?-\s?'+ ptc.RE_RTIMEHMS2                # "4-5pm "
+
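+# A quick illustration (not part of the upstream source) of what some of the
+# generated patterns match, assuming the en_US constants above:
+#
+#   RE_UNITS   matches the '5 hrs' in '5 hrs from now'
+#   RE_TIMEHMS matches '17:30' and '17:30:15'
+#   TIMERNG1   matches ranges such as '4pm - 6pm'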
+
+def _initConstants(ptc):
+    """
+    Create localized versions of the units, week and month names
+    """
+      # build weekday offsets - yes, it assumes the Weekday and shortWeekday
+      # lists are in the same order and Sun..Sat
+    ptc.WeekdayOffsets = {}
+
+    o = 0
+    for key in ptc.Weekdays:
+        ptc.WeekdayOffsets[key] = o
+        o += 1
+    o = 0
+    for key in ptc.shortWeekdays:
+        ptc.WeekdayOffsets[key] = o
+        o += 1
+
+      # build month offsets - yes, it assumes the Months and shortMonths
+      # lists are in the same order and Jan..Dec
+    ptc.MonthOffsets = {}
+    ptc.DaysInMonth  = {}
+
+    o = 1
+    for key in ptc.Months:
+        ptc.MonthOffsets[key] = o
+        ptc.DaysInMonth[key]  = ptc.DaysInMonthList[o - 1]
+        o += 1
+    o = 1
+    for key in ptc.shortMonths:
+        ptc.MonthOffsets[key] = o
+        ptc.DaysInMonth[key]  = ptc.DaysInMonthList[o - 1]
+        o += 1
+
+
+class Constants:
+    """
+    Default set of constants for parsedatetime.
+
+    If PyICU is present, then the class will initialize itself to
+    the current default locale or to the locale specified by C{localeID}.
+
+    If PyICU is not present, the class will initialize itself to the
+    en_US locale; if C{localeID} is passed in and matches one of the
+    defined pdtLocales, that locale is used instead.
+    """
+    def __init__(self, localeID=None, usePyICU=True):
+        if localeID is None:
+            self.localeID = 'en_US'
+        else:
+            self.localeID = localeID
+
+          # define non-locale specific constants
+
+        self.locale   = None
+        self.usePyICU = usePyICU
+
+        self.Second =   1
+        self.Minute =  60 * self.Second
+        self.Hour   =  60 * self.Minute
+        self.Day    =  24 * self.Hour
+        self.Week   =   7 * self.Day
+        self.Month  =  30 * self.Day
+        self.Year   = 365 * self.Day
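+          # NOTE: Month and Year are fixed approximations (30 and 365 days)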
+
+        self.DaysInMonthList = (31, 28, 31, 30, 31, 30, 31, 31, 30, 31, 30, 31)
+
+        _initLocale(self)
+        _initConstants(self)
+        _initSymbols(self)
+        _initPatterns(self)
+
+
+    def buildSources(self, sourceTime=None):
+        """
+        Return a dictionary of date/time tuples based on the keys
+        found in self.re_sources.
+
+        The current time supplies the default field values; any field
+        specified for an item in self.re_sources overrides the default,
+        and the resulting dictionary is returned.
+        """
+        if sourceTime is None:
+            (yr, mth, dy, hr, mn, sec, wd, yd, isdst) = time.localtime()
+        else:
+            (yr, mth, dy, hr, mn, sec, wd, yd, isdst) = sourceTime
+
+        sources  = {}
+        defaults = { 'yr': yr, 'mth': mth, 'dy':  dy,
+                     'hr': hr, 'mn':  mn,  'sec': sec, }
+
+        for item in self.re_sources:
+            values = self.re_sources[item]
+
+            for key in defaults.keys():
+                if not key in values:
+                    values[key] = defaults[key]
+
+            sources[item] = ( values['yr'], values['mth'], values['dy'],
+                              values['hr'], values['mn'], values['sec'], wd, yd, isdst )
+
+        return sources
+
+        
\ No newline at end of file
--- /dev/null	Thu Jan 01 00:00:00 1970 +0000
+++ b/MoinMoin/support/parsedatetime/pdt.py	Sun Aug 27 15:29:01 2006 +0200
@@ -0,0 +1,1266 @@
+#!/usr/bin/env python
+
+"""
+Parse human-readable date/time text.
+"""
+
+__license__ = """Copyright (c) 2004-2006 Mike Taylor, All rights reserved.
+
+Licensed under the Apache License, Version 2.0 (the "License");
+you may not use this file except in compliance with the License.
+You may obtain a copy of the License at
+
+   http://www.apache.org/licenses/LICENSE-2.0
+
+Unless required by applicable law or agreed to in writing, software
+distributed under the License is distributed on an "AS IS" BASIS,
+WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+See the License for the specific language governing permissions and
+limitations under the License.
+"""
+__author__       = 'Mike Taylor <http://code-bear.com>'
+__contributors__ = ['Darshana Chhajed <mailto://darshana@osafoundation.org>',
+                   ]
+
+_debug = False
+
+
+import string, re, time
+import datetime, calendar, rfc822
+import parsedatetime_consts
+
+
+# Copied from feedparser.py
+# Universal Feedparser, Copyright (c) 2002-2006, Mark Pilgrim, All rights reserved.
+# Originally a def inside of _parse_date_w3dtf()
+def _extract_date(m):
+    year = int(m.group('year'))
+    if year < 100:
+        year = 100 * int(time.gmtime()[0] / 100) + int(year)
+    if year < 1000:
+        return 0, 0, 0
+    julian = m.group('julian')
+    if julian:
+        julian = int(julian)
+        month = julian / 30 + 1
+        day = julian % 30 + 1
+        jday = None
+        while jday != julian:
+            t = time.mktime((year, month, day, 0, 0, 0, 0, 0, 0))
+            jday = time.gmtime(t)[-2]
+            diff = abs(jday - julian)
+            if jday > julian:
+                if diff < day:
+                    day = day - diff
+                else:
+                    month = month - 1
+                    day = 31
+            elif jday < julian:
+                if day + diff < 28:
+                    day = day + diff
+                else:
+                    month = month + 1
+        return year, month, day
+    month = m.group('month')
+    day = 1
+    if month is None:
+        month = 1
+    else:
+        month = int(month)
+        day = m.group('day')
+        if day:
+            day = int(day)
+        else:
+            day = 1
+    return year, month, day
+
+# Copied from feedparser.py 
+# Universal Feedparser, Copyright (c) 2002-2006, Mark Pilgrim, All rights reserved.
+# Originally a def inside of _parse_date_w3dtf()
+def _extract_time(m):
+    if not m:
+        return 0, 0, 0
+    hours = m.group('hours')
+    if not hours:
+        return 0, 0, 0
+    hours = int(hours)
+    minutes = int(m.group('minutes'))
+    seconds = m.group('seconds')
+    if seconds:
+        seconds = int(seconds)
+    else:
+        seconds = 0
+    return hours, minutes, seconds
+
+
+# Copied from feedparser.py
+# Universal Feedparser, Copyright (c) 2002-2006, Mark Pilgrim, All rights reserved.
+# Modified to return a tuple instead of mktime
+#
+# Original comment:
+#       W3DTF-style date parsing adapted from PyXML xml.utils.iso8601, written by
+#       Drake and licensed under the Python license.  Removed all range checking
+#       for month, day, hour, minute, and second, since mktime will normalize
+#       these later
+def _parse_date_w3dtf(dateString):
+    # the __extract_date and __extract_time methods were
+    # copied-out so they could be used by my code --bear
+    def __extract_tzd(m):
+        '''Return the Time Zone Designator as an offset in seconds from UTC.'''
+        if not m:
+            return 0
+        tzd = m.group('tzd')
+        if not tzd:
+            return 0
+        if tzd == 'Z':
+            return 0
+        hours = int(m.group('tzdhours'))
+        minutes = m.group('tzdminutes')
+        if minutes:
+            minutes = int(minutes)
+        else:
+            minutes = 0
+        offset = (hours*60 + minutes) * 60
+        if tzd[0] == '+':
+            return -offset
+        return offset
+
+    __date_re = ('(?P<year>\d\d\d\d)'
+                 '(?:(?P<dsep>-|)'
+                 '(?:(?P<julian>\d\d\d)'
+                 '|(?P<month>\d\d)(?:(?P=dsep)(?P<day>\d\d))?))?')
+    __tzd_re = '(?P<tzd>[-+](?P<tzdhours>\d\d)(?::?(?P<tzdminutes>\d\d))|Z)'
+    __tzd_rx = re.compile(__tzd_re)
+    __time_re = ('(?P<hours>\d\d)(?P<tsep>:|)(?P<minutes>\d\d)'
+                 '(?:(?P=tsep)(?P<seconds>\d\d(?:[.,]\d+)?))?'
+                 + __tzd_re)
+    __datetime_re = '%s(?:T%s)?' % (__date_re, __time_re)
+    __datetime_rx = re.compile(__datetime_re)
+    m = __datetime_rx.match(dateString)
+    if (m is None) or (m.group() != dateString): return
+    return _extract_date(m) + _extract_time(m) + (0, 0, 0)
+
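+# Example: _parse_date_w3dtf('2006-08-27T15:29:01Z') returns
+# (2006, 8, 27, 15, 29, 1, 0, 0, 0).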
+
+# Copied from feedparser.py
+# Universal Feedparser, Copyright (c) 2002-2006, Mark Pilgrim, All rights reserved.
+# Modified to return a tuple instead of mktime
+#
+def _parse_date_rfc822(dateString):
+    '''Parse an RFC822, RFC1123, RFC2822, or asctime-style date'''
+    data = dateString.split()
+    if data[0][-1] in (',', '.') or data[0].lower() in rfc822._daynames:
+        del data[0]
+    if len(data) == 4:
+        s = data[3]
+        i = s.find('+')
+        if i > 0:
+            data[3:] = [s[:i], s[i+1:]]
+        else:
+            data.append('')
+        dateString = " ".join(data)
+    if len(data) < 5:
+        dateString += ' 00:00:00 GMT'
+    return rfc822.parsedate_tz(dateString)
+
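+# Example: _parse_date_rfc822('Sun, 27 Aug 2006 15:29:01 +0200') returns the
+# rfc822.parsedate_tz() 10-tuple, whose last element is the UTC offset in
+# seconds (7200 here).
+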
+# rfc822.py defines several time zones, but we define some extra ones.
+# 'ET' is equivalent to 'EST', etc.
+_additional_timezones = {'AT': -400, 'ET': -500, 'CT': -600, 'MT': -700, 'PT': -800}
+rfc822._timezones.update(_additional_timezones)
+
+
+class Calendar:
+    """
+    A collection of routines to input, parse and manipulate dates and times.
+    The text can be either 'normal' date/time values or human-readable phrases.
+    """
+
+    def __init__(self, constants=None):
+        """
+        Default constructor for the Calendar class.
+
+        @type  constants: object
+        @param constants: Instance of the class L{CalendarConstants}
+
+        @rtype:  object
+        @return: Calendar instance
+        """
+          # if a constants reference is not included, use default
+        if constants is None:
+            self.ptc = parsedatetime_consts.CalendarConstants()
+        else:
+            self.ptc = constants
+
+        self.CRE_SPECIAL   = re.compile(self.ptc.RE_SPECIAL,   re.IGNORECASE)
+        self.CRE_UNITS     = re.compile(self.ptc.RE_UNITS,     re.IGNORECASE)
+        self.CRE_QUNITS    = re.compile(self.ptc.RE_QUNITS,    re.IGNORECASE)
+        self.CRE_MODIFIER  = re.compile(self.ptc.RE_MODIFIER,  re.IGNORECASE)
+        self.CRE_MODIFIER2 = re.compile(self.ptc.RE_MODIFIER2, re.IGNORECASE)
+        self.CRE_TIMEHMS   = re.compile(self.ptc.RE_TIMEHMS,   re.IGNORECASE)
+        self.CRE_TIMEHMS2  = re.compile(self.ptc.RE_TIMEHMS2,  re.IGNORECASE)
+        self.CRE_DATE      = re.compile(self.ptc.RE_DATE,      re.IGNORECASE)
+        self.CRE_DATE2     = re.compile(self.ptc.RE_DATE2,     re.IGNORECASE)
+        self.CRE_DATE3     = re.compile(self.ptc.RE_DATE3,     re.IGNORECASE)
+        self.CRE_MONTH     = re.compile(self.ptc.RE_MONTH,     re.IGNORECASE)
+        self.CRE_WEEKDAY   = re.compile(self.ptc.RE_WEEKDAY,   re.IGNORECASE)
+        self.CRE_DAY       = re.compile(self.ptc.RE_DAY,       re.IGNORECASE)
+        self.CRE_TIME      = re.compile(self.ptc.RE_TIME,      re.IGNORECASE)
+        self.CRE_REMAINING = re.compile(self.ptc.RE_REMAINING, re.IGNORECASE)
+
+        #regex for date/time ranges
+        self.CRE_RTIMEHMS   = re.compile(self.ptc.RE_RTIMEHMS,  re.IGNORECASE)
+        self.CRE_RTIMEHMS2  = re.compile(self.ptc.RE_RTIMEHMS2,  re.IGNORECASE)
+        self.CRE_RDATE      = re.compile(self.ptc.RE_RDATE,      re.IGNORECASE)
+        self.CRE_RDATE3     = re.compile(self.ptc.RE_RDATE3,     re.IGNORECASE)
+
+        self.CRE_TIMERNG1      = re.compile(self.ptc.TIMERNG1, re.IGNORECASE)
+        self.CRE_TIMERNG2      = re.compile(self.ptc.TIMERNG2, re.IGNORECASE)
+        self.CRE_TIMERNG3      = re.compile(self.ptc.TIMERNG3, re.IGNORECASE)
+        self.CRE_DATERNG1      = re.compile(self.ptc.DATERNG1, re.IGNORECASE)
+        self.CRE_DATERNG2      = re.compile(self.ptc.DATERNG2, re.IGNORECASE)
+        self.CRE_DATERNG3      = re.compile(self.ptc.DATERNG3, re.IGNORECASE)
+
+        self.invalidFlag   = False  # Is set if the datetime string entered cannot be parsed at all
+        self.weekdyFlag    = False  # monday/tuesday/...
+        self.dateStdFlag   = False  # 07/21/06
+        self.dateStrFlag   = False  # July 21st, 2006
+        self.timeFlag      = False  # 5:50 
+        self.meridianFlag  = False  # am/pm
+        self.dayStrFlag    = False  # tomorrow/yesterday/today/..
+        self.timeStrFlag   = False  # lunch/noon/breakfast/...
+        self.modifierFlag  = False  # after/before/prev/next/..
+        self.modifier2Flag = False  # after/before/prev/next/..
+        self.unitsFlag     = False  # hrs/weeks/yrs/min/..
+        self.qunitsFlag    = False  # h/m/t/d..
+
+
+    def _convertUnitAsWords(self, unitText):
+        """
+        Converts text units into their number value
+
+        Five = 5
+        Twenty Five = 25
+        Two hundred twenty five = 225
+        Two thousand and twenty five = 2025
+        Two thousand twenty five = 2025
+
+        @type  unitText: string
+        @param unitText: number string
+
+        @rtype:  integer
+        @return: numerical value of unitText
+        """
+        # TODO: implement this
+        pass
+
+
+    def _buildTime(self, source, quantity, modifier, units):
+        """
+        Take quantity, modifier and unit strings and convert them into values.
+        Then calculate the time and return the adjusted sourceTime
+
+        @type  source:   time
+        @param source:   time to use as the base (or source)
+        @type  quantity: string
+        @param quantity: quantity string
+        @type  modifier: string
+        @param modifier: how quantity and units modify the source time
+        @type  units:    string
+        @param units:    unit of the quantity (e.g. hours, days, months, etc.)
+
+        @rtype:  timetuple
+        @return: timetuple of the calculated time
+        """
+        if _debug:
+            print '_buildTime: [%s][%s][%s]' % (quantity, modifier, units)
+
+        if source is None:
+            source = time.localtime()
+
+        if quantity is None:
+            quantity = ''
+        else:
+            quantity = string.strip(quantity)
+
+        if len(quantity) == 0:
+            qty = 1
+        else:
+            try:
+                qty = int(quantity)
+            except ValueError:
+                qty = 0
+
+        if modifier in self.ptc.Modifiers:
+            qty = qty * self.ptc.Modifiers[modifier]
+
+            if units is None or units == '':
+                units = 'dy'
+
+        # plurals are handled by the regexes (this could be a bug, though)
+
+        if units in self.ptc.Units:
+            u = self.ptc.Units[units]
+        else:
+            u = 1
+
+        (yr, mth, dy, hr, mn, sec, wd, yd, isdst) = source
+
+        start  = datetime.datetime(yr, mth, dy, hr, mn, sec)
+        target = start
+
+        if units.startswith('y'):
+            target = self.inc(start, year=qty)
+        elif units.endswith('th') or units.endswith('ths'):
+            target = self.inc(start, month=qty)
+        else:
+            if units.startswith('d'):
+                target = start + datetime.timedelta(days=qty)
+            elif units.startswith('h'):
+                target = start + datetime.timedelta(hours=qty)
+            elif units.startswith('m'):
+                target = start + datetime.timedelta(minutes=qty)
+            elif units.startswith('s'):
+                target = start + datetime.timedelta(seconds=qty)
+            elif units.startswith('w'):
+                target = start + datetime.timedelta(weeks=qty)
+
+        if target != start:
+            self.invalidFlag = False
+
+        return target.timetuple()
+
+
+    def parseDate(self, dateString):
+        """
+        Parses strings like 05/28/2006 or 04.21
+
+        @type  dateString: string
+        @param dateString: text to convert to a datetime
+
+        @rtype:  datetime
+        @return: calculated datetime value of dateString
+        """
+        yr, mth, dy, hr, mn, sec, wd, yd, isdst = time.localtime()
+
+        s = dateString
+        m = self.CRE_DATE2.search(s)
+        if m is not None:
+            index = m.start()
+            mth   = int(s[:index])
+            s     = s[index + 1:]
+
+        m = self.CRE_DATE2.search(s)
+        if m is not None:
+            index = m.start()
+            dy    = int(s[:index])
+            yr    = int(s[index + 1:])
+            # TODO should this have a birthday epoch constraint?
+            if yr < 99:
+                yr += 2000
+        else:
+            dy = int(string.strip(s))
+
+        if mth <= 12 and dy <= self.ptc.DaysInMonthList[mth - 1]:
+            sourceTime = (yr, mth, dy, hr, mn, sec, wd, yd, isdst)
+        else:
+            self.invalidFlag = True
+            sourceTime       = time.localtime() #return current time if date string is invalid
+
+        return sourceTime
+
+
+    def parseDateText(self, dateString):
+        """
+        Parses strings like "May 31st, 2006" or "Jan 1st" or "July 2006"
+
+        @type  dateString: string
+        @param dateString: text to convert to a datetime
+
+        @rtype:  datetime
+        @return: calculated datetime value of dateString
+        """
+        yr, mth, dy, hr, mn, sec, wd, yd, isdst = time.localtime()
+
+        currentMth = mth
+        currentDy  = dy
+
+        s   = dateString.lower()
+        m   = self.CRE_DATE3.search(s)
+        mth = m.group('mthname')
+        mth = int(self.ptc.MthNames[mth])
+
+        if m.group('day') is not None:
+            dy = int(m.group('day'))
+        else:
+            dy = 1
+
+        if m.group('year') is not None:
+            yr = int(m.group('year'))
+        elif (mth < currentMth) or (mth == currentMth and dy < currentDy):
+            # if that day and month have already passed in this year,
+            # then increment the year by 1
+            yr += 1
+
+        if dy <= self.ptc.DaysInMonthList[mth - 1]:
+            sourceTime = (yr, mth, dy, 9, 0, 0, wd, yd, isdst)
+        else:
+              # Return current time if date string is invalid
+            self.invalidFlag = True
+            sourceTime       = time.localtime()
+
+        return sourceTime
+
+
+    def evalRanges(self, datetimeString, sourceTime=None):
+        """
+        Evaluate strings that contain a time or date range.
+        """
+        startTime = ''
+        endTime   = ''
+        startDate = ''
+        endDate   = ''
+        rangeFlag = 0
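+        # NOTE: the sourceTime argument is ignored here; it is reset to None
+        # and re-derived from the parsed text below.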
+        sourceTime = None
+
+        s = string.strip(datetimeString.lower())
+
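+        # rangeFlag values: 1-3 match the time-range patterns (TIMERNG1..3),
+        # 4-6 match the date-range patterns (DATERNG1..3), 0 means no range found.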
+        m = self.CRE_TIMERNG1.search(s)
+        if m is not None:
+            rangeFlag = 1
+        else:
+            m = self.CRE_TIMERNG2.search(s)
+            if m is not None:  
+                rangeFlag = 2
+            else:
+                m = self.CRE_TIMERNG3.search(s)
+                if m is not None:
+                    rangeFlag = 3
+                else:
+                    m = self.CRE_DATERNG1.search(s)
+                    if m is not None:
+                        rangeFlag = 4
+                    else:
+                        m = self.CRE_DATERNG2.search(s)
+                        if m is not None:
+                            rangeFlag = 5
+                        else:
+                            m = self.CRE_DATERNG3.search(s)
+                            if m is not None:
+                                rangeFlag = 6
+
+        if m is not None:
+            if (m.group() != s):
+                # capture remaining string
+                parseStr = m.group()
+                str1    = s[:m.start()]
+                str2    = s[m.end():]
+                s       = str1 + ' ' + str2
+                flag    = 1
+                sourceTime, flag = self.parse(s, sourceTime)
+                if flag:
+                    sourceTime = None
+            else:
+                parseStr = s
+
+
+        if rangeFlag == 1:
+            m = re.search('-',parseStr)
+            startTime, sflag = self.parse((parseStr[:m.start()]), sourceTime)
+            endTime, eflag   = self.parse((parseStr[(m.start()+1):]), sourceTime)
+            if eflag is False and sflag is False:
+                return (startTime, endTime, False)
+
+        elif rangeFlag == 2:
+            m = re.search('-',parseStr)
+            startTime, sflag = self.parse((parseStr[:m.start()]), sourceTime)
+            endTime, eflag   = self.parse((parseStr[(m.start()+1):]), sourceTime)
+            if eflag is False and sflag is False:
+                return (startTime, endTime, False)
+
+        elif rangeFlag == 3:
+            m = re.search('-',parseStr)
+
+            #capturing the meridian from the end time
+            ampm = re.search('a',parseStr)
+
+            #appending the meridian to the start time
+            if ampm is not None:
+                startTime, sflag = self.parse((parseStr[:m.start()]+'am'), sourceTime)
+            else:
+                startTime, sflag = self.parse((parseStr[:m.start()]+'pm'), sourceTime)
+            endTime, eflag   = self.parse(parseStr[(m.start()+1):], sourceTime)
+            if eflag is False and sflag is False:
+                return (startTime, endTime, False)
+
+        elif rangeFlag == 4:
+            m = re.search('-',parseStr)
+            startDate, sflag = self.parse((parseStr[:m.start()]), sourceTime)
+            endDate, eflag   = self.parse((parseStr[(m.start()+1):]), sourceTime)
+            if eflag is False and sflag is False:
+                return (startDate, endDate, False)
+
+        elif rangeFlag == 5:
+            m = re.search('-',parseStr)
+            endDate = parseStr[(m.start()+1):]
+
+            #capturing the year from the end date
+            date = self.CRE_DATE3.search(endDate)
+            endYear = date.group('year')
+
+            # append the year to the start date if the start date has no year
+            # information but the end date does, e.g. "Aug 21 - Sep 4, 2007"
+            if endYear is not None:
+                startDate = parseStr[:m.start()]
+                date = self.CRE_DATE3.search(startDate)
+                startYear = date.group('year')
+                if startYear is None:
+                    startDate += endYear
+            else:
+                startDate = parseStr[:m.start()]
+
+            startDate, sflag = self.parse(startDate, sourceTime)
+            endDate, eflag   = self.parse(endDate, sourceTime)
+            if eflag is False and sflag is False:
+                return (startDate, endDate, False)
+
+        elif rangeFlag == 6:
+            m = re.search('-',parseStr)
+
+            startDate = parseStr[:m.start()]
+
+            #capturing the month from the start date
+            mth = self.CRE_DATE3.search(startDate)
+            mth = mth.group('mthname')
+
+            # appending the month name to the end date
+            endDate = mth + parseStr[(m.start()+1):]
+
+            startDate, sflag = self.parse(startDate, sourceTime)
+            endDate, eflag   = self.parse(endDate, sourceTime)
+            if eflag is False and sflag is False:
+                return (startDate, endDate, False)
+        else:
+            sourceTime = time.localtime()
+            #if range is not found
+            return (sourceTime, sourceTime, True)
+
+
+    def _evalModifier(self, modifier, chunk1, chunk2, sourceTime):
+        """
+        Evaluate the modifier string and following text (passed in
+        as chunk1 and chunk2) and if they match any known modifiers
+        calculate the delta and apply it to sourceTime
+
+        @type  modifier: string
+        @param modifier: modifier text to apply to sourceTime
+        @type  chunk1:   string
+        @param chunk1:   first text chunk that followed modifier (if any)
+        @type  chunk2:   string
+        @param chunk2:   second text chunk that followed modifier (if any)
+        @type  sourceTime: datetime
+        @param sourceTime: datetime value to use as the base
+
+        @rtype:  tuple
+        @return: tuple of any remaining text and the modified sourceTime
+        """
+        offset = self.ptc.Modifiers[modifier]
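+        # offset comes from ptc.Modifiers: -1 for last/previous/before,
+        # 0 for this/"end of", 1 for next/after and 2 for "in a".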
+
+        if sourceTime is not None:
+            (yr, mth, dy, hr, mn, sec, wd, yd, isdst) = sourceTime
+        else:
+            (yr, mth, dy, hr, mn, sec, wd, yd, isdst) = time.localtime()
+
+        # capture the units after the modifier and the remaining string after the unit
+        m = self.CRE_REMAINING.search(chunk2)
+        if m is not None:
+            index  = m.start() + 1
+            unit   = chunk2[:m.start()]
+            chunk2 = chunk2[index:]
+        else:
+            unit   = chunk2
+            chunk2 = ''
+
+        flag = False
+
+        if unit == self.ptc.Target_Text['month'] or \
+           unit == self.ptc.Target_Text['mth']:
+            if offset == 0:
+                dy        = self.ptc.DaysInMonthList[mth - 1]
+                sourceTime = (yr, mth, dy, 9, 0, 0, wd, yd, isdst)
+            elif offset == 2:
+                # if day is the last day of the month, calculate the last day of the next month
+                if dy == self.ptc.DaysInMonthList[mth - 1]:
+                    dy = self.ptc.DaysInMonthList[mth]
+
+                start      = datetime.datetime(yr, mth, dy, 9, 0, 0)
+                target     = self.inc(start, month=1)
+                sourceTime = target.timetuple()
+            else:
+                start      = datetime.datetime(yr, mth, 1, 9, 0, 0)
+                target     = self.inc(start, month=offset)
+                sourceTime = target.timetuple()
+
+            flag = True
+
+        if unit == self.ptc.Target_Text['week'] or \
+             unit == self.ptc.Target_Text['wk'] or \
+             unit == self.ptc.Target_Text['w']:
+            if offset == 0:
+                start      = datetime.datetime(yr, mth, dy, 17, 0, 0)
+                target     = start + datetime.timedelta(days=(4 - wd))
+                sourceTime = target.timetuple()
+            elif offset == 2:
+                start      = datetime.datetime(yr, mth, dy, 9, 0, 0)
+                target     = start + datetime.timedelta(days=7)
+                sourceTime = target.timetuple()
+            else:
+                return self._evalModifier(modifier, chunk1, "monday " + chunk2, sourceTime)
+
+            flag = True
+
+        if unit == self.ptc.Target_Text['day'] or \
+            unit == self.ptc.Target_Text['dy'] or \
+            unit == self.ptc.Target_Text['d']:
+            if offset == 0:
+                sourceTime = (yr, mth, dy, 17, 0, 0, wd, yd, isdst)
+            elif offset == 2:
+                start      = datetime.datetime(yr, mth, dy, hr, mn, sec)
+                target     = start + datetime.timedelta(days=1)
+                sourceTime = target.timetuple()
+            else:
+                start      = datetime.datetime(yr, mth, dy, 9, 0, 0)
+                target     = start + datetime.timedelta(days=offset)
+                sourceTime = target.timetuple()
+
+            flag = True
+
+        if unit == self.ptc.Target_Text['hour'] or \
+           unit == self.ptc.Target_Text['hr']:
+            if offset == 0:
+                sourceTime = (yr, mth, dy, hr, 0, 0, wd, yd, isdst)
+            else:
+                start      = datetime.datetime(yr, mth, dy, hr, 0, 0)
+                target     = start + datetime.timedelta(hours=offset)
+                sourceTime = target.timetuple()
+
+            flag = True
+
+        if unit == self.ptc.Target_Text['year'] or \
+             unit == self.ptc.Target_Text['yr'] or \
+             unit == self.ptc.Target_Text['y']:
+            if offset == 0:
+                sourceTime = (yr, 12, 31, hr, mn, sec, wd, yd, isdst)
+            elif offset == 2:
+                sourceTime = (yr + 1, mth, dy, hr, mn, sec, wd, yd, isdst)
+            else:
+                sourceTime = (yr + offset, 1, 1, 9, 0, 0, wd, yd, isdst)
+
+            flag = True
+
+        if not flag:
+            m = self.CRE_WEEKDAY.match(unit)
+            if m is not None:
+                wkdy = m.group()
+                wkdy = self.ptc.WeekDays[wkdy]
+
+                if offset == 0:
+                    diff       = wkdy - wd
+                    start      = datetime.datetime(yr, mth, dy, 9, 0, 0)
+                    target     = start + datetime.timedelta(days=diff)
+                    sourceTime = target.timetuple()
+                else:
+                    diff       = wkdy - wd
+                    start      = datetime.datetime(yr, mth, dy, 9, 0, 0)
+                    target     = start + datetime.timedelta(days=diff + 7 * offset)
+                    sourceTime = target.timetuple()
+
+                flag = True
+
+        if not flag:
+            m = self.CRE_TIME.match(unit)
+            if m is not None:
+                (yr, mth, dy, hr, mn, sec, wd, yd, isdst), self.invalidFlag = self.parse(unit)
+                start      = datetime.datetime(yr, mth, dy, hr, mn, sec)
+                target     = start + datetime.timedelta(days=offset)
+                sourceTime = target.timetuple()
+
+                flag              = True
+                self.modifierFlag = False
+
+        # if the word after next is a number, the string is likely
+        # to be something like "next 4 hrs" for which we have to
+        # combine the units with the rest of the string
+        if not flag:
+            if offset < 0:
+                # if offset is negative, the unit has to be made negative
+                unit = '-%s' % unit
+
+            chunk2 = '%s %s' % (unit, chunk2)
+
+        self.modifierFlag = False
+
+        return '%s %s' % (chunk1, chunk2), sourceTime
+
+
+    def _evalModifier2(self, modifier, chunk1, chunk2, sourceTime):
+        """
+        Evaluate the modifier string and following text (passed in
+        as chunk1 and chunk2) and if they match any known modifiers
+        calculate the delta and apply it to sourceTime
+
+        @type  modifier:   string
+        @param modifier:   modifier text to apply to sourceTime
+        @type  chunk1:     string
+        @param chunk1:     first text chunk that followed modifier (if any)
+        @type  chunk2:     string
+        @param chunk2:     second text chunk that followed modifier (if any)
+        @type  sourceTime: datetime
+        @param sourceTime: datetime value to use as the base
+
+        @rtype:  tuple
+        @return: tuple of any remaining text and the modified sourceTime
+        """
+        offset = self.ptc.Modifiers[modifier]
+        digit  = r'\d+'
+
+        if sourceTime is not None:
+            (yr, mth, dy, hr, mn, sec, wd, yd, isdst) = sourceTime
+        else:
+            (yr, mth, dy, hr, mn, sec, wd, yd, isdst) = time.localtime()
+
+        self.modifier2Flag = False
+
+        # If the string after the negative modifier starts with
+        # digits, then it is likely that the string is similar to
+        # " before 3 days" or 'evening prior to 3 days'.
+        # In this case, the total time is calculated by subtracting
+        # '3 days' from the current date.
+        # So, we have to identify the quantity and negate it before
+        # parsing the string.
+        # This is not required for strings not starting with digits
+        # since the string is enough to calculate the sourceTime
+        if offset < 0:
+            m = re.match(digit, string.strip(chunk2))
+            if m is not None:
+                qty    = int(m.group()) * -1
+                chunk2 = chunk2[m.end():]
+                chunk2 = '%d%s' % (qty, chunk2)
+
+        sourceTime, flag = self.parse(chunk2, sourceTime)
+
+        if chunk1 != '':
+            if offset < 0:
+                m = re.match(digit, string.strip(chunk1))
+                if m is not None:
+                    qty    = int(m.group()) * -1
+                    chunk1 = chunk1[m.end():]
+                    chunk1 = '%d%s' % (qty, chunk1)
+
+            sourceTime, flag = self.parse(chunk1, sourceTime)
+
+        return '', sourceTime
+
+
+    def _evalString(self, datetimeString, sourceTime=None):
+        """
+        Calculate the datetime based on flags set by the L{parse()} routine
+
+        Examples handled::
+            RFC822, W3CDTF formatted dates
+            HH:MM[:SS][ am/pm]
+            MM/DD/YYYY
+            DD MMMM YYYY
+
+        @type  datetimeString: string
+        @param datetimeString: text to try and parse as more "traditional" date/time text
+        @type  sourceTime:     datetime
+        @param sourceTime:     datetime value to use as the base
+
+        @rtype:  datetime
+        @return: calculated datetime value or current datetime if not parsed
+        """
+        s = string.strip(datetimeString)
+
+          # Given string is an RFC822 date
+        if sourceTime is None:
+            sourceTime = _parse_date_rfc822(s)
+
+          # Given string is a W3CDTF date
+        if sourceTime is None:
+            sourceTime = _parse_date_w3dtf(s)
+
+        if sourceTime is None:
+            s = s.lower()
+
+          # Given string is in the format HH:MM(:SS)(am/pm)
+        if self.meridianFlag:
+            if sourceTime is None:
+                (yr, mth, dy, hr, mn, sec, wd, yd, isdst) = time.localtime()
+            else:
+                (yr, mth, dy, hr, mn, sec, wd, yd, isdst) = sourceTime
+
+            m = self.CRE_TIMEHMS2.search(s)
+            if m is not None:
+                dt = s[:m.start('meridian')].strip()
+                if len(dt) <= 2:
+                    hr  = int(dt)
+                    mn  = 0
+                    sec = 0
+                else:
+                    hr, mn, sec = _extract_time(m)
+
+                if hr == 24:
+                    hr = 0
+
+                sourceTime = (yr, mth, dy, hr, mn, sec, wd, yd, isdst)
+                meridian   = m.group('meridian')
+
+                if (re.compile("a").search(meridian)) and hr == 12:
+                    sourceTime = (yr, mth, dy, 0, mn, sec, wd, yd, isdst)
+                if (re.compile("p").search(meridian)) and hr < 12:
+                    sourceTime = (yr, mth, dy, hr+12, mn, sec, wd, yd, isdst)
+
+              # invalid time
+            if hr > 24 or mn > 59 or sec > 59:
+                sourceTime       = time.localtime()
+                self.invalidFlag = True
+
+            self.meridianFlag = False
+
+          # Given string is in the format HH:MM(:SS)
+        if self.timeFlag:
+            if sourceTime is None:
+                (yr, mth, dy, hr, mn, sec, wd, yd, isdst) = time.localtime()
+            else:
+                (yr, mth, dy, hr, mn, sec, wd, yd, isdst) = sourceTime
+
+            m = self.CRE_TIMEHMS.search(s)
+            if m is not None:
+                hr, mn, sec = _extract_time(m)
+            if hr == 24:
+                hr = 0
+
+            if hr > 24 or mn > 59 or sec > 59:
+                # invalid time
+                sourceTime = time.localtime()
+                self.invalidFlag = True
+            else:
+                sourceTime = (yr, mth, dy, hr, mn, sec, wd, yd, isdst)
+
+            self.timeFlag = False
+
+          # Given string is in the format 07/21/2006
+        if self.dateStdFlag:
+            sourceTime       = self.parseDate(s)
+            self.dateStdFlag = False
+
+          # Given string is in the format  "May 23rd, 2005"
+        if self.dateStrFlag:
+            sourceTime       = self.parseDateText(s)
+            self.dateStrFlag = False
+
+          # Given string is a weekday
+        if self.weekdyFlag:
+            yr, mth, dy, hr, mn, sec, wd, yd, isdst = time.localtime()
+            start = datetime.datetime(yr, mth, dy, hr, mn, sec)
+            wkDy  = self.ptc.WeekDays[s]
+
+            if wkDy > wd:
+                qty    = wkDy - wd
+                target = start + datetime.timedelta(days=qty)
+                wd     = wkDy
+            else:
+                qty    = 6 - wd + wkDy + 1
+                target = start + datetime.timedelta(days=qty)
+                wd     = wkDy
+
+            sourceTime      = target.timetuple()
+            self.weekdyFlag = False
+
+          # Given string is a natural language time string like lunch, midnight, etc
+        if self.timeStrFlag:
+            if sourceTime is None:
+                (yr, mth, dy, hr, mn, sec, wd, yd, isdst) = time.localtime()
+            else:
+                (yr, mth, dy, hr, mn, sec, wd, yd, isdst) = sourceTime
+
+            sources = { 'now':       (yr, mth, dy, hr, mn, sec, wd, yd, isdst),
+                        'noon':      (yr, mth, dy, 12,  0,   0, wd, yd, isdst),
+                        'lunch':     (yr, mth, dy, 12,  0,   0, wd, yd, isdst),
+                        'morning':   (yr, mth, dy,  6,  0,   0, wd, yd, isdst),
+                        'breakfast': (yr, mth, dy,  8,  0,   0, wd, yd, isdst),
+                        'dinner':    (yr, mth, dy, 19,  0,   0, wd, yd, isdst),
+                        'evening':   (yr, mth, dy, 18,  0,   0, wd, yd, isdst),
+                        'midnight':  (yr, mth, dy,  0,  0,   0, wd, yd, isdst),
+                        'night':     (yr, mth, dy, 21,  0,   0, wd, yd, isdst),
+                        'tonight':   (yr, mth, dy, 21,  0,   0, wd, yd, isdst),
+                      }
+
+            if s in sources:
+                sourceTime = sources[s]
+            else:
+                sourceTime       = time.localtime()
+                self.invalidFlag = True
+
+            self.timeStrFlag = False
+
+           # Given string is a natural language date string like today, tomorrow..
+        if self.dayStrFlag:
+            if sourceTime is None:
+                sourceTime = time.localtime()
+
+            (yr, mth, dy, hr, mn, sec, wd, yd, isdst) = sourceTime
+
+            sources = { 'tomorrow':   1,
+                        'today':      0,
+                        'yesterday': -1,
+                       }
+
+            start      = datetime.datetime(yr, mth, dy, 9, 0, 0)
+            target     = start + datetime.timedelta(days=sources[s])
+            sourceTime = target.timetuple()
+
+            self.dayStrFlag = False
+
+          # Given string is a time string with units like "5 hrs 30 min"
+        if self.unitsFlag:
+            modifier = ''  # TODO
+
+            if sourceTime is None:
+                sourceTime = time.localtime()
+
+            m = self.CRE_UNITS.search(s)
+            if m is not None:
+                units    = m.group('units')
+                quantity = s[:m.start('units')]
+
+            sourceTime     = self._buildTime(sourceTime, quantity, modifier, units)
+            self.unitsFlag = False
+
+          # Given string is a time string with single char units like "5 h 30 m"
+        if self.qunitsFlag:
+            modifier = ''  # TODO
+
+            if sourceTime is None:
+                sourceTime = time.localtime()
+
+            m = self.CRE_QUNITS.search(s)
+            if m is not None:
+                units    = m.group('qunits')
+                quantity = s[:m.start('qunits')]
+
+            sourceTime      = self._buildTime(sourceTime, quantity, modifier, units)
+            self.qunitsFlag = False
+
+          # Given string does not match anything
+        if sourceTime is None:
+            sourceTime       = time.localtime()
+            self.invalidFlag = True
+
+        return sourceTime
+
+
+    def parse(self, datetimeString, sourceTime=None):
+        """
+        Splits the L{datetimeString} into tokens, finds the regex patterns
+        that match and then calculates a datetime value from the chunks.
+
+        If L{sourceTime} is given then the datetime value will be calculated
+        from that datetime, otherwise from the current datetime.
+
+        @type  datetimeString: string
+        @param datetimeString: datetime text to evaluate
+        @type  sourceTime:     datetime
+        @param sourceTime:     datetime value to use as the base
+
+        @rtype:  tuple
+        @return: tuple of any remaining text and the modified sourceTime
+        """
+        s         = string.strip(datetimeString.lower())
+        dateStr   = ''
+        parseStr  = ''
+        totalTime = sourceTime
+
+        self.invalidFlag = False
+
+        if s == '':
+            if sourceTime is not None:
+                return (sourceTime, False)
+            else:
+                return (time.localtime(), True)
+
+        while len(s) > 0:
+            flag   = False
+            chunk1 = ''
+            chunk2 = ''
+
+            if _debug:
+                print 'parse (top of loop): [%s][%s]' % (s, parseStr)
+
+            if parseStr == '':
+                # Modifier like next\prev..
+                m = self.CRE_MODIFIER.search(s)
+                if m is not None:
+                    self.modifierFlag = True
+                    if (m.group('modifier') != s):
+                        # capture remaining string
+                        parseStr = m.group('modifier')
+                        chunk1   = string.strip(s[:m.start('modifier')])
+                        chunk2   = string.strip(s[m.end('modifier'):])
+                        flag     = True
+                    else:
+                        parseStr = s
+
+            if parseStr == '':
+                # Modifier like from\after\prior..
+                m = self.CRE_MODIFIER2.search(s)
+                if m is not None:
+                    self.modifier2Flag = True
+                    if (m.group('modifier') != s):
+                        # capture remaining string
+                        parseStr = m.group('modifier')
+                        chunk1   = string.strip(s[:m.start('modifier')])
+                        chunk2   = string.strip(s[m.end('modifier'):])
+                        flag     = True
+                    else:
+                        parseStr = s
+
+            if parseStr == '':
+                # String date format
+                m = self.CRE_DATE3.search(s)
+                if m is not None:
+                    self.dateStrFlag = True
+                    if (m.group('date') != s):
+                        # capture remaining string
+                        parseStr = m.group('date')
+                        chunk1   = s[:m.start('date')]
+                        chunk2   = s[m.end('date'):]
+                        s        = '%s %s' % (chunk1, chunk2)
+                        flag     = True
+                    else:
+                        parseStr = s
+
+            if parseStr == '':
+                # Standard date format
+                m = self.CRE_DATE.search(s)
+                if m is not None:
+                    self.dateStdFlag = True
+                    if (m.group('date') != s):
+                        # capture remaining string
+                        parseStr = m.group('date')
+                        chunk1   = s[:m.start('date')]
+                        chunk2   = s[m.end('date'):]
+                        s        = '%s %s' % (chunk1, chunk2)
+                        flag     = True
+                    else:
+                        parseStr = s
+
+            if parseStr == '':
+                # Natural language day strings
+                m = self.CRE_DAY.search(s)
+                if m is not None:
+                    self.dayStrFlag = True
+                    if (m.group('day') != s):
+                        # capture remaining string
+                        parseStr = m.group('day')
+                        chunk1   = s[:m.start('day')]
+                        chunk2   = s[m.end('day'):]
+                        s        = '%s %s' % (chunk1, chunk2)
+                        flag     = True
+                    else:
+                        parseStr = s
+
+            if parseStr == '':
+                # Quantity + Units
+                m = self.CRE_UNITS.search(s)
+                if m is not None:
+                    self.unitsFlag = True
+                    if (m.group('qty') != s):
+                        # capture remaining string
+                        parseStr = m.group('qty')
+                        chunk1   = s[:m.start('qty')].strip()
+                        chunk2   = s[m.end('qty'):].strip()
+
+                        if chunk1[-1:] == '-':
+                            parseStr = '-%s' % parseStr
+                            chunk1   = chunk1[:-1]
+
+                        s    = '%s %s' % (chunk1, chunk2)
+                        flag = True
+                    else:
+                        parseStr = s
+
+            if parseStr == '':
+                # Quantity + Units
+                m = self.CRE_QUNITS.search(s)
+                if m is not None:
+                    self.qunitsFlag = True
+                    if (m.group('qty') != s):
+                        # capture remaining string
+                        parseStr = m.group('qty')
+                        chunk1   = s[:m.start('qty')].strip()
+                        chunk2   = s[m.end('qty'):].strip()
+
+                        if chunk1[-1:] == '-':
+                            parseStr = '-%s' % parseStr
+                            chunk1   = chunk1[:-1]
+
+                        s    = '%s %s' % (chunk1, chunk2)
+                        flag = True
+                    else:
+                        parseStr = s 
+
+            if parseStr == '':
+                # Weekday
+                m = self.CRE_WEEKDAY.search(s)
+                if m is not None:
+                    self.weekdyFlag = True
+                    if (m.group('weekday') != s):
+                        # capture remaining string
+                        parseStr = m.group()
+                        chunk1   = s[:m.start('weekday')]
+                        chunk2   = s[m.end('weekday'):]
+                        s        = '%s %s' % (chunk1, chunk2)
+                        flag     = True
+                    else:
+                        parseStr = s
+
+            if parseStr == '':
+                # Natural language time strings
+                m = self.CRE_TIME.search(s)
+                if m is not None:
+                    self.timeStrFlag = True
+                    if (m.group('time') != s):
+                        # capture remaining string
+                        parseStr = m.group('time')
+                        chunk1   = s[:m.start('time')]
+                        chunk2   = s[m.end('time'):]
+                        s        = '%s %s' % (chunk1, chunk2)
+                        flag     = True
+                    else:
+                        parseStr = s
+
+            if parseStr == '':
+                # HH:MM(:SS) am/pm time strings
+                m = self.CRE_TIMEHMS2.search(s)
+                if m is not None:
+                    self.meridianFlag = True
+                    if m.group('minutes') is not None:
+                        if m.group('seconds') is not None:
+                            parseStr = '%s:%s:%s %s' % (m.group('hours'), m.group('minutes'), m.group('seconds'), m.group('meridian'))
+                        else:
+                            parseStr = '%s:%s %s' % (m.group('hours'), m.group('minutes'), m.group('meridian'))
+                    else:
+                        parseStr = '%s %s' % (m.group('hours'), m.group('meridian'))
+
+                    chunk1 = s[:m.start('hours')]
+                    chunk2 = s[m.end('meridian'):]
+
+                    s    = '%s %s' % (chunk1, chunk2)
+                    flag = True
+
+            if parseStr == '':
+                # HH:MM(:SS) time strings
+                m = self.CRE_TIMEHMS.search(s)
+                if m is not None:
+                    self.timeFlag = True
+                    if m.group('seconds') is not None:
+                        parseStr = '%s:%s:%s' % (m.group('hours'), m.group('minutes'), m.group('seconds'))
+                        chunk1   = s[:m.start('hours')]
+                        chunk2   = s[m.end('seconds'):]
+                    else:
+                        parseStr = '%s:%s' % (m.group('hours'), m.group('minutes'))
+                        chunk1   = s[:m.start('hours')]
+                        chunk2   = s[m.end('minutes'):]
+
+                    s    = '%s %s' % (chunk1, chunk2)
+                    flag = True
+
+            # if the string did not match any regex, empty it so we drop out of the while loop
+            if not flag:
+                s = ''
+
+            if _debug:
+                print 'parse (bottom) [%s][%s][%s][%s]' % (s, parseStr, chunk1, chunk2)
+                print 'invalid %s, weekday %s, dateStd %s, dateStr %s, time %s, timeStr %s, meridian %s' % \
+                       (self.invalidFlag, self.weekdyFlag, self.dateStdFlag, self.dateStrFlag, self.timeFlag, self.timeStrFlag, self.meridianFlag)
+                print 'dayStr %s, modifier %s, modifier2 %s, units %s, qunits %s' % \
+                       (self.dayStrFlag, self.modifierFlag, self.modifier2Flag, self.unitsFlag, self.qunitsFlag)
+
+            # evaluate the matched string
+            if parseStr != '':
+                if self.modifierFlag:
+                    t, totalTime = self._evalModifier(parseStr, chunk1, chunk2, totalTime)
+
+                    return self.parse(t, totalTime)
+
+                elif self.modifier2Flag:
+                    s, totalTime = self._evalModifier2(parseStr, chunk1, chunk2, totalTime)
+                else:
+                    totalTime = self._evalString(parseStr, totalTime)
+                    parseStr  = ''
+
+        # String is not parsed at all
+        if totalTime is None or totalTime == sourceTime:
+            totalTime        = time.localtime()
+            self.invalidFlag = True
+
+        return (totalTime, self.invalidFlag)
+
+
+    def inc(self, source, month=None, year=None):
+        """
+        Takes the given date, or current date if none is passed, and
+        increments it according to the values passed in by month
+        and/or year.
+
+        This routine is needed because the timedelta() routine does
+        not allow for month or year increments.
+
+        @type  source: datetime
+        @param source: datetime value to increment
+        @type  month:  integer
+        @param month:  optional number of months to increment
+        @type  year:   integer
+        @param year:   optional number of years to increment
+
+        @rtype:  datetime
+        @return: L{source} incremented by the number of months and/or years
+        """
+        yr  = source.year
+        mth = source.month
+
+        if year:
+            try:
+                yi = int(year)
+            except ValueError:
+                yi = 0
+
+            yr += yi
+
+        if month:
+            try:
+                mi = int(month)
+            except ValueError:
+                mi = 0
+
+            m = abs(mi)
+            y = m / 12      # how many years are in month increment
+            m = m % 12      # get remaining months
+
+            if mi < 0:
+                mth = mth - m           # sub months from start month
+                if mth < 1:             # cross start-of-year?
+                    y   -= 1            #   yes - decrement year
+                    mth += 12           #         and fix month
+            else:
+                mth = mth + m           # add months to start month
+                if mth > 12:            # cross end-of-year?
+                    y   += 1            #   yes - increment year
+                    mth -= 12           #         and fix month
+
+            yr += y
+
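+        # NOTE: replace() will raise ValueError if the resulting month has
+        # fewer days than source.day (e.g. Jan 31 incremented by one month).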
+        d = source.replace(year=yr, month=mth)
+
+        return source + (d - source)
+
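+
+if __name__ == '__main__':
+    # Minimal usage sketch (assumes the default constants object and an
+    # en_US-style locale): parse a few human-readable strings against the
+    # current time and print the resulting time tuple and invalid flag.
+    c = Calendar()
+    for text in ('tomorrow', 'next week', '5 hrs 30 min'):
+        result, invalid = c.parse(text)
+        print '%-15s -> %s (invalid=%s)' % (text, result, invalid)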
--- /dev/null	Thu Jan 01 00:00:00 1970 +0000
+++ b/MoinMoin/support/parsedatetime/pdtc.py	Sun Aug 27 15:29:01 2006 +0200
@@ -0,0 +1,290 @@
+#!/usr/bin/env python
+
+"""
+CalendarConstants defines all constants used by parsedatetime.py.
+"""
+
+__license__ = """Copyright (c) 2004-2006 Mike Taylor, All rights reserved.
+
+Licensed under the Apache License, Version 2.0 (the "License");
+you may not use this file except in compliance with the License.
+You may obtain a copy of the License at
+
+   http://www.apache.org/licenses/LICENSE-2.0
+
+Unless required by applicable law or agreed to in writing, software
+distributed under the License is distributed on an "AS IS" BASIS,
+WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+See the License for the specific language governing permissions and
+limitations under the License.
+"""
+__author__       = 'Mike Taylor <http://code-bear.com>'
+__contributors__ = ['Darshana Chhajed <mailto://darshana@osafoundation.org>',
+                   ]
+
+
+class CalendarConstants:
+    def __init__(self):
+        self.Locale = 'American'
+
+        self.TIMESEP      = ':'
+
+        self.RE_SPECIAL   = r'(?P<special>^[in|on|of|at]+)\s+'
+        self.RE_UNITS     = r'(?P<qty>(-?\d+\s*(?P<units>((hour|hr|minute|min|second|sec|day|dy|week|wk|month|mth|year|yr)s?))))'
+        self.RE_QUNITS    = r'(?P<qty>(-?\d+\s?(?P<qunits>h|m|s|d|w|m|y)(\s|,|$)))'
+        self.RE_MODIFIER  = r'(?P<modifier>(previous|prev|last|next|this|eo|(end\sof)|(in\sa)))'
+        self.RE_MODIFIER2 = r'(?P<modifier>(from|before|after|ago|prior))'
+        self.RE_TIMEHMS   = r'(?P<hours>\d\d?)(?P<tsep>:|)(?P<minutes>\d\d)(?:(?P=tsep)(?P<seconds>\d\d(?:[.,]\d+)?))?'
+        self.RE_TIMEHMS2  = r'(?P<hours>(\d\d?))((?P<tsep>:|)(?P<minutes>(\d\d?))(?:(?P=tsep)(?P<seconds>\d\d?(?:[.,]\d+)?))?)?\s?(?P<meridian>(am|pm|a.m.|p.m.|a|p))'
+        self.RE_DATE      = r'(?P<date>\d+([/.\\]\d+)+)'
+        self.RE_DATE2     = r'[/.\\]'
+        self.RE_DATE3     = r'(?P<date>((?P<mthname>(january|february|march|april|may|june|july|august|september|october|november|december))\s?((?P<day>\d\d?)(\s|rd|st|nd|th|,|$)+)?(?P<year>\d\d\d\d)?))'
+        self.RE_MONTH     = r'(?P<month>((?P<mthname>(january|february|march|april|may|june|july|august|september|october|november|december))(\s?(?P<year>(\d\d\d\d)))?))'
+        self.RE_WEEKDAY   = r'(?P<weekday>(monday|mon|tuesday|tue|wednesday|wed|thursday|thu|friday|fri|saturday|sat|sunday|sun))'
+        self.RE_DAY       = r'(?P<day>(today|tomorrow|yesterday))'
+        self.RE_TIME      = r'\s*(?P<time>(morning|breakfast|noon|lunch|evening|midnight|tonight|dinner|night|now))' 
+        self.RE_REMAINING = r'\s+'
+
+        # Regex for date/time ranges
+        self.RE_RTIMEHMS   = r'(\d\d?):(\d\d)(:(\d\d))?'
+        self.RE_RTIMEHMS2  = r'(\d\d?)(:(\d\d?))?(:(\d\d?))?\s?(am|pm|a.m.|p.m.|a|p)'
+        self.RE_RDATE      = r'(\d+([/.\\]\d+)+)'
+        self.RE_RDATE3     = r'((((january|february|march|april|may|june|july|august|september|october|november|december))\s?((\d\d?)(\s|rd|st|nd|th|,|$)+)?(\d\d\d\d)?))'
+        self.DATERNG1 = self.RE_RDATE + r'\s?-\s?' + self.RE_RDATE                      #"06/07/06 - 08/09/06"
+        self.DATERNG2 = self.RE_RDATE3 + r'\s?-\s?' + self.RE_RDATE3                    #"march 31 - june 1st, 2006"
+        self.DATERNG3 = self.RE_RDATE3 + r'\s?' + r'-' + r'\s?(\d\d?)\s?(rd|st|nd|th)?' #"march 1st - 13th"
+        self.TIMERNG1 = self.RE_RTIMEHMS2 + r'\s?-\s?'+ self.RE_RTIMEHMS2  #"4:00:55 pm - 5:90:44 am",'4p-5p'
+        self.TIMERNG2 = self.RE_RTIMEHMS + r'\s?-\s?'+ self.RE_RTIMEHMS    #"4:00 - 5:90 ","4:55:55-3:44:55"
+        self.TIMERNG3 = r'\d\d?\s?-\s?'+ self.RE_RTIMEHMS2                 #"4-5pm "
+
+          # Used to adjust the returned date before/after the source
+
+        self.Modifiers = { 'from':       1,
+                           'before':    -1,
+                           'after':      1,
+                           'ago':        1,
+                           'prior':     -1,
+                           'prev':      -1,
+                           'last':      -1,
+                           'next':       1,
+                           'this':       0,
+                           'previous':  -1,
+                           'in a':       2,
+                           'end of':     0,
+                           'eo':         0,
+                        }
+
+        self.Second =   1
+        self.Minute =  60 * self.Second
+        self.Hour   =  60 * self.Minute
+        self.Day    =  24 * self.Hour
+        self.Week   =   7 * self.Day
+        self.Month  =  30 * self.Day
+        self.Year   = 365 * self.Day
+
+        self.WeekDays = { 'monday':    0,
+                          'mon':       0,
+                          'tuesday':   1,
+                          'tue':       1,
+                          'wednesday': 2,
+                          'wed':       2,
+                          'thursday':  3,
+                          'thu':       3,
+                          'friday':    4,
+                          'fri':       4,
+                          'saturday':  5,
+                          'sat':       5,
+                          'sunday':    6,
+                          'sun':       6,
+                        }
+
+          # dictionary to allow for locale specific text
+          # NOTE: The keys are the localized words - the parsing
+          #       code looks up Target_Text with the words it
+          #       extracts *from* the user's input
+
+        self.Target_Text = { 'datesep':   '-',
+                             'timesep':   ':',
+                             'day':       'day',
+                             'dy':        'dy',
+                             'd':         'd',
+                             'week':      'week',
+                             'wk':        'wk',
+                             'w':         'w',
+                             'month':     'month',
+                             'mth':       'mth',
+                             'year':      'year',
+                             'yr':        'yr',
+                             'y':         'y',
+                             'hour':      'hour',
+                             'hr':        'hr',
+                             'h':         'h',
+                             'minute':    'minute',
+                             'min':       'min',
+                             'm':         'm',
+                             'second':    'second',
+                             'sec':       'sec',
+                             's':         's',
+                             'now':       'now',
+                             'noon':      'noon',
+                             'morning':   'morning',
+                             'evening':   'evening',
+                             'breakfast': 'breakfast',
+                             'lunch':     'lunch',
+                             'dinner':    'dinner',
+                             'monday':    'monday',
+                             'mon':       'mon',
+                             'tuesday':   'tuesday',
+                             'tue':       'tue',
+                             'wednesday': 'wednesday',
+                             'wed':       'wed',
+                             'thursday':  'thursday',
+                             'thu':       'thu',
+                             'friday':    'friday',
+                             'fri':       'fri',
+                             'saturday':  'saturday',
+                             'sat':       'sat',
+                             'sunday':    'sunday',
+                             'sun':       'sun',
+                             'january':   'january',
+                             'jan':       'jan',
+                             'february':  'february',
+                             'feb':       'feb',
+                             'march':     'march',
+                             'mar':       'mar',
+                             'april':     'april',
+                             'apr':       'apr',
+                             'may':       'may',
+                             'june':      'june',
+                             'jun':       'jun',
+                             'july':      'july',
+                             'jul':       'jul',
+                             'august':    'august',
+                             'aug':       'aug',
+                             'september': 'september',
+                             'sept':      'sep',
+                             'october':   'october',
+                             'oct':       'oct',
+                             'november':  'november',
+                             'nov':       'nov',
+                             'december':  'december',
+                             'dec':       'dec',
+                           }
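+
+          # Editor's note (assumption drawn from the NOTE above): a localized
+          # subclass would key this table by the locale's own words so that
+          # matched input is normalized to the canonical names used by the
+          # lookup tables below, e.g. a hypothetical German table might hold
+          #
+          #     'woche':  'week',
+          #     'montag': 'monday',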
+
+          # FIXME: there *has* to be a standard routine that does this
+
+        self.DOW_Text = [self.Target_Text['mon'],
+                         self.Target_Text['tue'],
+                         self.Target_Text['wed'],
+                         self.Target_Text['thu'],
+                         self.Target_Text['fri'],
+                         self.Target_Text['sat'],
+                         self.Target_Text['sun'],
+                        ]
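+
+          # Editor's note on the FIXME above: the stdlib does offer
+          # calendar.day_abbr and calendar.month_name, but their entries are
+          # capitalized and depend on the process locale, so the explicit
+          # Target_Text-driven lists are kept here.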
+
+        self.DaysInMonthList = (31, 28, 31, 30, 31, 30, 31, 31, 30, 31, 30, 31)
+
+        self.DaysInMonth = {}
+        self.DaysInMonth[self.Target_Text['january']]   = self.DaysInMonthList[0]
+        self.DaysInMonth[self.Target_Text['february']]  = self.DaysInMonthList[1]
+        self.DaysInMonth[self.Target_Text['march']]     = self.DaysInMonthList[2]
+        self.DaysInMonth[self.Target_Text['april']]     = self.DaysInMonthList[3]
+        self.DaysInMonth[self.Target_Text['may']]       = self.DaysInMonthList[4]
+        self.DaysInMonth[self.Target_Text['june']]      = self.DaysInMonthList[5]
+        self.DaysInMonth[self.Target_Text['july']]      = self.DaysInMonthList[6]
+        self.DaysInMonth[self.Target_Text['august']]    = self.DaysInMonthList[7]
+        self.DaysInMonth[self.Target_Text['september']] = self.DaysInMonthList[8]
+        self.DaysInMonth[self.Target_Text['october']]   = self.DaysInMonthList[9]
+        self.DaysInMonth[self.Target_Text['november']]  = self.DaysInMonthList[10]
+        self.DaysInMonth[self.Target_Text['december']]  = self.DaysInMonthList[11]
+
+        self.Month_Text = [ self.Target_Text['january'],
+                            self.Target_Text['february'],
+                            self.Target_Text['march'],
+                            self.Target_Text['april'],
+                            self.Target_Text['may'],
+                            self.Target_Text['june'],
+                            self.Target_Text['july'],
+                            self.Target_Text['august'],
+                            self.Target_Text['september'],
+                            self.Target_Text['october'],
+                            self.Target_Text['november'],
+                            self.Target_Text['december'],
+                          ]
+
+
+        self.MthNames = { 'january':    1,
+                          'february':   2,
+                          'march':      3,
+                          'april':      4,
+                          'may' :       5,
+                          'june':       6,
+                          'july':       7,
+                          'august':     8,
+                          'september':  9,
+                          'october':   10,
+                          'november':  11,
+                          'december':  12,
+                        }
+
+          # This looks hokey - but it is a nice simple way to get
+          # the proper unit value and it has the advantage that
+          # later I can morph it into something localized.
+          # Any trailing s will be removed before lookup.
+
+        self.Units = {}
+        self.Units[self.Target_Text['second']] = self.Second
+        self.Units[self.Target_Text['sec']]    = self.Second
+        self.Units[self.Target_Text['s']]      = self.Second
+        self.Units[self.Target_Text['minute']] = self.Minute
+        self.Units[self.Target_Text['min']]    = self.Minute
+        self.Units[self.Target_Text['m']]      = self.Minute
+        self.Units[self.Target_Text['hour']]   = self.Hour
+        self.Units[self.Target_Text['hr']]     = self.Hour
+        self.Units[self.Target_Text['h']]      = self.Hour
+        self.Units[self.Target_Text['day']]    = self.Day
+        self.Units[self.Target_Text['dy']]     = self.Day
+        self.Units[self.Target_Text['d']]      = self.Day
+        self.Units[self.Target_Text['week']]   = self.Week
+        self.Units[self.Target_Text['wk']]     = self.Week
+        self.Units[self.Target_Text['w']]      = self.Week
+        self.Units[self.Target_Text['month']]  = self.Month
+        self.Units[self.Target_Text['mth']]    = self.Month
+        self.Units[self.Target_Text['year']]   = self.Year
+        self.Units[self.Target_Text['yr']]     = self.Year
+        self.Units[self.Target_Text['y']]      = self.Year
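+
+          # Editor's sketch (illustrative; 'unit_word' is a hypothetical
+          # name): per the comment above, lookups strip a trailing plural
+          # 's' first, so "weeks" resolves via
+          #
+          #     seconds_per_unit = self.Units[unit_word.rstrip('s')]   # 'weeks' -> 'week'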
+
+        self.Units_Text = { 'one':        1,
+                            'two':        2,
+                            'three':      3,
+                            'four':       4,
+                            'five':       5,
+                            'six':        6,
+                            'seven':      7,
+                            'eight':      8,
+                            'nine':       9,
+                            'ten':       10,
+                            'eleven':    11,
+                            'twelve':    12,
+                            'thirteen':  13,
+                            'fourteen':  14,
+                            'fifteen':   15,
+                            'sixteen':   16,
+                            'seventeen': 17,
+                            'eighteen':  18,
+                            'nineteen':  19,
+                            'twenty':    20,
+                            'thirty':    30,
+                            'forty':     40,
+                            'fifty':     50,
+                            'sixty':     60,
+                            'seventy':   70,
+                            'eighty':    80,
+                            'ninety':    90,
+                            'half':      0.5,
+                            'quarter':  0.25,
+                         }
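+
+          # Editor's sketch (illustrative only; variable names are made up):
+          # Units_Text and Units combine to turn a spelled-out quantity into
+          # seconds, e.g. for the phrase "two weeks":
+          #
+          #     qty     = self.Units_Text['two']      # 2
+          #     seconds = qty * self.Units['week']    # 2 * 604800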
+
--- a/MoinMoin/support/xapwrap/index.py	Sun Aug 27 14:48:06 2006 +0200
+++ b/MoinMoin/support/xapwrap/index.py	Sun Aug 27 15:29:01 2006 +0200
@@ -562,7 +562,8 @@
                batchSize = MAX_DOCS_TO_RETURN,
                sortIndex = None, sortAscending = True,
                sortByRelevence = False,
-               valuesWanted = None):
+               valuesWanted = None,
+               collapseKey = None):
         """
         Search an index.
 
@@ -586,6 +587,8 @@
         # the only thing we use sortKey for is to set sort index
         if sortKey is not None:
             sortIndex = self.indexValueMap[sortKey]
+        if collapseKey is not None:
+            collapseKey = self.indexValueMap[collapseKey]
 
         # once you call set_sorting on an Enquire instance, there is no
         # way to resort it by relevence, so we have to open a new
@@ -612,11 +615,16 @@
                     del self._searchSessions[qString]
                     self._searchSessions[qString] = (self.enquire(q), None)
                     enq, lastIndexSortedBy = self._searchSessions[qString]
-            if sortIndex is not None:
+            if sortByRelevence and sortIndex is not None:
+                enq.set_sort_by_relevance_then_value(sortIndex, not sortAscending)
+            elif sortIndex is not None:
                 # It seems that we have the opposite definition of sort ascending
                 # than Xapian so we invert the ascending flag!
                 enq.set_sort_by_value(sortIndex, not sortAscending)
 
+            if collapseKey is not None:
+                enq.set_collapse_key(collapseKey)
+
             self._searchSessions[qString] = (enq, sortIndex)
 
             mset = enq.get_mset(startingIndex, batchSize)
--- a/MoinMoin/widget/html.py	Sun Aug 27 14:48:06 2006 +0200
+++ b/MoinMoin/widget/html.py	Sun Aug 27 15:29:01 2006 +0200
@@ -397,6 +397,7 @@
         'checked': None,
         'class': None,
         'disabled': None,
+        'id': None,
         'ismap': None,
         'maxlength': None,
         'name': None,
--- a/docs/CHANGES.fpletz	Sun Aug 27 14:48:06 2006 +0200
+++ b/docs/CHANGES.fpletz	Sun Aug 27 15:29:01 2006 +0200
@@ -8,9 +8,7 @@
       metadata)
 
   ToDo:
-    * Write/update documentation for all the new search stuff
-    * Search based on mtime
-    * Index all revisions and let users search in them (rev, mtime)
+    * none
 
   ToDo (low priority):
     * Reevaluate Xapwrap, possibly drop it and rip out usable stuff
@@ -29,14 +27,17 @@
         - CategorySearch: category:Homepage
         - MimetypeSearch: mimetype:image/png (for attachments/files)
         - DomainSearch: domain:underlay
-      Note: Currently only available when Xapian is used
+        - History Search: available in the advanced search UI
+      Note: some of these are currently only available when Xapian is used
     * New config options:
         xapian_search        0      enables xapian-powered search
         xapian_index_dir     None   directory for xapian indices
-        xapian_stemming      True   toggles usage of stemmer, fallback
+        xapian_stemming      1      toggles usage of stemmer, fallback
                                     to False if no stemmer installed
         search_results_per_page 10  determines how many hits should be
                                     shown on a fullsearch action
+        xapian_index_history True   indexes all revisions of pages to
+                                    allow searching in their history
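+      Example (editor's illustration for a wikiconfig.py Config class; the
+      directory is just a placeholder):
+        xapian_search = True
+        xapian_index_dir = '/path/to/xapian/index'
+        xapian_index_history = True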
   
   Bugfixes (only stuff that is buggy in moin/1.6 main branch):
     * ...
@@ -257,3 +258,26 @@
     * minor bugfixes (i18n etc.)
     * domain-specific search (underlay -> system pages)
 
+2006-08-20
+    * major fixes for mimetype & mtime search and language sorting (neither
+      really worked before)
+
+2006-08-21
+    * indexing the history of pages (all revisions) if requested:
+        xapian_index_history
+    * implemented optional history search in the advanced UI, defaults to off
+
+SOC END
+
+2006-08-22
+    ...
+
+2006-08-23 
+    ...
+
+2006-08-24 (on train from Hannover to Passau)
+    * finish some code docs
+    * added support for renaming and deleting pages (I somehow forgot :()
+    * handle deleting of attachments correctly
+    * testing of all this new stuff
+
--- a/setup.py	Sun Aug 27 14:48:06 2006 +0200
+++ b/setup.py	Sun Aug 27 15:29:01 2006 +0200
@@ -247,6 +247,7 @@
         'MoinMoin.stats',
         'MoinMoin.support',
         'MoinMoin.support.xapwrap',
+        'MoinMoin.support.parsedatetime',
         'MoinMoin.theme',
         'MoinMoin.util',
         'MoinMoin.widget',
--- a/wiki/htdocs/modern/css/common.css	Sun Aug 27 14:48:06 2006 +0200
+++ b/wiki/htdocs/modern/css/common.css	Sun Aug 27 15:29:01 2006 +0200
@@ -344,11 +344,16 @@
 .advancedsearch td.searchfor {
     font-weight: bold;
 }
+
 .advancedsearch input {
     border: 1px solid #ADB9CC;
     background-color: #fff;
 }
 
+.advancedsearch input[disabled] {
+    background-color: #eee;
+}
+
 .advancedsearch td.submit {
     border-top: 1px solid #ADB9CC;
     background-color: #fff;
@@ -385,6 +390,12 @@
     padding: 2px;
 }
 
+p.searchhint {
+    background-color: #E6EAF0;
+    border: 1px solid #9088DC;
+    padding: 2px;
+}
+
 .searchpages {
     margin-left: auto;
     margin-right: auto;