changeset 1389:09aa290e4a88

Merge with main.
author Alexander Schremmer <alex AT alexanderweb DOT de>
date Mon, 21 Aug 2006 13:22:05 +0200
parents 46812497775e (current diff) d1ceb31f3d02 (diff)
children 7a37f25b5399
files wiki/htdocs/modern/img/nav_current.png wiki/htdocs/modern/img/nav_first.png wiki/htdocs/modern/img/nav_last.png wiki/htdocs/modern/img/nav_next.png wiki/htdocs/modern/img/nav_page.png wiki/htdocs/modern/img/nav_prev.png
diffstat 31 files changed, 627 insertions(+), 244 deletions(-) [+]
line wrap: on
line diff
--- a/MoinMoin/Page.py	Mon Aug 21 13:20:44 2006 +0200
+++ b/MoinMoin/Page.py	Mon Aug 21 13:22:05 2006 +0200
@@ -872,9 +872,9 @@
             # New code should call with escape=0 to prevent the warning.
             if escape:
                 import warnings
-                warnings.warn("In moin 2.0 query string in url will not be"
-                              " escaped. See"
-                              " http://moinmoin.wikiwikiweb.de/ApiChanges")
+                warnings.warn("In moin 2.0 query string in url will not be escaped. "
+                              "See http://moinmoin.wikiwikiweb.de/ApiChanges. "
+                              "%s" % querystr)
                 querystr = wikiutil.escape(querystr)
 
             # make action URLs denyable by robots.txt:
@@ -1209,14 +1209,6 @@
 
             # send the page header
             if self.default_formatter:
-                querydict = {
-                    'action': 'fullsearch',
-                    'value': 'linkto:"%s"' % self.page_name,
-                    'context' : '180',
-                }
-                link = self.url(request, querydict)
-
-                title = self.split_title(request)
                 if self.rev:
                     msg = "<strong>%s</strong><br>%s" % (
                         _('Revision %(rev)d as of %(date)s') % {
@@ -1243,7 +1235,9 @@
                     request.user.addTrail(self.page_name)
                     trail = request.user.getTrail()
 
-                request.theme.send_title(title,  page=self, link=link, msg=msg,
+                title = self.split_title(request)
+
+                request.theme.send_title(title,  page=self, msg=msg,
                                     pagename=self.page_name, print_mode=print_mode,
                                     media=media, pi_refresh=pi_refresh,
                                     allow_doubleclick=1, trail=trail,
--- a/MoinMoin/action/RenamePage.py	Mon Aug 21 13:20:44 2006 +0200
+++ b/MoinMoin/action/RenamePage.py	Mon Aug 21 13:22:05 2006 +0200
@@ -54,7 +54,7 @@
 
     def do_action_finish(self, success):
         if success:
-            url = Page(self.request, self.newpagename).url(self.request)
+            url = Page(self.request, self.newpagename).url(self.request, escape=0, relative=False)
             self.request.http_redirect(url)
             self.request.finish()
         else:
--- a/MoinMoin/action/RenderAsDocbook.py	Mon Aug 21 13:20:44 2006 +0200
+++ b/MoinMoin/action/RenderAsDocbook.py	Mon Aug 21 13:22:05 2006 +0200
@@ -8,6 +8,6 @@
 from MoinMoin.Page import Page
 
 def execute(pagename, request):
-    url = Page(request, pagename).url(request, {'action': 'show', 'mimetype': 'text/docbook'}, escape=0)
+    url = Page(request, pagename).url(request, {'action': 'show', 'mimetype': 'text/docbook'}, escape=0, relative=False)
     request.http_redirect(url)
 
--- a/MoinMoin/action/__init__.py	Mon Aug 21 13:20:44 2006 +0200
+++ b/MoinMoin/action/__init__.py	Mon Aug 21 13:22:05 2006 +0200
@@ -91,8 +91,9 @@
             for convenience we give him some pre-assembled html for the buttons.
         """
         _ = self._
+        f = self.request.formatter
         prompt = _("Execute action %(actionname)s?") % {'actionname': self.actionname}
-        return "<p>%s</p>%s" % (prompt, buttons_html)
+        return f.paragraph(1) + f.text(prompt) + f.paragraph(0) + f.rawHTML(buttons_html)
 
     def make_buttons(self):
         """ return a list of form buttons for the action form """
@@ -277,7 +278,7 @@
 def do_goto(pagename, request):
     """ redirect to another page """
     target = request.form.get('target', [''])[0]
-    request.http_redirect(Page(request, target).url(request, escape=0))
+    request.http_redirect(Page(request, target).url(request, escape=0, relative=False))
 
 def do_userform(pagename, request):
     """ save data posted from UserPreferences """
--- a/MoinMoin/action/diff.py	Mon Aug 21 13:20:44 2006 +0200
+++ b/MoinMoin/action/diff.py	Mon Aug 21 13:22:05 2006 +0200
@@ -117,49 +117,48 @@
             oldrev2 = 0 # XXX
 
     edit_count = abs(oldcount1 - oldcount2)
-
-    # this should use the formatter, but there is none?
-    request.write('<div id="content">\n') # start content div
-    request.write('<p class="diff-header">')
-    request.write(_('Differences between revisions %d and %d') % (oldpage.get_real_rev(), newpage.get_real_rev()))
+    f = request.formatter
+    request.write(f.div(1, id="content"))
+    request.write(f.paragraph(1, css_class="diff-header"))
+    request.write(f.text(_('Differences between revisions %d and %d') % (oldpage.get_real_rev(), newpage.get_real_rev())))
     if edit_count > 1:
-        request.write(' ' + _('(spanning %d versions)') % (edit_count,))
-    request.write('</p>')
+        request.write(f.text(' ' + _('(spanning %d versions)') % (edit_count,)))
+    request.write(f.paragraph(0))
 
     if request.user.show_fancy_diff:
         from MoinMoin.util import diff_html
-        request.write(diff_html.diff(request, oldpage.get_raw_body(), newpage.get_raw_body()))
+        request.write(f.rawHTML(diff_html.diff(request, oldpage.get_raw_body(), newpage.get_raw_body())))
         newpage.send_page(request, count_hit=0, content_only=1, content_id="content-below-diff")
     else:
         from MoinMoin.util import diff_text
         lines = diff_text.diff(oldpage.getlines(), newpage.getlines())
         if not lines:
-            msg = _("No differences found!")
+            msg = f.text(_("No differences found!"))
             if edit_count > 1:
-                msg = msg + '<p>' + _('The page was saved %(count)d times, though!') % {
-                    'count': edit_count}
+                msg = msg + f.paragraph(1) + f.text(_('The page was saved %(count)d times, though!') % {
+                    'count': edit_count}) + f.paragraph(0)
             request.write(msg)
         else:
             if ignorews:
-                request.write(_('(ignoring whitespace)') + '<br>')
+                request.write(f.text(_('(ignoring whitespace)')), f.linebreak())
             else:
                 qstr = {'action': 'diff', 'ignorews': '1', }
                 if rev1:
                     qstr['rev1'] = str(rev1)
                 if rev2:
                     qstr['rev2'] = str(rev2)
-                request.write(Page(request, pagename).link_to(request,
+                request.write(f.paragraph(1), Page(request, pagename).link_to(request,
                     text=_('Ignore changes in the amount of whitespace'),
-                    querystr=qstr, rel='nofollow') + '<p>')
+                    querystr=qstr, rel='nofollow'), f.paragraph(0))
 
-            request.write('<pre>')
+            request.write(f.preformatted(1))
             for line in lines:
                 if line[0] == "@":
-                    request.write('<hr>')
-                request.write(wikiutil.escape(line)+'\n')
-            request.write('</pre>')
+                    request.write(f.rule(1))
+                request.write(f.text(wikiutil.escape(line)+'\n'))
+            request.write(f.preformatted(0))
 
-    request.write('</div>\n') # end content div
+    request.write(f.div(0)) # end content div
     request.theme.send_footer(pagename)
     request.theme.send_closing_html()
 
--- a/MoinMoin/action/fullsearch.py	Mon Aug 21 13:20:44 2006 +0200
+++ b/MoinMoin/action/fullsearch.py	Mon Aug 21 13:22:05 2006 +0200
@@ -8,6 +8,7 @@
     @license: GNU GPL, see COPYING for details.
 """
 
+import re
 from MoinMoin.Page import Page
 from MoinMoin import wikiutil
 
@@ -25,16 +26,26 @@
     except ValueError:
         return True
     except KeyError:
-        return 'fullsearch' not in request.form
+        return 'fullsearch' not in request.form and \
+                not isAdvancedSearch(request)
 
+def isAdvancedSearch(request):
+    try:
+        return int(request.form['advancedsearch'][0])
+    except KeyError:
+        return False
 
 def execute(pagename, request, fieldname='value', titlesearch=0):
     _ = request.getText
     titlesearch = isTitleSearch(request)
+    advancedsearch = isAdvancedSearch(request)
 
     # context is relevant only for full search
     if titlesearch:
         context = 0
+    elif advancedsearch:
+        # XXX: hardcoded
+        context = 180
     else:
         context = int(request.form.get('context', [0])[0])
 
@@ -46,6 +57,42 @@
 
     max_context = 1 # only show first `max_context` contexts XXX still unused
 
+    if advancedsearch:
+        and_terms = request.form.get('and_terms', [''])[0].strip()
+        or_terms = request.form.get('or_terms', [''])[0].strip()
+        not_terms = request.form.get('not_terms', [''])[0].strip()
+        #xor_terms = request.form.get('xor_terms', [''])[0].strip()
+        categories = request.form.get('categories', [''])[0].strip()
+        timeframe = request.form.get('time', [''])[0].strip()
+        language = request.form.get('language',
+                [request.cfg.language_default])[0]
+        mimetype = request.form.get('mimetype', [0])[0]
+        includeunderlay = request.form.get('includeunderlay', [0])[0]
+        onlysystempages = request.form.get('onlysystempages', [0])[0]
+        mtime = request.form.get('mtime', [''])[0]
+        
+        word_re = re.compile(r'(\"[\w\s]+"|\w+)')
+        needle = ''
+        if language:
+            needle += 'language:%s ' % language
+        if mimetype:
+            needle += 'mimetype:%s ' % mimetype
+        if not includeunderlay:
+            needle += '-domain:underlay '
+        if onlysystempages:
+            needle += 'domain:system '
+        if mtime:
+            needle += 'lastmodifiedsince:%s ' % mtime
+        if categories:
+            needle += '(%s) ' % ' or '.join(['category:%s' % cat
+                for cat in word_re.findall(categories)])
+        if and_terms:
+            needle += '(%s) ' % and_terms
+        if not_terms:
+            needle += '(%s) ' % ' '.join(['-%s' % t for t in word_re.findall(not_terms)])
+        if or_terms:
+            needle += '(%s) ' % ' or '.join(word_re.findall(or_terms))
+
     # check for sensible search term
     striped = needle.strip()
     if len(striped) == 0:
@@ -54,6 +101,7 @@
         request.emit_http_headers()
         Page(request, pagename).send_page(request, msg=err)
         return
+    needle = striped
 
     # Setup for type of search
     if titlesearch:
@@ -76,8 +124,7 @@
         page = results.hits[0]
         if not page.attachment: # we did not find an attachment
             page = Page(request, page.page_name)
-            # TODO: remove escape=0 in 2.0
-            url = page.url(request, querystr={'highlight': query.highlight_re()}, escape=0)
+            url = page.url(request, querystr={'highlight': query.highlight_re()}, escape=0, relative=False)
             request.http_redirect(url)
             return
 
--- a/MoinMoin/action/info.py	Mon Aug 21 13:20:44 2006 +0200
+++ b/MoinMoin/action/info.py	Mon Aug 21 13:22:05 2006 +0200
@@ -23,19 +23,23 @@
 
     def general(page, pagename, request):
         _ = request.getText
-
-        request.write('<h2>%s</h2>\n' % _('General Information'))
+        f = request.formatter
 
-        # show page size
-        request.write(("<p>%s</p>" % _("Page size: %d")) % page.size())
+        request.write(f.heading(1, 1),
+                      f.text(_('General Information')),
+                      f.heading(0, 1))
 
-        # show SHA digest fingerprint
+        request.write(f.paragraph(1),
+                      f.text(_("Page size: %d") % page.size()),
+                      f.paragraph(0))
+
         import sha
         digest = sha.new(page.get_raw_body().encode(config.charset)).hexdigest().upper()
-        request.write('<p>%(label)s <tt>%(value)s</tt></p>' % {
-            'label': _("SHA digest of this page's content is:"),
-            'value': digest,
-            })
+        request.write(f.paragraph(1),
+                      f.rawHTML('%(label)s <tt>%(value)s</tt>' % {
+                          'label': _("SHA digest of this page's content is:"),
+                          'value': digest, }),
+                      f.paragraph(0))
 
         # show attachments (if allowed)
         attachment_info = action.getHandler(request, 'AttachFile', 'info')
@@ -45,25 +49,28 @@
         # show subscribers
         subscribers = page.getSubscribers(request, include_self=1, return_users=1)
         if subscribers:
-            request.write('<p>', _('The following users subscribed to this page:'))
+            request.write(f.paragraph(1))
+            request.write(f.text(_('The following users subscribed to this page:')))
             for lang in subscribers.keys():
-                request.write('<br>[%s] ' % lang)
+                request.write(f.linebreak(), f.text('[%s] ' % lang))
                 for user in subscribers[lang]:
                     # do NOT disclose email addr, only WikiName
                     userhomepage = Page(request, user.name)
                     if userhomepage.exists():
-                        request.write(userhomepage.link_to(request) + ' ')
+                        request.write(f.rawHTML(userhomepage.link_to(request) + ' '))
                     else:
-                        request.write(user.name + ' ')
-            request.write('</p>')
+                        request.write(f.text(user.name + ' '))
+            request.write(f.paragraph(0))
 
         # show links
         links = page.getPageLinks(request)
         if links:
-            request.write('<p>', _('This page links to the following pages:'), '<br>')
+            request.write(f.paragraph(1))
+            request.write(f.text(_('This page links to the following pages:')))
+            request.write(f.linebreak())
             for linkedpage in links:
-                request.write("%s%s " % (Page(request, linkedpage).link_to(request), ",."[linkedpage == links[-1]]))
-            request.write("</p>")
+                request.write(f.rawHTML("%s%s " % (Page(request, linkedpage).link_to(request), ",."[linkedpage == links[-1]])))
+            request.write(f.paragraph(0))
 
     def history(page, pagename, request):
         # show history as default
@@ -206,6 +213,7 @@
     # this will be automatically fixed.
     lang = page.language or request.cfg.language_default
     request.setContentLanguage(lang)
+    f = request.formatter
 
     request.theme.send_title(_('Info for "%s"') % (title,), pagename=pagename)
     menu_items = [
@@ -216,11 +224,11 @@
         (_('Show "%(title)s"') % {'title': _('Page hits and edits')},
          {'action': 'info', 'hitcounts': '1'}),
     ]
-    request.write('<div id="content">\n') # start content div
-    request.write("<p>")
+    request.write(f.div(1, id="content")) # start content div
+    request.write(f.paragraph(1))
     for text, querystr in menu_items:
         request.write("[%s] " % page.link_to(request, text=text, querystr=querystr, rel='nofollow'))
-    request.write("</p>")
+    request.write(f.paragraph(0))
 
     show_hitcounts = int(request.form.get('hitcounts', [0])[0]) != 0
     show_general = int(request.form.get('general', [0])[0]) != 0
@@ -233,7 +241,7 @@
     else:
         history(page, pagename, request)
 
-    request.write('</div>\n') # end content div
+    request.write(f.div(0)) # end content div
     request.theme.send_footer(pagename)
     request.theme.send_closing_html()
 
--- a/MoinMoin/action/newpage.py	Mon Aug 21 13:20:44 2006 +0200
+++ b/MoinMoin/action/newpage.py	Mon Aug 21 13:22:05 2006 +0200
@@ -89,7 +89,7 @@
             if parent:
                 pagename = "%s/%s" % (parent, pagename)
 
-            url = Page(self.request, pagename).url(self.request, query, escape=0)
+            url = Page(self.request, pagename).url(self.request, query, escape=0, relative=False)
             self.request.http_redirect(url)
 
         return ''
--- a/MoinMoin/formatter/text_html.py	Mon Aug 21 13:20:44 2006 +0200
+++ b/MoinMoin/formatter/text_html.py	Mon Aug 21 13:22:05 2006 +0200
@@ -6,7 +6,12 @@
     @license: GNU GPL, see COPYING for details.
 """
 import os.path, re
-from sets import Set # TODO: when we require Python 2.4+ use the builtin 'set' type
+
+try:
+    set
+except:
+    from sets import Set as set
+
 from MoinMoin.formatter import FormatterBase
 from MoinMoin import wikiutil, i18n, config
 from MoinMoin.Page import Page
@@ -16,7 +21,7 @@
 prettyprint = False
 
 # These are the HTML elements that we treat as block elements.
-_blocks = Set(['dd', 'div', 'dl', 'dt', 'form', 'h1', 'h2', 'h3', 'h4', 'h5', 'h6',
+_blocks = set(['dd', 'div', 'dl', 'dt', 'form', 'h1', 'h2', 'h3', 'h4', 'h5', 'h6',
                'hr', 'li', 'ol', 'p', 'pre', 'table', 'tbody', 'td', 'tfoot', 'th',
                'thead', 'tr', 'ul', 'blockquote', ])
 
@@ -26,30 +31,30 @@
 # content, and also IE has a parsing bug with those two elements (only)
 # when they don't have a closing tag even if valid XHTML.
 
-_self_closing_tags = Set(['area', 'base', 'br', 'col', 'frame', 'hr', 'img', 'input',
-                          'isindex', 'link', 'meta', 'param'])
+_self_closing_tags = set(['area', 'base', 'br', 'col', 'frame', 'hr', 'img',
+                          'input', 'isindex', 'link', 'meta', 'param'])
 
 # We only open those tags and let the browser auto-close them:
-_auto_closing_tags = Set(['p'])
+_auto_closing_tags = set(['p'])
 
 # These are the elements which generally should cause an increase in the
 # indention level in the html souce code.
-_indenting_tags = Set(['ol', 'ul', 'dl', 'li', 'dt', 'dd', 'tr', 'td'])
+_indenting_tags = set(['ol', 'ul', 'dl', 'li', 'dt', 'dd', 'tr', 'td'])
 
 # These are the elements that discard any whitespace they contain as
 # immediate child nodes.
-_space_eating_tags = Set(['colgroup', 'dl', 'frameset', 'head', 'map' 'menu',
+_space_eating_tags = set(['colgroup', 'dl', 'frameset', 'head', 'map' 'menu',
                           'ol', 'optgroup', 'select', 'table', 'tbody', 'tfoot',
                           'thead', 'tr', 'ul'])
 
 # These are standard HTML attributes which are typically used without any
 # value; e.g., as boolean flags indicated by their presence.
-_html_attribute_boolflags = Set(['compact', 'disabled', 'ismap', 'nohref',
+_html_attribute_boolflags = set(['compact', 'disabled', 'ismap', 'nohref',
                                  'noresize', 'noshade', 'nowrap', 'readonly',
                                  'selected', 'wrap'])
 
 # These are all the standard HTML attributes that are allowed on any element.
-_common_attributes = Set(['accesskey', 'class', 'dir', 'disabled', 'id', 'lang',
+_common_attributes = set(['accesskey', 'class', 'dir', 'disabled', 'id', 'lang',
                           'style', 'tabindex', 'title'])
 
 
--- /dev/null	Thu Jan 01 00:00:00 1970 +0000
+++ b/MoinMoin/macro/AdvancedSearch.py	Mon Aug 21 13:22:05 2006 +0200
@@ -0,0 +1,136 @@
+# -*- coding: iso-8859-1 -*-
+'''
+    MoinMoin - AdvancedSearch Macro
+
+    [[AdvancedSearch]]
+        displays advanced search dialog.
+
+    MAYBE:
+    [[AdvancedSearch(Help)]]
+        embed results of an advanced search (use more parameters...)
+'''
+
+from MoinMoin import config, wikiutil, search
+from MoinMoin.i18n import languages
+
+import mimetypes
+
+Dependencies = ['pages']
+
+try:
+    sorted
+except NameError:
+    def sorted(l, *args, **kw):
+        l = l[:]
+        l.sort(*args, **kw)
+        return l
+
+def advanced_ui(macro):
+    _ = macro._
+    f = macro.formatter
+
+    search_boxes = ''.join([
+        f.table_row(1),
+        f.table_cell(1, attrs={'rowspan': '6', 'class': 'searchfor'}),
+        f.text(_('Search for pages')),
+        f.table_cell(0),
+        ''.join([''.join([
+            f.table_row(1),
+            f.table_cell(1),
+            f.text(_(txt)),
+            f.table_cell(0),
+            f.table_cell(1),
+            f.rawHTML(input_field),
+            f.table_cell(0),
+            f.table_row(0),
+        ]) for txt, input_field in (
+            (_('containing all the following terms'),
+                '<input type="text" name="and_terms" size="30">'),
+            (_('containing one or more of the following terms'),
+                '<input type="text" name="or_terms" size="30">'),
+            (_('not containing the following terms'),
+                '<input type="text" name="not_terms" size="30">'),
+            #('containing only one of the following terms',
+            #    '<input type="text" name="xor_terms" size="30">'),
+            # TODO: dropdown-box?
+            (_('belonging to one of the following categories'),
+                '<input type="text" name="categories" size="30">'),
+            (_('last modified since (XXX)'),
+                '<input type="text" name="mtime" size="30" value="">'),
+        )])
+    ])
+
+    langs = dict([(lang, lmeta['x-language-in-english'])
+        for lang, lmeta in sorted(languages.items())])
+    lang_dropdown = ''.join([
+        u'<select name="language" size="1">',
+        u'<option value="" selected>%s</option>' % _('any language'),
+        ''.join(['<option value="%s">%s</option>' % lt for lt in
+            langs.items()]),
+        u'</select>',
+    ])
+
+    ft_dropdown = ''.join([
+        u'<select name="mimetype" size="1">',
+        u'<option value="" selected>%s</option>' % _('any type'),
+        ''.join(['<option value="%s">%s</option>' % (m[1], '*%s - %s' % m)
+            for m in sorted(mimetypes.types_map.items())]),
+        u'</select>',
+    ])
+
+    search_options = ''.join([
+        ''.join([
+            f.table_row(1),
+            f.table_cell(1, attrs={'class': 'searchfor'}),
+            txt[0],
+            f.table_cell(0),
+            f.table_cell(1, colspan=2),
+            txt[1],
+            f.table_cell(0),
+            f.table_row(0),
+            ]) for txt in (
+                (_('Language'), lang_dropdown),
+                (_('File Type'), ft_dropdown),
+                ('', '<input type="checkbox" name="titlesearch" value="1">%s</input>' %
+                _('Search only in titles')),
+                ('', '<input type="checkbox" name="case" value="1">%s</input>' %
+                _('Case-sensitive search')),
+                ('', '<input type="checkbox" name="includeunderlay" value="1" checked>%s'
+                    '</input>' % _('Include underlay')),
+                ('', '<input type="checkbox" name="onlysystempages" value="1">%s'
+                    '</input>' % _('Only system pages')),
+            )
+    ])
+    
+    html = [
+        u'<form method="get" action="">',
+        u'<div>',
+        u'<input type="hidden" name="action" value="fullsearch">',
+        u'<input type="hidden" name="advancedsearch" value="1">',
+        f.table(1, attrs={'tableclass': 'advancedsearch'}),
+        search_boxes,
+        search_options,
+        f.table_row(1),
+        f.table_cell(1, attrs={'class': 'submit', 'colspan': '3'}),
+        u'<input type="submit" value="%s">' % _('Go get it!'),
+        f.table_cell(0),
+        f.table_row(0),
+        f.table(0),
+        u'</div>',
+        u'</form>',
+    ]
+
+    return f.rawHTML('\n'.join(html))
+
+
+def execute(macro, needle):
+    request = macro.request
+    _ = request.getText
+
+    # no args given
+    if needle is None:
+        return advanced_ui(macro)
+
+    return macro.formatter.rawHTML('wooza!')
+        
+
--- a/MoinMoin/macro/FullSearch.py	Mon Aug 21 13:20:44 2006 +0200
+++ b/MoinMoin/macro/FullSearch.py	Mon Aug 21 13:22:05 2006 +0200
@@ -23,7 +23,8 @@
     context argument, or make another macro that use context, which may
     be easier to use.
 
-    @copyright: 2000-2004 by Jürgen Hermann <jh@web.de>
+    @copyright: 2000-2004 by Jürgen Hermann <jh@web.de>,
+                2005 MoinMoin:FranzPletz
     @license: GNU GPL, see COPYING for details.
 """
 
--- a/MoinMoin/macro/MonthCalendar.py	Mon Aug 21 13:20:44 2006 +0200
+++ b/MoinMoin/macro/MonthCalendar.py	Mon Aug 21 13:22:05 2006 +0200
@@ -334,16 +334,16 @@
         while st < l:
             ch = parmpagename[0][st:st+chstep]
             r, g, b = cliprgb(r, g, b)
-            pagelinks = pagelinks + '<a style="%s" href="%s">%s</a>' % \
-                ('background-color:#%02x%02x%02x;color:#000000;text-decoration:none' % \
-                    (r, g, b), Page(request, parmpagename[0]).url(request), ch)
+            link = Page(request, parmpagename[0]).link_to(request, ch,
+                        style='background-color:#%02x%02x%02x;color:#000000;text-decoration:none' % (r, g, b))
+            pagelinks = pagelinks + link
             r, g, b = (r, g+colorstep, b)
             st = st + chstep
         r, g, b = (255-colorstep, 255, 255-colorstep)
         for page in parmpagename[1:]:
-            pagelinks = pagelinks + '*<a style="%s" href="%s">%s</a>' % \
-                            ('background-color:#%02x%02x%02x;color:#000000;text-decoration:none' % \
-                                (r, g, b), Page(request, page).url(request), page)
+            link = Page(request, page).link_to(request, page,
+                        style='background-color:#%02x%02x%02x;color:#000000;text-decoration:none' % (r, g, b))
+            pagelinks = pagelinks + '*' + link
         showpagename = '   %s<BR>\n' % pagelinks
     else:
         showpagename = ''
--- a/MoinMoin/macro/OrphanedPages.py	Mon Aug 21 13:20:44 2006 +0200
+++ b/MoinMoin/macro/OrphanedPages.py	Mon Aug 21 13:22:05 2006 +0200
@@ -24,23 +24,26 @@
             if link in orphaned:
                 del orphaned[link]
 
-    # check for the extreme case
-    if not orphaned:
-        return "<p>%s</p>" % _("No orphaned pages in this wiki.")
-
-    # return a list of page links
-    orphanednames = orphaned.keys()
-    orphanednames.sort()
     result = []
-    result.append(macro.formatter.number_list(1))
-    for name in orphanednames:
-        if not name: continue
-        result.append(macro.formatter.listitem(1))
-        result.append(macro.formatter.pagelink(1, name, generated=1))
-        result.append(macro.formatter.text(name))
-        result.append(macro.formatter.pagelink(0, name))
-        result.append(macro.formatter.listitem(0))
-    result.append(macro.formatter.number_list(0))
+    f = macro.formatter
+    if not orphaned:
+        result.append(f.paragraph(1))
+        result.append(f.text(_("No orphaned pages in this wiki.")))
+        result.append(f.paragraph(0))
+    else:
+        # return a list of page links
+        orphanednames = orphaned.keys()
+        orphanednames.sort()
+        result.append(f.number_list(1))
+        for name in orphanednames:
+            if not name:
+                continue
+            result.append(f.listitem(1))
+            result.append(f.pagelink(1, name, generated=1))
+            result.append(f.text(name))
+            result.append(f.pagelink(0, name))
+            result.append(f.listitem(0))
+        result.append(f.number_list(0))
 
     return ''.join(result)
 
--- a/MoinMoin/macro/SystemAdmin.py	Mon Aug 21 13:20:44 2006 +0200
+++ b/MoinMoin/macro/SystemAdmin.py	Mon Aug 21 13:22:05 2006 +0200
@@ -23,7 +23,6 @@
     if not request.user.isSuperUser():
         return ''
 
-    result = []
     _MENU = {
         'attachments': (("File attachment browser"), do_admin_browser),
         'users': (("User account browser"), do_user_browser),
@@ -31,21 +30,24 @@
     choice = request.form.get('sysadm', [None])[0]
 
     # create menu
-    menuitems = [(label, id) for id, (label, handler) in _MENU.items()]
+    menuitems = [(label, fnid) for fnid, (label, handler) in _MENU.items()]
     menuitems.sort()
-    for label, id in menuitems:
-        if id == choice:
-            result.append(macro.formatter.strong(1))
-            result.append(macro.formatter.text(label))
-            result.append(macro.formatter.strong(0))
+    result = []
+    f = macro.formatter
+    for label, fnid in menuitems:
+        if fnid == choice:
+            result.append(f.strong(1))
+            result.append(f.text(label))
+            result.append(f.strong(0))
         else:
-            result.append(wikiutil.link_tag(request, "%s?sysadm=%s" % (macro.formatter.page.page_name, id), label))
-        result.append('<br>')
-    result.append('<br>')
+            #result.append(wikiutil.link_tag(request, "%s?sysadm=%s" % (macro.formatter.page.page_name, id), label))
+            result.append(f.page.link_to(request, label, querystr={'sysadm': fnid}))
+        result.append(f.linebreak())
+    result.append(f.linebreak())
 
     # add chosen content
     if _MENU.has_key(choice):
-        result.append(_MENU[choice][1](request))
+        result.append(f.rawHTML(_MENU[choice][1](request)))
 
-    return macro.formatter.rawHTML(''.join(result))
+    return ''.join(result)
 
--- a/MoinMoin/macro/WantedPages.py	Mon Aug 21 13:20:44 2006 +0200
+++ b/MoinMoin/macro/WantedPages.py	Mon Aug 21 13:22:05 2006 +0200
@@ -25,7 +25,9 @@
     # TODO: we should make this a widget and use on all page listing pages
     label = (_('Include system pages'), _('Exclude system pages'))[allpages]
     page = macro.formatter.page
-    controlbar = '<div class="controlbar">%s</div>' % page.link_to(request, label, querystr={'allpages': '%d' % allpages and '0' or '1'})
+    controlbar = macro.formatter.div(1, css_class="controlbar") + \
+                 page.link_to(request, label, querystr={'allpages': '%d' % allpages and '0' or '1'}) + \
+                 macro.formatter.div(0)
 
     # Get page dict readable by current user
     pages = request.rootpage.getPageDict()
--- a/MoinMoin/mail/mailimport.py	Mon Aug 21 13:20:44 2006 +0200
+++ b/MoinMoin/mail/mailimport.py	Mon Aug 21 13:22:05 2006 +0200
@@ -235,7 +235,7 @@
                 break
 
     # build an attachment link table for the page with the e-mail
-    attachment_links = [""] + [u'''[attachment:"%s/%s" %s]''' % (pagename, att, att) for att in attachments]
+    attachment_links = [""] + [u'''[attachment:%s %s]''' % (wikiutil.quoteName("%s/%s" % (pagename, att)), att) for att in attachments]
 
     # assemble old page content and new mail body together
     old_content = Page(request, pagename).get_raw_body()
@@ -280,7 +280,7 @@
         from_col = email_to_markup(request, msg['from_addr'])
         to_col = ' '.join([email_to_markup(request, (realname, mailaddr))
                            for realname, mailaddr in msg['target_addrs'] if mailaddr != wiki_address])
-        subj_col = '["%s" %s]' % (pagename, msg['subject'])
+        subj_col = '[%s %s]' % (wikiutil.quoteName(pagename), msg['subject'])
         date_col = msg['date']
         attach_col = " ".join(attachment_links)
         new_line = u'|| %s || %s || %s || [[DateTime(%s)]] || %s ||' % (from_col, to_col, subj_col, date_col, attach_col)
--- a/MoinMoin/parser/text_moin_wiki.py	Mon Aug 21 13:20:44 2006 +0200
+++ b/MoinMoin/parser/text_moin_wiki.py	Mon Aug 21 13:22:05 2006 +0200
@@ -2,7 +2,8 @@
 """
     MoinMoin - MoinMoin Wiki Markup Parser
 
-    @copyright: 2000, 2001, 2002 by Jürgen Hermann <jh@web.de>
+    @copyright: 2000, 2001, 2002 by Jürgen Hermann <jh@web.de>,
+                2006 by MoinMoin:ThomasWaldmann
     @license: GNU GPL, see COPYING for details.
 """
 
@@ -38,9 +39,9 @@
     q_string = ur"(%s|%s)" % (sq_string, dq_string) # quoted string
     attachment_schemas = ["attachment", "inline", "drawing"]
     punct_pattern = re.escape(u'''"\'}]|:,.)?!''')
-    punct_no_quote_pattern = re.escape(u'''\}]|:,.)?!''')
+    punct_no_quote_pattern = re.escape(u'''}]|:,.)?!''')
     url_pattern = (u'http|https|ftp|nntp|news|mailto|telnet|wiki|file|irc|' +
-            u'|'.join(attachment_schemas) + 
+            u'|'.join(attachment_schemas) +
             (config.url_schemas and u'|' + u'|'.join(config.url_schemas) or ''))
 
     # some common rules
@@ -51,7 +52,7 @@
         'parent': ur'(?:%s)?' % re.escape(PARENT_PREFIX),
     }
     url_rule = ur'%(url_guard)s(%(url)s)\:(([^\s\<%(punct)s]|([%(punctnq)s][^\s\<%(punct)s]))+|%(q_string)s)' % {
-        'url_guard': u'(^|(?<!\w))',
+        'url_guard': ur'(^|(?<!\w))',
         'url': url_pattern,
         'punct': punct_pattern,
         'punctnq': punct_no_quote_pattern,
@@ -143,7 +144,7 @@
         # holds the nesting level (in chars) of open lists
         self.list_indents = []
         self.list_types = []
-        
+
         self.formatting_rules = self.formatting_rules % {'macronames': u'|'.join(macro.getNames(self.cfg))}
 
     def _close_item(self, result):
@@ -182,7 +183,7 @@
             #self.request.log("interwiki: join_wiki -> %s.%s.%s" % (wikiurl,pagename,href))
             return self.formatter.image(src=href)
 
-        return (self.formatter.interwikilink(1, wikiname, pagename) + 
+        return (self.formatter.interwikilink(1, wikiname, pagename) +
                 self.formatter.text(text) +
                 self.formatter.interwikilink(0, wikiname, pagename))
 
@@ -200,7 +201,7 @@
         # check for image, and possibly return IMG tag (images are always inlined)
         if not kw.get('pretty_url', 0) and wikiutil.isPicture(fname):
             return self.formatter.attachment_image(fname)
-                
+
         # inline the attachment
         if scheme == 'inline':
             return self.formatter.attachment_inlined(fname, text)
@@ -325,7 +326,7 @@
         # handle anchors
         parts = word.split("#", 1)
         anchor = ""
-        if len(parts)==2:
+        if len(parts) == 2:
             word, anchor = parts
 
         return (self.formatter.pagelink(1, word, anchor=anchor) +
@@ -372,7 +373,7 @@
         """
         word = text[1:-1] # strip brackets
         first_char = word[0]
-        if first_char in "'\"": # this is quoted
+        if first_char in wikiutil.QUOTE_CHARS:
             # split on closing quote
             target, linktext = word[1:].split(first_char, 1)
         else: # not quoted
@@ -388,13 +389,13 @@
     def _url_bracket_repl(self, word):
         """Handle bracketed URLs."""
         word = word[1:-1] # strip brackets
-        
+
         # Local extended link? [:page name:link text] XXX DEPRECATED
         if word[0] == ':':
             words = word[1:].split(':', 1)
             if len(words) == 1:
                 words = words * 2
-            target_and_text = 'wiki:Self:"%s" %s' % tuple(words)
+            target_and_text = 'wiki:Self:%s %s' % (wikiutil.quoteName(words[0]), words[1])
             return self.interwiki(target_and_text, pretty_url=1)
 
         scheme_and_rest = word.split(":", 1)
@@ -414,7 +415,7 @@
                 return self.interwiki(word, pretty_url=1)
             if scheme in self.attachment_schemas:
                 return self.attachment(word, pretty_url=1)
-            
+
             words = word.split(None, 1)
             if len(words) == 1:
                 words = words * 2
@@ -450,7 +451,7 @@
     def _ent_symbolic_repl(self, word):
         """Handle symbolic SGML entities."""
         return self.formatter.rawHTML(word)
-    
+
     def _indent_repl(self, match):
         """Handle pure indentation (no - * 1. markup)."""
         result = []
@@ -510,59 +511,59 @@
 
     def _indent_to(self, new_level, list_type, numtype, numstart):
         """Close and open lists."""
-        open = []   # don't make one out of these two statements!
-        close = []
+        openlist = []   # don't make one out of these two statements!
+        closelist = []
 
         if self._indent_level() != new_level and self.in_table:
-            close.append(self.formatter.table(0))
+            closelist.append(self.formatter.table(0))
             self.in_table = 0
-        
+
         while self._indent_level() > new_level:
-            self._close_item(close)
+            self._close_item(closelist)
             if self.list_types[-1] == 'ol':
                 tag = self.formatter.number_list(0)
             elif self.list_types[-1] == 'dl':
                 tag = self.formatter.definition_list(0)
             else:
                 tag = self.formatter.bullet_list(0)
-            close.append(tag)
+            closelist.append(tag)
 
             del self.list_indents[-1]
             del self.list_types[-1]
-            
+
             if self.list_types: # we are still in a list
                 if self.list_types[-1] == 'dl':
                     self.in_dd = 1
                 else:
                     self.in_li = 1
-                
+
         # Open new list, if necessary
         if self._indent_level() < new_level:
             self.list_indents.append(new_level)
             self.list_types.append(list_type)
 
             if self.formatter.in_p:
-                close.append(self.formatter.paragraph(0))
-            
+                closelist.append(self.formatter.paragraph(0))
+
             if list_type == 'ol':
                 tag = self.formatter.number_list(1, numtype, numstart)
             elif list_type == 'dl':
                 tag = self.formatter.definition_list(1)
             else:
                 tag = self.formatter.bullet_list(1)
-            open.append(tag)
-            
+            openlist.append(tag)
+
             self.first_list_item = 1
             self.in_li = 0
             self.in_dd = 0
-            
+
         # If list level changes, close an open table
-        if self.in_table and (open or close):
-            close[0:0] = [self.formatter.table(0)]
+        if self.in_table and (openlist or closelist):
+            closelist[0:0] = [self.formatter.table(0)]
             self.in_table = 0
-        
+
         self.in_list = self.list_types != []
-        return ''.join(close) + ''.join(open)
+        return ''.join(closelist) + ''.join(openlist)
 
 
     def _undent(self):
@@ -725,9 +726,9 @@
                     attrs['colspan'] = '"%d"' % (word.count("|")/2)
 
             # return the complete cell markup
-            result.append(self.formatter.table_cell(1, attrs) + attrerr)         
+            result.append(self.formatter.table_cell(1, attrs) + attrerr)
             result.append(self._line_anchordef())
-            return ''.join(result) 
+            return ''.join(result)
         else:
             return self.formatter.text(word)
 
@@ -740,7 +741,7 @@
         level = 1
         while h[level:level+1] == '=':
             level += 1
-        depth = min(5,level)
+        depth = min(5, level)
 
         # this is needed for Included pages
         # TODO but it might still result in unpredictable results
@@ -755,10 +756,10 @@
             unique_id = '-%d' % self.titles[pntt]
         result = self._closeP()
         result += self.formatter.heading(1, depth, id="head-"+sha.new(pntt.encode(config.charset)).hexdigest()+unique_id)
-                                     
+
         return (result + self.formatter.text(title_text) +
                 self.formatter.heading(0, depth))
-    
+
     def _parser_repl(self, word):
         """Handle parsed code displays."""
         if word.startswith('{{{'):
@@ -823,7 +824,7 @@
         if self.formatter.in_p:
             return self.formatter.paragraph(0)
         return ''
-        
+
     def _macro_repl(self, word):
         """Handle macros ([[macroname]])."""
         macro_name = word[2:-2]
@@ -849,26 +850,26 @@
         lastpos = 0
 
         ###result.append(u'<span class="info">[scan: <tt>"%s"</tt>]</span>' % line)
-      
+
         for match in scan_re.finditer(line):
             # Add text before the match
             if lastpos < match.start():
-                
+
                 ###result.append(u'<span class="info">[add text before match: <tt>"%s"</tt>]</span>' % line[lastpos:match.start()])
-                
+
                 if not (self.inhibit_p or self.in_pre or self.formatter.in_p):
                     result.append(self.formatter.paragraph(1, css_class="line862"))
                 result.append(self.formatter.text(line[lastpos:match.start()]))
-            
+
             # Replace match with markup
             if not (self.inhibit_p or self.in_pre or self.formatter.in_p or
                     self.in_table or self.in_list):
                 result.append(self.formatter.paragraph(1, css_class="line867"))
             result.append(self.replace(match))
             lastpos = match.end()
-        
+
         ###result.append('<span class="info">[no match, add rest: <tt>"%s"<tt>]</span>' % line[lastpos:])
-        
+
         # Add paragraph with the remainder of the line
         if not (self.in_pre or self.in_li or self.in_dd or self.inhibit_p or
                 self.formatter.in_p) and lastpos < len(line):
@@ -881,17 +882,17 @@
         result = []
         for type, hit in match.groupdict().items():
             if hit is not None and not type in ["hmarker", ]:
-                
+
                 ###result.append(u'<span class="info">[replace: %s: "%s"]</span>' % (type, hit))
                 if self.in_pre and type not in ['pre', 'ent']:
-                    return self.formatter.text(hit) 
+                    return self.formatter.text(hit)
                 else:
                     # Open p for certain types
                     if not (self.inhibit_p or self.formatter.in_p
                             or self.in_pre or (type in self.no_new_p_before)):
                         result.append(self.formatter.paragraph(1, css_class="line891"))
-                    
-                    # Get replace method and replece hit
+
+                    # Get replace method and replace hit
                     replace = getattr(self, '_' + type + '_repl')
                     result.append(replace(hit))
                     return ''.join(result)
@@ -925,13 +926,13 @@
                 'word_rule': self.word_rule,
                 'rules': rules,
             }
-        self.request.clock.start('compile_huge_and_ugly')        
+        self.request.clock.start('compile_huge_and_ugly')
         scan_re = re.compile(rules, re.UNICODE)
         number_re = re.compile(self.ol_rule, re.UNICODE)
         term_re = re.compile(self.dl_rule, re.UNICODE)
-        indent_re = re.compile("^\s*", re.UNICODE)
+        indent_re = re.compile(ur"^\s*", re.UNICODE)
         eol_re = re.compile(r'\r?\n', re.UNICODE)
-        self.request.clock.stop('compile_huge_and_ugly')        
+        self.request.clock.stop('compile_huge_and_ugly')
 
         # get text and replace TABs
         rawtext = self.raw.expandtabs()
@@ -996,7 +997,7 @@
                         continue
                     if line[:endpos]:
                         self.parser_lines.append(line[:endpos])
-                    
+
                     # Close p before calling parser
                     # TODO: do we really need this?
                     self.request.write(self._closeP())
@@ -1014,7 +1015,7 @@
                 # we don't have \n as whitespace any more
                 # This is the space between lines we join to one paragraph
                 line += ' '
-                
+
                 # Paragraph break on empty lines
                 if not line.strip():
                     if self.in_table:
@@ -1064,7 +1065,7 @@
                         ## CHANGE: no automatic p on li
                         ##self.request.write(self.formatter.paragraph(1))
                         self.in_li = 1
-                        
+
                     # CHANGE: removed check for self.in_li
                     # paragraph should end before table, always!
                     if self.formatter.in_p:
@@ -1078,12 +1079,12 @@
                        line[indlen:indlen + 2] == "||" and
                        line.endswith("|| ") and
                        len(line) >= 5 + indlen)):
-                    
+
                     # Close table
                     self.request.write(self.formatter.table(0))
                     self.request.write(self._line_anchordef())
                     self.in_table = 0
-                                            
+
             # Scan line, format and write
             formatted_line = self.scan(scan_re, line)
             self.request.write(formatted_line)
@@ -1098,7 +1099,7 @@
         if self.in_table: self.request.write(self.formatter.table(0))
 
     # Private helpers ------------------------------------------------------------
-    
+
     def setParser(self, name):
         """ Set parser to parser named 'name' """
         mt = wikiutil.MimeType(name)
--- a/MoinMoin/search/Xapian.py	Mon Aug 21 13:20:44 2006 +0200
+++ b/MoinMoin/search/Xapian.py	Mon Aug 21 13:22:05 2006 +0200
@@ -174,6 +174,7 @@
         'category': 'XCAT', # category this document belongs to
         'full_title': 'XFT', # full title (for regex)
         'domain': 'XDOMAIN', # standard or underlay
+        'revision': 'XREV', # revision of page
                        #Y   year (four digits)
     }
 
@@ -350,6 +351,8 @@
             yield 'underlay'
         if page.isStandardPage():
             yield 'standard'
+        if wikiutil.isSystemPage(self.request, page.page_name):
+            yield 'system'
 
     def _index_page(self, writer, page, mode='update'):
         """ Index a page - assumes that the write lock is acquired
@@ -364,6 +367,8 @@
         pagename = page.page_name
         mtime = page.mtime_usecs()
         itemid = "%s:%s" % (wikiname, pagename)
+        revision = str(page.get_real_rev())
+        author = page.last_edit(request)['editor']
         # XXX: Hack until we get proper metadata
         language, stem_language = self._get_languages(page)
         categories = self._get_categories(page)
@@ -397,7 +402,10 @@
             xkeywords = [xapdoc.Keyword('itemid', itemid),
                     xapdoc.Keyword('lang', language),
                     xapdoc.Keyword('stem_lang', stem_language),
-                    xapdoc.Keyword('full_title', pagename.lower())]
+                    xapdoc.Keyword('full_title', pagename.lower()),
+                    xapdoc.Keyword('revision', revision),
+                    xapdoc.Keyword('author', author),
+                ]
             for pagelink in page.getPageLinks(request):
                 xkeywords.append(xapdoc.Keyword('linkto', pagelink))
             for category in categories:
@@ -452,11 +460,14 @@
                 xlanguage = xapdoc.Keyword('lang', language)
                 xstem_language = xapdoc.Keyword('stem_lang', stem_language)
                 mimetype, att_content = self.contentfilter(filename)
-                xmimetype = xapdoc.TextField('mimetype', mimetype, True)
+                xmimetype = xapdoc.Keyword('mimetype', mimetype)
                 xcontent = xapdoc.TextField('content', att_content)
-                doc = xapdoc.Document(textFields=(xcontent, xmimetype, ),
-                                      keywords=(xatt_itemid, xtitle, xlanguage, xstem_language, ),
-                                      sortFields=(xpname, xattachment, xmtime, xwname, ),
+                doc = xapdoc.Document(textFields=(xcontent, ),
+                                      keywords=(xatt_itemid, xtitle,
+                                          xlanguage, xstem_language,
+                                          xmimetype, ),
+                                      sortFields=(xpname, xattachment, xmtime,
+                                          xwname, ),
                                      )
                 doc.analyzerFactory = getWikiAnalyzerFactory(request,
                         stem_language)
--- a/MoinMoin/search/queryparser.py	Mon Aug 21 13:20:44 2006 +0200
+++ b/MoinMoin/search/queryparser.py	Mon Aug 21 13:22:05 2006 +0200
@@ -696,6 +696,134 @@
             return UnicodeQuery('%s:%s' % (prefix, pattern))
 
 
+class MimetypeSearch(BaseExpression):
+    """ Search for files belonging to a specific mimetype """
+
+    def __init__(self, pattern, use_re=False, case=True):
+        """ Init a mimetype search
+
+        @param pattern: pattern to search for, ascii string or unicode
+        @param use_re: treat pattern as re or plain text, bool
+        @param case: do case-sensitive search, bool
+        """
+        self._pattern = pattern.lower()
+        self.negated = 0
+        self.use_re = use_re
+        self.case = False       # not case-sensitive!
+        self.xapian_called = False
+        self._build_re(self._pattern, use_re=use_re, case=case)
+
+    def costs(self):
+        return 5000 # cheaper than a TextSearch
+
+    def __unicode__(self):
+        neg = self.negated and '-' or ''
+        return u'%s!"%s"' % (neg, unicode(self._pattern))
+
+    def highlight_re(self):
+        return ""
+
+    def search(self, page):
+        # We just use (and trust ;)) xapian for this.. deactivated for _moinSearch
+        if not self.xapian_called:
+            return []
+        else:
+            return [Match()]
+
+    def xapian_wanted(self):
+        return True             # only easy regexps possible
+
+    def xapian_need_postproc(self):
+        return False            # case-sensitivity would make no sense
+
+    def xapian_term(self, request, allterms):
+        self.xapian_called = True
+        prefix = Xapian.Index.prefixMap['mimetype']
+        if self.use_re:
+            # basic regex matching per term
+            terms = []
+            found = None
+            n = len(prefix)
+            for term in allterms():
+                if prefix == term[:n]:
+                    found = True
+                    if self.search_re.match(term[n:]):
+                        terms.append(term)
+                elif found:
+                    continue
+
+            if not terms:
+                return Query()
+            return Query(Query.OP_OR, terms)
+        else:
+            pattern = self._pattern
+            return UnicodeQuery('%s%s' % (prefix, pattern))
+
+
+class DomainSearch(BaseExpression):
+    """ Search for pages belonging to a specific domain """
+
+    def __init__(self, pattern, use_re=False, case=True):
+        """ Init a domain search
+
+        @param pattern: pattern to search for, ascii string or unicode
+        @param use_re: treat pattern as re or plain text, bool
+        @param case: do case-sensitive search, bool
+        """
+        self._pattern = pattern.lower()
+        self.negated = 0
+        self.use_re = use_re
+        self.case = False       # not case-sensitive!
+        self.xapian_called = False
+        self._build_re(self._pattern, use_re=use_re, case=case)
+
+    def costs(self):
+        return 5000 # cheaper than a TextSearch
+
+    def __unicode__(self):
+        neg = self.negated and '-' or ''
+        return u'%s!"%s"' % (neg, unicode(self._pattern))
+
+    def highlight_re(self):
+        return ""
+
+    def search(self, page):
+        # We just use (and trust ;)) xapian for this.. deactivated for _moinSearch
+        if not self.xapian_called:
+            return []
+        else:
+            return [Match()]
+
+    def xapian_wanted(self):
+        return True             # only easy regexps possible
+
+    def xapian_need_postproc(self):
+        return False            # case-sensitivity would make no sense
+
+    def xapian_term(self, request, allterms):
+        self.xapian_called = True
+        prefix = Xapian.Index.prefixMap['domain']
+        if self.use_re:
+            # basic regex matching per term
+            terms = []
+            found = None
+            n = len(prefix)
+            for term in allterms():
+                if prefix == term[:n]:
+                    found = True
+                    if self.search_re.match(term[n+1:]):
+                        terms.append(term)
+                elif found:
+                    continue
+
+            if not terms:
+                return Query()
+            return Query(Query.OP_OR, terms)
+        else:
+            pattern = self._pattern
+            return UnicodeQuery('%s:%s' % (prefix, pattern))
+
+
 ##############################################################################
 ### Parse Query
 ##############################################################################
@@ -782,6 +910,8 @@
         linkto = False
         lang = False
         category = False
+        mimetype = False
+        domain = False
 
         for m in modifiers:
             if "title".startswith(m):
@@ -796,6 +926,10 @@
                 lang = True
             elif "category".startswith(m):
                 category = True
+            elif "mimetype".startswith(m):
+                mimetype = True
+            elif "domain".startswith(m):
+                domain = True
 
         # oh, let's better call xapian if we encouter this nasty regexp ;)
         if not category:
@@ -808,10 +942,14 @@
 
         if category:
             obj = CategorySearch(text, use_re=regex, case=case)
+        elif mimetype:
+            obj = MimetypeSearch(text, use_re=regex, case=False)
         elif lang:
             obj = LanguageSearch(text, use_re=regex, case=False)
         elif linkto:
             obj = LinkSearch(text, use_re=regex, case=case)
+        elif domain:
+            obj = DomainSearch(text, use_re=regex, case=False)
         elif title_search:
             obj = TitleSearch(text, use_re=regex, case=case)
         else:
--- a/MoinMoin/search/results.py	Mon Aug 21 13:20:44 2006 +0200
+++ b/MoinMoin/search/results.py	Mon Aug 21 13:22:05 2006 +0200
@@ -287,7 +287,7 @@
         output = [
             formatter.paragraph(1, attr={'class': 'searchstats'}),
             _("Results %(bs)s%(hitsFrom)d - %(hitsTo)d%(be)s "
-                    "of %(aboutHits)s %(bs)s%(hits)d%(be)s results out of"
+                    "of %(aboutHits)s %(bs)s%(hits)d%(be)s results out of "
                     "about %(pages)d pages.") %
                 {'aboutHits': self.estimated_hits[0],
                     'hits': self.estimated_hits[1], 'pages': self.pages,
@@ -652,10 +652,6 @@
             return ''.join(output)
         return ''
 
-    def _img_url(self, img):
-        cfg = self.request.cfg
-        return '%s/%s/img/%s.png' % (cfg.url_prefix_static, self.request.theme.name, img)
-
     def formatPrevNextPageLinks(self, hitsFrom, hitsPerPage, hitsNum):
         """ Format previous and next page links in page
 
@@ -714,39 +710,6 @@
         return ''.join([
             f.table(1, attrs={'tableclass': 'searchpages'}),
             f.table_row(1),
-                f.table_cell(1, attrs={'class': 'prev'}),
-                # first image, previous page
-                l[0] and
-                    ''.join([
-                        f.url(1, href=page_url(cur_page-1)),
-                        f.image(self._img_url('nav_prev')),
-                        f.url(0),
-                    ]) or
-                    f.image(self._img_url('nav_first')),
-                f.table_cell(0),
-                # images for ooos, highlighted current page
-                ''.join([
-                    ''.join([
-                        f.table_cell(1),
-                        i != cur_page and f.url(1, href=page_url(i)) or '',
-                        f.image(self._img_url(i == cur_page and
-                            'nav_current' or 'nav_page')),
-                        i != cur_page and f.url(0) or '',
-                        f.table_cell(0),
-                    ]) for i in page_range
-                ]),
-                f.table_cell(1, attrs={'class': 'next'}),
-                # last image, next page
-                l[-1] and
-                    ''.join([
-                        f.url(1, href=page_url(cur_page+1)),
-                        f.image(self._img_url('nav_next')),
-                        f.url(0),
-                    ]) or
-                    f.image(self._img_url('nav_last')),
-                f.table_cell(0),
-            f.table_row(0),
-            f.table_row(1),
                 f.table_cell(1),
                 # textlinks
                 (f.table_cell(0) + f.table_cell(1)).join(l),
@@ -772,7 +735,6 @@
             f.paragraph(0),
         ])
 
-
     def querystring(self, querydict=None):
         """ Return query string, used in the page link """
         if querydict is None:
--- a/MoinMoin/theme/__init__.py	Mon Aug 21 13:20:44 2006 +0200
+++ b/MoinMoin/theme/__init__.py	Mon Aug 21 13:22:05 2006 +0200
@@ -213,19 +213,23 @@
         """
         _ = self.request.getText
         content = []
-        if d['title_link']: # having a link means we have a (linked) pagename ONLY as title, not a message title
-                            # XXX this method is rather ugly and should be improved
+        if d['title_text'] == d['page_name']: # just showing a page, no action
             curpage = ''
             segments = d['page_name'].split('/') # was: title_text
             for s in segments[:-1]:
                 curpage += s
                 content.append("<li>%s</li>" % Page(self.request, curpage).link_to(self.request, s))
                 curpage += '/'
-            content.append(('<li><a class="backlink" title="%(title)s" rel="nofollow" href="%(href)s">%(text)s</a></li>') % {
-                'title': _('Click to do a full-text search for this title'),
-                'href': d['title_link'],
-                'text': wikiutil.escape(segments[-1]),
-                })
+            link_text = segments[-1]
+            link_title = _('Click to do a full-text search for this title')
+            link_query = {
+                'action': 'fullsearch',
+                'value': 'linkto:"%s"' % d['page_name'],
+                'context': '180',
+            }
+            # we dont use d['title_link'] any more, but make it ourselves:
+            link = d['page'].link_to(self.request, link_text, querystr=link_query, title=link_title, css_class='backlink', rel='nofollow')
+            content.append(('<li>%s</li>') % link)
         else:
             content.append('<li>%s</li>' % wikiutil.escape(d['title_text']))
 
@@ -322,7 +326,9 @@
         for scheme in self.linkSchemas:
             if pagename.startswith(scheme):
                 title = wikiutil.escape(title)
-                link = '<a href="%s">%s</a>' % (pagename, title)
+                link = self.request.formatter.url(1, pagename) + \
+                       self.request.formatter.text(title) +\
+                       self.request.formatter.url(0)
                 return pagename, link
 
         # remove wiki: url prefix
@@ -841,8 +847,10 @@
         @rtype: unicode
         @return: rss href
         """
-        return (u'%s/RecentChanges?action=rss_rc&amp;ddiffs=1&amp;unique=1'
-                % self.request.getScriptname())
+        request = self.request
+        url = Page(request, 'RecentChanges').url(request, querystr={
+                'action':'rss_rc', 'ddiffs': '1', 'unique': '1', }, escape=0, relative=False)
+        return url
 
     def rsslink(self):
         """ Create rss link in head, used by FireFox
@@ -1341,9 +1349,9 @@
         if self.shouldUseRSS():
             link = [
                 u'<div class="rcrss">',
-                u'<a href="%s">' % self.rsshref(),
-                self.make_icon("rss"),
-                u'</a>',
+                self.request.formatter.url(1, self.rsshref()),
+                self.request.formatter.rawHTML(self.make_icon("rss")),
+                self.request.formatter.url(0),
                 u'</div>',
                 ]
             html += ''.join(link)
@@ -1421,7 +1429,6 @@
         current page being rendered.
         
         @param text: the title text
-        @keyword link: URL for the title
         @keyword msg: additional message (after saving)
         @keyword pagename: 'PageName'
         @keyword page: the page instance that called us.
@@ -1596,7 +1603,7 @@
 
         # If in print mode, start page div and emit the title
         if keywords.get('print_mode', 0):
-            d = {'title_text': text, 'title_link': None, 'page': page, }
+            d = {'title_text': text, 'page': page, }
             request.themedict = d
             output.append(self.startPage())
             output.append(self.interwiki(d))
@@ -1609,7 +1616,6 @@
                 'theme': self.name,
                 'script_name': scriptname,
                 'title_text': text,
-                'title_link': keywords.get('link', ''),
                 'logo_string': request.cfg.logo_string,
                 'site_name': request.cfg.sitename,
                 'page': page,
--- a/MoinMoin/wikiutil.py	Mon Aug 21 13:20:44 2006 +0200
+++ b/MoinMoin/wikiutil.py	Mon Aug 21 13:22:05 2006 +0200
@@ -490,6 +490,26 @@
             self.wlock.release()
 
 
+# Quoting of wiki names, file names, etc. (in the wiki markup) -----------------------------------
+
+QUOTE_CHARS = u"'\""
+
+def quoteName(name):
+    """ put quotes around a given name """
+    for quote_char in QUOTE_CHARS:
+        if quote_char not in name:
+            return u"%s%s%s" % (quote_char, name, quote_char)
+    else:
+        return name # XXX we need to be able to escape the quote char for worst case
+
+def unquoteName(name):
+    """ if there are quotes around the name, strip them """
+    for quote_char in QUOTE_CHARS:
+        if quote_char == name[0] == name[-1]:
+            return name[1:-1]
+    else:
+        return name
+
 #############################################################################
 ### InterWiki
 #############################################################################
@@ -594,7 +614,7 @@
         except ValueError:
             wikiname, rest = 'Self', wikiurl
     first_char = rest[0]
-    if first_char in "'\"": # quoted pagename
+    if first_char in QUOTE_CHARS: # quoted pagename
         pagename_linktext = rest[1:].split(first_char, 1)
     else: # not quoted, split on whitespace
         pagename_linktext = rest.split(None, 1)
@@ -822,7 +842,7 @@
     if re.match(Parser.word_rule + "$", pagename):
         return pagename
     else:
-        return u'["%s"]' % pagename
+        return u'["%s"]' % pagename # XXX use quoteName(pagename) later
 
 #############################################################################
 ### mimetype support
@@ -860,7 +880,6 @@
     MIMETYPES_spoil_mapping[value] = key
 
 
-# mimetype stuff ------------------------------------------------------------
 class MimeType(object):
     """ represents a mimetype like text/plain """
 
--- a/docs/CHANGES	Mon Aug 21 13:20:44 2006 +0200
+++ b/docs/CHANGES	Mon Aug 21 13:22:05 2006 +0200
@@ -138,6 +138,9 @@
       anyway), just use emit_http_headers and include a Status: XXX header.
       Method will vanish with moin 1.7. 
     * cfg.url_prefix is DEPRECATED, please use cfg.url_prefix_static.
+    * d['title_link'] is not supported any more. You can easily make that link
+      on your own in your theme, see example in MoinMoin/theme/__init__.py,
+      function "title".
 
   New Features:
     * Removed "underscore in URL" == "blank in pagename magic" - it made more
--- a/docs/CHANGES.fpletz	Mon Aug 21 13:20:44 2006 +0200
+++ b/docs/CHANGES.fpletz	Mon Aug 21 13:22:05 2006 +0200
@@ -8,12 +8,13 @@
       metadata)
 
   ToDo:
-    * Implement the new search UI
     * Write/update documentation for all the new search stuff
+    * Search based on mtime
+    * Index all revisions and let users search in them (rev, mtime)
+
+  ToDo (low priority):
     * Reevaluate Xapwrap, possibly drop it and rip out usable stuff
       (i.e. ExceptionTranslator)
-
-  ToDo (low priority):
     * Case-sensitive searches / Regexp on multiple terms: Graceful
       fallback to and/or merge with moinSearch based on nodes xapian can
       handle in the search term tree
@@ -23,10 +24,12 @@
 
   New Features:
     * Faster search thanks to Xapian
-    * Searching for languages with new prefix lang/language, i.e. lang:de
+    * New searches:
+        - LanguageSearch: language:de
+        - CategorySearch: category:Homepage
+        - MimetypeSearch: mimetype:image/png (for attachments/files)
+        - DomainSearch: domain:underlay
       Note: Currently only available when Xapian is used
-    * CategorySearch with prefix category or with the regexp previously
-      used (autodetected as CategorySearch)
     * New config options:
         xapian_search        0      enables xapian-powered search
         xapian_index_dir     None   directory for xapian indices
@@ -228,7 +231,29 @@
       -> still TODO: need real weight
 
 2006-08-10
-    * entry missing
+    * corrected range and count of results (estimated by xapian)
+    * pagelinks shown only if there are enough results
 
 2006-08-10 .. 13 no work on project
 
+2006-08-14
+    * fixed some remaining issues with the ui
+
+2006-08-15
+    * removed Moooin gfx as requested by Google
+
+2006-08-16 no work on project
+
+2006-08-17
+    * started advanced gui, new macro: AdvancedSearch
+
+2006-08-18
+    * eye-candy for advanced gui
+    * reworked fullsearch action to work with AdvancedSearch and most of
+      the 
+
+2006-08-19
+    * mimetype search works (more or less)
+    * minor bugfixes (i18n etc.)
+    * domain-specific search (underlay -> system pages)
+
--- a/wiki/htdocs/modern/css/common.css	Mon Aug 21 13:20:44 2006 +0200
+++ b/wiki/htdocs/modern/css/common.css	Mon Aug 21 13:22:05 2006 +0200
@@ -331,6 +331,35 @@
 div.codearea pre span.DiffSeparator {color: #228B22; font-weight: bold}
 
 /* Search results */
+.advancedsearch {
+    border: 1pt solid #ADB9CC;
+}
+
+.advancedsearch td {
+    vertical-align: top;
+    background-color: #E7E7E7;    
+    border: 0px;
+}
+
+.advancedsearch td.searchfor {
+    font-weight: bold;
+}
+.advancedsearch input {
+    border: 1px solid #ADB9CC;
+    background-color: #fff;
+}
+
+.advancedsearch td.submit {
+    border-top: 1px solid #ADB9CC;
+    background-color: #fff;
+    text-align: right;
+}
+
+.advancedsearch option, select {
+    border: 1px solid #ADB9CC;
+    background-color: #fff;
+}
+
 
 .searchresults dt {
     margin-top: 1em;
@@ -363,26 +392,17 @@
 
 .searchpages tr, .searchpages td {
     border: 0;
-    padding: 0;
+    padding: 5px;
     margin: 0;
     text-align: center;
     vertical-align: middle;
-    color: #a90a08;
+    color: #b93a58;
     font-weight: bold;
-}
-
-.searchpages td.prev {
-    text-align: right;
-}
-
-.searchpage td.next {
-    text-align: left;
+    font-size: 1.05em;
 }
 
 .searchpages td a, .searchpages td a:link {
-    color: #000000;
     text-decoration: underline;
-    font-weight: normal;
 }
 
 /* MonthCalendar css */
Binary file wiki/htdocs/modern/img/nav_current.png has changed
Binary file wiki/htdocs/modern/img/nav_first.png has changed
Binary file wiki/htdocs/modern/img/nav_last.png has changed
Binary file wiki/htdocs/modern/img/nav_next.png has changed
Binary file wiki/htdocs/modern/img/nav_page.png has changed
Binary file wiki/htdocs/modern/img/nav_prev.png has changed