changeset 1232:10099880cf8f

merged xapian branch
author Thomas Waldmann <tw AT waldmann-edv DOT de>
date Tue, 08 Aug 2006 08:49:47 +0200
parents 0e6266605d55 (current diff) 039e25408bac (diff)
children 79ac7ab77ea8
files MoinMoin/parser/ParserBase.py docs/CHANGES.fpletz
diffstat 92 files changed, 2171 insertions(+), 1248 deletions(-) [+]
line wrap: on
line diff
--- a/Makefile	Mon Aug 07 11:08:17 2006 +0200
+++ b/Makefile	Tue Aug 08 08:49:47 2006 +0200
@@ -33,7 +33,7 @@
 
 # Create documentation
 epydoc: patchlevel
-	@epydoc -o ../html -n MoinMoin -u http://moinmoin.wikiwikiweb.de MoinMoin
+	@epydoc -o ../html-1.6 -n MoinMoin -u http://moinmoin.wikiwikiweb.de MoinMoin
 
 # Create new underlay directory from MoinMaster
 # Should be used only on TW machine
--- a/MoinMoin/Page.py	Mon Aug 07 11:08:17 2006 +0200
+++ b/MoinMoin/Page.py	Tue Aug 08 08:49:47 2006 +0200
@@ -12,6 +12,10 @@
 from MoinMoin.logfile import eventlog
 from MoinMoin.util import filesys, timefuncs
 
+def is_cache_exception(e):
+    args = e.args
+    return not (len(args) != 1 or args[0] != 'CacheNeedsUpdate')
+
 class Page:
     """Page - Manage an (immutable) page associated with a WikiName.
        To change a page's content, use the PageEditor class.
@@ -964,22 +968,21 @@
     def send_raw(self):
         """ Output the raw page data (action=raw) """
         request = self.request
-        request.http_headers(["Content-type: text/plain;charset=%s" % config.charset])
+        request.setHttpHeader("Content-type: text/plain; charset=%s" % config.charset)
         if self.exists():
             # use the correct last-modified value from the on-disk file
             # to ensure cacheability where supported. Because we are sending
             # RAW (file) content, the file mtime is correct as Last-Modified header.
-            request.http_headers(["Last-Modified: " +
-                 timefuncs.formathttpdate(os.path.getmtime(self._text_filename()))])
-
+            request.setHttpHeader("Status: 200 OK")
+            request.setHttpHeader("Last-Modified: %s" % timefuncs.formathttpdate(os.path.getmtime(self._text_filename())))
             text = self.get_raw_body()
             text = self.encodeTextMimeType(text)
-            request.write(text)
         else:
-            request.http_headers(['Status: 404 NOTFOUND'])
-            request.setResponseCode(404)
-            request.write(u"Page %s not found." % self.page_name)
+            request.setHttpHeader('Status: 404 NOTFOUND')
+            text = u"Page %s not found." % self.page_name
 
+        request.emit_http_headers()
+        request.write(text)
 
     def send_page(self, request, msg=None, **keywords):
         """ Output the formatted page.
@@ -990,7 +993,7 @@
 
         @param request: the request object
         @param msg: if given, display message in header area
-        @keyword content_only: if 1, omit page header and footer
+        @keyword content_only: if 1, omit http headers, page header and footer
         @keyword content_id: set the id of the enclosing div
         @keyword count_hit: if 1, add an event to the log
         @keyword send_missing_page: if 1, assume that page to be sent is MissingPage
@@ -1036,14 +1039,12 @@
                 except wikiutil.PluginMissingError:
                     pass
             else:
-                raise "Plugin missing error!" # XXX what now?
+                raise NotImplementedError("Plugin missing error!") # XXX what now?
         request.formatter = self.formatter
         self.formatter.setPage(self)
         if self.hilite_re:
             self.formatter.set_highlight_re(self.hilite_re)
         
-        request.http_headers(["Content-Type: %s; charset=%s" % (self.output_mimetype, self.output_charset)])
-
         # default is wiki markup
         pi_format = self.cfg.default_markup or "wiki"
         pi_formatargs = ''
@@ -1162,25 +1163,26 @@
         doc_leader = self.formatter.startDocument(self.page_name)
         page_exists = self.exists()
         if not content_only:
-            # send the document leader
-
-            # use "nocache" headers if we're using a method that
-            # is not simply "display", or if a user is logged in
-            # (which triggers personalisation features)
-
+            request.setHttpHeader("Content-Type: %s; charset=%s" % (self.output_mimetype, self.output_charset))
             if page_exists:
+                request.setHttpHeader('Status: 200 OK')
                 if not request.cacheable or request.user.valid:
-                    request.http_headers(request.nocache)
+                    # use "nocache" headers if we're using a method that
+                    # is not simply "display", or if a user is logged in
+                    # (which triggers personalisation features)
+                    for header in request.nocache:
+                        request.setHttpHeader(header)
                 else:
                     # TODO: we need to know if a page generates dynamic content
                     # if it does, we must not use the page file mtime as last modified value
                     # XXX The following code is commented because it is incorrect for dynamic pages:
                     #lastmod = os.path.getmtime(self._text_filename())
-                    #request.http_headers(["Last-Modified: %s" % timefuncs.formathttpdate(lastmod)])
-                    request.http_headers()
+                    #request.setHttpHeader("Last-Modified: %s" % timefuncs.formathttpdate(lastmod))
+                    pass
             else:
-                request.http_headers(['Status: 404 NOTFOUND'])
-                request.setResponseCode(404)
+                request.setHttpHeader('Status: 404 NOTFOUND')
+            request.emit_http_headers()
+
             request.write(doc_leader)
 
             # send the page header
@@ -1247,7 +1249,7 @@
             except wikiutil.PluginMissingError:
                 pass
         else:
-            raise "No matching parser" # XXX what do we use if nothing at all matches?
+            raise NotImplementedError("No matching parser") # XXX what do we use if nothing at all matches?
             
         # start wiki content div
         request.write(self.formatter.startContent(content_id))
@@ -1339,7 +1341,7 @@
                     except wikiutil.PluginMissingError:
                         pass
                 else:
-                    raise "no matching parser" # XXX what now?
+                    raise NotImplementedError("no matching parser") # XXX what now?
             return getattr(parser, 'caching', False)
         return False
 
@@ -1362,11 +1364,15 @@
             try:
                 code = self.loadCache(request)
                 self.execute(request, parser, code)
-            except 'CacheNeedsUpdate':
+            except Exception, e:
+                if not is_cache_exception(e):
+                    raise
                 try:
                     code = self.makeCache(request, parser)
                     self.execute(request, parser, code)
-                except 'CacheNeedsUpdate':
+                except Exception, e:
+                    if not is_cache_exception(e):
+                        raise
                     request.log('page cache failed after creation')
                     self.format(parser)
         
@@ -1379,20 +1385,28 @@
     def execute(self, request, parser, code):
         """ Write page content by executing cache code """            
         formatter = self.formatter
-        from MoinMoin.macro import Macro
-        macro_obj = Macro(parser)        
-        # Fix __file__ when running from a zip package
-        import MoinMoin
-        if hasattr(MoinMoin, '__loader__'):
-            __file__ = os.path.join(MoinMoin.__loader__.archive, 'dummy')
-        exec code
+        request.clock.start("Page.execute")
+        try:
+            from MoinMoin.macro import Macro
+            macro_obj = Macro(parser)        
+            # Fix __file__ when running from a zip package
+            import MoinMoin
+            if hasattr(MoinMoin, '__loader__'):
+                __file__ = os.path.join(MoinMoin.__loader__.archive, 'dummy')
+    
+            try:
+                exec code
+            except "CacheNeedsUpdate": # convert the exception
+                raise Exception("CacheNeedsUpdate")
+        finally:
+            request.clock.stop("Page.execute")
 
     def loadCache(self, request):
         """ Return page content cache or raises 'CacheNeedsUpdate' """
         cache = caching.CacheEntry(request, self, self.getFormatterName(), scope='item')
         attachmentsPath = self.getPagePath('attachments', check_create=0)
         if cache.needsUpdate(self._text_filename(), attachmentsPath):
-            raise 'CacheNeedsUpdate'
+            raise Exception('CacheNeedsUpdate')
         
         import marshal
         try:
@@ -1400,11 +1414,11 @@
         except (EOFError, ValueError, TypeError):
             # Bad marshal data, must update the cache.
             # See http://docs.python.org/lib/module-marshal.html
-            raise 'CacheNeedsUpdate'
+            raise Exception('CacheNeedsUpdate')
         except Exception, err:
             request.log('fail to load "%s" cache: %s' % 
                         (self.page_name, str(err)))
-            raise 'CacheNeedsUpdate'
+            raise Exception('CacheNeedsUpdate')
 
     def makeCache(self, request, parser):
         """ Format content into code, update cache and return code """
--- a/MoinMoin/PageEditor.py	Mon Aug 07 11:08:17 2006 +0200
+++ b/MoinMoin/PageEditor.py	Tue Aug 08 08:49:47 2006 +0200
@@ -18,6 +18,12 @@
 from MoinMoin.mail import sendmail
 
 
+# used for merging
+conflict_markers = ("\n---- /!\ '''Edit conflict - other version:''' ----\n",
+                    "\n---- /!\ '''Edit conflict - your version:''' ----\n",
+                    "\n---- /!\ '''End of edit conflict''' ----\n")
+
+
 #############################################################################
 ### Javascript code for editor page
 #############################################################################
@@ -101,10 +107,7 @@
         
         # And try to merge all into one with edit conflict separators
         verynewtext = diff3.text_merge(original_text, saved_text, savetext,
-                                       allow_conflicts,
-                                       "\n---- /!\ '''Edit conflict - other version:''' ----\n",
-                                       "\n---- /!\ '''Edit conflict - your version:''' ----\n",
-                                       "\n---- /!\ '''End of edit conflict''' ----\n")
+                                       allow_conflicts, *conflict_markers)
         if verynewtext:
             self.set_raw_body(verynewtext)
             return True
@@ -141,7 +144,7 @@
 
         form = self.request.form
         _ = self._
-        self.request.http_headers(self.request.nocache)
+        self.request.emit_http_headers(self.request.nocache)
 
         raw_body = ''
         msg = None
--- a/MoinMoin/PageGraphicalEditor.py	Mon Aug 07 11:08:17 2006 +0200
+++ b/MoinMoin/PageGraphicalEditor.py	Tue Aug 08 08:49:47 2006 +0200
@@ -54,7 +54,7 @@
         request = self.request
         form = self.request.form
         _ = self._
-        self.request.http_headers(self.request.nocache)
+        self.request.emit_http_headers(self.request.nocache)
 
         raw_body = ''
         msg = None
--- a/MoinMoin/__init__.py	Mon Aug 07 11:08:17 2006 +0200
+++ b/MoinMoin/__init__.py	Tue Aug 08 08:49:47 2006 +0200
@@ -1,6 +1,6 @@
 # -*- coding: iso-8859-1 -*-
 """
-MoinMoin Version 1.6.0alpha bf18e19e618d+ tip
+MoinMoin Version 1.6.0alpha b27d720fbc8e tip
 
 @copyright: 2000-2006 by Jürgen Hermann <jh@web.de>
 @license: GNU GPL, see COPYING for details.
--- a/MoinMoin/_tests/test_request.py	Mon Aug 07 11:08:17 2006 +0200
+++ b/MoinMoin/_tests/test_request.py	Tue Aug 08 08:49:47 2006 +0200
@@ -123,30 +123,3 @@
                              'wrong date string')
 
 
-class GetPageNameFromQueryString(unittest.TestCase):
-    """ Test urls like http://netloc/wiki?pagename """
-
-    def setUp(self):
-        self.savedQuery = self.request.query_string
-
-    def tearDown(self):
-        self.request.query_string = self.savedQuery
-
-    def testAscii(self):
-        """ request: getPageNameFromQueryString: ascii """
-        name = expected = u'page name'
-        self.runTest(name, expected)
-
-    def testNonAscii(self):
-        """ request: getPageNameFromQueryString: non ascii """
-        name = expected = u'דף עברי'
-        self.runTest(name, expected)
-
-    def runTest(self, name, expected):
-        import urllib
-        # query as made by most browsers when you type the url into the
-        # location box.
-        query = urllib.quote(name.encode('utf-8'))
-        self.request.query_string = query
-        self.assertEqual(self.request.getPageNameFromQueryString(), expected)
-
--- /dev/null	Thu Jan 01 00:00:00 1970 +0000
+++ b/MoinMoin/_tests/test_wikisync.py	Tue Aug 08 08:49:47 2006 +0200
@@ -0,0 +1,36 @@
+# -*- coding: iso-8859-1 -*-
+"""
+MoinMoin - MoinMoin.wikisync tests
+
+@copyright: 2006 MoinMoin:AlexanderSchremmer
+@license: GNU GPL, see COPYING for details.
+"""
+
+from unittest import TestCase
+from MoinMoin.Page import Page
+from MoinMoin.PageEditor import PageEditor
+from MoinMoin._tests import TestConfig, TestSkipped
+
+from MoinMoin.wikisync import TagStore
+
+
+class UnsafeSyncTestcase(TestCase):
+    """ Tests various things related to syncing. Note that it is not possible
+        to create pages without cluttering page revision currently, so we have to use
+        the testwiki. """
+
+    def setUp(self):
+        if not getattr(self.request.cfg, 'is_test_wiki', False):
+            raise TestSkipped('This test needs to be run using the test wiki.')
+        self.page = PageEditor(self.request, "FrontPage")
+
+    def testBasicTagThings(self):
+        tags = TagStore(self.page)
+        self.assert_(not tags.get_all_tags())
+        tags.add(remote_wiki="foo", remote_rev=1, current_rev=2)
+        tags = TagStore(self.page) # reload
+        self.assert_(tags.get_all_tags()[0].remote_rev == 1)
+    
+    def tearDown(self):
+        tags = TagStore(self.page)
+        tags.clear()
--- a/MoinMoin/action/AttachFile.py	Mon Aug 07 11:08:17 2006 +0200
+++ b/MoinMoin/action/AttachFile.py	Tue Aug 08 08:49:47 2006 +0200
@@ -29,7 +29,7 @@
 import os, time, zipfile
 from MoinMoin import config, user, util, wikiutil, packages
 from MoinMoin.Page import Page
-from MoinMoin.util import filesys
+from MoinMoin.util import filesys, timefuncs
 
 action_name = __name__.split('.')[-1]
 
@@ -493,7 +493,7 @@
     elif request.form['do'][0] == 'savedrawing':
         if request.user.may.write(pagename):
             save_drawing(pagename, request)
-            request.http_headers()
+            request.emit_http_headers()
             request.write("OK")
         else:
             msg = _('You are not allowed to save a drawing on this page.')
@@ -541,7 +541,7 @@
 def upload_form(pagename, request, msg=''):
     _ = request.getText
 
-    request.http_headers()
+    request.emit_http_headers()
     # Use user interface language for this generated page
     request.setContentLanguage(request.lang)
     request.theme.send_title(_('Attachments for "%(pagename)s"') % {'pagename': pagename}, pagename=pagename, msg=msg)
@@ -651,19 +651,22 @@
     if not filename:
         return # error msg already sent in _access_file
 
-    mt = wikiutil.MimeType(filename=filename)
+    timestamp = timefuncs.formathttpdate(int(os.path.getmtime(fpath)))
+    if request.if_modified_since == timestamp:
+        request.emit_http_headers(["Status: 304 Not modified"])
+    else:
+        mt = wikiutil.MimeType(filename=filename)
+        request.emit_http_headers([
+            "Content-Type: %s" % mt.content_type(),
+            "Last-Modified: %s" % timestamp, # TODO maybe add a short Expires: header here?
+            "Content-Length: %d" % os.path.getsize(fpath),
+            # TODO: fix the encoding here, plain 8 bit is not allowed according to the RFCs
+            # There is no solution that is compatible to IE except stripping non-ascii chars
+            "Content-Disposition: attachment; filename=\"%s\"" % filename.encode(config.charset),
+        ])
 
-    # send header
-    request.http_headers([
-        "Content-Type: %s" % mt.content_type(),
-        "Content-Length: %d" % os.path.getsize(fpath),
-        # TODO: fix the encoding here, plain 8 bit is not allowed according to the RFCs
-        # There is no solution that is compatible to IE except stripping non-ascii chars
-        "Content-Disposition: attachment; filename=\"%s\"" % filename.encode(config.charset),
-    ])
-
-    # send data
-    shutil.copyfileobj(open(fpath, 'rb'), request, 8192)
+        # send data
+        shutil.copyfileobj(open(fpath, 'rb'), request, 8192)
 
 def install_package(pagename, request):
     _ = request.getText
@@ -824,7 +827,7 @@
     if not filename: return
 
     # send header & title
-    request.http_headers()
+    request.emit_http_headers()
     # Use user interface language for this generated page
     request.setContentLanguage(request.lang)
     title = _('attachment:%(filename)s of %(pagename)s', formatted=True) % {
--- a/MoinMoin/action/Despam.py	Mon Aug 07 11:08:17 2006 +0200
+++ b/MoinMoin/action/Despam.py	Tue Aug 08 08:49:47 2006 +0200
@@ -169,7 +169,7 @@
        # request.form.get('timestamp', [None])[0]
     ok = request.form.get('ok', [0])[0]
 
-    request.http_headers()
+    request.emit_http_headers()
     request.theme.send_title("Despam", pagename=pagename)
     # Start content (important for RTL support)
     request.write(request.formatter.startContent("content"))
--- a/MoinMoin/action/LikePages.py	Mon Aug 07 11:08:17 2006 +0200
+++ b/MoinMoin/action/LikePages.py	Tue Aug 08 08:49:47 2006 +0200
@@ -41,7 +41,7 @@
         return
 
     # more than one match, list 'em
-    request.http_headers()
+    request.emit_http_headers()
 
     # This action generate data using the user language
     request.setContentLanguage(request.lang)
--- a/MoinMoin/action/LocalSiteMap.py	Mon Aug 07 11:08:17 2006 +0200
+++ b/MoinMoin/action/LocalSiteMap.py	Tue Aug 08 08:49:47 2006 +0200
@@ -28,7 +28,7 @@
 
 def execute(pagename, request):
     _ = request.getText
-    request.http_headers()
+    request.emit_http_headers()
 
     # This action generate data using the user language
     request.setContentLanguage(request.lang)
--- a/MoinMoin/action/MyPages.py	Mon Aug 07 11:08:17 2006 +0200
+++ b/MoinMoin/action/MyPages.py	Tue Aug 08 08:49:47 2006 +0200
@@ -58,7 +58,7 @@
 
     from MoinMoin.Page import Page
     from MoinMoin.parser.text_moin_wiki import Parser as WikiParser
-    request.http_headers()
+    request.emit_http_headers()
 
     # This action generate data using the user language
     request.setContentLanguage(request.lang)
--- a/MoinMoin/action/SubscribeUser.py	Mon Aug 07 11:08:17 2006 +0200
+++ b/MoinMoin/action/SubscribeUser.py	Tue Aug 08 08:49:47 2006 +0200
@@ -17,7 +17,7 @@
 
 def show_form(pagename, request):
     _ = request.getText
-    request.http_headers()
+    request.emit_http_headers()
     request.theme.send_title(_("Subscribe users to the page %s") % pagename, pagename=pagename)
 
     request.write("""
@@ -32,7 +32,7 @@
 
 def show_result(pagename, request):
     _ = request.getText
-    request.http_headers()
+    request.emit_http_headers()
 
     request.theme.send_title(_("Subscribed for %s:") % pagename, pagename=pagename)
 
--- a/MoinMoin/action/SyncPages.py	Mon Aug 07 11:08:17 2006 +0200
+++ b/MoinMoin/action/SyncPages.py	Tue Aug 08 08:49:47 2006 +0200
@@ -9,45 +9,247 @@
 """
 
 import os
+import re
 import zipfile
 import xmlrpclib
 from datetime import datetime
 
+# Compatibility with Python 2.3
+try:
+    set
+except NameError:
+    from sets import Set as set
+
+
 from MoinMoin import wikiutil, config, user
-from MoinMoin.PageEditor import PageEditor
+from MoinMoin.packages import unpackLine, packLine
+from MoinMoin.PageEditor import PageEditor, conflict_markers
 from MoinMoin.Page import Page
-from MoinMoin.wikidicts import Dict
+from MoinMoin.wikidicts import Dict, Group
+from MoinMoin.wikisync import TagStore
+from MoinMoin.util.bdiff import decompress, patch, compress, textdiff
+from MoinMoin.util import diff3
+
+# directions
+UP, DOWN, BOTH = range(3)
+directions_map = {"up": UP, "down": DOWN, "both": BOTH}
+
+
+def normalise_pagename(page_name, prefix):
+    if prefix:
+        if not page_name.startswith(prefix):
+            return None
+        else:
+            return page_name[len(prefix):]
+    else:
+        return page_name
+
 
 class ActionStatus(Exception): pass
 
+class UnsupportedWikiException(Exception): pass
+
+# XXX Move these classes to MoinMoin.wikisync
+class SyncPage(object):
+    """ This class represents a page in one or two wiki(s). """
+    def __init__(self, name, local_rev=None, remote_rev=None, local_name=None, remote_name=None):
+        self.name = name
+        self.local_rev = local_rev
+        self.remote_rev = remote_rev
+        self.local_name = local_name
+        self.remote_name = remote_name
+        assert local_rev or remote_rev
+        assert local_name or remote_name
+
+    def __repr__(self):
+        return repr("<Remote Page %r>" % unicode(self))
+
+    def __unicode__(self):
+        return u"%s<%r:%r>" % (self.name, self.local_rev, self.remote_rev)
+
+    def __lt__(self, other):
+        return self.name < other.name
+
+    def __hash__(self):
+        return hash(self.name)
+
+    def __eq__(self, other):
+        if not isinstance(other, SyncPage):
+            return false
+        return self.name == other.name
+
+    def add_missing_pagename(self, local, remote):
+        if self.local_name is None:
+            n_name = normalise_pagename(self.remote_name, remote.prefix)
+            assert n_name is not None
+            self.local_name = (local.prefix or "") + n_name
+        elif self.remote_name is None:
+            n_name = normalise_pagename(self.local_name, local.prefix)
+            assert n_name is not None
+            self.remote_name = (local.prefix or "") + n_name
+
+        return self # makes using list comps easier
+
+    def filter(cls, sp_list, func):
+        return [x for x in sp_list if func(x.name)]
+    filter = classmethod(filter)
+
+    def merge(cls, local_list, remote_list):
+        # map page names to SyncPage objects :-)
+        d = dict(zip(local_list, local_list))
+        for sp in remote_list:
+            if sp in d:
+                d[sp].remote_rev = sp.remote_rev
+                d[sp].remote_name = sp.remote_name
+            else:
+                d[sp] = sp
+        return d.keys()
+    merge = classmethod(merge)
+
+    def is_only_local(self):
+        return not self.remote_rev
+
+    def is_only_remote(self):
+        return not self.local_rev
+
+    def is_local_and_remote(self):
+        return self.local_rev and self.remote_rev
+
+    def iter_local_only(cls, sp_list):
+        for x in sp_list:
+            if x.is_only_local():
+                yield x
+    iter_local_only = classmethod(iter_local_only)
+
+    def iter_remote_only(cls, sp_list):
+        for x in sp_list:
+            if x.is_only_remote():
+                yield x
+    iter_remote_only = classmethod(iter_remote_only)
+
+    def iter_local_and_remote(cls, sp_list):
+        for x in sp_list:
+            if x.is_local_and_remote():
+                yield x
+    iter_local_and_remote = classmethod(iter_local_and_remote)
+
 class RemoteWiki(object):
     """ This class should be the base for all implementations of remote wiki
         classes. """
-    def getInterwikiName(self):
-        """ Returns the interwiki name of the other wiki. """
-        return NotImplemented
-    
+
     def __repr__(self):
         """ Returns a representation of the instance for debugging purposes. """
         return NotImplemented
 
-class MoinWiki(RemoteWiki):
-    def __init__(self, interwikiname):
+    def get_interwiki_name(self):
+        """ Returns the interwiki name of the other wiki. """
+        return NotImplemented
+
+    def get_iwid(self):
+        """ Returns the InterWiki ID. """
+        return NotImplemented
+
+    def get_pages(self):
+        """ Returns a list of SyncPage instances. """
+        return NotImplemented
+
+
+class MoinRemoteWiki(RemoteWiki):
+    """ Used for MoinMoin wikis reachable via XMLRPC. """
+    def __init__(self, request, interwikiname, prefix):
+        self.request = request
+        self.prefix = prefix
+        _ = self.request.getText
+
         wikitag, wikiurl, wikitail, wikitag_bad = wikiutil.resolve_wiki(self.request, '%s:""' % (interwikiname, ))
         self.wiki_url = wikiutil.mapURL(self.request, wikiurl)
         self.valid = not wikitag_bad
         self.xmlrpc_url = self.wiki_url + "?action=xmlrpc2"
+        if not self.valid:
+            self.connection = None
+            return
+
         self.connection = self.createConnection()
 
+        version = self.connection.getMoinVersion()
+        if not isinstance(version, (tuple, list)):
+            raise UnsupportedWikiException(_("The remote version of MoinMoin is too old, the version 1.6 is required at least."))
+
+        remote_interwikiname = self.get_interwiki_name()
+        remote_iwid = self.connection.interwikiName()[1]
+        self.is_anonymous = remote_interwikiname is None
+        if not self.is_anonymous and interwikiname != remote_interwikiname:
+            raise UnsupportedWikiException(_("The remote wiki uses a different InterWiki name (%(remotename)s)"
+                                             " internally than you specified (%(localname)s).") % {
+                "remotename": wikiutil.escape(remote_interwikiname), "localname": wikiutil.escape(interwikiname)})
+
+        if self.is_anonymous:
+            self.iwid_full = packLine([remote_iwid])
+        else:
+            self.iwid_full = packLine([remote_iwid, interwikiname])
+
     def createConnection(self):
-        return xmlrpclib.ServerProxy(self.xmlrpc_url, allow_none=True)
+        return xmlrpclib.ServerProxy(self.xmlrpc_url, allow_none=True, verbose=True)
+
+    # Public methods
+    def get_diff(self, pagename, from_rev, to_rev):
+        return str(self.connection.getDiff(pagename, from_rev, to_rev))
 
     # Methods implementing the RemoteWiki interface
-    def getInterwikiName(self):
-        return self.connection.interwikiName()
+    def get_interwiki_name(self):
+        return self.connection.interwikiName()[0]
+
+    def get_iwid(self):
+        return self.connection.interwikiName()[1]
+
+    def get_pages(self):
+        pages = self.connection.getAllPagesEx({"include_revno": True, "include_deleted": True})
+        rpages = []
+        for name, revno in pages:
+            normalised_name = normalise_pagename(name, self.prefix)
+            if normalised_name is None:
+                continue
+            rpages.append(SyncPage(normalised_name, remote_rev=revno, remote_name=name))
+        return rpages
 
     def __repr__(self):
-        return "<RemoteWiki wiki_url=%r valid=%r>" % (self.valid, self.wiki_url)
+        return "<MoinRemoteWiki wiki_url=%r valid=%r>" % (self.wiki_url, self.valid)
+
+
+class MoinLocalWiki(RemoteWiki):
+    """ Used for the current MoinMoin wiki. """
+    def __init__(self, request, prefix):
+        self.request = request
+        self.prefix = prefix
+
+    def getGroupItems(self, group_list):
+        """ Returns all page names that are listed on the page group_list. """
+        pages = []
+        for group_pagename in group_list:
+            pages.extend(Group(self.request, group_pagename).members())
+        return [self.createSyncPage(x) for x in pages]
+
+    def createSyncPage(self, page_name):
+        normalised_name = normalise_pagename(page_name, self.prefix)
+        if normalised_name is None:
+            return None
+        return SyncPage(normalised_name, local_rev=Page(self.request, page_name).get_real_rev(), local_name=page_name)
+
+    # Public methods:
+
+    # Methods implementing the RemoteWiki interface
+    def get_interwiki_name(self):
+        return self.request.cfg.interwikiname
+
+    def get_iwid(self):
+        return self.request.cfg.iwid
+
+    def get_pages(self):
+        return [x for x in [self.createSyncPage(x) for x in self.request.rootpage.getPageList(exists=0)] if x]
+
+    def __repr__(self):
+        return "<MoinLocalWiki>"
+
 
 class ActionClass:
     def __init__(self, pagename, request):
@@ -55,44 +257,161 @@
         self.pagename = pagename
         self.page = Page(request, pagename)
 
-    def parsePage(self):
-        defaults = {
+    def parse_page(self):
+        options = {
             "remotePrefix": "",
             "localPrefix": "",
-            "remoteWiki": ""
+            "remoteWiki": "",
+            "pageMatch": None,
+            "pageList": None,
+            "groupList": None,
+            "direction": "foo", # is defaulted below
         }
-        
-        defaults.update(Dict(self.request, self.pagename).get_dict())
-        return defaults
-        
+
+        options.update(Dict(self.request, self.pagename).get_dict())
+
+        # Convert page and group list strings to lists
+        if options["pageList"] is not None:
+            options["pageList"] = unpackLine(options["pageList"], ",")
+        if options["groupList"] is not None:
+            options["groupList"] = unpackLine(options["groupList"], ",")
+
+        options["direction"] = directions_map.get(options["direction"], BOTH)
+
+        return options
+
+    def fix_params(self, params):
+        """ Does some fixup on the parameters. """
+
+        # merge the pageList case into the pageMatch case
+        if params["pageList"] is not None:
+            params["pageMatch"] = u'|'.join([r'^%s$' % re.escape(name)
+                                             for name in params["pageList"]])
+            del params["pageList"]
+
+        if params["pageMatch"] is not None:
+            params["pageMatch"] = re.compile(params["pageMatch"], re.U)
+
+        # we do not support matching or listing pages if there is a group of pages
+        if params["groupList"]:
+            params["pageMatch"] = None
+
+        return params
+
     def render(self):
         """ Render action
 
-        This action returns a wiki page with optional message, or
-        redirects to new page.
+        This action returns a status message.
         """
         _ = self.request.getText
-        
-        params = self.parsePage()
-        
+
+        params = self.fix_params(self.parse_page())
+
         try:
             if not self.request.cfg.interwikiname:
                 raise ActionStatus(_("Please set an interwikiname in your wikiconfig (see HelpOnConfiguration) to be able to use this action."))
 
             if not params["remoteWiki"]:
                 raise ActionStatus(_("Incorrect parameters. Please supply at least the ''remoteWiki'' parameter."))
-            
-            remote = MoinWiki(params["remoteWiki"])
-            
+
+            local = MoinLocalWiki(self.request, params["localPrefix"])
+            try:
+                remote = MoinRemoteWiki(self.request, params["remoteWiki"], params["remotePrefix"])
+            except UnsupportedWikiException, (msg, ):
+                raise ActionStatus(msg)
+
             if not remote.valid:
                 raise ActionStatus(_("The ''remoteWiki'' is unknown."))
-            
-            # ...
-            self.sync(params)
+
+            self.sync(params, local, remote)
         except ActionStatus, e:
             return self.page.send_page(self.request, msg=u'<p class="error">%s</p>\n' % (e.args[0], ))
 
         return self.page.send_page(self.request, msg=_("Syncronisation finished."))
     
+    def sync(self, params, local, remote):
+        """ This method does the synchronisation work. """
+
+        l_pages = local.get_pages()
+        r_pages = remote.get_pages()
+
+        if params["groupList"]:
+            pages_from_groupList = set(local.getGroupItems(params["groupList"]))
+            r_pages = SyncPage.filter(r_pages, pages_from_groupList.__contains__)
+            l_pages = SyncPage.filter(l_pages, pages_from_groupList.__contains__)
+
+        m_pages = [elem.add_missing_pagename(local, remote) for elem in SyncPage.merge(l_pages, r_pages)]
+
+        print "Got %i local, %i remote pages, %i merged pages" % (len(l_pages), len(r_pages), len(m_pages))
+        
+        if params["pageMatch"]:
+            m_pages = SyncPage.filter(m_pages, params["pageMatch"].match)
+        print "After filtering: Got %i merged pages" % (len(m_pages), )
+
+        on_both_sides = list(SyncPage.iter_local_and_remote(m_pages))
+        remote_but_not_local = list(SyncPage.iter_remote_only(m_pages))
+        local_but_not_remote = list(SyncPage.iter_local_only(m_pages))
+        
+        # some initial test code (XXX remove)
+        #r_new_pages = u", ".join([unicode(x) for x in remote_but_not_local])
+        #l_new_pages = u", ".join([unicode(x) for x in local_but_not_remote])
+        #raise ActionStatus("These pages are in the remote wiki, but not local: " + wikiutil.escape(r_new_pages) + "<br>These pages are in the local wiki, but not in the remote one: " + wikiutil.escape(l_new_pages))
+        #if params["direction"] in (DOWN, BOTH):
+        #    for rp in remote_but_not_local:
+
+        # let's do the simple case first, can be refactored later to match all cases
+        # XXX handle deleted pages
+        for rp in on_both_sides:
+            # XXX add locking, acquire read-lock on rp
+
+            local_pagename = rp.local_pagename
+            current_page = Page(self.request, local_pagename)
+            current_rev = current_page.get_real_rev()
+
+            tags = TagStore(current_page)
+            matching_tags = tags.fetch(iwid_full=remote.iwid_full)
+            matching_tags.sort()
+
+            if not matching_tags:
+                remote_rev = None
+                local_rev = rp.local_rev # merge against the newest version
+                old_contents = ""
+            else:
+                newest_tag = matching_tags[-1]
+                local_rev = newest_tag.current_rev
+                remote_rev = newest_tag.remote_rev
+                if remote_rev == rp.remote_rev and local_rev == current_rev:
+                    continue # no changes done, next page
+                old_page = Page(self.request, local_pagename, rev=local_rev)
+                old_contents = old_page.get_raw_body_str()
+
+            diff_result = remote.get_diff(rp.remote_pagename, remote_rev, None)
+            is_remote_conflict = diff_result["conflict"]
+            assert diff_result["diffversion"] == 1
+            diff = diff_result["diff"]
+            current_remote_rev = diff_result["current"]
+
+            if remote_rev is None: # set the remote_rev for the case without any tags
+                remote_rev = current_remote_rev
+
+            new_contents = patch(old_contents, decompress(diff)).decode("utf-8")
+            old_contents = old_contents.decode("utf-8")
+
+            # here, the actual merge happens
+            verynewtext = diff3.text_merge(old_contents, new_contents, current_page.get_raw_body(), 1, *conflict_markers)
+
+            new_local_rev = current_rev + 1 # XXX commit first?
+            local_full_iwid = packLine([local.get_iwid(), local.get_interwiki_name()])
+            remote_full_iwid = packLine([remote.get_iwid(), remote.get_interwiki_name()])
+            # XXX add remote conflict handling
+            very_current_remote_rev = remote.merge_diff(rp.remote_pagename, compress(textdiff(new_contents, verynewtext)), new_local_rev, remote_rev, current_remote_rev, local_full_iwid)
+            tags.add(remote_wiki=remote_full_iwid, remote_rev=very_current_remote_rev, current_rev=new_local_rev)
+            comment = u"Local Merge - %r" % (remote.get_interwiki_name() or remote.get_iwid())
+            try:
+                current_page.saveText(verynewtext, current_rev, comment=comment)
+            except PageEditor.EditConflict:
+                assert False, "You stumbled on a problem with the current storage system - I cannot lock pages"
+            # XXX untested
+
 def execute(pagename, request):
     ActionClass(pagename, request).render()
--- a/MoinMoin/action/__init__.py	Mon Aug 07 11:08:17 2006 +0200
+++ b/MoinMoin/action/__init__.py	Tue Aug 08 08:49:47 2006 +0200
@@ -29,9 +29,11 @@
 from MoinMoin import wikiutil
 from MoinMoin.Page import Page
 
-# create a list of extension actions from the subpackage directory
-extension_actions = pysupport.getPackageModules(__file__)
-modules = extension_actions
+# create a list of extension actions from the package directory
+modules = pysupport.getPackageModules(__file__)
+
+# builtin-stuff (see do_<name> below):
+names = ['show', 'recall', 'raw', 'format', 'content', 'print', 'refresh', 'goto', 'userform', ]
 
 class ActionBase:
     """ action base class with some generic stuff to inherit
@@ -201,15 +203,8 @@
         else:
             self.render_msg(self.make_form()) # display the form again
 
-# from wikiaction.py ---------------------------------------------------------
 
-import os, re, time
-from MoinMoin import config, util
-from MoinMoin.logfile import editlog
-
-#############################################################################
-### Misc Actions
-#############################################################################
+# Builtin Actions ------------------------------------------------------------
 
 def do_raw(pagename, request):
     """ send raw content of a page (e.g. wiki markup) """
@@ -252,7 +247,7 @@
 
 def do_content(pagename, request):
     """ same as do_show, but we only show the content """
-    request.http_headers()
+    request.emit_http_headers()
     page = Page(request, pagename)
     request.write('<!-- Transclusion of %s -->' % request.getQualifiedURL(page.url(request)))
     page.send_page(request, count_hit=0, content_only=1)
@@ -279,168 +274,26 @@
     caching.CacheEntry(request, arena, "pagelinks", scope='item').remove()
     do_show(pagename, request)
 
-def do_revert(pagename, request):
-    """ restore another revision of a page as a new current revision """
-    from MoinMoin.PageEditor import PageEditor
-    _ = request.getText
-
-    if not request.user.may.revert(pagename):
-        return Page(request, pagename).send_page(request,
-            msg=_('You are not allowed to revert this page!'))
-
-    rev = int(request.form['rev'][0])
-    revstr = '%08d' % rev
-    oldpg = Page(request, pagename, rev=rev)
-    pg = PageEditor(request, pagename)
-
-    try:
-        savemsg = pg.saveText(oldpg.get_raw_body(), 0, extra=revstr,
-                              action="SAVE/REVERT")
-    except pg.SaveError, msg:
-        # msg contain a unicode string
-        savemsg = unicode(msg)
-    request.reset()
-    pg.send_page(request, msg=savemsg)
-    return None
-
 def do_goto(pagename, request):
     """ redirect to another page """
     target = request.form.get('target', [''])[0]
     request.http_redirect(Page(request, target).url(request))
 
-def do_quicklink(pagename, request):
-    """ Add the current wiki page to the user quicklinks 
-    
-    TODO: what if add or remove quicklink fail? display an error message?
-    """
-    _ = request.getText
-    msg = None
-
-    if not request.user.valid:
-        msg = _("You must login to add a quicklink.")
-    elif request.user.isQuickLinkedTo([pagename]):
-        if request.user.removeQuicklink(pagename):
-            msg = _('Your quicklink to this page has been removed.')
-    else:
-        if request.user.addQuicklink(pagename):
-            msg = _('A quicklink to this page has been added for you.')
-
-    Page(request, pagename).send_page(request, msg=msg)
-
-def do_subscribe(pagename, request):
-    """ Subscribe or unsubscribe the user to pagename
-    
-    TODO: what if subscribe failed? no message is displayed.
-    """
-    _ = request.getText
-    cfg = request.cfg
-    msg = None
-
-    if not request.user.may.read(pagename):
-        msg = _("You are not allowed to subscribe to a page you can't read.")
-
-    # Check if mail is enabled
-    elif not cfg.mail_enabled:
-        msg = _("This wiki is not enabled for mail processing.")
-
-    # Suggest visitors to login
-    elif not request.user.valid:
-        msg = _("You must log in to use subscribtions.")
-
-    # Suggest users without email to add their email address
-    elif not request.user.email:
-        msg = _("Add your email address in your UserPreferences to use subscriptions.")
-
-    elif request.user.isSubscribedTo([pagename]):
-        # Try to unsubscribe
-        if request.user.unsubscribe(pagename):
-            msg = _('Your subscribtion to this page has been removed.')
-        else:
-            msg = _("Can't remove regular expression subscription!") + u' ' + \
-                  _("Edit the subscription regular expressions in your "
-                    "UserPreferences.")
-
-    else:
-        # Try to subscribe
-        if request.user.subscribe(pagename):
-            msg = _('You have been subscribed to this page.')
-
-    Page(request, pagename).send_page(request, msg=msg)
-
 def do_userform(pagename, request):
     """ save data posted from UserPreferences """
     from MoinMoin import userform
     savemsg = userform.savedata(request)
     Page(request, pagename).send_page(request, msg=savemsg)
 
-def do_bookmark(pagename, request):
-    """ set bookmarks (in time) for RecentChanges or delete them """
-    timestamp = request.form.get('time', [None])[0]
-    if timestamp is not None:
-        if timestamp == 'del':
-            tm = None
-        else:
-            try:
-                tm = int(timestamp)
-            except StandardError:
-                tm = wikiutil.timestamp2version(time.time())
-    else:
-        tm = wikiutil.timestamp2version(time.time())
-
-    if tm is None:
-        request.user.delBookmark()
+# Dispatching ----------------------------------------------------------------
+def getNames(cfg):
+    if hasattr(cfg, 'action_names'):
+        return cfg.action_names
     else:
-        request.user.setBookmark(tm)
-    Page(request, pagename).send_page(request)
-
-
-#############################################################################
-### Special Actions
-#############################################################################
-
-def do_chart(pagename, request):
-    """ Show page charts """
-    _ = request.getText
-    if not request.user.may.read(pagename):
-        msg = _("You are not allowed to view this page.")
-        return request.page.send_page(request, msg=msg)
-
-    if not request.cfg.chart_options:
-        msg = _("Charts are not available!")
-        return request.page.send_page(request, msg=msg)
-
-    chart_type = request.form.get('type', [''])[0].strip()
-    if not chart_type:
-        msg = _('You need to provide a chart type!')
-        return request.page.send_page(request, msg=msg)
-
-    try:
-        func = pysupport.importName("MoinMoin.stats." + chart_type, 'draw')
-    except (ImportError, AttributeError):
-        msg = _('Bad chart type "%s"!') % chart_type
-        return request.page.send_page(request, msg=msg)
-
-    func(pagename, request)
-
-def do_dumpform(pagename, request):
-    """ dump the form data we received in this request for debugging """
-    data = util.dumpFormData(request.form)
-
-    request.http_headers()
-    request.write("<html><body>%s</body></html>" % data)
-
-
-#############################################################################
-### Dispatching
-#############################################################################
-
-def getPlugins(request):
-    """ return the path to the action plugin directory and a list of plugins there """
-    dir = os.path.join(request.cfg.plugin_dir, 'action')
-    plugins = []
-    if os.path.isdir(dir):
-        plugins = pysupport.getPackageModules(os.path.join(dir, 'dummy'))
-    return dir, plugins
+        lnames = names[:]
+        lnames.extend(wikiutil.getPlugins('action', cfg))
+        cfg.action_names = lnames # remember it
+        return lnames
 
 def getHandler(request, action, identifier="execute"):
     """ return a handler function for a given action or None """
--- a/MoinMoin/action/backup.py	Mon Aug 07 11:08:17 2006 +0200
+++ b/MoinMoin/action/backup.py	Tue Aug 08 08:49:47 2006 +0200
@@ -28,7 +28,7 @@
     """ Send compressed tar file """
     dateStamp = time.strftime("%Y-%m-%d--%H-%M-%S-UTC", time.gmtime())
     filename = "%s-%s.tar.%s" % (request.cfg.siteid, dateStamp, request.cfg.backup_compression)
-    request.http_headers([
+    request.emit_http_headers([
         "Content-Type: application/octet-stream",
         "Content-Disposition: inline; filename=\"%s\"" % filename, ])
 
@@ -70,7 +70,7 @@
 
 def sendBackupForm(request, pagename):
     _ = request.getText
-    request.http_headers()
+    request.emit_http_headers()
     request.setContentLanguage(request.lang)
     title = _('Wiki Backup / Restore')
     request.theme.send_title(title, form=request.form, pagename=pagename)
--- /dev/null	Thu Jan 01 00:00:00 1970 +0000
+++ b/MoinMoin/action/bookmark.py	Tue Aug 08 08:49:47 2006 +0200
@@ -0,0 +1,33 @@
+# -*- coding: iso-8859-1 -*-
+"""
+    MoinMoin - set or delete bookmarks (in time) for RecentChanges
+
+    @copyright: 2000-2004 by Jürgen Hermann <jh@web.de>,
+                2006 by MoinMoin:ThomasWaldmann
+    @license: GNU GPL, see COPYING for details.
+"""
+import time
+
+from MoinMoin import wikiutil
+from MoinMoin.Page import Page
+
+def execute(pagename, request):
+    """ set bookmarks (in time) for RecentChanges or delete them """
+    timestamp = request.form.get('time', [None])[0]
+    if timestamp is not None:
+        if timestamp == 'del':
+            tm = None
+        else:
+            try:
+                tm = int(timestamp)
+            except StandardError:
+                tm = wikiutil.timestamp2version(time.time())
+    else:
+        tm = wikiutil.timestamp2version(time.time())
+
+    if tm is None:
+        request.user.delBookmark()
+    else:
+        request.user.setBookmark(tm)
+    request.page.send_page(request)
+
--- /dev/null	Thu Jan 01 00:00:00 1970 +0000
+++ b/MoinMoin/action/chart.py	Tue Aug 08 08:49:47 2006 +0200
@@ -0,0 +1,34 @@
+# -*- coding: iso-8859-1 -*-
+"""
+    MoinMoin - show some statistics chart
+
+    @copyright: 2000-2004 by Jürgen Hermann <jh@web.de>,
+                2006 by MoinMoin:ThomasWaldmann
+    @license: GNU GPL, see COPYING for details.
+"""
+from MoinMoin.util import pysupport
+
+def execute(pagename, request):
+    """ Show page charts """
+    _ = request.getText
+    if not request.user.may.read(pagename):
+        msg = _("You are not allowed to view this page.")
+        return request.page.send_page(request, msg=msg)
+
+    if not request.cfg.chart_options:
+        msg = _("Charts are not available!")
+        return request.page.send_page(request, msg=msg)
+
+    chart_type = request.form.get('type', [''])[0].strip()
+    if not chart_type:
+        msg = _('You need to provide a chart type!')
+        return request.page.send_page(request, msg=msg)
+
+    try:
+        func = pysupport.importName("MoinMoin.stats.%s" % chart_type, 'draw')
+    except (ImportError, AttributeError), err:
+        msg = _('Bad chart type "%s"!') % chart_type
+        return request.page.send_page(request, msg=msg)
+
+    func(pagename, request)
+
--- a/MoinMoin/action/diff.py	Mon Aug 07 11:08:17 2006 +0200
+++ b/MoinMoin/action/diff.py	Tue Aug 08 08:49:47 2006 +0200
@@ -71,7 +71,7 @@
     # This action generate content in the user language
     request.setContentLanguage(request.lang)
 
-    request.http_headers()
+    request.emit_http_headers()
     request.theme.send_title(_('Diff for "%s"') % (pagename,), pagename=pagename, allow_doubleclick=1)
 
     if rev1 > 0 and rev2 > 0 and rev1 > rev2 or rev1 == 0 and rev2 > 0:
--- /dev/null	Thu Jan 01 00:00:00 1970 +0000
+++ b/MoinMoin/action/dumpform.py	Tue Aug 08 08:49:47 2006 +0200
@@ -0,0 +1,17 @@
+# -*- coding: iso-8859-1 -*-
+"""
+    MoinMoin - dump form data we received (debugging)
+
+    @copyright: 2000-2004 by Jürgen Hermann <jh@web.de>,
+                2006 by MoinMoin:ThomasWaldmann
+    @license: GNU GPL, see COPYING for details.
+"""
+from MoinMoin import util
+
+def execute(pagename, request):
+    """ dump the form data we received in this request for debugging """
+    data = util.dumpFormData(request.form)
+
+    request.emit_http_headers()
+    request.write("<html><body>%s</body></html>" % data)
+
--- a/MoinMoin/action/edit.py	Mon Aug 07 11:08:17 2006 +0200
+++ b/MoinMoin/action/edit.py	Tue Aug 08 08:49:47 2006 +0200
@@ -137,7 +137,6 @@
         try:
             still_conflict = wikiutil.containsConflictMarker(savetext)
             pg.setConflict(still_conflict)
-            request.http_headers() # XXX WHY? XXX
             savemsg = pg.saveText(savetext, rev, trivial=trivial, comment=comment)
         except pg.EditConflict, e:
             msg = e.message
--- a/MoinMoin/action/fckdialog.py	Mon Aug 07 11:08:17 2006 +0200
+++ b/MoinMoin/action/fckdialog.py	Tue Aug 08 08:49:47 2006 +0200
@@ -15,7 +15,7 @@
 
 def macro_dialog(request):
     help = get_macro_help(request)
-    request.http_headers()
+    request.emit_http_headers()
     request.write(
         '''<!DOCTYPE HTML PUBLIC "-//W3C//DTD HTML 4.0 Transitional//EN">
 <html>
@@ -169,7 +169,7 @@
         pages = [p.page_name for p in searchresult.hits]
     else:
         pages = [name]
-    request.http_headers()
+    request.emit_http_headers()
     request.write(
         '''<!DOCTYPE HTML PUBLIC "-//W3C//DTD HTML 4.0 Transitional//EN">
 <html>
@@ -200,7 +200,7 @@
 ''' % "".join(["<option>%s</option>\n" % p for p in pages]))
 
 def link_dialog(request):
-    request.http_headers()
+    request.emit_http_headers()
     # list of wiki pages
     name = request.form.get("pagename", [""])[0]
     if name:
@@ -246,7 +246,7 @@
         scriptname += "/"
     action = scriptname
     basepage = request.page.page_name.encode(config.charset)
-    request.http_headers()
+    request.emit_http_headers()
     request.write('''
 <!--
  * FCKeditor - The text editor for internet
@@ -367,7 +367,7 @@
 ##############################################################################
 
 def attachment_dialog(request):
-    request.http_headers()
+    request.emit_http_headers()
     # list of wiki pages
     name = request.form.get("pagename", [""])[0]
     if name:
@@ -397,7 +397,7 @@
     if not scriptname or scriptname[-1] != "/":
         scriptname += "/"
     action = scriptname
-    request.http_headers()
+    request.emit_http_headers()
     request.write('''
 <!--
  * FCKeditor - The text editor for internet
@@ -462,7 +462,7 @@
 ##############################################################################
 
 def image_dialog(request):
-    request.http_headers()
+    request.emit_http_headers()
     url_prefix = request.cfg.url_prefix
     request.write('''
 <!--
--- a/MoinMoin/action/fullsearch.py	Mon Aug 07 11:08:17 2006 +0200
+++ b/MoinMoin/action/fullsearch.py	Tue Aug 08 08:49:47 2006 +0200
@@ -51,8 +51,7 @@
     if len(striped) == 0:
         err = _('Please use a more selective search term instead '
                 'of {{{"%s"}}}') % needle
-        # send http headers
-        request.http_headers()
+        request.emit_http_headers()
         Page(request, pagename).send_page(request, msg=err)
         return
 
@@ -75,8 +74,7 @@
             request.http_redirect(url)
             return
 
-    # send http headers
-    request.http_headers()
+    request.emit_http_headers()
 
     # This action generate data using the user language
     request.setContentLanguage(request.lang)
@@ -100,8 +98,8 @@
     # Then search results
     info = not titlesearch
     if context:
-        output = results.pageListWithContext(request, request.formatter, info=info,
-                context=context, hitsFrom=hitsFrom)
+        output = results.pageListWithContext(request, request.formatter,
+                info=info, context=context, hitsFrom=hitsFrom)
     else:
         output = results.pageList(request, request.formatter, info=info,
                 hitsFrom=hitsFrom)
--- a/MoinMoin/action/info.py	Mon Aug 07 11:08:17 2006 +0200
+++ b/MoinMoin/action/info.py	Tue Aug 08 08:49:47 2006 +0200
@@ -8,10 +8,12 @@
                 2006 by MoinMoin:ThomasWaldmann
     @license: GNU GPL, see COPYING for details.
 """
+import os
 
 from MoinMoin import config, wikiutil, action
 from MoinMoin.Page import Page
 from MoinMoin.logfile import editlog
+from MoinMoin.widget import html
 
 def execute(pagename, request):
     """ show misc. infos about a page """
@@ -87,38 +89,41 @@
 
         may_revert = request.user.may.revert(pagename)
 
+        def render_action(text, query, **kw):
+            kw.update(rel='nofollow')
+            if 0: # diff button doesnt work XXX
+                params_html = []
+                for k, v in query.items():
+                    params_html.append('<input type="hidden" name="%s" value="%s">' % (k, v))
+                params_html = ''.join(params_html)
+                html = '''
+<form>
+<input type="submit" value="%s">
+%s
+</form>
+''' % (text, params_html)
+            else:
+                html = page.link_to(request, text, querystr=query, **kw)
+            return html
+
         # read in the complete log of this page
         log = editlog.EditLog(request, rootpagename=pagename)
         count = 0
         for line in log.reverse():
             rev = int(line.rev)
-            actions = ""
+            actions = []
             if line.action in ['SAVE', 'SAVENEW', 'SAVE/REVERT', ]:
                 size = page.size(rev=rev)
                 if count == 0: # latest page
-                    actions = '%s&nbsp;%s' % (actions, page.link_to(request,
-                        text=_('view'),
-                        querystr=''))
-                    actions = '%s&nbsp;%s' % (actions, page.link_to(request,
-                        text=_('raw'),
-                        querystr='action=raw', rel='nofollow'))
-                    actions = '%s&nbsp;%s' % (actions, page.link_to(request,
-                        text=_('print'),
-                        querystr='action=print', rel='nofollow'))
+                    actions.append(render_action(_('view'), {'action': 'show'}))
+                    actions.append(render_action(_('raw'), {'action': 'raw'}))
+                    actions.append(render_action(_('print'), {'action': 'print'}))
                 else:
-                    actions = '%s&nbsp;%s' % (actions, page.link_to(request,
-                        text=_('view'),
-                        querystr='action=recall&rev=%d' % rev, rel='nofollow'))
-                    actions = '%s&nbsp;%s' % (actions, page.link_to(request,
-                        text=_('raw'),
-                        querystr='action=raw&rev=%d' % rev, rel='nofollow'))
-                    actions = '%s&nbsp;%s' % (actions, page.link_to(request,
-                        text=_('print'),
-                        querystr='action=print&rev=%d' % rev, rel='nofollow'))
+                    actions.append(render_action(_('view'), {'action': 'recall', 'rev': '%d' % rev}))
+                    actions.append(render_action(_('raw'), {'action': 'raw', 'rev': '%d' % rev}))
+                    actions.append(render_action(_('print'), {'action': 'print', 'rev': '%d' % rev}))
                     if may_revert and size: # you can only revert to nonempty revisions
-                        actions = '%s&nbsp;%s' % (actions, page.link_to(request,
-                            text=_('revert'),
-                            querystr='action=revert&rev=%d' % rev, rel='nofollow'))
+                        actions.append(render_action(_('revert'), {'action': 'revert', 'rev': '%d' % rev}))
                 if count == 0:
                     rchecked = ' checked="checked"'
                     lchecked = ''
@@ -148,20 +153,12 @@
                     except:
                         pass
                     if line.action == 'ATTNEW':
-                        actions = '%s&nbsp;%s' % (actions, page.link_to(request,
-                            text=_('view'),
-                            querystr='action=AttachFile&do=view&target=%s' % filename, rel='nofollow'))
+                        actions.append(render_action(_('view'), {'action': 'AttachFile', 'do': 'view', 'target': '%s' % filename}))
                     elif line.action == 'ATTDRW':
-                        actions = '%s&nbsp;%s' % (actions, page.link_to(request,
-                            text=_('edit'),
-                            querystr='action=AttachFile&drawing=%s' % filename.replace(".draw", ""), rel='nofollow'))
+                        actions.append(render_action(_('edit'), {'action': 'AttachFile', 'drawing': '%s' % filename.replace(".draw", "")}))
 
-                    actions = '%s&nbsp;%s' % (actions, page.link_to(request,
-                        text=_('get'),
-                        querystr='action=AttachFile&do=get&target=%s' % filename, rel='nofollow'))
-                    actions = '%s&nbsp;%s' % (actions, page.link_to(request,
-                        text=_('del'),
-                        querystr='action=AttachFile&do=del&target=%s' % filename, rel='nofollow'))
+                    actions.append(render_action(_('get'), {'action': 'AttachFile', 'do': 'get', 'target': '%s' % filename}))
+                    actions.append(render_action(_('del'), {'action': 'AttachFile', 'do': 'del', 'target': '%s' % filename}))
                     # XXX use?: wikiutil.escape(filename)
 
             history.addRow((
@@ -171,7 +168,7 @@
                 diff,
                 line.getEditor(request) or _("N/A"),
                 wikiutil.escape(comment) or '&nbsp;',
-                actions,
+                "&nbsp;".join(actions),
             ))
             count += 1
             if count >= 100:
@@ -180,24 +177,22 @@
         # print version history
         from MoinMoin.widget.browser import DataBrowserWidget
 
-        request.write('<h2>%s</h2>\n' % _('Revision History'))
+        request.write(unicode(html.H2().append(_('Revision History'))))
 
         if not count: # there was no entry in logfile
             request.write(_('No log entries found.'))
             return
 
-        # TODO: this form activates revert, which should use POST, but
-        # other actions should use get. Maybe we should put the revert
-        # into the page view itself, and not in this form.
-        request.write('<form method="GET" action="">\n')
-        request.write('<div id="page-history">\n')
-        request.write('<input type="hidden" name="action" value="diff">\n')
-
         history_table = DataBrowserWidget(request)
         history_table.setData(history)
-        history_table.render()
-        request.write('</div>\n')
-        request.write('</form>\n')
+
+        div = html.DIV(id="page-history")
+        div.append(html.INPUT(type="hidden", name="action", value="diff"))
+        div.append(history_table.toHTML())
+
+        form = html.FORM(method="GET", action="")
+        form.append(div)
+        request.write(unicode(form))
 
     # main function
     _ = request.getText
@@ -205,7 +200,7 @@
     qpagename = wikiutil.quoteWikinameURL(pagename)
     title = page.split_title(request)
 
-    request.http_headers()
+    request.emit_http_headers()
 
     # This action uses page or wiki language TODO: currently
     # page.language is broken and not available now, when we fix it,
--- a/MoinMoin/action/links.py	Mon Aug 07 11:08:17 2006 +0200
+++ b/MoinMoin/action/links.py	Tue Aug 08 08:49:47 2006 +0200
@@ -19,7 +19,7 @@
     else:
         mimetype = "text/html"
 
-    request.http_headers(["Content-Type: %s; charset=%s" % (mimetype, config.charset)])
+    request.emit_http_headers(["Content-Type: %s; charset=%s" % (mimetype, config.charset)])
 
     if mimetype == "text/html":
         request.theme.send_title(_('Full Link List for "%s"') % request.cfg.sitename)
--- a/MoinMoin/action/login.py	Mon Aug 07 11:08:17 2006 +0200
+++ b/MoinMoin/action/login.py	Tue Aug 08 08:49:47 2006 +0200
@@ -59,7 +59,7 @@
             return self.page.send_page(request, msg=error)
 
         else: # show login form
-            request.http_headers()
+            request.emit_http_headers()
             request.theme.send_title(_("Login"), pagename=self.pagename)
             # Start content (important for RTL support)
             request.write(request.formatter.startContent("content"))
--- /dev/null	Thu Jan 01 00:00:00 1970 +0000
+++ b/MoinMoin/action/quicklink.py	Tue Aug 08 08:49:47 2006 +0200
@@ -0,0 +1,29 @@
+# -*- coding: iso-8859-1 -*-
+"""
+    MoinMoin - add a quicklink to the user's quicklinks
+
+    @copyright: 2000-2004 by Jürgen Hermann <jh@web.de>,
+                2006 by MoinMoin:ThomasWaldmann
+    @license: GNU GPL, see COPYING for details.
+"""
+from MoinMoin.Page import Page
+
+def execute(pagename, request):
+    """ Add the current wiki page to the user's quicklinks, or remove
+    it again if it is already quicklinked (the action toggles).
+    TODO: what if adding or removing the quicklink fails? display an error message?
+    """
+    _ = request.getText
+    msg = None
+
+    if not request.user.valid:
+        msg = _("You must login to add a quicklink.")
+    elif request.user.isQuickLinkedTo([pagename]):
+        if request.user.removeQuicklink(pagename):
+            msg = _('Your quicklink to this page has been removed.')
+    else:
+        if request.user.addQuicklink(pagename):
+            msg = _('A quicklink to this page has been added for you.')
+
+    Page(request, pagename).send_page(request, msg=msg)
+
--- /dev/null	Thu Jan 01 00:00:00 1970 +0000
+++ b/MoinMoin/action/refresh.py	Tue Aug 08 08:49:47 2006 +0200
@@ -0,0 +1,24 @@
+# -*- coding: iso-8859-1 -*-
+"""
+    MoinMoin - refresh cache of a page
+
+    @copyright: 2000-2004 by Jürgen Hermann <jh@web.de>,
+                2006 by MoinMoin:ThomasWaldmann
+    @license: GNU GPL, see COPYING for details.
+"""
+from MoinMoin.Page import Page
+
+def execute(pagename, request):
+    """ Handle refresh action """
+    # Without arguments, refresh action will refresh the page text_html cache.
+    arena = request.form.get('arena', ['Page.py'])[0]
+    if arena == 'Page.py':
+        arena = Page(request, pagename)
+    key = request.form.get('key', ['text_html'])[0]
+
+    # Remove cache entry (if exists), and send the page
+    from MoinMoin import caching
+    caching.CacheEntry(request, arena, key, scope='item').remove()
+    caching.CacheEntry(request, arena, "pagelinks", scope='item').remove()
+    request.page.send_page(request)
+
--- /dev/null	Thu Jan 01 00:00:00 1970 +0000
+++ b/MoinMoin/action/revert.py	Tue Aug 08 08:49:47 2006 +0200
@@ -0,0 +1,33 @@
+# -*- coding: iso-8859-1 -*-
+"""
+    MoinMoin - revert a page to a previous revision
+
+    @copyright: 2000-2004 by Jürgen Hermann <jh@web.de>,
+                2006 by MoinMoin:ThomasWaldmann
+    @license: GNU GPL, see COPYING for details.
+"""
+from MoinMoin.Page import Page
+
+def execute(pagename, request):
+    """ restore another revision of a page as a new current revision """
+    from MoinMoin.PageEditor import PageEditor
+    _ = request.getText
+
+    if not request.user.may.revert(pagename):
+        return Page(request, pagename).send_page(request,
+            msg=_('You are not allowed to revert this page!'))
+
+    rev = int(request.form['rev'][0])
+    revstr = '%08d' % rev
+    oldpg = Page(request, pagename, rev=rev)
+    pg = PageEditor(request, pagename)
+
+    try:
+        savemsg = pg.saveText(oldpg.get_raw_body(), 0, extra=revstr,
+                              action="SAVE/REVERT")
+    except pg.SaveError, msg:
+        # msg contains a unicode string
+        savemsg = unicode(msg)
+    request.reset()
+    pg.send_page(request, msg=savemsg)
+
--- a/MoinMoin/action/rss_rc.py	Mon Aug 07 11:08:17 2006 +0200
+++ b/MoinMoin/action/rss_rc.py	Tue Aug 08 08:49:47 2006 +0200
@@ -45,18 +45,7 @@
     except ValueError:
         ddiffs = 0
 
-    # prepare output
-    out = StringIO.StringIO()
-    handler = RssGenerator(out)
-
     # get data
-    interwiki = request.getBaseURL()
-    if interwiki[-1] != "/": interwiki = interwiki + "/"
-
-    logo = re.search(r'src="([^"]*)"', cfg.logo_string)
-    if logo:
-        logo = request.getQualifiedURL(logo.group(1))
-
     log = editlog.EditLog(request)
     logdata = []
     counter = 0
@@ -81,145 +70,173 @@
             break
     del log
 
-    # start SAX stream
-    handler.startDocument()
-    handler._out.write(
-        '<!--\n'
-        '    Add an "items=nnn" URL parameter to get more than the default 15 items.\n'
-        '    You cannot get more than %d items though.\n'
-        '    \n'
-        '    Add "unique=1" to get a list of changes where page names are unique,\n'
-        '    i.e. where only the latest change of each page is reflected.\n'
-        '    \n'
-        '    Add "diffs=1" to add change diffs to the description of each items.\n'
-        '    \n'
-        '    Add "ddiffs=1" to link directly to the diff (good for FeedReader).\n'
-        '    Current settings: items=%i, unique=%i, diffs=%i, ddiffs=%i'
-        '-->\n' % (items_limit, max_items, unique, diffs, ddiffs)
-        )
+    timestamp = timefuncs.formathttpdate(lastmod)
+    etag = "%d-%d-%d-%d-%d" % (lastmod, max_items, diffs, ddiffs, unique)
 
-    # emit channel description
-    handler.startNode('channel', {
-        (handler.xmlns['rdf'], 'about'): request.getBaseURL(),
-        })
-    handler.simpleNode('title', cfg.sitename)
-    handler.simpleNode('link', interwiki + wikiutil.quoteWikinameURL(pagename))
-    handler.simpleNode('description', 'RecentChanges at %s' % cfg.sitename)
-    if logo:
-        handler.simpleNode('image', None, {
-            (handler.xmlns['rdf'], 'resource'): logo,
-            })
-    if cfg.interwikiname:
-        handler.simpleNode(('wiki', 'interwiki'), cfg.interwikiname)
+    # for 304, we look at if-modified-since and if-none-match headers,
+    # one of them must match and the other is either not there or must match.
+    if request.if_modified_since == timestamp:
+        if request.if_none_match:
+            if request.if_none_match == etag:
+                request.emit_http_headers(["Status: 304 Not modified"])
+        else:
+            request.emit_http_headers(["Status: 304 Not modified"])
+    elif request.if_none_match == etag:
+        if request.if_modified_since:
+            if request.if_modified_since == timestamp:
+                request.emit_http_headers(["Status: 304 Not modified"])
+        else:
+            request.emit_http_headers(["Status: 304 Not modified"])
+    else:
+        # generate an Expires header, using whatever setting the admin
+        # defined for suggested cache lifetime of the RecentChanges RSS doc
+        expires = timefuncs.formathttpdate(time.time() + cfg.rss_cache)
 
-    handler.startNode('items')
-    handler.startNode(('rdf', 'Seq'))
-    for item in logdata:
-        link = "%s%s#%04d%02d%02d%02d%02d%02d" % ((interwiki,
-                wikiutil.quoteWikinameURL(item.pagename),) + item.time[:6])
-        handler.simpleNode(('rdf', 'li'), None, attr={
-            (handler.xmlns['rdf'], 'resource'): link,
-        })
-    handler.endNode(('rdf', 'Seq'))
-    handler.endNode('items')
-    handler.endNode('channel')
+        httpheaders = ["Content-Type: text/xml; charset=%s" % config.charset,
+                       "Expires: %s" % expires,
+                       "Last-Modified: %s" % timestamp,
+                       "Etag: %s" % etag, ]
 
-    # emit logo data
-    if logo:
-        handler.startNode('image', attr={
-            (handler.xmlns['rdf'], 'about'): logo,
+        # send the generated XML document
+        request.emit_http_headers(httpheaders)
+
+        interwiki = request.getBaseURL()
+        if interwiki[-1] != "/":
+            interwiki = interwiki + "/"
+
+        logo = re.search(r'src="([^"]*)"', cfg.logo_string)
+        if logo:
+            logo = request.getQualifiedURL(logo.group(1))
+
+        # prepare output
+        out = StringIO.StringIO()
+        handler = RssGenerator(out)
+
+        # start SAX stream
+        handler.startDocument()
+        handler._out.write(
+            '<!--\n'
+            '    Add an "items=nnn" URL parameter to get more than the default 15 items.\n'
+            '    You cannot get more than %d items though.\n'
+            '    \n'
+            '    Add "unique=1" to get a list of changes where page names are unique,\n'
+            '    i.e. where only the latest change of each page is reflected.\n'
+            '    \n'
+            '    Add "diffs=1" to add change diffs to the description of each items.\n'
+            '    \n'
+            '    Add "ddiffs=1" to link directly to the diff (good for FeedReader).\n'
+            '    Current settings: items=%i, unique=%i, diffs=%i, ddiffs=%i'
+            '-->\n' % (items_limit, max_items, unique, diffs, ddiffs)
+            )
+
+        # emit channel description
+        handler.startNode('channel', {
+            (handler.xmlns['rdf'], 'about'): request.getBaseURL(),
             })
         handler.simpleNode('title', cfg.sitename)
-        handler.simpleNode('link', interwiki)
-        handler.simpleNode('url', logo)
-        handler.endNode('image')
-
-    # emit items
-    for item in logdata:
-        page = Page(request, item.pagename)
-        link = interwiki + wikiutil.quoteWikinameURL(item.pagename)
-        rdflink = "%s#%04d%02d%02d%02d%02d%02d" % ((link,) + item.time[:6])
-        handler.startNode('item', attr={
-            (handler.xmlns['rdf'], 'about'): rdflink,
-        })
-
-        # general attributes
-        handler.simpleNode('title', item.pagename)
-        if ddiffs:
-            handler.simpleNode('link', link+"?action=diff")
-        else:
-            handler.simpleNode('link', link)
-
-        handler.simpleNode(('dc', 'date'), timefuncs.W3CDate(item.time))
-
-        # description
-        desc_text = item.comment
-        if diffs:
-            # TODO: rewrite / extend wikiutil.pagediff
-            # searching for the matching pages doesn't really belong here
-            revisions = page.getRevList()
-
-            rl = len(revisions)
-            for idx in range(rl):
-                rev = revisions[idx]
-                if rev <= item.rev:
-                    if idx+1 < rl:
-                        lines = wikiutil.pagediff(request, item.pagename, revisions[idx+1], item.pagename, 0, ignorews=1)
-                        if len(lines) > 20:
-                            lines = lines[:20] + ['...\n']
-                        lines = '\n'.join(lines)
-                        lines = wikiutil.escape(lines)
-                        desc_text = '%s\n<pre>\n%s\n</pre>\n' % (desc_text, lines)
-                    break
-        if desc_text:
-            handler.simpleNode('description', desc_text)
+        handler.simpleNode('link', interwiki + wikiutil.quoteWikinameURL(pagename))
+        handler.simpleNode('description', 'RecentChanges at %s' % cfg.sitename)
+        if logo:
+            handler.simpleNode('image', None, {
+                (handler.xmlns['rdf'], 'resource'): logo,
+                })
+        if cfg.interwikiname:
+            handler.simpleNode(('wiki', 'interwiki'), cfg.interwikiname)
 
-        # contributor
-        edattr = {}
-        if cfg.show_hosts:
-            edattr[(handler.xmlns['wiki'], 'host')] = item.hostname
-        if item.editor[0] == 'interwiki':
-            edname = "%s:%s" % item.editor[1]
-            ##edattr[(None, 'link')] = interwiki + wikiutil.quoteWikiname(edname)
-        else: # 'ip'
-            edname = item.editor[1]
-            ##edattr[(None, 'link')] = link + "?action=info"
-
-        # this edattr stuff, esp. None as first tuple element breaks things (tracebacks)
-        # if you know how to do this right, please send us a patch
-
-        handler.startNode(('dc', 'contributor'))
-        handler.startNode(('rdf', 'Description'), attr=edattr)
-        handler.simpleNode(('rdf', 'value'), edname)
-        handler.endNode(('rdf', 'Description'))
-        handler.endNode(('dc', 'contributor'))
+        handler.startNode('items')
+        handler.startNode(('rdf', 'Seq'))
+        for item in logdata:
+            link = "%s%s#%04d%02d%02d%02d%02d%02d" % ((interwiki,
+                    wikiutil.quoteWikinameURL(item.pagename),) + item.time[:6])
+            handler.simpleNode(('rdf', 'li'), None, attr={
+                (handler.xmlns['rdf'], 'resource'): link,
+            })
+        handler.endNode(('rdf', 'Seq'))
+        handler.endNode('items')
+        handler.endNode('channel')
 
-        # wiki extensions
-        handler.simpleNode(('wiki', 'version'), "%i" % (item.ed_time_usecs))
-        handler.simpleNode(('wiki', 'status'), ('deleted', 'updated')[page.exists()])
-        handler.simpleNode(('wiki', 'diff'), link + "?action=diff")
-        handler.simpleNode(('wiki', 'history'), link + "?action=info")
-        # handler.simpleNode(('wiki', 'importance'), ) # ( major | minor ) 
-        # handler.simpleNode(('wiki', 'version'), ) # ( #PCDATA ) 
-
-        handler.endNode('item')
-
-    # end SAX stream
-    handler.endDocument()
+        # emit logo data
+        if logo:
+            handler.startNode('image', attr={
+                (handler.xmlns['rdf'], 'about'): logo,
+                })
+            handler.simpleNode('title', cfg.sitename)
+            handler.simpleNode('link', interwiki)
+            handler.simpleNode('url', logo)
+            handler.endNode('image')
 
-    # generate an Expires header, using whatever setting the admin
-    # defined for suggested cache lifetime of the RecentChanges RSS doc
-    expires = timefuncs.formathttpdate(time.time() + cfg.rss_cache)
-
-    httpheaders = ["Content-Type: text/xml; charset=%s" % config.charset,
-                   "Expires: %s" % expires]
+        # emit items
+        for item in logdata:
+            page = Page(request, item.pagename)
+            link = interwiki + wikiutil.quoteWikinameURL(item.pagename)
+            rdflink = "%s#%04d%02d%02d%02d%02d%02d" % ((link,) + item.time[:6])
+            handler.startNode('item', attr={
+                (handler.xmlns['rdf'], 'about'): rdflink,
+            })
 
-    # use a correct Last-Modified header, set to whatever the mod date
-    # on the most recent page was; if there were no mods, don't send one
-    if lastmod:
-        httpheaders.append("Last-Modified: %s" % timefuncs.formathttpdate(lastmod))
+            # general attributes
+            handler.simpleNode('title', item.pagename)
+            if ddiffs:
+                handler.simpleNode('link', link+"?action=diff")
+            else:
+                handler.simpleNode('link', link)
 
-    # send the generated XML document
-    request.http_headers(httpheaders)
-    request.write(out.getvalue())
+            handler.simpleNode(('dc', 'date'), timefuncs.W3CDate(item.time))
 
+            # description
+            desc_text = item.comment
+            if diffs:
+                # TODO: rewrite / extend wikiutil.pagediff
+                # searching for the matching pages doesn't really belong here
+                revisions = page.getRevList()
+
+                rl = len(revisions)
+                for idx in range(rl):
+                    rev = revisions[idx]
+                    if rev <= item.rev:
+                        if idx+1 < rl:
+                            lines = wikiutil.pagediff(request, item.pagename, revisions[idx+1], item.pagename, 0, ignorews=1)
+                            if len(lines) > 20:
+                                lines = lines[:20] + ['...\n']
+                            lines = '\n'.join(lines)
+                            lines = wikiutil.escape(lines)
+                            desc_text = '%s\n<pre>\n%s\n</pre>\n' % (desc_text, lines)
+                        break
+            if desc_text:
+                handler.simpleNode('description', desc_text)
+
+            # contributor
+            edattr = {}
+            if cfg.show_hosts:
+                edattr[(handler.xmlns['wiki'], 'host')] = item.hostname
+            if item.editor[0] == 'interwiki':
+                edname = "%s:%s" % item.editor[1]
+                ##edattr[(None, 'link')] = interwiki + wikiutil.quoteWikiname(edname)
+            else: # 'ip'
+                edname = item.editor[1]
+                ##edattr[(None, 'link')] = link + "?action=info"
+
+            # this edattr stuff, esp. None as first tuple element breaks things (tracebacks)
+            # if you know how to do this right, please send us a patch
+
+            handler.startNode(('dc', 'contributor'))
+            handler.startNode(('rdf', 'Description'), attr=edattr)
+            handler.simpleNode(('rdf', 'value'), edname)
+            handler.endNode(('rdf', 'Description'))
+            handler.endNode(('dc', 'contributor'))
+
+            # wiki extensions
+            handler.simpleNode(('wiki', 'version'), "%i" % (item.ed_time_usecs))
+            handler.simpleNode(('wiki', 'status'), ('deleted', 'updated')[page.exists()])
+            handler.simpleNode(('wiki', 'diff'), link + "?action=diff")
+            handler.simpleNode(('wiki', 'history'), link + "?action=info")
+            # handler.simpleNode(('wiki', 'importance'), ) # ( major | minor ) 
+            # handler.simpleNode(('wiki', 'version'), ) # ( #PCDATA ) 
+
+            handler.endNode('item')
+
+        # end SAX stream
+        handler.endDocument()
+
+        request.write(out.getvalue())
+
--- a/MoinMoin/action/sitemap.py	Mon Aug 07 11:08:17 2006 +0200
+++ b/MoinMoin/action/sitemap.py	Tue Aug 08 08:49:47 2006 +0200
@@ -64,7 +64,7 @@
     request.user.datetime_fmt = datetime_fmt
     base = request.getBaseURL()
 
-    request.http_headers(["Content-Type: text/xml; charset=UTF-8"])
+    request.emit_http_headers(["Content-Type: text/xml; charset=UTF-8"])
 
     # we emit a piece of data so other side doesn't get bored:
     request.write("""<?xml version="1.0" encoding="UTF-8"?>\r\n""")
--- /dev/null	Thu Jan 01 00:00:00 1970 +0000
+++ b/MoinMoin/action/subscribe.py	Tue Aug 08 08:49:47 2006 +0200
@@ -0,0 +1,50 @@
+# -*- coding: iso-8859-1 -*-
+"""
+    MoinMoin - subscribe to a page to get notified when it changes
+
+    @copyright: 2000-2004 by Jürgen Hermann <jh@web.de>,
+                2006 by MoinMoin:ThomasWaldmann
+    @license: GNU GPL, see COPYING for details.
+"""
+from MoinMoin.Page import Page
+
+def execute(pagename, request):
+    """ Subscribe the user to pagename, or unsubscribe if already subscribed.
+
+    TODO: what if subscribing fails? no message is displayed.
+    """
+    _ = request.getText
+    cfg = request.cfg
+    msg = None
+
+    if not request.user.may.read(pagename):
+        msg = _("You are not allowed to subscribe to a page you can't read.")
+
+    # Check if mail is enabled
+    elif not cfg.mail_enabled:
+        msg = _("This wiki is not enabled for mail processing.")
+
+    # Suggest visitors to login
+    elif not request.user.valid:
+        msg = _("You must log in to use subscribtions.")
+
+    # Suggest users without email to add their email address
+    elif not request.user.email:
+        msg = _("Add your email address in your UserPreferences to use subscriptions.")
+
+    elif request.user.isSubscribedTo([pagename]):
+        # Try to unsubscribe
+        if request.user.unsubscribe(pagename):
+            msg = _('Your subscribtion to this page has been removed.')
+        else:
+            msg = _("Can't remove regular expression subscription!") + u' ' + \
+                  _("Edit the subscription regular expressions in your "
+                    "UserPreferences.")
+
+    else:
+        # Try to subscribe
+        if request.user.subscribe(pagename):
+            msg = _('You have been subscribed to this page.')
+
+    Page(request, pagename).send_page(request, msg=msg)
+
--- a/MoinMoin/action/test.py	Mon Aug 07 11:08:17 2006 +0200
+++ b/MoinMoin/action/test.py	Tue Aug 08 08:49:47 2006 +0200
@@ -105,7 +105,7 @@
     def do_action(self):
         """ run tests """
         request = self.request
-        request.http_headers(["Content-type: text/plain; charset=%s" % config.charset])
+        request.emit_http_headers(["Content-type: text/plain; charset=%s" % config.charset])
         request.write('MoinMoin Diagnosis\n======================\n\n')
         runTest(request)
         return True, ""
--- a/MoinMoin/action/thread_monitor.py	Mon Aug 07 11:08:17 2006 +0200
+++ b/MoinMoin/action/thread_monitor.py	Tue Aug 08 08:49:47 2006 +0200
@@ -27,11 +27,11 @@
     else:
         dump_fname = "nowhere"
 
-    request.http_headers()
+    request.emit_http_headers()
     request.write('<html><body>A dump has been saved to %s.</body></html>' % dump_fname)
 
 def execute_wiki(pagename, request):
-    request.http_headers()
+    request.emit_http_headers()
 
     request.theme.send_title("Thread monitor")
     request.write('<pre>')
--- a/MoinMoin/action/titleindex.py	Mon Aug 07 11:08:17 2006 +0200
+++ b/MoinMoin/action/titleindex.py	Tue Aug 08 08:49:47 2006 +0200
@@ -22,7 +22,7 @@
     else:
         mimetype = "text/plain"
 
-    request.http_headers(["Content-Type: %s; charset=%s" % (mimetype, config.charset)])
+    request.emit_http_headers(["Content-Type: %s; charset=%s" % (mimetype, config.charset)])
 
     # Get list of user readable pages
     pages = request.rootpage.getPageList()
--- a/MoinMoin/config/multiconfig.py	Mon Aug 07 11:08:17 2006 +0200
+++ b/MoinMoin/config/multiconfig.py	Tue Aug 08 08:49:47 2006 +0200
@@ -7,9 +7,14 @@
     @license: GNU GPL, see COPYING for details.
 """
 
-import re, os, sys
-from MoinMoin import error
+import re
+import os
+import sys
+import time
+
+from MoinMoin import error, util, wikiutil
 import MoinMoin.auth as authmodule
+from MoinMoin.packages import packLine
 
 _url_re_cache = None
 _farmconfig_mtime = None
@@ -549,6 +554,47 @@
         # check if mail is possible and set flag:
         self.mail_enabled = (self.mail_smarthost is not None or self.mail_sendmail is not None) and self.mail_from
 
+        # Cache variables for the properties below
+        self._iwid = self._iwid_full = self._meta_dict = None
+
+    def load_meta_dict(self):
+        """ The meta_dict contains metadata about the wiki instance. """
+        if getattr(self, "_meta_dict", None) is None:
+            self._meta_dict = wikiutil.MetaDict(os.path.join(self.data_dir, 'meta'), self.cache_dir)
+        return self._meta_dict
+    meta_dict = property(load_meta_dict)
+
+    # lazily load iwid(_full)
+    def make_iwid_property(attr):
+        def getter(self):
+            if getattr(self, attr, None) is None:
+                self.load_IWID()
+            return getattr(self, attr)
+        return property(getter)
+    iwid = make_iwid_property("_iwid")
+    iwid_full = make_iwid_property("_iwid_full")
+
+    def load_IWID(self):
+        """ Loads the InterWikiID of this instance. It is used to identify the instance
+            globally.
+            The IWID is available as cfg.iwid
+            The full IWID containing the interwiki name is available as cfg.iwid_full
+            This method is called by the property.
+        """
+
+        try:
+            iwid = self.meta_dict['IWID']
+        except KeyError:
+            iwid = util.random_string(16).encode("hex") + "-" + str(int(time.time()))
+            self.meta_dict['IWID'] = iwid
+            self.meta_dict.sync()
+
+        self._iwid = iwid
+        if self.interwikiname is not None:
+            self._iwid_full = packLine([iwid, self.interwikiname])
+        else:
+            self._iwid_full = packLine([iwid])
+
     def _config_check(self):
         """ Check namespace and warn about unknown names
         
--- a/MoinMoin/converter/__init__.py	Mon Aug 07 11:08:17 2006 +0200
+++ b/MoinMoin/converter/__init__.py	Tue Aug 08 08:49:47 2006 +0200
@@ -6,8 +6,3 @@
     @license: GNU GPL, see COPYING for details.
 """
 
-from MoinMoin.util import pysupport
-
-# create a list of extension converters from the subpackage directory
-extension_converters = pysupport.getPackageModules(__file__)
-modules = extension_converters
--- a/MoinMoin/filter/__init__.py	Mon Aug 07 11:08:17 2006 +0200
+++ b/MoinMoin/filter/__init__.py	Tue Aug 08 08:49:47 2006 +0200
@@ -9,8 +9,7 @@
 import os
 from MoinMoin.util import pysupport
 
-filters = pysupport.getPackageModules(__file__)
-modules = filters
+modules = pysupport.getPackageModules(__file__)
 
 standard_codings = ['utf-8', 'iso-8859-15', 'iso-8859-1', ]
 
--- a/MoinMoin/formatter/text_python.py	Mon Aug 07 11:08:17 2006 +0200
+++ b/MoinMoin/formatter/text_python.py	Tue Aug 08 08:49:47 2006 +0200
@@ -54,8 +54,9 @@
         waspcode_timestamp = int(time.time())
         source = ["""
 moincode_timestamp = int(os.path.getmtime(os.path.dirname(__file__)))
-if moincode_timestamp > %d or request.cfg.cfg_mtime > %d:
-    raise "CacheNeedsUpdate"
+cfg_mtime = getattr(request.cfg, "cfg_mtime", None)
+if moincode_timestamp > %d or cfg_mtime is None or cfg_mtime > %d:
+    raise Exception("CacheNeedsUpdate")
 """ % (waspcode_timestamp, waspcode_timestamp)]
 
 
--- a/MoinMoin/i18n/__init__.py	Mon Aug 07 11:08:17 2006 +0200
+++ b/MoinMoin/i18n/__init__.py	Tue Aug 08 08:49:47 2006 +0200
@@ -61,10 +61,11 @@
         The very first time, this will be slow as it will load all languages,
         but next time it will be fast due to caching.
     """
+    request.clock.start('i18n_init')
     global languages
     if languages is None:
         meta_cache = caching.CacheEntry(request, 'i18n', 'meta', scope='farm')
-        i18n_dir = os.path.join(request.cfg.moinmoin_dir, 'i18n', 'mo')
+        i18n_dir = os.path.join(request.cfg.moinmoin_dir, 'i18n')
         if meta_cache.needsUpdate(i18n_dir):
             _languages = {}
             for lang_file in glob.glob(po_filename(request, language='*', domain='MoinMoin')): # only MoinMoin domain for now XXX
@@ -85,6 +86,7 @@
         _languages = pickle.loads(meta_cache.content())
         if languages is None:
             languages = _languages
+    request.clock.stop('i18n_init')
 
 
 class Translation(object):
@@ -162,6 +164,7 @@
         return text
 
     def loadLanguage(self, request):
+        request.clock.start('loadLanguage')
         cache = caching.CacheEntry(request, arena='i18n', key=self.language, scope='farm')
         langfilename = po_filename(request, self.language, self.domain)
         needsupdate = cache.needsUpdate(langfilename)
@@ -203,6 +206,7 @@
 
         self.formatted = uc_texts
         self.raw = uc_unformatted
+        request.clock.stop('loadLanguage')
 
 
 def getDirection(lang):
--- a/MoinMoin/logfile/__init__.py	Mon Aug 07 11:08:17 2006 +0200
+++ b/MoinMoin/logfile/__init__.py	Tue Aug 08 08:49:47 2006 +0200
@@ -6,10 +6,6 @@
     @license: GNU GPL, see COPYING for details.
 """
 
-from MoinMoin.util import pysupport
-
-logfiles = pysupport.getPackageModules(__file__)
-
 import os, codecs, errno
 from MoinMoin import config, wikiutil
 
--- a/MoinMoin/macro/FullSearch.py	Mon Aug 07 11:08:17 2006 +0200
+++ b/MoinMoin/macro/FullSearch.py	Tue Aug 08 08:49:47 2006 +0200
@@ -32,13 +32,63 @@
 
 Dependencies = ["pages"]
 
+
+def search_box(type, macro):
+    """ Make a search box
+
+    Make both Title Search and Full Search boxes, according to type.
+
+    @param type: search box type: 'titlesearch' or 'fullsearch'
+    @rtype: unicode
+    @return: search box html fragment
+    """
+    _ = macro._
+    if macro.form.has_key('value'):
+        default = wikiutil.escape(macro.form["value"][0], quote=1)
+    else:
+        default = ''
+
+    # Title search settings
+    boxes = ''
+    button = _("Search Titles")
+
+    # Special code for fullsearch
+    if type == "fullsearch":
+        boxes = [
+            u'<br>',
+            u'<input type="checkbox" name="context" value="160" checked="checked">',
+            _('Display context of search results'),
+            u'<br>',
+            u'<input type="checkbox" name="case" value="1">',
+            _('Case-sensitive searching'),
+            ]
+        boxes = u'\n'.join(boxes)
+        button = _("Search Text")
+
+    # Format
+    type = (type == "titlesearch")
+    html = [
+        u'<form method="get" action="">',
+        u'<div>',
+        u'<input type="hidden" name="action" value="fullsearch">',
+        u'<input type="hidden" name="titlesearch" value="%i">' % type,
+        u'<input type="text" name="value" size="30" value="%s">' % default,
+        u'<input type="submit" value="%s">' % button,
+        boxes,
+        u'</div>',
+        u'</form>',
+        ]
+    html = u'\n'.join(html)
+    return macro.formatter.rawHTML(html)
+
+
 def execute(macro, needle):
     request = macro.request
     _ = request.getText
 
     # if no args given, invoke "classic" behavior
     if needle is None:
-        return macro._m_search("fullsearch")
+        return search_box("fullsearch", macro)
 
     # With empty arguments, simulate title click (backlinks to page)
     elif needle == '':
--- a/MoinMoin/macro/SystemInfo.py	Mon Aug 07 11:08:17 2006 +0200
+++ b/MoinMoin/macro/SystemInfo.py	Tue Aug 08 08:49:47 2006 +0200
@@ -17,7 +17,6 @@
 from MoinMoin import action, macro, parser
 from MoinMoin.logfile import editlog, eventlog
 from MoinMoin.Page import Page
-from MoinMoin.util import timefuncs
 
 def execute(Macro, args):
     """ show SystemInfo: wiki infos, wiki sw version, space usage infos """
@@ -98,15 +97,16 @@
     nonestr = _("NONE")
     row('Event log', _formatInReadableUnits(eventlogger.size()))
 
-    row(_('Global extension macros'), ', '.join(macro.extension_macros) or nonestr)
+    row(_('Global extension macros'), ', '.join(macro.modules) or nonestr)
     row(_('Local extension macros'),
         ', '.join(wikiutil.wikiPlugins('macro', Macro.cfg)) or nonestr)
 
-    ext_actions = [x for x in action.extension_actions
+    glob_actions = [x for x in action.modules
+                    if not x in request.cfg.actions_excluded]
+    row(_('Global extension actions'), ', '.join(glob_actions) or nonestr)
+    loc_actions = [x for x in wikiutil.wikiPlugins('action', Macro.cfg)
                    if not x in request.cfg.actions_excluded]
-    row(_('Global extension actions'), ', '.join(ext_actions) or nonestr)
-    row(_('Local extension actions'),
-        ', '.join(action.getPlugins(request)[1]) or nonestr)
+    row(_('Local extension actions'), ', '.join(loc_actions) or nonestr)
 
     row(_('Global parsers'), ', '.join(parser.modules) or nonestr)
     row(_('Local extension parsers'),
@@ -118,11 +118,13 @@
     idx = Search._xapianIndex(request)
     available = idx and idxState[0] or idxState[1]
     mtime = _('last modified: %s') % (idx and
-            timefuncs.formathttpdate(idx.mtime()) or _('unavailable'))
+            request.user.getFormattedDateTime(
+                wikiutil.version2timestamp(idx.mtime())) or
+                _('N/A'))
     row(_('Xapian search'), '%s, %s, %s'
             % (xapState[request.cfg.xapian_search], available, mtime))
 
-    row(_('Active threads'), t_count or 'N/A')
+    row(_('Active threads'), t_count or _('N/A'))
     buf.write(u'</dl>')
 
     return Macro.formatter.rawHTML(buf.getvalue())
--- a/MoinMoin/macro/__init__.py	Mon Aug 07 11:08:17 2006 +0200
+++ b/MoinMoin/macro/__init__.py	Tue Aug 08 08:49:47 2006 +0200
@@ -17,9 +17,7 @@
 """
 
 from MoinMoin.util import pysupport
-
-extension_macros = pysupport.getPackageModules(__file__)
-modules = extension_macros
+modules = pysupport.getPackageModules(__file__)
 
 import re, time, os
 from MoinMoin import action, config, util
@@ -145,55 +143,8 @@
             return self.defaultDependency
 
     def _macro_TitleSearch(self, args):
-        return self._m_search("titlesearch")
-
-    def _m_search(self, type):
-        """ Make a search box
-
-        Make both Title Search and Full Search boxes, according to type.
-
-        @param type: search box type: 'titlesearch' or 'fullsearch'
-        @rtype: unicode
-        @return: search box html fragment
-        """
-        _ = self._
-        if self.form.has_key('value'):
-            default = wikiutil.escape(self.form["value"][0], quote=1)
-        else:
-            default = ''
-
-        # Title search settings
-        boxes = ''
-        button = _("Search Titles")
-
-        # Special code for fullsearch
-        if type == "fullsearch":
-            boxes = [
-                u'<br>',
-                u'<input type="checkbox" name="context" value="160" checked="checked">',
-                _('Display context of search results'),
-                u'<br>',
-                u'<input type="checkbox" name="case" value="1">',
-                _('Case-sensitive searching'),
-                ]
-            boxes = u'\n'.join(boxes)
-            button = _("Search Text")
-
-        # Format
-        type = (type == "titlesearch")
-        html = [
-            u'<form method="get" action="">',
-            u'<div>',
-            u'<input type="hidden" name="action" value="fullsearch">',
-            u'<input type="hidden" name="titlesearch" value="%i">' % type,
-            u'<input type="text" name="value" size="30" value="%s">' % default,
-            u'<input type="submit" value="%s">' % button,
-            boxes,
-            u'</div>',
-            u'</form>',
-            ]
-        html = u'\n'.join(html)
-        return self.formatter.rawHTML(html)
+        from FullSearch import search_box
+        return search_box("titlesearch", self)
 
     def _macro_GoTo(self, args):
         """ Make a goto box
--- /dev/null	Thu Jan 01 00:00:00 1970 +0000
+++ b/MoinMoin/multiconfig.py	Tue Aug 08 08:49:47 2006 +0200
@@ -0,0 +1,28 @@
+""" This is just a dummy file to overwrite MoinMoin/multiconfig.py(c) from a
+    previous moin installation.
+
+    The file moved to MoinMoin/config/multiconfig.py and you have to fix your
+    imports as shown below.
+
+    Alternatively, you can temporarily set show_configuration_error = False,
+    so some compatibility code will get activated.
+    But this compatibility code will get removed soon, so you really should
+    update your config as soon as possible.
+"""
+show_configuration_error = True
+
+if show_configuration_error:
+    from MoinMoin.error import ConfigurationError
+    raise ConfigurationError("""\
+Please edit your wikiconfig/farmconfig and fix your DefaultConfig import:\r\n
+\r\n
+Old:   from MoinMoin.multiconfig import DefaultConfig\r\n
+New:   from MoinMoin.config.multiconfig import DefaultConfig\r\n
+\r\n
+If you can't do that, but if you can change the MoinMoin code, see the file
+MoinMoin/multiconfig.py for an alternative, but temporary workaround.
+""")
+
+else:
+    from MoinMoin.config.multiconfig import *
+
--- a/MoinMoin/parser/ParserBase.py	Mon Aug 07 11:08:17 2006 +0200
+++ /dev/null	Thu Jan 01 00:00:00 1970 +0000
@@ -1,270 +0,0 @@
-# -*- coding: iso-8859-1 -*-
-"""
-	MoinMoin - Base Source Parser
-
-    @copyright: 2002 by Taesu Pyo <bigflood@hitel.net>
-    @license: GNU GPL, see COPYING for details.
-
-    Docstrings and some refactoring by Oliver Graf <ograf@bitart.de>
-
-basic css:
-
-pre.codearea     { font-style: sans-serif; color: #000000; }
-
-pre.codearea span.ID       { color: #000000; }
-pre.codearea span.Char     { color: #004080; }
-pre.codearea span.Comment  { color: #808080; }
-pre.codearea span.Number   { color: #008080; font-weight: bold; }
-pre.codearea span.String   { color: #004080; }
-pre.codearea span.SPChar   { color: #0000C0; }
-pre.codearea span.ResWord  { color: #4040ff; font-weight: bold; }
-pre.codearea span.ConsWord { color: #008080; font-weight: bold; }
-
-"""
-
-import re, sys, sha
-from MoinMoin import config, wikiutil
-
-def parse_start_step(request, args):
-    """
-    Parses common Colorizer parameters start, step, numbers.
-    Uses L{wikiutil.parseAttributes} and sanitizes the results.
-
-    Start and step must be a non negative number and default to 1,
-    numbers might be on, off, or none and defaults to on. On or off
-    means that numbers are switchable via JavaScript (html formatter),
-    disabled means that numbers are disabled completely.
-
-    attrdict is returned as last element in the tuple, to enable the
-    calling parser to extract further arguments.
-
-    @param request: a request instance
-    @param args: the argument string
-
-    @returns: numbers, start, step, attrdict
-    """
-    nums, start, step = 1, 1, 1
-    attrs, msg = wikiutil.parseAttributes(request, args)
-    if not msg:
-        try:
-            start = int(attrs.get('start','"1"')[1:-1])
-        except ValueError:
-            pass
-        try:
-            step = int(attrs.get('step','"1"')[1:-1])
-        except ValueError:
-            pass
-        if attrs.get('numbers','"on"')[1:-1].lower() in ('off', 'false', 'no'):
-            nums = 0
-        elif attrs.get('numbers','"on"')[1:-1].lower() in ('none', 'disable'):
-            nums = -1
-    return nums, start, step, attrs
-
-class FormatTextBase:
-    pass
-
-class FormatText(FormatTextBase):
-    
-    def __init__(self, fmt):
-        self.fmt = fmt
-
-    def formatString(self, formatter, word):
-        return (formatter.code_token(1, self.fmt) +
-                formatter.text(word) +
-                formatter.code_token(0, self.fmt))
-
-class FormatTextID(FormatTextBase):
-    
-    def __init__(self, fmt, icase=0):
-        if not isinstance(fmt, FormatText):
-            self.def_fmt = FormatText(fmt)
-        else:
-            self.def_fmt = fmt
-        self._ignore_case = icase
-        self.fmt = {}
-
-    def addFormat(self, word, fmt):
-        if self._ignore_case:
-            word = word.lower()
-        self.fmt[word] = fmt
-        
-    def setDefaultFormat(self, fmt):
-        self.def_fmt = fmt
-        
-    def formatString(self, formatter, word):
-        if self._ignore_case:
-            sword = word.lower()
-        else:
-            sword = word
-        return self.fmt.get(sword,self.def_fmt).formatString(formatter, word)
-
-class FormattingRuleSingle:
-    
-    def __init__(self, name, str_re, icase=0):
-        self.name = name
-        self.str_re = str_re
-        
-    def getStartRe(self):
-        return self.str_re
-    
-    def getText(self, parser, hit):
-        return hit
-
-class FormattingRulePair:
-    
-    def __init__(self, name, str_begin, str_end, icase=0):
-        self.name = name
-        self.str_begin = str_begin
-        self.str_end = str_end
-        if icase:
-            self.end_re = re.compile(str_end, re.M|re.I)
-        else:
-            self.end_re = re.compile(str_end, re.M)
-        
-    def getStartRe(self):
-        return self.str_begin
-    
-    def getText(self, parser, hit):
-        match = self.end_re.search(parser.line, parser.lastpos)
-        if not match:
-            next_lastpos = len(parser.line)
-        else:
-            next_lastpos = match.end() + (match.end() == parser.lastpos)
-        r = parser.line[parser.lastpos:next_lastpos]
-        parser.lastpos = next_lastpos
-        return hit + r
-
-
-# ------------------------------------------------------------------------
-
-class ParserBase:
-
-    parsername = 'ParserBase'
-    
-    def __init__(self, raw, request, **kw):
-        self.raw = raw
-        self.request = request
-        self.show_nums, self.num_start, self.num_step, attrs = parse_start_step(request, kw.get('format_args',''))
-
-        self._ignore_case = 0
-        self._formatting_rules = []
-        self._formatting_rules_n2r = {}
-        self._formatting_rule_index = 0
-        self.rule_fmt = {}
-        self.line_count = len(raw.split('\n'))+1
-
-    def setupRules(self):
-        self.def_format = FormatText('Default')
-        self.ID_format = FormatTextID('ID', self._ignore_case)
-        self.addRuleFormat("ID",self.ID_format)
-        self.addRuleFormat("Operator")
-        self.addRuleFormat("Char")
-        self.addRuleFormat("Comment")
-        self.addRuleFormat("Number")
-        self.addRuleFormat("String")
-        self.addRuleFormat("SPChar")
-        self.addRuleFormat("ResWord")
-        self.addRuleFormat("ResWord2")
-        self.addRuleFormat("ConsWord")
-        self.addRuleFormat("Special")
-        self.addRuleFormat("Preprc")
-        self.addRuleFormat("Error")
-        self.reserved_word_format = FormatText('ResWord')
-        self.constant_word_format = FormatText('ConsWord')
-
-    def addRule(self, name, str_re):
-        self._formatting_rule_index += 1
-        n = "%s_%s" % (name, self._formatting_rule_index)
-        f = FormattingRuleSingle(name, str_re, self._ignore_case)
-        self._formatting_rules.append((n,f))
-        self._formatting_rules_n2r[n] = f
-
-    def addRulePair(self, name, start_re, end_re):
-        self._formatting_rule_index += 1
-        n = "%s_%s" % (name,self._formatting_rule_index)
-        f = FormattingRulePair(name, start_re, end_re, self._ignore_case)
-        self._formatting_rules.append((n,f))
-        self._formatting_rules_n2r[n] = f
-
-    def addWords(self, words, fmt):
-        if not isinstance(fmt,FormatTextBase):
-            fmt = FormatText(fmt)
-        for w in words:
-            self.ID_format.addFormat(w, fmt)
-
-    def addReserved(self, words):
-        self.addWords(words, self.reserved_word_format)
-
-    def addConstant(self, words):
-        self.addWords(words, self.constant_word_format)
-        
-    def addRuleFormat(self, name, fmt=None):
-        if fmt is None:
-            fmt = FormatText(name)
-        self.rule_fmt[name] = fmt
-
-    def format(self, formatter, form = None):
-        """ Send the text.
-        """
-
-        self.setupRules()
-
-        l = []
-        for n,f in self._formatting_rules:
-            l.append("(?P<%s>%s)" % (n,f.getStartRe()))
-        
-        if self._ignore_case:
-            scan_re = re.compile("|".join(l),re.M|re.I)
-        else:
-            scan_re = re.compile("|".join(l),re.M)
-
-        self.lastpos = 0
-        self.line = self.raw
-
-        self._code_id = sha.new(self.raw.encode(config.charset)).hexdigest()
-        self.request.write(formatter.code_area(1, self._code_id, self.parsername, self.show_nums, self.num_start, self.num_step))
-
-        self.request.write(formatter.code_line(1))
-            #formatter, len('%d' % (self.line_count,)))
-        
-        match = scan_re.search(self.line)
-
-        while match and self.lastpos < len(self.line):
-            # add the match we found
-            self.write_normal_text(formatter,
-                                   self.line[self.lastpos:match.start()])
-            self.lastpos = match.end() + (match.end() == self.lastpos)
-
-            self.write_match(formatter, match)
-
-            # search for the next one
-            match = scan_re.search(self.line, self.lastpos)
-
-        self.write_normal_text(formatter, self.line[self.lastpos:])
-
-        self.request.write(formatter.code_area(0, self._code_id))
-
-
-    def write_normal_text(self, formatter, text):
-        first = 1
-        for line in text.expandtabs(4).split('\n'):
-            if not first:
-                self.request.write(formatter.code_line(1))
-            else:
-                first = 0
-            self.request.write(formatter.text(line))
-
-    def write_match(self, formatter, match):
-        for n, hit in match.groupdict().items():
-            if not hit: continue
-            r = self._formatting_rules_n2r[n]
-            s = r.getText(self, hit)
-            c = self.rule_fmt.get(r.name,None)
-            if not c: c = self.def_format
-            first = 1
-            for line in s.expandtabs(4).split('\n'):
-                if not first:
-                    self.request.write(formatter.code_line(1))
-                else:
-                    first = 0
-                self.request.write(c.formatString(formatter, line))
--- /dev/null	Thu Jan 01 00:00:00 1970 +0000
+++ b/MoinMoin/parser/_ParserBase.py	Tue Aug 08 08:49:47 2006 +0200
@@ -0,0 +1,270 @@
+# -*- coding: iso-8859-1 -*-
+"""
+	MoinMoin - Base Source Parser
+
+    @copyright: 2002 by Taesu Pyo <bigflood@hitel.net>
+    @license: GNU GPL, see COPYING for details.
+
+    Docstrings and some refactoring by Oliver Graf <ograf@bitart.de>
+
+basic css:
+
+pre.codearea     { font-style: sans-serif; color: #000000; }
+
+pre.codearea span.ID       { color: #000000; }
+pre.codearea span.Char     { color: #004080; }
+pre.codearea span.Comment  { color: #808080; }
+pre.codearea span.Number   { color: #008080; font-weight: bold; }
+pre.codearea span.String   { color: #004080; }
+pre.codearea span.SPChar   { color: #0000C0; }
+pre.codearea span.ResWord  { color: #4040ff; font-weight: bold; }
+pre.codearea span.ConsWord { color: #008080; font-weight: bold; }
+
+"""
+
+import re, sys, sha
+from MoinMoin import config, wikiutil
+
+def parse_start_step(request, args):
+    """
+    Parses common Colorizer parameters start, step, numbers.
+    Uses L{wikiutil.parseAttributes} and sanitizes the results.
+
+    Start and step must be a non negative number and default to 1,
+    numbers might be on, off, or none and defaults to on. On or off
+    means that numbers are switchable via JavaScript (html formatter),
+    disabled means that numbers are disabled completely.
+
+    attrdict is returned as last element in the tuple, to enable the
+    calling parser to extract further arguments.
+
+    @param request: a request instance
+    @param args: the argument string
+
+    @returns: numbers, start, step, attrdict
+    """
+    nums, start, step = 1, 1, 1
+    attrs, msg = wikiutil.parseAttributes(request, args)
+    if not msg:
+        try:
+            start = int(attrs.get('start','"1"')[1:-1])
+        except ValueError:
+            pass
+        try:
+            step = int(attrs.get('step','"1"')[1:-1])
+        except ValueError:
+            pass
+        if attrs.get('numbers','"on"')[1:-1].lower() in ('off', 'false', 'no'):
+            nums = 0
+        elif attrs.get('numbers','"on"')[1:-1].lower() in ('none', 'disable'):
+            nums = -1
+    return nums, start, step, attrs
+
+class FormatTextBase:
+    pass
+
+class FormatText(FormatTextBase):
+    
+    def __init__(self, fmt):
+        self.fmt = fmt
+
+    def formatString(self, formatter, word):
+        return (formatter.code_token(1, self.fmt) +
+                formatter.text(word) +
+                formatter.code_token(0, self.fmt))
+
+class FormatTextID(FormatTextBase):
+    
+    def __init__(self, fmt, icase=0):
+        if not isinstance(fmt, FormatText):
+            self.def_fmt = FormatText(fmt)
+        else:
+            self.def_fmt = fmt
+        self._ignore_case = icase
+        self.fmt = {}
+
+    def addFormat(self, word, fmt):
+        if self._ignore_case:
+            word = word.lower()
+        self.fmt[word] = fmt
+        
+    def setDefaultFormat(self, fmt):
+        self.def_fmt = fmt
+        
+    def formatString(self, formatter, word):
+        if self._ignore_case:
+            sword = word.lower()
+        else:
+            sword = word
+        return self.fmt.get(sword,self.def_fmt).formatString(formatter, word)
+
+class FormattingRuleSingle:
+    
+    def __init__(self, name, str_re, icase=0):
+        self.name = name
+        self.str_re = str_re
+        
+    def getStartRe(self):
+        return self.str_re
+    
+    def getText(self, parser, hit):
+        return hit
+
+class FormattingRulePair:
+    
+    def __init__(self, name, str_begin, str_end, icase=0):
+        self.name = name
+        self.str_begin = str_begin
+        self.str_end = str_end
+        if icase:
+            self.end_re = re.compile(str_end, re.M|re.I)
+        else:
+            self.end_re = re.compile(str_end, re.M)
+        
+    def getStartRe(self):
+        return self.str_begin
+    
+    def getText(self, parser, hit):
+        match = self.end_re.search(parser.line, parser.lastpos)
+        if not match:
+            next_lastpos = len(parser.line)
+        else:
+            next_lastpos = match.end() + (match.end() == parser.lastpos)
+        r = parser.line[parser.lastpos:next_lastpos]
+        parser.lastpos = next_lastpos
+        return hit + r
+
+
+# ------------------------------------------------------------------------
+
+class ParserBase:
+
+    parsername = 'ParserBase'
+    
+    def __init__(self, raw, request, **kw):
+        self.raw = raw
+        self.request = request
+        self.show_nums, self.num_start, self.num_step, attrs = parse_start_step(request, kw.get('format_args',''))
+
+        self._ignore_case = 0
+        self._formatting_rules = []
+        self._formatting_rules_n2r = {}
+        self._formatting_rule_index = 0
+        self.rule_fmt = {}
+        self.line_count = len(raw.split('\n'))+1
+
+    def setupRules(self):
+        self.def_format = FormatText('Default')
+        self.ID_format = FormatTextID('ID', self._ignore_case)
+        self.addRuleFormat("ID",self.ID_format)
+        self.addRuleFormat("Operator")
+        self.addRuleFormat("Char")
+        self.addRuleFormat("Comment")
+        self.addRuleFormat("Number")
+        self.addRuleFormat("String")
+        self.addRuleFormat("SPChar")
+        self.addRuleFormat("ResWord")
+        self.addRuleFormat("ResWord2")
+        self.addRuleFormat("ConsWord")
+        self.addRuleFormat("Special")
+        self.addRuleFormat("Preprc")
+        self.addRuleFormat("Error")
+        self.reserved_word_format = FormatText('ResWord')
+        self.constant_word_format = FormatText('ConsWord')
+
+    def addRule(self, name, str_re):
+        self._formatting_rule_index += 1
+        n = "%s_%s" % (name, self._formatting_rule_index)
+        f = FormattingRuleSingle(name, str_re, self._ignore_case)
+        self._formatting_rules.append((n,f))
+        self._formatting_rules_n2r[n] = f
+
+    def addRulePair(self, name, start_re, end_re):
+        self._formatting_rule_index += 1
+        n = "%s_%s" % (name,self._formatting_rule_index)
+        f = FormattingRulePair(name, start_re, end_re, self._ignore_case)
+        self._formatting_rules.append((n,f))
+        self._formatting_rules_n2r[n] = f
+
+    def addWords(self, words, fmt):
+        if not isinstance(fmt,FormatTextBase):
+            fmt = FormatText(fmt)
+        for w in words:
+            self.ID_format.addFormat(w, fmt)
+
+    def addReserved(self, words):
+        self.addWords(words, self.reserved_word_format)
+
+    def addConstant(self, words):
+        self.addWords(words, self.constant_word_format)
+        
+    def addRuleFormat(self, name, fmt=None):
+        if fmt is None:
+            fmt = FormatText(name)
+        self.rule_fmt[name] = fmt
+
+    def format(self, formatter, form = None):
+        """ Send the text.
+        """
+
+        self.setupRules()
+
+        l = []
+        for n,f in self._formatting_rules:
+            l.append("(?P<%s>%s)" % (n,f.getStartRe()))
+        
+        if self._ignore_case:
+            scan_re = re.compile("|".join(l),re.M|re.I)
+        else:
+            scan_re = re.compile("|".join(l),re.M)
+
+        self.lastpos = 0
+        self.line = self.raw
+
+        self._code_id = sha.new(self.raw.encode(config.charset)).hexdigest()
+        self.request.write(formatter.code_area(1, self._code_id, self.parsername, self.show_nums, self.num_start, self.num_step))
+
+        self.request.write(formatter.code_line(1))
+            #formatter, len('%d' % (self.line_count,)))
+        
+        match = scan_re.search(self.line)
+
+        while match and self.lastpos < len(self.line):
+            # add the match we found
+            self.write_normal_text(formatter,
+                                   self.line[self.lastpos:match.start()])
+            self.lastpos = match.end() + (match.end() == self.lastpos)
+
+            self.write_match(formatter, match)
+
+            # search for the next one
+            match = scan_re.search(self.line, self.lastpos)
+
+        self.write_normal_text(formatter, self.line[self.lastpos:])
+
+        self.request.write(formatter.code_area(0, self._code_id))
+
+
+    def write_normal_text(self, formatter, text):
+        first = 1
+        for line in text.expandtabs(4).split('\n'):
+            if not first:
+                self.request.write(formatter.code_line(1))
+            else:
+                first = 0
+            self.request.write(formatter.text(line))
+
+    def write_match(self, formatter, match):
+        for n, hit in match.groupdict().items():
+            if not hit: continue
+            r = self._formatting_rules_n2r[n]
+            s = r.getText(self, hit)
+            c = self.rule_fmt.get(r.name,None)
+            if not c: c = self.def_format
+            first = 1
+            for line in s.expandtabs(4).split('\n'):
+                if not first:
+                    self.request.write(formatter.code_line(1))
+                else:
+                    first = 0
+                self.request.write(c.formatString(formatter, line))
--- a/MoinMoin/parser/__init__.py	Mon Aug 07 11:08:17 2006 +0200
+++ b/MoinMoin/parser/__init__.py	Tue Aug 08 08:49:47 2006 +0200
@@ -9,5 +9,5 @@
     @license: GNU GPL, see COPYING for details.
 """
 from MoinMoin.util import pysupport
+modules = pysupport.getPackageModules(__file__)
 
-modules = pysupport.getPackageModules(__file__)
--- a/MoinMoin/parser/text_cplusplus.py	Mon Aug 07 11:08:17 2006 +0200
+++ b/MoinMoin/parser/text_cplusplus.py	Tue Aug 08 08:49:47 2006 +0200
@@ -23,7 +23,7 @@
 
 """
 
-from MoinMoin.parser.ParserBase import ParserBase
+from MoinMoin.parser._ParserBase import ParserBase
 
 Dependencies = []
 
--- a/MoinMoin/parser/text_diff.py	Mon Aug 07 11:08:17 2006 +0200
+++ b/MoinMoin/parser/text_diff.py	Tue Aug 08 08:49:47 2006 +0200
@@ -7,7 +7,7 @@
     @license: GNU GPL, see COPYING for details.
 """
 
-from MoinMoin.parser.ParserBase import ParserBase
+from MoinMoin.parser._ParserBase import ParserBase
 
 class Parser(ParserBase):
     parsername = "ColorizedDiff"
--- a/MoinMoin/parser/text_java.py	Mon Aug 07 11:08:17 2006 +0200
+++ b/MoinMoin/parser/text_java.py	Tue Aug 08 08:49:47 2006 +0200
@@ -7,7 +7,7 @@
 
 """
 
-from MoinMoin.parser.ParserBase import ParserBase
+from MoinMoin.parser._ParserBase import ParserBase
 
 Dependencies = []
 
--- a/MoinMoin/parser/text_pascal.py	Mon Aug 07 11:08:17 2006 +0200
+++ b/MoinMoin/parser/text_pascal.py	Tue Aug 08 08:49:47 2006 +0200
@@ -6,7 +6,7 @@
     @license: GNU GPL, see COPYING for details.
 """
 
-from MoinMoin.parser.ParserBase import ParserBase
+from MoinMoin.parser._ParserBase import ParserBase
 
 Dependencies = []
 
--- a/MoinMoin/parser/text_python.py	Mon Aug 07 11:08:17 2006 +0200
+++ b/MoinMoin/parser/text_python.py	Tue Aug 08 08:49:47 2006 +0200
@@ -9,7 +9,7 @@
 import StringIO
 import keyword, token, tokenize, sha
 from MoinMoin import config, wikiutil
-from MoinMoin.parser.ParserBase import parse_start_step
+from MoinMoin.parser._ParserBase import parse_start_step
 
 _KEYWORD = token.NT_OFFSET + 1
 _TEXT    = token.NT_OFFSET + 2
--- a/MoinMoin/parser/text_rst.py	Mon Aug 07 11:08:17 2006 +0200
+++ b/MoinMoin/parser/text_rst.py	Tue Aug 08 08:49:47 2006 +0200
@@ -381,8 +381,13 @@
                     node['classes'].append(prefix)
             else:
                 # Default case - make a link to a wiki page.
-                page = Page(self.request, refuri)
-                node['refuri'] = page.url(self.request)
+                pagename = refuri
+                anchor = ''
+                if refuri.find('#') != -1:
+                    pagename, anchor = refuri.split('#', 1)
+                    anchor = '#' + anchor
+                page = Page(self.request, pagename)
+                node['refuri'] = page.url(self.request) + anchor
                 if not page.exists():
                     node['classes'].append('nonexistent')
         html4css1.HTMLTranslator.visit_reference(self, node)
--- a/MoinMoin/request/CGI.py	Mon Aug 07 11:08:17 2006 +0200
+++ b/MoinMoin/request/CGI.py	Tue Aug 08 08:49:47 2006 +0200
@@ -63,33 +63,9 @@
             import errno
             if ex.errno != errno.EPIPE: raise
 
-    # Headers ----------------------------------------------------------
-
-    def http_headers(self, more_headers=[]):
-        # Send only once
-        if getattr(self, 'sent_headers', None):
-            return
-
-        self.sent_headers = 1
-        have_ct = 0
+    def _emit_http_headers(self, headers):
+        """ private method to send out preprocessed list of HTTP headers """
+        for header in headers:
+            self.write("%s\r\n" % header)
+        self.write("\r\n")
 
-        # send http headers
-        for header in more_headers + getattr(self, 'user_headers', []):
-            if header.lower().startswith("content-type:"):
-                # don't send content-type multiple times!
-                if have_ct: continue
-                have_ct = 1
-            if type(header) is unicode:
-                header = header.encode('ascii')
-            self.write("%s\r\n" % header)
-
-        if not have_ct:
-            self.write("Content-type: text/html;charset=%s\r\n" % config.charset)
-
-        self.write('\r\n')
-
-        #from pprint import pformat
-        #sys.stderr.write(pformat(more_headers))
-        #sys.stderr.write(pformat(self.user_headers))
-
-
--- a/MoinMoin/request/CLI.py	Mon Aug 07 11:08:17 2006 +0200
+++ b/MoinMoin/request/CLI.py	Tue Aug 08 08:49:47 2006 +0200
@@ -26,6 +26,8 @@
         self.http_host = 'localhost'
         self.http_referer = ''
         self.script_name = '.'
+        self.if_modified_since = None
+        self.if_none_match = None
         RequestBase.__init__(self, properties)
         self.cfg.caching_formats = [] # don't spoil the cache
         self.initTheme() # usually request.run() does this, but we don't use it
@@ -78,7 +80,8 @@
     def setHttpHeader(self, header):
         pass
 
-    def http_headers(self, more_headers=[]):
+    def _emit_http_headers(self, headers):
+        """ private method to send out preprocessed list of HTTP headers """
         pass
 
     def http_redirect(self, url):
--- a/MoinMoin/request/FCGI.py	Mon Aug 07 11:08:17 2006 +0200
+++ b/MoinMoin/request/FCGI.py	Tue Aug 08 08:49:47 2006 +0200
@@ -56,31 +56,9 @@
         RequestBase.finish(self)
         self.fcgreq.finish()
 
-    # Headers ----------------------------------------------------------
-
-    def http_headers(self, more_headers=[]):
-        """ Send out HTTP headers. Possibly set a default content-type. """
-        if getattr(self, 'sent_headers', None):
-            return
-        self.sent_headers = 1
-        have_ct = 0
+    def _emit_http_headers(self, headers):
+        """ private method to send out preprocessed list of HTTP headers """
+        for header in headers:
+            self.write("%s\r\n" % header)
+        self.write("\r\n")
 
-        # send http headers
-        for header in more_headers + getattr(self, 'user_headers', []):
-            if type(header) is unicode:
-                header = header.encode('ascii')
-            if header.lower().startswith("content-type:"):
-                # don't send content-type multiple times!
-                if have_ct: continue
-                have_ct = 1
-            self.write("%s\r\n" % header)
-
-        if not have_ct:
-            self.write("Content-type: text/html;charset=%s\r\n" % config.charset)
-
-        self.write('\r\n')
-
-        #from pprint import pformat
-        #sys.stderr.write(pformat(more_headers))
-        #sys.stderr.write(pformat(self.user_headers))
-
--- a/MoinMoin/request/MODPYTHON.py	Mon Aug 07 11:08:17 2006 +0200
+++ b/MoinMoin/request/MODPYTHON.py	Tue Aug 08 08:49:47 2006 +0200
@@ -138,51 +138,14 @@
         from mod_python import apache
         return apache.OK
 
-    # Headers ----------------------------------------------------------
-
-    def setHttpHeader(self, header):
-        """ Filters out content-type and status to set them directly
-            in the mod_python request. Rest is put into the headers_out
-            member of the mod_python request.
-
-            @param header: string, containing valid HTTP header.
-        """
-        if type(header) is unicode:
-            header = header.encode('ascii')
-        key, value = header.split(':', 1)
-        value = value.lstrip()
-        if key.lower() == 'content-type':
-            # save content-type for http_headers
-            if not self._have_ct:
-                # we only use the first content-type!
-                self.mpyreq.content_type = value
-                self._have_ct = 1
-        elif key.lower() == 'status':
-            # save status for finish
-            try:
-                self.mpyreq.status = int(value.split(' ', 1)[0])
-            except:
-                pass
-            else:
-                self._have_status = 1
-        else:
-            # this is a header we sent out
+    def _emit_http_headers(self, headers):
+        """ private method to send out preprocessed list of HTTP headers """
+        st_header, ct_header, other_headers = headers[0], headers[1], headers[2:]
+        status = st_header.split(':', 1)[1].lstrip()
+        self.mpyreq.status = int(status.split(' ', 1)[0])
+        self.mpyreq.content_type = ct_header.split(':', 1)[1].lstrip()
+        for key, value in ((k, v.lstrip()) for k, v in (h.split(':', 1) for h in other_headers)):
             self.mpyreq.headers_out[key] = value
-
-    def http_headers(self, more_headers=[]):
-        """ Sends out headers and possibly sets default content-type
-            and status.
-
-            @param more_headers: list of strings, defaults to []
-        """
-        for header in more_headers + getattr(self, 'user_headers', []):
-            self.setHttpHeader(header)
-        # if we don't had an content-type header, set text/html
-        if self._have_ct == 0:
-            self.mpyreq.content_type = "text/html;charset=%s" % config.charset
-        # if we don't had a status header, set 200
-        if self._have_status == 0:
-            self.mpyreq.status = 200
         # this is for mod_python 2.7.X, for 3.X it's a NOP
         self.mpyreq.send_http_header()
 
--- a/MoinMoin/request/STANDALONE.py	Mon Aug 07 11:08:17 2006 +0200
+++ b/MoinMoin/request/STANDALONE.py	Tue Aug 08 08:49:47 2006 +0200
@@ -33,6 +33,8 @@
             self.http_user_agent = sa.headers.getheader('user-agent', '')
             co = filter(None, sa.headers.getheaders('cookie'))
             self.saved_cookie = ', '.join(co) or ''
+            self.if_modified_since = sa.headers.getheader('if-modified-since')
+            self.if_none_match = sa.headers.getheader('if-none-match')
 
             # Copy rest from standalone request   
             self.server_name = sa.server.server_name
@@ -90,44 +92,16 @@
 
     # Headers ----------------------------------------------------------
 
-    def http_headers(self, more_headers=[]):
-        if getattr(self, 'sent_headers', None):
-            return
-
-        self.sent_headers = 1
-        user_headers = getattr(self, 'user_headers', [])
-
-        # check for status header and send it
-        our_status = 200
-        for header in more_headers + user_headers:
-            if header.lower().startswith("status:"):
-                try:
-                    our_status = int(header.split(':', 1)[1].strip().split(" ", 1)[0])
-                except:
-                    pass
-                # there should be only one!
-                break
-        # send response
-        self.sareq.send_response(our_status)
+    def _emit_http_headers(self, headers):
+        """ private method to send out preprocessed list of HTTP headers """
+        st_header, other_headers = headers[0], headers[1:]
+        status = st_header.split(':', 1)[1].lstrip()
+        status_code, status_msg = (status.split(' ', 1) + [''])[:2]
+        status_code = int(status_code)
+        self.sareq.send_response(status_code, status_msg)
+        for header in other_headers:
+            key, value = header.split(':', 1)
+            value = value.lstrip()
+            self.sareq.send_header(key, value)
+        self.sareq.end_headers()
 
-        # send http headers
-        have_ct = 0
-        for header in more_headers + user_headers:
-            if type(header) is unicode:
-                header = header.encode('ascii')
-            if header.lower().startswith("content-type:"):
-                # don't send content-type multiple times!
-                if have_ct: continue
-                have_ct = 1
-
-            self.write("%s\r\n" % header)
-
-        if not have_ct:
-            self.write("Content-type: text/html;charset=%s\r\n" % config.charset)
-
-        self.write('\r\n')
-
-        #from pprint import pformat
-        #sys.stderr.write(pformat(more_headers))
-        #sys.stderr.write(pformat(self.user_headers))
-
--- a/MoinMoin/request/TWISTED.py	Mon Aug 07 11:08:17 2006 +0200
+++ b/MoinMoin/request/TWISTED.py	Tue Aug 08 08:49:47 2006 +0200
@@ -23,6 +23,8 @@
             self.http_accept_language = self.twistd.getHeader('Accept-Language')
             self.saved_cookie = self.twistd.getHeader('Cookie')
             self.http_user_agent = self.twistd.getHeader('User-Agent')
+            self.if_modified_since = self.twistd.getHeader('If-Modified-Since')
+            self.if_none_match = self.twistd.getHeader('If-None-Match')
 
             # Copy values from twisted request
             self.server_protocol = self.twistd.clientproto
@@ -46,7 +48,7 @@
             RequestBase.__init__(self, properties)
 
         except MoinMoinFinish: # might be triggered by http_redirect
-            self.http_headers() # send headers (important for sending MOIN_ID cookie)
+            self.emit_http_headers() # send headers (important for sending MOIN_ID cookie)
             self.finish()
 
         except Exception, err:
@@ -110,34 +112,20 @@
 
     # Headers ----------------------------------------------------------
 
-    def __setHttpHeader(self, header):
-        if type(header) is unicode:
-            header = header.encode('ascii')
-        key, value = header.split(':', 1)
-        value = value.lstrip()
-        if key.lower() == 'set-cookie':
-            key, value = value.split('=', 1)
-            self.twistd.addCookie(key, value)
-        else:
-            self.twistd.setHeader(key, value)
-        #print "request.RequestTwisted.setHttpHeader: %s" % header
-
-    def http_headers(self, more_headers=[]):
-        if getattr(self, 'sent_headers', None):
-            return
-        self.sent_headers = 1
-        have_ct = 0
-
-        # set http headers
-        for header in more_headers + getattr(self, 'user_headers', []):
-            if header.lower().startswith("content-type:"):
-                # don't send content-type multiple times!
-                if have_ct: continue
-                have_ct = 1
-            self.__setHttpHeader(header)
-
-        if not have_ct:
-            self.__setHttpHeader("Content-type: text/html;charset=%s" % config.charset)
+    def _emit_http_headers(self, headers):
+        """ private method to send out preprocessed list of HTTP headers """
+        st_header, other_headers = headers[0], headers[1:]
+        status = st_header.split(':', 1)[1].lstrip()
+        status_code, status_msg = (status.split(' ', 1) + [''])[:2]
+        self.twistd.setResponseCode(int(status_code), status_msg)
+        for header in other_headers:
+            key, value = header.split(':', 1)
+            value = value.lstrip()
+            if key.lower() == 'set-cookie':
+                key, value = value.split('=', 1)
+                self.twistd.addCookie(key, value)
+            else:
+                self.twistd.setHeader(key, value)
 
     def http_redirect(self, url):
         """ Redirect to a fully qualified, or server-rooted URL 
@@ -151,6 +139,3 @@
         #self.twistd.finish()
         raise MoinMoinFinish
 
-    def setResponseCode(self, code, message=None):
-        self.twistd.setResponseCode(code, message)
-
--- a/MoinMoin/request/WSGI.py	Mon Aug 07 11:08:17 2006 +0200
+++ b/MoinMoin/request/WSGI.py	Tue Aug 08 08:49:47 2006 +0200
@@ -21,6 +21,7 @@
             self.stdin = env['wsgi.input']
             self.stdout = StringIO.StringIO()
 
+            # used by MoinMoin.server.wsgi:
             self.status = '200 OK'
             self.headers = []
 
@@ -48,33 +49,14 @@
     def reset_output(self):
         self.stdout = StringIO.StringIO()
 
-    def setHttpHeader(self, header):
-        if type(header) is unicode:
-            header = header.encode('ascii')
-
-        key, value = header.split(':', 1)
-        value = value.lstrip()
-        if key.lower() == 'content-type':
-            # save content-type for http_headers
-            if self.hasContentType:
-                # we only use the first content-type!
-                return
-            else:
-                self.hasContentType = True
-
-        elif key.lower() == 'status':
-            # save status for finish
-            self.status = value
-            return
-
-        self.headers.append((key, value))
-
-    def http_headers(self, more_headers=[]):
-        for header in more_headers:
-            self.setHttpHeader(header)
-
-        if not self.hasContentType:
-            self.headers.insert(0, ('Content-Type', 'text/html;charset=%s' % config.charset))
+    def _emit_http_headers(self, headers):
+        """ private method to send out preprocessed list of HTTP headers """
+        st_header, other_headers = headers[0], headers[1:]
+        self.status = st_header.split(':', 1)[1].lstrip()
+        for header in other_headers:
+            key, value = header.split(':', 1)
+            value = value.lstrip()
+            self.headers.append((key, value))
 
     def flush(self):
         pass
@@ -83,6 +65,7 @@
         pass
 
     def output(self):
+        # called by MoinMoin.server.wsgi
         return self.stdout.getvalue()
 
 
--- a/MoinMoin/request/__init__.py	Mon Aug 07 11:08:17 2006 +0200
+++ b/MoinMoin/request/__init__.py	Tue Aug 08 08:49:47 2006 +0200
@@ -9,7 +9,7 @@
 
 import os, re, time, sys, cgi, StringIO
 import copy
-from MoinMoin import config, wikiutil, user, caching
+from MoinMoin import config, wikiutil, user, caching, error
 from MoinMoin.util import IsWin9x
 
 
@@ -17,31 +17,64 @@
 
 class MoinMoinFinish(Exception):
     """ Raised to jump directly to end of run() function, where finish is called """
-    pass
+
+
+class HeadersAlreadySentException(Exception):
+    """ Is raised if the headers were already sent when emit_http_headers is called."""
+
 
 # Timing ---------------------------------------------------------------
 
 class Clock:
     """ Helper class for code profiling
         we do not use time.clock() as this does not work across threads
+        This is not thread-safe when it comes to multiple starts for one timer.
+        It is possible to recursively call the start and stop methods, you
+        should just ensure that you call them often enough :)
     """
 
     def __init__(self):
-        self.timings = {'total': time.time()}
+        self.timings = {}
+        self.states = {}
+
+    def _get_name(timer, generation):
+        if generation == 0:
+            return timer
+        else:
+            return "%s|%i" % (timer, generation)
+    _get_name = staticmethod(_get_name)
 
     def start(self, timer):
-        self.timings[timer] = time.time() - self.timings.get(timer, 0)
+        state = self.states.setdefault(timer, -1)
+        new_level = state + 1
+        name = Clock._get_name(timer, new_level)
+        self.timings[name] = time.time() - self.timings.get(name, 0)
+        self.states[timer] = new_level
 
     def stop(self, timer):
-        self.timings[timer] = time.time() - self.timings[timer]
+        state = self.states.setdefault(timer, -1)
+        if state >= 0: # timer is active
+            name = Clock._get_name(timer, state)
+            self.timings[name] = time.time() - self.timings[name]
+            self.states[timer] = state - 1
 
     def value(self, timer):
-        return "%.3f" % (self.timings[timer], )
+        base_timer = timer.split("|")[0]
+        state = self.states.get(base_timer, None)
+        if state == -1:
+            result = "%.3fs" % self.timings[timer]
+        elif state is None:
+            result = "- (%s)" % state
+        else:
+            print "Got state %r" % state
+            result = "%.3fs (still running)" % (time.time() - self.timings[timer])
+        return result
 
     def dump(self):
         outlist = []
-        for timing in self.timings.items():
-            outlist.append("%s = %.3fs" % timing)
+        for timer in self.timings.keys():
+            value = self.value(timer)
+            outlist.append("%s = %s" % (timer, value))
         outlist.sort()
         return outlist
 
@@ -93,7 +126,6 @@
         # Pages meta data that we collect in one request
         self.pages = {}
 
-        self.sent_headers = 0
         self.user_headers = []
         self.cacheable = 0 # may this output get cached by http proxies/caches?
         self.page = None
@@ -115,6 +147,8 @@
         else:
             self.writestack = []
             self.clock = Clock()
+            self.clock.start('total')
+            self.clock.start('base__init__')
             # order is important here!
             self.__dict__.update(properties)
             self._load_multi_cfg()
@@ -151,7 +185,7 @@
 
             rootname = u''
             self.rootpage = Page(self, rootname, is_rootpage=1)
-
+            
             from MoinMoin import i18n
             self.i18n = i18n
             i18n.i18n_init(self)
@@ -176,6 +210,7 @@
 
             self.opened_logs = 0
             self.reset()
+            self.clock.stop('base__init__')
 
     def surge_protect(self):
         """ check if someone requesting too much from us """
@@ -265,8 +300,10 @@
     def _load_multi_cfg(self):
         # protect against calling multiple times
         if not hasattr(self, 'cfg'):
+            self.clock.start('load_multi_cfg')
             from MoinMoin.config import multiconfig
             self.cfg = multiconfig.getConfig(self.url)
+            self.clock.stop('load_multi_cfg')
 
     def setAcceptedCharsets(self, accept_charset):
         """ Set accepted_charsets by parsing accept-charset header
@@ -312,8 +349,7 @@
         """
         # Values we can just copy
         self.env = env
-        self.http_accept_language = env.get('HTTP_ACCEPT_LANGUAGE',
-                                            self.http_accept_language)
+        self.http_accept_language = env.get('HTTP_ACCEPT_LANGUAGE', self.http_accept_language)
         self.server_name = env.get('SERVER_NAME', self.server_name)
         self.server_port = env.get('SERVER_PORT', self.server_port)
         self.saved_cookie = env.get('HTTP_COOKIE', '')
@@ -323,6 +359,8 @@
         self.request_method = env.get('REQUEST_METHOD', None)
         self.remote_addr = env.get('REMOTE_ADDR', '')
         self.http_user_agent = env.get('HTTP_USER_AGENT', '')
+        self.if_modified_since = env.get('If-modified-since') or env.get(cgiMetaVariable('If-modified-since'))
+        self.if_none_match = env.get('If-none-match') or env.get(cgiMetaVariable('If-none-match'))
 
         # REQUEST_URI is not part of CGI spec, but an addition of Apache.
         self.request_uri = env.get('REQUEST_URI', '')
@@ -333,8 +371,7 @@
         self.setHost(env.get('HTTP_HOST'))
         self.fixURI(env)
         self.setURL(env)
-
-        ##self.debugEnvironment(env)
+        #self.debugEnvironment(env)
 
     def setHttpReferer(self, referer):
         """ Set http_referer, making sure its ascii
@@ -618,18 +655,6 @@
             return ''
         return self.script_name
 
-    def getPageNameFromQueryString(self):
-        """ Try to get pagename from the query string
-        
-        Support urls like http://netloc/script/?page_name. Allow
-        solving path_info encoding problems by calling with the page
-        name as a query.
-        """
-        pagename = wikiutil.url_unquote(self.query_string, want_unicode=False)
-        pagename = self.decodePagename(pagename)
-        pagename = self.normalizePagename(pagename)
-        return pagename
-
     def getKnownActions(self):
         """ Create a dict of avaiable actions
 
@@ -642,21 +667,9 @@
             self.cfg._known_actions # check
         except AttributeError:
             from MoinMoin import action
-            # Add built in actions
-            actions = [name[3:] for name in action.__dict__ if name.startswith('do_')]
-
-            # Add plugins           
-            dummy, plugins = action.getPlugins(self)
-            actions.extend(plugins)
+            self.cfg._known_actions = set(action.getNames(self.cfg))
 
-            # Add extensions
-            actions.extend(action.extension_actions)
-
-            # TODO: Use set when we require Python 2.3
-            actions = dict(zip(actions, [''] * len(actions)))
-            self.cfg._known_actions = actions
-
-        # Return a copy, so clients will not change the dict.
+        # Return a copy, so clients will not change the set.
         return self.cfg._known_actions.copy()
 
     def getAvailableActions(self, page):
@@ -677,14 +690,10 @@
 
             # Filter non ui actions (starts with lower case letter)
             actions = self.getKnownActions()
-            for key in actions.keys():
-                if key[0].islower():
-                    del actions[key]
+            actions = [action for action in actions if not action[0].islower()]
 
             # Filter wiki excluded actions
-            for key in self.cfg.actions_excluded:
-                if key in actions:
-                    del actions[key]
+            actions = [action for action in actions if not action in self.cfg.actions_excluded]
 
             # Filter actions by page type, acl and user state
             excluded = []
@@ -694,11 +703,9 @@
                 # Prevent modification of underlay only pages, or pages
                 # the user can't write and can't delete
                 excluded = [u'RenamePage', u'DeletePage', ] # AttachFile must NOT be here!
-            for key in excluded:
-                if key in actions:
-                    del actions[key]
+            actions = [action for action in actions if not action in excluded]
 
-            self._available_actions = actions
+            self._available_actions = set(actions)
 
         # Return a copy, so clients will not change the dict.
         return self._available_actions.copy()
@@ -981,13 +988,12 @@
         }
         headers = [
             'Status: %d %s' % (resultcode, statusmsg[resultcode]),
-            'Content-Type: text/plain'
+            'Content-Type: text/plain; charset=utf-8'
         ]
         # when surge protection triggered, tell bots to come back later...
         if resultcode == 503:
             headers.append('Retry-After: %d' % self.cfg.surge_lockout_time)
-        self.http_headers(headers)
-        self.setResponseCode(resultcode)
+        self.emit_http_headers(headers)
         self.write(msg)
         self.forbidden = True
 
@@ -1081,8 +1087,6 @@
 
             # 3. Or handle action
             else:
-                if not pagename and self.query_string:
-                    pagename = self.getPageNameFromQueryString()
                 # pagename could be empty after normalization e.g. '///' -> ''
                 # Use localized FrontPage if pagename is empty
                 if not pagename:
@@ -1129,7 +1133,85 @@
         @param url: relative or absolute url, ascii using url encoding.
         """
         url = self.getQualifiedURL(url)
-        self.http_headers(["Status: 302 Found", "Location: %s" % url])
+        self.emit_http_headers(["Status: 302 Found", "Location: %s" % url])
+
+    def http_headers(self, more_headers=[]):
+        """ wrapper for old, deprecated http_headers call,
+            new code only calls emit_http_headers.
+            Remove in moin 1.7.
+        """
+        self.emit_http_headers(more_headers)
+
+    def emit_http_headers(self, more_headers=[]):
+        """ emit http headers after some preprocessing / checking
+
+            Makes sure we only emit headers once.
+            Encodes to ASCII if it gets unicode headers.
+            Make sure we have exactly one Content-Type and one Status header.
+            Make sure Status header string begins with a integer number.
+        
+            For emitting, it calls the server specific _emit_http_headers
+            method.
+
+            @param more_headers: list of additional header strings
+        """
+        user_headers = getattr(self, 'user_headers', [])
+        self.user_headers = []
+        all_headers = more_headers + user_headers
+
+        # Send headers only once
+        sent_headers = getattr(self, 'sent_headers', 0)
+        self.sent_headers = sent_headers + 1
+        if sent_headers:
+            raise HeadersAlreadySentException("emit_http_headers called multiple (%d) times! Headers: %r" % (sent_headers, all_headers))
+        #else:
+        #    self.log("Notice: emit_http_headers called first time. Headers: %r" % all_headers)
+
+        content_type = None
+        status = None
+        headers = []
+        # assemble complete list of http headers
+        for header in all_headers:
+            if isinstance(header, unicode):
+                header = header.encode('ascii')
+            key, value = header.split(':', 1)
+            lkey = key.lower()
+            value = value.lstrip()
+            if content_type is None and lkey == "content-type":
+                content_type = value
+            elif status is None and lkey == "status":
+                status = value
+            else:
+                headers.append(header)
+
+        if content_type is None:
+            content_type = "text/html; charset=%s" % config.charset
+        ct_header = "Content-type: %s" % content_type
+
+        if status is None:
+            status = "200 OK"
+        try:
+            int(status.split(" ", 1)[0])
+        except:
+            self.log("emit_http_headers called with invalid header Status: %s" % status)
+            status = "500 Server Error - invalid status header"
+        st_header = "Status: %s" % status
+
+        headers = [st_header, ct_header] + headers # do NOT change order!
+        self._emit_http_headers(headers)
+
+        #from pprint import pformat
+        #sys.stderr.write(pformat(headers))
+
+    def _emit_http_headers(self, headers):
+        """ server specific method to emit http headers.
+        
+            @param headers: a list of http header strings in this FIXED order:
+                1. status header (always present and valid, e.g. "200 OK")
+                2. content type header (always present)
+                3. other headers (optional)
+        """
+        raise NotImplementedError
 
     def setHttpHeader(self, header):
         """ Save header for later send.
@@ -1140,6 +1222,9 @@
         self.user_headers.append(header)
 
     def setResponseCode(self, code, message=None):
+        """ DEPRECATED, will vanish in moin 1.7,
+            just use a Status: <code> <message> header and emit_http_headers.
+        """
         pass
 
     def fail(self, err):
@@ -1153,8 +1238,9 @@
         @param err: Exception instance or subclass.
         """
         self.failed = 1 # save state for self.run()            
-        self.http_headers(['Status: 500 MoinMoin Internal Error'])
-        self.setResponseCode(500)
+        # we should not generate the headers two times
+        if not getattr(self, 'sent_headers', 0):
+            self.emit_http_headers(['Status: 500 MoinMoin Internal Error'])
         self.log('%s: %s' % (err.__class__.__name__, str(err)))
         from MoinMoin import failure
         failure.handle(self)
@@ -1291,7 +1377,7 @@
             environment.append('  %s = %r\n' % (key, env[key]))
         environment = ''.join(environment)
 
-        data = '\nRequest Attributes\n%s\nEnviroment\n%s' % (attributes, environment)
+        data = '\nRequest Attributes\n%s\nEnvironment\n%s' % (attributes, environment)
         f = open('/tmp/env.log', 'a')
         try:
             f.write(data)
--- a/MoinMoin/script/__init__.py	Mon Aug 07 11:08:17 2006 +0200
+++ b/MoinMoin/script/__init__.py	Tue Aug 08 08:49:47 2006 +0200
@@ -7,12 +7,6 @@
     @license: GNU GPL, see COPYING for details.
 """
 
-from MoinMoin.util import pysupport
-
-# create a list of extension scripts from the subpackage directory
-extension_scripts = pysupport.getPackageModules(__file__)
-modules = extension_scripts
-
 import os, sys, time
 
 flag_quiet = 0
--- a/MoinMoin/script/migration/data.py	Mon Aug 07 11:08:17 2006 +0200
+++ b/MoinMoin/script/migration/data.py	Tue Aug 08 08:49:47 2006 +0200
@@ -39,7 +39,7 @@
         meta_fname = os.path.join(data_dir, 'meta')
         while True:
             try:
-                meta = wikiutil.MetaDict(meta_fname)
+                meta = wikiutil.MetaDict(meta_fname, request.cfg.cache_dir)
                 try:
                     curr_rev = meta['data_format_revision']
                     mig_name = str(curr_rev)
--- a/MoinMoin/search/Xapian.py	Mon Aug 07 11:08:17 2006 +0200
+++ b/MoinMoin/search/Xapian.py	Tue Aug 08 08:49:47 2006 +0200
@@ -173,6 +173,7 @@
         'stem_lang': 'XSTEMLANG', # ISO Language code this document was stemmed in
         'category': 'XCAT', # category this document belongs to
         'full_title': 'XFT', # full title (for regex)
+        'domain': 'XDOMAIN', # standard or underlay
                        #Y   year (four digits)
     }
 
@@ -335,6 +336,12 @@
         return [cat.lower()
                 for cat in re.findall(r'Category([^\s]+)', body[pos:])]
 
+    def _get_domains(self, page):
+        if page.isUnderlayPage():
+            yield 'underlay'
+        if page.isStandardPage():
+            yield 'standard'
+
     def _index_page(self, writer, page, mode='update'):
         """ Index a page - assumes that the write lock is acquired
             @arg writer: the index writer object
@@ -351,6 +358,7 @@
         # XXX: Hack until we get proper metadata
         language, stem_language = self._get_languages(page)
         categories = self._get_categories(page)
+        domains = tuple(self._get_domains(page))
         updated = False
 
         if mode == 'update':
@@ -385,6 +393,8 @@
                 xkeywords.append(xapdoc.Keyword('linkto', pagelink))
             for category in categories:
                 xkeywords.append(xapdoc.Keyword('category', category))
+            for domain in domains:
+                xkeywords.append(xapdoc.Keyword('domain', domain))
             xcontent = xapdoc.TextField('content', page.get_raw_body())
             doc = xapdoc.Document(textFields=(xcontent, xtitle),
                                   keywords=xkeywords,
--- a/MoinMoin/search/builtin.py	Mon Aug 07 11:08:17 2006 +0200
+++ b/MoinMoin/search/builtin.py	Tue Aug 08 08:49:47 2006 +0200
@@ -159,7 +159,7 @@
         ##    self.indexPagesInNewThread(request)
 
     def _main_dir(self):
-        raise NotImplemented
+        raise NotImplementedError
 
     def exists(self):
         """ Check if index exists """        
@@ -172,7 +172,7 @@
         os.utime(self.dir, None)
     
     def _search(self, query):
-        raise NotImplemented
+        raise NotImplementedError
 
     def search(self, query):
         #if not self.read_lock.acquire(1.0):
@@ -243,7 +243,7 @@
         When called in a new thread, lock is acquired before the call,
         and this method must release it when it finishes or fails.
         """
-        raise NotImplemented
+        raise NotImplementedError
 
     def _do_queued_updates_InNewThread(self):
         """ do queued index updates in a new thread
@@ -283,10 +283,10 @@
             raise
 
     def _do_queued_updates(self, request, amount=5):
-        raise NotImplemented
+        raise NotImplementedError
 
     def optimize(self):
-        raise NotImplemented
+        raise NotImplementedError
 
     def contentfilter(self, filename):
         """ Get a filter for content of filename and return unicode content. """
@@ -311,7 +311,7 @@
         return mt.mime_type(), data
 
     def test(self, request):
-        raise NotImplemented
+        raise NotImplementedError
 
     def _indexingRequest(self, request):
         """ Return a new request that can be used for index building.
@@ -421,10 +421,12 @@
                 pass
             #except AttributeError:
             #    pages = []
-            self.request.clock.stop('_xapianSearch')
 
-            if not self.query.xapian_need_postproc():
-                return self._getHits(hits, self._xapianMatch)
+            try:
+                if not self.query.xapian_need_postproc():
+                    return self._getHits(hits, self._xapianMatch)
+            finally:
+                self.request.clock.stop('_xapianSearch')
         
         return self._moinSearch(pages)
 
--- a/MoinMoin/search/results.py	Mon Aug 07 11:08:17 2006 +0200
+++ b/MoinMoin/search/results.py	Tue Aug 08 08:49:47 2006 +0200
@@ -277,13 +277,18 @@
         """
         _ = request.getText
         output = [
-            formatter.paragraph(1),
-            formatter.text(_("Hits %(hitsFrom)d to %(hitsTo)d "
-                "from %(hits)d results out of about %(pages)d pages.") %
+            formatter.paragraph(1, attr={'class': 'searchstats'}),
+            _("Results %(bs)s%(hitsFrom)d - %(hitsTo)d%(be)s "
+                    "of about %(bs)s%(hits)d%(be)s results out of about "
+                    "%(pages)d pages.") %
                    {'hits': len(self.hits), 'pages': self.pages,
-                       'hitsFrom': hitsFrom + 1,
-                       'hitsTo': hitsFrom + request.cfg.search_results_per_page}),
-            u' (%s)' % formatter.text(_("%.2f seconds") % self.elapsed),
+                    'hitsFrom': hitsFrom + 1,
+                    'hitsTo': hitsFrom + request.cfg.search_results_per_page,
+                    'bs': formatter.strong(1), 'be': formatter.strong(0)},
+            u' (%s %s)' % (''.join([formatter.strong(1),
+                formatter.text("%.2f" % self.elapsed),
+                formatter.strong(0)]),
+                formatter.text(_("seconds"))),
             formatter.paragraph(0),
             ]
         return ''.join(output)
@@ -373,6 +378,7 @@
         self._reset(request, formatter)
         f = formatter
         write = self.buffer.write
+        _ = request.getText
         
         # Add pages formatted as definition list
         if self.hits:
@@ -413,6 +419,17 @@
                     f.definition_desc(1),
                     fmt_context,
                     f.definition_desc(0),
+                    f.definition_desc(1, attr={'class': 'searchresinfobar'}),
+                    f.text('%.1fk - ' % (page.page.size()/1024.0)),
+                    f.text('rev: %d %s- ' % (page.page.get_real_rev(),
+                        not page.page.rev and '(%s) ' % _('current') or '')),
+                    f.text('last modified: %(time)s - ' % page.page.lastEditInfo()),
+                    # XXX: proper metadata
+                    #f.text('lang: %s - ' % page.page.language),
+                    f.url(1, href='#'),
+                    f.text(_('Similar pages')),
+                    f.url(0),
+                    f.definition_desc(0),
                     ]
                 write(''.join(item))
             write(f.definition_list(0))
@@ -627,6 +644,10 @@
             return ''.join(output)
         return ''
 
+    def _img_url(self, img):
+        cfg = self.request.cfg
+        return '%s/%s/img/%s.png' % (cfg.url_prefix, self.request.theme.name, img)
+
     def formatPrevNextPageLinks(self, hitsFrom, hitsPerPage, hitsNum):
         """ Format previous and next page links in page
 
@@ -638,27 +659,93 @@
         """
         _ = self.request.getText
         f = self.formatter
+
+        # url magic
         from_re = r'\&from=[\d]+'
         uri = re.sub(from_re, '', self.request.request_uri)
-        from_uri = lambda n: '%s&from=%i' % (uri, n)
+        page_url = lambda n: '%s&from=%i' % (uri, n * hitsPerPage)
+        
+        pages = float(hitsNum) / hitsPerPage
+        if pages - int(pages) > 0.0:
+            pages = int(pages) + 1
+        cur_page = hitsFrom / hitsPerPage
         l = []
-        if hitsFrom > 0:                        # previous page available
-            n = hitsFrom - hitsPerPage
-            if n < 0: n = 0
+
+        # previous page available
+        if cur_page > 0:
             l.append(''.join([
-                f.url(1, href=from_uri(n)),
-                _('Previous Page'),
+                f.url(1, href=page_url(cur_page-1)),
+                f.text(_('Previous')),
                 f.url(0)
             ]))
-        if hitsFrom + hitsPerPage < hitsNum:    # next page available
-            n = hitsFrom + hitsPerPage
-            if n >= hitsNum: n = hitsNum - 1
+        else:
+            l.append('')
+
+        # list of pages to be shown
+        page_range = range(*(
+            cur_page - 4 < 0 and
+                (0, pages >= 10 and 10 or pages) or
+                (cur_page - 4, cur_page + 6 >= pages and
+                    pages or cur_page + 6)))
+        l.extend([''.join([
+                i != cur_page and f.url(1, href=page_url(i)) or '',
+                f.text(str(i+1)),
+                i != cur_page and f.url(0) or '',
+            ]) for i in page_range])
+
+        # next page available
+        if cur_page < pages-1:
             l.append(''.join([
-                f.url(1, href=from_uri(n)),
-                _('Next Page'),
+                f.url(1, href=page_url(cur_page+1)),
+                f.text(_('Next')),
                 f.url(0)
             ]))
-        return f.text(' | ').join(l)
+        else:
+            l.append('')
+
+        return ''.join([
+            f.table(1, attrs={'tableclass': 'searchpages'}),
+            f.table_row(1),
+                f.table_cell(1),
+                # first image, previous page
+                l[0] and
+                    ''.join([
+                        f.url(1, href=page_url(cur_page-1)),
+                        f.image(self._img_url('nav_prev')),
+                        f.url(0),
+                    ]) or
+                    f.image(self._img_url('nav_first')),
+                f.table_cell(0),
+                # images for ooos, highlighted current page
+                ''.join([
+                    ''.join([
+                        f.table_cell(1),
+                        i != cur_page and f.url(1, href=page_url(i)) or '',
+                        f.image(self._img_url(i == cur_page and
+                            'nav_current' or 'nav_page')),
+                        i != cur_page and f.url(0) or '',
+                        f.table_cell(0),
+                    ]) for i in page_range
+                ]),
+                f.table_cell(1),
+                # last image, next page
+                l[-1] and
+                    ''.join([
+                        f.url(1, href=page_url(cur_page+1)),
+                        f.image(self._img_url('nav_next')),
+                        f.url(0),
+                    ]) or
+                    f.image(self._img_url('nav_last')),
+                f.table_cell(0),
+            f.table_row(0),
+            f.table_row(1),
+                f.table_cell(1),
+                # textlinks
+                (f.table_cell(0) + f.table_cell(1)).join(l),
+                f.table_cell(0),
+            f.table_row(0),
+            f.table(0),
+        ])
 
     def querystring(self, querydict=None):
         """ Return query string, used in the page link """
--- a/MoinMoin/stats/hitcounts.py	Mon Aug 07 11:08:17 2006 +0200
+++ b/MoinMoin/stats/hitcounts.py	Tue Aug 08 08:49:47 2006 +0200
@@ -243,12 +243,11 @@
         (request.cfg.chart_options['width'], request.cfg.chart_options['height']),
         image, days)
 
-    # send HTTP headers
     headers = [
         "Content-Type: image/gif",
         "Content-Length: %d" % len(image.getvalue()),
     ]
-    request.http_headers(headers)
+    request.emit_http_headers(headers)
 
     # copy the image
     image.reset()
--- a/MoinMoin/stats/pagesize.py	Mon Aug 07 11:08:17 2006 +0200
+++ b/MoinMoin/stats/pagesize.py	Tue Aug 08 08:49:47 2006 +0200
@@ -114,12 +114,11 @@
         (request.cfg.chart_options['width'], request.cfg.chart_options['height']),
         image, labels)
 
-    # send HTTP headers
     headers = [
         "Content-Type: image/gif",
         "Content-Length: %d" % len(image.getvalue()),
     ]
-    request.http_headers(headers)
+    request.emit_http_headers(headers)
 
     # copy the image
     image.reset()
--- a/MoinMoin/stats/useragents.py	Mon Aug 07 11:08:17 2006 +0200
+++ b/MoinMoin/stats/useragents.py	Tue Aug 08 08:49:47 2006 +0200
@@ -172,12 +172,11 @@
         (request.cfg.chart_options['width'], request.cfg.chart_options['height']),
         image, labels)
 
-    # send HTTP headers
     headers = [
         "Content-Type: image/gif",
         "Content-Length: %d" % len(image.getvalue()),
     ]
-    request.http_headers(headers)
+    request.emit_http_headers(headers)
 
     # copy the image
     image.reset()
--- a/MoinMoin/support/cgitb.py	Mon Aug 07 11:08:17 2006 +0200
+++ b/MoinMoin/support/cgitb.py	Tue Aug 08 08:49:47 2006 +0200
@@ -70,6 +70,11 @@
 __UNDEF__ = [] # a special sentinel object
 
 
+class HiddenObject:
+    def __repr__(self):
+        return "<HIDDEN>"
+HiddenObject = HiddenObject()
+
 class HTMLFormatter:
     """ Minimal html formatter """
     
@@ -295,7 +300,10 @@
             if ttype == tokenize.NAME and token not in keyword.kwlist:
                 if lasttoken == '.':
                     if parent is not __UNDEF__:
-                        value = getattr(parent, token, __UNDEF__)
+                        if self.unsafe_name(token):
+                            value = HiddenObject
+                        else:
+                            value = getattr(parent, token, __UNDEF__)
                         vars.append((prefix + token, prefix, value))
                 else:
                     where, value = self.lookup(token)
@@ -324,8 +332,12 @@
                 value = builtins.get(name, __UNDEF__)
             else:
                 value = getattr(builtins, name, __UNDEF__)
+        if self.unsafe_name(name):
+            value = HiddenObject
         return scope, value
 
+    def unsafe_name(self, name):
+        return name in self.frame.f_globals.get("unsafe_names", ())
 
 class View:
     """ Traceback view """
--- a/MoinMoin/support/thfcgi.py	Mon Aug 07 11:08:17 2006 +0200
+++ b/MoinMoin/support/thfcgi.py	Tue Aug 08 08:49:47 2006 +0200
@@ -327,17 +327,18 @@
         self.have_finished = 1
 
         # stderr
-        self.err.reset()
-        rec = Record()
-        rec.rec_type = FCGI_STDERR
-        rec.req_id = self.req_id
-        data = self.err.read()
-        while data:
-            chunk, data = self.getNextChunk(data)
-            rec.content = chunk
-            rec.writeRecord(self.conn)
-        rec.content = ""
-        rec.writeRecord(self.conn)      # Terminate stream
+        if self.err.tell(): # just send err record if there is data on the err stream
+            self.err.reset()
+            rec = Record()
+            rec.rec_type = FCGI_STDERR
+            rec.req_id = self.req_id
+            data = self.err.read()
+            while data:
+                chunk, data = self.getNextChunk(data)
+                rec.content = chunk
+                rec.writeRecord(self.conn)
+            rec.content = ""
+            rec.writeRecord(self.conn)      # Terminate stream
 
         # stdout
         self.out.reset()
--- a/MoinMoin/user.py	Mon Aug 07 11:08:17 2006 +0200
+++ b/MoinMoin/user.py	Tue Aug 08 08:49:47 2006 +0200
@@ -6,6 +6,9 @@
     @license: GNU GPL, see COPYING for details.
 """
 
+# add names here to hide them in the cgitb traceback
+unsafe_names = ("id", "key", "val", "user_data", "enc_password")
+
 import os, time, sha, codecs
 
 try:
@@ -289,9 +292,9 @@
             self.language = 'en'
 
     def __repr__(self):
-        return "<%s.%s at 0x%x name:%r id:%s valid:%r>" % (
+        return "<%s.%s at 0x%x name:%r valid:%r>" % (
             self.__class__.__module__, self.__class__.__name__,
-            id(self), self.name, self.id, self.valid)
+            id(self), self.name, self.valid)
 
     def make_id(self):
         """ make a new unique user id """
--- a/MoinMoin/util/__init__.py	Mon Aug 07 11:08:17 2006 +0200
+++ b/MoinMoin/util/__init__.py	Tue Aug 08 08:49:47 2006 +0200
@@ -7,7 +7,7 @@
     @license: GNU GPL, see COPYING for details.
 """
 
-import os, sys, re
+import os, sys, re, random
 
 #############################################################################
 ### XML helper functions
@@ -112,3 +112,7 @@
     def close(self):
         self.buffer = None
 
+
+def random_string(length):
+    chars = ''.join([chr(random.randint(0, 255)) for x in xrange(length)])
+    return chars
--- a/MoinMoin/util/lock.py	Mon Aug 07 11:08:17 2006 +0200
+++ b/MoinMoin/util/lock.py	Tue Aug 08 08:49:47 2006 +0200
@@ -11,9 +11,9 @@
 
 # Temporary debugging aid, to be replaced with system wide debuging
 # in release 3000.
-import sys
-def log(msg):
-    sys.stderr.write('[%s] lock: %s' % (time.asctime(), msg))
+#import sys
+#def log(msg):
+#    sys.stderr.write('[%s] lock: %s' % (time.asctime(), msg))
 
 
 class Timer:
--- a/MoinMoin/widget/html.py	Mon Aug 07 11:08:17 2006 +0200
+++ b/MoinMoin/widget/html.py	Tue Aug 08 08:49:47 2006 +0200
@@ -272,6 +272,7 @@
 class DIV(CompositeElement):
     "generic language/style container"
     _ATTRS = {
+        'id': None,
         'class': None,
     }
 
--- a/MoinMoin/wikidicts.py	Mon Aug 07 11:08:17 2006 +0200
+++ b/MoinMoin/wikidicts.py	Tue Aug 08 08:49:47 2006 +0200
@@ -26,7 +26,7 @@
 
 # Version of the internal data structure which is pickled
 # Please increment if you have changed the structure
-DICTS_PICKLE_VERSION = 4
+DICTS_PICKLE_VERSION = 5
 
 
 class DictBase:
@@ -44,13 +44,18 @@
         """
         self.name = name
 
-        self.regex = re.compile(self.regex, re.MULTILINE | re.UNICODE)
+        self.initRegex()
 
         # Get text from page named 'name'
         p = Page.Page(request, name)
         text = p.get_raw_body()
         self.initFromText(text)
 
+    def initRegex(cls):
+        """ Make it a class attribute to avoid it being pickled. """
+        cls.regex = re.compile(cls.regex, re.MULTILINE | re.UNICODE)
+    initRegex = classmethod(initRegex)
+
     def initFromText(self, text):
         raise NotImplementedError('sub classes should override this')
 
--- /dev/null	Thu Jan 01 00:00:00 1970 +0000
+++ b/MoinMoin/wikisync.py	Tue Aug 08 08:49:47 2006 +0200
@@ -0,0 +1,137 @@
+# -*- coding: iso-8859-1 -*-
+"""
+    MoinMoin - Wiki Synchronisation
+
+    @copyright: 2006 by MoinMoin:AlexanderSchremmer
+    @license: GNU GPL, see COPYING for details.
+"""
+
+import os
+
+try:
+    import cPickle as pickle
+except ImportError:
+    import pickle
+
+from MoinMoin.util import lock
+from MoinMoin.packages import unpackLine
+
+
+class Tag(object):
+    """ This class is used to store information about merging state. """
+    
+    def __init__(self, remote_wiki, remote_rev, current_rev):
+        """ Creates a new Tag.
+        
+        @param remote_wiki: The identifier of the remote wiki.
+        @param remote_rev: The revision number on the remote end.
+        @param current_rev: The related local revision.
+        """
+        self.remote_wiki = remote_wiki
+        self.remote_rev = remote_rev
+        self.current_rev = current_rev
+
+    def __repr__(self):
+        return u"<Tag remote_wiki=%r remote_rev=%r current_rev=%r>" % (self.remote_wiki, self.remote_rev, self.current_rev)
+
+    def __cmp__(self, other):
+        if not isinstance(other, Tag):
+            return NotImplemented
+        return cmp(self.current_rev, other.current_rev)
+
+
+class AbstractTagStore(object):
+    """ This class is an abstract base class that shows how to implement classes
+        that manage the storage of tags. """
+
+    def __init__(self, page):
+        """ Subclasses don't need to call this method. It is just here to enforce
+        them having accept a page argument at least. """
+        pass
+
+    def add(self, **kwargs):
+        """ Adds a Tag object to the current TagStore. """
+        print "Got tag for page %r: %r" % (self.page, kwargs)
+        return NotImplemented
+
+    def get_all_tags(self):
+        """ Returns a list of all Tag objects associated to this page. """
+        return NotImplemented
+    
+    def clear(self):
+        """ Removes all tags. """
+        return NotImplemented
+
+    def fetch(self, iwid_full=None, iw_name=None):
+        """ Fetches tags by a special IWID or interwiki name. """
+        return NotImplemented
+
+
+class PickleTagStore(AbstractTagStore):
+    """ This class manages the storage of tags in pickle files. """
+
+    def __init__(self, page):
+        """ Creates a new TagStore that uses pickle files.
+        
+        @param page: a Page object where the tags should be related to
+        """
+        
+        self.page = page
+        self.filename = page.getPagePath('synctags', use_underlay=0, check_create=1, isfile=1)
+        lock_dir = os.path.join(page.getPagePath('cache', use_underlay=0, check_create=1), '__taglock__')
+        self.rlock = lock.ReadLock(lock_dir, 60.0)
+        self.wlock = lock.WriteLock(lock_dir, 60.0)
+        self.load()
+
+    def load(self):
+        """ Loads the tags from the data file. """
+        if not self.rlock.acquire(3.0):
+            raise EnvironmentError("Could not lock in PickleTagStore")
+        try:
+            try:
+                datafile = file(self.filename, "rb")
+            except IOError:
+                self.tags = []
+            else:
+                self.tags = pickle.load(datafile)
+                datafile.close()
+        finally:
+            self.rlock.release()
+    
+    def commit(self):
+        """ Writes the memory contents to the data file. """
+        if not self.wlock.acquire(3.0):
+            raise EnvironmentError("Could not lock in PickleTagStore")
+        try:
+            datafile = file(self.filename, "wb")
+            pickle.dump(self.tags, datafile, protocol=pickle.HIGHEST_PROTOCOL)
+            datafile.close()
+        finally:
+            self.wlock.release()
+
+    # public methods ---------------------------------------------------
+    def add(self, **kwargs):
+        self.tags.append(Tag(**kwargs))
+        self.commit()
+    
+    def get_all_tags(self):
+        return self.tags
+
+    def clear(self):
+        self.tags = []
+        self.commit()
+
+    def fetch(self, iwid_full=None, iw_name=None):
+        assert iwid_full ^ iw_name
+        if iwid_full:
+            iwid_full = unpackLine(iwid_full)
+            if len(iwid_full) == 1:
+                assert False, "This case is not supported yet" # XXX
+            iw_name = iwid_full[1]
+
+        return [t for t in self.tags if t.remote_wiki == iw_name]
+
+
+# currently we just have one implementation, so we do not need
+# a factory method
+TagStore = PickleTagStore
\ No newline at end of file
--- a/MoinMoin/wikiutil.py	Mon Aug 07 11:08:17 2006 +0200
+++ b/MoinMoin/wikiutil.py	Tue Aug 08 08:49:47 2006 +0200
@@ -6,11 +6,16 @@
     @license: GNU GPL, see COPYING for details.
 """
 
-import os, re, urllib, cgi
-import codecs, types
+import cgi
+import codecs
+import os
+import re
+import time
+import types
+import urllib
 
 from MoinMoin import util, version, config
-from MoinMoin.util import pysupport, filesys
+from MoinMoin.util import pysupport, filesys, lock
 
 # Exceptions
 class InvalidFileNameError(Exception):
@@ -403,13 +408,18 @@
                 ]
 
 class MetaDict(dict):
-    """ store meta informations as a dict """
-    def __init__(self, metafilename):
+    """ store meta informations as a dict.
+    XXX It is not thread-safe, add locks!
+    """
+    def __init__(self, metafilename, cache_directory):
         """ create a MetaDict from metafilename """
         dict.__init__(self)
         self.metafilename = metafilename
         self.dirty = False
         self.loaded = False
+        lock_dir = os.path.join(cache_directory, '__metalock__')
+        self.rlock = lock.ReadLock(lock_dir, 60.0)
+        self.wlock = lock.WriteLock(lock_dir, 60.0)
 
     def _get_meta(self):
         """ get the meta dict from an arbitrary filename.
@@ -417,11 +427,16 @@
             @param metafilename: the name of the file to read
             @return: dict with all values or {} if empty or error
         """
-        # XXX what does happen if the metafile is being written to in another process?
+
         try:
-            metafile = codecs.open(self.metafilename, "r", "utf-8")
-            meta = metafile.read() # this is much faster than the file's line-by-line iterator
-            metafile.close()
+            if not self.rlock.acquire(3.0):
+                raise EnvironmentError("Could not lock in MetaDict")
+            try:
+                metafile = codecs.open(self.metafilename, "r", "utf-8")
+                meta = metafile.read() # this is much faster than the file's line-by-line iterator
+                metafile.close()
+            finally:
+                self.rlock.release()
         except IOError:
             meta = u''
         for line in meta.splitlines():
@@ -443,16 +458,21 @@
             if key in INTEGER_METAS:
                 value = str(value)
             meta.append("%s: %s" % (key, value))
-        meta = '\n'.join(meta)
-        # XXX what does happen if the metafile is being read or written to in another process?
-        metafile = codecs.open(self.metafilename, "w", "utf-8")
-        metafile.write(meta)
-        metafile.close()
+        meta = '\r\n'.join(meta)
+
+        if not self.wlock.acquire(5.0):
+            raise EnvironmentError("Could not lock in MetaDict")
+        try:
+            metafile = codecs.open(self.metafilename, "w", "utf-8")
+            metafile.write(meta)
+            metafile.close()
+        finally:
+            self.wlock.release()
         filesys.chmod(self.metafilename, 0666 & config.umask)
         self.dirty = False
 
     def sync(self, mtime_usecs=None):
-        """ sync the in-memory dict to disk (if dirty) """
+        """ sync the in-memory dict to the persistent store (if dirty) """
         if self.dirty:
             if not mtime_usecs is None:
                 self.__setitem__('mtime', str(mtime_usecs))
@@ -469,6 +489,8 @@
                 raise
 
     def __setitem__(self, key, value):
+        """ Sets a dictionary entry. You actually have to call sync to write it
+            to the persistent store. """
         try:
             oldvalue = dict.__getitem__(self, key)
         except KeyError:
@@ -481,27 +503,59 @@
 #############################################################################
 ### InterWiki
 #############################################################################
+INTERWIKI_PAGE = "InterWikiMap"
+
+def generate_file_list(request):
+    """ generates a list of all files. for internal use. """
+
+    # order is important here, the local intermap file takes
+    # precedence over the shared one, and is thus read AFTER
+    # the shared one
+    intermap_files = request.cfg.shared_intermap
+    if not isinstance(intermap_files, list):
+        intermap_files = [intermap_files]
+    else:
+        intermap_files = intermap_files[:]
+    intermap_files.append(os.path.join(request.cfg.data_dir, "intermap.txt"))
+    request.cfg.shared_intermap_files = [filename for filename in intermap_files
+                                         if filename and os.path.isfile(filename)]
+
+
+def get_max_mtime(file_list, page):
+    """ Returns the highest modification time of the files in file_list and the
+    page page. """
+    return max([os.stat(filename).st_mtime for filename in file_list] +
+        [version2timestamp(page.mtime_usecs())])
+
+
 def load_wikimap(request):
     """ load interwiki map (once, and only on demand) """
+    from MoinMoin.Page import Page
+
+    now = int(time.time())
+    if getattr(request.cfg, "shared_intermap_files", None) is None:
+        generate_file_list(request)
+
     try:
         _interwiki_list = request.cfg._interwiki_list
+        old_mtime = request.cfg._interwiki_mtime
+        if request.cfg._interwiki_ts + (1*60) < now: # 1 minutes caching time
+            max_mtime = get_max_mtime(request.cfg.shared_intermap_files, Page(request, INTERWIKI_PAGE))
+            if max_mtime > old_mtime:
+                raise AttributeError # refresh cache
+            else:
+                request.cfg._interwiki_ts = now
     except AttributeError:
         _interwiki_list = {}
         lines = []
 
-        # order is important here, the local intermap file takes
-        # precedence over the shared one, and is thus read AFTER
-        # the shared one
-        intermap_files = request.cfg.shared_intermap
-        if not isinstance(intermap_files, type([])):
-            intermap_files = [intermap_files]
-        intermap_files.append(os.path.join(request.cfg.data_dir, "intermap.txt"))
+        for filename in request.cfg.shared_intermap_files:
+            f = open(filename, "r")
+            lines.extend(f.readlines())
+            f.close()
 
-        for filename in intermap_files:
-            if filename and os.path.isfile(filename):
-                f = open(filename, "r")
-                lines.extend(f.readlines())
-                f.close()
+        # add the contents of the InterWikiMap page
+        lines += Page(request, INTERWIKI_PAGE).get_raw_body().splitlines()
 
         for line in lines:
             if not line or line[0] == '#': continue
@@ -522,6 +576,8 @@
 
         # save for later
         request.cfg._interwiki_list = _interwiki_list
+        request.cfg._interwiki_ts = now
+        request.cfg._interwiki_mtime = get_max_mtime(request.cfg.shared_intermap_files, Page(request, INTERWIKI_PAGE))
 
     return _interwiki_list
 
--- a/MoinMoin/xmlrpc/__init__.py	Mon Aug 07 11:08:17 2006 +0200
+++ b/MoinMoin/xmlrpc/__init__.py	Tue Aug 08 08:49:47 2006 +0200
@@ -20,7 +20,8 @@
     when really necessary (like for transferring binary files like
     attachments maybe).
 
-    @copyright: 2003-2005 by Thomas Waldmann
+    @copyright: 2003-2006 MoinMoin:ThomasWaldmann
+    @copyright: 2004-2006 MoinMoin:AlexanderSchremmer
     @license: GNU GPL, see COPYING for details
 """
 from MoinMoin.util import pysupport
@@ -58,7 +59,7 @@
         @rtype: str
         @return: string in config.charset
         """
-        raise "NotImplementedError"
+        raise NotImplementedError("please implement _instr in derived class")
 
     def _outstr(self, text):
         """ Convert outbound string to utf-8.
@@ -67,7 +68,7 @@
         @rtype: str
         @return: string in utf-8
         """
-        raise "NotImplementedError"
+        raise NotImplementedError("please implement _outstr in derived class")
 
     def _inlob(self, text):
         """ Convert inbound base64-encoded utf-8 to Large OBject.
@@ -130,8 +131,8 @@
             # serialize it
             response = xmlrpclib.dumps(response, methodresponse=1)
 
-        self.request.http_headers([
-            "Content-Type: text/xml;charset=utf-8",
+        self.request.emit_http_headers([
+            "Content-Type: text/xml; charset=utf-8",
             "Content-Length: %d" % len(response),
         ])
         self.request.write(response)
@@ -219,10 +220,39 @@
         """ Get all pages readable by current user
 
         @rtype: list
-        @return: a list of all pages. The result is a list of utf-8 strings.
+        @return: a list of all pages.
         """
-        pagelist = self.request.rootpage.getPageList()
-        return map(self._outstr, pagelist)
+
+        # the official WikiRPC interface is implemented by the extended method
+        # as well
+        return self.xmlrpc_getAllPagesEx()
+
+
+    def xmlrpc_getAllPagesEx(self, opts=None):
+        """ Get all pages readable by current user. Not an WikiRPC method.
+
+        @param opts: dictionary that can contain the following arguments:
+                include_system:: set it to false if you do not want to see system pages
+                include_revno:: set it to True if you want to have lists with [pagename, revno]
+                include_deleted:: set it to True if you want to include deleted pages
+        @rtype: list
+        @return: a list of all pages.
+        """
+        options = {"include_system": True, "include_revno": False, "include_deleted": False}
+        if opts is not None:
+            options.update(opts)
+
+        if not options["include_system"]:
+            filter = lambda name: not wikiutil.isSystemPage(self.request, name)
+        else:
+            filter = lambda name: True
+
+        pagelist = self.request.rootpage.getPageList(filter=filter, exists=not options["include_deleted"])
+        
+        if options['include_revno']:
+            return [[self._outstr(x), Page(self.request, x).get_real_rev()] for x in pagelist]
+        else:
+            return [self._outstr(x) for x in pagelist]
 
     def xmlrpc_getRecentChanges(self, date):
         """ Get RecentChanges since date
@@ -498,6 +528,7 @@
         from MoinMoin import version
         return (version.project, version.release, version.revision)
 
+
     # authorization methods
 
     def xmlrpc_getAuthToken(self, username, password, *args):
@@ -519,6 +550,9 @@
         else:
             return xmlrpclib.Fault("INVALID", "Invalid token.")
 
+
+    # methods for wiki synchronization
+
     def xmlrpc_getDiff(self, pagename, from_rev, to_rev):
         """ Gets the binary difference between two page revisions. See MoinMoin:WikiSyncronisation. """
         from MoinMoin.util.bdiff import textdiff, compress
@@ -555,13 +589,13 @@
         if from_rev is None:
             oldcontents = lambda: ""
         else:
-            oldpage = Page(request, pagename, rev=from_rev)
+            oldpage = Page(self.request, pagename, rev=from_rev)
             oldcontents = lambda: oldpage.get_raw_body_str()
 
         if to_rev is None:
             newcontents = lambda: currentpage.get_raw_body()
         else:
-            newpage = Page(request, pagename, rev=to_rev)
+            newpage = Page(self.request, pagename, rev=to_rev)
             newcontents = lambda: newpage.get_raw_body_str()
             newrev = newpage.get_real_rev()
 
@@ -575,14 +609,15 @@
         return {"conflict": conflict, "diff": diffblob, "diffversion": 1, "current": currentpage.get_real_rev()}
 
     def xmlrpc_interwikiName(self):
-        """ Returns the interwiki name of the current wiki. """
+        """ Returns the interwiki name and the IWID of the current wiki. """
         name = self.request.cfg.interwikiname
+        iwid = self.request.cfg.iwid
         if name is None:
-            return None
+            return [None, iwid]
         else:
-            return self._outstr(name)
+            return [self._outstr(name), iwid]
 
-    def xmlrpc_mergeChanges(self, pagename, diff, local_rev, delta_remote_rev, last_remote_rev, interwiki_name):
+    def xmlrpc_mergeDiff(self, pagename, diff, local_rev, delta_remote_rev, last_remote_rev, interwiki_name):
         """ Merges a diff sent by the remote machine and returns the number of the new revision.
             Additionally, this method tags the new revision.
             
@@ -594,9 +629,13 @@
             @param interwiki_name: Used to build the interwiki tag.
         """
         from MoinMoin.util.bdiff import decompress, patch
+        from MoinMoin.wikisync import TagStore
+        LASTREV_INVALID = xmlrpclib.Fault("LASTREV_INVALID", "The page was changed")
 
         pagename = self._instr(pagename)
 
+        comment = u"Remote Merge - %r" % interwiki_name
+        
         # User may read page?
         if not self.request.user.may.read(pagename) or not self.request.user.may.write(pagename):
             return self.notAllowedFault()
@@ -604,10 +643,10 @@
         # XXX add locking here!
 
         # current version of the page
-        currentpage = Page(self.request, pagename)
+        currentpage = PageEditor(self.request, pagename, do_editor_backup=0)
 
         if currentpage.get_real_rev() != last_remote_rev:
-            return xmlrpclib.Fault("LASTREV_INVALID", "The page was changed")
+            return LASTREV_INVALID
 
         if not currentpage.exists() and diff is None:
             return xmlrpclib.Fault("NOT_EXIST", "The page does not exist and no diff was supplied.")
@@ -619,11 +658,19 @@
         newcontents = patch(basepage.get_raw_body_str(), decompress(str(diff)))
 
         # write page
-        # XXX ...
+        try:
+            currentpage.saveText(newcontents.encode("utf-8"), last_remote_rev, comment=comment)
+        except PageEditor.EditConflict:
+            return LASTREV_INVALID
 
-        # XXX add a tag (interwiki_name, local_rev, current rev) to the page
-        # XXX return current rev
-        # XXX finished
+        current_rev = currentpage.get_real_rev()
+        
+        tags = TagStore(currentpage)
+        tags.add(remote_wiki=interwiki_name, remote_rev=local_rev, current_rev=current_rev)
+
+        # XXX unlock page
+
+        return current_rev
 
 
     # XXX BEGIN WARNING XXX
--- a/docs/CHANGES	Mon Aug 07 11:08:17 2006 +0200
+++ b/docs/CHANGES	Tue Aug 08 08:49:47 2006 +0200
@@ -81,11 +81,11 @@
       will be missing and the adaptor script will need a change maybe):
       CGI works
       CLI works
-      STANDALONE not
-      MODPY not
-      WSGI not
-      FCGI not
-      TWISTED not
+      STANDALONE ?
+      MODPY ?
+      WSGI ?
+      FCGI ?
+      TWISTED ?
     * moved util/antispam.py to security/antispam.py,
       moved util/autoadmin.py to security/autoadmin.py,
       moved security.py to security/__init__.py,
@@ -119,6 +119,24 @@
       TODO: write mig script for data_dir
       TODO: make blanks in interwiki pagelinks possible
     * request.action now has the action requested, default: 'show'.
+    * Cleaned up duplicated http_headers code and DEPRECATED this function
+      call (it was sometimes confused with setHttpHeaders call) - it will
+      vanish with moin 1.7, so please fix your custom plugins!
+      The replacement is:
+          request.emit_http_headers(more_headers=[])
+      This call pre-processes the headers list (encoding from unicode, making
+      sure that there is exactly ONE content-type header, etc.) and then
+      calls a server specific helper _emit_http_headers to emit it.
+      CGI works
+      CLI ?
+      STANDALONE ?
+      MODPY ?
+      WSGI ?
+      FCGI ?
+      TWISTED ?
+    * setResponseCode request method DEPRECATED (it only worked for Twisted
+      anyway), just use emit_http_headers and include a Status: XXX header.
+      Method will vanish with moin 1.7. 
 
   New Features:
     * Removed "underscore in URL" == "blank in pagename magic" - it made more
@@ -165,7 +183,9 @@
     * The i18n system no loads *.po files directly (no *.py or *.mo any more)
       and caches the results (farm wide cache/i18n/*).
     * added the diff parser from ParserMarket, thanks to Emilio Lopes, Fabien
-      Ninoles and Jürgen Hermann.
+      Ninoles and Jürgen Hermann.
+    * Added support for "304 not modified" response header for AttachFile get
+      and rss_rc actions - faster, less traffic, less load.
 
   Bugfixes:
     * on action "info" page, "revert" link will not be displayed for empty page
@@ -186,6 +206,8 @@
     * Added a (less broken) MoinMoin.support.difflib, details see there.
     * BadContent and LocalBadContent now get noindex,nofollow robots header,
       same as POSTs.
+    * Fixed handling of anchors in wiki links for the Restructured text parser.
+    * Fixed http header output.
 
   Other changes:
     * we use (again) the same browser compatibility check as FCKeditor uses
@@ -194,7 +216,8 @@
       at FCKeditor development or browser development.
     * HINT: instead of "from MoinMoin.multiconfig import DefaultConfig" you
       need to use "from MoinMoin.config.multiconfig import DefaultConfig" now.
-      You need to change this in you wikiconfig.py or farmconfig.py file.
+      You need to change this in your wikiconfig.py or farmconfig.py file.
+      See MoinMoin/multiconfig.py for an alternative way if you can't do that.
 
 Version 1.5.4-current:
     * increased maxlength of some input fields from 80 to 200
@@ -859,7 +882,7 @@
 
   International support:    
     * mail_from can be now a unicode name-address 
-      e.g u'Jürgen wiki <noreply@jhwiki.org>'
+      e.g u'Jürgen wiki <noreply@jhwiki.org>'
 
   Theme changes:
     * logo_string is now should be really only the logo (img).
@@ -2352,8 +2375,8 @@
         || {{{ ;)) }}} || ;)) || lol.gif    ||
     * AbandonedPages macro
     * Added definition list markup: {{{<whitespace>term:: definition}}}
-    * Added email notification features contributed by Daniel Saß
-    * SystemInfo: show "Entries in edit log"
+    * Added email notification features contributed by Daniel Saß
+    * SystemInfo: show "Entries in edit log"
     * Added "RSS" icon to RecentChanges macro and code to generate a
       RecentChanges RSS channel, see
           http://www.usemod.com/cgi-bin/mb.pl?UnifiedRecentChanges
@@ -2626,7 +2648,7 @@
       there before a new version is written to disk
     * Removed the "Reset" button from EditPage
     * Added "Reduce editor size" link
-    * Added Latin-1 WikiNames (JürgenHermann ;)
+    * Added Latin-1 WikiNames (JürgenHermann ;)
     * Speeded up RecentChanges by looking up hostnames ONCE while saving
     * Show at most 14 (distinct) days in RecentChanges
     * Added icons for common functions, at the top of the page
@@ -2639,7 +2661,7 @@
     * Grey background for code sections
     * Added handling for numbered lists
     * the edit textarea now grows in width with the browser window
-      (thanks to Sebastian Dauß for that idea)
+      (thanks to Sebastian Dauß for that idea)
     * Added page info (revision history) and viewing of old revisions
     * Added page diff, and diff links on page info
     * Added InterWiki support (use "wiki:WikiServer/theirlocalname"; the list
--- a/docs/CHANGES.aschremmer	Mon Aug 07 11:08:17 2006 +0200
+++ b/docs/CHANGES.aschremmer	Tue Aug 08 08:49:47 2006 +0200
@@ -2,16 +2,19 @@
 ===============================
 
   Known main issues:
-    * ...
+    * How will we store tags? (Metadata support would be handy)
+    * How to handle renames/deletes?
+    * How to handle colliding/empty interwiki names?
 
   ToDo:
     * Implement actual syncronisation.
+      * Add correct IWID_full handling.
     * Implement a cross-site authentication system, i.e. mainly an
       identity storage.
     * Clean up trailing whitespace.
-    * Add page locking.
-    * How about using unique IDs that just derive from the interwikiname?
-    * How to handle renames?
+    * Add page locking, i.e. use the one in the new storage layer.
+    * Check what needs to be documented on MoinMaster.
+    * Search for XXX
 
   New Features:
     * XMLRPC method to return the Moin version
@@ -20,10 +23,25 @@
     * XMLRPC Authentication System
     * Binary Diffing
     * XMLRPC method to get binary diffs
-    * 
+    * XMLRPC method to merge remote changes locally
+    * XMLRPC method to get the interwiki name
+    * TagStore/PickleTagStore class
+    * XMLRPC method to get the pagelist in a special way (revnos,
+      no system pages etc.)
+    * IWID support - i.e. every instance has a unique ID
+    * InterWiki page editable in the wiki, modification detection based on mtimes
 
   Bugfixes (only stuff that is buggy in moin/1.6 main branch):
-    * Conflict resolution fixes.
+    * Conflict resolution fixes. (merged into main)
+    * Python 2.5 compatibility fixes in the Page caching logic (merged)
+    * sre pickle issues in the wikidicts code (merged)
+    * cgitb can hide particular names, this avoids information leaks
+      if the user files cannot be parsed for example
+    * Fixed User.__repr__ - it is insane to put the ID in there
+    * Worked around the FastCGI problem on Lighttpd: empty lines in the error log, thanks to Jay Soffian
+    * Fixed the MetaDict code to use locks.
+    * Fixed bug in request.py that avoided showing a traceback if there was a fault
+      after the first headers were sent.
 
   Other Changes:
     * Refactored conflict resolution and XMLRPC code.
@@ -58,6 +76,38 @@
 Week 28: Debian-Edu Developer Camp. Implemented getDiff XMLRPC method, added preliminary SyncPages action,
          added interwikiName XMLRPC method, added mergeChanges XMLRPC method. Started analysis of the moinupdate
          script written by Stefan Merten.
+Week 29: Finished first version of the mergeChanges method. Added Tag and TagStore classes which are currently
+         using pickle-based storage. Added getAllPagesEx XMLRPC method.
+Week 30: Implemented IWID support, added function to generate random strings. Added support
+         for editing the InterWikiMap in the wiki. Added locking to the PickleTagStore and the MetaDict classes. Added handling of
+         various options and detection of anonymous wikis to the SyncPages action.
+Week 31: Load the IWID and the meta dict lazily. Reworked RemotePage/SyncPage,
+         fixed option handling again, refined semantics of options, introduced
+         direction option, replaced "localMatch"/"remoteMatch" by "pageMatch".
+         Store mtime for InterWiki list updates and detect changes based on it.
+         Added support for localPrefix and remotePrefix.
+Week 32: Continued work on the merge logic, finished prefix handling.
+
+2006-07-18: the requested daily entry is missing here, see http://moinmoin.wikiwikiweb.de/GoogleSoc2006/BetterProgress
+2006-07-19: the requested daily entry is missing here, see http://moinmoin.wikiwikiweb.de/GoogleSoc2006/BetterProgress
+2006-07-20: the requested daily entry is missing here, see http://moinmoin.wikiwikiweb.de/GoogleSoc2006/BetterProgress
+2006-07-21: the requested daily entry is missing here, see http://moinmoin.wikiwikiweb.de/GoogleSoc2006/BetterProgress
+2006-07-22: the requested daily entry is missing here, see http://moinmoin.wikiwikiweb.de/GoogleSoc2006/BetterProgress
+2006-07-23: no work on SOC project -- a Sunday
+2006-07-24: the requested daily entry is missing here, see http://moinmoin.wikiwikiweb.de/GoogleSoc2006/BetterProgress
+2006-07-25: the requested daily entry is missing here, see http://moinmoin.wikiwikiweb.de/GoogleSoc2006/BetterProgress
+2006-07-26: student didn't work on project
+2006-07-27: student didn't work on project
+2006-07-28: the requested daily entry is missing here, see http://moinmoin.wikiwikiweb.de/GoogleSoc2006/BetterProgress
+2006-07-29: the requested daily entry is missing here, see http://moinmoin.wikiwikiweb.de/GoogleSoc2006/BetterProgress
+2006-07-30: the requested daily entry is missing here, see http://moinmoin.wikiwikiweb.de/GoogleSoc2006/BetterProgress
+2006-07-31: the requested daily entry is missing here, see http://moinmoin.wikiwikiweb.de/GoogleSoc2006/BetterProgress
+2006-08-01: student didn't work on project
+2006-08-02: the requested daily entry is missing here, see http://moinmoin.wikiwikiweb.de/GoogleSoc2006/BetterProgress
+2006-08-03: the requested daily entry is missing here, see http://moinmoin.wikiwikiweb.de/GoogleSoc2006/BetterProgress
+2006-08-04: the requested daily entry is missing here, see http://moinmoin.wikiwikiweb.de/GoogleSoc2006/BetterProgress
+2006-08-05: student didn't work on project
+2006-08-06: student didn't work on project -- a Sunday
 
 Time plan
 =========
--- a/docs/CHANGES.fpletz	Mon Aug 07 11:08:17 2006 +0200
+++ b/docs/CHANGES.fpletz	Tue Aug 08 08:49:47 2006 +0200
@@ -199,16 +199,20 @@
     * SystemInfo macro update (mtime)
     * nicer regexp support for TitleSearch
 
-2006-07-25 .. 2006-07-29
+2006-07-25 .. 2006-08-01
     * student did not work on project
 
-2006-07-30 no work on project
-2006-07-31 no work on project
-2006-08-01 no work on project
-2006-08-02 entry missing
+2006-08-02
+    * Reformatted search statistics to use CSS and be more google-like
+      (only in modern theme for now)
+    * Added "search result info bar", showing revision, size, mtime,
+      links for further searches (-> ToDo) etc.
+
 2006-08-03 no work on project
 2006-08-04 no work on project
-2006-08-05 entry missing
-2006-08-06 entry missing
 
+2006-08-05 .. 2006-08-06
+    * (finally :)) Google-like paging, using images from google.com until
+      we get proper moin gfx
+    * index domains of a page (standard, underlay)
 
--- a/setup.py	Mon Aug 07 11:08:17 2006 +0200
+++ b/setup.py	Tue Aug 08 08:49:47 2006 +0200
@@ -219,6 +219,7 @@
         'MoinMoin',
         'MoinMoin.action',
         'MoinMoin.auth',
+        'MoinMoin.config',
         'MoinMoin.converter',
         'MoinMoin.filter',
         'MoinMoin.formatter',
--- a/wiki/htdocs/modern/css/common.css	Mon Aug 07 11:08:17 2006 +0200
+++ b/wiki/htdocs/modern/css/common.css	Tue Aug 08 08:49:47 2006 +0200
@@ -334,11 +334,46 @@
 
 .searchresults dt {
     margin-top: 1em;
-	font-weight: normal;
+    font-weight: normal;
 }
 
 .searchresults dd {
-	font-size: 0.85em;
+    font-size: 0.85em;
+}
+
+.searchresults dd.searchresinfobar {
+    color: #008000;
+    margin-left: 15px;
+}
+
+p.searchstats {
+    font-size: 0.8em;
+    text-align: right;
+    width: 100%;
+    background-color: #E6EAF0;
+    border-top: 1px solid #9088DC;
+    padding: 2px;
+}
+
+.searchpages {
+    margin-left: auto;
+    margin-right: auto;
+}
+
+.searchpages tr, .searchpages td {
+    border: 0;
+    padding: 0;
+    margin: 0;
+    text-align: center;
+    vertical-align: middle;
+    color: #a90a08;
+    font-weight: bold;
+}
+
+.searchpages td a, .searchpages td a:link {
+    color: #000000;
+    text-decoration: underline;
+    font-weight: normal;
 }
 
 /* MonthCalendar css */
Binary file wiki/htdocs/modern/img/nav_current.png has changed
Binary file wiki/htdocs/modern/img/nav_first.png has changed
Binary file wiki/htdocs/modern/img/nav_last.png has changed
Binary file wiki/htdocs/modern/img/nav_next.png has changed
Binary file wiki/htdocs/modern/img/nav_page.png has changed
Binary file wiki/htdocs/modern/img/nav_prev.png has changed
--- a/wikiconfig.py	Mon Aug 07 11:08:17 2006 +0200
+++ b/wikiconfig.py	Tue Aug 08 08:49:47 2006 +0200
@@ -6,7 +6,7 @@
 @license: GNU GPL, see COPYING for details.
 """
 
-from MoinMoin.multiconfig import DefaultConfig
+from MoinMoin.config.multiconfig import DefaultConfig
 
 
 class Config(DefaultConfig):