changeset 3931:9c2fb9246e66

merged main
author Reimar Bauer <rb.proj AT googlemail DOT com>
date Sun, 03 Aug 2008 22:58:40 +0200
parents db3edaef0ba1 (current diff) 15f2aaa2be01 (diff)
children 2bcd23157684 529b71b82fdc
files
diffstat 10 files changed, 4262 insertions(+), 11 deletions(-) [+]
line wrap: on
line diff
--- a/MoinMoin/macro/EditTemplates.py	Sun Aug 03 22:57:44 2008 +0200
+++ b/MoinMoin/macro/EditTemplates.py	Sun Aug 03 22:58:40 2008 +0200
@@ -1,12 +1,14 @@
 # -*- coding: iso-8859-1 -*-
 """
-    MoinMoin - Create an action link
+    MoinMoin - Create a list of currentpage?action=edit&template=X links
+    for all available templates X. Used by MissingPage.
 
     @copyright: 2004 Johannes Berg <johannes@sipsolutions.de>
     @license: GNU GPL, see COPYING for details.
 """
 
 Dependencies = ["language"]
+
 def macro_EditTemplates(macro):
     result = ''
     # we don't want to spend much CPU for spiders requesting nonexisting pages
--- /dev/null	Thu Jan 01 00:00:00 1970 +0000
+++ b/MoinMoin/script/migration/1059997.py	Sun Aug 03 22:58:40 2008 +0200
@@ -0,0 +1,25 @@
+#!/usr/bin/env python
+# -*- coding: iso-8859-1 -*-
+"""
+    MoinMoin - 1st pass of 1.6a to 1.6 migration
+
+    Note: this is a special hack for some users of an early 1.6 alpha version,
+          this code is skipped and NOT executed in a normal release-to-release
+          migration (like when going from 1.5.x release to 1.6.0 release).
+          
+          If you run this early 1.6alpha code (with different link markup than
+          1.5.x AND 1.6.x release has), you need to manually put 1059997 into
+          your data/meta file to have this special code executed.
+
+    @copyright: 2008 by Thomas Waldmann
+    @license: GNU GPL, see COPYING for details.
+"""
+
+from _conv160a import DataConverter
+
+def execute(script, data_dir, rev):
+    # the first pass just creates <data_dir>/rename1.txt
+    dc = DataConverter(script.request, data_dir, None)
+    dc.pass1()
+    return 1059998
+
--- /dev/null	Thu Jan 01 00:00:00 1970 +0000
+++ b/MoinMoin/script/migration/1059998.py	Sun Aug 03 22:58:40 2008 +0200
@@ -0,0 +1,45 @@
+#!/usr/bin/env python
+# -*- coding: iso-8859-1 -*-
+"""
+    MoinMoin - 2nd pass of 1.6 migration
+
+    @copyright: 2007 by Thomas Waldmann
+    @license: GNU GPL, see COPYING for details.
+"""
+
+import os, shutil
+
+from _conv160a import DataConverter
+
+def execute(script, data_dir, rev):
+    # Pass 2 of the migration: requires the user-edited rename2.txt.
+    # Returns the next migration step number, or None to stop here so the
+    # user can finish editing the rename list first.
+    rename1_map = os.path.join(data_dir, 'rename1.txt')
+    rename2_map = os.path.join(data_dir, 'rename2.txt')
+    fieldsep = DataConverter.LIST_FIELDSEP
+    if fieldsep == u'\t':
+        fieldsep = u'TAB' # human-readable name for the instructions below
+    if not os.path.exists(rename2_map):
+        print "You must first edit %s." % rename1_map
+        print "For editing it, please use an editor that is able to edit UTF-8 encoded files."
+        print "Carefully edit - the fields are separated by a %s char, do not change this!" % fieldsep
+        print "Entries in this file look like:"
+        print "PAGE OLDPAGENAME NEWPAGENAME"
+        print "FILE OLDPAGENAME OLDFILENAME NEWFILENAME"
+        print "You may ONLY edit the rightmost field (the new name - in case you want to rename the page or file)."
+        print
+        print "After you have finished editing, rename the file to %s and re-issue the moin migrate command." % rename2_map
+        return None # terminate here
+    # the second pass does the conversion, reading <data_dir>/rename2.txt
+    src_data_dir = os.path.abspath(os.path.join(data_dir, '..', 'data.pre160')) # keep the orig data_dir here
+    dst_data_dir = data_dir
+    # move the original data aside; the converted data replaces it in place
+    shutil.move(data_dir, src_data_dir)
+    # the 1.5 parser checks page existence, so we must use the orig, fully populated dir:
+    saved_data_dir = script.request.cfg.data_dir
+    script.request.cfg.data_dir = src_data_dir
+    os.mkdir(dst_data_dir)
+    shutil.move(os.path.join(src_data_dir, 'cache'), os.path.join(dst_data_dir, 'cache')) # mig script has locks there
+    dc = DataConverter(script.request, src_data_dir, dst_data_dir)
+    dc.pass2()
+    # restore correct data dir:
+    script.request.cfg.data_dir = saved_data_dir
+    return 1060000
+
--- /dev/null	Thu Jan 01 00:00:00 1970 +0000
+++ b/MoinMoin/script/migration/_conv160a.py	Sun Aug 03 22:58:40 2008 +0200
@@ -0,0 +1,567 @@
+# -*- coding: iso-8859-1 -*-
+"""
+    MoinMoin - migration from 1.6.0alpha (rev 1844: 58ebb64243cc - used a similar markup as 1.5.8, but with quotes for linking stuff with blanks) to 1.6.0 (creole link style)
+
+    What it does:
+
+    a) reverse underscore == blank stuff in pagenames (introducing this was a fault)
+
+                   pagename            quoted pagename
+       -----------------------------------------------------
+       old         MainPage/Sub_Page   MainPage(2f)Sub_Page
+       new         MainPage/Sub Page   MainPage(2f)Sub(20)Page    or
+       new         MainPage/Sub_Page   MainPage(2f)Sub_Page       (user has to decide by editing rename1.txt)
+
+
+                   markup
+       ----------------------------------------------------
+       old         MoinMoin:MainPage/Sub_Page      ../Sub_Page2
+       new         [[MoinMoin:MainPage/Sub Page]]  [[../Sub Page2]]
+
+
+    b) decode url encoded chars in attachment names (and quote the whole fname):
+
+                   markup
+       ----------------------------------------------------
+       old         attachment:file%20with%20blanks.txt
+       new         [[attachment:file with blanks.txt]]
+
+    c) users: move bookmarks from separate files into user profile
+    d) users: generate new name[] for lists and name{} for dicts
+
+    e) kill all */MoinEditorBackup pages (replaced by drafts functionality)
+
+    @copyright: 2007 by Thomas Waldmann
+    @license: GNU GPL, see COPYING for details.
+"""
+
+import os.path
+import re
+import time
+import codecs, urllib, glob
+
+from MoinMoin import config, wikiutil
+from MoinMoin.script.migration.migutil import opj, listdir, copy_file, move_file, copy_dir
+
+import mimetypes # this MUST be after wikiutil import!
+
+from _conv160b_wiki import convert_wiki
+
+create_rev = True # create a <new> rev with the converted content of <new-1> rev?
+
+def markup_converter(request, pagename, text, renames):
+    """ Convert the <text> content of page <pagename>, using <renames> dict
+        to rename links correctly. Additionally, convert some changed markup.
+    """
+    if text.startswith('<?xml'):
+        # would be done with xslt processor
+        return text
+
+    pis, body = wikiutil.get_processing_instructions(text)
+    for pi, val in pis:
+        if pi == 'format' and val != 'wiki':
+            # not wiki page
+            return text
+
+    text = convert_wiki(request, pagename, text, renames)
+    return text
+
+
+class EventLog:
+    """ Read, rewrite and copy an event-log file, applying page renames. """
+    def __init__(self, request, fname):
+        self.request = request
+        self.fname = fname
+        self.data = None # list of (timestamp, action, kvdict), filled by read()
+        self.renames = {} # ('PAGE', oldname) -> newname, set by copy()
+
+    def read(self):
+        """ read complete event-log from disk """
+        data = []
+        try:
+            lineno = 0
+            f = file(self.fname, 'r')
+            for line in f:
+                lineno += 1
+                line = line.replace('\r', '').replace('\n', '')
+                if not line.strip(): # skip empty lines
+                    continue
+                # event-log format: timestamp TAB action TAB urlencoded-kvpairs
+                fields = line.split('\t')
+                try:
+                    timestamp, action, kvpairs = fields[:3]
+                    timestamp = int(timestamp)
+                    kvdict = wikiutil.parseQueryString(kvpairs)
+                    data.append((timestamp, action, kvdict))
+                except ValueError, err:
+                    # corrupt event log line, log error and skip it
+                    print "Error: invalid event log (%s) line %d, err: %s, SKIPPING THIS LINE!" % (self.fname, lineno, str(err))
+            f.close()
+        except IOError, err:
+            # no event-log
+            pass
+        self.data = data
+
+    def write(self, fname):
+        """ write complete event-log to disk """
+        if self.data:
+            f = file(fname, 'w')
+            for timestamp, action, kvdict in self.data:
+                # rewrite the logged pagename if that page got renamed
+                pagename = kvdict.get('pagename')
+                if pagename and ('PAGE', pagename) in self.renames:
+                    kvdict['pagename'] = self.renames[('PAGE', pagename)]
+                kvpairs = wikiutil.makeQueryString(kvdict, want_unicode=False)
+                fields = str(timestamp), action, kvpairs
+                line = '\t'.join(fields) + '\n'
+                f.write(line)
+            f.close()
+
+    def copy(self, destfname, renames):
+        """ read the event-log and write it (with renames applied) to destfname """
+        self.renames = renames
+        self.read()
+        self.write(destfname)
+
+
+class EditLog:
+    """ Read, rewrite and copy an edit-log file, applying page/file renames. """
+    def __init__(self, request, fname):
+        self.request = request
+        self.fname = fname
+        self.data = None # dict (timestamp, rev, pagename) -> 9-tuple log entry
+        self.renames = {} # ('PAGE', name) / ('FILE', page, fname) -> new name
+
+    def read(self):
+        """ read complete edit-log from disk """
+        data = {}
+        try:
+            f = file(self.fname, 'r')
+            for line in f:
+                line = line.replace('\r', '').replace('\n', '')
+                if not line.strip(): # skip empty lines
+                    continue
+                # pad with empty strings so short lines still unpack into 9 fields
+                fields = line.split('\t') + [''] * 9
+                timestamp, rev, action, pagename, ip, hostname, userid, extra, comment = fields[:9]
+                timestamp = int(timestamp)
+                rev = int(rev)
+                pagename = wikiutil.unquoteWikiname(pagename)
+                # keyed by (timestamp, rev, pagename) so write() can emit sorted output
+                data[(timestamp, rev, pagename)] = (timestamp, rev, action, pagename, ip, hostname, userid, extra, comment)
+            f.close()
+        except IOError, err:
+            # no edit-log
+            pass
+        self.data = data
+
+    def write(self, fname, deleted=False):
+        """ write complete edit-log to disk
+
+            If the global create_rev is set and the page is not deleted, an
+            extra SAVE entry is appended for the converted revision that
+            Page.write() creates.
+        """
+        if self.data:
+            editlog = self.data.items()
+            editlog.sort()
+            f = file(fname, "w")
+            max_rev = 0
+            for key, fields in editlog:
+                timestamp, rev, action, pagename, ip, hostname, userid, extra, comment = fields
+                if action.startswith('ATT'):
+                    # ATT* entries carry the url-quoted attachment name in <extra>
+                    # NOTE(review): this rebinds <fname>, shadowing the output
+                    # filename parameter - harmless because <f> is already open,
+                    # but confusing to read
+                    try:
+                        fname = urllib.unquote(extra).decode('utf-8')
+                    except UnicodeDecodeError:
+                        fname = urllib.unquote(extra).decode('iso-8859-1')
+                    if ('FILE', pagename, fname) in self.renames:
+                        fname = self.renames[('FILE', pagename, fname)]
+                    extra = urllib.quote(fname.encode('utf-8'))
+                if ('PAGE', pagename) in self.renames:
+                    pagename = self.renames[('PAGE', pagename)]
+                timestamp = str(timestamp)
+                if rev != 99999999:
+                    # 99999999 is a sentinel rev number, not a real revision -
+                    # do not let it inflate max_rev
+                    max_rev = max(rev, max_rev)
+                revstr = '%08d' % rev
+                pagename = wikiutil.quoteWikinameFS(pagename)
+                fields = timestamp, revstr, action, pagename, ip, hostname, userid, extra, comment
+                log_str = '\t'.join(fields) + '\n'
+                f.write(log_str)
+            if create_rev and not deleted:
+                # NOTE(review): <pagename> here is the quoted name left over
+                # from the last loop iteration - assumes all entries of this
+                # log belong to a single page (true for per-page edit-logs)
+                timestamp = str(wikiutil.timestamp2version(time.time()))
+                revstr = '%08d' % (max_rev + 1)
+                action = 'SAVE'
+                ip = '127.0.0.1'
+                hostname = 'localhost'
+                userid = ''
+                extra = ''
+                comment = "converted to 1.6 markup"
+                fields = timestamp, revstr, action, pagename, ip, hostname, userid, extra, comment
+                log_str = '\t'.join(fields) + '\n'
+                f.write(log_str)
+            f.close()
+
+    def copy(self, destfname, renames, deleted=False):
+        """ read the edit-log and write it (with renames applied) to destfname """
+        self.renames = renames
+        self.read()
+        self.write(destfname, deleted)
+
+
+class PageRev:
+    """ a single revision of a page """
+    def __init__(self, request, pagename, rev_dir, rev):
+        self.request = request
+        self.pagename = pagename
+        self.rev_dir = rev_dir
+        self.rev = rev
+
+    def read(self):
+        fname = opj(self.rev_dir, '%08d' % self.rev)
+        f = file(fname, "rb")
+        data = f.read()
+        f.close()
+        data = data.decode(config.charset)
+        return data
+
+    def write(self, data, rev_dir, convert, rev=None):
+        if rev is None:
+            rev = self.rev
+        if convert:
+            data = markup_converter(self.request, self.pagename, data, self.renames)
+        fname = opj(rev_dir, '%08d' % rev)
+        data = data.encode(config.charset)
+        f = file(fname, "wb")
+        f.write(data)
+        f.close()
+
+    def copy(self, rev_dir, renames, convert=False, new_rev=None):
+        self.renames = renames
+        data = self.read()
+        self.write(data, rev_dir, convert, new_rev)
+
+
+class Attachment:
+    """ a single attachment """
+    def __init__(self, request, attach_dir, attfile):
+        self.request = request
+        self.path = opj(attach_dir, attfile)
+        self.name = attfile.decode('utf-8', 'replace')
+
+    def copy(self, attach_dir):
+        """ copy attachment file from orig path to new destination """
+        attfile = self.name.encode('utf-8')
+        dest = opj(attach_dir, attfile)
+        copy_file(self.path, dest)
+
+
+class Page:
+    """ represents a page with all related data """
+    def __init__(self, request, pages_dir, qpagename):
+        self.request = request
+        self.name = wikiutil.unquoteWikiname(qpagename)
+        self.name_old = self.name # renaming: still original name when self.name has the new name
+        self.page_dir = opj(pages_dir, qpagename)
+        self.current = None # int current
+        self.editlog = None # dict (see read_editlog)
+        self.revlist = None # list of ints (page text revisions)
+        self.revisions = None # dict int: pagerev obj
+        self.attachments = None # dict of unicode fname: full path
+        self.renames = {} # info for renaming pages/attachments
+
+    def read(self):
+        """ read a page, including revisions, log, attachments from disk """
+        page_dir = self.page_dir
+        # read current file
+        current_fname = opj(page_dir, 'current')
+        if os.path.exists(current_fname):
+            current_file = file(current_fname, "r")
+            current_rev = current_file.read()
+            current_file.close()
+            try:
+                self.current = int(current_rev)
+            except ValueError:
+                print "Error: invalid current file %s, SKIPPING THIS PAGE!" % current_fname
+                return
+        # read edit-log
+        editlog_fname = opj(page_dir, 'edit-log')
+        if os.path.exists(editlog_fname):
+            self.editlog = EditLog(self.request, editlog_fname)
+        # read page revisions
+        rev_dir = opj(page_dir, 'revisions')
+        if os.path.exists(rev_dir):
+            revlist = listdir(rev_dir)
+            revlist = [int(rev) for rev in revlist]
+            revlist.sort()
+            self.revlist = revlist
+            self.revisions = {}
+            for rev in revlist:
+                # pass the OLD name: markup conversion needs the name the
+                # revision text was written for, not a renamed one
+                self.revisions[rev] = PageRev(self.request, self.name_old, rev_dir, rev)
+        # set deleted status
+        # deleted = no revision data at all, or 'current' points to a missing rev
+        self.is_deleted = not self.revisions or self.current not in self.revisions
+        # read attachment filenames
+        attach_dir = opj(page_dir, 'attachments')
+        if os.path.exists(attach_dir):
+            self.attachments = {}
+            attlist = listdir(attach_dir)
+            for attfile in attlist:
+                a = Attachment(self.request, attach_dir, attfile)
+                self.attachments[a.name] = a
+
+    def write(self, pages_dir):
+        """ write a page, including revisions, log, attachments to disk """
+        if ('PAGE', self.name) in self.renames:
+            name_new = self.renames[('PAGE', self.name)]
+            if name_new != self.name:
+                print "Renaming page %r -> %r" % (self.name, name_new)
+                self.name_old = self.name
+                self.name = name_new
+        qpagename = wikiutil.quoteWikinameFS(self.name)
+        page_dir = opj(pages_dir, qpagename)
+        os.makedirs(page_dir)
+        # write current file
+        current = self.current
+        if current is not None:
+            if create_rev and not self.is_deleted:
+                current += 1 # one extra converted revision gets appended below
+            current_fname = opj(page_dir, 'current')
+            current_file = file(current_fname, "w")
+            current_str = '%08d\n' % current
+            current_file.write(current_str)
+            current_file.close()
+        # copy edit-log
+        if self.editlog is not None:
+            editlog_fname = opj(page_dir, 'edit-log')
+            self.editlog.copy(editlog_fname, self.renames, deleted=self.is_deleted)
+        # copy page revisions
+        if self.revisions is not None:
+            rev_dir = opj(page_dir, 'revisions')
+            os.makedirs(rev_dir)
+            for rev in self.revlist:
+                if create_rev:
+                    # keep all old revs unconverted; a converted copy is added below
+                    self.revisions[rev].copy(rev_dir, self.renames)
+                else:
+                    if int(rev) == self.current:
+                        # convert in place: only the current rev gets new markup
+                        self.revisions[rev].copy(rev_dir, self.renames, convert=True)
+                    else:
+                        self.revisions[rev].copy(rev_dir, self.renames)
+            if create_rev and not self.is_deleted:
+                # NOTE(review): relies on <rev> still holding the last (highest)
+                # value from the loop above - writes its converted content as rev+1
+                self.revisions[rev].copy(rev_dir, self.renames, convert=True, new_rev=rev+1)
+
+        # copy attachments
+        if self.attachments is not None:
+            attach_dir = opj(page_dir, 'attachments')
+            os.makedirs(attach_dir)
+            for fn, att in self.attachments.items():
+                # we have to check for renames here because we need the (old) pagename, too:
+                if ('FILE', self.name_old, fn) in self.renames:
+                    fn_new = self.renames[('FILE', self.name_old, fn)]
+                    if fn_new != fn:
+                        print "Renaming file %r %r -> %r" % (self.name_old, fn, fn_new)
+                        att.name = fn_new
+                att.copy(attach_dir)
+
+    def copy(self, pages_dir, renames):
+        """ read this page and write the converted form into pages_dir """
+        self.renames = renames
+        self.read()
+        self.write(pages_dir)
+
+
+class User:
+    """ represents a user with all related data """
+    def __init__(self, request, users_dir, uid):
+        self.request = request
+        self.uid = uid
+        self.users_dir = users_dir
+        self.profile = None # dict key -> value, filled by read()
+        self.bookmarks = None # dict wikiname -> int bookmark timestamp
+
+    def read(self):
+        """ read profile and bookmarks data from disk """
+        self.profile = {}
+        fname = opj(self.users_dir, self.uid)
+        # read user profile
+        f = codecs.open(fname, 'r', config.charset)
+        for line in f:
+            line = line.replace(u'\r', '').replace(u'\n', '')
+            if not line.strip() or line.startswith(u'#'): # skip empty or comment lines
+                continue
+            try:
+                key, value = line.split(u'=', 1)
+            except Exception, err:
+                print "Error: User reader can not parse line %r from profile %r (%s)" % (line, fname, str(err))
+                continue
+            self.profile[key] = value
+        f.close()
+        # read bookmarks
+        # old layout kept bookmarks in separate <uid>.<wiki>.bookmark files
+        self.bookmarks = {}
+        fname_pattern = opj(self.users_dir, "%s.*.bookmark" % self.uid)
+        for fname in glob.glob(fname_pattern):
+            f = file(fname, "r")
+            bookmark = f.read()
+            f.close()
+            # strip prefix and suffix to recover the wiki name from the file name
+            wiki = fname.replace('.bookmark', '').replace(opj(self.users_dir, self.uid+'.'), '')
+            self.bookmarks[wiki] = int(bookmark)
+        # don't care about trail
+
+    def write(self, users_dir):
+        """ write profile and bookmarks data to disk
+
+            Converts subscribed_pages/quicklinks to the new name[] list
+            syntax and merges the bookmarks into the profile as bookmarks{}.
+        """
+        fname = opj(users_dir, self.uid)
+        f = codecs.open(fname, 'w', config.charset)
+        for key, value in self.profile.items():
+            if key in (u'subscribed_pages', u'quicklinks'):
+                pages = value.split(u'\t')
+                for i in range(len(pages)):
+                    pagename = pages[i]
+                    try:
+                        interwiki, pagename = pagename.split(u':', 1)
+                    except: # NOTE(review): bare except - only ValueError is expected here
+                        interwiki, pagename = u'Self', pagename
+                    # only rename pages of this wiki (Self or our interwiki name)
+                    if interwiki == u'Self' or interwiki == self.request.cfg.interwikiname:
+                        if ('PAGE', pagename) in self.renames:
+                            pagename = self.renames[('PAGE', pagename)]
+                            pages[i] = u'%s:%s' % (interwiki, pagename)
+                key += '[]' # we have lists here
+                value = u'\t'.join(pages)
+                f.write(u"%s=%s\n" % (key, value))
+            else:
+                f.write(u"%s=%s\n" % (key, value))
+        # append bookmarks as a dict-valued profile entry (new syntax)
+        bookmark_entries = [u'%s:%s' % item for item in self.bookmarks.items()]
+        key = u"bookmarks{}"
+        value = u'\t'.join(bookmark_entries)
+        f.write(u"%s=%s\n" % (key, value))
+        f.close()
+        # don't care about trail
+
+    def copy(self, users_dir, renames):
+        """ read user data and write the converted form into users_dir """
+        self.renames = renames
+        self.read()
+        self.write(users_dir)
+
+
+class DataConverter(object):
+    def __init__(self, request, src_data_dir, dest_data_dir):
+        self.request = request
+        self.sdata = src_data_dir
+        self.ddata = dest_data_dir
+        self.pages = {}
+        self.users = {}
+        self.complete = {}
+        self.renames = {}
+        self.complete_fname = opj(self.sdata, 'complete.txt')
+        self.rename_fname1 = opj(self.sdata, 'rename1.txt')
+        self.rename_fname2 = opj(self.sdata, 'rename2.txt')
+
+    def pass1(self):
+        """ First create the rename list - the user has to review/edit it as
+            we can't decide about page/attachment names automatically.
+        """
+        self.read_src()
+        # pages
+        for pn, p in self.pages.items():
+            p.read()
+            if not p.revisions:
+                continue # we don't care for pages with no revisions (trash)
+            if pn.endswith('/MoinEditorBackup'):
+                continue # we don't care for old editor backups
+            self.complete[('PAGE', pn)] = None
+            if "_" in pn:
+                # log all pagenames with underscores
+                self.renames[('PAGE', pn)] = None
+            if p.attachments is not None:
+                for fn in p.attachments:
+                    try:
+                        fn_str = fn.encode('ascii')
+                        log = False # pure ascii filenames are no problem
+                    except UnicodeEncodeError:
+                        log = True # this file maybe has a strange representation in wiki markup
+                    else:
+                        if ' ' in fn_str or '%' in fn_str: # files with blanks need quoting
+                            log = True
+                    self.complete[('FILE', pn, fn)] = None
+                    if log:
+                        # log all strange attachment filenames
+                        fn_str = fn.encode('utf-8')
+                        self.renames[('FILE', pn, fn)] = None
+        self.save_list(self.complete_fname, self.complete)
+        self.save_list(self.rename_fname1, self.renames)
+
+    LIST_FIELDSEP = u'|' # in case | makes trouble, one can use \t tab char
+
+    def save_list(self, fname, what):
+        what_sorted = what.keys()
+        # make sure we have 3-tuples:
+        what_sorted = [(k + (None, ))[:3] for k in what_sorted]
+        # we only have python 2.3, thus no cmp keyword for the sort() call,
+        # thus we need to do it the more complicated way:
+        what_sorted = [(pn, fn, rtype) for rtype, pn, fn in what_sorted] # shuffle
+        what_sorted.sort() # sort
+        what_sorted = [(rtype, pn, fn) for pn, fn, rtype in what_sorted] # shuffle
+        f = codecs.open(fname, 'w', 'utf-8')
+        for rtype, pn, fn in what_sorted:
+            if rtype == 'PAGE':
+                line = (rtype, pn, pn)
+            elif rtype == 'FILE':
+                line = (rtype, pn, fn, fn)
+            line = self.LIST_FIELDSEP.join(line)
+            f.write(line + u'\n')
+        f.close()
+
+    def load_list(self, fname, what):
+        f = codecs.open(fname, 'r', 'utf-8')
+        for line in f:
+            line = line.rstrip()
+            if not line:
+                continue
+            t = line.split(self.LIST_FIELDSEP)
+            rtype, p1, p2, p3 = (t + [None]*3)[:4]
+            if rtype == u'PAGE':
+                what[(str(rtype), p1)] = p2
+            elif rtype == u'FILE':
+                what[(str(rtype), p1, p2)] = p3
+        f.close()
+
+    def pass2(self):
+        """ Second, read the (user edited) rename list and do the renamings everywhere. """
+        self.read_src()
+        #self.load_list(self.complete_fname, self.complete)
+        self.load_list(self.rename_fname2, self.renames)
+        self.write_dest()
+
+    def read_src(self):
+        # create Page objects in memory
+        pages_dir = opj(self.sdata, 'pages')
+        pagelist = listdir(pages_dir)
+        for qpagename in pagelist:
+            p = Page(self.request, pages_dir, qpagename)
+            self.pages[p.name] = p
+
+        # create User objects in memory
+        users_dir = opj(self.sdata, 'user')
+        user_re = re.compile(r'^\d+\.\d+(\.\d+)?$')
+        userlist = listdir(users_dir)
+        userlist = [f for f in userlist if user_re.match(f)]
+        for userid in userlist:
+            u = User(self.request, users_dir, userid)
+            self.users[u.uid] = u
+
+        # create log objects in memory
+        self.editlog = EditLog(self.request, opj(self.sdata, 'edit-log'))
+        self.eventlog = EventLog(self.request, opj(self.sdata, 'event-log'))
+
+    def write_dest(self):
+        self.init_dest()
+        # copy pages
+        pages_dir = opj(self.ddata, 'pages')
+        for pn, page in self.pages.items():
+            if pn.endswith('/MoinEditorBackup'):
+                continue # we don't care for old editor backups
+            page.copy(pages_dir, self.renames)
+
+        # copy users
+        users_dir = opj(self.ddata, 'user')
+        for user in self.users.values():
+            user.copy(users_dir, self.renames)
+
+        # copy logs
+        self.editlog.copy(opj(self.ddata, 'edit-log'), self.renames)
+        self.eventlog.copy(opj(self.ddata, 'event-log'), self.renames)
+
+    def init_dest(self):
+        try:
+            os.makedirs(self.ddata)
+        except:
+            pass
+        os.makedirs(opj(self.ddata, 'pages'))
+        os.makedirs(opj(self.ddata, 'user'))
+        copy_dir(opj(self.sdata, 'plugin'), opj(self.ddata, 'plugin'))
+        copy_file(opj(self.sdata, 'intermap.txt'), opj(self.ddata, 'intermap.txt'))
+
+
--- /dev/null	Thu Jan 01 00:00:00 1970 +0000
+++ b/MoinMoin/script/migration/_conv160a_wiki.py	Sun Aug 03 22:58:40 2008 +0200
@@ -0,0 +1,629 @@
+# -*- coding: iso-8859-1 -*-
+"""
+    MoinMoin - convert content in 1.6.0alpha (rev 1844: 58ebb64243cc) wiki markup to 1.6.0 style
+               by using a modified 1.6.0alpha parser as translator.
+
+    Assuming we have this "renames" map:
+    -------------------------------------------------------
+    'PAGE', 'some_page'        -> 'some page'
+    'FILE', 'with%20blank.txt' -> 'with blank.txt'
+
+    Markup transformations needed:
+    -------------------------------------------------------
+    ["some_page"]           -> [[some page]] # renamed
+    [:some_page:some text]  -> [[some page|some text]]
+    [:page:text]            -> [[page|text]]
+                               (with a page not being renamed)
+
+    attachment:with%20blank.txt -> [[attachment:with blank.txt]]
+    attachment:some_page/with%20blank.txt -> [[attachment:some page/with blank.txt]]
+    The attachment processing should also urllib.unquote the filename (or at
+    least replace %20 by space) and put it into "quotes" if it contains spaces.
+
+    @copyright: 2007 MoinMoin:JohannesBerg,
+                2007 MoinMoin:ThomasWaldmann
+    @license: GNU GPL, see COPYING for details.
+"""
+
+import re
+
+from MoinMoin import i18n
+i18n.wikiLanguages = lambda: {}  # disable i18n lookups - migration runs without wiki language pages
+
+from MoinMoin import config, macro, wikiutil
+from MoinMoin.action import AttachFile
+from MoinMoin.Page import Page
+from MoinMoin.support.python_compatibility import rsplit
+
+import wikiutil160a
+from text_moin160a_wiki import Parser
+
+QUOTE_CHARS = u"'\""  # quote chars accepted around page names in 1.6.0a bracket link markup
+
+def convert_wiki(request, pagename, intext, renames):
+    """ Convert content written in wiki markup """
+    noeol = False
+    if not intext.endswith('\r\n'):
+        # the line-based converter expects CRLF-terminated input; remember
+        # that we added the terminator so it can be stripped again below
+        intext += '\r\n'
+        noeol = True
+    c = Converter(request, pagename, intext, renames)
+    # the converter "renders" by writing to request; capture that output
+    result = request.redirectedOutput(c.convert, request)
+    if noeol and result.endswith('\r\n'):
+        result = result[:-2]
+    return result
+
+
+STONEAGE_IMAGELINK = False # True for ImageLink(target,image), False for ImageLink(image,target)
+
+# copied from moin 1.6.0 macro/ImageLink.py (to be safe in case we remove ImageLink some day)
+# ... and slightly modified/refactored for our needs here.
+# hint: using parse_quoted_separated from wikiutil does NOT work here, because we do not have
+#       quoted urls when they contain a '=' char in the 1.5 data input.
+def explore_args(args):
+    """ explore args for positional and keyword parameters """
+    if args:
+        args = args.split(',')
+        args = [arg.strip() for arg in args]
+    else:
+        args = []
+
+    kw_count = 0
+    kw = {} # keyword args
+    pp = [] # positional parameters
+
+    kwAllowed = ('width', 'height', 'alt')
+
+    for arg in args:
+        if '=' in arg:
+            key, value = arg.split('=', 1)
+            key_lowerstr = str(key.lower())
+            # avoid that urls with "=" are interpreted as keyword
+            if key_lowerstr in kwAllowed:
+                kw_count += 1
+                kw[key_lowerstr] = value
+            elif not kw_count and '://' in arg:
+                # assuming that this is the image
+                pp.append(arg)
+            # NOTE(review): a '='-containing arg that is neither a known
+            # keyword nor a pre-keyword URL is silently dropped here -
+            # presumably intentional, matching 1.6.0 ImageLink behaviour.
+        else:
+            pp.append(arg)
+
+    # oldest ImageLink had (target, image) order instead of (image, target)
+    if STONEAGE_IMAGELINK and len(pp) >= 2:
+        pp[0], pp[1] = pp[1], pp[0]
+
+    return pp, kw
+
+
+class Converter(Parser):
+    """ Converts 1.6.0a wiki markup to 1.6.0 markup.
+
+        Subclasses the 1.6.0a Parser: the scanner/regex machinery is
+        inherited, but the _*_repl handlers emit (possibly renamed)
+        1.6.0 markup instead of HTML.
+    """
+    def __init__(self, request, pagename, raw, renames):
+        self.pagename = pagename
+        self.raw = raw
+        self.renames = renames  # ('PAGE', name) / ('FILE', page, fname) -> new name, see module docstring
+        self.request = request
+        self._ = None
+        self.in_pre = 0  # true while inside a {{{ ... }}} section (not converted)
+
+        # make the macro rule also match ImageLink (it is converted specially below)
+        self.formatting_rules = self.formatting_rules % {'macronames': u'|'.join(['ImageLink', ] + macro.getNames(self.request.cfg))}
+
+    # no change
+    def return_word(self, word):
+        # identity handler: markup that is the same in 1.6.0a and 1.6.0
+        # is passed through untouched
+        return word
+    _emph_repl = return_word
+    _emph_ibb_repl = return_word
+    _emph_ibi_repl = return_word
+    _emph_ib_or_bi_repl = return_word
+    _u_repl = return_word
+    _strike_repl = return_word
+    _sup_repl = return_word
+    _sub_repl = return_word
+    _small_repl = return_word
+    _big_repl = return_word
+    _tt_repl = return_word
+    _tt_bt_repl = return_word
+    _remark_repl = return_word
+    _table_repl = return_word
+    _tableZ_repl = return_word
+    _rule_repl = return_word
+    _smiley_repl = return_word
+    _smileyA_repl = return_word
+    _ent_repl = return_word
+    _ent_numeric_repl = return_word
+    _ent_symbolic_repl = return_word
+    _heading_repl = return_word
+    _email_repl = return_word
+    _notword_repl = return_word
+    _indent_repl = return_word
+    _li_none_repl = return_word
+    _li_repl = return_word
+    _ol_repl = return_word
+    _dl_repl = return_word
+    _comment_repl = return_word
+
+    # translate pagenames using pagename translation map
+
+    def _replace(self, key):
+        """ replace a item_name if it is in the renames dict
+            key is either a 2-tuple ('PAGE', pagename)
+            or a 3-tuple ('FILE', pagename, filename)
+        """
+        current_page = self.pagename
+        # pad with None so the 2-tuple PAGE case also unpacks into 3 values
+        item_type, page_name, file_name = (key + (None, ))[:3]
+        # rename map keys are absolute page names; resolve relative names first
+        abs_page_name = wikiutil.AbsPageName(current_page, page_name)
+        if item_type == 'PAGE':
+            key = (item_type, abs_page_name)
+            new_name = self.renames.get(key)
+            if new_name is None:
+                # we don't have an entry in rename map - apply the same magic
+                # to the page name as 1.5 did (" " -> "_") and try again:
+                abs_magic_name = abs_page_name.replace(u' ', u'_')
+                key = (item_type, abs_magic_name)
+                new_name = self.renames.get(key)
+                if new_name is None:
+                    # we didn't find it under the magic name either -
+                    # that means we do not rename it!
+                    new_name = page_name
+            if new_name != page_name and abs_page_name != page_name:
+                # we have to fix the (absolute) new_name to be a relative name (as it was before)
+                new_name = wikiutil.RelPageName(current_page, new_name)
+        elif item_type == 'FILE':
+            key = (item_type, abs_page_name, file_name)
+            new_name = self.renames.get(key)
+            if new_name is None:
+                # we don't have an entry in rename map - apply the same magic
+                # to the page name as 1.5 did (" " -> "_") and try again:
+                abs_magic_name = abs_page_name.replace(u' ', u'_')
+                key = (item_type, abs_magic_name, file_name)
+                new_name = self.renames.get(key)
+                if new_name is None:
+                    # we didn't find it under the magic name either -
+                    # that means we do not rename it!
+                    new_name = file_name
+        return new_name
+
+    def _replace_target(self, target):
+        # apply the PAGE rename map to a link target, keeping an
+        # optional "#anchor" suffix intact
+        target_and_anchor = rsplit(target, '#', 1)
+        if len(target_and_anchor) > 1:
+            target, anchor = target_and_anchor
+            target = self._replace(('PAGE', target))
+            return '%s#%s' % (target, anchor)
+        else:
+            target = self._replace(('PAGE', target))
+            return target
+
+    # markup conversion
+
+    def _macro_repl(self, word):
+        # we use [[...]] for links now, macros will be <<...>>
+        macro_rule = ur"""
+            \[\[
+            (?P<macro_name>\w+)
+            (\((?P<macro_args>.*?)\))?
+            \]\]
+        """
+        word = unicode(word) # XXX why is word not unicode before???
+        m = re.match(macro_rule, word, re.X|re.U)
+        macro_name = m.group('macro_name')
+        macro_args = m.group('macro_args')
+        if macro_name == 'ImageLink':
+            # ImageLink(image, target, ...) is dropped in 1.6.0; rewrite it
+            # as a link with an embedded (transcluded) image: [[target|{{image...}}]]
+            fixed, kw = explore_args(macro_args)
+            #print "macro_args=%r" % macro_args
+            #print "fixed=%r, kw=%r" % (fixed, kw)
+            image, target = (fixed + ['', ''])[:2]
+            if image is None:
+                image = ''
+            if target is None:
+                target = ''
+            if '://' not in image:
+                # if it is not a URL, it is meant as attachment
+                image = u'attachment:%s' % image
+            if not target:
+                target = image  # no target given: link the image to itself
+            elif target.startswith('inline:'):
+                target = 'attachment:' + target[7:] # we don't support inline:
+            elif target.startswith('wiki:'):
+                target = target[5:] # drop wiki:
+            image_attrs = []
+            alt = kw.get('alt') or ''
+            width = kw.get('width')
+            if width is not None:
+                image_attrs.append(u"width=%s" % width)
+            height = kw.get('height')
+            if height is not None:
+                image_attrs.append(u"height=%s" % height)
+            image_attrs = u", ".join(image_attrs)
+            if image_attrs:
+                image_attrs = u'|' + image_attrs
+            if alt or image_attrs:
+                alt = u'|' + alt
+            result = u'[[%s|{{%s%s%s}}]]' % (target, image, alt, image_attrs)
+        else:
+            # any other macro: just switch the delimiters [[...]] -> <<...>>
+            if macro_args:
+                macro_args = u"(%s)" % macro_args
+            else:
+                macro_args = u''
+            result = u"<<%s%s>>" % (macro_name, macro_args)
+        # XXX later check whether some to be renamed pagename is used as macro param
+        return result
+
+    def _word_repl(self, word, text=None):
+        """Handle WikiNames."""
+        if not text:
+            if wikiutil.isStrictWikiname(word):
+                return word  # strict CamelCase links automatically, keep it bare
+            else:
+                return '[[%s]]' % word  # non-strict names need explicit link markup now
+        else: # internal use:
+            return '[[%s|%s]]' % (word, text)
+
+    def _wikiname_bracket_repl(self, text):
+        """Handle special-char wikinames with link text, like:
+           ["Jim O'Brian" Jim's home page] or ['Hello "world"!' a page with doublequotes]
+        """
+        word = text[1:-1] # strip brackets
+        first_char = word[0]
+        if first_char in QUOTE_CHARS:
+            # split on closing quote
+            target, linktext = word[1:].split(first_char, 1)
+        else: # not quoted
+            # split on whitespace
+            # NOTE(review): raises ValueError if there is no whitespace -
+            # presumably the scanner regex guarantees some; confirm.
+            target, linktext = word.split(None, 1)
+        if target:
+            target = self._replace(('PAGE', target))
+            linktext = linktext.strip()
+            if linktext and linktext != target:
+                return '[[%s|%s]]' % (target, linktext)
+            else:
+                return '[[%s]]' % target  # omit redundant link text
+        else:
+            return text  # empty target: leave markup unchanged
+
+    ''' old:
+    def _interwiki_repl(self, word):
+        """Handle InterWiki links."""
+        wikitag, wikiurl, wikitail, wikitag_bad = wikiutil.resolve_wiki(self.request, word)
+        if wikitag_bad:
+            return word
+        else:
+            wikiname, pagename = word.split(':', 1)
+            pagename = wikiutil.url_unquote(pagename) # maybe someone has used %20 for blanks in pagename
+            camelcase = wikiutil.isStrictWikiname(pagename)
+            if wikiname in ('Self', self.request.cfg.interwikiname):
+                pagename = self._replace(('PAGE', pagename))
+                if camelcase:
+                    return '%s' % pagename # optimize special case
+                else:
+                    return '[[%s]]' % pagename # optimize special case
+            else:
+                if ' ' in pagename: # we could get a ' '  by urlunquoting
+                    return '[[%s:%s]]' % (wikiname, pagename)
+                else:
+                    return '%s:%s' % (wikiname, pagename)
+    '''
+
+    def _interwiki_repl(self, word):
+        """Handle InterWiki links."""
+        wikitag, wikiurl, wikitail, wikitag_bad = wikiutil.resolve_wiki(self.request, word)
+        if wikitag_bad:
+            return word  # unknown wiki name: not an interwiki link, keep as-is
+        else:
+            # delegate to the common interwiki converter (expects "wiki:" scheme)
+            return self.interwiki("wiki:" + word)
+
+    def interwiki(self, target_and_text, **kw):
+        # Convert a "wiki:..." target (with optional link text) to 1.6.0 markup,
+        # applying the rename map for links that point into this wiki.
+        scheme, rest = target_and_text.split(':', 1)
+        wikiname, pagename, text = wikiutil160a.split_wiki(rest)
+        if text:
+            text = '|' + text
+
+        if (pagename.startswith(wikiutil.CHILD_PREFIX) or # fancy link to subpage [wiki:/SubPage text]
+            Page(self.request, pagename).exists()): # fancy link to local page [wiki:LocalPage text]
+            pagename = wikiutil.url_unquote(pagename)
+            pagename = self._replace_target(pagename)
+            return '[[%s%s]]' % (pagename, text)
+
+        if wikiname in ('Self', self.request.cfg.interwikiname, ''): # [wiki:Self:LocalPage text] or [:LocalPage:text]
+            pagename = wikiutil.url_unquote(pagename)
+            pagename = self._replace_target(pagename)
+            camelcase = wikiutil.isStrictWikiname(pagename)
+            if camelcase and text == pagename:
+                return '%s' % pagename # optimize special case
+            else:
+                return '[[%s%s]]' % (pagename, text)
+
+        wikitag, wikiurl, wikitail, wikitag_bad = wikiutil.resolve_wiki(self.request, wikiname+':')
+        if wikitag_bad: # likely we got some /InterWiki as wikitail, we don't want that!
+            pagename = wikiutil.url_unquote(pagename)
+            pagename = self._replace_target(pagename)
+            wikitail = pagename
+        else: # good
+            wikitail = wikiutil.url_unquote(pagename)
+
+        # link to self?
+        if wikiutil.isPicture(wikitail):
+            return '{{%s:%s%s}}' % (wikitag, wikitail, text)  # pictures get embedded
+        else:
+            if ' ' not in wikitail and not text:
+                return '%s:%s' % (wikitag, wikitail)  # bare interwiki still links in 1.6.0
+            else:
+                return '[[%s:%s%s]]' % (wikitag, wikitail, text)
+
+    ''' old:
+    def interwiki(self, url_and_text):
+        # keep track of whether this is a self-reference, so links
+        # are always shown even the page doesn't exist.
+        wikiname, pagename = wikiutil.split_wiki(url)
+    '''
+    '''
+    def attachment(self, url_and_text):
+        """ This gets called on attachment URLs. """
+        if len(url_and_text) == 1:
+            url = url_and_text[0]
+            text = ''
+        else:
+            url, text = url_and_text
+            text = '|' + text
+
+        scheme, fname = url.split(":", 1)
+        #scheme, fname, text = wikiutil.split_wiki(target_and_text)
+
+        pagename, fname = AttachFile.absoluteName(fname, self.pagename)
+        from_this_page = pagename == self.pagename
+        fname = self._replace(('FILE', pagename, fname))
+        fname = wikiutil.url_unquote(fname, want_unicode=True)
+        fname = self._replace(('FILE', pagename, fname))
+        pagename = self._replace(('PAGE', pagename))
+        if from_this_page:
+            name = fname
+        else:
+            name = "%s/%s" % (pagename, fname)
+
+        if scheme == 'drawing':
+            return "{{drawing:%s%s}}" % (name, text)
+
+        # check for image URL, and possibly return IMG tag
+        # (images are always inlined, just like for other URLs)
+        if wikiutil.isPicture(name):
+            return "{{attachment:%s%s}}" % (name, text)
+
+        # inline the attachment
+        if scheme == 'inline':
+            return '{{attachment:%s%s}}' % (name, text)
+        else: # 'attachment'
+            return '[[attachment:%s%s]]' % (name, text)
+    '''
+
+    def attachment(self, target_and_text, **kw):
+        """ This gets called on attachment URLs """
+        _ = self._
+        scheme, fname, text = wikiutil160a.split_wiki(target_and_text)
+        # build "fname" or "fname|text" used inside the new markup below
+        fn_txt = fname
+        if text:
+            fn_txt += '|' + text
+
+        if scheme == 'drawing':
+            return "{{drawing:%s}}" % fn_txt
+
+        # check for image, and possibly return IMG tag (images are always inlined)
+        if not kw.get('pretty_url', 0) and wikiutil.isPicture(fname):
+            return "{{attachment:%s}}" % fn_txt
+
+        # inline the attachment
+        if scheme == 'inline':
+            return '{{attachment:%s}}' % fn_txt  # inline: is implied by {{...}} in 1.6.0
+
+        return '[[attachment:%s]]' % fn_txt
+
+    def _url_repl(self, word):
+        """Handle literal URLs including inline images."""
+        scheme = word.split(":", 1)[0]
+
+        if scheme == 'wiki':
+            return self.interwiki(word)
+        if scheme in self.attachment_schemas:
+            return '%s' % self.attachment(word)
+
+        if wikiutil.isPicture(word): # magic will go away in 1.6!
+            return '{{%s}}' % word # new markup for inline images
+        else:
+            return word
+
+
+    def _url_bracket_repl(self, word):
+        """Handle bracketed URLs."""
+        word = word[1:-1] # strip brackets
+
+        # Local extended link? [:page name:link text] XXX DEPRECATED
+        if word[0] == ':':
+            words = word[1:].split(':', 1)
+            pagename = self._replace(('PAGE', words[0]))
+            if len(words) == 1 or len(words) == 2 and not words[1]:
+                return '[[%s]]' % (pagename, )  # no (or empty) link text
+            else:
+                return '[[%s|%s]]' % (pagename, words[1])
+
+        scheme_and_rest = word.split(":", 1)
+        if len(scheme_and_rest) == 1: # no scheme
+            # Traditional split on space
+            words = word.split(None, 1)
+            if len(words) == 1:
+                words = words * 2  # no link text: use target as text
+
+            if words[0].startswith('#'): # anchor link
+                if words[0] == words[1]:
+                    return '[[%s]]' % words[0]
+                else:
+                    return '[[%s|%s]]' % tuple(words)
+        else:
+            scheme, rest = scheme_and_rest
+            if scheme == "wiki":
+                return self.interwiki(word, pretty_url=1)
+            if scheme in self.attachment_schemas:
+                return self.attachment(word)
+
+            words = word.split(None, 1)
+            if len(words) == 1:
+                words = words * 2  # no link text: use target as text
+
+        # generic bracketed URL: [[target]] or [[target|text]],
+        # embedding the text if it is itself a picture URL
+        target, text = words
+        if wikiutil.isPicture(text) and re.match(self.url_rule, text):
+            return '[[%s|{{%s}}]]' % (target, text)
+        else:
+            if target == text:
+                return '[[%s]]' % target
+            else:
+                return '[[%s|%s]]' % (target, text)
+
+
+    '''
+    def _url_bracket_repl(self, word):
+        """Handle bracketed URLs."""
+        word = word[1:-1] # strip brackets
+
+        # Local extended link?
+        if word[0] == ':':
+            words = word[1:].split(':', 1)
+            link, text = (words + ['', ''])[:2]
+            if link.strip() == text.strip():
+                text = ''
+            link = self._replace_target(link)
+            if text:
+                text = '|' + text
+            return '[[%s%s]]' % (link, text)
+
+        # Traditional split on space
+        words = word.split(None, 1)
+        if words[0][0] == '#':
+            # anchor link
+            link, text = (words + ['', ''])[:2]
+            if link.strip() == text.strip():
+                text = ''
+            #link = self._replace_target(link)
+            if text:
+                text = '|' + text
+            return '[[%s%s]]' % (link, text)
+
+        scheme = words[0].split(":", 1)[0]
+        if scheme == "wiki":
+            return self.interwiki(words)
+            #scheme, wikiname, pagename, text = self.interwiki(word)
+            #print "%r %r %r %r" % (scheme, wikiname, pagename, text)
+            #if wikiname in ('Self', self.request.cfg.interwikiname, ''):
+            #    if text:
+            #        text = '|' + text
+            #    return '[[%s%s]]' % (pagename, text)
+            #else:
+            #    if text:
+            #        text = '|' + text
+            #    return "[[%s:%s%s]]" % (wikiname, pagename, text)
+        if scheme in self.attachment_schemas:
+            m = self.attachment(words)
+            if m.startswith('{{') and m.endswith('}}'):
+                # with url_bracket markup, 1.5.8 parser does not embed, but link!
+                m = '[[%s]]' % m[2:-2]
+            return m
+
+        target, desc = (words + ['', ''])[:2]
+        if wikiutil.isPicture(desc) and re.match(self.url_rule, desc):
+            #return '[[%s|{{%s|%s}}]]' % (words[0], words[1], words[0])
+            return '[[%s|{{%s}}]]' % (target, desc)
+        else:
+            if desc:
+                desc = '|' + desc
+            return '[[%s%s]]' % (target, desc)
+    '''
+
+    def _pre_repl(self, word):
+        # track {{{ ... }}} preformatted state; convert() uses self.in_pre
+        # to switch to a scanner that only looks for the closing }}}
+        w = word.strip()
+        if w == '{{{' and not self.in_pre:
+            self.in_pre = True
+        elif w == '}}}' and self.in_pre:
+            self.in_pre = False
+        return word
+
+    def _processor_repl(self, word):
+        # a processor/parser section ({{{#!...) also starts preformatted
+        # mode; the closing }}} is handled by _pre_repl
+        self.in_pre = True
+        return word
+
+    def scan(self, scan_re, line):
+        """ Scans one line - append text before match, invoke replace() with match, and add text after match.  """
+        result = []
+        lastpos = 0
+
+        for match in scan_re.finditer(line):
+            # Add text before the match
+            if lastpos < match.start():
+                result.append(line[lastpos:match.start()])
+            # Replace match with markup
+            result.append(self.replace(match))
+            lastpos = match.end()
+
+        # Add remainder of the line
+        result.append(line[lastpos:])
+        return u''.join(result)
+
+
+    def replace(self, match):
+        """ Replace match using type name """
+        result = []
+        for _type, hit in match.groupdict().items():
+            if hit is not None and not _type in ["hmarker", ]:
+                # Get replace method and replace hit
+                replace = getattr(self, '_' + _type + '_repl')
+                # print _type, hit
+                result.append(replace(hit))
+                return ''.join(result)
+        else:
+            # We should never get here
+            import pprint
+            raise Exception("Can't handle match %r\n%s\n%s" % (
+                match,
+                pprint.pformat(match.groupdict()),
+                pprint.pformat(match.groups()),
+            ))
+
+        return ""
+
+    def convert(self, request):
+        """ For each line, scan through looking for magic
+            strings, outputting verbatim any intervening text.
+        """
+        self.request = request
+        # prepare regex patterns
+        rules = self.formatting_rules.replace('\n', '|')
+        if self.request.cfg.bang_meta:
+            rules = ur'(?P<notword>!%(word_rule)s)|%(rules)s' % {
+                'word_rule': self.word_rule,
+                'rules': rules,
+            }
+        # inside {{{ }}} sections we only look for the closing }}}
+        pre_rules = r'''(?P<pre>\}\}\})'''
+        pre_scan_re = re.compile(pre_rules, re.UNICODE)
+        scan_re = re.compile(rules, re.UNICODE)
+        eol_re = re.compile(r'\r?\n', re.UNICODE)
+
+        rawtext = self.raw
+
+        # remove last item because it's guaranteed to be empty
+        self.lines = eol_re.split(rawtext)[:-1]
+        self.in_processing_instructions = True
+
+        # Main loop
+        for line in self.lines:
+            # ignore processing instructions
+            # (they may only appear as an uninterrupted block at the top)
+            if self.in_processing_instructions:
+                found = False
+                for pi in ("##", "#format", "#refresh", "#redirect", "#deprecated",
+                           "#pragma", "#form", "#acl", "#language"):
+                    if line.lower().startswith(pi):
+                        self.request.write(line + '\r\n')
+                        found = True
+                        break
+                if not found:
+                    self.in_processing_instructions = False
+                else:
+                    continue # do not parse this line
+            if not line.strip():
+                self.request.write(line + '\r\n')
+            else:
+                # Scan line, format and write
+                scanning_re = self.in_pre and pre_scan_re or scan_re
+                formatted_line = self.scan(scanning_re, line)
+                self.request.write(formatted_line + '\r\n')
+
--- /dev/null	Thu Jan 01 00:00:00 1970 +0000
+++ b/MoinMoin/script/migration/_tests/test_conv160a_wiki.py	Sun Aug 03 22:58:40 2008 +0200
@@ -0,0 +1,161 @@
+# -*- coding: iso-8859-1 -*-
+"""
+    MoinMoin - tests of wiki content conversion
+
+    TODO:
+    * fix failing tests
+    * fix parser/converter anchor link handling
+    * emit a warning if we find some page name that was renamed as a macro argument?
+    * shall we support camelcase renaming?
+
+    Limitations of this converter:
+    * converter does not touch "pre sections", thus markup examples in {{{ }}}
+      or ` ` will have to get handled manually.
+    * converter does not touch macro arguments, they will have to get handled
+      manually
+    * converter does not touch CamelCase links (but there should be no need to do)
+
+    @copyright: 2007 MoinMoin:ThomasWaldmann
+    @license: GNU GPL, see COPYING for details.
+"""
+import py
+#py.test.skip("broken")
+
+from MoinMoin.script.migration._conv160a_wiki import convert_wiki
+
+class TestWikiConversion:
+    """ test the wiki markup conversion 1.6.0a -> 1.6.0 """
+    def test_absolute(self):
+        # table-driven test: (input markup, rename map, expected 1.6.0 markup)
+        request = self.request
+        pagename = 'TestPage'
+        rename_some_page = {
+                ('PAGE', 'some_page'): 'some page',
+        }
+        rename_some_file = {
+                ('FILE', pagename, 'with_underscore'): 'without underscore',
+                ('FILE', pagename, 'with blank'): 'without_blank',
+        }
+
+        tests = [
+            # 1.6.0a specific tests
+            ('["some page" somepage]', {}, '[[some page|somepage]]'),
+            ("['some page' somepage]", {}, '[[some page|somepage]]'),
+            ("MoinMaster:'some page'", {}, '[[MoinMaster:some page]]'),
+            ('MoinMaster:"some page"', {}, '[[MoinMaster:some page]]'),
+            #("MoinMaster:'some page'", {}, '[[MoinMaster:some page]]'),
+            # "nothing changed" checks (except markup)
+            ('', {}, ''),
+            ('CamelCase', {}, 'CamelCase'),
+            # XXX TODO ('MoinMaster:CamelCase', {}, 'MoinMaster:CamelCase'),
+
+            # did not work in 1.6a
+            #('[wiki:LinuxWiki: LinuxWiki.de]', {}, '[[LinuxWiki:|LinuxWiki.de]]'),
+            #('[wiki:/OtherPage]', rename_some_page, '[[/OtherPage]]'),
+            #('[wiki:/OtherPage other page]', rename_some_page, '[[/OtherPage|other page]]'),
+
+            # XXX TODO  ('[wiki:MoinMoin/FrontPage]', {}, 'MoinMoin:FrontPage'),
+            ('some_text', {}, 'some_text'),
+            ('["some_text"]', {}, '[[some_text]]'),
+            ('some_page', rename_some_page, 'some_page'), # not a link
+            ('{{{["some_page"]}}}', rename_some_page, '{{{["some_page"]}}}'), # not a link
+            ('`["some_page"]`', rename_some_page, '`["some_page"]`'), # not a link
+            ('["OtherPage/some_page"]', rename_some_page, '[[OtherPage/some_page]]'), # different link
+            # XXX TODO ('MoinMaster:some_page', rename_some_page, 'MoinMaster:some_page'), # external link
+            ('http://some_server/some_page', rename_some_page, 'http://some_server/some_page'), # external link
+            ('[http://some_server/some_page]', rename_some_page, '[[http://some_server/some_page]]'), # external link
+            ('[#some_page]', rename_some_page, '[[#some_page]]'), # link to anchor that has same name
+            #XXX ('[attachment:some_page.png]', rename_some_page, '[[attachment:some_page.png]]'), # att, not page
+            #XXX ('[attachment:some_page.png test picture]', rename_some_page, '[[attachment:some_page.png|test picture]]'), # att, not page
+            # url unquote stuff (%20 was popular for space)
+            #XXX ('attachment:My%20Attachment.jpg', {}, '{{attachment:My Attachment.jpg}}'), # embed!
+            #XXX ('[attachment:My%20Attachment.jpg]', {}, '[[attachment:My Attachment.jpg]]'), # link!
+            #XXX ('[attachment:My%20Attachment.jpg it works]', {}, '[[attachment:My Attachment.jpg|it works]]'),
+
+            # page rename changes result
+            ('["some_page"]', rename_some_page, '[[some page]]'),
+            ('[:some_page]', rename_some_page, '[[some page]]'),
+            ('[:some_page:]', rename_some_page, '[[some page]]'),
+            ('[:some_page:some text]', rename_some_page, '[[some page|some text]]'),
+            ('Self:some_page', rename_some_page, '[[some page]]'),
+            ('wiki:Self:some_page', rename_some_page, '[[some page]]'),
+            ('[wiki:Self:some_page some text]', rename_some_page, '[[some page|some text]]'),
+            ('wiki:Self:some_page#some_anchor', rename_some_page, '[[some page#some_anchor]]'),
+
+            # other markup changes we do
+            ('[:other page]', {}, '[[other page]]'),
+            ('[:other page:]', {}, '[[other page]]'),
+            ('[:other page:other text]', {}, '[[other page|other text]]'),
+            # XXX TODO ('Self:CamelCase', {}, 'CamelCase'),
+            # XXX TODO ('[wiki:WikiPedia:Lynx_%28web_browser%29 Lynx]', {}, '[[WikiPedia:Lynx_(web_browser)|Lynx]]'),
+            # XXX TODO ('[:Something:Something]', {}, '[[Something]]'), # optimize markup
+
+            # "nothing changed" checks
+            ('attachment:OtherPage/with_underscore', rename_some_file, '[[attachment:OtherPage/with_underscore]]'),
+
+            # file rename changes result
+            # XXX TODO ('attachment:with_underscore', rename_some_file, '[[attachment:without underscore]]'),
+            # XXX TODO ('attachment:TestPage/with_underscore', rename_some_file, '[[attachment:without underscore]]'), # remove superfluous pagename
+
+            # attachment syntax: kill %20
+            # XXX TODO ('attachment:with%20blank', rename_some_file, '[[attachment:without_blank]]'), # plus rename
+            # XXX TODO ('attachment:keep%20blank', rename_some_file, '[[attachment:keep blank]]'), # no rename
+            # XXX TODO ('attachment:TestPage/keep%20blank', rename_some_file, '[[attachment:keep blank]]'), # remove superfluous pagename
+            # XXX TODO ('attachment:OtherPage/keep%20blank', rename_some_file, '[[attachment:OtherPage/keep blank]]'),
+
+            # embed images
+            ('http://server/image.png', {}, '{{http://server/image.png}}'),
+            ('attachment:image.gif', {}, '{{attachment:image.gif}}'),
+            ('inline:image.jpg', {}, '{{attachment:image.jpg}}'), # inline is now implied by {{...}}
+            ('drawing:image', {}, '{{drawing:image}}'),
+
+            # macros
+            ('[[BR]]', {}, '<<BR>>'),
+            ('[[FullSearch(wtf)]]', {}, '<<FullSearch(wtf)>>'),
+            (u'[[ImageLink(töst.png)]]', {}, u'[[attachment:töst.png|{{attachment:töst.png}}]]'),
+            ('[[ImageLink(test.png,OtherPage)]]', {}, '[[OtherPage|{{attachment:test.png}}]]'),
+            ('[[ImageLink(test.png,OtherPage,width=123,height=456)]]', {}, '[[OtherPage|{{attachment:test.png||width=123, height=456}}]]'),
+            ('[[ImageLink(test.png,OtherPage,width=123,height=456,alt=alttext)]]', {}, '[[OtherPage|{{attachment:test.png|alttext|width=123, height=456}}]]'),
+            ('[[ImageLink(test.png,OtherPage,width=123,height=456,alt=alt text with blanks)]]', {}, '[[OtherPage|{{attachment:test.png|alt text with blanks|width=123, height=456}}]]'),
+            ('[[ImageLink(http://server/test.png,OtherPage,width=123,height=456)]]', {}, '[[OtherPage|{{http://server/test.png||width=123, height=456}}]]'),
+            ('[[ImageLink(http://server/test.png,http://server/,width=123)]]', {}, '[[http://server/|{{http://server/test.png||width=123}}]]'),
+            ('[[ImageLink(test.png,attachment:test.png)]]', {}, '[[attachment:test.png|{{attachment:test.png}}]]'),
+            ('[[ImageLink(test.png,inline:test.py)]]', {}, '[[attachment:test.py|{{attachment:test.png}}]]'),
+
+        ]
+        for data, renames, expected in tests:
+            assert convert_wiki(request, pagename, data, renames) == expected
+
+    def test_sisterpage(self):
+        # links from a subpage to a renamed sibling subpage (../sister)
+        request = self.request
+        top_page = 'toppage'
+        pagename = '%s/subpage' % top_page
+        rename_some_page = {
+                ('PAGE', '%s/sister' % top_page): '%s/renamed_sister' % top_page,
+        }
+        tests = [
+            # "nothing changed" checks
+            ('["../sister_norename"]', rename_some_page, '[[../sister_norename]]'),
+
+            # renames
+            ('["../sister"]', rename_some_page, '[[../renamed_sister]]'),
+        ]
+        for data, renames, expected in tests:
+            assert convert_wiki(request, pagename, data, renames) == expected
+
+    def test_subpage(self):
+        # links from a parent page to a renamed subpage (/subpage)
+        request = self.request
+        pagename = 'toppage'
+        rename_some_page = {
+                ('PAGE', '%s/subpage' % pagename): '%s/renamed_subpage' % pagename,
+        }
+        tests = [
+            # "nothing changed" checks
+            ('["/subpage_norename"]', rename_some_page, '[[/subpage_norename]]'),
+
+            # renames
+            ('["/subpage"]', rename_some_page, '[[/renamed_subpage]]'),
+        ]
+        for data, renames, expected in tests:
+            assert convert_wiki(request, pagename, data, renames) == expected
+
+
--- /dev/null	Thu Jan 01 00:00:00 1970 +0000
+++ b/MoinMoin/script/migration/text_moin160a_wiki.py	Sun Aug 03 22:58:40 2008 +0200
@@ -0,0 +1,1148 @@
+# -*- coding: iso-8859-1 -*-
+"""
+    MoinMoin - MoinMoin Wiki Markup Parser
+
+    @copyright: 2000, 2001, 2002 by Jürgen Hermann <jh@web.de>,
+                2006 by MoinMoin:ThomasWaldmann
+    @license: GNU GPL, see COPYING for details.
+"""
+
+import re
+
+import wikiutil160a as wikiutil
+from MoinMoin import config, macro
+
+Dependencies = []
+
+class Parser:
+    """
+        Object that turns Wiki markup into HTML.
+
+        All formatting commands can be parsed one line at a time, though
+        some state is carried over between lines.
+
+        Methods named like _*_repl() are responsible to handle the named regex
+        patterns defined in print_html().
+    """
+
+    # allow caching
+    caching = 1
+    Dependencies = []
+
+    # some common strings
+    PARENT_PREFIX = wikiutil.PARENT_PREFIX
+    # quoted strings (we require that there is at least one char (that is not the quoting char)
+    # inside to not confuse stuff like '''Contact:''' (just a bold Contact:) with interwiki markup
+    # OtherWiki:'Page with blanks'
+    sq_string = ur"('[^']+?')" # single quoted string
+    dq_string = ur"(\"[^\"]+?\")" # double quoted string
+    q_string = ur"(%s|%s)" % (sq_string, dq_string) # quoted string
+    # URL schemes that are handled by attachment() instead of plain URL rendering
+    attachment_schemas = ["attachment", "inline", "drawing"]
+    # punctuation that must not terminate/confuse a bare URL match
+    punct_pattern = re.escape(u'''"\'}]|:,.)?!''')
+    punct_no_quote_pattern = re.escape(u'''}]|:,.)?!''')
+    # all recognized URL schemes; site-specific schemes from config.url_schemas are appended
+    url_pattern = (u'http|https|ftp|nntp|news|mailto|telnet|wiki|file|irc|' +
+            u'|'.join(attachment_schemas) +
+            (config.url_schemas and u'|' + u'|'.join(config.url_schemas) or ''))
+
+    # some common rules
+    # word_rule: matches CamelCase WikiNames (optionally with parent ../ or
+    # child / prefixes), guarded so it does not fire inside a longer word
+    word_rule = ur'(?:(?<![%(u)s%(l)s])|^)%(parent)s(?:%(subpages)s(?:[%(u)s][%(l)s]+){2,})+(?![%(u)s%(l)s]+)' % {
+        'u': config.chars_upper,
+        'l': config.chars_lower,
+        'subpages': wikiutil.CHILD_PREFIX + '?',
+        'parent': ur'(?:%s)?' % re.escape(PARENT_PREFIX),
+    }
+    # url_rule: scheme ':' followed by either a quoted string or a run of
+    # non-space chars that does not end in trailing punctuation
+    url_rule = ur'%(url_guard)s(%(url)s)\:(([^\s\<%(punct)s]|([%(punctnq)s][^\s\<%(punct)s]))+|%(q_string)s)' % {
+        'url_guard': ur'(^|(?<!\w))',
+        'url': url_pattern,
+        'punct': punct_pattern,
+        'punctnq': punct_no_quote_pattern,
+        'q_string': q_string,
+    }
+
+    # ordered-list item: "1.", "a.", "A.", "i.", "I.", optionally "#start"
+    ol_rule = ur"^\s+(?:[0-9]+|[aAiI])\.(?:#\d+)?\s"
+    # definition-list item: "term:: description"
+    dl_rule = ur"^\s+.*?::\s"
+
+    # this is used inside <pre> / parser sections (we just want to know when it's over):
+    pre_formatting_rules = ur"""(?P<pre>(\}\}\}))"""
+
+    # the big, fat, ugly one ;)
+    # One verbose-mode alternation of named groups; each group name X maps to a
+    # _X_repl() handler method.  Order matters: earlier alternatives win.
+    # NOTE: '%%(macronames)s' is escaped here and filled in per-instance in
+    # __init__ (macro names depend on the wiki configuration).
+    formatting_rules = ur"""(?P<ent_numeric>&#(\d{1,5}|x[0-9a-fA-F]+);)
+(?:(?P<emph_ibb>'''''(?=[^']+'''))
+(?P<emph_ibi>'''''(?=[^']+''))
+(?P<emph_ib_or_bi>'{5}(?=[^']))
+(?P<emph>'{2,3})
+(?P<u>__)
+(?P<sup>\^.*?\^)
+(?P<sub>,,[^,]{1,40},,)
+(?P<tt>\{\{\{.*?\}\}\})
+(?P<parser>(\{\{\{(#!.*|\s*$)))
+(?P<pre>(\{\{\{ ?|\}\}\}))
+(?P<small>(\~- ?|-\~))
+(?P<big>(\~\+ ?|\+\~))
+(?P<strike>(--\(|\)--))
+(?P<remark>(/\* ?| ?\*/))
+(?P<rule>-{4,})
+(?P<comment>^\#\#.*$)
+(?P<macro>\[\[(%%(macronames)s)(?:\(.*?\))?\]\]))
+(?P<ol>%(ol_rule)s)
+(?P<dl>%(dl_rule)s)
+(?P<li>^\s+\*\s*)
+(?P<li_none>^\s+\.\s*)
+(?P<indent>^\s+)
+(?P<tableZ>\|\| $)
+(?P<table>(?:\|\|)+(?:<[^>]*?>)?(?!\|? $))
+(?P<heading>^\s*(?P<hmarker>=+)\s.*\s(?P=hmarker) $)
+(?P<interwiki>[A-Z][a-zA-Z]+\:(%(q_string)s|([^\s'\"\:\<\|]([^\s%(punct)s]|([%(punct)s][^\s%(punct)s]))+)))
+(?P<word>%(word_rule)s)
+(?P<url_bracket>\[((%(url)s)\:|#|\:)[^\s\]]+(\s[^\]]+)?\])
+(?P<url>%(url_rule)s)
+(?P<email>[-\w._+]+\@[\w-]+(\.[\w-]+)+)
+(?P<smiley>(?<=\s)(%(smiley)s)(?=\s))
+(?P<smileyA>^(%(smiley)s)(?=\s))
+(?P<ent_symbolic>&[a-zA-Z]+;)
+(?P<ent>[<>&])
+(?P<wikiname_bracket>\[%(q_string)s.*?\])
+(?P<tt_bt>`.*?`)"""  % {
+
+        'url': url_pattern,
+        'punct': punct_pattern,
+        'q_string': q_string,
+        'ol_rule': ol_rule,
+        'dl_rule': dl_rule,
+        'url_rule': url_rule,
+        'word_rule': word_rule,
+        'smiley': u'|'.join(map(re.escape, config.smileys))}
+
+    # Don't start p before these 
+    # (converted from a space-separated string into a dict so membership
+    # tests in the render loop are constant-time)
+    no_new_p_before = ("heading rule table tableZ tr td "
+                       "ul ol dl dt dd li li_none indent "
+                       "macro parser pre")
+    no_new_p_before = no_new_p_before.split()
+    no_new_p_before = dict(zip(no_new_p_before, [1] * len(no_new_p_before)))
+
+    def __init__(self, raw, request, **kw):
+        """Set up per-page parser state.
+
+        @param raw: raw wiki markup text to parse
+        @param request: current request object (supplies form, cfg, getText)
+        @keyword line_anchors: emit per-line anchors (default True)
+        @keyword start_line: starting line number offset (default 0)
+        @keyword format_args: '/'-separated CSS class(es) for a wrapping div
+        """
+        self.raw = raw
+        self.request = request
+        self.form = request.form # Macro object uses this
+        self._ = request.getText
+        self.cfg = request.cfg
+        self.line_anchors = kw.get('line_anchors', True)
+        self.macro = None
+        self.start_line = kw.get('start_line', 0)
+
+        # currently, there is only a single, optional argument to this parser and
+        # (when given), it is used as class(es) for a div wrapping the formatter output
+        # either use a single class like "comment" or multiple like "comment/red/dotted"
+        self.wrapping_div_class = kw.get('format_args', '').strip().replace('/', ' ')
+
+        # emphasis state; ints because a value of 2 marks nesting order (see _emph_repl)
+        self.is_em = 0 # must be int
+        self.is_b = 0 # must be int
+        self.is_u = False
+        self.is_strike = False
+        self.is_big = False
+        self.is_small = False
+        self.is_remark = False
+
+        self.lineno = 0
+        self.in_list = 0 # between <ul/ol/dl> and </ul/ol/dl>
+        self.in_li = 0 # between <li> and </li>
+        self.in_dd = 0 # between <dd> and </dd>
+
+        # states of the parser concerning being inside/outside of some "pre" section:
+        # None == we are not in any kind of pre section (was: 0)
+        # 'search_parser' == we didn't get a parser yet, still searching for it (was: 1)
+        # 'found_parser' == we found a valid parser (was: 2)
+        # 'no_parser' == we have no (valid) parser, use a normal <pre>...</pre> (was: 3)
+        self.in_pre = None
+
+        self.in_table = 0
+        self.inhibit_p = 0 # if set, do not auto-create a <p>aragraph
+        self.titles = request._page_headings
+
+        # holds the nesting level (in chars) of open lists
+        self.list_indents = []
+        self.list_types = []
+
+        # fill in the configured macro names (escaped as %%(macronames)s in the class attribute)
+        self.formatting_rules = self.formatting_rules % {'macronames': u'|'.join(macro.getNames(self.cfg))}
+
+    def _close_item(self, result):
+        """Close any open table / list item / definition desc.
+
+        Appends closing formatter markup to `result` (mutated in place)
+        and resets the corresponding state flags.
+        """
+        #result.append("<!-- close item begin -->\n")
+        if self.in_table:
+            result.append(self.formatter.table(0))
+            self.in_table = 0
+        if self.in_li:
+            self.in_li = 0
+            if self.formatter.in_p:
+                result.append(self.formatter.paragraph(0))
+            result.append(self.formatter.listitem(0))
+        if self.in_dd:
+            self.in_dd = 0
+            if self.formatter.in_p:
+                result.append(self.formatter.paragraph(0))
+            result.append(self.formatter.definition_desc(0))
+        #result.append("<!-- close item end -->\n")
+
+
+    def interwiki(self, target_and_text, **kw):
+        """Render an interwiki link like 'wiki:WikiName:PageName text'.
+
+        'Self' as wiki name is treated as a local page link; picture
+        targets are inlined as images unless pretty_url is requested.
+        """
+        # TODO: maybe support [wiki:Page http://wherever/image.png] ?
+        scheme, rest = target_and_text.split(':', 1)
+        wikiname, pagename, text = wikiutil.split_wiki(rest)
+        if not pagename:
+            pagename = self.formatter.page.page_name
+        if not text:
+            text = pagename
+        #self.request.log("interwiki: split_wiki -> %s.%s.%s" % (wikiname,pagename,text))
+
+        if wikiname.lower() == 'self': # [wiki:Self:LocalPage text] or [:LocalPage:text]
+            return self._word_repl(pagename, text)
+
+        # check for image URL, and possibly return IMG tag
+        if not kw.get('pretty_url', 0) and wikiutil.isPicture(pagename):
+            dummy, wikiurl, dummy, wikitag_bad = wikiutil.resolve_wiki(self.request, rest)
+            href = wikiutil.join_wiki(wikiurl, pagename)
+            #self.request.log("interwiki: join_wiki -> %s.%s.%s" % (wikiurl,pagename,href))
+            return self.formatter.image(src=href)
+
+        return (self.formatter.interwikilink(1, wikiname, pagename) +
+                self.formatter.text(text) +
+                self.formatter.interwikilink(0, wikiname, pagename))
+
+    def attachment(self, target_and_text, **kw):
+        """ This gets called on attachment URLs
+
+        Dispatches on the scheme: 'drawing' -> drawing markup,
+        picture filenames -> inline image (unless pretty_url),
+        'inline' -> inlined content, otherwise a download link.
+        """
+        _ = self._
+        #self.request.log("attachment: target_and_text %s" % target_and_text)
+        scheme, fname, text = wikiutil.split_wiki(target_and_text)
+        if not text:
+            text = fname
+
+        if scheme == 'drawing':
+            return self.formatter.attachment_drawing(fname, text)
+
+        # check for image, and possibly return IMG tag (images are always inlined)
+        if not kw.get('pretty_url', 0) and wikiutil.isPicture(fname):
+            return self.formatter.attachment_image(fname)
+
+        # inline the attachment
+        if scheme == 'inline':
+            return self.formatter.attachment_inlined(fname, text)
+
+        return self.formatter.attachment_link(fname, text)
+
+    def _u_repl(self, word):
+        """Handle underline (__): toggles state, emits open or close markup."""
+        self.is_u = not self.is_u
+        return self.formatter.underline(self.is_u)
+
+    def _strike_repl(self, word):
+        """Handle strikethrough ( --( ... )-- ): toggles state."""
+        # XXX we don't really enforce the correct sequence --( ... )-- here
+        self.is_strike = not self.is_strike
+        return self.formatter.strike(self.is_strike)
+
+    def _remark_repl(self, word):
+        """Handle remarks ( /* ... */ ) as a toggled span.
+
+        Visibility is decided by the user's show_comments preference
+        via an inline display style.
+        """
+        # XXX we don't really enforce the correct sequence /* ... */ here
+        self.is_remark = not self.is_remark
+        span_kw = {
+            'style': self.request.user.show_comments and "display:''" or "display:none",
+            'class': "comment",
+        }
+        return self.formatter.span(self.is_remark, **span_kw)
+
+    def _small_repl(self, word):
+        """Handle small ( ~- ... -~ ).
+
+        Mismatched markers (open while open, close while closed) are
+        emitted as literal text instead of toggling.
+        """
+        if word.strip() == '~-' and self.is_small:
+            return self.formatter.text(word)
+        if word.strip() == '-~' and not self.is_small:
+            return self.formatter.text(word)
+        self.is_small = not self.is_small
+        return self.formatter.small(self.is_small)
+
+    def _big_repl(self, word):
+        """Handle big ( ~+ ... +~ ).
+
+        Mismatched markers are emitted as literal text instead of toggling.
+        """
+        if word.strip() == '~+' and self.is_big:
+            return self.formatter.text(word)
+        if word.strip() == '+~' and not self.is_big:
+            return self.formatter.text(word)
+        self.is_big = not self.is_big
+        return self.formatter.big(self.is_big)
+
+    def _emph_repl(self, word):
+        """Handle emphasis, i.e. '' and '''.
+
+        A state value of 2 records which of bold/italic was opened second,
+        so the close order can be reconstructed in _emph_ib_or_bi_repl.
+        """
+        ##print "#", self.is_b, self.is_em, "#"
+        if len(word) == 3:
+            self.is_b = not self.is_b
+            if self.is_em and self.is_b:
+                self.is_b = 2
+            return self.formatter.strong(self.is_b)
+        else:
+            self.is_em = not self.is_em
+            if self.is_em and self.is_b:
+                self.is_em = 2
+            return self.formatter.emphasis(self.is_em)
+
+    def _emph_ibb_repl(self, word):
+        """Handle mixed emphasis, i.e. ''''' followed by '''.
+
+        Opens italic then bold; bold gets state 2 (opened second).
+        """
+        self.is_b = not self.is_b
+        self.is_em = not self.is_em
+        if self.is_em and self.is_b:
+            self.is_b = 2
+        return self.formatter.emphasis(self.is_em) + self.formatter.strong(self.is_b)
+
+    def _emph_ibi_repl(self, word):
+        """Handle mixed emphasis, i.e. ''''' followed by ''.
+
+        Opens bold then italic; italic gets state 2 (opened second).
+        """
+        self.is_b = not self.is_b
+        self.is_em = not self.is_em
+        if self.is_em and self.is_b:
+            self.is_em = 2
+        return self.formatter.strong(self.is_b) + self.formatter.emphasis(self.is_em)
+
+    def _emph_ib_or_bi_repl(self, word):
+        """Handle mixed emphasis, exactly five '''''.
+
+        Uses the 2-valued state set by the other emphasis handlers to
+        close/open in the order that keeps the tags properly nested.
+        """
+        ##print "*", self.is_b, self.is_em, "*"
+        b_before_em = self.is_b > self.is_em > 0
+        self.is_b = not self.is_b
+        self.is_em = not self.is_em
+        if b_before_em:
+            return self.formatter.strong(self.is_b) + self.formatter.emphasis(self.is_em)
+        else:
+            return self.formatter.emphasis(self.is_em) + self.formatter.strong(self.is_b)
+
+
+    def _sup_repl(self, word):
+        """Handle superscript ( ^text^ ); strips the markers from `word`."""
+        return self.formatter.sup(1) + \
+            self.formatter.text(word[1:-1]) + \
+            self.formatter.sup(0)
+
+    def _sub_repl(self, word):
+        """Handle subscript ( ,,text,, ); strips the markers from `word`."""
+        return self.formatter.sub(1) + \
+            self.formatter.text(word[2:-2]) + \
+            self.formatter.sub(0)
+
+
+    def _rule_repl(self, word):
+        """Handle sequences of dashes (horizontal rule).
+
+        4 dashes give the default rule; 5..10+ dashes map to sizes 1-6.
+        Closes any open lists and paragraph first.
+        """
+        result = self._undent() + self._closeP()
+        if len(word) <= 4:
+            result = result + self.formatter.rule()
+        else:
+            # Create variable rule size 1 - 6. Actual size defined in css.
+            size = min(len(word), 10) - 4
+            result = result + self.formatter.rule(size)
+        return result
+
+
+    def _word_repl(self, word, text=None):
+        """Handle WikiNames.
+
+        @param word: the page name (may carry ../ parent, / child prefix
+                     and a #anchor suffix)
+        @param text: optional link text; defaults to the page name
+        """
+
+        # check for parent links
+        # !!! should use wikiutil.AbsPageName here, but setting `text`
+        # correctly prevents us from doing this for now
+        if word.startswith(wikiutil.PARENT_PREFIX):
+            if not text:
+                text = word
+            word = '/'.join(filter(None, self.formatter.page.page_name.split('/')[:-1] + [word[wikiutil.PARENT_PREFIX_LEN:]]))
+
+        if not text:
+            # if a simple, self-referencing link, emit it as plain text
+            if word == self.formatter.page.page_name:
+                return self.formatter.text(word)
+            text = word
+        if word.startswith(wikiutil.CHILD_PREFIX):
+            word = self.formatter.page.page_name + '/' + word[wikiutil.CHILD_PREFIX_LEN:]
+
+        # handle anchors
+        parts = word.split("#", 1)
+        anchor = ""
+        if len(parts) == 2:
+            word, anchor = parts
+
+        return (self.formatter.pagelink(1, word, anchor=anchor) +
+                self.formatter.text(text) +
+                self.formatter.pagelink(0, word))
+
+    def _notword_repl(self, word):
+        """Handle !NotWikiNames: strip the leading '!' and emit as non-link."""
+        return self.formatter.nowikiword(word[1:])
+
+    def _interwiki_repl(self, word):
+        """Handle InterWiki links (Wiki:Page); unknown wiki tags fall back to plain text."""
+        wikitag, wikiurl, wikitail, wikitag_bad = wikiutil.resolve_wiki(self.request, word)
+        if wikitag_bad:
+            return self.formatter.text(word)
+        else:
+            return self.interwiki("wiki:" + word)
+
+    def _url_repl(self, word):
+        """Handle literal URLs including inline images.
+
+        'wiki:' URLs go to interwiki(), attachment schemes to attachment(),
+        picture URLs become inline images, everything else a plain link.
+        """
+        scheme = word.split(":", 1)[0]
+
+        if scheme == "wiki":
+            return self.interwiki(word)
+
+        if scheme in self.attachment_schemas:
+            return self.attachment(word)
+
+        if wikiutil.isPicture(word):
+            word = wikiutil.mapURL(self.request, word)
+            # Get image name http://here.com/dir/image.gif -> image
+            name = word.split('/')[-1]
+            name = ''.join(name.split('.')[:-1])
+            return self.formatter.image(src=word, alt=name)
+        else:
+            return (self.formatter.url(1, word, css=scheme) +
+                    self.formatter.text(word) +
+                    self.formatter.url(0))
+
+
+    def _wikiname_bracket_repl(self, text):
+        """Handle special-char wikinames with link text, like:
+           ["Jim O'Brian" Jim's home page] or ['Hello "world"!' a page with doublequotes]i
+
+        Quoted targets split on the closing quote, unquoted on whitespace;
+        an empty target falls back to emitting the raw text.
+        """
+        word = text[1:-1] # strip brackets
+        first_char = word[0]
+        if first_char in wikiutil.QUOTE_CHARS:
+            # split on closing quote
+            target, linktext = word[1:].split(first_char, 1)
+        else: # not quoted
+            # split on whitespace
+            target, linktext = word.split(None, 1)
+        if target:
+            linktext = linktext.strip()
+            return self._word_repl(target, linktext)
+        else:
+            return self.formatter.text(text)
+
+
+    def _url_bracket_repl(self, word):
+        """Handle bracketed URLs: [url text], [#anchor text], [:page:text], [wiki:... text].
+
+        NOTE(review): on the scheme-less path, if words[0] is not an anchor
+        ('#...'), control falls through to the final else where `scheme` was
+        never assigned -> NameError.  Left as-is (historical code) — confirm
+        whether the regex for url_bracket can actually produce that input.
+        """
+        word = word[1:-1] # strip brackets
+
+        # Local extended link? [:page name:link text] XXX DEPRECATED
+        if word[0] == ':':
+            words = word[1:].split(':', 1)
+            if len(words) == 1:
+                words = words * 2
+            target_and_text = 'wiki:Self:%s %s' % (wikiutil.quoteName(words[0]), words[1])
+            return self.interwiki(target_and_text, pretty_url=1)
+
+        scheme_and_rest = word.split(":", 1)
+        if len(scheme_and_rest) == 1: # no scheme
+            # Traditional split on space
+            words = word.split(None, 1)
+            if len(words) == 1:
+                words = words * 2
+
+            if words[0].startswith('#'): # anchor link
+                return (self.formatter.url(1, words[0]) +
+                        self.formatter.text(words[1]) +
+                        self.formatter.url(0))
+        else:
+            scheme, rest = scheme_and_rest
+            if scheme == "wiki":
+                return self.interwiki(word, pretty_url=1)
+            if scheme in self.attachment_schemas:
+                return self.attachment(word, pretty_url=1)
+
+            words = word.split(None, 1)
+            if len(words) == 1:
+                words = words * 2
+
+        if wikiutil.isPicture(words[1]) and re.match(self.url_rule, words[1]):
+            return (self.formatter.url(1, words[0], css='external', do_escape=0) +
+                    self.formatter.image(title=words[0], alt=words[0], src=words[1]) +
+                    self.formatter.url(0))
+        else:
+            return (self.formatter.url(1, words[0], css=scheme, do_escape=0) +
+                    self.formatter.text(words[1]) +
+                    self.formatter.url(0))
+
+
+    def _email_repl(self, word):
+        """Handle email addresses (without a leading mailto:)."""
+        return (self.formatter.url(1, "mailto:" + word, css='mailto') +
+                self.formatter.text(word) +
+                self.formatter.url(0))
+
+
+    def _ent_repl(self, word):
+        """Handle SGML entities ([<>&]); escaping is left to formatter.text."""
+        return self.formatter.text(word)
+        #return {'&': '&amp;',
+        #        '<': '&lt;',
+        #        '>': '&gt;'}[word]
+
+    def _ent_numeric_repl(self, word):
+        """Handle numeric (decimal and hexadecimal) SGML entities, passed through unescaped."""
+        return self.formatter.rawHTML(word)
+
+    def _ent_symbolic_repl(self, word):
+        """Handle symbolic SGML entities (&name;), passed through unescaped."""
+        return self.formatter.rawHTML(word)
+
+    def _indent_repl(self, match):
+        """Handle pure indentation (no - * 1. markup).
+
+        Rendered as a bullet-less list item; only opens a new item if we
+        are not already inside one.
+        """
+        result = []
+        if not (self.in_li or self.in_dd):
+            self._close_item(result)
+            self.in_li = 1
+            css_class = None
+            if self.line_was_empty and not self.first_list_item:
+                css_class = 'gap'
+            result.append(self.formatter.listitem(1, css_class=css_class, style="list-style-type:none"))
+        return ''.join(result)
+
+    def _li_none_repl(self, match):
+        """Handle type=none (" .") lists: bullet-less list item.
+
+        A 'gap' class is added after an empty line (except before the first item).
+        """
+        result = []
+        self._close_item(result)
+        self.in_li = 1
+        css_class = None
+        if self.line_was_empty and not self.first_list_item:
+            css_class = 'gap'
+        result.append(self.formatter.listitem(1, css_class=css_class, style="list-style-type:none"))
+        return ''.join(result)
+
+    def _li_repl(self, match):
+        """Handle bullet (" *") lists.
+
+        A 'gap' class is added after an empty line (except before the first item).
+        """
+        result = []
+        self._close_item(result)
+        self.in_li = 1
+        css_class = None
+        if self.line_was_empty and not self.first_list_item:
+            css_class = 'gap'
+        result.append(self.formatter.listitem(1, css_class=css_class))
+        return ''.join(result)
+
+    def _ol_repl(self, match):
+        """Handle numbered lists: item markup is identical to bullet items
+           (the surrounding <ol> is opened by _indent_to)."""
+        return self._li_repl(match)
+
+    def _dl_repl(self, match):
+        """Handle definition lists ("term:: desc").
+
+        match[1:-3] strips the leading indent char and the trailing ':: '
+        to recover the term text; the description follows as normal content.
+        """
+        result = []
+        self._close_item(result)
+        self.in_dd = 1
+        result.extend([
+            self.formatter.definition_term(1),
+            self.formatter.text(match[1:-3].lstrip(' ')),
+            self.formatter.definition_term(0),
+            self.formatter.definition_desc(1),
+        ])
+        return ''.join(result)
+
+
+    def _indent_level(self):
+        """Return current char-wise indent level (0 when no list is open)."""
+        return len(self.list_indents) and self.list_indents[-1]
+
+
+    def _indent_to(self, new_level, list_type, numtype, numstart):
+        """Close and open lists to reach `new_level` chars of indent.
+
+        @param new_level: target indentation (in characters)
+        @param list_type: 'ol', 'dl' or bullet for a newly opened list
+        @param numtype, numstart: numbering style/start for 'ol' lists
+        @return: closing markup followed by opening markup
+        """
+        openlist = []   # don't make one out of these two statements!
+        closelist = []
+
+        if self._indent_level() != new_level and self.in_table:
+            closelist.append(self.formatter.table(0))
+            self.in_table = 0
+
+        # close lists that are deeper than the new level
+        while self._indent_level() > new_level:
+            self._close_item(closelist)
+            if self.list_types[-1] == 'ol':
+                tag = self.formatter.number_list(0)
+            elif self.list_types[-1] == 'dl':
+                tag = self.formatter.definition_list(0)
+            else:
+                tag = self.formatter.bullet_list(0)
+            closelist.append(tag)
+
+            del self.list_indents[-1]
+            del self.list_types[-1]
+
+            if self.list_types: # we are still in a list
+                if self.list_types[-1] == 'dl':
+                    self.in_dd = 1
+                else:
+                    self.in_li = 1
+
+        # Open new list, if necessary
+        if self._indent_level() < new_level:
+            self.list_indents.append(new_level)
+            self.list_types.append(list_type)
+
+            if self.formatter.in_p:
+                closelist.append(self.formatter.paragraph(0))
+
+            if list_type == 'ol':
+                tag = self.formatter.number_list(1, numtype, numstart)
+            elif list_type == 'dl':
+                tag = self.formatter.definition_list(1)
+            else:
+                tag = self.formatter.bullet_list(1)
+            openlist.append(tag)
+
+            self.first_list_item = 1
+            self.in_li = 0
+            self.in_dd = 0
+
+        # If list level changes, close an open table
+        if self.in_table and (openlist or closelist):
+            closelist[0:0] = [self.formatter.table(0)]
+            self.in_table = 0
+
+        self.in_list = self.list_types != []
+        return ''.join(closelist) + ''.join(openlist)
+
+
+    def _undent(self):
+        """Close all open lists (innermost first) and reset list state."""
+        result = []
+        #result.append("<!-- _undent start -->\n")
+        self._close_item(result)
+        # walk the open list types from innermost to outermost
+        for type in self.list_types[::-1]:
+            if type == 'ol':
+                result.append(self.formatter.number_list(0))
+            elif type == 'dl':
+                result.append(self.formatter.definition_list(0))
+            else:
+                result.append(self.formatter.bullet_list(0))
+        #result.append("<!-- _undent end -->\n")
+        self.list_indents = []
+        self.list_types = []
+        return ''.join(result)
+
+
+    def _tt_repl(self, word):
+        """Handle inline code ( {{{...}}} ); strips the 3-char markers."""
+        return self.formatter.code(1) + \
+            self.formatter.text(word[3:-3]) + \
+            self.formatter.code(0)
+
+
+    def _tt_bt_repl(self, word):
+        """Handle backticked inline code ( `...` ); strips the backticks."""
+        # if len(word) == 2: return "" // removed for FCK editor
+        return self.formatter.code(1, css="backtick") + \
+            self.formatter.text(word[1:-1]) + \
+            self.formatter.code(0)
+
+
+    def _getTableAttrs(self, attrdef):
+        """Parse table attribute markup like '||<50% -2 #FF0000 ...>'.
+
+        @param attrdef: the raw cell-start markup including leading '|' chars
+        @return: tuple (attrs dict, error-msg markup or '')
+        """
+        # skip "|" and initial "<"
+        while attrdef and attrdef[0] == "|":
+            attrdef = attrdef[1:]
+        if not attrdef or attrdef[0] != "<":
+            return {}, ''
+        attrdef = attrdef[1:]
+
+        # extension for special table markup
+        def table_extension(key, parser, attrs, wiki_parser=self):
+            """ returns: tuple (found_flag, msg)
+                found_flag: whether we found something and were able to process it here
+                  true for special stuff like 100% or - or #AABBCC
+                  false for style xxx="yyy" attributes
+                msg: "" or an error msg
+            """
+            _ = wiki_parser._
+            found = False
+            msg = ''
+            if key[0] in "0123456789":
+                # "NN%" -> width
+                token = parser.get_token()
+                if token != '%':
+                    wanted = '%'
+                    msg = _('Expected "%(wanted)s" after "%(key)s", got "%(token)s"') % {
+                        'wanted': wanted, 'key': key, 'token': token}
+                else:
+                    try:
+                        dummy = int(key)
+                    except ValueError:
+                        msg = _('Expected an integer "%(key)s" before "%(token)s"') % {
+                            'key': key, 'token': token}
+                    else:
+                        found = True
+                        attrs['width'] = '"%s%%"' % key
+            elif key == '-':
+                # "-N" -> colspan
+                arg = parser.get_token()
+                try:
+                    dummy = int(arg)
+                except ValueError:
+                    msg = _('Expected an integer "%(arg)s" after "%(key)s"') % {
+                        'arg': arg, 'key': key}
+                else:
+                    found = True
+                    attrs['colspan'] = '"%s"' % arg
+            elif key == '|':
+                # "|N" -> rowspan
+                arg = parser.get_token()
+                try:
+                    dummy = int(arg)
+                except ValueError:
+                    msg = _('Expected an integer "%(arg)s" after "%(key)s"') % {
+                        'arg': arg, 'key': key}
+                else:
+                    found = True
+                    attrs['rowspan'] = '"%s"' % arg
+            elif key == '(':
+                found = True
+                attrs['align'] = '"left"'
+            elif key == ':':
+                found = True
+                attrs['align'] = '"center"'
+            elif key == ')':
+                found = True
+                attrs['align'] = '"right"'
+            elif key == '^':
+                found = True
+                attrs['valign'] = '"top"'
+            elif key == 'v':
+                found = True
+                attrs['valign'] = '"bottom"'
+            elif key == '#':
+                # "#RRGGBB" -> background color (6 hex digits required)
+                arg = parser.get_token()
+                try:
+                    if len(arg) != 6: raise ValueError
+                    dummy = int(arg, 16)
+                except ValueError:
+                    msg = _('Expected a color value "%(arg)s" after "%(key)s"') % {
+                        'arg': arg, 'key': key}
+                else:
+                    found = True
+                    attrs['bgcolor'] = '"#%s"' % arg
+            return found, self.formatter.rawHTML(msg)
+
+        # scan attributes
+        attr, msg = wikiutil.parseAttributes(self.request, attrdef, '>', table_extension)
+        if msg:
+            msg = '<strong class="highlight">%s</strong>' % msg
+        #self.request.log("parseAttributes returned %r" % attr)
+        return attr, msg
+
+    def _tableZ_repl(self, word):
+        """Handle table row end ('|| ' at end of line); plain text outside a table."""
+        if self.in_table:
+            result = ''
+            # REMOVED: check for self.in_li, p should always close
+            if self.formatter.in_p:
+                result = self.formatter.paragraph(0)
+            result += self.formatter.table_cell(0) + self.formatter.table_row(0)
+            return result
+        else:
+            return self.formatter.text(word)
+
+    def _table_repl(self, word):
+        """Handle table cell separator ('||...', optionally with <attrs>).
+
+        Opens the row on the first cell of a line; multiple adjacent '||'
+        become a centered colspan cell.  Plain text outside a table.
+        """
+        if self.in_table:
+            result = []
+            # check for attributes
+            attrs, attrerr = self._getTableAttrs(word)
+
+            # start the table row?
+            if self.table_rowstart:
+                self.table_rowstart = 0
+                result.append(self.formatter.table_row(1, attrs))
+            else:
+                # Close table cell, first closing open p
+                # REMOVED check for self.in_li, paragraph should close always!
+                if self.formatter.in_p:
+                    result.append(self.formatter.paragraph(0))
+                result.append(self.formatter.table_cell(0))
+
+            # check for adjacent cell markers
+            if word.count("|") > 2:
+                if not attrs.has_key('align') and \
+                   not (attrs.has_key('style') and 'text-align' in attrs['style'].lower()):
+                    # add center alignment if we don't have some alignment already
+                    attrs['align'] = '"center"'
+                if not attrs.has_key('colspan'):
+                    # each '||' pair is one spanned column
+                    attrs['colspan'] = '"%d"' % (word.count("|")/2)
+
+            # return the complete cell markup
+            result.append(self.formatter.table_cell(1, attrs) + attrerr)
+            result.append(self._line_anchordef())
+            return ''.join(result)
+        else:
+            return self.formatter.text(word)
+
+
+    def _heading_repl(self, word):
+        """Handle section headings ( = Title = ... ===== Title ===== ).
+
+        The anchor id is a sha hash of pagename+title, with a numeric
+        suffix to disambiguate repeated titles.
+        NOTE(review): the `sha` module is deprecated in later Pythons
+        (replaced by hashlib) — fine for the Python 2 era this targets.
+        """
+        import sha
+
+        h = word.strip()
+        level = 1
+        while h[level:level+1] == '=':
+            level += 1
+        depth = min(5, level)
+
+        # FIXME: needed for Included pages but might still result in unpredictable results
+        # when included the same page multiple times
+        title_text = h[level:-level].strip()
+        pntt = self.formatter.page.page_name + title_text
+        self.titles.setdefault(pntt, 0)
+        self.titles[pntt] += 1
+
+        unique_id = ''
+        if self.titles[pntt] > 1:
+            unique_id = '-%d' % self.titles[pntt]
+        result = self._closeP()
+        result += self.formatter.heading(1, depth, id="head-"+sha.new(pntt.encode(config.charset)).hexdigest()+unique_id)
+
+        return (result + self.formatter.text(title_text) +
+                self.formatter.heading(0, depth))
+
+    def _parser_repl(self, word):
+        """Handle parsed code displays ( {{{#!name ... ).
+
+        Sets self.in_pre to one of 'no_parser' / 'found_parser' /
+        'search_parser' depending on the bang path (see __init__ comment).
+        """
+        if word.startswith('{{{'):
+            word = word[3:]
+
+        self.parser = None
+        self.parser_name = None
+        s_word = word.strip()
+        if s_word == '#!':
+            # empty bang paths lead to a normal code display
+            # can be used to escape real, non-empty bang paths
+            word = ''
+            self.in_pre = 'no_parser'
+            return self._closeP() + self.formatter.preformatted(1)
+        elif s_word.startswith('#!'):
+            # First try to find a parser for this
+            parser_name = s_word[2:].split()[0]
+            self.setParser(parser_name)
+
+        if self.parser:
+            self.parser_name = parser_name
+            self.in_pre = 'found_parser'
+            self.parser_lines = [word]
+            return ''
+        elif s_word:
+            self.in_pre = 'no_parser'
+            return self._closeP() + self.formatter.preformatted(1) + \
+                   self.formatter.text(s_word + ' (-)')
+        else:
+            self.in_pre = 'search_parser'
+            return ''
+
+    def _pre_repl(self, word):
+        """Handle code displays: '{{{' opens, '}}}' closes a plain <pre> section."""
+        word = word.strip()
+        if word == '{{{' and not self.in_pre:
+            self.in_pre = 'no_parser'
+            return self._closeP() + self.formatter.preformatted(1)
+        elif word == '}}}' and self.in_pre:
+            self.in_pre = None
+            self.inhibit_p = 0
+            return self.formatter.preformatted(0)
+        return self.formatter.text(word)
+
+
+    def _smiley_repl(self, word):
+        """Handle smileys; _smileyA_repl covers the at-line-start variant."""
+        return self.formatter.smiley(word)
+
+    _smileyA_repl = _smiley_repl
+
+
    def _comment_repl(self, word):
        """Handle comment lines (## ...): emit them as formatter comments."""
        # if we are in a paragraph, we must close it so that normal text following
        # in the line below the comment will reopen a new paragraph.
        if self.formatter.in_p:
            self.formatter.paragraph(0)
        self.line_is_empty = 1 # markup following comment lines treats them as if they were empty
        return self.formatter.comment(word)
+
+    def _closeP(self):
+        if self.formatter.in_p:
+            return self.formatter.paragraph(0)
+        return ''
+
+    def _macro_repl(self, word):
+        """Handle macros ([[macroname]])."""
+        macro_name = word[2:-2]
+        self.inhibit_p = 0 # 1 fixes UserPreferences, 0 fixes paragraph formatting for macros
+
+        # check for arguments
+        args = None
+        if macro_name.count("("):
+            macro_name, args = macro_name.split('(', 1)
+            args = args[:-1]
+
+        # create macro instance
+        if self.macro is None:
+            self.macro = macro.Macro(self)
+        return self.formatter.macro(self.macro, macro_name, args)
+
    def scan(self, scan_re, line, inhibit_p=False):
        """ Scans one line
        Append text before match, invoke replace() with match, and add text after match.

        @param scan_re: compiled regex with one named group per markup rule
        @param line: source line to scan
        @param inhibit_p: when true, suppress automatic paragraph opening
        @return: unicode markup for the whole line
        """
        result = []
        lastpos = 0

        ###result.append(u'<span class="info">[scan: <tt>"%s"</tt>]</span>' % line)

        for match in scan_re.finditer(line):
            # Add text before the match
            if lastpos < match.start():

                ###result.append(u'<span class="info">[add text before match: <tt>"%s"</tt>]</span>' % line[lastpos:match.start()])

                if not (inhibit_p or self.inhibit_p or self.in_pre or self.formatter.in_p):
                    result.append(self.formatter.paragraph(1, css_class="line862"))
                result.append(self.formatter.text(line[lastpos:match.start()]))

            # Replace match with markup
            if not (inhibit_p or self.inhibit_p or self.in_pre or self.formatter.in_p or
                    self.in_table or self.in_list):
                result.append(self.formatter.paragraph(1, css_class="line867"))
            result.append(self.replace(match, inhibit_p))
            lastpos = match.end()

        ###result.append('<span class="info">[no match, add rest: <tt>"%s"<tt>]</span>' % line[lastpos:])

        # Add paragraph with the remainder of the line
        if not (inhibit_p or self.in_pre or self.in_li or self.in_dd or self.inhibit_p or
                self.formatter.in_p) and lastpos < len(line):
            result.append(self.formatter.paragraph(1, css_class="line874"))
        result.append(self.formatter.text(line[lastpos:]))
        return u''.join(result)
+
    def replace(self, match, inhibit_p=False):
        """ Replace match using type name

        The scan regex uses one named group per markup rule; the name of the
        group that matched selects the _<name>_repl handler method to call.
        """
        result = []
        for type, hit in match.groupdict().items():
            if hit is not None and not type in ["hmarker", ]:

                ##result.append(u'<span class="info">[replace: %s: "%s"]</span>' % (type, hit))
                # Open p for certain types
                if not (inhibit_p or self.inhibit_p or self.formatter.in_p
                        or self.in_pre or (type in self.no_new_p_before)):
                    result.append(self.formatter.paragraph(1, css_class="line891"))

                # Get replace method and replace hit
                replace = getattr(self, '_' + type + '_repl')
                result.append(replace(hit))
                return ''.join(result)
        else:
            # for/else: no group matched at all -- the scan and replace rules
            # are out of sync, which is a programming error.
            # We should never get here
            import pprint
            raise Exception("Can't handle match " + `match`
                + "\n" + pprint.pformat(match.groupdict())
                + "\n" + pprint.pformat(match.groups()) )

        return ""
+
+    def _line_anchordef(self):
+        if self.line_anchors and not self.line_anchor_printed:
+            self.line_anchor_printed = 1
+            return self.formatter.line_anchordef(self.lineno)
+        else:
+            return ''
+
    def format(self, formatter, inhibit_p=False):
        """ For each line, scan through looking for magic
            strings, outputting verbatim any intervening text.

        @param formatter: the output formatter; all markup is written to
                          self.request via this object
        @param inhibit_p: when true, suppress automatic paragraph opening
        """
        self.formatter = formatter
        self.hilite_re = self.formatter.page.hilite_re

        # prepare regex patterns
        rules = self.formatting_rules.replace('\n', '|')
        if self.cfg.bang_meta:
            rules = ur'(?P<notword>!%(word_rule)s)|%(rules)s' % {
                'word_rule': self.word_rule,
                'rules': rules,
            }
        pre_rules = self.pre_formatting_rules.replace('\n', '|')
        self.request.clock.start('compile_huge_and_ugly')
        scan_re = re.compile(rules, re.UNICODE)
        pre_scan_re = re.compile(pre_rules, re.UNICODE)
        number_re = re.compile(self.ol_rule, re.UNICODE)
        term_re = re.compile(self.dl_rule, re.UNICODE)
        indent_re = re.compile(ur"^\s*", re.UNICODE)
        eol_re = re.compile(r'\r?\n', re.UNICODE)
        self.request.clock.stop('compile_huge_and_ugly')

        # get text and replace TABs
        rawtext = self.raw.expandtabs()

        # go through the lines
        self.lineno = self.start_line
        self.lines = eol_re.split(rawtext)
        self.line_is_empty = 0

        # processing instructions (#format, #acl, ...) only count at the
        # very top of the page, before any other content
        self.in_processing_instructions = 1

        if self.wrapping_div_class:
            div_kw = {'css_class': self.wrapping_div_class, }
            if 'comment' in self.wrapping_div_class.split():
                # show comment divs depending on user profile (and wiki configuration)
                div_kw['style'] = self.request.user.show_comments and "display:''" or "display:none"
            self.request.write(self.formatter.div(1, **div_kw))

        # Main loop
        for line in self.lines:
            # reset per-line state
            self.lineno += 1
            self.line_anchor_printed = 0
            if not self.in_table:
                self.request.write(self._line_anchordef())
            self.table_rowstart = 1
            self.line_was_empty = self.line_is_empty
            self.line_is_empty = 0
            self.first_list_item = 0
            self.inhibit_p = 0

            # ignore processing instructions
            if self.in_processing_instructions:
                found = False
                for pi in ("##", "#format", "#refresh", "#redirect", "#deprecated",
                           "#pragma", "#form", "#acl", "#language"):
                    if line.lower().startswith(pi):
                        self.request.write(self.formatter.comment(line))
                        found = True
                        break
                if not found:
                    self.in_processing_instructions = 0
                else:
                    continue # do not parse this line
            if self.in_pre:
                # TODO: move this into function
                # still looking for processing instructions
                if self.in_pre == 'search_parser':
                    self.parser = None
                    parser_name = ''
                    if line.strip().startswith("#!"):
                        parser_name = line.strip()[2:].split()[0]
                        self.setParser(parser_name)

                    if self.parser:
                        self.in_pre = 'found_parser'
                        self.parser_lines = [line]
                        self.parser_name = parser_name
                        continue
                    else:
                        self.request.write(self._closeP() +
                                           self.formatter.preformatted(1))
                        self.in_pre = 'no_parser'
                if self.in_pre == 'found_parser':
                    # processing mode: buffer lines until the closing }}}
                    try:
                        endpos = line.index("}}}")
                    except ValueError:
                        self.parser_lines.append(line)
                        continue
                    if line[:endpos]:
                        self.parser_lines.append(line[:endpos])

                    # Close p before calling parser
                    # TODO: do we really need this?
                    self.request.write(self._closeP())
                    res = self.formatter.parser(self.parser_name, self.parser_lines)
                    self.request.write(res)
                    del self.parser_lines
                    self.in_pre = None
                    self.parser = None

                    # send rest of line through regex machinery
                    line = line[endpos+3:]
                    if not line.strip(): # just in the case "}}} " when we only have blanks left...
                        continue
            else:
                # we don't have \n as whitespace any more
                # This is the space between lines we join to one paragraph
                line += ' '

                # Paragraph break on empty lines
                if not line.strip():
                    if self.in_table:
                        self.request.write(self.formatter.table(0))
                        self.request.write(self._line_anchordef())
                        self.in_table = 0
                    # CHANGE: removed check for not self.list_types
                    # p should close on every empty line
                    if self.formatter.in_p:
                        self.request.write(self.formatter.paragraph(0))
                    self.line_is_empty = 1
                    continue

                # Check indent level
                indent = indent_re.match(line)
                indlen = len(indent.group(0))
                indtype = "ul"
                numtype = None
                numstart = None
                if indlen:
                    # indented: decide between bullet, numbered and definition list
                    match = number_re.match(line)
                    if match:
                        numtype, numstart = match.group(0).strip().split('.')
                        numtype = numtype[0]

                        if numstart and numstart[0] == "#":
                            numstart = int(numstart[1:])
                        else:
                            numstart = None

                        indtype = "ol"
                    else:
                        match = term_re.match(line)
                        if match:
                            indtype = "dl"

                # output proper indentation tags
                self.request.write(self._indent_to(indlen, indtype, numtype, numstart))

                # Table mode
                # TODO: move into function?
                if (not self.in_table and line[indlen:indlen + 2] == "||"
                    and line.endswith("|| ") and len(line) >= 5 + indlen):
                    # Start table
                    if self.list_types and not self.in_li:
                        self.request.write(self.formatter.listitem(1, style="list-style-type:none"))
                        ## CHANGE: no automatic p on li
                        ##self.request.write(self.formatter.paragraph(1))
                        self.in_li = 1

                    # CHANGE: removed check for self.in_li
                    # paragraph should end before table, always!
                    if self.formatter.in_p:
                        self.request.write(self.formatter.paragraph(0))
                    attrs, attrerr = self._getTableAttrs(line[indlen+2:])
                    self.request.write(self.formatter.table(1, attrs) + attrerr)
                    self.in_table = True # self.lineno
                elif (self.in_table and not
                      # intra-table comments should not break a table
                      (line.startswith("##") or
                       line[indlen:indlen + 2] == "||" and
                       line.endswith("|| ") and
                       len(line) >= 5 + indlen)):

                    # Close table
                    self.request.write(self.formatter.table(0))
                    self.request.write(self._line_anchordef())
                    self.in_table = 0

            # Scan line, format and write
            scanning_re = self.in_pre and pre_scan_re or scan_re
            formatted_line = self.scan(scanning_re, line, inhibit_p=inhibit_p)
            self.request.write(formatted_line)
            if self.in_pre == 'no_parser':
                self.request.write(self.formatter.linebreak())

        # Close code displays, paragraphs, tables and open lists
        self.request.write(self._undent())
        if self.in_pre: self.request.write(self.formatter.preformatted(0))
        if self.formatter.in_p: self.request.write(self.formatter.paragraph(0))
        if self.in_table: self.request.write(self.formatter.table(0))

        if self.wrapping_div_class:
            self.request.write(self.formatter.div(0))
+
+    # Private helpers ------------------------------------------------------------
+
+    def setParser(self, name):
+        """ Set parser to parser named 'name' """
+        # XXX this is done by the formatter as well
+        try:
+            self.parser = wikiutil.searchAndImportPlugin(self.request.cfg, "parser", name)
+        except wikiutil.PluginMissingError:
+            self.parser = None
--- /dev/null	Thu Jan 01 00:00:00 1970 +0000
+++ b/MoinMoin/script/migration/wikiutil160a.py	Sun Aug 03 22:58:40 2008 +0200
@@ -0,0 +1,1671 @@
+# -*- coding: iso-8859-1 -*-
+"""
+    MoinMoin - Wiki Utility Functions
+
+    @copyright: 2000 - 2004 by Jürgen Hermann <jh@web.de>
+                2007 by Reimar Bauer
+    @license: GNU GPL, see COPYING for details.
+"""
+
+import cgi
+import codecs
+import os
+import re
+import time
+import urllib
+
+from MoinMoin import config
+from MoinMoin.util import pysupport, lock
+
+# Exceptions
class InvalidFileNameError(Exception):
    """ Raised when we find an invalid (unquotable) file name """
    pass
+
# constants for page names
PARENT_PREFIX = "../"    # markup prefix for a link to the parent page
PARENT_PREFIX_LEN = len(PARENT_PREFIX)
CHILD_PREFIX = "/"       # markup prefix for a link to a subpage
CHILD_PREFIX_LEN = len(CHILD_PREFIX)
+
+#############################################################################
+### Getting data from user/Sending data to user
+#############################################################################
+
def decodeWindowsPath(text):
    """ Decode Windows path names correctly. This is needed because many CGI
    servers follow the RFC recommendation and re-encode the path_info variable
    according to the file system semantics.

    @param text: the text to decode, string
    @rtype: unicode
    @return: decoded text
    """

    import locale
    # preferred charset of the server's current locale (may be None)
    cur_charset = locale.getdefaultlocale()[1]
    try:
        # try utf-8 first: it only succeeds on genuine utf-8 input
        return unicode(text, 'utf-8')
    except UnicodeError:
        try:
            return unicode(text, cur_charset, 'replace')
        except LookupError:
            # locale charset unknown to Python -- fall back to latin-1
            return unicode(text, 'iso-8859-1', 'replace')
+
def decodeUnknownInput(text):
    """ Decode unknown input, like text attachments

    First we try utf-8 because it has special format, and it will decode
    only utf-8 files. Then we try config.charset, then iso-8859-1 using
    'replace'. We will never raise an exception, but may return junk
    data.

    WARNING: Use this function only for data that you view, not for data
    that you save in the wiki.

    @param text: the text to decode, string
    @rtype: unicode
    @return: decoded text (maybe wrong)
    """
    # Shortcut for unicode input
    if isinstance(text, unicode):
        return text

    try:
        return unicode(text, 'utf-8')
    except UnicodeError:
        # only try config.charset if it isn't one of the charsets below
        if config.charset not in ['utf-8', 'iso-8859-1']:
            try:
                return unicode(text, config.charset)
            except UnicodeError:
                pass
        # latin-1 with 'replace' never fails
        return unicode(text, 'iso-8859-1', 'replace')
+
+
# NOTE: the mutable default list is never modified, so sharing it across
# calls is safe here; config.charset is captured at import time.
def decodeUserInput(s, charsets=[config.charset]):
    """
    Decodes input from the user.

    @param s: the string to unquote
    @param charsets: list of charsets to assume the string is in
    @rtype: unicode
    @return: the unquoted string as unicode
    @raise UnicodeError: if no charset in the list can decode s
    """
    for charset in charsets:
        try:
            return s.decode(charset)
        except UnicodeError:
            pass
    raise UnicodeError('The string %r cannot be decoded.' % s)
+
+
+# this is a thin wrapper around urllib (urllib only handles str, not unicode)
+# with py <= 2.4.1, it would give incorrect results with unicode
+# with py == 2.4.2, it crashes with unicode, if it contains non-ASCII chars
# this is a thin wrapper around urllib (urllib only handles str, not unicode)
# with py <= 2.4.1, it would give incorrect results with unicode
# with py == 2.4.2, it crashes with unicode, if it contains non-ASCII chars
def url_quote(s, safe='/', want_unicode=False):
    """
    Wrapper around urllib.quote doing the encoding/decoding as usually wanted:

    @param s: the string to quote (can be str or unicode, if it is unicode,
              config.charset is used to encode it before calling urllib)
    @param safe: just passed through to urllib
    @param want_unicode: for the less usual case that you want to get back
                         unicode and not str, set this to True
                         Default is False.
    """
    # normalize the input to a byte string first
    if isinstance(s, unicode):
        encoded = s.encode(config.charset)
    elif isinstance(s, str):
        encoded = s
    else:
        encoded = str(s)
    quoted = urllib.quote(encoded, safe)
    if not want_unicode:
        return quoted
    return quoted.decode(config.charset) # ascii would also work
+
def url_quote_plus(s, safe='/', want_unicode=False):
    """
    Wrapper around urllib.quote_plus doing the encoding/decoding as usually wanted
    (same as url_quote above, but spaces become '+'):

    @param s: the string to quote (can be str or unicode, if it is unicode,
              config.charset is used to encode it before calling urllib)
    @param safe: just passed through to urllib
    @param want_unicode: for the less usual case that you want to get back
                         unicode and not str, set this to True
                         Default is False.
    """
    if isinstance(s, unicode):
        s = s.encode(config.charset)
    elif not isinstance(s, str):
        s = str(s)
    s = urllib.quote_plus(s, safe)
    if want_unicode:
        s = s.decode(config.charset) # ascii would also work
    return s
+
def url_unquote(s, want_unicode=True):
    """
    Wrapper around urllib.unquote doing the encoding/decoding as usually wanted:

    @param s: the string to unquote (can be str or unicode, if it is unicode,
              config.charset is used to encode it before calling urllib)
    @param want_unicode: for the less usual case that you want to get back
                         str and not unicode, set this to False.
                         Default is True.
    """
    if isinstance(s, unicode):
        s = s.encode(config.charset) # ascii would also work
    s = urllib.unquote(s)
    if want_unicode:
        s = s.decode(config.charset)
    return s
+
def parseQueryString(qstr, want_unicode=True):
    """ Parse a querystring "key=value&..." into a dict.

    @param qstr: query string (unicode input is encoded with config.charset first)
    @param want_unicode: decode values back to unicode (default True)
    @rtype: dict
    @return: {key: value}; keys that occur more than once are silently dropped
    """
    is_unicode = isinstance(qstr, unicode)
    if is_unicode:
        qstr = qstr.encode(config.charset)
    values = {}
    for key, value in cgi.parse_qs(qstr).items():
        if len(value) < 2:
            # single-valued key only; multi-valued keys are ignored
            v = ''.join(value)
            if want_unicode:
                try:
                    v = unicode(v, config.charset)
                except UnicodeDecodeError:
                    v = unicode(v, 'iso-8859-1', 'replace')
            values[key] = v
    return values
+
def makeQueryString(qstr=None, want_unicode=False, **kw):
    """ Make a querystring from arguments.

    kw arguments override values in qstr.

    If a string is passed in, it's returned verbatim and
    keyword parameters are ignored.

    @param qstr: dict to format as query string, using either ascii or unicode
    @param want_unicode: pass True to get back a unicode query string
    @param kw: same as dict when using keywords, using ascii or unicode
    @rtype: string
    @return: query string ready to use in a url
    """
    if qstr is None:
        qstr = {}
    if isinstance(qstr, dict):
        qstr.update(kw)
        items = ['%s=%s' % (url_quote_plus(key, want_unicode=want_unicode), url_quote_plus(value, want_unicode=want_unicode)) for key, value in qstr.items()]
        qstr = '&'.join(items)
    return qstr
+
+
def quoteWikinameURL(pagename, charset=config.charset):
    """ Return a url encoding of filename in plain ascii

    Use urllib.quote to quote any character that is not always safe.

    @param pagename: the original pagename (unicode)
    @param charset: url text encoding, 'utf-8' recommended. Other charset
                    might not be able to encode the page name and raise
                    UnicodeError. (default config.charset ('utf-8')).
    @rtype: string
    @return: the quoted filename, all unsafe characters encoded
    """
    pagename = pagename.encode(charset)
    return urllib.quote(pagename)
+
+
def escape(s, quote=0):
    """ Escape possible html tags

    Replace special characters '&', '<' and '>' by SGML entities.
    (taken from cgi.escape so we don't have to include that, even if we
    don't use cgi at all)

    @param s: (unicode) string to escape
    @param quote: bool, should transform '\"' to '&quot;'
    @rtype: when called with a unicode object, return unicode object - otherwise return string object
    @return: escaped version of s
    """
    if not isinstance(s, (str, unicode)):
        s = str(s)

    # '&' must be replaced first, or we would escape our own entities
    s = s.replace("&", "&amp;").replace("<", "&lt;").replace(">", "&gt;")
    if quote:
        s = s.replace('"', "&quot;")
    return s
+
def clean_comment(comment):
    """ Clean comment - replace CR, LF, TAB by whitespace, delete control chars

    Over-long comments (> 201 chars, i.e. longer than our input fields
    allow -- spam) are discarded entirely.

    @param comment: the comment text (unicode)
    @return: cleaned comment (unicode)
    TODO: move this to config, create on first call then return cached.
    """
    # we only have input fields with max 200 chars, but spammers send us more
    if len(comment) > 201:
        comment = u''
    remap_chars = {
        ord(u'\t'): u' ',
        ord(u'\r'): u' ',
        ord(u'\n'): u' ',
    }
    control_chars = u'\x00\x01\x02\x03\x04\x05\x06\x07\x08\x0b\x0c\x0e\x0f' \
                    '\x10\x11\x12\x13\x14\x15\x16\x17\x18\x19\x1a\x1b\x1c\x1d\x1e\x1f'
    for c in control_chars:
        # BUG FIX: unicode.translate() looks keys up by ordinal; the old code
        # used the 1-char string as key, so control chars were never deleted.
        remap_chars[ord(c)] = None
    comment = comment.translate(remap_chars)
    return comment
+
def make_breakable(text, maxlen):
    """ make a text breakable by inserting spaces into nonbreakable parts

    Words longer than maxlen are chopped into maxlen-sized pieces,
    which are then re-joined (with all other words) by single spaces.
    """
    pieces = []
    for word in text.split(" "):
        if len(word) <= maxlen:
            pieces.append(word)
        else:
            # chop the over-long word into maxlen-sized slices
            for start in range(0, len(word), maxlen):
                pieces.append(word[start:start + maxlen])
    return " ".join(pieces)
+
+########################################################################
+### Storage
+########################################################################
+
# Precompiled patterns for file name [un]quoting
UNSAFE = re.compile(r'[^a-zA-Z0-9_]+')      # a run of chars that needs quoting
QUOTED = re.compile(r'\(([a-fA-F0-9]+)\)')  # one quoted run: (hex digit pairs)
+
+
def quoteWikinameFS(wikiname, charset=config.charset):
    """ Return file system representation of a Unicode WikiName.

    Warning: will raise UnicodeError if wikiname can not be encoded using
    charset. The default value of config.charset, 'utf-8' can encode any
    character.

    @param wikiname: Unicode string possibly containing non-ascii characters
    @param charset: charset to encode string
    @rtype: string
    @return: quoted name, safe for any file system
    """
    filename = wikiname.encode(charset)

    quoted = []
    location = 0
    for needle in UNSAFE.finditer(filename):
        # append leading safe stuff
        quoted.append(filename[location:needle.start()])
        location = needle.end()
        # Quote and append unsafe stuff: each byte becomes two hex digits,
        # the whole run is wrapped in parentheses, e.g. ' ' -> '(20)'
        quoted.append('(')
        for character in needle.group():
            quoted.append('%02x' % ord(character))
        quoted.append(')')

    # append rest of string
    quoted.append(filename[location:])
    return ''.join(quoted)
+
+
def unquoteWikiname(filename, charsets=[config.charset]):
    """ Return Unicode WikiName from quoted file name.

    We raise an InvalidFileNameError if we find an invalid name, so the
    wiki could alarm the admin or suggest the user to rename a page.
    Invalid file names should never happen in normal use, but are rather
    cheap to find.

    This function should be used only to unquote file names, not page
    names we receive from the user. These are handled in request by
    urllib.unquote, decodePagename and normalizePagename.

    Todo: search clients of unquoteWikiname and check for exceptions.

    @param filename: string using charset and possibly quoted parts
    @param charsets: list of charsets used by string
    @rtype: Unicode String
    @return: WikiName
    """
    ### Temporary fix start ###
    # From some places we get called with Unicode strings
    if isinstance(filename, type(u'')):
        filename = filename.encode(config.charset)
    ### Temporary fix end ###

    parts = []
    start = 0
    for needle in QUOTED.finditer(filename):
        # append leading unquoted stuff
        parts.append(filename[start:needle.start()])
        start = needle.end()
        # Append quoted stuff
        group = needle.group(1)
        # Filter invalid filenames: hex digits must come in pairs
        if (len(group) % 2 != 0):
            raise InvalidFileNameError(filename)
        try:
            # decode each hex pair back into one byte
            for i in range(0, len(group), 2):
                byte = group[i:i+2]
                character = chr(int(byte, 16))
                parts.append(character)
        except ValueError:
            # byte not in hex, e.g 'xy'
            raise InvalidFileNameError(filename)

    # append rest of string
    if start == 0:
        # no quoted runs at all: the name is used as-is
        wikiname = filename
    else:
        parts.append(filename[start:len(filename)])
        wikiname = ''.join(parts)

    # FIXME: This looks wrong, because at this stage "()" can be both errors
    # like open "(" without close ")", or unquoted valid characters in the file name.
    # Filter invalid filenames. Any left (xx) must be invalid
    #if '(' in wikiname or ')' in wikiname:
    #    raise InvalidFileNameError(filename)

    wikiname = decodeUserInput(wikiname, charsets)
    return wikiname
+
+# time scaling
+def timestamp2version(ts):
+    """ Convert UNIX timestamp (may be float or int) to our version
+        (long) int.
+        We don't want to use floats, so we just scale by 1e6 to get
+        an integer in usecs. 
+    """
+    return long(ts*1000000L) # has to be long for py 2.2.x
+
def version2timestamp(v):
    """ Convert version number (integer microseconds) back to a UNIX
        timestamp (float seconds).
        This must ONLY be used for display purposes.
    """
    usecs_per_second = 1000000.0
    return v / usecs_per_second
+
+
# This is the list of meta attribute names to be treated as integers.
# MetaDict converts the stored string values for these keys with int().
# IMPORTANT: do not use any meta attribute names with "-" (or any other chars
# invalid in python attribute names), use e.g. _ instead.
INTEGER_METAS = ['current', 'revision', # for page storage (moin 2.0)
                 'data_format_revision', # for data_dir format spec (use by mig scripts)
                ]
+
class MetaDict(dict):
    """ store meta informations as a dict.

    The whole meta file ('key: value' lines, utf-8) is read on
    construction; writes go back to disk immediately (write-through).
    Disk access is serialized through read/write locks kept in
    <cache_directory>/__metalock__.
    """
    def __init__(self, metafilename, cache_directory):
        """ create a MetaDict from metafilename """
        dict.__init__(self)
        self.metafilename = metafilename  # path of the backing meta file
        self.dirty = False                # True while unsaved changes exist
        lock_dir = os.path.join(cache_directory, '__metalock__')
        self.rlock = lock.ReadLock(lock_dir, 60.0)
        self.wlock = lock.WriteLock(lock_dir, 60.0)

        if not self.rlock.acquire(3.0):
            raise EnvironmentError("Could not lock in MetaDict")
        try:
            self._get_meta()
        finally:
            self.rlock.release()

    def _get_meta(self):
        """ get the meta dict from self.metafilename.
            does not keep state, does uncached, direct disk access.
            Missing file is treated as empty; values for INTEGER_METAS
            keys are converted to int.
        """

        try:
            metafile = codecs.open(self.metafilename, "r", "utf-8")
            meta = metafile.read() # this is much faster than the file's line-by-line iterator
            metafile.close()
        except IOError:
            meta = u''
        for line in meta.splitlines():
            key, value = line.split(':', 1)
            value = value.strip()
            if key in INTEGER_METAS:
                value = int(value)
            dict.__setitem__(self, key, value)

    def _put_meta(self):
        """ write the current dict contents to self.metafilename.
            does not keep or modify state, does uncached, direct disk access.
        """
        meta = []
        for key, value in self.items():
            if key in INTEGER_METAS:
                value = str(value)
            meta.append("%s: %s" % (key, value))
        meta = '\r\n'.join(meta)

        metafile = codecs.open(self.metafilename, "w", "utf-8")
        metafile.write(meta)
        metafile.close()
        self.dirty = False

    def sync(self, mtime_usecs=None):
        """ No-Op except for that parameter """
        if not mtime_usecs is None:
            self.__setitem__('mtime', str(mtime_usecs))
        # otherwise no-op

    def __getitem__(self, key):
        """ We don't care for cache coherency here. """
        return dict.__getitem__(self, key)

    def __setitem__(self, key, value):
        """ Sets a dictionary entry (write-through to disk, under lock). """
        if not self.wlock.acquire(5.0):
            raise EnvironmentError("Could not lock in MetaDict")
        try:
            self._get_meta() # refresh cache
            try:
                oldvalue = dict.__getitem__(self, key)
            except KeyError:
                oldvalue = None
            if value != oldvalue:
                dict.__setitem__(self, key, value)
                self._put_meta() # sync cache
        finally:
            self.wlock.release()
+
+
+# Quoting of wiki names, file names, etc. (in the wiki markup) -----------------------------------
+
QUOTE_CHARS = u"'\""

def quoteName(name):
    """ wrap the name in whichever quote char it does not contain """
    for qc in QUOTE_CHARS:
        if qc not in name:
            return qc + name + qc
    # name contains both quote chars; return it unquoted
    # XXX we need to be able to escape the quote char for worst case
    return name
+
def unquoteName(name):
    """ if there are quotes around the name, strip them

    BUG FIX: an empty name is now returned unchanged; the old code
    raised IndexError on name[0] for u''.

    @param name: possibly quoted name (unicode)
    @return: name without its surrounding quotes, or unchanged
    """
    for quote_char in QUOTE_CHARS:
        if name and quote_char == name[0] == name[-1]:
            return name[1:-1]
    return name
+
+#############################################################################
+### InterWiki
+#############################################################################
+INTERWIKI_PAGE = "InterWikiMap"
+
def generate_file_list(request):
    """ generates a list of all intermap files. for internal use.

    Stores the resulting list on request.cfg.shared_intermap_files;
    only existing regular files are kept.
    """

    # order is important here, the local intermap file takes
    # precedence over the shared one, and is thus read AFTER
    # the shared one
    intermap_files = request.cfg.shared_intermap
    if not isinstance(intermap_files, list):
        intermap_files = [intermap_files]
    else:
        intermap_files = intermap_files[:] # copy, so the config value stays untouched
    intermap_files.append(os.path.join(request.cfg.data_dir, "intermap.txt"))
    request.cfg.shared_intermap_files = [filename for filename in intermap_files
                                         if filename and os.path.isfile(filename)]
+
+
def get_max_mtime(file_list, page):
    """ Return the latest modification time among the files in file_list
    and the page `page`.

    @param file_list: sequence of file names
    @param page: Page object to include in the comparison
    @return: highest modification time found
    """
    mtimes = []
    for fname in file_list:
        mtimes.append(os.stat(fname).st_mtime)
    # exists() is cached and thus cheaper than mtime_usecs()
    if page.exists():
        mtimes.append(version2timestamp(page.mtime_usecs()))
    return max(mtimes)
+
+
def load_wikimap(request):
    """ load interwiki map (once, and only on demand)

    The parsed map is cached in request.cfg.cache; the cached copy is used
    for up to one minute, after which the files' / page's mtimes are checked
    and the map is re-parsed only if something changed.

    @param request: the request object
    @rtype: dict
    @return: mapping of wiki tag -> url prefix
    """
    from MoinMoin.Page import Page

    now = int(time.time())
    if getattr(request.cfg, "shared_intermap_files", None) is None:
        generate_file_list(request)

    try:
        _interwiki_list = request.cfg.cache.interwiki_list
        old_mtime = request.cfg.cache.interwiki_mtime
        if request.cfg.cache.interwiki_ts + (1*60) < now: # 1 minutes caching time
            max_mtime = get_max_mtime(request.cfg.shared_intermap_files, Page(request, INTERWIKI_PAGE))
            if max_mtime > old_mtime:
                raise AttributeError # refresh cache
            else:
                request.cfg.cache.interwiki_ts = now
    except AttributeError: # first call (cache attributes missing) or forced refresh
        _interwiki_list = {}
        lines = []

        for filename in request.cfg.shared_intermap_files:
            f = open(filename, "r")
            lines.extend(f.readlines())
            f.close()

        # add the contents of the InterWikiMap page
        lines += Page(request, INTERWIKI_PAGE).get_raw_body().splitlines()

        for line in lines:
            if not line or line[0] == '#': continue # skip empty lines and comments
            try:
                # append a dummy token so "tag url" lines split into three parts
                line = "%s %s/InterWiki" % (line, request.getScriptname())
                wikitag, urlprefix, dummy = line.split(None, 2)
            except ValueError:
                pass # malformed line - ignore it
            else:
                _interwiki_list[wikitag] = urlprefix

        del lines

        # add own wiki as "Self" and by its configured name
        _interwiki_list['Self'] = request.getScriptname() + '/'
        if request.cfg.interwikiname:
            _interwiki_list[request.cfg.interwikiname] = request.getScriptname() + '/'

        # save for later
        request.cfg.cache.interwiki_list = _interwiki_list
        request.cfg.cache.interwiki_ts = now
        request.cfg.cache.interwiki_mtime = get_max_mtime(request.cfg.shared_intermap_files, Page(request, INTERWIKI_PAGE))

    return _interwiki_list
+
def split_wiki(wikiurl):
    """ Split a wiki url, e.g:

    'MoinMoin:FrontPage' -> "MoinMoin", "FrontPage", ""
    'FrontPage' -> "Self", "FrontPage", ""
    'MoinMoin:"Page with blanks" link title' -> "MoinMoin", "Page with blanks", "link title"

    can also be used for:

    'attachment:"filename with blanks.txt" other title' -> "attachment", "filename with blanks.txt", "other title"

    @param wikiurl: the url to split
    @rtype: tuple
    @return: (wikiname, pagename, linktext)
    """
    # find the wiki name prefix, ':' wins over '/'
    if ":" in wikiurl:
        wikiname, rest = wikiurl.split(":", 1) # e.g. MoinMoin:FrontPage
    elif "/" in wikiurl:
        wikiname, rest = wikiurl.split("/", 1) # for what is this used?
    else:
        wikiname, rest = 'Self', wikiurl

    if not rest:
        parts = ["", ""]
    elif rest[0] in QUOTE_CHARS: # quoted pagename
        parts = rest[1:].split(rest[0], 1)
    else: # not quoted, split on whitespace
        parts = rest.split(None, 1)

    if len(parts) == 1:
        pagename, linktext = parts[0], ""
    else:
        pagename, linktext = parts
    return wikiname, pagename, linktext.strip()
+
def resolve_wiki(request, wikiurl):
    """ Resolve an interwiki link.

    @param request: the request object
    @param wikiurl: the InterWiki:PageName link
    @rtype: tuple
    @return: (wikitag, wikiurl, wikitail, err) - err is True when the
             wiki tag is unknown (the caller gets a link to InterWiki)
    """
    _interwiki_list = load_wikimap(request)
    wikiname, pagename, linktext = split_wiki(wikiurl)
    # "x in dict" instead of the deprecated dict.has_key(x)
    if wikiname in _interwiki_list:
        return (wikiname, _interwiki_list[wikiname], pagename, False)
    else:
        return (wikiname, request.getScriptname(), "/InterWiki", True)
+
def join_wiki(wikiurl, wikitail):
    """
    Add a (url_quoted) page name to an interwiki url.

    Note: We can't know what kind of URL quoting a remote wiki expects.
          We just use a utf-8 encoded string with standard URL quoting.

    @param wikiurl: wiki url, maybe including a $PAGE placeholder
    @param wikitail: page name
    @rtype: string
    @return: generated URL of the page in the other wiki
    """
    quoted_tail = url_quote(wikitail)
    if '$PAGE' not in wikiurl:
        return wikiurl + quoted_tail
    return wikiurl.replace('$PAGE', quoted_tail)
+
+
#############################################################################
### Page types (based on page names)
#############################################################################

def isSystemPage(request, pagename):
    """ Is this a system page? Uses AllSystemPagesGroup internally.

    @param request: the request object
    @param pagename: the page name
    @rtype: bool
    @return: true if page is a system page
    """
    if request.dicts.has_member('SystemPagesGroup', pagename):
        return True
    return isTemplatePage(request, pagename)
+
+
def isTemplatePage(request, pagename):
    """ Is this a template page?

    @param pagename: the page name
    @rtype: bool
    @return: true if page is a template page
    """
    match = request.cfg.cache.page_template_regex.search(pagename)
    return match is not None
+
+
def isGroupPage(request, pagename):
    """ Is this the name of a group page?

    @param pagename: the page name
    @rtype: bool
    @return: true if page is a group page
    """
    match = request.cfg.cache.page_group_regex.search(pagename)
    return match is not None
+
+
def filterCategoryPages(request, pagelist):
    """ Return the category pages contained in pagelist.

    WARNING: DO NOT USE THIS TO FILTER THE FULL PAGE LIST! Use
    getPageList with a filter function.

    If you pass a list with a single pagename, either that is returned
    or an empty list, thus you can use this function like a `isCategoryPage`
    one.

    @param pagelist: a list of pages
    @rtype: list
    @return: only the category pages of pagelist
    """
    is_category = request.cfg.cache.page_category_regex.search
    # list comprehension (not filter()) so the documented list rtype
    # holds on any Python version
    return [pagename for pagename in pagelist if is_category(pagename)]
+
+
def getLocalizedPage(request, pagename): # was: getSysPage
    """ Get a system page according to user settings and available translations.

    We include some special treatment for the case that <pagename> is the
    currently rendered page, as this is the case for some pages used very
    often, like FrontPage, RecentChanges etc. - in that case we reuse the
    already existing page object instead creating a new one.

    @param request: the request object
    @param pagename: the name of the page
    @rtype: Page object
    @return: the page object of that system page, using a translated page,
             if it exists
    """
    from MoinMoin.Page import Page
    translated_name = request.getText(pagename, formatted=False)
    if translated_name != pagename:
        # try the translated variant first
        if request.page and translated_name == request.page.page_name:
            translated_page = request.page # reuse current page object
        else:
            translated_page = Page(request, translated_name)
        if translated_page.exists():
            return translated_page

    # no translation of <pagename> (or it does not exist) - fall back to english
    if request.page and pagename == request.page.page_name:
        return request.page # reuse current page object
    return Page(request, pagename)
+
+
def getFrontPage(request):
    """ Convenience function to get the localized front page.

    @param request: current request
    @rtype: Page object
    @return: localized page_front_page, if there is a translation
    """
    front_page_name = request.cfg.page_front_page
    return getLocalizedPage(request, front_page_name)
+
+
def getHomePage(request, username=None):
    """
    Get a user's homepage, or return None for anon users and
    those who have not created a homepage.

    DEPRECATED - try to use getInterwikiHomePage (see below)

    @param request: the request object
    @param username: the user's name
    @rtype: Page
    @return: user's homepage object - or None
    """
    from MoinMoin.Page import Page
    # default to current user
    if username is None and request.user.valid:
        username = request.user.name
    if not username:
        return None # anonymous / unknown user has no homepage

    homepage = Page(request, username)
    if homepage.exists():
        return homepage
    return None
+
+
def getInterwikiHomePage(request, username=None):
    """
    Get a user's homepage.

    cfg.user_homewiki influences behaviour of this:
    'Self' does mean we store user homepage in THIS wiki.
    When set to our own interwikiname, it behaves like with 'Self'.

    'SomeOtherWiki' means we store user homepages in another wiki.

    @param request: the request object
    @param username: the user's name
    @rtype: tuple (or None for anon users)
    @return: (wikiname, pagename)
    """
    if username is None and request.user.valid:
        username = request.user.name # default to current user
    if not username:
        return None # anon user has no homepage

    homewiki = request.cfg.user_homewiki
    if homewiki == request.cfg.interwikiname:
        homewiki = 'Self' # our own interwikiname is just an alias for 'Self'
    return homewiki, username
+
+
def AbsPageName(request, context, pagename):
    """
    Return the absolute pagename for a (possibly) relative pagename.

    @param request: the request object (unused here, kept for interface compat)
    @param context: name of the page where "pagename" appears on
    @param pagename: the (possibly relative) page name
    @rtype: string
    @return: the absolute page name
    """
    if pagename.startswith(PARENT_PREFIX):
        # relative to the parent of context; drop empty path components
        parts = context.split('/')[:-1] + [pagename[PARENT_PREFIX_LEN:]]
        pagename = '/'.join([p for p in parts if p])
    elif pagename.startswith(CHILD_PREFIX):
        # relative to context itself
        pagename = context + '/' + pagename[CHILD_PREFIX_LEN:]
    return pagename
+
def pagelinkmarkup(pagename):
    """ return markup that can be used as link to page <pagename> """
    from MoinMoin.parser.text_moin_wiki import Parser
    if re.match(Parser.word_rule + "$", pagename) is None:
        return u'["%s"]' % pagename # XXX use quoteName(pagename) later
    return pagename
+
#############################################################################
### mimetype support
#############################################################################
import mimetypes

# file extensions the stdlib mimetypes module does not know about
MIMETYPES_MORE = {
 # OpenOffice 2.x & other open document stuff
 '.odt': 'application/vnd.oasis.opendocument.text',
 '.ods': 'application/vnd.oasis.opendocument.spreadsheet',
 '.odp': 'application/vnd.oasis.opendocument.presentation',
 '.odg': 'application/vnd.oasis.opendocument.graphics',
 '.odc': 'application/vnd.oasis.opendocument.chart',
 '.odf': 'application/vnd.oasis.opendocument.formula',
 '.odb': 'application/vnd.oasis.opendocument.database',
 '.odi': 'application/vnd.oasis.opendocument.image',
 '.odm': 'application/vnd.oasis.opendocument.text-master',
 '.ott': 'application/vnd.oasis.opendocument.text-template',
 '.ots': 'application/vnd.oasis.opendocument.spreadsheet-template',
 '.otp': 'application/vnd.oasis.opendocument.presentation-template',
 '.otg': 'application/vnd.oasis.opendocument.graphics-template',
}
# register the additional types with the mimetypes module
# (plain for loop - a list comprehension used only for its side effects
# would build a useless list of None)
for _ext, _mimetype in MIMETYPES_MORE.items():
    mimetypes.add_type(_mimetype, _ext, True)

MIMETYPES_sanitize_mapping = {
    # this stuff is text, but got application/* for unknown reasons
    ('application', 'docbook+xml'): ('text', 'docbook'),
    ('application', 'x-latex'): ('text', 'latex'),
    ('application', 'x-tex'): ('text', 'tex'),
    ('application', 'javascript'): ('text', 'javascript'),
}

MIMETYPES_spoil_mapping = {} # inverse mapping of above
for key, value in MIMETYPES_sanitize_mapping.items():
    MIMETYPES_spoil_mapping[value] = key
+
+
+class MimeType(object):
+    """ represents a mimetype like text/plain """
+
+    def __init__(self, mimestr=None, filename=None):
+        self.major = self.minor = None # sanitized mime type and subtype
+        self.params = {} # parameters like "charset" or others
+        self.charset = None # this stays None until we know for sure!
+        self.raw_mimestr = mimestr
+
+        if mimestr:
+            self.parse_mimetype(mimestr)
+        elif filename:
+            self.parse_filename(filename)
+
+    def parse_filename(self, filename):
+        mtype, encoding = mimetypes.guess_type(filename)
+        if mtype is None:
+            mtype = 'application/octet-stream'
+        self.parse_mimetype(mtype)
+
+    def parse_mimetype(self, mimestr):
+        """ take a string like used in content-type and parse it into components,
+            alternatively it also can process some abbreviated string like "wiki"
+        """
+        parameters = mimestr.split(";")
+        parameters = [p.strip() for p in parameters]
+        mimetype, parameters = parameters[0], parameters[1:]
+        mimetype = mimetype.split('/')
+        if len(mimetype) >= 2:
+            major, minor = mimetype[:2] # we just ignore more than 2 parts
+        else:
+            major, minor = self.parse_format(mimetype[0])
+        self.major = major.lower()
+        self.minor = minor.lower()
+        for param in parameters:
+            key, value = param.split('=')
+            if value[0] == '"' and value[-1] == '"': # remove quotes
+                value = value[1:-1]
+            self.params[key.lower()] = value
+        if self.params.has_key('charset'):
+            self.charset = self.params['charset'].lower()
+        self.sanitize()
+
+    def parse_format(self, format):
+        """ maps from what we currently use on-page in a #format xxx processing
+            instruction to a sanitized mimetype major, minor tuple.
+            can also be user later for easier entry by the user, so he can just
+            type "wiki" instead of "text/moin-wiki".
+        """
+        format = format.lower()
+        if format in ('plain', 'csv', 'rst', 'docbook', 'latex', 'tex', 'html', 'css',
+                      'xml', 'python', 'perl', 'php', 'ruby', 'javascript',
+                      'cplusplus', 'java', 'pascal', 'diff', 'gettext', 'xslt', ):
+            mimetype = 'text', format
+        else:
+            mapping = {
+                'wiki': ('text', 'moin-wiki'),
+                'irc': ('text', 'irssi'),
+            }
+            try:
+                mimetype = mapping[format]
+            except KeyError:
+                mimetype = 'text', 'x-%s' % format
+        return mimetype
+
+    def sanitize(self):
+        """ convert to some representation that makes sense - this is not necessarily
+            conformant to /etc/mime.types or IANA listing, but if something is
+            readable text, we will return some text/* mimetype, not application/*,
+            because we need text/plain as fallback and not application/octet-stream.
+        """
+        self.major, self.minor = MIMETYPES_sanitize_mapping.get((self.major, self.minor), (self.major, self.minor))
+
+    def spoil(self):
+        """ this returns something conformant to /etc/mime.type or IANA as a string,
+            kind of inverse operation of sanitize(), but doesn't change self
+        """
+        major, minor = MIMETYPES_spoil_mapping.get((self.major, self.minor), (self.major, self.minor))
+        return self.content_type(major, minor)
+
+    def content_type(self, major=None, minor=None, charset=None, params=None):
+        """ return a string suitable for Content-Type header
+        """
+        major = major or self.major
+        minor = minor or self.minor
+        params = params or self.params or {}
+        if major == 'text':
+            charset = charset or self.charset or params.get('charset', config.charset)
+            params['charset'] = charset
+        mimestr = "%s/%s" % (major, minor)
+        params = ['%s="%s"' % (key.lower(), value) for key, value in params.items()]
+        params.insert(0, mimestr)
+        return "; ".join(params)
+
+    def mime_type(self):
+        """ return a string major/minor only, no params """
+        return "%s/%s" % (self.major, self.minor)
+
+    def module_name(self):
+        """ convert this mimetype to a string useable as python module name,
+            we yield the exact module name first and then proceed to shorter
+            module names (useful for falling back to them, if the more special
+            module is not found) - e.g. first "text_python", next "text".
+            Finally, we yield "application_octet_stream" as the most general
+            mimetype we have.
+            Hint: the fallback handler module for text/* should be implemented
+                  in module "text" (not "text_plain")
+        """
+        mimetype = self.mime_type()
+        modname = mimetype.replace("/", "_").replace("-", "_").replace(".", "_")
+        fragments = modname.split('_')
+        for length in range(len(fragments), 1, -1):
+            yield "_".join(fragments[:length])
+        yield self.raw_mimestr
+        yield fragments[0]
+        yield "application_octet_stream"
+
+
#############################################################################
### Plugins
#############################################################################

class PluginError(Exception):
    """ Base class for plugin errors """

class PluginMissingError(PluginError):
    """ Raised when a plugin is not found """

class PluginAttributeError(PluginError):
    """ Raised when a plugin module does not contain the requested attribute """
+
+
def importPlugin(cfg, kind, name, function="execute"):
    """ Import wiki or builtin plugin

    Returns function from a plugin module name. If name can not be
    imported, raise PluginMissingError. If function is missing, raise
    PluginAttributeError.

    kind may be one of 'action', 'formatter', 'macro', 'parser' or any other
    directory that exist in MoinMoin or data/plugin.

    Wiki plugins will always override builtin plugins. If you want
    specific plugin, use either importWikiPlugin or importBuiltinPlugin
    directly.

    @param cfg: wiki config instance
    @param kind: what kind of module we want to import
    @param name: the name of the module
    @param function: the function name
    @rtype: any object
    @return: "function" of module "name" of kind "kind", or None
    @raises PluginMissingError: if neither a wiki nor a builtin plugin exists
    @raises PluginAttributeError: if the plugin module lacks "function"
    """
    # wiki plugins take precedence over builtin ones of the same name
    try:
        return importWikiPlugin(cfg, kind, name, function)
    except PluginMissingError:
        return importBuiltinPlugin(kind, name, function)
+
+
def importWikiPlugin(cfg, kind, name, function="execute"):
    """ Import plugin from the wiki data directory

    See importPlugin docstring.
    """
    if name not in wikiPlugins(kind, cfg):
        raise PluginMissingError
    module_path = '%s.plugin.%s.%s' % (cfg.siteid, kind, name)
    return importNameFromPlugin(module_path, function)
+
+
def importBuiltinPlugin(kind, name, function="execute"):
    """ Import builtin plugin from MoinMoin package

    See importPlugin docstring.
    """
    if name not in builtinPlugins(kind):
        raise PluginMissingError
    module_path = 'MoinMoin.%s.%s' % (kind, name)
    return importNameFromPlugin(module_path, function)
+
+
def importNameFromPlugin(moduleName, name):
    """ Return name from plugin module

    Raise PluginAttributeError if name does not exist.
    """
    module = __import__(moduleName, globals(), {}, [name])
    missing = object() # sentinel: distinguishes "absent" from any real value
    attr = getattr(module, name, missing)
    if attr is missing:
        raise PluginAttributeError
    return attr
+
+
def builtinPlugins(kind):
    """ Gets a list of modules in MoinMoin.'kind'

    @param kind: what kind of modules we look for
    @rtype: list
    @return: module names
    """
    return pysupport.importName("MoinMoin.%s" % kind, "modules")
+
+
def wikiPlugins(kind, cfg):
    """ Gets a list of modules in data/plugin/'kind'

    Require valid plugin directory. e.g missing 'parser' directory or
    missing '__init__.py' file will raise errors.

    @param kind: what kind of modules we look for
    @rtype: list
    @return: module names
    """
    # Wiki plugins are located in wikiconfig.plugin module
    return pysupport.importName("%s.plugin.%s" % (cfg.siteid, kind), "modules")
+
+
def getPlugins(kind, cfg):
    """ Gets a list of plugin names of kind

    @param kind: what kind of modules we look for
    @rtype: list
    @return: module names
    """
    # start from a copy, so we don't mutate the list builtinPlugins() returned
    all_plugins = list(builtinPlugins(kind))
    # add extension plugins, skipping duplicates
    for plugin in wikiPlugins(kind, cfg):
        if plugin not in all_plugins:
            all_plugins.append(plugin)
    return all_plugins
+
+
def searchAndImportPlugin(cfg, type, name, what=None):
    """ Look up and import a plugin for the mimetype-like `name`.

    Tries the module name candidates produced by MimeType.module_name()
    (most specific first) until a matching plugin is found.

    @param cfg: wiki config instance
    @param type: plugin kind, e.g. "parser" or "formatter"
    @param name: mimetype-like name to find a plugin for
    @param what: name to import from the plugin module (defaults to the
                 conventional class name for the given plugin kind)
    @rtype: any object
    @return: the imported plugin object
    @raises PluginMissingError: if no plugin module matches
    """
    if what is None:
        what = {"parser": "Parser", "formatter": "Formatter"}[type]
    for module_name in MimeType(name).module_name():
        try:
            return importPlugin(cfg, type, module_name, what)
        except PluginMissingError:
            pass # try the next, more general candidate
    raise PluginMissingError("Plugin not found!")
+
+
#############################################################################
### Parsers
#############################################################################

def getParserForExtension(cfg, extension):
    """
    Returns the Parser class of the parser fit to handle a file
    with the given extension. The extension should be in the same
    format as os.path.splitext returns it (i.e. with the dot).
    Returns None if no parser willing to handle is found.
    The dict of extensions is cached in the config object.

    @param cfg: the Config instance for the wiki in question
    @param extension: the filename extension including the dot
    @rtype: class, None
    @returns: the parser class or None
    """
    if not hasattr(cfg.cache, 'EXT_TO_PARSER'):
        # build the extension -> parser class mapping once and cache it
        ext_map = {}
        wildcard_parser = None
        for pname in getPlugins('parser', cfg):
            try:
                Parser = importPlugin(cfg, 'parser', pname, 'Parser')
            except PluginMissingError:
                continue
            exts = getattr(Parser, 'extensions', None)
            if isinstance(exts, list):
                for ext in exts:
                    ext_map[ext] = Parser
            elif exts is not None and str(exts) == '*':
                wildcard_parser = Parser # catch-all parser
        cfg.cache.EXT_TO_PARSER = ext_map
        cfg.cache.EXT_TO_PARSER_DEFAULT = wildcard_parser

    return cfg.cache.EXT_TO_PARSER.get(extension, cfg.cache.EXT_TO_PARSER_DEFAULT)
+
+
#############################################################################
### Parameter parsing
#############################################################################

def parseAttributes(request, attrstring, endtoken=None, extension=None):
    """
    Parse a list of attributes and return a dict plus a possible
    error message.
    If extension is passed, it has to be a callable that returns
    a tuple (found_flag, msg). found_flag is whether it did find and process
    something, msg is '' when all was OK or any other string to return an error
    message.

    @param request: the request object
    @param attrstring: string containing the attributes to be parsed
    @param endtoken: token terminating parsing
    @param extension: extension function -
                      gets called with the current token, the parser and the dict
    @rtype: dict, msg
    @return: a dict plus a possible error message
    """
    import shlex, StringIO

    _ = request.getText

    parser = shlex.shlex(StringIO.StringIO(attrstring))
    parser.commenters = ''
    msg = None
    attrs = {}

    # loop until an error is recorded (msg set) or input is exhausted
    while not msg:
        try:
            key = parser.get_token()
        except ValueError, err:
            msg = str(err)
            break
        if not key: break # end of input
        if endtoken and key == endtoken: break

        # call extension function with the current token, the parser, and the dict
        if extension:
            found_flag, msg = extension(key, parser, attrs)
            #request.log("%r = extension(%r, parser, %r)" % (msg, key, attrs))
            if found_flag:
                continue
            elif msg:
                break
            #else (we found nothing, but also didn't have an error msg) we just continue below:

        # expect '=' after the attribute name
        try:
            eq = parser.get_token()
        except ValueError, err:
            msg = str(err)
            break
        if eq != "=":
            msg = _('Expected "=" to follow "%(token)s"') % {'token': key}
            break

        # expect the attribute value
        try:
            val = parser.get_token()
        except ValueError, err:
            msg = str(err)
            break
        if not val:
            msg = _('Expected a value for key "%(token)s"') % {'token': key}
            break

        key = escape(key) # make sure nobody cheats

        # safely escape and quote value
        if val[0] in ["'", '"']:
            val = escape(val)
        else:
            val = '"%s"' % escape(val, 1)

        attrs[key.lower()] = val

    return attrs, msg or ''
+
+
class ParameterParser:
    """ MoinMoin macro parameter parser

        Parses a given parameter string, separates the individual parameters
        and detects their type.

        Possible parameter types are:

        Name      | short  | example
        ----------------------------
         Integer  | i      | -374
         Float    | f      | 234.234 23.345E-23
         String   | s      | 'Stri\'ng'
         Boolean  | b      | 0 1 True false
         Name     |        | case_sensitive | converted to string

        So say you want to parse three things, name, age and if the
        person is male or not:

        The pattern will be: %(name)s%(age)i%(male)b

        As a result, the returned dict will put the first value into
        male, second into age etc. If some argument is missing, it will
        get None as its value. This also means that all the identifiers
        in the pattern will exist in the dict, they will just have the
        value None if they were not specified by the caller.

        So if we call it with the parameters as follows:
            ("John Smith", 18)
        this will result in the following dict:
            {"name": "John Smith", "age": 18, "male": None}

        Another way of calling would be:
            ("John Smith", male=True)
        this will result in the following dict:
            {"name": "John Smith", "age": None, "male": True}

        @copyright: 2004 by Florian Festi,
                    2006 by Mikko Virkkilä
        @license: GNU GPL, see COPYING for details.
    """

    def __init__(self, pattern):
        """ Compile the value-matching regex and parse the format pattern.

        @param pattern: format pattern like "%i%s%(name)s" (see class docstring)
        """
        #parameter_re = "([^\"',]*(\"[^\"]*\"|'[^']*')?[^\"',]*)[,)]"
        name = "(?P<%s>[a-zA-Z_][a-zA-Z0-9_]*)"
        int_re = r"(?P<int>-?\d+)"
        bool_re = r"(?P<bool>(([10])|([Tt]rue)|([Ff]alse)))"
        float_re = r"(?P<float>-?\d+\.\d+([eE][+-]?\d+)?)"
        string_re = (r"(?P<string>('([^']|(\'))*?')|" +
                                r'("([^"]|(\"))*?"))')
        name_re = name % "name"
        name_param_re = name % "name_param"

        # one parameter: optional "name =", then one typed value, then ',' or end
        param_re = r"\s*(\s*%s\s*=\s*)?(%s|%s|%s|%s|%s)\s*(,|$)" % (
                   name_re, float_re, int_re, bool_re, string_re, name_param_re)
        self.param_re = re.compile(param_re, re.U)
        self._parse_pattern(pattern)

    def _parse_pattern(self, pattern):
        """ Fill self.param_list / self.param_dict / self.optional from pattern. """
        param_re = r"(%(?P<name>\(.*?\))?(?P<type>[ibfs]{1,3}))|\|"
        i = 0
        # TODO: Optionals aren't checked.
        self.optional = []
        named = False
        self.param_list = []
        self.param_dict = {}

        for match in re.finditer(param_re, pattern):
            if match.group() == "|":
                self.optional.append(i) # remember where optional params start
                continue
            self.param_list.append(match.group('type'))
            if match.group('name'):
                named = True
                self.param_dict[match.group('name')[1:-1]] = i
            elif named:
                # positional specifiers may not follow named ones
                raise ValueError, "Named parameter expected"
            i += 1

    def __str__(self):
        return "%s, %s, optional:%s" % (self.param_list, self.param_dict,
                                        self.optional)

    def parse_parameters(self, input):
        """ Parse an argument string against the compiled pattern.

        @param input: argument string, e.g. ' 4,"text", a="x"'
        @rtype: tuple
        @return: (parameter_list, parameter_dict)
        """
        #Default list to "None"s
        parameter_list = [None] * len(self.param_list)
        parameter_dict = {}
        check_list = [0] * len(self.param_list)

        i = 0
        start = 0
        named = False
        while start < len(input):
            match = re.match(self.param_re, input[start:])
            if not match:
                raise ValueError, "Misformatted value"
            start += match.end()
            value = None
            # exactly one of the named groups matches; remember value and type
            if match.group("int"):
                value = int(match.group("int"))
                type = 'i'
            elif match.group("bool"):
                value = (match.group("bool") == "1") or (match.group("bool") == "True") or (match.group("bool") == "true")
                type = 'b'
            elif match.group("float"):
                value = float(match.group("float"))
                type = 'f'
            elif match.group("string"):
                value = match.group("string")[1:-1]
                type = 's'
            elif match.group("name_param"):
                value = match.group("name_param")
                type = 'n'
            else:
                value = None

            # NOTE(review): this append grows parameter_list beyond its
            # preallocated length, and the indexed assignments below then
            # overwrite the preallocated slots - the trailing appended copies
            # look unintended; confirm before changing.
            parameter_list.append(value)
            if match.group("name"):
                if not self.param_dict.has_key(match.group("name")):
                    raise ValueError, "Unknown parameter name '%s'" % match.group("name")
                nr = self.param_dict[match.group("name")]
                if check_list[nr]:
                    #raise ValueError, "Parameter specified twice"
                    #TODO: Something saner that raising an exception. This is pretty good, since it ignores it.
                    pass
                else:
                    check_list[nr] = 1
                parameter_dict[match.group("name")] = value
                parameter_list[nr] = value
                named = True
            elif named:
                raise ValueError, "Only named parameters allowed"
            else:
                nr = i
                parameter_list[nr] = value

            #Let's populate and map our dictionary to what's been found
            for name in self.param_dict.keys():
                tmp = self.param_dict[name]
                parameter_dict[name]=parameter_list[tmp]

            i += 1

        return parameter_list, parameter_dict
+
+
+""" never used:
+    def _check_type(value, type, format):
+        if type == 'n' and 's' in format: # n as s
+            return value
+
+        if type in format:
+            return value # x -> x
+
+        if type == 'i':
+            if 'f' in format:
+                return float(value) # i -> f
+            elif 'b' in format:
+                return value # i -> b
+        elif type == 'f':
+            if 'b' in format:
+                return value  # f -> b
+        elif type == 's':
+            if 'b' in format:
+                return value.lower() != 'false' # s-> b
+
+        if 's' in format: # * -> s
+            return str(value)
+        else:
+            pass # XXX error
+
+def main():
+    pattern = "%i%sf%s%ifs%(a)s|%(b)s"
+    param = ' 4,"DI\'NG", b=retry, a="DING"'
+
+    #p_list, p_dict = parse_parameters(param)
+    
+    print 'Pattern :', pattern
+    print 'Param :', param
+
+    P = ParameterParser(pattern)
+    print P
+    print P.parse_parameters(param)
+
+
+if __name__=="__main__":
+    main()
+"""
+
+#############################################################################
+### Misc
+#############################################################################
def taintfilename(basename):
    """
    Make a filename that is supposed to be a plain name secure, i.e.
    remove any possible path components that compromise our system.

    @param basename: (possibly unsafe) filename
    @rtype: string
    @return: (safer) filename
    """
    # Every path-meaningful fragment is collapsed to an underscore so the
    # result can never climb out of the intended directory.
    unsafe_fragments = (os.pardir, ':', '/', '\\', '<', '>')
    for fragment in unsafe_fragments:
        if fragment in basename:
            basename = basename.replace(fragment, '_')
    return basename
+
+
def mapURL(request, url):
    """
    Map URLs according to 'cfg.url_mappings'.

    @param url: a URL
    @rtype: string
    @return: mapped URL
    """
    mappings = request.cfg.url_mappings
    if mappings:
        # first configured prefix that matches wins; its replacement value
        # is spliced in front of the rest of the URL
        for prefix in mappings.keys():
            if url.startswith(prefix):
                return mappings[prefix] + url[len(prefix):]
    # nothing configured or nothing matched: URL stays as it is
    return url
+
+
def getUnicodeIndexGroup(name):
    """
    Return a group key for `name`, which must be a unicode string.
    Currently supported: Hangul Syllables (U+AC00 - U+D7AF), grouped in
    blocks of 588 syllables (i.e. by leading consonant).

    @param name: a unicode string (must be non-empty)
    @rtype: string
    @return: group key; for non-Hangul names this is the uppercased
             first character (note: never None, despite what older docs said)
    """
    c = name[0]
    if u'\uAC00' <= c <= u'\uD7AF': # Hangul Syllables
        # use // explicitly: plain / relies on classic integer division and
        # silently yields a float under true division (python -Qnew / py3k),
        # which would make unichr() fail
        return unichr(0xac00 + ((ord(c) - 0xac00) // 588) * 588)
    else:
        return c.upper() # we put lower and upper case words into the same index group
+
+
def isStrictWikiname(name, word_re=re.compile(ur"^(?:[%(u)s][%(l)s]+){2,}$" % {'u':config.chars_upper, 'l':config.chars_lower})):
    """
    Check whether this is NOT an extended name.

    A strict WikiName is two or more capitalized words run together
    (CamelCase), built from the configured upper-/lower-case character
    classes. The default regex is compiled once, at function definition
    time, and shared by all calls.

    @param name: the wikiname in question
    @param word_re: precompiled pattern encoding the strict CamelCase rule
                    (the default should normally be used)
    @rtype: bool
    @return: true if name matches the word_re
    """
    # NOTE(review): actually returns the match object (or None), not a real
    # bool -- callers should rely on truthiness only.
    return word_re.match(name)
+
+
def isPicture(url):
    """
    Is this a picture's url?

    @param url: the url in question
    @rtype: bool
    @return: true if url points to a picture
    """
    dot = url.rfind(".")
    if dot <= 0:
        # no extension at all, or the whole name is just ".ext"
        return False
    extension = url[dot:].lower()
    return extension in ['.gif', '.jpg', '.jpeg', '.png', '.bmp', '.ico', ]
+
+
def link_tag(request, params, text=None, formatter=None, on=None, **kw):
    """ Create a link.

    TODO: cleanup css_class

    @param request: the request object
    @param params: parameter string appended to the URL after the scriptname/
    @param text: text / inner part of the <a>...</a> link - does NOT get
                 escaped, so you can give HTML here and it will be used verbatim
    @param formatter: the formatter object to use (falls back to
                      request.html_formatter if None)
    @param on: opening/closing tag only
    @keyword attrs: additional attrs (HTMLified string) (removed in 1.5.3)
    @rtype: string
    @return: formatted link tag
    """
    if formatter is None:
        formatter = request.html_formatter
    # pop() reads and removes css_class in one step (was has_key + read + del),
    # so it is handed to formatter.url positionally and not again via **kw
    css_class = kw.pop('css_class', None)
    id = kw.get('id', None)
    name = kw.get('name', None)
    if text is None:
        text = params # default
    if formatter:
        url = "%s/%s" % (request.getScriptname(), params)
        # formatter.url will escape the url part
        if on is not None:
            tag = formatter.url(on, url, css_class, **kw)
        else:
            # no on given: emit a complete open-text-close link
            tag = (formatter.url(1, url, css_class, **kw) +
                formatter.rawHTML(text) +
                formatter.url(0))
    else: # this shouldn't be used any more:
        if on is not None and not on:
            tag = '</a>'
        else:
            attrs = ''
            if css_class:
                attrs += ' class="%s"' % css_class
            if id:
                attrs += ' id="%s"' % id
            if name:
                attrs += ' name="%s"' % name
            tag = '<a%s href="%s/%s">' % (attrs, request.getScriptname(), params)
            if not on:
                tag = "%s%s</a>" % (tag, text)
        request.log("Warning: wikiutil.link_tag called without formatter and without request.html_formatter. tag=%r" % (tag, ))
    return tag
+
def containsConflictMarker(text):
    """ Returns true if there is a conflict marker in the text. """
    conflict_marker = "/!\\ '''Edit conflict"
    return conflict_marker in text
+
def pagediff(request, pagename1, rev1, pagename2, rev2, **kw):
    """
    Calculate the "diff" between two page contents.

    @param pagename1: name of first page
    @param rev1: revision of first page
    @param pagename2: name of second page
    @param rev2: revision of second page
    @keyword ignorews: if 1: ignore pure-whitespace changes.
    @rtype: list
    @return: lines of diff output
    """
    from MoinMoin.Page import Page
    from MoinMoin.util import diff_text
    # fetch both revisions as line lists and hand them to the text differ
    old_lines = Page(request, pagename1, rev=rev1).getlines()
    new_lines = Page(request, pagename2, rev=rev2).getlines()
    return diff_text.diff(old_lines, new_lines, **kw)
+
+
+########################################################################
+### Tickets - used by RenamePage and DeletePage
+########################################################################
+
def createTicket(request, tm=None):
    """Create a ticket using a site-specific secret (the config)"""
    import sha
    # ticket id: caller-supplied value, or the current time as 10 hex digits
    ticket_id = tm or "%010x" % time.time()
    digest = sha.new()
    digest.update(ticket_id)

    # fold a selection of site-specific config values into the digest so a
    # ticket minted on one wiki is not valid on another
    varnames = ['data_dir', 'data_underlay_dir', 'language_default',
                'mail_smarthost', 'mail_from', 'page_front_page',
                'theme_default', 'sitename', 'logo_string',
                'interwikiname', 'user_homewiki', 'acl_rights_before', ]
    for varname in varnames:
        value = getattr(request.cfg, varname, None)
        if isinstance(value, (str, unicode)):
            digest.update(repr(value))

    return "%s.%s" % (ticket_id, digest.hexdigest())
+
+
def checkTicket(request, ticket):
    """Check validity of a previously created ticket"""
    try:
        timestamp_str = ticket.split('.')[0]
        timestamp = int(timestamp_str, 16)
    except ValueError:
        return False  # invalid or empty ticket
    # we don't accept tickets older than 10 hours
    oldest_acceptable = time.time() - 10 * 3600
    if timestamp < oldest_acceptable:
        return False
    # recompute the ticket for that timestamp and require an exact match
    expected = createTicket(request, timestamp_str)
    return ticket == expected
+
+
def renderText(request, Parser, text, line_anchors=False):
    """executes raw wiki markup with all page elements"""
    import StringIO
    capture = StringIO.StringIO()
    # temporarily redirect all formatter output into our capture buffer
    request.redirect(capture)
    parser = Parser(text, request)
    parser.format(request.formatter, inhibit_p=True)
    rendered = capture.getvalue()
    # restore normal output again
    request.redirect()
    del capture
    return rendered
+
+
def getProcessingInstructions(text):
    """creates dict of processing instructions from raw wiki markup"""
    known_pis = ("format", "refresh", "redirect", "deprecated", "pragma",
                 "form", "acl", "language")
    kw = {}
    for line in text.split('\n'):
        if not line.startswith('#'):
            continue
        rest = line[1:]
        for pi in known_pis:
            # instruction names match case-insensitively
            if rest.lower().startswith(pi):
                # value is whatever follows the instruction name
                kw[pi] = line[len(pi)+1:].strip()
                break
    return kw
+
+
def getParser(request, text):
    """gets the parser class for some raw wiki markup"""
    # XML documents always go to the xslt parser
    if text and text.startswith('<?xml'):
        pi_format = "xslt"
    else:
        # otherwise honour a #format processing instruction, falling back
        # to the wiki-wide default markup
        instructions = getProcessingInstructions(text)
        default_format = request.cfg.default_markup or "wiki"
        pi_format = instructions.get("format", default_format).lower()

    Parser = searchAndImportPlugin(request.cfg, "parser", pi_format)
    return Parser
+
--- a/MoinMoin/support/difflib.py	Sun Aug 03 22:57:44 2008 +0200
+++ b/MoinMoin/support/difflib.py	Sun Aug 03 22:58:40 2008 +0200
@@ -1,8 +1,9 @@
 #! /usr/bin/env python
 # Python 2.4.3 (maybe other versions, too) has a broken difflib, sometimes
 # raising a "maximum recursion depth exceeded in cmp" exception.
-# This is taken from python.org SVN repo revision 46940 with patches
+# This is taken from python.org SVN repo revision 54230 with patches
 # 36160 and 34415 reversed for python2.3 compatibility.
+# Also, startswith(tuple) [2.5] was changed to multiple startswith(elem).
 
 """
 Module difflib -- helpers for computing deltas between objects.
@@ -1317,7 +1318,7 @@
 
 def _mdiff(fromlines, tolines, context=None, linejunk=None,
            charjunk=IS_CHARACTER_JUNK):
-    """Returns generator yielding marked up from/to side by side differences.
+    r"""Returns generator yielding marked up from/to side by side differences.
 
     Arguments:
     fromlines -- list of text lines to compared to tolines
@@ -1454,7 +1455,9 @@
                 num_blanks_pending -= 1
                 yield _make_line(lines,'-',0), None, True
                 continue
-            elif s.startswith(('--?+', '--+', '- ')):
+            elif s.startswith('--?+') or \
+                 s.startswith('--+') or \
+                 s.startswith('- '):
                 # in delete block and see a intraline change or unchanged line
                 # coming: yield the delete line and then blanks
                 from_line,to_line = _make_line(lines,'-',0), None
@@ -1478,7 +1481,8 @@
                 num_blanks_pending += 1
                 yield None, _make_line(lines,'+',1), True
                 continue
-            elif s.startswith(('+ ', '+-')):
+            elif s.startswith('+ ') or \
+                 s.startswith('+-'):
                 # will be leaving an add block: yield blanks then add line
                 from_line, to_line = None, _make_line(lines,'+',1)
                 num_blanks_to_yield,num_blanks_pending = num_blanks_pending+1,0
@@ -1951,8 +1955,7 @@
         fromlist,tolist,flaglist,next_href,next_id = self._convert_flags(
             fromlist,tolist,flaglist,context,numlines)
 
-        import cStringIO
-        s = cStringIO.StringIO()
+        s = []
         fmt = '            <tr><td class="diff_next"%s>%s</td>%s' + \
               '<td class="diff_next">%s</td>%s</tr>\n'
         for i in range(len(flaglist)):
@@ -1960,9 +1963,9 @@
                 # mdiff yields None on separator lines skip the bogus ones
                 # generated for the first line
                 if i > 0:
-                    s.write('        </tbody>        \n        <tbody>\n')
+                    s.append('        </tbody>        \n        <tbody>\n')
             else:
-                s.write( fmt % (next_id[i],next_href[i],fromlist[i],
+                s.append( fmt % (next_id[i],next_href[i],fromlist[i],
                                            next_href[i],tolist[i]))
         if fromdesc or todesc:
             header_row = '<thead><tr>%s%s%s%s</tr></thead>' % (
@@ -1974,7 +1977,7 @@
             header_row = ''
 
         table = self._table_template % dict(
-            data_rows=s.getvalue(),
+            data_rows=''.join(s),
             header_row=header_row,
             prefix=self._prefix[1])
 
--- a/wiki/config/wikiconfig.py	Sun Aug 03 22:57:44 2008 +0200
+++ b/wiki/config/wikiconfig.py	Sun Aug 03 22:58:40 2008 +0200
@@ -153,7 +153,7 @@
     # e.g. CategoryFoo -> group 'all' ==  CategoryFoo, group 'key' == Foo
     # moin's code will add ^ / $ at beginning / end when needed
     # You must use Unicode strings here [Unicode]
-    page_category_regex = ur'(?P<all>Category(?P<key>\S+))'
+    page_category_regex = ur'(?P<all>Category(?P<key>(?!Template)\S+))'
     page_dict_regex = ur'(?P<all>(?P<key>\S+)Dict)'
     page_group_regex = ur'(?P<all>(?P<key>\S+)Group)'
     page_template_regex = ur'(?P<all>(?P<key>\S+)Template)'