changeset 657:016a8a3ef354

wikiutil.MimeType class, renamed parsers to mimetype-like module names
author Thomas Waldmann <tw AT waldmann-edv DOT de>
date Tue, 16 May 2006 20:12:29 +0200
parents ad2ac49d17fe
children e9eff264e2b9
files MoinMoin/Page.py MoinMoin/PageGraphicalEditor.py MoinMoin/_tests/test_formatter.py MoinMoin/_tests/test_pysupport.py MoinMoin/action/MyPages.py MoinMoin/i18n/__init__.py MoinMoin/macro/FootNote.py MoinMoin/macro/__init__.py MoinMoin/parser/CSV.py MoinMoin/parser/cplusplus.py MoinMoin/parser/docbook.py MoinMoin/parser/html.py MoinMoin/parser/irc.py MoinMoin/parser/java.py MoinMoin/parser/pascal.py MoinMoin/parser/plain.py MoinMoin/parser/python.py MoinMoin/parser/rst.py MoinMoin/parser/text.py MoinMoin/parser/text_cplusplus.py MoinMoin/parser/text_csv.py MoinMoin/parser/text_docbook.py MoinMoin/parser/text_html.py MoinMoin/parser/text_irssi.py MoinMoin/parser/text_java.py MoinMoin/parser/text_moin_wiki.py MoinMoin/parser/text_pascal.py MoinMoin/parser/text_python.py MoinMoin/parser/text_rst.py MoinMoin/parser/text_xslt.py MoinMoin/parser/wiki.py MoinMoin/parser/xslt.py MoinMoin/wikiutil.py docs/CHANGES
diffstat 34 files changed, 2711 insertions(+), 2577 deletions(-) [+]
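
The core of the change is easiest to see in the Page.py hunks below: instead of importing a parser plugin directly under the page's #format name, callers now ask the new wikiutil.MimeType class for candidate module names and try them in order. The following is a minimal sketch of that lookup, assuming only what the diff itself shows (MimeType(pi_format), its module_name() iterable, importPlugin and PluginMissingError); wikiutil.py is touched by this changeset but its hunks are not part of this section.

    from MoinMoin import wikiutil

    def load_parser(request, pi_format):
        """Return the Parser class for a page's #format processing instruction."""
        mt = wikiutil.MimeType(pi_format)
        for module_name in mt.module_name():
            # candidates are tried in the order module_name() yields them;
            # importPlugin raises PluginMissingError if no such parser module exists
            try:
                return wikiutil.importPlugin(request.cfg, "parser", module_name, "Parser")
            except wikiutil.PluginMissingError:
                continue
        # the changeset leaves this case as an open XXX (see the Page.py hunks);
        # raising the plugin error here is only this sketch's choice
        raise wikiutil.PluginMissingError(pi_format)

The renamed modules in the files list above (wiki -> text_moin_wiki, plain -> text, CSV -> text_csv, and so on) are what these candidate module names now resolve to.
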
--- a/MoinMoin/Page.py	Tue May 16 13:10:13 2006 +0200
+++ b/MoinMoin/Page.py	Tue May 16 20:12:29 2006 +0200
@@ -1221,15 +1221,17 @@
                     ) % {'pagename': self.formatter.text(self.page_name)})
                     request.write(''.join(pi_formtext))
 
-        # Load the parser, or default to plain text parser that will
-        # just show the page raw source.
-        # TODO: do we need this magic? any effect on debugging?
-        try:
-            Parser = wikiutil.importPlugin(self.request.cfg, "parser", 
-                                           self.pi_format, "Parser")
-        except wikiutil.PluginMissingError:
-            from MoinMoin.parser.plain import Parser
-
+        # Load the parser
+        mt = wikiutil.MimeType(self.pi_format)
+        for module_name in mt.module_name():
+            try:
+                Parser = wikiutil.importPlugin(self.request.cfg, "parser", module_name, "Parser")
+                break
+            except wikiutil.PluginMissingError:
+                pass
+        else:
+            raise "No matching parser" # XXX what do we use if nothing at all matches?
+            
         # start wiki content div
         request.write(self.formatter.startContent(content_id))
 
@@ -1312,11 +1314,15 @@
             self.getFormatterName() in self.cfg.caching_formats):
             # Everything is fine, now check the parser:
             if parser is None:
-                try:
-                    parser = wikiutil.importPlugin(self.request.cfg, "parser",
-                                                   self.pi_format, "Parser")
-                except wikiutil.PluginMissingError:
-                    pass
+                mt = wikiutil.MimeType(self.pi_format)
+                for module_name in mt.module_name():
+                    try:
+                        parser = wikiutil.importPlugin(self.request.cfg, "parser", module_name, "Parser")
+                        break
+                    except wikiutil.PluginMissingError:
+                        pass
+                else:
+                    raise "no matching parser" # XXX what now?
             return getattr(parser, 'caching', False)
         return False
 
--- a/MoinMoin/PageGraphicalEditor.py	Tue May 16 13:10:13 2006 +0200
+++ b/MoinMoin/PageGraphicalEditor.py	Tue May 16 20:12:29 2006 +0200
@@ -17,7 +17,7 @@
 from MoinMoin.util import filesys
 import MoinMoin.util.web
 import MoinMoin.util.mail
-from MoinMoin.parser.wiki import Parser
+from MoinMoin.parser.text_moin_wiki import Parser
 
 from StringIO import StringIO
 
--- a/MoinMoin/_tests/test_formatter.py	Tue May 16 13:10:13 2006 +0200
+++ b/MoinMoin/_tests/test_formatter.py	Tue May 16 20:12:29 2006 +0200
@@ -11,7 +11,6 @@
 from StringIO import StringIO
 from MoinMoin._tests import TestConfig
 from MoinMoin.Page import Page
-from MoinMoin.parser.wiki import Parser
 from MoinMoin import wikiutil
 
 
--- a/MoinMoin/_tests/test_pysupport.py	Tue May 16 13:10:13 2006 +0200
+++ b/MoinMoin/_tests/test_pysupport.py	Tue May 16 20:12:29 2006 +0200
@@ -26,13 +26,13 @@
     def testNonExistingAttribute(self):
         """ pysupport: import nonexistent attritbue raises AttributeError """
         self.assertRaises(AttributeError, pysupport.importName,
-                          'MoinMoin.parser.wiki','NoSuchParser')
+                          'MoinMoin.parser.text_moin_wiki','NoSuchParser')
 
     def testExisting(self):
         """ pysupport: import name from existing module """
-        from MoinMoin.parser import wiki
-        Parser = pysupport.importName('MoinMoin.parser.wiki', 'Parser')
-        self.failUnless(Parser is wiki.Parser)
+        from MoinMoin.parser import text_moin_wiki
+        Parser = pysupport.importName('MoinMoin.parser.text_moin_wiki', 'Parser')
+        self.failUnless(Parser is text_moin_wiki.Parser)
    
 
 class ImportNameFromPlugin(unittest.TestCase):
@@ -42,11 +42,9 @@
     
     def setUp(self):
         """ Check for valid plugin package """
-        self.pluginDirectory = os.path.join(self.request.cfg.data_dir,
-                                            'plugin', 'parser')
+        self.pluginDirectory = os.path.join(self.request.cfg.data_dir, 'plugin', 'parser')
         self.checkPackage(self.pluginDirectory)
-        self.pluginModule = (self.request.cfg.siteid + '.plugin.parser.' +
-                             self.plugin)
+        self.pluginModule = (self.request.cfg.siteid + '.plugin.parser.' + self.plugin)
 
     def checkPackage(self, path):
         for item in (path, os.path.join(path, '__init__.py')):
@@ -124,5 +122,4 @@
             except OSError, err:
                 if err.errno != errno.ENOENT:
                     raise
-    
-            
\ No newline at end of file
+
--- a/MoinMoin/action/MyPages.py	Tue May 16 13:10:13 2006 +0200
+++ b/MoinMoin/action/MyPages.py	Tue May 16 20:12:29 2006 +0200
@@ -57,7 +57,7 @@
     pagecontent = pagecontent.replace('\n', '\r\n')
 
     from MoinMoin.Page import Page
-    from MoinMoin.parser.wiki import Parser
+    from MoinMoin.parser.text_moin_wiki import Parser as WikiParser
     request.http_headers()
     
     # This action generate data using the user language
@@ -67,7 +67,7 @@
     # Start content - IMPORTANT - without content div, there is no direction support!
     request.write(request.formatter.startContent("content"))
 
-    parser = Parser(pagecontent, request)
+    parser = WikiParser(pagecontent, request)
     p = Page(request, "$$$")
     request.formatter.setPage(p)
     parser.format(request.formatter)
--- a/MoinMoin/i18n/__init__.py	Tue May 16 13:10:13 2006 +0200
+++ b/MoinMoin/i18n/__init__.py	Tue May 16 20:12:29 2006 +0200
@@ -74,14 +74,14 @@
         pass
     currentStack.append(text)
 
-    from MoinMoin.parser.wiki import Parser
+    from MoinMoin.parser.text_moin_wiki import Parser as WikiParser
     from MoinMoin.formatter.text_html import Formatter
     import StringIO
 
     origtext = text
     out = StringIO.StringIO()
     request.redirect(out)
-    parser = Parser(text, request, line_anchors=False)
+    parser = WikiParser(text, request, line_anchors=False)
     formatter = Formatter(request, terse=True)
     reqformatter = None
     if hasattr(request, 'formatter'):
--- a/MoinMoin/macro/FootNote.py	Tue May 16 13:10:13 2006 +0200
+++ b/MoinMoin/macro/FootNote.py	Tue May 16 20:12:29 2006 +0200
@@ -11,7 +11,7 @@
 
 import sha, StringIO
 from MoinMoin import config
-from MoinMoin.parser import wiki
+from MoinMoin.parser.text_moin_wiki import Parser as WikiParser
 
 Dependencies = ["time"] # footnote macro cannot be cached
 
@@ -61,7 +61,7 @@
                         
             out=StringIO.StringIO()
             request.redirect(out)
-            parser=wiki.Parser(request.footnotes[idx][0], request,
+            parser = WikiParser(request.footnotes[idx][0], request,
                                line_anchors=False)
             parser.format(formatter)
             result.append(out.getvalue())
--- a/MoinMoin/macro/__init__.py	Tue May 16 13:10:13 2006 +0200
+++ b/MoinMoin/macro/__init__.py	Tue May 16 20:12:29 2006 +0200
@@ -2,8 +2,8 @@
 """
     MoinMoin - Macro Implementation
 
-    These macros are used by the parser/wiki.py module
-    to implement complex and/or dynamic page content.
+    These macros are used by the wiki parser module to implement complex
+    and/or dynamic page content.
 
     The canonical interface to plugin macros is their execute() function,
     which gets passed an instance of the Macro class. Such an instance
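
The reworded docstring above describes the plugin macro interface: execute() receives a Macro instance. Below is a minimal sketch of such a plugin macro following that convention; the macro body and its greeting are purely illustrative, not part of this changeset.

    Dependencies = []

    def execute(macro, args):
        # 'macro' is the Macro instance mentioned in the docstring above; its
        # formatter renders output in whatever target format the request uses
        return macro.formatter.text("Hello from a plugin macro: %s" % (args or ""))

Such a file, dropped into the wiki's plugin/macro directory, can then be called from pages as [[MacroName(args)]].
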
--- a/MoinMoin/parser/CSV.py	Tue May 16 13:10:13 2006 +0200
+++ /dev/null	Thu Jan 01 00:00:00 1970 +0000
@@ -1,74 +0,0 @@
-# -*- coding: iso-8859-1 -*-
-"""
-    MoinMoin - Parser for CSV data
-
-    This parser lacks the flexibility to read arbitrary CSV dialects.
-
-    Perhaps this should be rewritten using another CSV lib
-    because the standard module csv does not support unicode.
-
-    @copyright: 2004 by Oliver Graf <ograf@bitart.de>, Alexander Schremmer
-    @license: GNU GPL, see COPYING for details.
-"""
-
-Dependencies = []
-
-class Parser:
-    """ Format CSV data as table
-    """
-
-    extensions = ['.csv']
-    Dependencies = []
-
-    def __init__(self, raw, request, **kw):
-        """ Store the source text.
-        """
-        self.raw = raw
-        self.request = request
-        self.form = request.form
-        self._ = request.getText
-
-        # parse extra arguments for excludes
-        self.exclude = []
-        self.separator = ';'
-        for arg in kw.get('format_args','').split():
-            if arg[0] == '-':
-                try:
-                    idx = int(arg[1:])
-                except ValueError:
-                    pass
-                else:
-                    self.exclude.append(idx-1)
-            else:
-                self.separator = arg
-
-    def format(self, formatter):
-        """ Parse and send the table.
-        """
-        lines = self.raw.split('\n')
-        if lines[0]:
-            # expect column headers in first line
-            first = 1
-        else:
-            # empty first line, no bold headers
-            first = 0
-            del lines[0]
-
-        self.request.write(formatter.table(1))
-        for line in lines:
-            self.request.write(formatter.table_row(1))
-            cells = line.split(self.separator)
-            for idx in range(len(cells)):
-                if idx in self.exclude:
-                    continue
-                self.request.write(formatter.table_cell(1))
-                if first:
-                    self.request.write(formatter.strong(1))
-                self.request.write(formatter.text(cells[idx]))
-                if first:
-                    self.request.write(formatter.strong(0))
-                self.request.write(formatter.table_cell(0))
-            self.request.write(formatter.table_row(0))
-            first = 0
-        self.request.write(formatter.table(0))
-
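
The removed CSV.py above (now text_csv.py) documents its format_args convention only through the code: a token of the form -N hides column N (1-based) and any other token becomes the separator. A standalone re-implementation of just that argument parsing, for illustration only:

    def parse_format_args(format_args):
        """Mirror the exclude/separator handling of the CSV parser above."""
        exclude, separator = [], ';'
        for arg in format_args.split():
            if arg.startswith('-'):
                try:
                    exclude.append(int(arg[1:]) - 1)   # store 0-based column index
                except ValueError:
                    pass                               # not a number: ignored, as the parser does
            else:
                separator = arg
        return exclude, separator

    print(parse_format_args("-2 ,"))   # ([1], ',') -> hide the 2nd column, split on commas
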
--- a/MoinMoin/parser/cplusplus.py	Tue May 16 13:10:13 2006 +0200
+++ /dev/null	Thu Jan 01 00:00:00 1970 +0000
@@ -1,70 +0,0 @@
-# -*- coding: iso-8859-1 -*-
-"""
-	MoinMoin - C++ Source Parser
-
-    @copyright: 2002 by Taesu Pyo <bigflood@hitel.net>
-    @license: GNU GPL, see COPYING for details.
-
-css:
-
-pre.cpparea     { font-style: sans-serif; color: #000000; }
-
-pre.cpparea span.ID       { color: #000000; }
-pre.cpparea span.Char     { color: #004080; }
-pre.cpparea span.Comment  { color: #808080; }
-pre.cpparea span.Number   { color: #008080; font-weight: bold; }
-pre.cpparea span.String   { color: #004080; }
-pre.cpparea span.SPChar   { color: #0000C0; }
-pre.cpparea span.ResWord  { color: #4040ff; font-weight: bold; }
-pre.cpparea span.ConsWord { color: #008080; font-weight: bold; }
-pre.cpparea span.ResWord2 { color: #0080ff; font-weight: bold; }
-pre.cpparea span.Special  { color: #0000ff; }
-pre.cpparea span.Preprc   { color: #804000; }
-
-"""
-
-from MoinMoin.util.ParserBase import ParserBase
-
-Dependencies = []
-
-class Parser(ParserBase):
-
-    parsername = "ColorizedCPlusPlus"
-    extensions = ['.c', '.h', '.cpp', '.c++']
-    Dependencies = []
-    
-    def setupRules(self):
-        ParserBase.setupRules(self)
-
-        self.addRulePair("Comment","/[*]","[*]/")
-        self.addRule("Comment","//.*$")
-        self.addRulePair("String",'L?"',r'$|[^\\](\\\\)*"')
-        self.addRule("Char",r"'\\.'|'[^\\]'")
-        self.addRule("Number",r"[0-9](\.[0-9]*)?(eE[+-][0-9])?[flFLdD]?|0[xX][0-9a-fA-F]+[Ll]?")
-        self.addRule("Preprc",r"^\s*#(.*\\\n)*(.*(?!\\))$")
-        self.addRule("ID","[a-zA-Z_][0-9a-zA-Z_]*")
-        self.addRule("SPChar",r"[~!%^&*()+=|\[\]:;,.<>/?{}-]")
-
-        reserved_words = ['struct','class','union','enum',
-        'int','float','double','signed','unsigned','char','short','void','bool',
-        'long','register','auto','operator',
-        'static','const','private','public','protected','virtual','explicit',
-        'new','delete','this',
-        'if','else','while','for','do','switch','case','default','sizeof',
-        'dynamic_cast','static_cast','const_cast','reinterpret_cast','typeid',
-        'try','catch','throw','throws','return','continue','break','goto']
-
-        reserved_words2 = ['extern', 'volatile', 'typedef', 'friend',
-                           '__declspec', 'inline','__asm','thread','naked',
-                           'dllimport','dllexport','namespace','using',
-                           'template','typename','goto']
-
-        special_words = ['std','string','vector','map','set','cout','cin','cerr']
-        constant_words = ['true','false','NULL']
-
-        self.addReserved(reserved_words)
-        self.addConstant(constant_words)
-
-        self.addWords(reserved_words2,'ResWord2')
-        self.addWords(special_words,'Special')
-
--- a/MoinMoin/parser/docbook.py	Tue May 16 13:10:13 2006 +0200
+++ /dev/null	Thu Jan 01 00:00:00 1970 +0000
@@ -1,197 +0,0 @@
-# -*- coding: iso-8859-1 -*-
-"""
-    MoinMoin - DocBook-XML Parser
-
-    This code was tested with 4Suite 1.0a4 and 1.0b1
-
-    @copyright: 2005 by Henry Ho <henryho167 AT hotmail DOT com>
-    @copyright: 2005 by MoinMoin:AlexanderSchremmer
-    @license: GNU GPL, see COPYING for details.
-
-    DOCBOOK Parser:
-
-    Features:
-    - image support through Attachment
-    - internal Wikilinks if a word is a strict wikiname
-    - image alt is preserved
-    - works with compiled xslt stylesheet for optimized performance
-
-    Configuration:
-    - make sure you have installed the DocBook XSLT files
-    - set the path to the html directory of the DocBook XSLT files in your
-      wiki or farm configuration:
-      docbook_html_dir = r"/usr/share/xml/docbook/stylesheet/nwalsh/html/"
-      Note that this directory needs to be writable because a cache file will
-      be created there.
-
-    >How can I use Ft API for DTD validation?
-    If you have PyXML installed, you can use ValidatingReader rather than
-    NonvalidatingReader.  See:
-    http://uche.ogbuji.net/tech/akara/nodes/2003-01-01/domlettes
-"""
-
-import StringIO
-import os.path
-import cPickle
-import re
-
-from MoinMoin import caching, config, wikiutil, Page
-from MoinMoin.parser.xslt import Parser as XsltParser
-from MoinMoin.parser.wiki import Parser as WikiParser
-
-Dependencies = []
-
-class Parser(XsltParser):
-    """
-        Send XML file formatted via XSLT.
-    """
-
-    caching = 1
-    Dependencies = Dependencies
-
-    def __init__(self, raw, request, **kw):
-        XsltParser.__init__(self, raw, request)
-
-        # relative path to docbook.xsl and compiled_xsl
-        docbook_html_directory = request.cfg.docbook_html_dir
-        self.db_xsl = os.path.join(docbook_html_directory, 'docbook.xsl')
-        self.db_compiled_xsl = os.path.join(docbook_html_directory, 'db_compiled.dat')
-
-        self.wikiParser = WikiParser(raw = self.raw, request = self.request, pretty_url=1)
-        self.key = 'docbook'
-
-    def format(self, formatter):
-        self.wikiParser.formatter = formatter
-        XsltParser.format(self, formatter)
-
-    def append_stylesheet(self):
-        """"
-            virtual function, for docbook parser
-        """
-        abs_db_xsl = os.path.abspath(self.db_xsl)
-        abs_db_compiled_xsl = os.path.abspath(self.db_compiled_xsl)
-
-        # same as path.exists, but also test if it is a file
-        if not os.path.isfile(abs_db_compiled_xsl):
-            _compile_xsl(abs_db_xsl, abs_db_compiled_xsl)
-
-        assert os.path.isfile(abs_db_compiled_xsl)
-
-        self.processor.appendStylesheetInstance(cPickle.load(file(abs_db_compiled_xsl, 'rb')))
-
-    def parse_result(self, result):
-        """
-        additional parsing to the resulting XSLT'ed result (resultString) before saving
-
-        will do:
-            BASIC CLEAN UP   : remove unnecessary HTML tags
-            RESOLVE IMG SRC  : fix src to find attachment
-            RESOLVE WikiNames: if a word is a valid wikiname & a valid wikipage,
-                               replace word with hyperlink
-        """
-
-        # BASIC CLEAN UP
-        # remove from beginning until end of body tag
-        found = re.search('<body.*?>', result)
-        if found:
-            result = result[found.end():]
-
-        # remove everything after & including </body>
-        found = result.rfind('</body>')
-        if found != -1:
-            result = result[:found]
-
-        # RESOLVE IMG SRC
-        found = re.finditer('<img.*?>', result)
-        if found:
-            splitResult = _splitResult(found, result)
-            for index in range(len(splitResult)):
-                if splitResult[index].startswith('<img'):
-                    found = re.search('src="(?P<source>.*?)"', splitResult[index])
-                    imageSrc = found.group('source')
-                    imageAlt = None # save alt
-                    found = re.search('alt="(?P<alt>.*?)"', splitResult[index])
-                    if found:
-                        imageAlt = found.group('alt')
-                    splitResult[index] = self.wikiParser.attachment( ('attachment:' + imageSrc, "") )
-                    if imageAlt: # restore alt
-                        splitResult[index] = re.sub('alt=".*?"', 'alt="%s"' % imageAlt, splitResult[index])
-
-            result = ''.join(splitResult)
-
-
-        # RESOLVE WikiNames
-        #    if a word is a valid wikiname & a valid wikipage,
-        #    replace word with hyperlink
-
-        found = re.finditer(self.wikiParser.word_rule, result)
-        if found:
-            splitResult = _splitResult(found, result)
-
-            for index in range(len(splitResult)):
-                if (re.match(self.wikiParser.word_rule, splitResult[index])
-                    and Page.Page(self.request, splitResult[index]).exists()):
-                    splitResult[index] = self.wikiParser._word_repl(splitResult[index])
-            result = ''.join(splitResult)
-
-        # remove stuff that fail HTML 4.01 Strict verification
-
-        # remove unsupported attributes
-        result = re.sub(' target=".*?"| type=".*?"', '', result)
-        result = re.sub('<hr .*?>', '<hr>', result)
-
-        # remove <p>...</p> inside <a>...</a> or <caption>...</caption>
-        found = re.finditer('<a href=".*?</a>|<caption>.*?</caption>', result) # XXX re.DOTALL)
-        if found:
-            splitResult = _splitResult(found, result)
-            for index in range(len(splitResult)):
-                if (splitResult[index].startswith('<a href="')
-                    or splitResult[index].startswith('<caption>')):
-                    splitResult[index] = splitResult[index].replace('<p>', '').replace('</p>', '')
-            result = ''.join(splitResult)
-
-        return result
-
-
-
-def _compile_xsl(XSLT_FILE, XSLT_COMPILED_FILE):
-    """
-        compiling docbook stylesheet
-
-        reference: http://155.210.85.193:8010/ccia/nodes/2005-03-18/compileXslt?xslt=/akara/akara.xslt
-    """
-    from Ft.Xml.Xslt.Processor import Processor
-    from Ft.Xml.Xslt import Stylesheet
-    from Ft.Xml import InputSource
-    from Ft.Lib import Uri
-
-    # New docbook processor
-    db_processor=Processor()
-
-    # Docbook Stylesheet
-    my_sheet_uri = Uri.OsPathToUri(XSLT_FILE, 1)
-    sty_isrc = InputSource.DefaultFactory.fromUri(my_sheet_uri)
-
-    # Append Stylesheet
-    db_processor.appendStylesheet(sty_isrc)
-
-    # Pickled stylesheet will be self.abs_db_compiled_xsl file
-    db_root = db_processor.stylesheet.root
-    fw = file(XSLT_COMPILED_FILE, 'wb')
-    cPickle.dump(db_root, fw) # , protocol=2)
-    fw.close()
-
-
-def _splitResult(iterator, result):
-    startpos = 0
-    splitResult = []
-
-    for f in iterator:
-        start, end = f.span()
-        splitResult.append(result[startpos:start])
-        splitResult.append(result[start:end])
-        startpos = end
-    splitResult.append(result[startpos:])
-
-    return splitResult
-
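
The removed docbook.py above (now text_docbook.py) documents one configuration knob, docbook_html_dir. A hedged example of setting it, assuming the usual MoinMoin wikiconfig.py pattern (a Config subclass of DefaultConfig); the path is the one suggested by the docstring and must be writable because the compiled stylesheet cache is created there:

    from MoinMoin.multiconfig import DefaultConfig

    class Config(DefaultConfig):
        sitename = u'Example Wiki'
        # directory containing docbook.xsl; a db_compiled.dat cache is written here
        docbook_html_dir = r"/usr/share/xml/docbook/stylesheet/nwalsh/html/"
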
--- a/MoinMoin/parser/html.py	Tue May 16 13:10:13 2006 +0200
+++ /dev/null	Thu Jan 01 00:00:00 1970 +0000
@@ -1,34 +0,0 @@
-# -*- coding: iso-8859-1 -*-
-"""
-    MoinMoin - HTML Parser
-
-    @copyright: 2006 by MoinMoin:AlexanderSchremmer
-    @license: GNU GPL, see COPYING for details.
-"""
-
-from MoinMoin.support.htmlmarkup import Markup
-from HTMLParser import HTMLParseError
-
-Dependencies = []
-
-class Parser:
-    """
-        Sends HTML code after filtering it.
-    """
-
-    extensions = ['.htm', '.html']
-    Dependencies = Dependencies
-    
-    def __init__(self, raw, request, **kw):
-        self.raw = raw
-        self.request = request
-
-    def format(self, formatter):
-        """ Send the text. """
-        try:
-            self.request.write(formatter.rawHTML(Markup(self.raw).sanitize()))
-        except HTMLParseError, e:
-            self.request.write(formatter.sysmsg(1) + 
-                formatter.text(u'HTML parsing error: %s in "%s"' % (e.msg,
-                                  self.raw.splitlines()[e.lineno - 1].strip())) +
-                formatter.sysmsg(0))
--- a/MoinMoin/parser/irc.py	Tue May 16 13:10:13 2006 +0200
+++ /dev/null	Thu Jan 01 00:00:00 1970 +0000
@@ -1,50 +0,0 @@
-# -*- coding: iso-8859-1 -*-
-"""
-    MoinMoin - IRC Log Parser (irssi style logs)
-
-    @copyright: 2004 by Thomas Waldmann
-    @license: GNU GPL, see COPYING for details.
-"""
-
-import re
-from MoinMoin import wikiutil
-
-Dependencies = []
-
-class Parser:
-    """
-        Send IRC logs in a table
-    """
-    extensions = ['.irc']
-    Dependencies = []
-
-    def __init__(self, raw, request, **kw):
-        self.raw = raw
-        self.request = request
-        self.form = request.form
-        self._ = request.getText
-        self.out = kw.get('out', request)
-
-    def format(self, formatter):
-        lines = self.raw.split('\n')
-        # TODO: Add support for displaying things like join and part messages.
-        pattern = re.compile(r"""
-            ((\[|\()?                      # Opening bracket for the timestamp (if it exists)
-                (?P<time>([\d]?\d[:.]?)+)  # Timestamp as one or more :/.-separated groups of 1 or 2 digits (if it exists)
-            (\]|\))?\s+)?                  # Closing bracket for the timestamp (if it exists) plus whitespace
-            <\s*?(?P<nick>.*?)\s*?>        # Nick
-            \s+                            # Space between the nick and message
-            (?P<msg>.*)                    # Message
-        """, re.VERBOSE + re.UNICODE)
-        self.out.write(formatter.table(1))
-        for line in lines:
-            match = pattern.match(line)
-            if match:
-                self.out.write(formatter.table_row(1))
-                for g in ('time', 'nick', 'msg'):
-                    self.out.write(formatter.table_cell(1))
-                    self.out.write(formatter.text(match.group(g) or ''))
-                    self.out.write(formatter.table_cell(0))
-                self.out.write(formatter.table_row(0))
-        self.out.write(formatter.table(0))
-
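
The removed irc.py above (now text_irssi.py) centres on one verbose regular expression. A standalone illustration of the log-line shape it accepts; the pattern is the one from that file (comments shortened), the sample line is invented:

    import re

    pattern = re.compile(r"""
        ((\[|\()?                      # optional opening bracket of the timestamp
            (?P<time>([\d]?\d[:.]?)+)  # timestamp digits separated by : or .
        (\]|\))?\s+)?                  # optional closing bracket plus whitespace
        <\s*?(?P<nick>.*?)\s*?>        # nick
        \s+                            # space between nick and message
        (?P<msg>.*)                    # message
    """, re.VERBOSE | re.UNICODE)

    match = pattern.match("[12:34] <alice> hello world")
    print("%s | %s | %s" % (match.group('time'), match.group('nick'), match.group('msg')))
    # prints: 12:34 | alice | hello world
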
--- a/MoinMoin/parser/java.py	Tue May 16 13:10:13 2006 +0200
+++ /dev/null	Thu Jan 01 00:00:00 1970 +0000
@@ -1,42 +0,0 @@
-# -*- coding: iso-8859-1 -*-
-"""
-	MoinMoin - Java Source Parser
-
-    @copyright: 2002 by Taesu Pyo <bigflood@hitel.net>
-    @license: GNU GPL, see COPYING for details.
-
-"""
-
-from MoinMoin.util.ParserBase import ParserBase
-
-Dependencies = []
-
-class Parser(ParserBase):
-
-    parsername = "ColorizedJava"
-    extensions = ['.java']
-    Dependencies = []
-
-    def setupRules(self):
-        ParserBase.setupRules(self)
-
-        self.addRulePair("Comment","/[*]","[*]/")
-        self.addRule("Comment","//.*$")
-        self.addRulePair("String",'"',r'$|[^\\](\\\\)*"')
-        self.addRule("Char",r"'\\.'|'[^\\]'")
-        self.addRule("Number",r"[0-9](\.[0-9]*)?(eE[+-][0-9])?[flFLdD]?|0[xX][0-9a-fA-F]+[Ll]?")
-        self.addRule("ID","[a-zA-Z_][0-9a-zA-Z_]*")
-        self.addRule("SPChar",r"[~!%^&*()+=|\[\]:;,.<>/?{}-]")
-
-        reserved_words = ['class','interface','enum','import','package',
-        'byte','int','long','float','double','char','short','void','boolean',
-        'static','final','const','private','public','protected',
-        'new','this','super','abstract','native','synchronized','transient','volatile','strictfp',
-        'extends','implements','if','else','while','for','do','switch','case','default','instanceof',
-        'try','catch','finally','throw','throws','return','continue','break']
-
-        self.addReserved(reserved_words)
-
-        constant_words = ['true','false','null']
-
-        self.addConstant(constant_words)
--- a/MoinMoin/parser/pascal.py	Tue May 16 13:10:13 2006 +0200
+++ /dev/null	Thu Jan 01 00:00:00 1970 +0000
@@ -1,50 +0,0 @@
-# -*- coding: iso-8859-1 -*-
-"""
-	MoinMoin - Pascal Source Parser
-
-    @copyright: 2004-2005 by Johannes Berg <johannes@sipsolutions.net>
-    @license: GNU GPL, see COPYING for details.
-"""
-
-from MoinMoin.util.ParserBase import ParserBase
-
-Dependencies = []
-
-class Parser(ParserBase):
-
-    parsername = 'ColorizedPascal'
-    extensions = ['.pas']
-    Dependencies = []
-
-    def __init__(self, raw, request, **kw):
-        ParserBase.__init__(self,raw,request,**kw)
-        self._ignore_case = 1
-
-    def setupRules(self):
-        ParserBase.setupRules(self)
-        
-        self.addRulePair("Comment","\(\*","\*\)")
-        self.addRulePair("Comment","\{","\}")
-        self.addRule("Comment","//.*$")
-        self.addRulePair("String",'\'','\'')
-        self.addRule("Char",r"'\\.'|#[a-f0-9][a-f0-9]")
-        self.addRule("Number",r"[0-9](\.[0-9]*)?(eE[+-][0-9])?|\$[0-9a-fA-F]+")
-        self.addRule("ID","[a-zA-Z_][0-9a-zA-Z_]*")
-        self.addRule("SPChar",r"[~!%^&*()+=|\[\]:;,.<>/?{}-]")
-        
-        reserved_words = ['class','interface','set','uses','unit',
-                          'byte','integer','longint','float','double',
-                          'extended','char','shortint','boolean',
-                          'var','const','private','public','protected',
-                          'new','this','super','abstract','native',
-                          'synchronized','transient','volatile','strictfp',
-                          'if','else','while','for','do','case','default',
-                          'try','except','finally','raise','continue','break',
-                          'begin','end','type','class','implementation',
-                          'procedure','function','constructor','destructor']
-        
-        self.addReserved(reserved_words)
-        
-        constant_words = ['true','false','nil']
-        
-        self.addConstant(constant_words)
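
cplusplus.py, java.py and pascal.py above (renamed to text_cplusplus.py, text_java.py and text_pascal.py in this changeset) all follow the same ParserBase recipe. A hedged sketch of a new colorizing parser built the same way; the language, extension and rules are invented, and the rule names correspond to the CSS classes listed in the cplusplus.py docstring:

    from MoinMoin.util.ParserBase import ParserBase

    Dependencies = []

    class Parser(ParserBase):
        parsername = "ColorizedExample"     # display name, cf. ColorizedJava above
        extensions = ['.ex']                # file extensions handled (for inline:)
        Dependencies = []

        def setupRules(self):
            ParserBase.setupRules(self)
            self.addRule("Comment", "#.*$")                       # line comments
            self.addRulePair("String", '"', r'$|[^\\](\\\\)*"')   # double-quoted strings
            self.addRule("ID", "[a-zA-Z_][0-9a-zA-Z_]*")
            self.addReserved(['if', 'else', 'while', 'return'])   # rendered as ResWord
            self.addConstant(['true', 'false', 'nil'])            # rendered as ConsWord
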
--- a/MoinMoin/parser/plain.py	Tue May 16 13:10:13 2006 +0200
+++ /dev/null	Thu Jan 01 00:00:00 1970 +0000
@@ -1,35 +0,0 @@
-# -*- coding: iso-8859-1 -*-
-"""
-    MoinMoin - Plain Text Parser
-
-    @copyright: 2000, 2001, 2002 by Jürgen Hermann <jh@web.de>
-    @license: GNU GPL, see COPYING for details.
-"""
-
-Dependencies = []
-
-class Parser:
-    """
-        Send plain text in a HTML <pre> element.
-    """
-
-    ## specify extensions willing to handle (for inline:)
-    ## should be a list of extensions including the leading dot
-    ## TODO: remove the leading dot from the extension. This is stupid.
-    #extensions = ['.txt']
-    ## use '*' instead of the list(!) to specify a default parser
-    ## which is used as fallback
-    extensions = '*'
-    Dependencies = []
-    
-    def __init__(self, raw, request, **kw):
-        self.raw = raw
-        self.request = request
-        self.form = request.form
-        self._ = request.getText
-
-    def format(self, formatter):
-        """ Send the text. """
-        self.request.write(formatter.preformatted(1))
-        self.request.write(formatter.text(self.raw.expandtabs()))
-        self.request.write(formatter.preformatted(0))
--- a/MoinMoin/parser/python.py	Tue May 16 13:10:13 2006 +0200
+++ /dev/null	Thu Jan 01 00:00:00 1970 +0000
@@ -1,125 +0,0 @@
-# -*- coding: iso-8859-1 -*-
-"""
-    MoinMoin - highlighting Python Source Parser
-
-    @copyright: 2001 by Jürgen Hermann <jh@web.de>
-    @license: GNU GPL, see COPYING for details.
-"""
-
-import StringIO
-import keyword, token, tokenize, sha
-from MoinMoin import config, wikiutil
-from MoinMoin.util.ParserBase import parse_start_step
-
-_KEYWORD = token.NT_OFFSET + 1
-_TEXT    = token.NT_OFFSET + 2
-
-_tokens = {
-    token.NUMBER:       'Number',
-    token.OP:           'Operator',
-    token.STRING:       'String',
-    tokenize.COMMENT:   'Comment',
-    token.NAME:         'ID',
-    token.ERRORTOKEN:   'Error',
-    _KEYWORD:           'ResWord',
-    _TEXT:              'Text',
-}
-
-Dependencies = []
-
-class Parser:
-    """ Send colored python source.
-    """
-
-    extensions = ['.py']
-    Dependencies = []
-
-    def __init__(self, raw, request, **kw):
-        """ Store the source text.
-        """
-        self.raw = raw.expandtabs().rstrip()
-        self.request = request
-        self.form = request.form
-        self._ = request.getText
-
-        self.show_num, self.num_start, self.num_step, attrs = parse_start_step(request, kw.get('format_args',''))
-
-    def format(self, formatter):
-        """ Parse and send the colored source.
-        """
-        # store line offsets in self.lines
-        self.lines = [0, 0]
-        pos = 0
-        while 1:
-            pos = self.raw.find('\n', pos) + 1
-            if not pos: break
-            self.lines.append(pos)
-        self.lines.append(len(self.raw))
-
-        self._code_id = sha.new(self.raw.encode(config.charset)).hexdigest()
-        self.request.write(formatter.code_area(1, self._code_id, 'ColorizedPython', self.show_num, self.num_start, self.num_step))
-        self.formatter = formatter
-        self.request.write(formatter.code_line(1))
-        #len('%d' % (len(self.lines)-1, )))
-        
-        # parse the source and write it
-        self.pos = 0
-        text = StringIO.StringIO(self.raw)
-        try:
-            tokenize.tokenize(text.readline, self)
-        except tokenize.TokenError, ex:
-            msg = ex[0]
-            line = ex[1][0]
-            errmsg = (self.formatter.linebreak() + 
-                      self.formatter.strong(1) + "ERROR: %s" % msg + self.formatter.strong(0) +
-                      self.formatter.linebreak() +
-                      wikiutil.escape(self.raw[self.lines[line]:]))
-            self.request.write(errmsg)
-        self.request.write(self.formatter.code_line(0))
-        self.request.write(formatter.code_area(0, self._code_id))
-
-    def __call__(self, toktype, toktext, (srow,scol), (erow,ecol), line):
-        """ Token handler.
-        """
-        if 0: print "type", toktype, token.tok_name[toktype], "text", toktext, \
-                    "start", srow,scol, "end", erow,ecol, "<br>"
-
-        # calculate new positions
-        oldpos = self.pos
-        newpos = self.lines[srow] + scol
-        self.pos = newpos + len(toktext)
-
-        # handle newlines
-        if toktype in [token.NEWLINE, tokenize.NL]:
-            self.request.write(self.formatter.code_line(0))
-            self.request.write(self.formatter.code_line(1))
-            return
-
-        # send the original whitespace, if needed
-        if newpos > oldpos:
-            self.request.write(self.formatter.text(self.raw[oldpos:newpos]))
-
-        # skip indenting tokens
-        if toktype in [token.INDENT, token.DEDENT]:
-            self.pos = newpos
-            return
-
-        # map token type to a color group
-        if token.LPAR <= toktype and toktype <= token.OP:
-            toktype = token.OP
-        elif toktype == token.NAME and keyword.iskeyword(toktext):
-            toktype = _KEYWORD
-        tokid = _tokens.get(toktype, _tokens[_TEXT])
-
-        # send text
-        first = 1
-        for part in toktext.split('\n'):
-            if not first:
-                self.request.write(self.formatter.code_line(0))
-                self.request.write(self.formatter.code_line(1))
-            else:
-                first = 0
-            self.request.write(self.formatter.code_token(1, tokid) +
-                               self.formatter.text(part) +
-                               self.formatter.code_token(0, tokid))
-
--- a/MoinMoin/parser/rst.py	Tue May 16 13:10:13 2006 +0200
+++ /dev/null	Thu Jan 01 00:00:00 1970 +0000
@@ -1,602 +0,0 @@
-# -*- coding: iso-8859-1 -*-
-"""
-    MoinMoin - ReStructured Text Parser
-
-    @copyright: 2004 by Matthew Gilbert <gilbert AT voxmea DOT net>
-        and by Alexander Schremmer <alex AT alexanderweb DOT de>
-    @license: GNU GPL, see COPYING for details.
-
-    REQUIRES docutils 0.3.10 or later (must be later than December 30th, 2005)
-"""
-
-import re
-import new
-import StringIO
-import __builtin__
-import sys
-
-import types
-import os
-
-# docutils imports are below
-import MoinMoin.parser.wiki
-from MoinMoin.Page import Page
-from MoinMoin.action import AttachFile
-from MoinMoin import wikiutil
-
-Dependencies = [] # this parser just depends on the raw text
-
-# --- make docutils safe by overriding all module-scoped names related to IO ---
-
-# TODO: Add an error message to dummyOpen so that the user knows that they
-# requested an unsupported feature of docutils in MoinMoin.
-def dummyOpen(x, y=None, z=None): return
-
-class dummyIO(StringIO.StringIO):
-    def __init__(self, destination=None, destination_path=None,
-                 encoding=None, error_handler='', autoclose=1,
-                 handle_io_errors=1, source_path=None):
-        StringIO.StringIO.__init__(self)
-
-class dummyUrllib2:
-    def urlopen(a):
-        return StringIO.StringIO()
-    urlopen = staticmethod(urlopen)
-
-# # # All docutils imports must be contained below here
-try:
-    import docutils
-    from docutils.core import publish_parts
-    from docutils.writers import html4css1
-    from docutils.nodes import reference
-    from docutils.parsers import rst
-    from docutils.parsers.rst import directives, roles
-# # # All docutils imports must be contained above here
-
-    ErrorParser = None # used in the case of missing docutils
-    docutils.io.FileOutput = docutils.io.FileInput = dummyIO
-except ImportError:
-    # we need to workaround this totally broken plugin interface that does
-    # not allow us to raise exceptions
-    class ErrorParser:
-        caching = 0
-        Dependencies = Dependencies # copy dependencies from module-scope
-
-        def __init__(self, raw, request, **kw):
-            self.raw = raw
-            self.request = request
-    
-        def format(self, formatter):
-            _ = self.request.getText
-            from MoinMoin.parser import plain
-            self.request.write(formatter.sysmsg(1) +
-                               formatter.rawHTML(_('Rendering of reStructured text is not possible, ''please'' install docutils.')) +
-                               formatter.sysmsg(0))
-            plain.Parser(self.raw, self.request).format(formatter)
-    
-    # Create a pseudo docutils environment
-    docutils = html4css1 = dummyUrllib2()
-    html4css1.HTMLTranslator = html4css1.Writer = object
-
-def safe_import(name, globals = None, locals = None, fromlist = None):
-    mod = __builtin__.__import__(name, globals, locals, fromlist)
-    if mod:
-        mod.open = dummyOpen
-        mod.urllib2 = dummyUrllib2
-    return mod
-
-# Go through and change all docutils modules to use a dummyOpen and dummyUrllib2
-# module. Also make sure that any docutils imported modules also get the dummy
-# implementations.
-for i in sys.modules.keys():
-    if i.startswith('docutils') and sys.modules[i]:
-        sys.modules[i].open = dummyOpen
-        sys.modules[i].urllib2 = dummyUrllib2
-        sys.modules[i].__import__ = safe_import
-        
-# --- End of dummy-code --------------------------------------------------------
-
-def html_escape_unicode(node):
-    # Find Python function that does this for me. string.encode('ascii',
-    # 'xmlcharrefreplace') only 2.3 and above.
-    for i in node:
-        if ord(i) > 127:
-            node = node.replace(i, '&#%d;' % (ord(i)))
-    return node
-
-class MoinWriter(html4css1.Writer):
-
-    config_section = 'MoinMoin writer'
-    config_section_dependencies = ('writers',)
-
-    #"""Final translated form of `document`."""
-    output = None
-
-    def wiki_resolver(self, node):
-        """
-            Normally an unknown reference would be an error in a reST document.
-            However, this is how new documents are created in the wiki. This
-            passes on unknown references to eventually be handled by
-            MoinMoin.
-        """
-        if hasattr(node, 'indirect_reference_name'):
-            node['refuri'] = node.indirect_reference_name
-        elif (len(node['ids']) != 0):
-            # If the node has an id then it's probably an internal link. Let
-            # docutils generate an error.
-            return False
-        elif node.hasattr('name'):
-            node['refuri'] = node['name']
-        else:
-            node['refuri'] = node['refname']
-        del node['refname']
-        node.resolved = 1
-        self.nodes.append(node)
-        return True
-
-    wiki_resolver.priority = 001
-
-    def __init__(self, formatter, request):
-        html4css1.Writer.__init__(self)
-        self.formatter = formatter
-        self.request = request
-        # Add our wiki unknown_reference_resolver to our list of functions to
-        # run when a target isn't found
-        self.unknown_reference_resolvers = [self.wiki_resolver]
-        # We create a new parser to process MoinMoin wiki style links in the
-        # reST.
-        self.wikiparser = MoinMoin.parser.wiki.Parser('', self.request)
-        self.wikiparser.formatter = self.formatter
-        self.wikiparser.hilite_re = None
-        self.nodes = []
-        # Make sure it's a supported docutils version.
-        required_version = (0, 3, 10)
-        current_version = tuple([int(i) for i in (docutils.__version__.split('.')+['0','0'])[:3]])
-        if current_version < required_version:
-            err = 'ERROR: The installed docutils version is %s;' % ('.'.join([str(i) for i in current_version]))
-            err += ' version %s or later is required.' % ('.'.join([str(i) for i in required_version]))
-            raise RuntimeError, err
-
-    def translate(self):
-        visitor = MoinTranslator(self.document,
-                                 self.formatter,
-                                 self.request,
-                                 self.wikiparser,
-                                 self)
-        self.document.walkabout(visitor)
-        self.visitor = visitor
-        # Docutils 0.5.0 and later require the writer to have the visitor 
-        # attributes.
-        if (hasattr(html4css1.Writer, 'visitor_attributes')):
-            for attr in html4css1.Writer.visitor_attributes:
-                setattr(self, attr, getattr(visitor, attr))
-        self.output = html_escape_unicode(visitor.astext())
-
-class Parser:
-    caching = 1
-    Dependencies = Dependencies # copy dependencies from module-scope
-
-    def __init__(self, raw, request, **kw):
-        self.raw = raw
-        self.request = request
-        self.form = request.form
-
-    def format(self, formatter):
-        # Create our simple parser
-        parser = MoinDirectives(self.request)
-
-        parts = publish_parts(
-            source = self.raw,
-            writer = MoinWriter(formatter, self.request),
-            settings_overrides = {
-                'halt_level': 5,
-                'traceback': True,
-                'file_insertion_enabled': 0,
-                'raw_enabled': 0,
-                'stylesheet_path': '',
-                'template': '',
-            }
-        )
-
-        html = []
-        if parts['title']:
-            html.append(formatter.rawHTML('<h1>%s</h1>' % parts['title']))
-        # If there is only one subtitle then it is held in parts['subtitle'].
-        # However, if there is more than one subtitle then this is empty and
-        # fragment contains all of the subtitles.
-        if parts['subtitle']:
-            html.append(formatter.rawHTML('<h2>%s</h2>' % parts['subtitle']))
-        if parts['docinfo']:
-            html.append(parts['docinfo'])
-        html.append(parts['fragment'])
-        self.request.write(html_escape_unicode('\n'.join(html)))
-
-class RawHTMLList(list):
-    """
-        RawHTMLList catches all html appended to internal HTMLTranslator lists.
-        It passes the HTML through the MoinMoin rawHTML formatter to strip 
-        markup when necessary. This is to support other formatting outputs
-        (such as ?action=show&mimetype=text/plain).
-    """
-    
-    def __init__(self, formatter):
-        self.formatter = formatter
-        
-    def append(self, text):
-        f = sys._getframe()
-        if f.f_back.f_code.co_filename.endswith('html4css1.py'):
-            if isinstance(text, types.StringType) or isinstance(text, types.UnicodeType):
-                text = self.formatter.rawHTML(text)
-        list.append(self, text)
-
-class MoinTranslator(html4css1.HTMLTranslator):
-
-    def __init__(self, document, formatter, request, parser, writer):
-        html4css1.HTMLTranslator.__init__(self, document)
-        self.formatter = formatter
-        self.request = request
-        # Using our own writer when needed. Save the old one to restore
-        # after the page has been processed by the html4css1 parser.
-        self.original_write, self.request.write = self.request.write, self.capture_wiki_formatting
-        self.wikiparser = parser
-        self.wikiparser.request = request
-        # MoinMoin likes to start the initial headers at level 3 and the title
-        # gets level 2, so to comply with their styles, we do here also.
-        # TODO: Could this be fixed by passing this value in settings_overrides?
-        self.initial_header_level = 3
-        # Temporary place for wiki returned markup. This will be filled when
-        # replacing the default writer with the capture_wiki_formatting
-        # function (see visit_image for an example).
-        self.wiki_text = ''
-        self.setup_wiki_handlers()
-        self.setup_admonitions_handlers()
-        
-        # Make all internal lists RawHTMLLists, see RawHTMLList class
-        # comment for more information.
-        for i in self.__dict__:
-            if isinstance(getattr(self, i), types.ListType):
-                setattr(self, i, RawHTMLList(formatter))
-
-    def depart_docinfo(self, node):
-        """
-            depart_docinfo assigns a new list to self.body, we need to re-make that
-            into a RawHTMLList.
-        """
-        html4css1.HTMLTranslator.depart_docinfo(self, node)
-        self.body = RawHTMLList(self.formatter)
-
-    def capture_wiki_formatting(self, text):
-        """
-            Captures MoinMoin generated markup to the instance variable
-            wiki_text.
-        """
-        # For some reason getting empty strings here which of course overwrites
-        # what we really want (this is called multiple times per MoinMoin
-        # format call, which I don't understand).
-        self.wiki_text += text
-
-    def process_wiki_text(self, text):
-        """
-            This sequence is repeated numerous times, so its captured as a
-            single call here. Its important that wiki_text is blanked before we
-            make the format call. format will call request.write which we've
-            hooked to capture_wiki_formatting. If wiki_text is not blanked
-            before a call to request.write we will get the old markup as well as
-            the newly generated markup.
-
-            TODO: Could implement this as a list so that it acts as a stack. I
-            don't like having to remember to blank wiki_text.
-        """
-        self.wiki_text = ''
-        self.wikiparser.raw = text
-        self.wikiparser.format(self.formatter)
-
-    def add_wiki_markup(self):
-        """
-            Place holder in case this becomes more elaborate someday. For now it
-            only appends the MoinMoin generated markup to the html body and
-            raises SkipNode.
-        """
-        self.body.append(self.wiki_text)
-        self.wiki_text = ''
-        raise docutils.nodes.SkipNode
-
-    def astext(self):
-        self.request.write = self.original_write
-        return html4css1.HTMLTranslator.astext(self)
-
-    def fixup_wiki_formatting(self, text):
-        replacement = {'<p>': '', '</p>': '', '\n': '', '> ': '>'}
-        for src, dst in replacement.items():
-            text = text.replace(src, dst)
-        # Everything seems to have a space ending the text block. We want to
-        # get rid of this
-        if text and text[-1] == ' ':
-            text = text[:-1]
-        return text
-
-    def visit_reference(self, node):
-        """
-            Pass links to MoinMoin to get the correct wiki space url. Extract
-            the url and pass it on to the html4css1 writer to handle. Inline
-            images are also handled by visit_image. Not sure what the "drawing:"
-            link scheme is used for, so for now it is handled here.
-
-            Also included here is a hack to allow MoinMoin macros. This routine
-            checks for a link which starts with "[[". This link is passed to the
-            MoinMoin formatter and the resulting markup is inserted into the
-            document in the place of the original link reference.
-        """
-        if 'refuri' in node.attributes:
-            refuri = node['refuri']
-            prefix = ''
-            link = refuri
-            if ':' in refuri:
-                prefix, link = refuri.lstrip().split(':', 1)
-            
-            # First see if MoinMoin should handle completely. Exits through add_wiki_markup.
-            if ((refuri.startswith('[[') and refuri.endswith(']]')) or 
-                    (prefix == 'drawing') or
-                    (prefix == 'inline')):
-                self.process_wiki_text(refuri)
-                # Don't call fixup_wiki_formatting because who knows what
-                # MoinMoin is inserting. (exits through add_wiki_markup)
-                self.add_wiki_markup()
-
-            # From here down, all links are handled by docutils (except 
-            # missing attachments), just fixup node['refuri'].
-            if prefix == 'attachment':
-                attach_file = AttachFile.getFilename(self.request, 
-                        self.request.page.page_name, link)
-                if not os.path.exists(attach_file):
-                    # Attachment doesn't exist, give to MoinMoin to insert
-                    # upload text.
-                    self.process_wiki_text(refuri)
-                    self.add_wiki_markup()
-                # Attachment exists, just get a link to it.
-                node['refuri'] = AttachFile.getAttachUrl(self.request.page.page_name, 
-                        link, self.request)
-                if not [i for i in node.children if i.__class__ == docutils.nodes.image]:
-                    node['classes'].append(prefix)                
-            elif prefix == 'wiki':
-                wikitag, wikiurl, wikitail, err = wikiutil.resolve_wiki(self.request, link)
-                wikiurl = wikiutil.mapURL(self.request, wikiurl)
-                node['refuri'] = wikiutil.join_wiki(wikiurl, wikitail)
-                # Only add additional class information if the reference does
-                # not have a child image (don't want to add additional markup
-                # for images with targets).
-                if not [i for i in node.children if i.__class__ == docutils.nodes.image]:
-                    node['classes'].append('interwiki')
-            elif prefix != '':
-                # Some link scheme (http, file, https, mailto, etc.), add class
-                # information if the reference doesn't have a child image (don't 
-                # want additional markup for images with targets). 
-                # Don't touch the refuri.
-                if not [i for i in node.children if i.__class__ == docutils.nodes.image]:
-                    node['classes'].append(prefix)
-            else:
-                # Default case - make a link to a wiki page.
-                page = MoinMoin.Page.Page(self.request, refuri)
-                node['refuri'] = page.url(self.request)
-                if not page.exists():
-                    node['classes'].append('nonexistent')
-        html4css1.HTMLTranslator.visit_reference(self, node)
-
-    def visit_image(self, node):
-        """
-            Need to intervene in the case of inline images. We need MoinMoin to
-            give us the actual src line to the image and then we can feed this
-            to the default html4css1 writer. NOTE: Since the writer can't "open"
-            this image the scale attribute doesn't work without directly
-            specifying the height or width (or both).
-
-            TODO: Need to handle figures similarly.
-        """
-        uri = node['uri'].lstrip()
-        prefix = ''          # assume no prefix
-        attach_name = uri
-        if ':' in uri:
-            prefix = uri.split(':', 1)[0]
-            attach_name = uri.split(':', 1)[1]
-        # if prefix isn't URL, try to display in page
-        if not prefix.lower() in ('file', 'http', 'https', 'ftp'):
-            attach_file = AttachFile.getFilename(self.request, 
-                    self.request.page.page_name, attach_name)
-            if not os.path.exists(attach_file):
-                # Attachment doesn't exist, MoinMoin should process it
-                if prefix == '':
-                    prefix = 'inline:'
-                self.process_wiki_text(prefix + attach_name)
-                self.wiki_text = self.fixup_wiki_formatting(self.wiki_text)
-                self.add_wiki_markup()
-            # Attachment exists, get a link to it.
-            # create the url
-            node['uri'] = AttachFile.getAttachUrl(self.request.page.page_name, 
-                    attach_name, self.request, addts = 1)
-            if not node.hasattr('alt'):
-                node['alt'] = node.get('name', uri)
-        html4css1.HTMLTranslator.visit_image(self, node)
-
-    def create_wiki_functor(self, moin_func):
-        moin_callable = getattr(self.formatter, moin_func)
-        def visit_func(self, node):
-            self.wiki_text = ''
-            self.request.write(moin_callable(1))
-            self.body.append(self.wiki_text)
-        def depart_func(self, node):
-            self.wiki_text = ''
-            self.request.write(moin_callable(0))
-            self.body.append(self.wiki_text)
-        return visit_func, depart_func
-
-    def setup_wiki_handlers(self):
-        """
-            Have the MoinMoin formatter handle markup when it makes sense. These
-            are portions of the document that do not contain reST specific
-            markup. This allows these portions of the document to look
-            consistent with other wiki pages.
-
-            Setup dispatch routines to handle basic document markup. The
-            handlers dict is the html4css1 handler name followed by the wiki
-            handler name.
-        """
-        handlers = {
-            # Text Markup
-            'emphasis': 'emphasis',
-            'strong': 'strong',
-            'literal': 'code',
-            # Blocks
-            'literal_block': 'preformatted',
-            # Simple Lists
-            # bullet-lists are handled completely by docutils because it uses
-            # the node context to decide when to make a compact list 
-            # (no <p> tags).
-            'list_item': 'listitem',
-            # Definition List
-            'definition_list': 'definition_list',
-        }
-        for rest_func, moin_func in handlers.items():
-            visit_func, depart_func = self.create_wiki_functor(moin_func)
-            visit_func = new.instancemethod(visit_func, self, MoinTranslator)
-            depart_func = new.instancemethod(depart_func, self, MoinTranslator)
-            setattr(self, 'visit_%s' % (rest_func), visit_func)
-            setattr(self, 'depart_%s' % (rest_func), depart_func)
-
-    # Enumerated list takes an extra parameter so we handle this differently
-    def visit_enumerated_list(self, node):
-        self.wiki_text = ''
-        self.request.write(self.formatter.number_list(1, start=node.get('start', None)))
-        self.body.append(self.wiki_text)
-
-    def depart_enumerated_list(self, node):
-        self.wiki_text = ''
-        self.request.write(self.formatter.number_list(0))
-        self.body.append(self.wiki_text)
-
-    # Admonitions are handled here -=- tmacam
-    def create_admonition_functor(self, admotion_class):
-        tag_class = 'admonition_' + admotion_class
-        def visit_func(self, node):
-            self.wiki_text = ''
-            self.request.write(self.formatter.div(1,
-                                                  attr={'class': tag_class},
-                                                  allowed_attrs=[]))
-            self.body.append(self.wiki_text)
-        def depart_func(self, node):
-            self.wiki_text = ''
-            self.request.write(self.formatter.div(0))
-            self.body.append(self.wiki_text)
-            
-        return visit_func, depart_func 
-
-    def setup_admonitions_handlers(self):
-        """
-            Admonitions are handled here... We basically surround admonitions
-            in a div with class admonition_{name of the admonition}.
-        """
-        handled_admonitions = [
-            'attention',
-            'caution',
-            'danger',
-            'error',
-            'hint',
-            'important',
-            'note',
-            'tip',
-            'warning',
-        ]
-        for adm in handled_admonitions:
-            visit_func, depart_func = self.create_admonition_functor(adm)
-            visit_func = new.instancemethod(visit_func, self, MoinTranslator)
-            depart_func = new.instancemethod(depart_func, self, MoinTranslator)
-            setattr(self, 'visit_%s' % (adm), visit_func)
-            setattr(self, 'depart_%s' % (adm), depart_func)
-
-
-class MoinDirectives:
-    """
-        Class to handle all custom directive handling. This code is called as
-        part of the parsing stage.
-    """
-
-    def __init__(self, request):
-        self.request = request
-
-        # include MoinMoin pages
-        directives.register_directive('include', self.include)
-
-        # used for MoinMoin macros
-        directives.register_directive('macro', self.macro)
-
-        # disallow a few directives in order to prevent XSS
-        # for directive in ('meta', 'include', 'raw'):
-        for directive in ('meta', 'raw'):
-            directives.register_directive(directive, None)
-
-        # disable the raw role
-        roles._roles['raw'] = None
-
-        # As a quick fix for infinite includes we only allow a fixed number of
-        # includes per page
-        self.num_includes = 0
-        self.max_includes = 10
-
-    # Handle the include directive rather than letting the default docutils
-    # parser handle it. This allows the inclusion of MoinMoin pages instead of
-    # something from the filesystem.
-    def include(self, name, arguments, options, content, lineno,
-                content_offset, block_text, state, state_machine):
-        # content contains the included file name
-
-        _ = self.request.getText
-
-        # Limit the number of documents that can be included
-        if self.num_includes < self.max_includes:
-            self.num_includes += 1
-        else:
-            lines = [_("**Maximum number of allowed includes exceeded**")]
-            state_machine.insert_input(lines, 'MoinDirectives')
-            return
-
-        if len(content):
-            page = Page(page_name = content[0], request = self.request)
-            if page.exists():
-                text = page.get_raw_body()
-                lines = text.split('\n')
-                # Remove the "#format rst" line
-                if lines[0].startswith("#format"):
-                    del lines[0]
-            else:
-                lines = [_("**Could not find the referenced page: %s**") % (content[0],)]
-            # Insert the text from the included document and then continue
-            # parsing
-            state_machine.insert_input(lines, 'MoinDirectives')
-        return
-
-    include.content = True
-
-    # Add additional macro directive.
-    # This allows MoinMoin macros to be used either by using the directive
-    # directly or by using the substitution syntax. Much cleaner than using the
-    # reference hack (`[[SomeMacro]]`_). This however simply adds a node to the
-    # document tree which is a reference, but through a much better user
-    # interface.
-    def macro(self, name, arguments, options, content, lineno,
-                content_offset, block_text, state, state_machine):
-        # content contains macro to be called
-        if len(content):
-            # Allow either with or without brackets
-            if content[0].startswith('[['):
-                macro = content[0]
-            else:
-                macro = '[[%s]]' % content[0]
-            ref = reference(macro, refuri = macro)
-            ref['name'] = macro
-            return [ref]
-        return
-
-    macro.content = True
-
-if ErrorParser: # fixup in case of missing docutils
-    Parser = ErrorParser
-
--- /dev/null	Thu Jan 01 00:00:00 1970 +0000
+++ b/MoinMoin/parser/text.py	Tue May 16 20:12:29 2006 +0200
@@ -0,0 +1,35 @@
+# -*- coding: iso-8859-1 -*-
+"""
+    MoinMoin - Plain Text Parser, fallback for text/*
+
+    @copyright: 2000, 2001, 2002 by Jürgen Hermann <jh@web.de>
+    @license: GNU GPL, see COPYING for details.
+"""
+
+Dependencies = []
+
+class Parser:
+    """
+        Send plain text in an HTML <pre> element.
+    """
+
+    ## specify extensions willing to handle (for inline:)
+    ## should be a list of extensions including the leading dot
+    ## TODO: remove the leading dot from the extension. This is stupid.
+    #extensions = ['.txt']
+    ## use '*' instead of the list(!) to specify a default parser
+    ## which is used as fallback
+    extensions = '*'
+    Dependencies = []
+    
+    def __init__(self, raw, request, **kw):
+        self.raw = raw
+        self.request = request
+        self.form = request.form
+        self._ = request.getText
+
+    def format(self, formatter):
+        """ Send the text. """
+        self.request.write(formatter.preformatted(1))
+        self.request.write(formatter.text(self.raw.expandtabs()))
+        self.request.write(formatter.preformatted(0))
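+
+# Usage sketch (illustration only; "formatter" stands for whatever output
+# formatter the current request uses, e.g. the HTML formatter):
+#
+#     parser = Parser(u"some raw page text", request)
+#     parser.format(formatter)
+#
+# This writes the text, tabs expanded, wrapped in a preformatted block and
+# passed through formatter.text().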
--- /dev/null	Thu Jan 01 00:00:00 1970 +0000
+++ b/MoinMoin/parser/text_cplusplus.py	Tue May 16 20:12:29 2006 +0200
@@ -0,0 +1,70 @@
+# -*- coding: iso-8859-1 -*-
+"""
+    MoinMoin - C++ Source Parser
+
+    @copyright: 2002 by Taesu Pyo <bigflood@hitel.net>
+    @license: GNU GPL, see COPYING for details.
+
+css:
+
+pre.cpparea     { font-family: sans-serif; color: #000000; }
+
+pre.cpparea span.ID       { color: #000000; }
+pre.cpparea span.Char     { color: #004080; }
+pre.cpparea span.Comment  { color: #808080; }
+pre.cpparea span.Number   { color: #008080; font-weight: bold; }
+pre.cpparea span.String   { color: #004080; }
+pre.cpparea span.SPChar   { color: #0000C0; }
+pre.cpparea span.ResWord  { color: #4040ff; font-weight: bold; }
+pre.cpparea span.ConsWord { color: #008080; font-weight: bold; }
+pre.cpparea span.ResWord2 { color: #0080ff; font-weight: bold; }
+pre.cpparea span.Special  { color: #0000ff; }
+pre.cpparea span.Preprc   { color: #804000; }
+
+"""
+
+from MoinMoin.util.ParserBase import ParserBase
+
+Dependencies = []
+
+class Parser(ParserBase):
+
+    parsername = "ColorizedCPlusPlus"
+    extensions = ['.c', '.h', '.cpp', '.c++']
+    Dependencies = []
+    
+    def setupRules(self):
+        ParserBase.setupRules(self)
+
+        self.addRulePair("Comment","/[*]","[*]/")
+        self.addRule("Comment","//.*$")
+        self.addRulePair("String",'L?"',r'$|[^\\](\\\\)*"')
+        self.addRule("Char",r"'\\.'|'[^\\]'")
+        self.addRule("Number",r"[0-9](\.[0-9]*)?(eE[+-][0-9])?[flFLdD]?|0[xX][0-9a-fA-F]+[Ll]?")
+        self.addRule("Preprc",r"^\s*#(.*\\\n)*(.*(?!\\))$")
+        self.addRule("ID","[a-zA-Z_][0-9a-zA-Z_]*")
+        self.addRule("SPChar",r"[~!%^&*()+=|\[\]:;,.<>/?{}-]")
+
+        reserved_words = ['struct','class','union','enum',
+        'int','float','double','signed','unsigned','char','short','void','bool',
+        'long','register','auto','operator',
+        'static','const','private','public','protected','virtual','explicit',
+        'new','delete','this',
+        'if','else','while','for','do','switch','case','default','sizeof',
+        'dynamic_cast','static_cast','const_cast','reinterpret_cast','typeid',
+        'try','catch','throw','throws','return','continue','break','goto']
+
+        reserved_words2 = ['extern', 'volatile', 'typedef', 'friend',
+                           '__declspec', 'inline','__asm','thread','naked',
+                           'dllimport','dllexport','namespace','using',
+                           'template','typename','goto']
+
+        special_words = ['std','string','vector','map','set','cout','cin','cerr']
+        constant_words = ['true','false','NULL']
+
+        self.addReserved(reserved_words)
+        self.addConstant(constant_words)
+
+        self.addWords(reserved_words2,'ResWord2')
+        self.addWords(special_words,'Special')
+
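+# Reading the rules above (comment only, for illustration): "Comment" covers
+# both /* ... */ pairs and // line comments, "String" covers optionally
+# L-prefixed double-quoted literals, and "Preprc" covers preprocessor lines
+# including backslash line continuations.  reserved_words2 and special_words
+# are mapped explicitly to the ResWord2 and Special styles; the css classes
+# for the styles are listed in the module docstring.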
--- /dev/null	Thu Jan 01 00:00:00 1970 +0000
+++ b/MoinMoin/parser/text_csv.py	Tue May 16 20:12:29 2006 +0200
@@ -0,0 +1,74 @@
+# -*- coding: iso-8859-1 -*-
+"""
+    MoinMoin - Parser for CSV data
+
+    This parser lacks the flexibility to read arbitrary CSV dialects.
+
+    Perhaps this should be rewritten using another CSV lib
+    because the standard module csv does not support unicode.
+
+    @copyright: 2004 by Oliver Graf <ograf@bitart.de>, Alexander Schremmer
+    @license: GNU GPL, see COPYING for details.
+"""
+
+Dependencies = []
+
+class Parser:
+    """ Format CSV data as table
+    """
+
+    extensions = ['.csv']
+    Dependencies = []
+
+    def __init__(self, raw, request, **kw):
+        """ Store the source text.
+        """
+        self.raw = raw
+        self.request = request
+        self.form = request.form
+        self._ = request.getText
+
+        # parse extra arguments for excludes
+        self.exclude = []
+        self.separator = ';'
+        for arg in kw.get('format_args','').split():
+            if arg[0] == '-':
+                try:
+                    idx = int(arg[1:])
+                except ValueError:
+                    pass
+                else:
+                    self.exclude.append(idx-1)
+            else:
+                self.separator = arg
+
+    def format(self, formatter):
+        """ Parse and send the table.
+        """
+        lines = self.raw.split('\n')
+        if lines[0]:
+            # expect column headers in first line
+            first = 1
+        else:
+            # empty first line, no bold headers
+            first = 0
+            del lines[0]
+
+        self.request.write(formatter.table(1))
+        for line in lines:
+            self.request.write(formatter.table_row(1))
+            cells = line.split(self.separator)
+            for idx in range(len(cells)):
+                if idx in self.exclude:
+                    continue
+                self.request.write(formatter.table_cell(1))
+                if first:
+                    self.request.write(formatter.strong(1))
+                self.request.write(formatter.text(cells[idx]))
+                if first:
+                    self.request.write(formatter.strong(0))
+                self.request.write(formatter.table_cell(0))
+            self.request.write(formatter.table_row(0))
+            first = 0
+        self.request.write(formatter.table(0))
+
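+# Example of the format_args handling in __init__ (illustration only):
+#
+#     Parser(raw, request, format_args='-1 ;')   # exclude == [0], separator ';'
+#     Parser(raw, request, format_args=',')      # only switches separator to ','
+#
+# i.e. "-N" drops the Nth column and any other token becomes the separator.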
--- /dev/null	Thu Jan 01 00:00:00 1970 +0000
+++ b/MoinMoin/parser/text_docbook.py	Tue May 16 20:12:29 2006 +0200
@@ -0,0 +1,197 @@
+# -*- coding: iso-8859-1 -*-
+"""
+    MoinMoin - DocBook-XML Parser
+
+    This code was tested with 4Suite 1.0a4 and 1.0b1
+
+    @copyright: 2005 by Henry Ho <henryho167 AT hotmail DOT com>
+    @copyright: 2005 by MoinMoin:AlexanderSchremmer
+    @license: GNU GPL, see COPYING for details.
+
+    DOCBOOK Parser:
+
+    Features:
+    - image support through Attachment
+    - internal Wikilinks if a word is a strict wikiname
+    - image alt is preserved
+    - works with compiled xslt stylesheet for optimized performance
+
+    Configuration:
+    - make sure you have installed the DocBook XSLT files
+    - set the path to the html directory of the DocBook XSLT files in your
+      wiki or farm configuration:
+      docbook_html_dir = r"/usr/share/xml/docbook/stylesheet/nwalsh/html/"
+      Note that this directory needs to be writable because a cache file will
+      be created there.
+
+    How can I use the Ft API for DTD validation?
+    If you have PyXML installed, you can use ValidatingReader rather than
+    NonvalidatingReader.  See:
+    http://uche.ogbuji.net/tech/akara/nodes/2003-01-01/domlettes
+"""
+
+import StringIO
+import os.path
+import cPickle
+import re
+
+from MoinMoin import caching, config, wikiutil, Page
+from MoinMoin.parser.text_xslt import Parser as XsltParser
+from MoinMoin.parser.text_moin_wiki import Parser as WikiParser
+
+Dependencies = []
+
+class Parser(XsltParser):
+    """
+        Send XML file formatted via XSLT.
+    """
+
+    caching = 1
+    Dependencies = Dependencies
+
+    def __init__(self, raw, request, **kw):
+        XsltParser.__init__(self, raw, request)
+
+        # relative path to docbook.xsl and compiled_xsl
+        docbook_html_directory = request.cfg.docbook_html_dir
+        self.db_xsl = os.path.join(docbook_html_directory, 'docbook.xsl')
+        self.db_compiled_xsl = os.path.join(docbook_html_directory, 'db_compiled.dat')
+
+        self.wikiParser = WikiParser(raw = self.raw, request = self.request, pretty_url=1)
+        self.key = 'docbook'
+
+    def format(self, formatter):
+        self.wikiParser.formatter = formatter
+        XsltParser.format(self, formatter)
+
+    def append_stylesheet(self):
+        """
+            Virtual function, overridden for the docbook parser.
+        """
+        abs_db_xsl = os.path.abspath(self.db_xsl)
+        abs_db_compiled_xsl = os.path.abspath(self.db_compiled_xsl)
+
+        # same as path.exists, but also test if it is a file
+        if not os.path.isfile(abs_db_compiled_xsl):
+            _compile_xsl(abs_db_xsl, abs_db_compiled_xsl)
+
+        assert os.path.isfile(abs_db_compiled_xsl)
+
+        self.processor.appendStylesheetInstance(cPickle.load(file(abs_db_compiled_xsl, 'rb')))
+
+    def parse_result(self, result):
+        """
+        Additional parsing applied to the XSLT result string before saving.
+
+        will do:
+            BASIC CLEAN UP   : remove unnecessary HTML tags
+            RESOLVE IMG SRC  : fix src to find attachment
+            RESOLVE WikiNames: if a word is a valid wikiname & a valid wikipage,
+                               replace word with hyperlink
+        """
+
+        # BASIC CLEAN UP
+        # remove from beginning until end of body tag
+        found = re.search('<body.*?>', result)
+        if found:
+            result = result[found.end():]
+
+        # remove everything after & including </body>
+        found = result.rfind('</body>')
+        if found != -1:
+            result = result[:found]
+
+        # RESOLVE IMG SRC
+        found = re.finditer('<img.*?>', result)
+        if found:
+            splitResult = _splitResult(found, result)
+            for index in range(len(splitResult)):
+                if splitResult[index].startswith('<img'):
+                    found = re.search('src="(?P<source>.*?)"', splitResult[index])
+                    imageSrc = found.group('source')
+                    imageAlt = None # save alt
+                    found = re.search('alt="(?P<alt>.*?)"', splitResult[index])
+                    if found:
+                        imageAlt = found.group('alt')
+                    splitResult[index] = self.wikiParser.attachment( ('attachment:' + imageSrc, "") )
+                    if imageAlt: # restore alt
+                        splitResult[index] = re.sub('alt=".*?"', 'alt="%s"' % imageAlt, splitResult[index])
+
+            result = ''.join(splitResult)
+
+
+        # RESOLVE WikiNames
+        #    if a word is a valid wikiname & a valid wikipage,
+        #    replace word with hyperlink
+
+        found = re.finditer(self.wikiParser.word_rule, result)
+        if found:
+            splitResult = _splitResult(found, result)
+
+            for index in range(len(splitResult)):
+                if (re.match(self.wikiParser.word_rule, splitResult[index])
+                    and Page.Page(self.request, splitResult[index]).exists()):
+                    splitResult[index] = self.wikiParser._word_repl(splitResult[index])
+            result = ''.join(splitResult)
+
+        # remove stuff that fail HTML 4.01 Strict verification
+
+        # remove unsupported attributes
+        result = re.sub(' target=".*?"| type=".*?"', '', result)
+        result = re.sub('<hr .*?>', '<hr>', result)
+
+        # remove <p>...</p> inside <a>...</a> or <caption>...</caption>
+        found = re.finditer('<a href=".*?</a>|<caption>.*?</caption>', result) # XXX re.DOTALL)
+        if found:
+            splitResult = _splitResult(found, result)
+            for index in range(len(splitResult)):
+                if (splitResult[index].startswith('<a href="')
+                    or splitResult[index].startswith('<caption>')):
+                    splitResult[index] = splitResult[index].replace('<p>', '').replace('</p>', '')
+            result = ''.join(splitResult)
+
+        return result
+
+
+
+def _compile_xsl(XSLT_FILE, XSLT_COMPILED_FILE):
+    """
+        compiling docbook stylesheet
+
+        reference: http://155.210.85.193:8010/ccia/nodes/2005-03-18/compileXslt?xslt=/akara/akara.xslt
+    """
+    from Ft.Xml.Xslt.Processor import Processor
+    from Ft.Xml.Xslt import Stylesheet
+    from Ft.Xml import InputSource
+    from Ft.Lib import Uri
+
+    # New docbook processor
+    db_processor=Processor()
+
+    # Docbook Stylesheet
+    my_sheet_uri = Uri.OsPathToUri(XSLT_FILE, 1)
+    sty_isrc = InputSource.DefaultFactory.fromUri(my_sheet_uri)
+
+    # Append Stylesheet
+    db_processor.appendStylesheet(sty_isrc)
+
+    # Pickled stylesheet will be self.abs_db_compiled_xsl file
+    db_root = db_processor.stylesheet.root
+    fw = file(XSLT_COMPILED_FILE, 'wb')
+    cPickle.dump(db_root, fw) # , protocol=2)
+    fw.close()
+
+
+def _splitResult(iterator, result):
+    startpos = 0
+    splitResult = []
+
+    for f in iterator:
+        start, end = f.span()
+        splitResult.append(result[startpos:start])
+        splitResult.append(result[start:end])
+        startpos = end
+    splitResult.append(result[startpos:])
+
+    return splitResult
+
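+# _splitResult illustrated (comment only):
+#
+#     s = 'a <img src="x.png"> b'
+#     _splitResult(re.finditer('<img.*?>', s), s)
+#     # -> ['a ', '<img src="x.png">', ' b']
+#
+# The text is cut into alternating non-match / match pieces so parse_result()
+# can rewrite just the matched pieces and join everything back together.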
--- /dev/null	Thu Jan 01 00:00:00 1970 +0000
+++ b/MoinMoin/parser/text_html.py	Tue May 16 20:12:29 2006 +0200
@@ -0,0 +1,34 @@
+# -*- coding: iso-8859-1 -*-
+"""
+    MoinMoin - HTML Parser
+
+    @copyright: 2006 by MoinMoin:AlexanderSchremmer
+    @license: GNU GPL, see COPYING for details.
+"""
+
+from MoinMoin.support.htmlmarkup import Markup
+from HTMLParser import HTMLParseError
+
+Dependencies = []
+
+class Parser:
+    """
+        Sends HTML code after filtering it.
+    """
+
+    extensions = ['.htm', '.html']
+    Dependencies = Dependencies
+    
+    def __init__(self, raw, request, **kw):
+        self.raw = raw
+        self.request = request
+
+    def format(self, formatter):
+        """ Send the text. """
+        try:
+            self.request.write(formatter.rawHTML(Markup(self.raw).sanitize()))
+        except HTMLParseError, e:
+            self.request.write(formatter.sysmsg(1) + 
+                formatter.text(u'HTML parsing error: %s in "%s"' % (e.msg,
+                                  self.raw.splitlines()[e.lineno - 1].strip())) +
+                formatter.sysmsg(0))
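+
+# Usage sketch (illustration only): Parser(u'<b>hi</b>', request).format(formatter)
+# writes the markup after Markup(...).sanitize() has filtered it; if the HTML
+# cannot be parsed at all, the error is reported via formatter.sysmsg() instead
+# of emitting broken markup.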
--- /dev/null	Thu Jan 01 00:00:00 1970 +0000
+++ b/MoinMoin/parser/text_irssi.py	Tue May 16 20:12:29 2006 +0200
@@ -0,0 +1,50 @@
+# -*- coding: iso-8859-1 -*-
+"""
+    MoinMoin - IRC Log Parser (irssi style logs)
+
+    @copyright: 2004 by Thomas Waldmann
+    @license: GNU GPL, see COPYING for details.
+"""
+
+import re
+from MoinMoin import wikiutil
+
+Dependencies = []
+
+class Parser:
+    """
+        Send IRC logs in a table
+    """
+    extensions = ['.irc']
+    Dependencies = []
+
+    def __init__(self, raw, request, **kw):
+        self.raw = raw
+        self.request = request
+        self.form = request.form
+        self._ = request.getText
+        self.out = kw.get('out', request)
+
+    def format(self, formatter):
+        lines = self.raw.split('\n')
+        # TODO: Add support for displaying things like join and part messages.
+        pattern = re.compile(r"""
+            ((\[|\()?                      # Opening bracket for the timestamp (if it exists)
+                (?P<time>([\d]?\d[:.]?)+)  # Timestamp as one or more :/.-separated groups of 1 or 2 digits (if it exists)
+            (\]|\))?\s+)?                  # Closing bracket for the timestamp (if it exists) plus whitespace
+            <\s*?(?P<nick>.*?)\s*?>        # Nick
+            \s+                            # Space between the nick and message
+            (?P<msg>.*)                    # Message
+        """, re.VERBOSE + re.UNICODE)
+        self.out.write(formatter.table(1))
+        for line in lines:
+            match = pattern.match(line)
+            if match:
+                self.out.write(formatter.table_row(1))
+                for g in ('time', 'nick', 'msg'):
+                    self.out.write(formatter.table_cell(1))
+                    self.out.write(formatter.text(match.group(g) or ''))
+                    self.out.write(formatter.table_cell(0))
+                self.out.write(formatter.table_row(0))
+        self.out.write(formatter.table(0))
+
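+# What the pattern above captures (comment only, for illustration):
+#
+#     m = pattern.match('[12:34] <someone> hello world')
+#     m.group('time'), m.group('nick'), m.group('msg')
+#     # -> ('12:34', 'someone', 'hello world')
+#
+# Lines without a <nick> part (joins, parts, topic changes) do not match and
+# are skipped, as noted in the TODO above.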
--- /dev/null	Thu Jan 01 00:00:00 1970 +0000
+++ b/MoinMoin/parser/text_java.py	Tue May 16 20:12:29 2006 +0200
@@ -0,0 +1,42 @@
+# -*- coding: iso-8859-1 -*-
+"""
+    MoinMoin - Java Source Parser
+
+    @copyright: 2002 by Taesu Pyo <bigflood@hitel.net>
+    @license: GNU GPL, see COPYING for details.
+
+"""
+
+from MoinMoin.util.ParserBase import ParserBase
+
+Dependencies = []
+
+class Parser(ParserBase):
+
+    parsername = "ColorizedJava"
+    extensions = ['.java']
+    Dependencies = []
+
+    def setupRules(self):
+        ParserBase.setupRules(self)
+
+        self.addRulePair("Comment","/[*]","[*]/")
+        self.addRule("Comment","//.*$")
+        self.addRulePair("String",'"',r'$|[^\\](\\\\)*"')
+        self.addRule("Char",r"'\\.'|'[^\\]'")
+        self.addRule("Number",r"[0-9](\.[0-9]*)?(eE[+-][0-9])?[flFLdD]?|0[xX][0-9a-fA-F]+[Ll]?")
+        self.addRule("ID","[a-zA-Z_][0-9a-zA-Z_]*")
+        self.addRule("SPChar",r"[~!%^&*()+=|\[\]:;,.<>/?{}-]")
+
+        reserved_words = ['class','interface','enum','import','package',
+        'byte','int','long','float','double','char','short','void','boolean',
+        'static','final','const','private','public','protected',
+        'new','this','super','abstract','native','synchronized','transient','volatile','strictfp',
+        'extends','implements','if','else','while','for','do','switch','case','default','instanceof',
+        'try','catch','finally','throw','throws','return','continue','break']
+
+        self.addReserved(reserved_words)
+
+        constant_words = ['true','false','null']
+
+        self.addConstant(constant_words)
--- /dev/null	Thu Jan 01 00:00:00 1970 +0000
+++ b/MoinMoin/parser/text_moin_wiki.py	Tue May 16 20:12:29 2006 +0200
@@ -0,0 +1,1117 @@
+# -*- coding: iso-8859-1 -*-
+"""
+    MoinMoin - MoinMoin Wiki Markup Parser
+
+    @copyright: 2000, 2001, 2002 by Jürgen Hermann <jh@web.de>
+    @license: GNU GPL, see COPYING for details.
+"""
+
+import os, re
+from MoinMoin import config, wikiutil, macro
+from MoinMoin.Page import Page
+from MoinMoin.util import web
+
+Dependencies = []
+
+class Parser:
+    """
+        Object that turns Wiki markup into HTML.
+
+        All formatting commands can be parsed one line at a time, though
+        some state is carried over between lines.
+
+        Methods named like _*_repl() are responsible for handling the regex
+        groups of the same name defined in formatting_rules (see replace()).
+    """
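+    # Dispatch example (comment only): the named group (?P<rule>-{4,}) in
+    # formatting_rules below is handled by _rule_repl(), (?P<emph>'{2,3}) by
+    # _emph_repl(), and so on -- replace() picks the method from the group name.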
+
+    # allow caching
+    caching = 1
+    Dependencies = []
+
+    # some common strings
+    PARENT_PREFIX = wikiutil.PARENT_PREFIX
+    attachment_schemas = ["attachment", "inline", "drawing"]
+    punct_pattern = re.escape(u'''"\'}]|:,.)?!''')
+    url_pattern = (u'http|https|ftp|nntp|news|mailto|telnet|wiki|file|irc|' +
+            u'|'.join(attachment_schemas) + 
+            (config.url_schemas and u'|' + u'|'.join(config.url_schemas) or ''))
+
+    # some common rules
+    word_rule = ur'(?:(?<![%(u)s%(l)s])|^)%(parent)s(?:%(subpages)s(?:[%(u)s][%(l)s]+){2,})+(?![%(u)s%(l)s]+)' % {
+        'u': config.chars_upper,
+        'l': config.chars_lower,
+        'subpages': wikiutil.CHILD_PREFIX + '?',
+        'parent': ur'(?:%s)?' % re.escape(PARENT_PREFIX),
+    }
+    url_rule = ur'%(url_guard)s(%(url)s)\:([^\s\<%(punct)s]|([%(punct)s][^\s\<%(punct)s]))+' % {
+        'url_guard': u'(^|(?<!\w))',
+        'url': url_pattern,
+        'punct': punct_pattern,
+    }
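+    # e.g. word_rule matches CamelCase names like "WikiName" (two or more
+    # Upper+lower runs) but not "Wikiname", and not words embedded in other
+    # letters -- illustration only.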
+
+    ol_rule = ur"^\s+(?:[0-9]+|[aAiI])\.(?:#\d+)?\s"
+    dl_rule = ur"^\s+.*?::\s"
+
+    # the big, fat, ugly one ;)
+    formatting_rules = ur"""(?P<ent_numeric>&#(\d{1,5}|x[0-9a-fA-F]+);)
+(?:(?P<emph_ibb>'''''(?=[^']+'''))
+(?P<emph_ibi>'''''(?=[^']+''))
+(?P<emph_ib_or_bi>'{5}(?=[^']))
+(?P<emph>'{2,3})
+(?P<u>__)
+(?P<sup>\^.*?\^)
+(?P<sub>,,[^,]{1,40},,)
+(?P<tt>\{\{\{.*?\}\}\})
+(?P<parser>(\{\{\{(#!.*|\s*$)))
+(?P<pre>(\{\{\{ ?|\}\}\}))
+(?P<small>(\~- ?|-\~))
+(?P<big>(\~\+ ?|\+\~))
+(?P<strike>(--\(|\)--))
+(?P<rule>-{4,})
+(?P<comment>^\#\#.*$)
+(?P<macro>\[\[(%%(macronames)s)(?:\(.*?\))?\]\]))
+(?P<ol>%(ol_rule)s)
+(?P<dl>%(dl_rule)s)
+(?P<li>^\s+\*\s*)
+(?P<li_none>^\s+\.\s*)
+(?P<indent>^\s+)
+(?P<tableZ>\|\| $)
+(?P<table>(?:\|\|)+(?:<[^>]*?>)?(?!\|? $))
+(?P<heading>^\s*(?P<hmarker>=+)\s.*\s(?P=hmarker) $)
+(?P<interwiki>[A-Z][a-zA-Z]+\:[^\s'\"\:\<\|]([^\s%(punct)s]|([%(punct)s][^\s%(punct)s]))+)
+(?P<word>%(word_rule)s)
+(?P<url_bracket>\[((%(url)s)\:|#|\:)[^\s\]]+(\s[^\]]+)?\])
+(?P<url>%(url_rule)s)
+(?P<email>[-\w._+]+\@[\w-]+(\.[\w-]+)+)
+(?P<smiley>(?<=\s)(%(smiley)s)(?=\s))
+(?P<smileyA>^(%(smiley)s)(?=\s))
+(?P<ent_symbolic>&[a-zA-Z]+;)
+(?P<ent>[<>&])
+(?P<wikiname_bracket>\[".*?"\])
+(?P<tt_bt>`.*?`)"""  % {
+
+        'url': url_pattern,
+        'punct': punct_pattern,
+        'ol_rule': ol_rule,
+        'dl_rule': dl_rule,
+        'url_rule': url_rule,
+        'word_rule': word_rule,
+        'smiley': u'|'.join(map(re.escape, config.smileys.keys()))}
+
+    # Don't start p before these 
+    no_new_p_before = ("heading rule table tableZ tr td "
+                       "ul ol dl dt dd li li_none indent "
+                       "macro parser pre")
+    no_new_p_before = no_new_p_before.split()
+    no_new_p_before = dict(zip(no_new_p_before, [1] * len(no_new_p_before)))
+
+    def __init__(self, raw, request, **kw):
+        self.raw = raw
+        self.request = request
+        self.form = request.form
+        self._ = request.getText
+        self.cfg = request.cfg
+        self.line_anchors = kw.get('line_anchors', True)
+        self.macro = None
+        self.start_line = kw.get('start_line', 0)
+
+        self.is_em = 0
+        self.is_b = 0
+        self.is_u = 0
+        self.is_strike = 0
+        self.lineno = 0
+        self.in_list = 0 # between <ul/ol/dl> and </ul/ol/dl>
+        self.in_li = 0 # between <li> and </li>
+        self.in_dd = 0 # between <dd> and </dd>
+        self.in_pre = 0
+        self.in_table = 0
+        self.is_big = False
+        self.is_small = False
+        self.inhibit_p = 0 # if set, do not auto-create a <p>aragraph
+        self.titles = request._page_headings
+
+        # holds the nesting level (in chars) of open lists
+        self.list_indents = []
+        self.list_types = []
+        
+        self.formatting_rules = self.formatting_rules % {'macronames': u'|'.join(macro.getNames(self.cfg))}
+
+    def _close_item(self, result):
+        #result.append("<!-- close item begin -->\n")
+        if self.in_table:
+            result.append(self.formatter.table(0))
+            self.in_table = 0
+        if self.in_li:
+            self.in_li = 0
+            if self.formatter.in_p:
+                result.append(self.formatter.paragraph(0))
+            result.append(self.formatter.listitem(0))
+        if self.in_dd:
+            self.in_dd = 0
+            if self.formatter.in_p:
+                result.append(self.formatter.paragraph(0))
+            result.append(self.formatter.definition_desc(0))
+        #result.append("<!-- close item end -->\n")
+
+
+    def interwiki(self, url_and_text, **kw):
+        # TODO: maybe support [wiki:Page http://wherever/image.png] ?
+        if len(url_and_text) == 1:
+            url = url_and_text[0]
+            text = None
+        else:
+            url, text = url_and_text
+
+        # keep track of whether this is a self-reference, so links
+        # are always shown even if the page doesn't exist.
+        is_self_reference = 0
+        url2 = url.lower()
+        if url2.startswith('wiki:self:'):
+            url = url[10:] # remove "wiki:self:"
+            is_self_reference = 1
+        elif url2.startswith('wiki:'):
+            url = url[5:] # remove "wiki:"
+           
+        tag, tail = wikiutil.split_wiki(url)
+        if text is None:
+            if tag:
+                text = tail
+            else:
+                text = url
+                url = ""
+        elif (url.startswith(wikiutil.CHILD_PREFIX) or # fancy link to subpage [wiki:/SubPage text]
+              is_self_reference or # [wiki:Self:LocalPage text] or [:LocalPage:text]
+              Page(self.request, url).exists()): # fancy link to local page [wiki:LocalPage text]
+            return self._word_repl(url, text)
+
+        wikitag, wikiurl, wikitail, wikitag_bad = wikiutil.resolve_wiki(self.request, url)
+        href = wikiutil.join_wiki(wikiurl, wikitail)
+
+        # check for image URL, and possibly return IMG tag
+        if not kw.get('pretty_url', 0) and wikiutil.isPicture(wikitail):
+            return self.formatter.image(src=href)
+
+        # link to self?
+        if wikitag is None:
+            return self._word_repl(wikitail)
+              
+        return (self.formatter.interwikilink(1, tag, tail) + 
+                self.formatter.text(text) +
+                self.formatter.interwikilink(0, tag, tail))
+
+    def attachment(self, url_and_text, **kw):
+        """ This gets called on attachment URLs.
+        """
+        _ = self._
+        if len(url_and_text) == 1:
+            url = url_and_text[0]
+            text = None
+        else:
+            url, text = url_and_text
+
+        inline = url[0] == 'i'
+        drawing = url[0] == 'd'
+        url = url.split(":", 1)[1]
+        url = wikiutil.url_unquote(url, want_unicode=True)
+        text = text or url
+
+        from MoinMoin.action import AttachFile
+        if drawing:
+            return self.formatter.attachment_drawing(url, text)
+
+        # check for image URL, and possibly return IMG tag
+        # (images are always inlined, just like for other URLs)
+        if not kw.get('pretty_url', 0) and wikiutil.isPicture(url):
+            return self.formatter.attachment_image(url)
+                
+        # inline the attachment
+        if inline:
+            return self.formatter.attachment_inlined(url, text)
+
+        return self.formatter.attachment_link(url, text)
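+
+    # Sketch of the branches above (illustration only): "drawing:foo" maps to
+    # formatter.attachment_drawing, "attachment:pic.png" to attachment_image
+    # (pictures are always inlined), "inline:file.txt" to attachment_inlined,
+    # and anything else to a plain attachment_link.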
+
+    def _u_repl(self, word):
+        """Handle underline."""
+        self.is_u = not self.is_u
+        return self.formatter.underline(self.is_u)
+
+    def _strike_repl(self, word):
+        """Handle strikethrough."""
+        # XXX we don't really enforce the correct sequence --( ... )-- here
+        self.is_strike = not self.is_strike
+        return self.formatter.strike(self.is_strike)
+
+    def _small_repl(self, word):
+        """Handle small."""
+        if word.strip() == '~-' and self.is_small:
+            return self.formatter.text(word)
+        if word.strip() == '-~' and not self.is_small:
+            return self.formatter.text(word)
+        self.is_small = not self.is_small
+        return self.formatter.small(self.is_small)
+
+    def _big_repl(self, word):
+        """Handle big."""
+        if word.strip() == '~+' and self.is_big:
+            return self.formatter.text(word)
+        if word.strip() == '+~' and not self.is_big:
+            return self.formatter.text(word)
+        self.is_big = not self.is_big
+        return self.formatter.big(self.is_big)
+
+    def _emph_repl(self, word):
+        """Handle emphasis, i.e. '' and '''."""
+        ##print "#", self.is_b, self.is_em, "#"
+        if len(word) == 3:
+            self.is_b = not self.is_b
+            if self.is_em and self.is_b:
+                self.is_b = 2
+            return self.formatter.strong(self.is_b)
+        else:
+            self.is_em = not self.is_em
+            if self.is_em and self.is_b:
+                self.is_em = 2
+            return self.formatter.emphasis(self.is_em)
+
+    def _emph_ibb_repl(self, word):
+        """Handle mixed emphasis, i.e. ''''' followed by '''."""
+        self.is_b = not self.is_b
+        self.is_em = not self.is_em
+        if self.is_em and self.is_b:
+            self.is_b = 2
+        return self.formatter.emphasis(self.is_em) + self.formatter.strong(self.is_b)
+
+    def _emph_ibi_repl(self, word):
+        """Handle mixed emphasis, i.e. ''''' followed by ''."""
+        self.is_b = not self.is_b
+        self.is_em = not self.is_em
+        if self.is_em and self.is_b:
+            self.is_em = 2
+        return self.formatter.strong(self.is_b) + self.formatter.emphasis(self.is_em)
+
+    def _emph_ib_or_bi_repl(self, word):
+        """Handle mixed emphasis, exactly five '''''."""
+        ##print "*", self.is_b, self.is_em, "*"
+        b_before_em = self.is_b > self.is_em > 0
+        self.is_b = not self.is_b
+        self.is_em = not self.is_em
+        if b_before_em:
+            return self.formatter.strong(self.is_b) + self.formatter.emphasis(self.is_em)
+        else:
+            return self.formatter.emphasis(self.is_em) + self.formatter.strong(self.is_b)
+
+
+    def _sup_repl(self, word):
+        """Handle superscript."""
+        return self.formatter.sup(1) + \
+            self.formatter.text(word[1:-1]) + \
+            self.formatter.sup(0)
+
+    def _sub_repl(self, word):
+        """Handle subscript."""
+        return self.formatter.sub(1) + \
+            self.formatter.text(word[2:-2]) + \
+            self.formatter.sub(0)
+
+
+    def _rule_repl(self, word):
+        """Handle sequences of dashes."""
+        result = self._undent() + self._closeP()
+        if len(word) <= 4:
+            result = result + self.formatter.rule()
+        else:
+            # Create variable rule size 1 - 6. Actual size defined in css.
+            size = min(len(word), 10) - 4
+            result = result + self.formatter.rule(size)
+        return result
+
+
+    def _word_repl(self, word, text=None):
+        """Handle WikiNames."""
+
+        # check for parent links
+        # !!! should use wikiutil.AbsPageName here, but setting `text`
+        # correctly prevents us from doing this for now
+        if word.startswith(wikiutil.PARENT_PREFIX):
+            if not text:
+                text = word
+            word = '/'.join(filter(None, self.formatter.page.page_name.split('/')[:-1] + [word[wikiutil.PARENT_PREFIX_LEN:]]))
+
+        if not text:
+            # if a simple, self-referencing link, emit it as plain text
+            if word == self.formatter.page.page_name:
+                return self.formatter.text(word)
+            text = word
+        if word.startswith(wikiutil.CHILD_PREFIX):
+            word = self.formatter.page.page_name + '/' + word[wikiutil.CHILD_PREFIX_LEN:]
+
+        # handle anchors
+        parts = word.split("#", 1)
+        anchor = ""
+        if len(parts)==2:
+            word, anchor = parts
+
+        return (self.formatter.pagelink(1, word, anchor=anchor) +
+                self.formatter.text(text) +
+                self.formatter.pagelink(0, word))
+
+    def _notword_repl(self, word):
+        """Handle !NotWikiNames."""
+        return self.formatter.nowikiword(word[1:])
+
+    def _interwiki_repl(self, word):
+        """Handle InterWiki links."""
+        wikitag, wikiurl, wikitail, wikitag_bad = wikiutil.resolve_wiki(self.request, word)
+        if wikitag_bad:
+            return self.formatter.text(word)
+        else:
+            return self.interwiki(["wiki:" + word])
+
+
+    def _url_repl(self, word):
+        """Handle literal URLs including inline images."""
+        scheme = word.split(":", 1)[0]
+
+        if scheme == "wiki":
+            return self.interwiki([word])
+        if scheme in self.attachment_schemas:
+            return self.attachment([word])
+
+        if wikiutil.isPicture(word):
+            word = wikiutil.mapURL(self.request, word)
+            # Get image name http://here.com/dir/image.gif -> image
+            name = word.split('/')[-1]
+            name = ''.join(name.split('.')[:-1])
+            return self.formatter.image(src=word, alt=name)
+        else:
+            return (self.formatter.url(1, word, css=scheme) +
+                    self.formatter.text(word) +
+                    self.formatter.url(0))
+
+
+    def _wikiname_bracket_repl(self, word):
+        """Handle special-char wikinames."""
+        wikiname = word[2:-2]
+        if wikiname:
+            return self._word_repl(wikiname)
+        else:
+            return self.formatter.text(word)
+
+
+    def _url_bracket_repl(self, word):
+        """Handle bracketed URLs."""
+
+        # Local extended link?
+        if word[1] == ':':
+            words = word[2:-1].split(':', 1)
+            if len(words) == 1:
+                words = words * 2
+            words[0] = 'wiki:Self:%s' % words[0]
+            return self.interwiki(words, pretty_url=1)
+            #return self._word_repl(words[0], words[1])
+
+        # Traditional split on space
+        words = word[1:-1].split(None, 1)
+        if len(words) == 1:
+            words = words * 2
+
+        if words[0][0] == '#':
+            # anchor link
+            return (self.formatter.url(1, words[0]) +
+                    self.formatter.text(words[1]) +
+                    self.formatter.url(0))
+
+        scheme = words[0].split(":", 1)[0]
+        if scheme == "wiki":
+            return self.interwiki(words, pretty_url=1)
+        if scheme in self.attachment_schemas:
+            return self.attachment(words, pretty_url=1)
+
+        if wikiutil.isPicture(words[1]) and re.match(self.url_rule, words[1]):
+            return (self.formatter.url(1, words[0], css='external', do_escape=0) +
+                    self.formatter.image(title=words[0], alt=words[0], src=words[1]) +
+                    self.formatter.url(0))
+        else:
+            return (self.formatter.url(1, words[0], css=scheme, do_escape=0) +
+                    self.formatter.text(words[1]) +
+                    self.formatter.url(0))
+
+
+    def _email_repl(self, word):
+        """Handle email addresses (without a leading mailto:)."""
+        return (self.formatter.url(1, "mailto:" + word, css='mailto') +
+                self.formatter.text(word) +
+                self.formatter.url(0))
+
+
+    def _ent_repl(self, word):
+        """Handle SGML entities."""
+        return self.formatter.text(word)
+        #return {'&': '&amp;',
+        #        '<': '&lt;',
+        #        '>': '&gt;'}[word]
+
+    def _ent_numeric_repl(self, word):
+        """Handle numeric (decimal and hexadecimal) SGML entities."""
+        return self.formatter.rawHTML(word)
+
+    def _ent_symbolic_repl(self, word):
+        """Handle symbolic SGML entities."""
+        return self.formatter.rawHTML(word)
+    
+    def _indent_repl(self, match):
+        """Handle pure indentation (no - * 1. markup)."""
+        result = []
+        if not (self.in_li or self.in_dd):
+            self._close_item(result)
+            self.in_li = 1
+            css_class = None
+            if self.line_was_empty and not self.first_list_item:
+                css_class = 'gap'
+            result.append(self.formatter.listitem(1, css_class=css_class, style="list-style-type:none"))
+        return ''.join(result)
+
+    def _li_none_repl(self, match):
+        """Handle type=none (" .") lists."""
+        result = []
+        self._close_item(result)
+        self.in_li = 1
+        css_class = None
+        if self.line_was_empty and not self.first_list_item:
+            css_class = 'gap'
+        result.append(self.formatter.listitem(1, css_class=css_class, style="list-style-type:none"))
+        return ''.join(result)
+
+    def _li_repl(self, match):
+        """Handle bullet (" *") lists."""
+        result = []
+        self._close_item(result)
+        self.in_li = 1
+        css_class = None
+        if self.line_was_empty and not self.first_list_item:
+            css_class = 'gap'
+        result.append(self.formatter.listitem(1, css_class=css_class))
+        return ''.join(result)
+
+    def _ol_repl(self, match):
+        """Handle numbered lists."""
+        return self._li_repl(match)
+
+    def _dl_repl(self, match):
+        """Handle definition lists."""
+        result = []
+        self._close_item(result)
+        self.in_dd = 1
+        result.extend([
+            self.formatter.definition_term(1),
+            self.formatter.text(match[1:-3].lstrip(' ')),
+            self.formatter.definition_term(0),
+            self.formatter.definition_desc(1),
+        ])
+        return ''.join(result)
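+        # e.g. the line "  term:: description" matches dl_rule with "  term:: "
+        # as the hit; match[1:-3].lstrip(' ') recovers "term" for the term part,
+        # and the description is scanned afterwards inside the open definition
+        # description.  (comment only, for illustration)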
+
+
+    def _indent_level(self):
+        """Return current char-wise indent level."""
+        return len(self.list_indents) and self.list_indents[-1]
+
+
+    def _indent_to(self, new_level, list_type, numtype, numstart):
+        """Close and open lists."""
+        open = []   # don't make one out of these two statements!
+        close = []
+
+        if self._indent_level() != new_level and self.in_table:
+            close.append(self.formatter.table(0))
+            self.in_table = 0
+        
+        while self._indent_level() > new_level:
+            self._close_item(close)
+            if self.list_types[-1] == 'ol':
+                tag = self.formatter.number_list(0)
+            elif self.list_types[-1] == 'dl':
+                tag = self.formatter.definition_list(0)
+            else:
+                tag = self.formatter.bullet_list(0)
+            close.append(tag)
+
+            del self.list_indents[-1]
+            del self.list_types[-1]
+            
+            if self.list_types: # we are still in a list
+                if self.list_types[-1] == 'dl':
+                    self.in_dd = 1
+                else:
+                    self.in_li = 1
+                
+        # Open new list, if necessary
+        if self._indent_level() < new_level:
+            self.list_indents.append(new_level)
+            self.list_types.append(list_type)
+
+            if self.formatter.in_p:
+                close.append(self.formatter.paragraph(0))
+            
+            if list_type == 'ol':
+                tag = self.formatter.number_list(1, numtype, numstart)
+            elif list_type == 'dl':
+                tag = self.formatter.definition_list(1)
+            else:
+                tag = self.formatter.bullet_list(1)
+            open.append(tag)
+            
+            self.first_list_item = 1
+            self.in_li = 0
+            self.in_dd = 0
+            
+        # If list level changes, close an open table
+        if self.in_table and (open or close):
+            close[0:0] = [self.formatter.table(0)]
+            self.in_table = 0
+        
+        self.in_list = self.list_types != []
+        return ''.join(close) + ''.join(open)
+
+
+    def _undent(self):
+        """Close all open lists."""
+        result = []
+        #result.append("<!-- _undent start -->\n")
+        self._close_item(result)
+        for type in self.list_types[::-1]:
+            if type == 'ol':
+                result.append(self.formatter.number_list(0))
+            elif type == 'dl':
+                result.append(self.formatter.definition_list(0))
+            else:
+                result.append(self.formatter.bullet_list(0))
+        #result.append("<!-- _undent end -->\n")
+        self.list_indents = []
+        self.list_types = []
+        return ''.join(result)
+
+
+    def _tt_repl(self, word):
+        """Handle inline code."""
+        return self.formatter.code(1) + \
+            self.formatter.text(word[3:-3]) + \
+            self.formatter.code(0)
+
+
+    def _tt_bt_repl(self, word):
+        """Handle backticked inline code."""
+        # if len(word) == 2: return "" // removed for FCK editor
+        return self.formatter.code(1, css="backtick") + \
+            self.formatter.text(word[1:-1]) + \
+            self.formatter.code(0)
+
+
+    def _getTableAttrs(self, attrdef):
+        # skip "|" and initial "<"
+        while attrdef and attrdef[0] == "|":
+            attrdef = attrdef[1:]
+        if not attrdef or attrdef[0] != "<":
+            return {}, ''
+        attrdef = attrdef[1:]
+
+        # extension for special table markup
+        def table_extension(key, parser, attrs, wiki_parser=self):
+            """ returns: tuple (found_flag, msg)
+                found_flag: whether we found something and were able to process it here
+                  true for special stuff like 100% or - or #AABBCC
+                  false for style xxx="yyy" attributes
+                msg: "" or an error msg
+            """
+            _ = wiki_parser._
+            found = False
+            msg = ''
+            if key[0] in "0123456789":
+                token = parser.get_token()
+                if token != '%':
+                    wanted = '%'
+                    msg = _('Expected "%(wanted)s" after "%(key)s", got "%(token)s"') % {
+                        'wanted': wanted, 'key': key, 'token': token}
+                else:
+                    try:
+                        dummy = int(key)
+                    except ValueError:
+                        msg = _('Expected an integer "%(key)s" before "%(token)s"') % {
+                            'key': key, 'token': token}
+                    else:
+                        found = True
+                        attrs['width'] = '"%s%%"' % key
+            elif key == '-':
+                arg = parser.get_token()
+                try:
+                    dummy = int(arg)
+                except ValueError:
+                    msg = _('Expected an integer "%(arg)s" after "%(key)s"') % {
+                        'arg': arg, 'key': key}
+                else:
+                    found = True
+                    attrs['colspan'] = '"%s"' % arg
+            elif key == '|':
+                arg = parser.get_token()
+                try:
+                    dummy = int(arg)
+                except ValueError:
+                    msg = _('Expected an integer "%(arg)s" after "%(key)s"') % {
+                        'arg': arg, 'key': key}
+                else:
+                    found = True
+                    attrs['rowspan'] = '"%s"' % arg
+            elif key == '(':
+                found = True
+                attrs['align'] = '"left"'
+            elif key == ':':
+                found = True
+                attrs['align'] = '"center"'
+            elif key == ')':
+                found = True
+                attrs['align'] = '"right"'
+            elif key == '^':
+                found = True
+                attrs['valign'] = '"top"'
+            elif key == 'v':
+                found = True
+                attrs['valign'] = '"bottom"'
+            elif key == '#':
+                arg = parser.get_token()
+                try:
+                    if len(arg) != 6: raise ValueError
+                    dummy = int(arg, 16)
+                except ValueError:
+                    msg = _('Expected a color value "%(arg)s" after "%(key)s"') % {
+                        'arg': arg, 'key': key}
+                else:
+                    found = True
+                    attrs['bgcolor'] = '"#%s"' % arg
+            return found, self.formatter.rawHTML(msg)
+
+        # scan attributes
+        attr, msg = wikiutil.parseAttributes(self.request, attrdef, '>', table_extension)
+        if msg:
+            msg = '<strong class="highlight">%s</strong>' % msg
+        #self.request.log("parseAttributes returned %r" % attr)
+        return attr, msg
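+        # Illustration of the special table markup handled by table_extension
+        # above (comment only):
+        #   ||<-2> ...   -> colspan="2"        ||<|3> ...   -> rowspan="3"
+        #   ||<75%> ...  -> width="75%"        ||<#FFEECC>  -> bgcolor="#FFEECC"
+        #   ||<(> / <:> / <)> -> align left / center / right
+        #   ||<^> / <v>       -> valign top / bottom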
+
+    def _tableZ_repl(self, word):
+        """Handle table row end."""
+        if self.in_table:
+            result = ''
+            # REMOVED: check for self.in_li, p should always close
+            if self.formatter.in_p:
+                result = self.formatter.paragraph(0)
+            result += self.formatter.table_cell(0) + self.formatter.table_row(0)
+            return result
+        else:
+            return self.formatter.text(word)
+
+    def _table_repl(self, word):
+        """Handle table cell separator."""
+        if self.in_table:
+            result = []
+            # check for attributes
+            attrs, attrerr = self._getTableAttrs(word)
+
+            # start the table row?
+            if self.table_rowstart:
+                self.table_rowstart = 0
+                result.append(self.formatter.table_row(1, attrs))
+            else:
+                # Close table cell, first closing open p
+                # REMOVED check for self.in_li, paragraph should close always!
+                if self.formatter.in_p:
+                    result.append(self.formatter.paragraph(0))
+                result.append(self.formatter.table_cell(0))
+
+            # check for adjacent cell markers
+            if word.count("|") > 2:
+                if not attrs.has_key('align'):
+                    attrs['align'] = '"center"'
+                if not attrs.has_key('colspan'):
+                    attrs['colspan'] = '"%d"' % (word.count("|")/2)
+
+            # return the complete cell markup
+            result.append(self.formatter.table_cell(1, attrs) + attrerr)         
+            result.append(self._line_anchordef())
+            return ''.join(result) 
+        else:
+            return self.formatter.text(word)
+
+
+    def _heading_repl(self, word):
+        """Handle section headings."""
+        import sha
+
+        h = word.strip()
+        level = 1
+        while h[level:level+1] == '=':
+            level += 1
+        depth = min(5,level)
+
+        # this is needed for Included pages
+        # TODO: it might still produce unpredictable results
+        # when the same page is included multiple times
+        title_text = h[level:-level].strip()
+        pntt = self.formatter.page.page_name + title_text
+        self.titles.setdefault(pntt, 0)
+        self.titles[pntt] += 1
+
+        unique_id = ''
+        if self.titles[pntt] > 1:
+            unique_id = '-%d' % self.titles[pntt]
+        result = self._closeP()
+        result += self.formatter.heading(1, depth, id="head-"+sha.new(pntt.encode(config.charset)).hexdigest()+unique_id)
+                                     
+        return (result + self.formatter.text(title_text) +
+                self.formatter.heading(0, depth))
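+        # e.g. "== Some Title ==" gives depth 2 and an anchor id of
+        # "head-" + sha1(pagename + title); a second identical heading on the
+        # same page gets "-2" appended, a third "-3", and so on.  (illustration)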
+    
+    def _parser_repl(self, word):
+        """Handle parsed code displays."""
+        if word[:3] == '{{{':
+            word = word[3:]
+
+        self.parser = None
+        self.parser_name = None
+        s_word = word.strip()
+        if s_word == '#!':
+            # empty bang paths lead to a normal code display
+            # can be used to escape real, non-empty bang paths
+            word = ''
+            self.in_pre = 3
+            return self._closeP() + self.formatter.preformatted(1)
+        elif s_word[:2] == '#!':
+            # First try to find a parser for this (will go away in 2.0)
+            parser_name = s_word[2:].split()[0]
+            self.setParser(parser_name)
+
+        if self.parser:
+            self.parser_name = parser_name
+            self.in_pre = 2
+            self.parser_lines = [word]
+            return ''
+        elif s_word:
+            self.in_pre = 3
+            return self._closeP() + self.formatter.preformatted(1) + \
+                   self.formatter.text(s_word + ' (-)')
+        else:
+            self.in_pre = 1
+            return ''
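+        # e.g. a line "{{{#!python" looks up a "python" parser and, if found,
+        # enters parser mode (in_pre == 2), collecting lines until "}}}"; a lone
+        # "{{{" sets in_pre == 1 so the next line may still supply a bang path,
+        # and an empty "{{{#!" falls back to a plain preformatted block
+        # (in_pre == 3).  (comment only, for illustration)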
+
+    def _pre_repl(self, word):
+        """Handle code displays."""
+        word = word.strip()
+        if word == '{{{' and not self.in_pre:
+            self.in_pre = 3
+            return self._closeP() + self.formatter.preformatted(self.in_pre)
+        elif word == '}}}' and self.in_pre:
+            self.in_pre = 0
+            self.inhibit_p = 0
+            return self.formatter.preformatted(self.in_pre)
+        return self.formatter.text(word)
+
+
+    def _smiley_repl(self, word):
+        """Handle smileys."""
+        return self.formatter.smiley(word)
+
+    _smileyA_repl = _smiley_repl
+
+
+    def _comment_repl(self, word):
+        # if we are in a paragraph, we must close it so that normal text following
+        # in the line below the comment will reopen a new paragraph.
+        if self.formatter.in_p:
+            self.formatter.paragraph(0)
+        self.line_is_empty = 1 # markup following comment lines treats them as if they were empty
+        return self.formatter.comment(word)
+
+    def _closeP(self):
+        if self.formatter.in_p:
+            return self.formatter.paragraph(0)
+        return ''
+        
+    def _macro_repl(self, word):
+        """Handle macros ([[macroname]])."""
+        macro_name = word[2:-2]
+        self.inhibit_p = 0 # 1 fixes UserPreferences, 0 fixes paragraph formatting for macros
+
+        # check for arguments
+        args = None
+        if macro_name.count("("):
+            macro_name, args = macro_name.split('(', 1)
+            args = args[:-1]
+
+        # create macro instance
+        if self.macro is None:
+            self.macro = macro.Macro(self)
+        return self.formatter.macro(self.macro, macro_name, args)
+
+    def scan(self, scan_re, line):
+        """ Scans one line
+        
+        Append text before match, invoke replace() with match, and add text after match.
+        """
+        result = []
+        lastpos = 0
+
+        ###result.append(u'<span class="info">[scan: <tt>"%s"</tt>]</span>' % line)
+      
+        for match in scan_re.finditer(line):
+            # Add text before the match
+            if lastpos < match.start():
+                
+                ###result.append(u'<span class="info">[add text before match: <tt>"%s"</tt>]</span>' % line[lastpos:match.start()])
+                
+                if not (self.inhibit_p or self.in_pre or self.formatter.in_p):
+                    result.append(self.formatter.paragraph(1, css_class="line862"))
+                result.append(self.formatter.text(line[lastpos:match.start()]))
+            
+            # Replace match with markup
+            if not (self.inhibit_p or self.in_pre or self.formatter.in_p or
+                    self.in_table or self.in_list):
+                result.append(self.formatter.paragraph(1, css_class="line867"))
+            result.append(self.replace(match))
+            lastpos = match.end()
+        
+        ###result.append('<span class="info">[no match, add rest: <tt>"%s"<tt>]</span>' % line[lastpos:])
+        
+        # Add paragraph with the remainder of the line
+        if not (self.in_pre or self.in_li or self.in_dd or self.inhibit_p or
+                self.formatter.in_p) and lastpos < len(line):
+            result.append(self.formatter.paragraph(1, css_class="line874"))
+        result.append(self.formatter.text(line[lastpos:]))
+        return u''.join(result)
+
+    def replace(self, match):
+        """ Replace match using type name """
+        result = []
+        for type, hit in match.groupdict().items():
+            if hit is not None and type != "hmarker":
+                
+                ###result.append(u'<span class="info">[replace: %s: "%s"]</span>' % (type, hit))
+                if self.in_pre and type not in ['pre', 'ent']:
+                    return self.formatter.text(hit) 
+                else:
+                    # Open p for certain types
+                    if not (self.inhibit_p or self.formatter.in_p
+                            or self.in_pre or (type in self.no_new_p_before)):
+                        result.append(self.formatter.paragraph(1, css_class="line891"))
+                    
+                    # Get replace method and replace the hit
+                    replace = getattr(self, '_' + type + '_repl')
+                    result.append(replace(hit))
+                    return ''.join(result)
+        else:
+            # We should never get here
+            import pprint
+            raise Exception("Can't handle match " + `match`
+                + "\n" + pprint.pformat(match.groupdict())
+                + "\n" + pprint.pformat(match.groups()) )
+
+        return ""
+
+    def _line_anchordef(self):
+        if self.line_anchors and not self.line_anchor_printed:
+            self.line_anchor_printed = 1
+            return self.formatter.line_anchordef(self.lineno)
+        else:
+            return ''
+
+    def format(self, formatter):
+        """ For each line, scan through looking for magic
+            strings, outputting verbatim any intervening text.
+        """
+        self.formatter = formatter
+        self.hilite_re = self.formatter.page.hilite_re
+
+        # prepare regex patterns
+        rules = self.formatting_rules.replace('\n', '|')
+        if self.cfg.bang_meta:
+            rules = ur'(?P<notword>!%(word_rule)s)|%(rules)s' % {
+                'word_rule': self.word_rule,
+                'rules': rules,
+            }
+        self.request.clock.start('compile_huge_and_ugly')        
+        scan_re = re.compile(rules, re.UNICODE)
+        number_re = re.compile(self.ol_rule, re.UNICODE)
+        term_re = re.compile(self.dl_rule, re.UNICODE)
+        indent_re = re.compile("^\s*", re.UNICODE)
+        eol_re = re.compile(r'\r?\n', re.UNICODE)
+        self.request.clock.stop('compile_huge_and_ugly')        
+
+        # get text and replace TABs
+        rawtext = self.raw.expandtabs()
+
+        # go through the lines
+        self.lineno = self.start_line
+        self.lines = eol_re.split(rawtext)
+        self.line_is_empty = 0
+
+        self.in_processing_instructions = 1
+
+        # Main loop
+        for line in self.lines:
+            self.lineno += 1
+            self.line_anchor_printed = 0
+            if not self.in_table:
+                self.request.write(self._line_anchordef())
+            self.table_rowstart = 1
+            self.line_was_empty = self.line_is_empty
+            self.line_is_empty = 0
+            self.first_list_item = 0
+            self.inhibit_p = 0
+
+            # ignore processing instructions
+            if self.in_processing_instructions:
+                found = False
+                for pi in ("##", "#format", "#refresh", "#redirect", "#deprecated",
+                           "#pragma", "#form", "#acl", "#language"):
+                    if line.lower().startswith(pi):
+                        self.request.write(self.formatter.comment(line))
+                        found = True
+                        break
+                if not found:
+                    self.in_processing_instructions = 0
+                else:
+                    continue # do not parse this line
+            if self.in_pre:
+                # TODO: move this into function
+                # still looking for processing instructions
+                # TODO: use strings for pre state, not numbers
+                if self.in_pre == 1:
+                    self.parser = None
+                    parser_name = ''
+                    if (line.strip()[:2] == "#!"):
+                        parser_name = line.strip()[2:].split()[0]
+                        self.setParser(parser_name)
+
+                    if self.parser:
+                        self.in_pre = 2
+                        self.parser_lines = [line]
+                        self.parser_name = parser_name
+                        continue
+                    else:
+                        self.request.write(self._closeP() +
+                                           self.formatter.preformatted(1))
+                        self.in_pre = 3
+                if self.in_pre == 2:
+                    # processing mode
+                    endpos = line.find("}}}")
+                    if endpos == -1:
+                        self.parser_lines.append(line)
+                        continue
+                    if line[:endpos]:
+                        self.parser_lines.append(line[:endpos])
+                    
+                    # Close p before calling parser
+                    # TODO: do we really need this?
+                    self.request.write(self._closeP())
+                    res = self.formatter.parser(self.parser_name, self.parser_lines)
+                    self.request.write(res)
+                    del self.parser_lines
+                    self.in_pre = 0
+                    self.parser = None
+
+                    # send rest of line through regex machinery
+                    line = line[endpos+3:]
+                    if not line.strip(): # just in the case "}}} " when we only have blanks left...
+                        continue
+            else:
+                # we don't have \n as whitespace any more
+                # This is the space between lines we join to one paragraph
+                line += ' '
+                
+                # Paragraph break on empty lines
+                if not line.strip():
+                    if self.in_table:
+                        self.request.write(self.formatter.table(0))
+                        self.request.write(self._line_anchordef())
+                        self.in_table = 0
+                    # CHANGE: removed check for not self.list_types
+                    # p should close on every empty line
+                    if self.formatter.in_p:
+                        self.request.write(self.formatter.paragraph(0))
+                    self.line_is_empty = 1
+                    continue
+
+                # Check indent level
+                indent = indent_re.match(line)
+                indlen = len(indent.group(0))
+                indtype = "ul"
+                numtype = None
+                numstart = None
+                if indlen:
+                    match = number_re.match(line)
+                    if match:
+                        numtype, numstart = match.group(0).strip().split('.')
+                        numtype = numtype[0]
+
+                        if numstart and numstart[0] == "#":
+                            numstart = int(numstart[1:])
+                        else:
+                            numstart = None
+
+                        indtype = "ol"
+                    else:
+                        match = term_re.match(line)
+                        if match:
+                            indtype = "dl"
+
+                # output proper indentation tags
+                self.request.write(self._indent_to(indlen, indtype, numtype, numstart))
+
+                # Table mode
+                # TODO: move into function?                
+                if (not self.in_table and line[indlen:indlen + 2] == "||"
+                    and line[-3:] == "|| " and len(line) >= 5 + indlen):
+                    # Start table
+                    if self.list_types and not self.in_li:
+                        self.request.write(self.formatter.listitem(1, style="list-style-type:none"))
+                        ## CHANGE: no automatic p on li
+                        ##self.request.write(self.formatter.paragraph(1))
+                        self.in_li = 1
+                        
+                    # CHANGE: removed check for self.in_li
+                    # paragraph should end before table, always!
+                    if self.formatter.in_p:
+                        self.request.write(self.formatter.paragraph(0))
+                    attrs, attrerr = self._getTableAttrs(line[indlen+2:])
+                    self.request.write(self.formatter.table(1, attrs) + attrerr)
+                    self.in_table = True # self.lineno
+                elif (self.in_table and not
+                      # intra-table comments should not break a table
+                      (line[:2] == "##" or  
+                       line[indlen:indlen + 2] == "||" and
+                       line[-3:] == "|| " and
+                       len(line) >= 5 + indlen)):
+                    
+                    # Close table
+                    self.request.write(self.formatter.table(0))
+                    self.request.write(self._line_anchordef())
+                    self.in_table = 0
+                                            
+            # Scan line, format and write
+            formatted_line = self.scan(scan_re, line)
+            self.request.write(formatted_line)
+
+            if self.in_pre == 3:
+                self.request.write(self.formatter.linebreak())
+
+        # Close code displays, paragraphs, tables and open lists
+        self.request.write(self._undent())
+        if self.in_pre: self.request.write(self.formatter.preformatted(0))
+        if self.formatter.in_p: self.request.write(self.formatter.paragraph(0))
+        if self.in_table: self.request.write(self.formatter.table(0))
+
+    # Private helpers ------------------------------------------------------------
+    
+    def setParser(self, name):
+        """ Set parser to parser named 'name' """
+        try:
+            self.parser = wikiutil.importPlugin(self.request.cfg, "parser", name, "Parser")
+        except wikiutil.PluginMissingError:
+            self.parser = None
+
+
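The format()/scan()/replace() methods above drive the whole wiki parser: each source line is matched against one combined regex, and every named group that fires is routed to a `_<name>_repl` handler on the parser object. A minimal, self-contained sketch of that named-group dispatch pattern (the rule names and handlers here are illustrative only, not code from this changeset):

    import re

    class TinyScanner:
        # two toy rules, each with a named group, mirroring formatting_rules
        rules = r"(?P<emph>'{2,3})|(?P<url>https?://\S+)"

        def _emph_repl(self, hit):
            return '<em-toggle>'

        def _url_repl(self, hit):
            return '<a href="%s">%s</a>' % (hit, hit)

        def scan(self, line):
            scan_re = re.compile(self.rules)
            out, lastpos = [], 0
            for match in scan_re.finditer(line):
                out.append(line[lastpos:match.start()])
                # exactly one named group is non-None per match; dispatch on its name
                for name, hit in match.groupdict().items():
                    if hit is not None:
                        out.append(getattr(self, '_%s_repl' % name)(hit))
                        break
                lastpos = match.end()
            out.append(line[lastpos:])
            return ''.join(out)

    print(TinyScanner().scan("see http://example.org for ''emphasis''"))
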
--- /dev/null	Thu Jan 01 00:00:00 1970 +0000
+++ b/MoinMoin/parser/text_pascal.py	Tue May 16 20:12:29 2006 +0200
@@ -0,0 +1,50 @@
+# -*- coding: iso-8859-1 -*-
+"""
+    MoinMoin - Pascal Source Parser
+
+    @copyright: 2004-2005 by Johannes Berg <johannes@sipsolutions.net>
+    @license: GNU GPL, see COPYING for details.
+"""
+
+from MoinMoin.util.ParserBase import ParserBase
+
+Dependencies = []
+
+class Parser(ParserBase):
+
+    parsername = 'ColorizedPascal'
+    extensions = ['.pas']
+    Dependencies = []
+
+    def __init__(self, raw, request, **kw):
+        ParserBase.__init__(self,raw,request,**kw)
+        self._ignore_case = 1
+
+    def setupRules(self):
+        ParserBase.setupRules(self)
+        
+        self.addRulePair("Comment","\(\*","\*\)")
+        self.addRulePair("Comment","\{","\}")
+        self.addRule("Comment","//.*$")
+        self.addRulePair("String",'\'','\'')
+        self.addRule("Char",r"'\\.'|#[a-f0-9][a-f0-9]")
+        self.addRule("Number",r"[0-9](\.[0-9]*)?([eE][+-]?[0-9]+)?|\$[0-9a-fA-F]+")
+        self.addRule("ID","[a-zA-Z_][0-9a-zA-Z_]*")
+        self.addRule("SPChar",r"[~!%^&*()+=|\[\]:;,.<>/?{}-]")
+        
+        reserved_words = ['class','interface','set','uses','unit',
+                          'byte','integer','longint','float','double',
+                          'extended','char','shortint','boolean',
+                          'var','const','private','public','protected',
+                          'new','this','super','abstract','native',
+                          'synchronized','transient','volatile','strictfp',
+                          'if','else','while','for','do','case','default',
+                          'try','except','finally','raise','continue','break',
+                          'begin','end','type','class','implementation',
+                          'procedure','function','constructor','destructor']
+        
+        self.addReserved(reserved_words)
+        
+        constant_words = ['true','false','nil']
+        
+        self.addConstant(constant_words)
--- /dev/null	Thu Jan 01 00:00:00 1970 +0000
+++ b/MoinMoin/parser/text_python.py	Tue May 16 20:12:29 2006 +0200
@@ -0,0 +1,125 @@
+# -*- coding: iso-8859-1 -*-
+"""
+    MoinMoin - highlighting Python Source Parser
+
+    @copyright: 2001 by Jürgen Hermann <jh@web.de>
+    @license: GNU GPL, see COPYING for details.
+"""
+
+import StringIO
+import keyword, token, tokenize, sha
+from MoinMoin import config, wikiutil
+from MoinMoin.util.ParserBase import parse_start_step
+
+_KEYWORD = token.NT_OFFSET + 1
+_TEXT    = token.NT_OFFSET + 2
+
+_tokens = {
+    token.NUMBER:       'Number',
+    token.OP:           'Operator',
+    token.STRING:       'String',
+    tokenize.COMMENT:   'Comment',
+    token.NAME:         'ID',
+    token.ERRORTOKEN:   'Error',
+    _KEYWORD:           'ResWord',
+    _TEXT:              'Text',
+}
+
+Dependencies = []
+
+class Parser:
+    """ Send colored python source.
+    """
+
+    extensions = ['.py']
+    Dependencies = []
+
+    def __init__(self, raw, request, **kw):
+        """ Store the source text.
+        """
+        self.raw = raw.expandtabs().rstrip()
+        self.request = request
+        self.form = request.form
+        self._ = request.getText
+
+        self.show_num, self.num_start, self.num_step, attrs = parse_start_step(request, kw.get('format_args',''))
+
+    def format(self, formatter):
+        """ Parse and send the colored source.
+        """
+        # store line offsets in self.lines
+        self.lines = [0, 0]
+        pos = 0
+        while 1:
+            pos = self.raw.find('\n', pos) + 1
+            if not pos: break
+            self.lines.append(pos)
+        self.lines.append(len(self.raw))
+
+        self._code_id = sha.new(self.raw.encode(config.charset)).hexdigest()
+        self.request.write(formatter.code_area(1, self._code_id, 'ColorizedPython', self.show_num, self.num_start, self.num_step))
+        self.formatter = formatter
+        self.request.write(formatter.code_line(1))
+        #len('%d' % (len(self.lines)-1, )))
+        
+        # parse the source and write it
+        self.pos = 0
+        text = StringIO.StringIO(self.raw)
+        try:
+            tokenize.tokenize(text.readline, self)
+        except tokenize.TokenError, ex:
+            msg = ex[0]
+            line = ex[1][0]
+            errmsg = (self.formatter.linebreak() + 
+                      self.formatter.strong(1) + "ERROR: %s" % msg + self.formatter.strong(0) +
+                      self.formatter.linebreak() +
+                      wikiutil.escape(self.raw[self.lines[line]:]))
+            self.request.write(errmsg)
+        self.request.write(self.formatter.code_line(0))
+        self.request.write(formatter.code_area(0, self._code_id))
+
+    def __call__(self, toktype, toktext, (srow,scol), (erow,ecol), line):
+        """ Token handler.
+        """
+        if 0: print "type", toktype, token.tok_name[toktype], "text", toktext, \
+                    "start", srow,scol, "end", erow,ecol, "<br>"
+
+        # calculate new positions
+        oldpos = self.pos
+        newpos = self.lines[srow] + scol
+        self.pos = newpos + len(toktext)
+
+        # handle newlines
+        if toktype in [token.NEWLINE, tokenize.NL]:
+            self.request.write(self.formatter.code_line(0))
+            self.request.write(self.formatter.code_line(1))
+            return
+
+        # send the original whitespace, if needed
+        if newpos > oldpos:
+            self.request.write(self.formatter.text(self.raw[oldpos:newpos]))
+
+        # skip indenting tokens
+        if toktype in [token.INDENT, token.DEDENT]:
+            self.pos = newpos
+            return
+
+        # map token type to a color group
+        if token.LPAR <= toktype and toktype <= token.OP:
+            toktype = token.OP
+        elif toktype == token.NAME and keyword.iskeyword(toktext):
+            toktype = _KEYWORD
+        tokid = _tokens.get(toktype, _tokens[_TEXT])
+
+        # send text
+        first = 1
+        for part in toktext.split('\n'):
+            if not first:
+                self.request.write(self.formatter.code_line(0))
+                self.request.write(self.formatter.code_line(1))
+            else:
+                first = 0
+            self.request.write(self.formatter.code_token(1, tokid) +
+                               self.formatter.text(part) +
+                               self.formatter.code_token(0, tokid))
+
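text_python.py colorizes source by letting the standard tokenize module drive the parser: format() records the offset of each line and then passes the parser instance itself as the tokeneater callback, and __call__() maps every token to one of the color groups in _tokens. A standalone sketch of that callback protocol (Python 2, matching the rest of this changeset; the classification and output format are illustrative only):

    import StringIO
    import keyword, token, tokenize

    SOURCE = "def answer():\n    return 42  # comment\n"

    def classify(toktype, toktext):
        # reserved words get their own class, everything else goes by token type
        if toktype == token.NAME and keyword.iskeyword(toktext):
            return 'ResWord'
        return {token.NUMBER: 'Number', token.OP: 'Operator',
                token.STRING: 'String', tokenize.COMMENT: 'Comment',
                token.NAME: 'ID'}.get(toktype, 'Text')

    def handler(toktype, toktext, (srow, scol), (erow, ecol), line):
        # skip whitespace-only tokens (NEWLINE, INDENT, ENDMARKER, ...)
        if toktext.strip():
            print('%-8s %r' % (classify(toktype, toktext), toktext))

    tokenize.tokenize(StringIO.StringIO(SOURCE).readline, handler)
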
--- /dev/null	Thu Jan 01 00:00:00 1970 +0000
+++ b/MoinMoin/parser/text_rst.py	Tue May 16 20:12:29 2006 +0200
@@ -0,0 +1,602 @@
+# -*- coding: iso-8859-1 -*-
+"""
+    MoinMoin - ReStructured Text Parser
+
+    @copyright: 2004 by Matthew Gilbert <gilbert AT voxmea DOT net>
+        and by Alexander Schremmer <alex AT alexanderweb DOT de>
+    @license: GNU GPL, see COPYING for details.
+
+    REQUIRES docutils 0.3.10 or later (must be later than December 30th, 2005)
+"""
+
+import re
+import new
+import StringIO
+import __builtin__
+import sys
+
+import types
+import os
+
+# docutils imports are below
+from MoinMoin.parser.text_moin_wiki import Parser as WikiParser
+from MoinMoin.Page import Page
+from MoinMoin.action import AttachFile
+from MoinMoin import wikiutil
+
+Dependencies = [] # this parser just depends on the raw text
+
+# --- make docutils safe by overriding all module-scoped names related to IO ---
+
+# TODO: Add an error message to dummyOpen so that the user knows that they
+# requested an unsupported feature of docutils in MoinMoin.
+def dummyOpen(x, y=None, z=None): return
+
+class dummyIO(StringIO.StringIO):
+    def __init__(self, destination=None, destination_path=None,
+                 encoding=None, error_handler='', autoclose=1,
+                 handle_io_errors=1, source_path=None):
+        StringIO.StringIO.__init__(self)
+
+class dummyUrllib2:
+    def urlopen(a):
+        return StringIO.StringIO()
+    urlopen = staticmethod(urlopen)
+
+# # # All docutils imports must be contained below here
+try:
+    import docutils
+    from docutils.core import publish_parts
+    from docutils.writers import html4css1
+    from docutils.nodes import reference
+    from docutils.parsers import rst
+    from docutils.parsers.rst import directives, roles
+# # # All docutils imports must be contained above here
+
+    ErrorParser = None # used in the case of missing docutils
+    docutils.io.FileOutput = docutils.io.FileInput = dummyIO
+except ImportError:
+    # we need to workaround this totally broken plugin interface that does
+    # not allow us to raise exceptions
+    class ErrorParser:
+        caching = 0
+        Dependencies = Dependencies # copy dependencies from module-scope
+
+        def __init__(self, raw, request, **kw):
+            self.raw = raw
+            self.request = request
+    
+        def format(self, formatter):
+            _ = self.request.getText
+            from MoinMoin.parser import plain
+            self.request.write(formatter.sysmsg(1) +
+                               formatter.rawHTML(_('Rendering of reStructured text is not possible, ''please'' install docutils.')) +
+                               formatter.sysmsg(0))
+            plain.Parser(self.raw, self.request).format(formatter)
+    
+    # Create a pseudo docutils environment
+    docutils = html4css1 = dummyUrllib2()
+    html4css1.HTMLTranslator = html4css1.Writer = object
+
+def safe_import(name, globals = None, locals = None, fromlist = None):
+    mod = __builtin__.__import__(name, globals, locals, fromlist)
+    if mod:
+        mod.open = dummyOpen
+        mod.urllib2 = dummyUrllib2
+    return mod
+
+# Go through and change all docutils modules to use a dummyOpen and dummyUrllib2
+# module. Also make sure that any docutils imported modules also get the dummy
+# implementations.
+for i in sys.modules.keys():
+    if i.startswith('docutils') and sys.modules[i]:
+        sys.modules[i].open = dummyOpen
+        sys.modules[i].urllib2 = dummyUrllib2
+        sys.modules[i].__import__ = safe_import
+        
+# --- End of dummy-code --------------------------------------------------------
+
+def html_escape_unicode(node):
+    # Find a Python function that does this for me; string.encode('ascii',
+    # 'xmlcharrefreplace') is only available in Python 2.3 and above.
+    for i in node:
+        if ord(i) > 127:
+            node = node.replace(i, '&#%d;' % (ord(i)))
+    return node
+
+class MoinWriter(html4css1.Writer):
+
+    config_section = 'MoinMoin writer'
+    config_section_dependencies = ('writers',)
+
+    #"""Final translated form of `document`."""
+    output = None
+
+    def wiki_resolver(self, node):
+        """
+            Normally an unknown reference would be an error in a reST document.
+            However, this is how new documents are created in the wiki. This
+            passes on unknown references to eventually be handled by
+            MoinMoin.
+        """
+        if hasattr(node, 'indirect_reference_name'):
+            node['refuri'] = node.indirect_reference_name
+        elif (len(node['ids']) != 0):
+            # If the node has an id then it's probably an internal link. Let
+            # docutils generate an error.
+            return False
+        elif node.hasattr('name'):
+            node['refuri'] = node['name']
+        else:
+            node['refuri'] = node['refname']
+        del node['refname']
+        node.resolved = 1
+        self.nodes.append(node)
+        return True
+
+    wiki_resolver.priority = 001
+
+    def __init__(self, formatter, request):
+        html4css1.Writer.__init__(self)
+        self.formatter = formatter
+        self.request = request
+        # Add our wiki unknown_reference_resolver to our list of functions to
+        # run when a target isn't found
+        self.unknown_reference_resolvers = [self.wiki_resolver]
+        # We create a new parser to process MoinMoin wiki style links in the
+        # reST.
+        self.wikiparser = WikiParser('', self.request)
+        self.wikiparser.formatter = self.formatter
+        self.wikiparser.hilite_re = None
+        self.nodes = []
+        # Make sure it's a supported docutils version.
+        required_version = (0, 3, 10)
+        current_version = tuple([int(i) for i in (docutils.__version__.split('.')+['0','0'])[:3]])
+        if current_version < required_version:
+            err = 'ERROR: The installed docutils version is %s;' % ('.'.join([str(i) for i in current_version]))
+            err += ' version %s or later is required.' % ('.'.join([str(i) for i in required_version]))
+            raise RuntimeError, err
+
+    def translate(self):
+        visitor = MoinTranslator(self.document,
+                                 self.formatter,
+                                 self.request,
+                                 self.wikiparser,
+                                 self)
+        self.document.walkabout(visitor)
+        self.visitor = visitor
+        # Docutils 0.5.0 and later require the writer to have the visitor 
+        # attributes.
+        if (hasattr(html4css1.Writer, 'visitor_attributes')):
+            for attr in html4css1.Writer.visitor_attributes:
+                setattr(self, attr, getattr(visitor, attr))
+        self.output = html_escape_unicode(visitor.astext())
+
+class Parser:
+    caching = 1
+    Dependencies = Dependencies # copy dependencies from module-scope
+
+    def __init__(self, raw, request, **kw):
+        self.raw = raw
+        self.request = request
+        self.form = request.form
+
+    def format(self, formatter):
+        # Create our simple parser
+        parser = MoinDirectives(self.request)
+
+        parts = publish_parts(
+            source = self.raw,
+            writer = MoinWriter(formatter, self.request),
+            settings_overrides = {
+                'halt_level': 5,
+                'traceback': True,
+                'file_insertion_enabled': 0,
+                'raw_enabled': 0,
+                'stylesheet_path': '',
+                'template': '',
+            }
+        )
+
+        html = []
+        if parts['title']:
+            html.append(formatter.rawHTML('<h1>%s</h1>' % parts['title']))
+        # If there is only one subtitle then it is held in parts['subtitle'].
+        # However, if there is more than one subtitle then this is empty and
+        # fragment contains all of the subtitles.
+        if parts['subtitle']:
+            html.append(formatter.rawHTML('<h2>%s</h2>' % parts['subtitle']))
+        if parts['docinfo']:
+            html.append(parts['docinfo'])
+        html.append(parts['fragment'])
+        self.request.write(html_escape_unicode('\n'.join(html)))
+
+class RawHTMLList(list):
+    """
+        RawHTMLList catches all html appended to internal HTMLTranslator lists.
+        It passes the HTML through the MoinMoin rawHTML formatter to strip 
+        markup when necessary. This is to support other formatting outputs
+        (such as ?action=show&mimetype=text/plain).
+    """
+    
+    def __init__(self, formatter):
+        self.formatter = formatter
+        
+    def append(self, text):
+        f = sys._getframe()
+        if f.f_back.f_code.co_filename.endswith('html4css1.py'):
+            if isinstance(text, types.StringType) or isinstance(text, types.UnicodeType):
+                text = self.formatter.rawHTML(text)
+        list.append(self, text)
+
+class MoinTranslator(html4css1.HTMLTranslator):
+
+    def __init__(self, document, formatter, request, parser, writer):
+        html4css1.HTMLTranslator.__init__(self, document)
+        self.formatter = formatter
+        self.request = request
+        # Using our own writer when needed. Save the old one to restore
+        # after the page has been processed by the html4css1 parser.
+        self.original_write, self.request.write = self.request.write, self.capture_wiki_formatting
+        self.wikiparser = parser
+        self.wikiparser.request = request
+        # MoinMoin likes to start the initial headers at level 3 and the title
+        # gets level 2, so to comply with that style we do the same here.
+        # TODO: Could this be fixed by passing this value in settings_overrides?
+        self.initial_header_level = 3
+        # Temporary place for wiki returned markup. This will be filled when
+        # replacing the default writer with the capture_wiki_formatting
+        # function (see visit_image for an example).
+        self.wiki_text = ''
+        self.setup_wiki_handlers()
+        self.setup_admonitions_handlers()
+        
+        # Make all internal lists RawHTMLLists, see RawHTMLList class
+        # comment for more information.
+        for i in self.__dict__:
+            if isinstance(getattr(self, i), types.ListType):
+                setattr(self, i, RawHTMLList(formatter))
+
+    def depart_docinfo(self, node):
+        """
+            depart_docinfo assigns a new list to self.body; we need to re-make that
+            into a RawHTMLList.
+        """
+        html4css1.HTMLTranslator.depart_docinfo(self, node)
+        self.body = RawHTMLList(self.formatter)
+
+    def capture_wiki_formatting(self, text):
+        """
+            Captures MoinMoin generated markup to the instance variable
+            wiki_text.
+        """
+        # For some reason we get empty strings here, which would of course overwrite
+        # what we really want (this is called multiple times per MoinMoin
+        # format call, which I don't understand).
+        self.wiki_text += text
+
+    def process_wiki_text(self, text):
+        """
+            This sequence is repeated numerous times, so it's captured as a
+            single call here. It's important that wiki_text is blanked before we
+            make the format call. format will call request.write which we've
+            hooked to capture_wiki_formatting. If wiki_text is not blanked
+            before a call to request.write we will get the old markup as well as
+            the newly generated markup.
+
+            TODO: Could implement this as a list so that it acts as a stack. I
+            don't like having to remember to blank wiki_text.
+        """
+        self.wiki_text = ''
+        self.wikiparser.raw = text
+        self.wikiparser.format(self.formatter)
+
+    def add_wiki_markup(self):
+        """
+            Placeholder in case this becomes more elaborate someday. For now it
+            only appends the MoinMoin generated markup to the html body and
+            raises SkipNode.
+        """
+        self.body.append(self.wiki_text)
+        self.wiki_text = ''
+        raise docutils.nodes.SkipNode
+
+    def astext(self):
+        self.request.write = self.original_write
+        return html4css1.HTMLTranslator.astext(self)
+
+    def fixup_wiki_formatting(self, text):
+        replacement = {'<p>': '', '</p>': '', '\n': '', '> ': '>'}
+        for src, dst in replacement.items():
+            text = text.replace(src, dst)
+        # Everything seems to have a space ending the text block. We want to
+        # get rid of this
+        if text and text[-1] == ' ':
+            text = text[:-1]
+        return text
+
+    def visit_reference(self, node):
+        """
+            Pass links to MoinMoin to get the correct wiki space url. Extract
+            the url and pass it on to the html4css1 writer to handle. Inline
+            images are also handled by visit_image. Not sure what the "drawing:"
+            link scheme is used for, so for now it is handled here.
+
+            Also included here is a hack to allow MoinMoin macros. This routine
+            checks for a link which starts with "[[". This link is passed to the
+            MoinMoin formatter and the resulting markup is inserted into the
+            document in the place of the original link reference.
+        """
+        if 'refuri' in node.attributes:
+            refuri = node['refuri']
+            prefix = ''
+            link = refuri
+            if ':' in refuri:
+                prefix, link = refuri.lstrip().split(':', 1)
+            
+            # First see if MoinMoin should handle completely. Exits through add_wiki_markup.
+            if ((refuri.startswith('[[') and refuri.endswith(']]')) or 
+                    (prefix == 'drawing') or
+                    (prefix == 'inline')):
+                self.process_wiki_text(refuri)
+                # Don't call fixup_wiki_formatting because who knows what
+                # MoinMoin is inserting. (exits through add_wiki_markup)
+                self.add_wiki_markup()
+
+            # From here down, all links are handled by docutils (except 
+            # missing attachments), just fixup node['refuri'].
+            if prefix == 'attachment':
+                attach_file = AttachFile.getFilename(self.request, 
+                        self.request.page.page_name, link)
+                if not os.path.exists(attach_file):
+                    # Attachment doesn't exist, give to MoinMoin to insert
+                    # upload text.
+                    self.process_wiki_text(refuri)
+                    self.add_wiki_markup()
+                # Attachment exists, just get a link to it.
+                node['refuri'] = AttachFile.getAttachUrl(self.request.page.page_name, 
+                        link, self.request)
+                if not [i for i in node.children if i.__class__ == docutils.nodes.image]:
+                    node['classes'].append(prefix)                
+            elif prefix == 'wiki':
+                wikitag, wikiurl, wikitail, err = wikiutil.resolve_wiki(self.request, link)
+                wikiurl = wikiutil.mapURL(self.request, wikiurl)
+                node['refuri'] = wikiutil.join_wiki(wikiurl, wikitail)
+                # Only add additional class information if the reference does
+                # not have a child image (don't want to add additional markup
+                # for images with targets).
+                if not [i for i in node.children if i.__class__ == docutils.nodes.image]:
+                    node['classes'].append('interwiki')
+            elif prefix != '':
+                # Some link scheme (http, file, https, mailto, etc.), add class
+                # information if the reference doesn't have a child image (don't 
+                # want additional markup for images with targets). 
+                # Don't touch the refuri.
+                if not [i for i in node.children if i.__class__ == docutils.nodes.image]:
+                    node['classes'].append(prefix)
+            else:
+                # Default case - make a link to a wiki page.
+                page = Page(self.request, refuri)
+                node['refuri'] = page.url(self.request)
+                if not page.exists():
+                    node['classes'].append('nonexistent')
+        html4css1.HTMLTranslator.visit_reference(self, node)
+
+    def visit_image(self, node):
+        """
+            Need to intervene in the case of inline images. We need MoinMoin to
+            give us the actual src line to the image and then we can feed this
+            to the default html4css1 writer. NOTE: Since the writer can't "open"
+            this image the scale attribute doesn't work without directly
+            specifying the height or width (or both).
+
+            TODO: Need to handle figures similarly.
+        """
+        uri = node['uri'].lstrip()
+        prefix = ''          # assume no prefix
+        attach_name = uri
+        if ':' in uri:
+            prefix = uri.split(':', 1)[0]
+            attach_name = uri.split(':', 1)[1]
+        # if prefix isn't URL, try to display in page
+        if not prefix.lower() in ('file', 'http', 'https', 'ftp'):
+            attach_file = AttachFile.getFilename(self.request, 
+                    self.request.page.page_name, attach_name)
+            if not os.path.exists(attach_file):
+                # Attachment doesn't exist, MoinMoin should process it
+                if prefix == '':
+                    prefix = 'inline:'
+                self.process_wiki_text(prefix + attach_name)
+                self.wiki_text = self.fixup_wiki_formatting(self.wiki_text)
+                self.add_wiki_markup()
+            # Attachment exists, get a link to it.
+            # create the url
+            node['uri'] = AttachFile.getAttachUrl(self.request.page.page_name, 
+                    attach_name, self.request, addts = 1)
+            if not node.hasattr('alt'):
+                node['alt'] = node.get('name', uri)
+        html4css1.HTMLTranslator.visit_image(self, node)
+
+    def create_wiki_functor(self, moin_func):
+        moin_callable = getattr(self.formatter, moin_func)
+        def visit_func(self, node):
+            self.wiki_text = ''
+            self.request.write(moin_callable(1))
+            self.body.append(self.wiki_text)
+        def depart_func(self, node):
+            self.wiki_text = ''
+            self.request.write(moin_callable(0))
+            self.body.append(self.wiki_text)
+        return visit_func, depart_func
+
+    def setup_wiki_handlers(self):
+        """
+            Have the MoinMoin formatter handle markup when it makes sense. These
+            are portions of the document that do not contain reST specific
+            markup. This allows these portions of the document to look
+            consistent with other wiki pages.
+
+            Setup dispatch routines to handle basic document markup. The
+            handlers dict is the html4css1 handler name followed by the wiki
+            handler name.
+        """
+        handlers = {
+            # Text Markup
+            'emphasis': 'emphasis',
+            'strong': 'strong',
+            'literal': 'code',
+            # Blocks
+            'literal_block': 'preformatted',
+            # Simple Lists
+            # bullet-lists are handled completely by docutils because it uses
+            # the node context to decide when to make a compact list 
+            # (no <p> tags).
+            'list_item': 'listitem',
+            # Definition List
+            'definition_list': 'definition_list',
+        }
+        for rest_func, moin_func in handlers.items():
+            visit_func, depart_func = self.create_wiki_functor(moin_func)
+            visit_func = new.instancemethod(visit_func, self, MoinTranslator)
+            depart_func = new.instancemethod(depart_func, self, MoinTranslator)
+            setattr(self, 'visit_%s' % (rest_func), visit_func)
+            setattr(self, 'depart_%s' % (rest_func), depart_func)
+
+    # Enumerated lists take an extra parameter, so we handle them differently
+    def visit_enumerated_list(self, node):
+        self.wiki_text = ''
+        self.request.write(self.formatter.number_list(1, start=node.get('start', None)))
+        self.body.append(self.wiki_text)
+
+    def depart_enumerated_list(self, node):
+        self.wiki_text = ''
+        self.request.write(self.formatter.number_list(0))
+        self.body.append(self.wiki_text)
+
+    # Admonitions are handled here -=- tmacam
+    def create_admonition_functor(self, admonition_class):
+        tag_class = 'admonition_' + admonition_class
+        def visit_func(self, node):
+            self.wiki_text = ''
+            self.request.write(self.formatter.div(1,
+                                                  attr={'class': tag_class},
+                                                  allowed_attrs=[]))
+            self.body.append(self.wiki_text)
+        def depart_func(self, node):
+            self.wiki_text = ''
+            self.request.write(self.formatter.div(0))
+            self.body.append(self.wiki_text)
+            
+        return visit_func, depart_func 
+
+    def setup_admonitions_handlers(self):
+        """
+            Admonitions are handled here... We basically surround admonitions
+            in a div with class admonition_{name of the admonition}.
+        """
+        handled_admonitions = [
+            'attention',
+            'caution',
+            'danger',
+            'error',
+            'hint',
+            'important',
+            'note',
+            'tip',
+            'warning',
+        ]
+        for adm in handled_admonitions:
+            visit_func, depart_func = self.create_admonition_functor(adm)
+            visit_func = new.instancemethod(visit_func, self, MoinTranslator)
+            depart_func = new.instancemethod(depart_func, self, MoinTranslator)
+            setattr(self, 'visit_%s' % (adm), visit_func)
+            setattr(self, 'depart_%s' % (adm), depart_func)
+
+
+class MoinDirectives:
+    """
+        Class to handle all custom directive handling. This code is called as
+        part of the parsing stage.
+    """
+
+    def __init__(self, request):
+        self.request = request
+
+        # include MoinMoin pages
+        directives.register_directive('include', self.include)
+
+        # used for MoinMoin macros
+        directives.register_directive('macro', self.macro)
+
+        # disallow a few directives in order to prevent XSS
+        # for directive in ('meta', 'include', 'raw'):
+        for directive in ('meta', 'raw'):
+            directives.register_directive(directive, None)
+
+        # disable the raw role
+        roles._roles['raw'] = None
+
+        # As a quick fix for infinite includes we only allow a fixed number of
+        # includes per page
+        self.num_includes = 0
+        self.max_includes = 10
+
+    # Handle the include directive rather than letting the default docutils
+    # parser handle it. This allows the inclusion of MoinMoin pages instead of
+    # something from the filesystem.
+    def include(self, name, arguments, options, content, lineno,
+                content_offset, block_text, state, state_machine):
+        # content contains the included file name
+
+        _ = self.request.getText
+
+        # Limit the number of documents that can be included
+        if self.num_includes < self.max_includes:
+            self.num_includes += 1
+        else:
+            lines = [_("**Maximum number of allowed includes exceeded**")]
+            state_machine.insert_input(lines, 'MoinDirectives')
+            return
+
+        if len(content):
+            page = Page(page_name = content[0], request = self.request)
+            if page.exists():
+                text = page.get_raw_body()
+                lines = text.split('\n')
+                # Remove the "#format rst" line
+                if lines[0].startswith("#format"):
+                    del lines[0]
+            else:
+                lines = [_("**Could not find the referenced page: %s**") % (content[0],)]
+            # Insert the text from the included document and then continue
+            # parsing
+            state_machine.insert_input(lines, 'MoinDirectives')
+        return
+
+    include.content = True
+
+    # Add additional macro directive.
+    # This allows MoinMoin macros to be used either by using the directive
+    # directly or by using the substitution syntax. Much cleaner than using the
+    # reference hack (`[[SomeMacro]]`_). This however simply adds a node to the
+    # document tree which is a reference, but through a much better user
+    # interface.
+    def macro(self, name, arguments, options, content, lineno,
+                content_offset, block_text, state, state_machine):
+        # content contains macro to be called
+        if len(content):
+            # Allow either with or without brackets
+            if content[0].startswith('[['):
+                macro = content[0]
+            else:
+                macro = '[[%s]]' % content[0]
+            ref = reference(macro, refuri = macro)
+            ref['name'] = macro
+            return [ref]
+        return
+
+    macro.content = True
+
+if ErrorParser: # fixup in case of missing docutils
+    Parser = ErrorParser
+
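On the docutils side, Parser.format() above boils down to a publish_parts() call with the custom MoinWriter, after which the returned 'title', 'subtitle', 'docinfo' and 'fragment' parts are stitched into the page. A hedged sketch of that call, assuming docutils is installed and using the stock html writer instead of MoinWriter (only a subset of the overrides above is shown):

    from docutils.core import publish_parts

    rst_source = "My Title\n========\n\nSome *reStructuredText* body text.\n"

    parts = publish_parts(
        source=rst_source,
        writer_name='html',
        settings_overrides={'halt_level': 5,
                            'file_insertion_enabled': 0,
                            'raw_enabled': 0},
    )
    print(parts['title'])      # the promoted document title
    print(parts['fragment'])   # the body rendered as an HTML fragment
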
--- /dev/null	Thu Jan 01 00:00:00 1970 +0000
+++ b/MoinMoin/parser/text_xslt.py	Tue May 16 20:12:29 2006 +0200
@@ -0,0 +1,146 @@
+# -*- coding: iso-8859-1 -*-
+"""
+    MoinMoin - XML Parser
+
+    This parser was tested with 4Suite 1.0a4 and 1.0b1.
+
+    What's new:
+    * much cleaner code
+    * stylesheet can be extended to support other formats:
+        e.g. Docbook parser using docbook->html .xsl stylesheet
+
+    @copyright: 2001, 2003 by Jürgen Hermann <jh@web.de>
+    @copyright: 2005 by Henry Ho <henryho167@hotmail.com>
+    @copyright: 2005 by MoinMoin:AlexanderSchremmer
+    @license: GNU GPL, see COPYING for details.
+"""
+
+# cStringIO cannot be used because it doesn't handle Unicode.
+import StringIO
+import re
+
+from MoinMoin import caching, config, wikiutil, Page
+
+Dependencies = []
+
+class Parser:
+    """ Send XML file formatted via XSLT. """
+    caching = 1
+    Dependencies = Dependencies
+
+    def __init__(self, raw, request, **kw):
+        self.raw = raw.encode(config.charset)
+        self.request = request
+        self.form = request.form
+        self._ = request.getText
+        self.base_scheme = 'wiki'
+        self.base_uri = 'wiki://Self/'
+        self.key = 'xslt'
+
+    def format(self, formatter):
+        """ Send the text. """
+        _ = self._
+
+        if not self.request.cfg.allow_xslt:
+            # use plain parser if XSLT is not allowed
+            # can be activated in wikiconfig.py
+            from MoinMoin.parser import plain
+            self.request.write(formatter.sysmsg(1) +
+                               formatter.rawHTML(_('XSLT option disabled, please look at HelpOnConfiguration.')) +
+                               formatter.sysmsg(0))
+            plain.Parser(self.raw, self.request).format(formatter)
+            return
+
+        try:
+            # try importing Ft from 4suite
+            # if import fails or its version is not 1.x, error msg
+            from Ft.Xml import __version__ as ft_version
+            assert ft_version.startswith('1.')
+        except (ImportError, AssertionError):
+            self.request.write(self.request.formatter.sysmsg(1) +
+                               self.request.formatter.text(_('XSLT processing is not available, please install 4suite 1.x.')) +
+                               self.request.formatter.sysmsg(0))
+        else:
+            from Ft.Lib import Uri
+            from Ft.Xml import InputSource
+            from Ft.Xml.Xslt.Processor import Processor
+            from Ft import FtException
+
+            msg = None
+
+            try:
+                # location of SchemeRegisteryResolver has changed since 1.0a4
+                if ft_version >= "1.0a4":
+                    import Ft.Lib.Resolvers # Do not remove! it looks unused, but breaks when removed!!!
+                                                                        
+                class MoinResolver(Uri.SchemeRegistryResolver):
+                    """ supports resolving self.base_uri for actual pages in MoinMoin """
+                    def __init__(self, handlers, base_scheme):
+                        Uri.SchemeRegistryResolver.__init__(self, handlers)
+                        self.supportedSchemes.append(base_scheme)
+
+                # setting up vars for xslt Processor
+                out_file = StringIO.StringIO()
+                wiki_resolver = MoinResolver(
+                                    handlers={self.base_scheme: self._resolve_page,},
+                                    base_scheme=self.base_scheme)
+                input_factory = InputSource.InputSourceFactory(resolver=wiki_resolver)
+
+                page_uri = self.base_uri + wikiutil.url_quote(formatter.page.page_name)
+                raw = self.raw.strip()
+
+                self.processor = Processor()
+                self.append_stylesheet() # hook, for extending this parser
+                self.processor.run(
+                    input_factory.fromString(raw, uri=page_uri),
+                    outputStream=out_file)
+                result = out_file.getvalue()
+                result = self.parse_result(result) # hook, for extending this parser
+
+            except FtException, msg:
+                etype = "XSLT"
+            except Uri.UriException, msg:
+                etype = "XSLT"
+            except IOError, msg:
+                etype = "I/O"
+
+            if msg:
+                text = wikiutil.escape(self.raw)
+                text = text.expandtabs()
+                text = text.replace('\n', '<br>\n')
+                text = text.replace(' ', '&nbsp;')
+                before = _('%(errortype)s processing error') % {'errortype': etype,}
+                title = u"<strong>%s: %s</strong><p>" % (before, msg)
+                self.request.write(title)
+                self.request.write(text.decode(config.charset))
+            else:
+                self.request.write(result)
+                cache = caching.CacheEntry(self.request, formatter.page, self.key)
+                cache.update(result)
+
+    def _resolve_page(self, uri, base):
+        """ URI will be resolved into StringIO with actual page content """
+        from Ft.Lib import Uri
+        base_uri = self.base_uri
+
+        if uri.startswith(base_uri):
+            pagename = uri[len(base_uri):]
+            page = Page.Page(self.request, pagename)
+            if page.exists():
+                result = StringIO.StringIO(page.getPageText().encode(config.charset))
+            else:
+                raise Uri.UriException(Uri.UriException.RESOURCE_ERROR, loc=uri,
+                                       msg='Page does not exist')
+        else:
+            result = Uri.UriResolverBase.resolve(self, uri, base)
+
+        return result
+
+    def append_stylesheet(self):
+        """ for other parsers based on xslt (i.e. docbook-xml) """
+        pass
+
+    def parse_result(self, result):
+        """ additional parsing to the resulting XSLT'ed result before saving """
+        return result
+
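The append_stylesheet() and parse_result() hooks at the end of text_xslt.py are the extension points its docstring mentions: a stylesheet-based parser (the docstring names a docbook->html parser as an example) can subclass this parser and override them. A hypothetical minimal subclass, touching only the post-processing hook (illustrative, not part of this changeset):

    from MoinMoin.parser.text_xslt import Parser as XsltParser

    class Parser(XsltParser):
        """ Hypothetical XSLT-based parser that post-processes the result. """

        def parse_result(self, result):
            # runs on the transformed markup before it is written and cached
            return result.replace('<hr>', '<hr />')
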
--- a/MoinMoin/parser/wiki.py	Tue May 16 13:10:13 2006 +0200
+++ /dev/null	Thu Jan 01 00:00:00 1970 +0000
@@ -1,1117 +0,0 @@
-# -*- coding: iso-8859-1 -*-
-"""
-    MoinMoin - MoinMoin Wiki Markup Parser
-
-    @copyright: 2000, 2001, 2002 by Jürgen Hermann <jh@web.de>
-    @license: GNU GPL, see COPYING for details.
-"""
-
-import os, re
-from MoinMoin import config, wikiutil, macro
-from MoinMoin.Page import Page
-from MoinMoin.util import web
-
-Dependencies = []
-
-class Parser:
-    """
-        Object that turns Wiki markup into HTML.
-
-        All formatting commands can be parsed one line at a time, though
-        some state is carried over between lines.
-
-        Methods named like _*_repl() are responsible to handle the named regex
-        patterns defined in print_html().
-    """
-
-    # allow caching
-    caching = 1
-    Dependencies = []
-
-    # some common strings
-    PARENT_PREFIX = wikiutil.PARENT_PREFIX
-    attachment_schemas = ["attachment", "inline", "drawing"]
-    punct_pattern = re.escape(u'''"\'}]|:,.)?!''')
-    url_pattern = (u'http|https|ftp|nntp|news|mailto|telnet|wiki|file|irc|' +
-            u'|'.join(attachment_schemas) + 
-            (config.url_schemas and u'|' + u'|'.join(config.url_schemas) or ''))
-
-    # some common rules
-    word_rule = ur'(?:(?<![%(u)s%(l)s])|^)%(parent)s(?:%(subpages)s(?:[%(u)s][%(l)s]+){2,})+(?![%(u)s%(l)s]+)' % {
-        'u': config.chars_upper,
-        'l': config.chars_lower,
-        'subpages': wikiutil.CHILD_PREFIX + '?',
-        'parent': ur'(?:%s)?' % re.escape(PARENT_PREFIX),
-    }
-    url_rule = ur'%(url_guard)s(%(url)s)\:([^\s\<%(punct)s]|([%(punct)s][^\s\<%(punct)s]))+' % {
-        'url_guard': u'(^|(?<!\w))',
-        'url': url_pattern,
-        'punct': punct_pattern,
-    }
-
-    ol_rule = ur"^\s+(?:[0-9]+|[aAiI])\.(?:#\d+)?\s"
-    dl_rule = ur"^\s+.*?::\s"
-
-    # the big, fat, ugly one ;)
-    formatting_rules = ur"""(?P<ent_numeric>&#(\d{1,5}|x[0-9a-fA-F]+);)
-(?:(?P<emph_ibb>'''''(?=[^']+'''))
-(?P<emph_ibi>'''''(?=[^']+''))
-(?P<emph_ib_or_bi>'{5}(?=[^']))
-(?P<emph>'{2,3})
-(?P<u>__)
-(?P<sup>\^.*?\^)
-(?P<sub>,,[^,]{1,40},,)
-(?P<tt>\{\{\{.*?\}\}\})
-(?P<parser>(\{\{\{(#!.*|\s*$)))
-(?P<pre>(\{\{\{ ?|\}\}\}))
-(?P<small>(\~- ?|-\~))
-(?P<big>(\~\+ ?|\+\~))
-(?P<strike>(--\(|\)--))
-(?P<rule>-{4,})
-(?P<comment>^\#\#.*$)
-(?P<macro>\[\[(%%(macronames)s)(?:\(.*?\))?\]\]))
-(?P<ol>%(ol_rule)s)
-(?P<dl>%(dl_rule)s)
-(?P<li>^\s+\*\s*)
-(?P<li_none>^\s+\.\s*)
-(?P<indent>^\s+)
-(?P<tableZ>\|\| $)
-(?P<table>(?:\|\|)+(?:<[^>]*?>)?(?!\|? $))
-(?P<heading>^\s*(?P<hmarker>=+)\s.*\s(?P=hmarker) $)
-(?P<interwiki>[A-Z][a-zA-Z]+\:[^\s'\"\:\<\|]([^\s%(punct)s]|([%(punct)s][^\s%(punct)s]))+)
-(?P<word>%(word_rule)s)
-(?P<url_bracket>\[((%(url)s)\:|#|\:)[^\s\]]+(\s[^\]]+)?\])
-(?P<url>%(url_rule)s)
-(?P<email>[-\w._+]+\@[\w-]+(\.[\w-]+)+)
-(?P<smiley>(?<=\s)(%(smiley)s)(?=\s))
-(?P<smileyA>^(%(smiley)s)(?=\s))
-(?P<ent_symbolic>&[a-zA-Z]+;)
-(?P<ent>[<>&])
-(?P<wikiname_bracket>\[".*?"\])
-(?P<tt_bt>`.*?`)"""  % {
-
-        'url': url_pattern,
-        'punct': punct_pattern,
-        'ol_rule': ol_rule,
-        'dl_rule': dl_rule,
-        'url_rule': url_rule,
-        'word_rule': word_rule,
-        'smiley': u'|'.join(map(re.escape, config.smileys.keys()))}
-
-    # Don't start p before these 
-    no_new_p_before = ("heading rule table tableZ tr td "
-                       "ul ol dl dt dd li li_none indent "
-                       "macro parser pre")
-    no_new_p_before = no_new_p_before.split()
-    no_new_p_before = dict(zip(no_new_p_before, [1] * len(no_new_p_before)))
-
-    def __init__(self, raw, request, **kw):
-        self.raw = raw
-        self.request = request
-        self.form = request.form
-        self._ = request.getText
-        self.cfg = request.cfg
-        self.line_anchors = kw.get('line_anchors', True)
-        self.macro = None
-        self.start_line = kw.get('start_line', 0)
-
-        self.is_em = 0
-        self.is_b = 0
-        self.is_u = 0
-        self.is_strike = 0
-        self.lineno = 0
-        self.in_list = 0 # between <ul/ol/dl> and </ul/ol/dl>
-        self.in_li = 0 # between <li> and </li>
-        self.in_dd = 0 # between <dd> and </dd>
-        self.in_pre = 0
-        self.in_table = 0
-        self.is_big = False
-        self.is_small = False
-        self.inhibit_p = 0 # if set, do not auto-create a <p>aragraph
-        self.titles = request._page_headings
-
-        # holds the nesting level (in chars) of open lists
-        self.list_indents = []
-        self.list_types = []
-        
-        self.formatting_rules = self.formatting_rules % {'macronames': u'|'.join(macro.getNames(self.cfg))}
-
-    def _close_item(self, result):
-        #result.append("<!-- close item begin -->\n")
-        if self.in_table:
-            result.append(self.formatter.table(0))
-            self.in_table = 0
-        if self.in_li:
-            self.in_li = 0
-            if self.formatter.in_p:
-                result.append(self.formatter.paragraph(0))
-            result.append(self.formatter.listitem(0))
-        if self.in_dd:
-            self.in_dd = 0
-            if self.formatter.in_p:
-                result.append(self.formatter.paragraph(0))
-            result.append(self.formatter.definition_desc(0))
-        #result.append("<!-- close item end -->\n")
-
-
-    def interwiki(self, url_and_text, **kw):
-        # TODO: maybe support [wiki:Page http://wherever/image.png] ?
-        if len(url_and_text) == 1:
-            url = url_and_text[0]
-            text = None
-        else:
-            url, text = url_and_text
-
-        # keep track of whether this is a self-reference, so links
-        # are always shown even the page doesn't exist.
-        is_self_reference = 0
-        url2 = url.lower()
-        if url2.startswith('wiki:self:'):
-            url = url[10:] # remove "wiki:self:"
-            is_self_reference = 1
-        elif url2.startswith('wiki:'):
-            url = url[5:] # remove "wiki:"
-           
-        tag, tail = wikiutil.split_wiki(url)
-        if text is None:
-            if tag:
-                text = tail
-            else:
-                text = url
-                url = ""
-        elif (url.startswith(wikiutil.CHILD_PREFIX) or # fancy link to subpage [wiki:/SubPage text]
-              is_self_reference or # [wiki:Self:LocalPage text] or [:LocalPage:text]
-              Page(self.request, url).exists()): # fancy link to local page [wiki:LocalPage text]
-            return self._word_repl(url, text)
-
-        wikitag, wikiurl, wikitail, wikitag_bad = wikiutil.resolve_wiki(self.request, url)
-        href = wikiutil.join_wiki(wikiurl, wikitail)
-
-        # check for image URL, and possibly return IMG tag
-        if not kw.get('pretty_url', 0) and wikiutil.isPicture(wikitail):
-            return self.formatter.image(src=href)
-
-        # link to self?
-        if wikitag is None:
-            return self._word_repl(wikitail)
-              
-        return (self.formatter.interwikilink(1, tag, tail) + 
-                self.formatter.text(text) +
-                self.formatter.interwikilink(0, tag, tail))
-
-    def attachment(self, url_and_text, **kw):
-        """ This gets called on attachment URLs.
-        """
-        _ = self._
-        if len(url_and_text) == 1:
-            url = url_and_text[0]
-            text = None
-        else:
-            url, text = url_and_text
-
-        inline = url[0] == 'i'
-        drawing = url[0] == 'd'
-        url = url.split(":", 1)[1]
-        url = wikiutil.url_unquote(url, want_unicode=True)
-        text = text or url
-
-        from MoinMoin.action import AttachFile
-        if drawing:
-            return self.formatter.attachment_drawing(url, text)
-
-        # check for image URL, and possibly return IMG tag
-        # (images are always inlined, just like for other URLs)
-        if not kw.get('pretty_url', 0) and wikiutil.isPicture(url):
-            return self.formatter.attachment_image(url)
-                
-        # inline the attachment
-        if inline:
-            return self.formatter.attachment_inlined(url, text)
-
-        return self.formatter.attachment_link(url, text)
-
-    def _u_repl(self, word):
-        """Handle underline."""
-        self.is_u = not self.is_u
-        return self.formatter.underline(self.is_u)
-
-    def _strike_repl(self, word):
-        """Handle strikethrough."""
-        # XXX we don't really enforce the correct sequence --( ... )-- here
-        self.is_strike = not self.is_strike
-        return self.formatter.strike(self.is_strike)
-
-    def _small_repl(self, word):
-        """Handle small."""
-        if word.strip() == '~-' and self.is_small:
-            return self.formatter.text(word)
-        if word.strip() == '-~' and not self.is_small:
-            return self.formatter.text(word)
-        self.is_small = not self.is_small
-        return self.formatter.small(self.is_small)
-
-    def _big_repl(self, word):
-        """Handle big."""
-        if word.strip() == '~+' and self.is_big:
-            return self.formatter.text(word)
-        if word.strip() == '+~' and not self.is_big:
-            return self.formatter.text(word)
-        self.is_big = not self.is_big
-        return self.formatter.big(self.is_big)
-
-    def _emph_repl(self, word):
-        """Handle emphasis, i.e. '' and '''."""
-        ##print "#", self.is_b, self.is_em, "#"
-        if len(word) == 3:
-            self.is_b = not self.is_b
-            if self.is_em and self.is_b:
-                self.is_b = 2
-            return self.formatter.strong(self.is_b)
-        else:
-            self.is_em = not self.is_em
-            if self.is_em and self.is_b:
-                self.is_em = 2
-            return self.formatter.emphasis(self.is_em)
-
-    def _emph_ibb_repl(self, word):
-        """Handle mixed emphasis, i.e. ''''' followed by '''."""
-        self.is_b = not self.is_b
-        self.is_em = not self.is_em
-        if self.is_em and self.is_b:
-            self.is_b = 2
-        return self.formatter.emphasis(self.is_em) + self.formatter.strong(self.is_b)
-
-    def _emph_ibi_repl(self, word):
-        """Handle mixed emphasis, i.e. ''''' followed by ''."""
-        self.is_b = not self.is_b
-        self.is_em = not self.is_em
-        if self.is_em and self.is_b:
-            self.is_em = 2
-        return self.formatter.strong(self.is_b) + self.formatter.emphasis(self.is_em)
-
-    def _emph_ib_or_bi_repl(self, word):
-        """Handle mixed emphasis, exactly five '''''."""
-        ##print "*", self.is_b, self.is_em, "*"
-        b_before_em = self.is_b > self.is_em > 0
-        self.is_b = not self.is_b
-        self.is_em = not self.is_em
-        if b_before_em:
-            return self.formatter.strong(self.is_b) + self.formatter.emphasis(self.is_em)
-        else:
-            return self.formatter.emphasis(self.is_em) + self.formatter.strong(self.is_b)
-
-
-    def _sup_repl(self, word):
-        """Handle superscript."""
-        return self.formatter.sup(1) + \
-            self.formatter.text(word[1:-1]) + \
-            self.formatter.sup(0)
-
-    def _sub_repl(self, word):
-        """Handle subscript."""
-        return self.formatter.sub(1) + \
-            self.formatter.text(word[2:-2]) + \
-            self.formatter.sub(0)
-
-
-    def _rule_repl(self, word):
-        """Handle sequences of dashes."""
-        result = self._undent() + self._closeP()
-        if len(word) <= 4:
-            result = result + self.formatter.rule()
-        else:
-            # Create variable rule size 1 - 6. Actual size defined in css.
-            size = min(len(word), 10) - 4
-            result = result + self.formatter.rule(size)
-        return result
-
-
-    def _word_repl(self, word, text=None):
-        """Handle WikiNames."""
-
-        # check for parent links
-        # !!! should use wikiutil.AbsPageName here, but setting `text`
-        # correctly prevents us from doing this for now
-        if word.startswith(wikiutil.PARENT_PREFIX):
-            if not text:
-                text = word
-            word = '/'.join(filter(None, self.formatter.page.page_name.split('/')[:-1] + [word[wikiutil.PARENT_PREFIX_LEN:]]))
-
-        if not text:
-            # if a simple, self-referencing link, emit it as plain text
-            if word == self.formatter.page.page_name:
-                return self.formatter.text(word)
-            text = word
-        if word.startswith(wikiutil.CHILD_PREFIX):
-            word = self.formatter.page.page_name + '/' + word[wikiutil.CHILD_PREFIX_LEN:]
-
-        # handle anchors
-        parts = word.split("#", 1)
-        anchor = ""
-        if len(parts)==2:
-            word, anchor = parts
-
-        return (self.formatter.pagelink(1, word, anchor=anchor) +
-                self.formatter.text(text) +
-                self.formatter.pagelink(0, word))
-
-    def _notword_repl(self, word):
-        """Handle !NotWikiNames."""
-        return self.formatter.nowikiword(word[1:])
-
-    def _interwiki_repl(self, word):
-        """Handle InterWiki links."""
-        wikitag, wikiurl, wikitail, wikitag_bad = wikiutil.resolve_wiki(self.request, word)
-        if wikitag_bad:
-            return self.formatter.text(word)
-        else:
-            return self.interwiki(["wiki:" + word])
-
-
-    def _url_repl(self, word):
-        """Handle literal URLs including inline images."""
-        scheme = word.split(":", 1)[0]
-
-        if scheme == "wiki":
-            return self.interwiki([word])
-        if scheme in self.attachment_schemas:
-            return self.attachment([word])
-
-        if wikiutil.isPicture(word):
-            word = wikiutil.mapURL(self.request, word)
-            # Get image name http://here.com/dir/image.gif -> image
-            name = word.split('/')[-1]
-            name = ''.join(name.split('.')[:-1])
-            return self.formatter.image(src=word, alt=name)
-        else:
-            return (self.formatter.url(1, word, css=scheme) +
-                    self.formatter.text(word) +
-                    self.formatter.url(0))
-
-
-    def _wikiname_bracket_repl(self, word):
-        """Handle special-char wikinames."""
-        wikiname = word[2:-2]
-        if wikiname:
-            return self._word_repl(wikiname)
-        else:
-            return self.formatter.text(word)
-
-
-    def _url_bracket_repl(self, word):
-        """Handle bracketed URLs."""
-
-        # Local extended link?
-        if word[1] == ':':
-            words = word[2:-1].split(':', 1)
-            if len(words) == 1:
-                words = words * 2
-            words[0] = 'wiki:Self:%s' % words[0]
-            return self.interwiki(words, pretty_url=1)
-            #return self._word_repl(words[0], words[1])
-
-        # Traditional split on space
-        words = word[1:-1].split(None, 1)
-        if len(words) == 1:
-            words = words * 2
-
-        if words[0][0] == '#':
-            # anchor link
-            return (self.formatter.url(1, words[0]) +
-                    self.formatter.text(words[1]) +
-                    self.formatter.url(0))
-
-        scheme = words[0].split(":", 1)[0]
-        if scheme == "wiki":
-            return self.interwiki(words, pretty_url=1)
-        if scheme in self.attachment_schemas:
-            return self.attachment(words, pretty_url=1)
-
-        if wikiutil.isPicture(words[1]) and re.match(self.url_rule, words[1]):
-            return (self.formatter.url(1, words[0], css='external', do_escape=0) +
-                    self.formatter.image(title=words[0], alt=words[0], src=words[1]) +
-                    self.formatter.url(0))
-        else:
-            return (self.formatter.url(1, words[0], css=scheme, do_escape=0) +
-                    self.formatter.text(words[1]) +
-                    self.formatter.url(0))
-
-
-    def _email_repl(self, word):
-        """Handle email addresses (without a leading mailto:)."""
-        return (self.formatter.url(1, "mailto:" + word, css='mailto') +
-                self.formatter.text(word) +
-                self.formatter.url(0))
-
-
-    def _ent_repl(self, word):
-        """Handle SGML entities."""
-        return self.formatter.text(word)
-        #return {'&': '&amp;',
-        #        '<': '&lt;',
-        #        '>': '&gt;'}[word]
-
-    def _ent_numeric_repl(self, word):
-        """Handle numeric (decimal and hexadecimal) SGML entities."""
-        return self.formatter.rawHTML(word)
-
-    def _ent_symbolic_repl(self, word):
-        """Handle symbolic SGML entities."""
-        return self.formatter.rawHTML(word)
-    
-    def _indent_repl(self, match):
-        """Handle pure indentation (no - * 1. markup)."""
-        result = []
-        if not (self.in_li or self.in_dd):
-            self._close_item(result)
-            self.in_li = 1
-            css_class = None
-            if self.line_was_empty and not self.first_list_item:
-                css_class = 'gap'
-            result.append(self.formatter.listitem(1, css_class=css_class, style="list-style-type:none"))
-        return ''.join(result)
-
-    def _li_none_repl(self, match):
-        """Handle type=none (" .") lists."""
-        result = []
-        self._close_item(result)
-        self.in_li = 1
-        css_class = None
-        if self.line_was_empty and not self.first_list_item:
-            css_class = 'gap'
-        result.append(self.formatter.listitem(1, css_class=css_class, style="list-style-type:none"))
-        return ''.join(result)
-
-    def _li_repl(self, match):
-        """Handle bullet (" *") lists."""
-        result = []
-        self._close_item(result)
-        self.in_li = 1
-        css_class = None
-        if self.line_was_empty and not self.first_list_item:
-            css_class = 'gap'
-        result.append(self.formatter.listitem(1, css_class=css_class))
-        return ''.join(result)
-
-    def _ol_repl(self, match):
-        """Handle numbered lists."""
-        return self._li_repl(match)
-
-    def _dl_repl(self, match):
-        """Handle definition lists."""
-        result = []
-        self._close_item(result)
-        self.in_dd = 1
-        result.extend([
-            self.formatter.definition_term(1),
-            self.formatter.text(match[1:-3].lstrip(' ')),
-            self.formatter.definition_term(0),
-            self.formatter.definition_desc(1),
-        ])
-        return ''.join(result)
-
-
-    def _indent_level(self):
-        """Return current char-wise indent level."""
-        return len(self.list_indents) and self.list_indents[-1]
-
-
-    def _indent_to(self, new_level, list_type, numtype, numstart):
-        """Close and open lists."""
-        open = []   # don't make one out of these two statements!
-        close = []
-
-        if self._indent_level() != new_level and self.in_table:
-            close.append(self.formatter.table(0))
-            self.in_table = 0
-        
-        while self._indent_level() > new_level:
-            self._close_item(close)
-            if self.list_types[-1] == 'ol':
-                tag = self.formatter.number_list(0)
-            elif self.list_types[-1] == 'dl':
-                tag = self.formatter.definition_list(0)
-            else:
-                tag = self.formatter.bullet_list(0)
-            close.append(tag)
-
-            del self.list_indents[-1]
-            del self.list_types[-1]
-            
-            if self.list_types: # we are still in a list
-                if self.list_types[-1] == 'dl':
-                    self.in_dd = 1
-                else:
-                    self.in_li = 1
-                
-        # Open new list, if necessary
-        if self._indent_level() < new_level:
-            self.list_indents.append(new_level)
-            self.list_types.append(list_type)
-
-            if self.formatter.in_p:
-                close.append(self.formatter.paragraph(0))
-            
-            if list_type == 'ol':
-                tag = self.formatter.number_list(1, numtype, numstart)
-            elif list_type == 'dl':
-                tag = self.formatter.definition_list(1)
-            else:
-                tag = self.formatter.bullet_list(1)
-            open.append(tag)
-            
-            self.first_list_item = 1
-            self.in_li = 0
-            self.in_dd = 0
-            
-        # If list level changes, close an open table
-        if self.in_table and (open or close):
-            close[0:0] = [self.formatter.table(0)]
-            self.in_table = 0
-        
-        self.in_list = self.list_types != []
-        return ''.join(close) + ''.join(open)
-
-
-    def _undent(self):
-        """Close all open lists."""
-        result = []
-        #result.append("<!-- _undent start -->\n")
-        self._close_item(result)
-        for type in self.list_types[::-1]:
-            if type == 'ol':
-                result.append(self.formatter.number_list(0))
-            elif type == 'dl':
-                result.append(self.formatter.definition_list(0))
-            else:
-                result.append(self.formatter.bullet_list(0))
-        #result.append("<!-- _undent end -->\n")
-        self.list_indents = []
-        self.list_types = []
-        return ''.join(result)
-
-
-    def _tt_repl(self, word):
-        """Handle inline code."""
-        return self.formatter.code(1) + \
-            self.formatter.text(word[3:-3]) + \
-            self.formatter.code(0)
-
-
-    def _tt_bt_repl(self, word):
-        """Handle backticked inline code."""
-        # if len(word) == 2: return "" // removed for FCK editor
-        return self.formatter.code(1, css="backtick") + \
-            self.formatter.text(word[1:-1]) + \
-            self.formatter.code(0)
-
-
-    def _getTableAttrs(self, attrdef):
-        # skip "|" and initial "<"
-        while attrdef and attrdef[0] == "|":
-            attrdef = attrdef[1:]
-        if not attrdef or attrdef[0] != "<":
-            return {}, ''
-        attrdef = attrdef[1:]
-
-        # extension for special table markup
-        def table_extension(key, parser, attrs, wiki_parser=self):
-            """ returns: tuple (found_flag, msg)
-                found_flag: whether we found something and were able to process it here
-                  true for special stuff like 100% or - or #AABBCC
-                  false for style xxx="yyy" attributes
-                msg: "" or an error msg
-            """
-            _ = wiki_parser._
-            found = False
-            msg = ''
-            if key[0] in "0123456789":
-                token = parser.get_token()
-                if token != '%':
-                    wanted = '%'
-                    msg = _('Expected "%(wanted)s" after "%(key)s", got "%(token)s"') % {
-                        'wanted': wanted, 'key': key, 'token': token}
-                else:
-                    try:
-                        dummy = int(key)
-                    except ValueError:
-                        msg = _('Expected an integer "%(key)s" before "%(token)s"') % {
-                            'key': key, 'token': token}
-                    else:
-                        found = True
-                        attrs['width'] = '"%s%%"' % key
-            elif key == '-':
-                arg = parser.get_token()
-                try:
-                    dummy = int(arg)
-                except ValueError:
-                    msg = _('Expected an integer "%(arg)s" after "%(key)s"') % {
-                        'arg': arg, 'key': key}
-                else:
-                    found = True
-                    attrs['colspan'] = '"%s"' % arg
-            elif key == '|':
-                arg = parser.get_token()
-                try:
-                    dummy = int(arg)
-                except ValueError:
-                    msg = _('Expected an integer "%(arg)s" after "%(key)s"') % {
-                        'arg': arg, 'key': key}
-                else:
-                    found = True
-                    attrs['rowspan'] = '"%s"' % arg
-            elif key == '(':
-                found = True
-                attrs['align'] = '"left"'
-            elif key == ':':
-                found = True
-                attrs['align'] = '"center"'
-            elif key == ')':
-                found = True
-                attrs['align'] = '"right"'
-            elif key == '^':
-                found = True
-                attrs['valign'] = '"top"'
-            elif key == 'v':
-                found = True
-                attrs['valign'] = '"bottom"'
-            elif key == '#':
-                arg = parser.get_token()
-                try:
-                    if len(arg) != 6: raise ValueError
-                    dummy = int(arg, 16)
-                except ValueError:
-                    msg = _('Expected a color value "%(arg)s" after "%(key)s"') % {
-                        'arg': arg, 'key': key}
-                else:
-                    found = True
-                    attrs['bgcolor'] = '"#%s"' % arg
-            return found, self.formatter.rawHTML(msg)
-
-        # scan attributes
-        attr, msg = wikiutil.parseAttributes(self.request, attrdef, '>', table_extension)
-        if msg:
-            msg = '<strong class="highlight">%s</strong>' % msg
-        #self.request.log("parseAttributes returned %r" % attr)
-        return attr, msg
-
-    def _tableZ_repl(self, word):
-        """Handle table row end."""
-        if self.in_table:
-            result = ''
-            # REMOVED: check for self.in_li, p should always close
-            if self.formatter.in_p:
-                result = self.formatter.paragraph(0)
-            result += self.formatter.table_cell(0) + self.formatter.table_row(0)
-            return result
-        else:
-            return self.formatter.text(word)
-
-    def _table_repl(self, word):
-        """Handle table cell separator."""
-        if self.in_table:
-            result = []
-            # check for attributes
-            attrs, attrerr = self._getTableAttrs(word)
-
-            # start the table row?
-            if self.table_rowstart:
-                self.table_rowstart = 0
-                result.append(self.formatter.table_row(1, attrs))
-            else:
-                # Close table cell, first closing open p
-                # REMOVED check for self.in_li, paragraph should close always!
-                if self.formatter.in_p:
-                    result.append(self.formatter.paragraph(0))
-                result.append(self.formatter.table_cell(0))
-
-            # check for adjacent cell markers
-            if word.count("|") > 2:
-                if not attrs.has_key('align'):
-                    attrs['align'] = '"center"'
-                if not attrs.has_key('colspan'):
-                    attrs['colspan'] = '"%d"' % (word.count("|")/2)
-
-            # return the complete cell markup
-            result.append(self.formatter.table_cell(1, attrs) + attrerr)         
-            result.append(self._line_anchordef())
-            return ''.join(result) 
-        else:
-            return self.formatter.text(word)
-
-
-    def _heading_repl(self, word):
-        """Handle section headings."""
-        import sha
-
-        h = word.strip()
-        level = 1
-        while h[level:level+1] == '=':
-            level += 1
-        depth = min(5,level)
-
-        # this is needed for Included pages
-        # TODO but it might still result in unpredictable results
-        # when included the same page multiple times
-        title_text = h[level:-level].strip()
-        pntt = self.formatter.page.page_name + title_text
-        self.titles.setdefault(pntt, 0)
-        self.titles[pntt] += 1
-
-        unique_id = ''
-        if self.titles[pntt] > 1:
-            unique_id = '-%d' % self.titles[pntt]
-        result = self._closeP()
-        result += self.formatter.heading(1, depth, id="head-"+sha.new(pntt.encode(config.charset)).hexdigest()+unique_id)
-                                     
-        return (result + self.formatter.text(title_text) +
-                self.formatter.heading(0, depth))
-    
-    def _parser_repl(self, word):
-        """Handle parsed code displays."""
-        if word[:3] == '{{{':
-            word = word[3:]
-
-        self.parser = None
-        self.parser_name = None
-        s_word = word.strip()
-        if s_word == '#!':
-            # empty bang paths lead to a normal code display
-            # can be used to escape real, non-empty bang paths
-            word = ''
-            self.in_pre = 3
-            return self._closeP() + self.formatter.preformatted(1)
-        elif s_word[:2] == '#!':
-            # First try to find a parser for this (will go away in 2.0)
-            parser_name = s_word[2:].split()[0]
-            self.setParser(parser_name)
-
-        if self.parser:
-            self.parser_name = parser_name
-            self.in_pre = 2
-            self.parser_lines = [word]
-            return ''
-        elif s_word:
-            self.in_pre = 3
-            return self._closeP() + self.formatter.preformatted(1) + \
-                   self.formatter.text(s_word + ' (-)')
-        else:
-            self.in_pre = 1
-            return ''
-
-    def _pre_repl(self, word):
-        """Handle code displays."""
-        word = word.strip()
-        if word == '{{{' and not self.in_pre:
-            self.in_pre = 3
-            return self._closeP() + self.formatter.preformatted(self.in_pre)
-        elif word == '}}}' and self.in_pre:
-            self.in_pre = 0
-            self.inhibit_p = 0
-            return self.formatter.preformatted(self.in_pre)
-        return self.formatter.text(word)
-
-
-    def _smiley_repl(self, word):
-        """Handle smileys."""
-        return self.formatter.smiley(word)
-
-    _smileyA_repl = _smiley_repl
-
-
-    def _comment_repl(self, word):
-        # if we are in a paragraph, we must close it so that normal text following
-        # in the line below the comment will reopen a new paragraph.
-        if self.formatter.in_p:
-            self.formatter.paragraph(0)
-        self.line_is_empty = 1 # markup following comment lines treats them as if they were empty
-        return self.formatter.comment(word)
-
-    def _closeP(self):
-        if self.formatter.in_p:
-            return self.formatter.paragraph(0)
-        return ''
-        
-    def _macro_repl(self, word):
-        """Handle macros ([[macroname]])."""
-        macro_name = word[2:-2]
-        self.inhibit_p = 0 # 1 fixes UserPreferences, 0 fixes paragraph formatting for macros
-
-        # check for arguments
-        args = None
-        if macro_name.count("("):
-            macro_name, args = macro_name.split('(', 1)
-            args = args[:-1]
-
-        # create macro instance
-        if self.macro is None:
-            self.macro = macro.Macro(self)
-        return self.formatter.macro(self.macro, macro_name, args)
-
-    def scan(self, scan_re, line):
-        """ Scans one line
-        
-        Append text before match, invoke replace() with match, and add text after match.
-        """
-        result = []
-        lastpos = 0
-
-        ###result.append(u'<span class="info">[scan: <tt>"%s"</tt>]</span>' % line)
-      
-        for match in scan_re.finditer(line):
-            # Add text before the match
-            if lastpos < match.start():
-                
-                ###result.append(u'<span class="info">[add text before match: <tt>"%s"</tt>]</span>' % line[lastpos:match.start()])
-                
-                if not (self.inhibit_p or self.in_pre or self.formatter.in_p):
-                    result.append(self.formatter.paragraph(1, css_class="line862"))
-                result.append(self.formatter.text(line[lastpos:match.start()]))
-            
-            # Replace match with markup
-            if not (self.inhibit_p or self.in_pre or self.formatter.in_p or
-                    self.in_table or self.in_list):
-                result.append(self.formatter.paragraph(1, css_class="line867"))
-            result.append(self.replace(match))
-            lastpos = match.end()
-        
-        ###result.append('<span class="info">[no match, add rest: <tt>"%s"<tt>]</span>' % line[lastpos:])
-        
-        # Add paragraph with the remainder of the line
-        if not (self.in_pre or self.in_li or self.in_dd or self.inhibit_p or
-                self.formatter.in_p) and lastpos < len(line):
-            result.append(self.formatter.paragraph(1, css_class="line874"))
-        result.append(self.formatter.text(line[lastpos:]))
-        return u''.join(result)
-
-    def replace(self, match):
-        """ Replace match using type name """
-        result = []
-        for type, hit in match.groupdict().items():
-            if hit is not None and type != "hmarker":
-                
-                ###result.append(u'<span class="info">[replace: %s: "%s"]</span>' % (type, hit))
-                if self.in_pre and type not in ['pre', 'ent']:
-                    return self.formatter.text(hit) 
-                else:
-                    # Open p for certain types
-                    if not (self.inhibit_p or self.formatter.in_p
-                            or self.in_pre or (type in self.no_new_p_before)):
-                        result.append(self.formatter.paragraph(1, css_class="line891"))
-                    
-                    # Get replace method and replece hit
-                    replace = getattr(self, '_' + type + '_repl')
-                    result.append(replace(hit))
-                    return ''.join(result)
-        else:
-            # We should never get here
-            import pprint
-            raise Exception("Can't handle match " + `match`
-                + "\n" + pprint.pformat(match.groupdict())
-                + "\n" + pprint.pformat(match.groups()) )
-
-        return ""
-
-    def _line_anchordef(self):
-        if self.line_anchors and not self.line_anchor_printed:
-            self.line_anchor_printed = 1
-            return self.formatter.line_anchordef(self.lineno)
-        else:
-            return ''
-
-    def format(self, formatter):
-        """ For each line, scan through looking for magic
-            strings, outputting verbatim any intervening text.
-        """
-        self.formatter = formatter
-        self.hilite_re = self.formatter.page.hilite_re
-
-        # prepare regex patterns
-        rules = self.formatting_rules.replace('\n', '|')
-        if self.cfg.bang_meta:
-            rules = ur'(?P<notword>!%(word_rule)s)|%(rules)s' % {
-                'word_rule': self.word_rule,
-                'rules': rules,
-            }
-        self.request.clock.start('compile_huge_and_ugly')        
-        scan_re = re.compile(rules, re.UNICODE)
-        number_re = re.compile(self.ol_rule, re.UNICODE)
-        term_re = re.compile(self.dl_rule, re.UNICODE)
-        indent_re = re.compile("^\s*", re.UNICODE)
-        eol_re = re.compile(r'\r?\n', re.UNICODE)
-        self.request.clock.stop('compile_huge_and_ugly')        
-
-        # get text and replace TABs
-        rawtext = self.raw.expandtabs()
-
-        # go through the lines
-        self.lineno = self.start_line
-        self.lines = eol_re.split(rawtext)
-        self.line_is_empty = 0
-
-        self.in_processing_instructions = 1
-
-        # Main loop
-        for line in self.lines:
-            self.lineno += 1
-            self.line_anchor_printed = 0
-            if not self.in_table:
-                self.request.write(self._line_anchordef())
-            self.table_rowstart = 1
-            self.line_was_empty = self.line_is_empty
-            self.line_is_empty = 0
-            self.first_list_item = 0
-            self.inhibit_p = 0
-
-            # ignore processing instructions
-            if self.in_processing_instructions:
-                found = False
-                for pi in ("##", "#format", "#refresh", "#redirect", "#deprecated",
-                           "#pragma", "#form", "#acl", "#language"):
-                    if line.lower().startswith(pi):
-                        self.request.write(self.formatter.comment(line))
-                        found = True
-                        break
-                if not found:
-                    self.in_processing_instructions = 0
-                else:
-                    continue # do not parse this line
-            if self.in_pre:
-                # TODO: move this into function
-                # still looking for processing instructions
-                # TODO: use strings for pre state, not numbers
-                if self.in_pre == 1:
-                    self.parser = None
-                    parser_name = ''
-                    if (line.strip()[:2] == "#!"):
-                        parser_name = line.strip()[2:].split()[0]
-                        self.setParser(parser_name)
-
-                    if self.parser:
-                        self.in_pre = 2
-                        self.parser_lines = [line]
-                        self.parser_name = parser_name
-                        continue
-                    else:
-                        self.request.write(self._closeP() +
-                                           self.formatter.preformatted(1))
-                        self.in_pre = 3
-                if self.in_pre == 2:
-                    # processing mode
-                    endpos = line.find("}}}")
-                    if endpos == -1:
-                        self.parser_lines.append(line)
-                        continue
-                    if line[:endpos]:
-                        self.parser_lines.append(line[:endpos])
-                    
-                    # Close p before calling parser
-                    # TODO: do we really need this?
-                    self.request.write(self._closeP())
-                    res = self.formatter.parser(self.parser_name, self.parser_lines)
-                    self.request.write(res)
-                    del self.parser_lines
-                    self.in_pre = 0
-                    self.parser = None
-
-                    # send rest of line through regex machinery
-                    line = line[endpos+3:]
-                    if not line.strip(): # just in the case "}}} " when we only have blanks left...
-                        continue
-            else:
-                # we don't have \n as whitespace any more
-                # This is the space between lines we join to one paragraph
-                line += ' '
-                
-                # Paragraph break on empty lines
-                if not line.strip():
-                    if self.in_table:
-                        self.request.write(self.formatter.table(0))
-                        self.request.write(self._line_anchordef())
-                        self.in_table = 0
-                    # CHANGE: removed check for not self.list_types
-                    # p should close on every empty line
-                    if self.formatter.in_p:
-                        self.request.write(self.formatter.paragraph(0))
-                    self.line_is_empty = 1
-                    continue
-
-                # Check indent level
-                indent = indent_re.match(line)
-                indlen = len(indent.group(0))
-                indtype = "ul"
-                numtype = None
-                numstart = None
-                if indlen:
-                    match = number_re.match(line)
-                    if match:
-                        numtype, numstart = match.group(0).strip().split('.')
-                        numtype = numtype[0]
-
-                        if numstart and numstart[0] == "#":
-                            numstart = int(numstart[1:])
-                        else:
-                            numstart = None
-
-                        indtype = "ol"
-                    else:
-                        match = term_re.match(line)
-                        if match:
-                            indtype = "dl"
-
-                # output proper indentation tags
-                self.request.write(self._indent_to(indlen, indtype, numtype, numstart))
-
-                # Table mode
-                # TODO: move into function?                
-                if (not self.in_table and line[indlen:indlen + 2] == "||"
-                    and line[-3:] == "|| " and len(line) >= 5 + indlen):
-                    # Start table
-                    if self.list_types and not self.in_li:
-                        self.request.write(self.formatter.listitem(1, style="list-style-type:none"))
-                        ## CHANGE: no automatic p on li
-                        ##self.request.write(self.formatter.paragraph(1))
-                        self.in_li = 1
-                        
-                    # CHANGE: removed check for self.in_li
-                    # paragraph should end before table, always!
-                    if self.formatter.in_p:
-                        self.request.write(self.formatter.paragraph(0))
-                    attrs, attrerr = self._getTableAttrs(line[indlen+2:])
-                    self.request.write(self.formatter.table(1, attrs) + attrerr)
-                    self.in_table = True # self.lineno
-                elif (self.in_table and not
-                      # intra-table comments should not break a table
-                      (line[:2] == "##" or  
-                       line[indlen:indlen + 2] == "||" and
-                       line[-3:] == "|| " and
-                       len(line) >= 5 + indlen)):
-                    
-                    # Close table
-                    self.request.write(self.formatter.table(0))
-                    self.request.write(self._line_anchordef())
-                    self.in_table = 0
-                                            
-            # Scan line, format and write
-            formatted_line = self.scan(scan_re, line)
-            self.request.write(formatted_line)
-
-            if self.in_pre == 3:
-                self.request.write(self.formatter.linebreak())
-
-        # Close code displays, paragraphs, tables and open lists
-        self.request.write(self._undent())
-        if self.in_pre: self.request.write(self.formatter.preformatted(0))
-        if self.formatter.in_p: self.request.write(self.formatter.paragraph(0))
-        if self.in_table: self.request.write(self.formatter.table(0))
-
-    # Private helpers ------------------------------------------------------------
-    
-    def setParser(self, name):
-        """ Set parser to parser named 'name' """
-        try:
-            self.parser = wikiutil.importPlugin(self.request.cfg, "parser", name, "Parser")
-        except wikiutil.PluginMissingError:
-            self.parser = None
-
-
--- a/MoinMoin/parser/xslt.py	Tue May 16 13:10:13 2006 +0200
+++ /dev/null	Thu Jan 01 00:00:00 1970 +0000
@@ -1,146 +0,0 @@
-# -*- coding: iso-8859-1 -*-
-"""
-    MoinMoin - XML Parser
-
-    This parser was tested with 4Suite 1.0a4 and 1.0b1.
-
-    What's new:
-    * much cleaner code
-    * stylesheet can be extended to support other format:
-        e.g. Docbook parser using docbook->html .xsl stylesheet
-
-    @copyright: 2001, 2003 by Jürgen Hermann <jh@web.de>
-    @copyright: 2005 by Henry Ho <henryho167@hotmail.com>
-    @copyright: 2005 by MoinMoin:AlexanderSchremmer
-    @license: GNU GPL, see COPYING for details.
-"""
-
-# cStringIO cannot be used because it doesn't handle Unicode.
-import StringIO
-import re
-
-from MoinMoin import caching, config, wikiutil, Page
-
-Dependencies = []
-
-class Parser:
-    """ Send XML file formatted via XSLT. """
-    caching = 1
-    Dependencies = Dependencies
-
-    def __init__(self, raw, request, **kw):
-        self.raw = raw.encode(config.charset)
-        self.request = request
-        self.form = request.form
-        self._ = request.getText
-        self.base_scheme = 'wiki'
-        self.base_uri = 'wiki://Self/'
-        self.key = 'xslt'
-
-    def format(self, formatter):
-        """ Send the text. """
-        _ = self._
-
-        if not self.request.cfg.allow_xslt:
-            # use plain parser if XSLT is not allowed
-            # can be activated in wikiconfig.py
-            from MoinMoin.parser import plain
-            self.request.write(formatter.sysmsg(1) +
-                               formatter.rawHTML(_('XSLT option disabled, please look at HelpOnConfiguration.')) +
-                               formatter.sysmsg(0))
-            plain.Parser(self.raw, self.request).format(formatter)
-            return
-
-        try:
-            # try importing Ft from 4suite
-            # if import fails or its version is not 1.x, error msg
-            from Ft.Xml import __version__ as ft_version
-            assert ft_version.startswith('1.')
-        except (ImportError, AssertionError):
-            self.request.write(self.request.formatter.sysmsg(1) +
-                               self.request.formatter.text(_('XSLT processing is not available, please install 4suite 1.x.')) +
-                               self.request.formatter.sysmsg(0))
-        else:
-            from Ft.Lib import Uri
-            from Ft.Xml import InputSource
-            from Ft.Xml.Xslt.Processor import Processor
-            from Ft import FtException
-
-            msg = None
-
-            try:
-                # location of SchemeRegisteryResolver has changed since 1.0a4
-                if ft_version >= "1.0a4":
-                    import Ft.Lib.Resolvers # Do not remove! it looks unused, but breaks when removed!!!
-                                                                        
-                class MoinResolver(Uri.SchemeRegistryResolver):
-                    """ supports resolving self.base_uri for actual pages in MoinMoin """
-                    def __init__(self, handlers, base_scheme):
-                        Uri.SchemeRegistryResolver.__init__(self, handlers)
-                        self.supportedSchemes.append(base_scheme)
-
-                # setting up vars for xslt Processor
-                out_file = StringIO.StringIO()
-                wiki_resolver = MoinResolver(
-                                    handlers={self.base_scheme: self._resolve_page,},
-                                    base_scheme=self.base_scheme)
-                input_factory = InputSource.InputSourceFactory(resolver=wiki_resolver)
-
-                page_uri = self.base_uri + wikiutil.url_quote(formatter.page.page_name)
-                raw = self.raw.strip()
-
-                self.processor = Processor()
-                self.append_stylesheet() # hook, for extending this parser
-                self.processor.run(
-                    input_factory.fromString(raw, uri=page_uri),
-                    outputStream=out_file)
-                result = out_file.getvalue()
-                result = self.parse_result(result) # hook, for extending this parser
-
-            except FtException, msg:
-                etype = "XSLT"
-            except Uri.UriException, msg:
-                etype = "XSLT"
-            except IOError, msg:
-                etype = "I/O"
-
-            if msg:
-                text = wikiutil.escape(self.raw)
-                text = text.expandtabs()
-                text = text.replace('\n', '<br>\n')
-                text = text.replace(' ', '&nbsp;')
-                before = _('%(errortype)s processing error') % {'errortype': etype,}
-                title = u"<strong>%s: %s</strong><p>" % (before, msg)
-                self.request.write(title)
-                self.request.write(text.decode(config.charset))
-            else:
-                self.request.write(result)
-                cache = caching.CacheEntry(self.request, formatter.page, self.key)
-                cache.update(result)
-
-    def _resolve_page(self, uri, base):
-        """ URI will be resolved into StringIO with actual page content """
-        from Ft.Lib import Uri
-        base_uri = self.base_uri
-
-        if uri.startswith(base_uri):
-            pagename = uri[len(base_uri):]
-            page = Page.Page(self.request, pagename)
-            if page.exists():
-                result = StringIO.StringIO(page.getPageText().encode(config.charset))
-            else:
-                raise Uri.UriException(Uri.UriException.RESOURCE_ERROR, loc=uri,
-                                       msg='Page does not exist')
-        else:
-            result = Uri.UriResolverBase.resolve(self, uri, base)
-
-        return result
-
-    def append_stylesheet(self):
-        """ for other parsers based on xslt (i.e. docbook-xml) """
-        pass
-
-    def parse_result(self, result):
-        """ additional parsing to the resulting XSLT'ed result before saving """
-        return result
-
--- a/MoinMoin/wikiutil.py	Tue May 16 13:10:13 2006 +0200
+++ b/MoinMoin/wikiutil.py	Tue May 16 20:12:29 2006 +0200
@@ -755,12 +755,140 @@
 
 def pagelinkmarkup(pagename):
     """ return markup that can be used as link to page <pagename> """
-    from MoinMoin.parser.wiki import Parser
+    from MoinMoin.parser.text_moin_wiki import Parser
     if re.match(Parser.word_rule + "$", pagename):
         return pagename
     else:
         return u'["%s"]' % pagename
 
+# mimetype stuff ------------------------------------------------------------
+class MimeType(object):
+    """ represents a mimetype like text/plain """
+    sanitize_mapping = {
+        # this stuff is text, but got application/* for unknown reasons
+        ('application', 'docbook+xml'): ('text', 'docbook'),
+        ('application', 'x-latex'): ('text', 'latex'),
+        ('application', 'x-tex'): ('text', 'tex'),
+        ('application', 'javascript'): ('text', 'javascript'),
+    }
+    spoil_mapping = {} # inverse mapping of above
+    
+    def __init__(self, mimestr=None, filename=None):
+        self.major = self.minor = None # sanitized mime type and subtype
+        self.params = {} # parameters like "charset" or others
+        self.charset = None # this stays None until we know for sure!
+
+        for key, value in self.sanitize_mapping.items():
+            self.spoil_mapping[value] = key
+
+        if mimestr:
+            self.parse_mimetype(mimestr)
+        elif filename:
+            self.parse_filename(filename)
+    
+    def parse_filename(self, filename):
+        import mimetypes
+        mtype, encoding = mimetypes.guess_type(filename)
+        if mtype is None:
+            mtype = 'application/octet-stream'
+        self.parse_mimetype(mtype)
+        
+    def parse_mimetype(self, mimestr):
+        """ take a string like used in content-type and parse it into components,
+            alternatively it also can process some abbreviated string like "wiki"
+        """
+        parameters = mimestr.split(";")
+        parameters = [p.strip() for p in parameters]
+        mimetype, parameters = parameters[0], parameters[1:]
+        mimetype = mimetype.split('/')
+        if len(mimetype) >= 2:
+            major, minor = mimetype[:2] # we just ignore more than 2 parts
+        else:
+            major, minor = self.parse_format(mimetype[0])
+        self.major = major.lower()
+        self.minor = minor.lower()
+        for param in parameters:
+            key, value = param.split('=')
+            if value[0] == '"' and value[-1] == '"': # remove quotes
+                value = value[1:-1]
+            self.params[key.lower()] = value
+        if self.params.has_key('charset'):
+            self.charset = self.params['charset'].lower()
+        self.sanitize()
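+
+    # A minimal usage sketch of parse_mimetype (illustrative values only):
+    #   MimeType('text/html; charset="iso-8859-1"') ends up with major 'text',
+    #   minor 'html' and charset 'iso-8859-1', while
+    #   MimeType('application/docbook+xml') is sanitized to text/docbook.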
+            
+    def parse_format(self, format):
+        """ maps from what we currently use on-page in a #format xxx processing
+            instruction to a sanitized mimetype major, minor tuple.
+            can also be used later for easier entry by the user, so they can just
+            type "wiki" instead of "text/moin-wiki".
+        """
+        format = format.lower()
+        if format in ('plain', 'csv', 'rst', 'docbook', 'latex', 'tex', 'html', 'css',
+                      'xml', 'python', 'perl', 'php', 'ruby', 'javascript',
+                      'cplusplus', 'java', 'pascal', 'diff', 'gettext', 'xslt', ):
+            mimetype = 'text', format
+        else:
+            mapping = {
+                'wiki': ('text', 'moin-wiki'),
+                'irc': ('text', 'irssi'),
+            }
+            try:
+                mimetype = mapping[format]
+            except KeyError:
+                mimetype = 'text', 'x-%s' % format
+        return mimetype
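+
+    # Sketch of the #format name mapping (illustrative values only):
+    #   parse_format('python') -> ('text', 'python')
+    #   parse_format('wiki')   -> ('text', 'moin-wiki')
+    #   parse_format('foo')    -> ('text', 'x-foo')  # unknown names get an x- prefix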
+
+    def sanitize(self):
+        """ convert to some representation that makes sense - this is not necessarily
+            conformant to /etc/mime.types or IANA listing, but if something is
+            readable text, we will return some text/* mimetype, not application/*,
+            because we need text/plain as fallback and not application/octet-stream.
+        """
+        self.major, self.minor = self.sanitize_mapping.get((self.major, self.minor), (self.major, self.minor))
+
+    def spoil(self):
+        """ this returns something conformant to /etc/mime.type or IANA as a string,
+            kind of inverse operation of sanitize(), but doesn't change self
+        """
+        major, minor = self.spoil_mapping.get((self.major, self.minor), (self.major, self.minor))
+        return self.content_type(major, minor)
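+
+    # sanitize()/spoil() are inverses (illustrative values only): a MimeType
+    # built from 'application/x-latex' is held internally as text/latex, and
+    # spoil() hands back the conformant 'application/x-latex' string.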
+
+    def content_type(self, major=None, minor=None, charset=None, params=None):
+        """ return a string suitable for Content-Type header
+        """
+        major = major or self.major
+        minor = minor or self.minor
+        params = params or self.params or {}
+        if major == 'text':
+            charset = charset or self.charset or params.get('charset', config.charset)
+            params['charset'] = charset
+        mimestr = "%s/%s" % (major, minor)
+        params = ['%s="%s"' % (key.lower(), value) for key, value in params.items()]
+        params.insert(0, mimestr)
+        return "; ".join(params)
+
+    def mime_type(self):
+        """ return a string major/minor only, no params """
+        return "%s/%s" % (self.major, self.minor)
+
+    def module_name(self):
+        """ convert this mimetype to a string useable as python module name,
+            we yield the exact module name first and then proceed to shorter
+            module names (useful for falling back to them, if the more special
+            module is not found) - e.g. first "text_python", next "text".
+            Finally, we yield "application_octet_stream" as the most general
+            mimetype we have.
+            Hint: the fallback handler module for text/* should be implemented
+                  in module "text" (not "text_plain")
+        """
+        mimetype = self.mime_type()
+        modname = mimetype.replace("/", "_").replace("-", "_").replace(".", "_")
+        fragments = modname.split('_')
+        for length in range(len(fragments), 0, -1):
+            yield "_".join(fragments[:length])
+        yield "application_octet_stream"
+
+
 #############################################################################
 ### Plugins
 #############################################################################
--- a/docs/CHANGES	Tue May 16 13:10:13 2006 +0200
+++ b/docs/CHANGES	Tue May 16 20:12:29 2006 +0200
@@ -55,6 +55,10 @@
       WSGI not
       FCGI not
       TWISTED not
+    * added wikiutil.MimeType class (works internally with sanitized mime
+      types because the official ones suck)
+    * renamed parsers to module names representing sane mimetypes, e.g.:
+      parser.wiki -> parser.text_moin_wiki
 
   New Features: