changeset 497:ef41e35985dc

new scripting method, using 'moin' command. new style migration scripts. imported from: moin--main--1.5--patch-501
author Thomas Waldmann <tw@waldmann-edv.de>
date Sat, 25 Mar 2006 22:31:55 +0000
parents 1f8a60be2d6d
children fa788cd35485
files ChangeLog MoinMoin/_tests/test_repair_language.py MoinMoin/script/.cvsignore MoinMoin/script/__init__.py MoinMoin/script/_util.py MoinMoin/script/cli/__init__.py MoinMoin/script/cli/show.py MoinMoin/script/export/__init__.py MoinMoin/script/export/dump.py MoinMoin/script/import/__init__.py MoinMoin/script/import/irclog.py MoinMoin/script/lupy/__init__.py MoinMoin/script/lupy/build.py MoinMoin/script/lupy/optimize.py MoinMoin/script/migration/1050300.py MoinMoin/script/migration/1050301.py MoinMoin/script/migration/__init__.py MoinMoin/script/migration/data.py MoinMoin/script/migration/migutil.py MoinMoin/script/moin.py MoinMoin/script/old/accounts/.cvsignore MoinMoin/script/old/accounts/__init__.py MoinMoin/script/old/accounts/moin_usercheck-jh-new.py MoinMoin/script/old/accounts/moin_usercheck.py MoinMoin/script/old/cachecleaner.py MoinMoin/script/old/globaledit.py MoinMoin/script/old/packages/__init__.py MoinMoin/script/old/packages/create_pagepacks.py MoinMoin/script/old/pagescleaner.py MoinMoin/script/old/print_stats.py MoinMoin/script/old/reducewiki/__init__.py MoinMoin/script/old/reducewiki/reducewiki.py MoinMoin/script/old/repair_language.py MoinMoin/script/old/unicode/__init__.py MoinMoin/script/old/unicode/mk_chartypes.py MoinMoin/script/old/xmlrpc-tools/.cvsignore MoinMoin/script/old/xmlrpc-tools/HelloWorld.py MoinMoin/script/old/xmlrpc-tools/UpdateGroupTest.py MoinMoin/script/old/xmlrpc-tools/WhoAmI.py MoinMoin/script/old/xmlrpc-tools/__init__.py MoinMoin/script/old/xmlrpc-tools/get_es_pages.py MoinMoin/script/old/xmlrpc-tools/getmasterpages2.py MoinMoin/script/old/xmlrpc-tools/getsystempages.py MoinMoin/script/old/xmlrpc-tools/getsystempages2.py MoinMoin/script/old/xmlrpc-tools/putPageTest.py MoinMoin/script/old/xmlrpc-tools/wikibackup.py MoinMoin/script/old/xmlrpc-tools/wikirestore.py MoinMoin/scripts/.cvsignore MoinMoin/scripts/__init__.py MoinMoin/scripts/_util.py MoinMoin/scripts/accounts/.cvsignore MoinMoin/scripts/accounts/__init__.py 
MoinMoin/scripts/accounts/moin_usercheck-jh-new.py MoinMoin/scripts/accounts/moin_usercheck.py MoinMoin/scripts/cachecleaner.py MoinMoin/scripts/globaledit.py MoinMoin/scripts/import/IrcLogImporter.py MoinMoin/scripts/import/__init__.py MoinMoin/scripts/migration/12_to_13_mig01.py MoinMoin/scripts/migration/12_to_13_mig02.py MoinMoin/scripts/migration/12_to_13_mig03.py MoinMoin/scripts/migration/12_to_13_mig04.py MoinMoin/scripts/migration/12_to_13_mig05.py MoinMoin/scripts/migration/12_to_13_mig06.py MoinMoin/scripts/migration/12_to_13_mig07.py MoinMoin/scripts/migration/12_to_13_mig08.py MoinMoin/scripts/migration/12_to_13_mig09.py MoinMoin/scripts/migration/12_to_13_mig10.py MoinMoin/scripts/migration/12_to_13_mig11.py MoinMoin/scripts/migration/__init__.py MoinMoin/scripts/migration/migutil.py MoinMoin/scripts/moin MoinMoin/scripts/moin_build_index.py MoinMoin/scripts/moin_dump.py MoinMoin/scripts/moin_optimize_index.py MoinMoin/scripts/packages/__init__.py MoinMoin/scripts/packages/create_pagepacks.py MoinMoin/scripts/pagescleaner.py MoinMoin/scripts/print_stats.py MoinMoin/scripts/reducewiki/__init__.py MoinMoin/scripts/reducewiki/reducewiki.py MoinMoin/scripts/repair_language.py MoinMoin/scripts/unicode/__init__.py MoinMoin/scripts/unicode/mk_chartypes.py MoinMoin/scripts/xmlrpc-tools/.cvsignore MoinMoin/scripts/xmlrpc-tools/HelloWorld.py MoinMoin/scripts/xmlrpc-tools/UpdateGroupTest.py MoinMoin/scripts/xmlrpc-tools/WhoAmI.py MoinMoin/scripts/xmlrpc-tools/__init__.py MoinMoin/scripts/xmlrpc-tools/get_es_pages.py MoinMoin/scripts/xmlrpc-tools/getmasterpages2.py MoinMoin/scripts/xmlrpc-tools/getsystempages.py MoinMoin/scripts/xmlrpc-tools/getsystempages2.py MoinMoin/scripts/xmlrpc-tools/putPageTest.py MoinMoin/scripts/xmlrpc-tools/wikibackup.py MoinMoin/scripts/xmlrpc-tools/wikirestore.py MoinMoin/userform.py MoinMoin/util/filesys.py MoinMoin/wikiutil.py docs/CHANGES docs/README.migration setup.py
diffstat 102 files changed, 2922 insertions(+), 4137 deletions(-) [+]
line wrap: on
line diff
--- a/ChangeLog	Sat Mar 25 11:18:58 2006 +0000
+++ b/ChangeLog	Sat Mar 25 22:31:55 2006 +0000
@@ -2,6 +2,224 @@
 # arch-tag: automatic-ChangeLog--arch@arch.thinkmo.de--2003-archives/moin--main--1.5
 #
 
+2006-03-25 23:31:55 GMT	Thomas Waldmann <tw@waldmann-edv.de>	patch-501
+
+    Summary:
+      new scripting method, using 'moin' command. new style migration scripts.
+    Revision:
+      moin--main--1.5--patch-501
+
+    new scripting method, using 'moin' command. new style migration scripts.
+    
+
+    new files:
+     MoinMoin/script/.arch-ids/moin.py.id
+     MoinMoin/script/cli/.arch-ids/=id
+     MoinMoin/script/cli/.arch-ids/__init__.py.id
+     MoinMoin/script/cli/.arch-ids/show.py.id
+     MoinMoin/script/cli/__init__.py MoinMoin/script/cli/show.py
+     MoinMoin/script/export/.arch-ids/=id
+     MoinMoin/script/export/.arch-ids/__init__.py.id
+     MoinMoin/script/export/__init__.py
+     MoinMoin/script/import/.arch-ids/=id
+     MoinMoin/script/import/.arch-ids/__init__.py.id
+     MoinMoin/script/import/__init__.py
+     MoinMoin/script/lupy/.arch-ids/=id
+     MoinMoin/script/lupy/.arch-ids/__init__.py.id
+     MoinMoin/script/lupy/__init__.py
+     MoinMoin/script/migration/.arch-ids/1050300.py.id
+     MoinMoin/script/migration/.arch-ids/1050301.py.id
+     MoinMoin/script/migration/.arch-ids/data.py.id
+     MoinMoin/script/migration/1050300.py
+     MoinMoin/script/migration/1050301.py
+     MoinMoin/script/migration/data.py MoinMoin/script/moin.py
+     MoinMoin/script/old/.arch-ids/=id
+
+    removed files:
+     MoinMoin/scripts/.arch-ids/moin.id
+     MoinMoin/scripts/import/.arch-ids/=id
+     MoinMoin/scripts/import/.arch-ids/__init__.py.id
+     MoinMoin/scripts/import/__init__.py
+     MoinMoin/scripts/migration/.arch-ids/12_to_13_mig01.py.id
+     MoinMoin/scripts/migration/.arch-ids/12_to_13_mig02.py.id
+     MoinMoin/scripts/migration/.arch-ids/12_to_13_mig03.py.id
+     MoinMoin/scripts/migration/.arch-ids/12_to_13_mig04.py.id
+     MoinMoin/scripts/migration/.arch-ids/12_to_13_mig05.py.id
+     MoinMoin/scripts/migration/.arch-ids/12_to_13_mig06.py.id
+     MoinMoin/scripts/migration/.arch-ids/12_to_13_mig07.py.id
+     MoinMoin/scripts/migration/.arch-ids/12_to_13_mig08.py.id
+     MoinMoin/scripts/migration/.arch-ids/12_to_13_mig09.py.id
+     MoinMoin/scripts/migration/.arch-ids/12_to_13_mig10.py.id
+     MoinMoin/scripts/migration/.arch-ids/12_to_13_mig11.py.id
+     MoinMoin/scripts/migration/12_to_13_mig01.py
+     MoinMoin/scripts/migration/12_to_13_mig02.py
+     MoinMoin/scripts/migration/12_to_13_mig03.py
+     MoinMoin/scripts/migration/12_to_13_mig04.py
+     MoinMoin/scripts/migration/12_to_13_mig05.py
+     MoinMoin/scripts/migration/12_to_13_mig06.py
+     MoinMoin/scripts/migration/12_to_13_mig07.py
+     MoinMoin/scripts/migration/12_to_13_mig08.py
+     MoinMoin/scripts/migration/12_to_13_mig09.py
+     MoinMoin/scripts/migration/12_to_13_mig10.py
+     MoinMoin/scripts/migration/12_to_13_mig11.py
+     MoinMoin/scripts/moin
+
+    modified files:
+     ChangeLog MoinMoin/_tests/test_repair_language.py
+     MoinMoin/script/__init__.py MoinMoin/script/_util.py
+     MoinMoin/script/export/dump.py
+     MoinMoin/script/import/irclog.py MoinMoin/script/lupy/build.py
+     MoinMoin/script/lupy/optimize.py
+     MoinMoin/script/migration/__init__.py
+     MoinMoin/script/old/accounts/moin_usercheck-jh-new.py
+     MoinMoin/userform.py MoinMoin/util/filesys.py
+     MoinMoin/wikiutil.py docs/CHANGES docs/README.migration
+     setup.py
+
+    renamed files:
+     MoinMoin/scripts/.arch-ids/.cvsignore.id
+       ==> MoinMoin/script/.arch-ids/.cvsignore.id
+     MoinMoin/scripts/.arch-ids/=id
+       ==> MoinMoin/script/.arch-ids/=id
+     MoinMoin/scripts/.arch-ids/__init__.py.id
+       ==> MoinMoin/script/.arch-ids/__init__.py.id
+     MoinMoin/scripts/.arch-ids/_util.py.id
+       ==> MoinMoin/script/.arch-ids/_util.py.id
+     MoinMoin/scripts/.arch-ids/cachecleaner.py.id
+       ==> MoinMoin/script/old/.arch-ids/cachecleaner.py.id
+     MoinMoin/scripts/.arch-ids/globaledit.py.id
+       ==> MoinMoin/script/old/.arch-ids/globaledit.py.id
+     MoinMoin/scripts/.arch-ids/moin_build_index.py.id
+       ==> MoinMoin/script/lupy/.arch-ids/build.py.id
+     MoinMoin/scripts/.arch-ids/moin_dump.py.id
+       ==> MoinMoin/script/export/.arch-ids/dump.py.id
+     MoinMoin/scripts/.arch-ids/moin_optimize_index.py.id
+       ==> MoinMoin/script/lupy/.arch-ids/optimize.py.id
+     MoinMoin/scripts/.arch-ids/pagescleaner.py.id
+       ==> MoinMoin/script/old/.arch-ids/pagescleaner.py.id
+     MoinMoin/scripts/.arch-ids/print_stats.py.id
+       ==> MoinMoin/script/old/.arch-ids/print_stats.py.id
+     MoinMoin/scripts/.arch-ids/repair_language.py.id
+       ==> MoinMoin/script/old/.arch-ids/repair_language.py.id
+     MoinMoin/scripts/accounts/.arch-ids/.cvsignore.id
+       ==> MoinMoin/script/old/accounts/.arch-ids/.cvsignore.id
+     MoinMoin/scripts/accounts/.arch-ids/=id
+       ==> MoinMoin/script/old/accounts/.arch-ids/=id
+     MoinMoin/scripts/accounts/.arch-ids/__init__.py.id
+       ==> MoinMoin/script/old/accounts/.arch-ids/__init__.py.id
+     MoinMoin/scripts/accounts/.arch-ids/moin_usercheck-jh-new.py.id
+       ==> MoinMoin/script/old/accounts/.arch-ids/moin_usercheck-jh-new.py.id
+     MoinMoin/scripts/accounts/.arch-ids/moin_usercheck.py.id
+       ==> MoinMoin/script/old/accounts/.arch-ids/moin_usercheck.py.id
+     MoinMoin/scripts/cachecleaner.py
+       ==> MoinMoin/script/old/cachecleaner.py
+     MoinMoin/scripts/globaledit.py
+       ==> MoinMoin/script/old/globaledit.py
+     MoinMoin/scripts/import/.arch-ids/IrcLogImporter.py.id
+       ==> MoinMoin/script/import/.arch-ids/irclog.py.id
+     MoinMoin/scripts/import/IrcLogImporter.py
+       ==> MoinMoin/script/import/irclog.py
+     MoinMoin/scripts/migration/.arch-ids/=id
+       ==> MoinMoin/script/migration/.arch-ids/=id
+     MoinMoin/scripts/migration/.arch-ids/__init__.py.id
+       ==> MoinMoin/script/migration/.arch-ids/__init__.py.id
+     MoinMoin/scripts/migration/.arch-ids/migutil.py.id
+       ==> MoinMoin/script/migration/.arch-ids/migutil.py.id
+     MoinMoin/scripts/moin_build_index.py
+       ==> MoinMoin/script/lupy/build.py
+     MoinMoin/scripts/moin_dump.py
+       ==> MoinMoin/script/export/dump.py
+     MoinMoin/scripts/moin_optimize_index.py
+       ==> MoinMoin/script/lupy/optimize.py
+     MoinMoin/scripts/packages/.arch-ids/=id
+       ==> MoinMoin/script/old/packages/.arch-ids/=id
+     MoinMoin/scripts/packages/.arch-ids/__init__.py.id
+       ==> MoinMoin/script/old/packages/.arch-ids/__init__.py.id
+     MoinMoin/scripts/packages/.arch-ids/create_pagepacks.py.id
+       ==> MoinMoin/script/old/packages/.arch-ids/create_pagepacks.py.id
+     MoinMoin/scripts/pagescleaner.py
+       ==> MoinMoin/script/old/pagescleaner.py
+     MoinMoin/scripts/print_stats.py
+       ==> MoinMoin/script/old/print_stats.py
+     MoinMoin/scripts/reducewiki/.arch-ids/=id
+       ==> MoinMoin/script/old/reducewiki/.arch-ids/=id
+     MoinMoin/scripts/reducewiki/.arch-ids/__init__.py.id
+       ==> MoinMoin/script/old/reducewiki/.arch-ids/__init__.py.id
+     MoinMoin/scripts/reducewiki/.arch-ids/reducewiki.py.id
+       ==> MoinMoin/script/old/reducewiki/.arch-ids/reducewiki.py.id
+     MoinMoin/scripts/repair_language.py
+       ==> MoinMoin/script/old/repair_language.py
+     MoinMoin/scripts/unicode/.arch-ids/=id
+       ==> MoinMoin/script/old/unicode/.arch-ids/=id
+     MoinMoin/scripts/unicode/.arch-ids/__init__.py.id
+       ==> MoinMoin/script/old/unicode/.arch-ids/__init__.py.id
+     MoinMoin/scripts/unicode/.arch-ids/mk_chartypes.py.id
+       ==> MoinMoin/script/old/unicode/.arch-ids/mk_chartypes.py.id
+     MoinMoin/scripts/xmlrpc-tools/.arch-ids/.cvsignore.id
+       ==> MoinMoin/script/old/xmlrpc-tools/.arch-ids/.cvsignore.id
+     MoinMoin/scripts/xmlrpc-tools/.arch-ids/=id
+       ==> MoinMoin/script/old/xmlrpc-tools/.arch-ids/=id
+     MoinMoin/scripts/xmlrpc-tools/.arch-ids/HelloWorld.py.id
+       ==> MoinMoin/script/old/xmlrpc-tools/.arch-ids/HelloWorld.py.id
+     MoinMoin/scripts/xmlrpc-tools/.arch-ids/UpdateGroupTest.py.id
+       ==> MoinMoin/script/old/xmlrpc-tools/.arch-ids/UpdateGroupTest.py.id
+     MoinMoin/scripts/xmlrpc-tools/.arch-ids/WhoAmI.py.id
+       ==> MoinMoin/script/old/xmlrpc-tools/.arch-ids/WhoAmI.py.id
+     MoinMoin/scripts/xmlrpc-tools/.arch-ids/__init__.py.id
+       ==> MoinMoin/script/old/xmlrpc-tools/.arch-ids/__init__.py.id
+     MoinMoin/scripts/xmlrpc-tools/.arch-ids/get_es_pages.py.id
+       ==> MoinMoin/script/old/xmlrpc-tools/.arch-ids/get_es_pages.py.id
+     MoinMoin/scripts/xmlrpc-tools/.arch-ids/getmasterpages2.py.id
+       ==> MoinMoin/script/old/xmlrpc-tools/.arch-ids/getmasterpages2.py.id
+     MoinMoin/scripts/xmlrpc-tools/.arch-ids/getsystempages.py.id
+       ==> MoinMoin/script/old/xmlrpc-tools/.arch-ids/getsystempages.py.id
+     MoinMoin/scripts/xmlrpc-tools/.arch-ids/getsystempages2.py.id
+       ==> MoinMoin/script/old/xmlrpc-tools/.arch-ids/getsystempages2.py.id
+     MoinMoin/scripts/xmlrpc-tools/.arch-ids/putPageTest.py.id
+       ==> MoinMoin/script/old/xmlrpc-tools/.arch-ids/putPageTest.py.id
+     MoinMoin/scripts/xmlrpc-tools/.arch-ids/wikibackup.py.id
+       ==> MoinMoin/script/old/xmlrpc-tools/.arch-ids/wikibackup.py.id
+     MoinMoin/scripts/xmlrpc-tools/.arch-ids/wikirestore.py.id
+       ==> MoinMoin/script/old/xmlrpc-tools/.arch-ids/wikirestore.py.id
+
+    new directories:
+     MoinMoin/script/.arch-ids MoinMoin/script/cli
+     MoinMoin/script/cli/.arch-ids MoinMoin/script/export
+     MoinMoin/script/export/.arch-ids MoinMoin/script/import
+     MoinMoin/script/import/.arch-ids MoinMoin/script/lupy
+     MoinMoin/script/lupy/.arch-ids
+     MoinMoin/script/migration/.arch-ids MoinMoin/script/old
+     MoinMoin/script/old/.arch-ids
+     MoinMoin/script/old/accounts/.arch-ids
+     MoinMoin/script/old/packages/.arch-ids
+     MoinMoin/script/old/reducewiki/.arch-ids
+     MoinMoin/script/old/unicode/.arch-ids
+     MoinMoin/script/old/xmlrpc-tools/.arch-ids
+
+    removed directories:
+     MoinMoin/scripts/.arch-ids MoinMoin/scripts/accounts/.arch-ids
+     MoinMoin/scripts/import MoinMoin/scripts/import/.arch-ids
+     MoinMoin/scripts/migration/.arch-ids
+     MoinMoin/scripts/packages/.arch-ids
+     MoinMoin/scripts/reducewiki/.arch-ids
+     MoinMoin/scripts/unicode/.arch-ids
+     MoinMoin/scripts/xmlrpc-tools/.arch-ids
+
+    renamed directories:
+     MoinMoin/scripts
+       ==> MoinMoin/script
+     MoinMoin/scripts/accounts
+       ==> MoinMoin/script/old/accounts
+     MoinMoin/scripts/packages
+       ==> MoinMoin/script/old/packages
+     MoinMoin/scripts/reducewiki
+       ==> MoinMoin/script/old/reducewiki
+     MoinMoin/scripts/unicode
+       ==> MoinMoin/script/old/unicode
+     MoinMoin/scripts/xmlrpc-tools
+       ==> MoinMoin/script/old/xmlrpc-tools
+
+
 2006-03-25 12:18:58 GMT	Thomas Waldmann <tw@waldmann-edv.de>	patch-500
 
     Summary:
--- a/MoinMoin/_tests/test_repair_language.py	Sat Mar 25 11:18:58 2006 +0000
+++ b/MoinMoin/_tests/test_repair_language.py	Sat Mar 25 22:31:55 2006 +0000
@@ -1,6 +1,6 @@
 # -*- coding: utf-8 -*-
 """
-    MoinMoin - MoinMoin.scripts.repair_language tests
+    MoinMoin - MoinMoin.script.repair_language tests
 
     @copyright: 2003-2004 by Nir Soffer <nirs@freeshell.org>
     @license: GNU GPL, see COPYING for details.
@@ -8,7 +8,7 @@
 
 import unittest
 
-from MoinMoin.scripts.repair_language import repairText
+from MoinMoin.script.repair_language import repairText
 
 
 class RepairTestCase(unittest.TestCase):
--- /dev/null	Thu Jan 01 00:00:00 1970 +0000
+++ b/MoinMoin/script/.cvsignore	Sat Mar 25 22:31:55 2006 +0000
@@ -0,0 +1,4 @@
+*.pyo
+*.pyc
+{arch}
+.arch-ids
--- /dev/null	Thu Jan 01 00:00:00 1970 +0000
+++ b/MoinMoin/script/__init__.py	Sat Mar 25 22:31:55 2006 +0000
@@ -0,0 +1,14 @@
+# -*- coding: iso-8859-1 -*-
+"""
+    MoinMoin - Extension Script Package
+
+    @copyright: 2006 by Thomas Waldmann
+    @license: GNU GPL, see COPYING for details.
+"""
+
+from MoinMoin.util import pysupport
+
+# create a list of extension scripts from the subpackage directory
+extension_scripts = pysupport.getPackageModules(__file__)
+modules = extension_scripts
+
--- /dev/null	Thu Jan 01 00:00:00 1970 +0000
+++ b/MoinMoin/script/_util.py	Sat Mar 25 22:31:55 2006 +0000
@@ -0,0 +1,142 @@
+# -*- coding: iso-8859-1 -*-
+"""
+    MoinMoin - Command line utilities
+
+    @copyright: 2000, 2001, 2002 by Jürgen Hermann <jh@web.de>
+    @license: GNU GPL, see COPYING for details.
+"""
+
+import os, sys, time
+
+flag_quiet = 0
+script_module = '__main__'
+
+#############################################################################
+### Logging
+#############################################################################
+
+def fatal(msgtext, **kw):
+    """ Print error msg to stderr and exit. """
+    sys.stderr.write("FATAL ERROR: " + msgtext + "\n")
+    if kw.get('usage', 0):
+        maindict = vars(sys.modules[script_module])
+        if maindict.has_key('usage'):
+            maindict['usage']()
+    sys.exit(1)
+
+
+def log(msgtext):
+    """ Optionally print error msg to stderr. """
+    if not flag_quiet:
+        sys.stderr.write(msgtext + "\n")
+
+
+#############################################################################
+### Commandline Support
+#############################################################################
+
+class Script:
+    def __init__(self, script, usage, argv=None, def_values=None):
+        #print "argv:", argv, "def_values:", repr(def_values)
+        if argv is None:
+            self.argv = sys.argv[1:]
+        else:
+            self.argv = argv
+        self.def_values = def_values
+        self.script_module = sys.modules[script]
+
+        global _start_time
+        _start_time = time.clock()
+
+        import optparse
+        from MoinMoin import version
+
+        cmd = self.script_module.__name__.split('.')[-1].replace('_', '-')
+        rev = "%s %s [%s]" % (version.project, version.release, version.revision)
+        sys.argv[0] = cmd
+
+        self.parser = optparse.OptionParser(
+            usage="%(cmd)s %(usage)s\n\n" % {'cmd': cmd, 'usage': usage, },
+            version=rev)
+        self.parser.allow_interspersed_args = False
+        if def_values:
+            self.parser.set_defaults(**def_values.__dict__)
+        self.parser.add_option(
+            "-q", "--quiet", 
+            action="store_true", dest="quiet",
+            help="Be quiet (no informational messages)"
+        )
+        self.parser.add_option(
+            "--show-timing", 
+            action="store_true", dest="show_timing", default=False,
+            help="Show timing values [default: %default]"
+        )
+
+    def run(self, showtime=1):
+        """ Run the main function of a command. """
+        global flag_quiet
+        try:
+            try:
+                self.options, self.args = self.parser.parse_args(self.argv)
+                flag_quiet = self.options.quiet
+                self.mainloop()
+            except KeyboardInterrupt:
+                log("*** Interrupted by user!")
+            except SystemExit:
+                showtime = 0
+                raise
+        finally:
+            if showtime:
+                self.logRuntime()
+
+    def logRuntime(self):
+        """ Print the total command run time. """
+        if self.options.show_timing:
+            log("Needed %.3f secs." % (time.clock() - _start_time,))
+
+
+class MoinScript(Script):
+    """ Moin main script class """
+
+    def __init__(self, argv=None, def_values=None):
+        Script.__init__(self, __name__, "[options]", argv, def_values)
+        # those are options potentially useful for all sub-commands:
+        self.parser.add_option(
+            "--config-dir", metavar="DIR", dest="config_dir",
+            help=("Path to the directory containing the wiki "
+                  "configuration files. [default: current directory]")
+        )
+        self.parser.add_option(
+            "--wiki-url", metavar="WIKIURL", dest="wiki_url",
+            help="URL of a single wiki to migrate e.g. localhost/mywiki/ [default: CLI]"
+        )
+        self.parser.add_option(
+            "--page", dest="page", default='',
+            help="wiki page name [default: %default]"
+        )
+    
+    def init_request(self):
+        """ create request """
+        from MoinMoin.request import RequestCLI
+        if self.options.wiki_url:
+            self.request = RequestCLI(self.options.wiki_url, self.options.page)
+        else:
+            self.request = RequestCLI(pagename=self.options.page)
+        
+    def mainloop(self):
+        # Insert config dir or the current directory to the start of the path.
+        config_dir = self.options.config_dir
+        if config_dir and not os.path.isdir(config_dir):
+            fatal("bad path given to --config-dir option")
+        sys.path.insert(0, os.path.abspath(config_dir or os.curdir))
+
+        args = self.args
+        if len(args) < 2:
+            self.parser.error("you must specify a command module and name.")
+            sys.exit(1)
+
+        cmd_module, cmd_name = args[:2]
+        from MoinMoin import wikiutil
+        plugin_class = wikiutil.importBuiltinPlugin('script.%s' % cmd_module, cmd_name, 'PluginScript')
+        plugin_class(args[2:], self.options).run() # all starts again there
+
--- /dev/null	Thu Jan 01 00:00:00 1970 +0000
+++ b/MoinMoin/script/cli/__init__.py	Sat Mar 25 22:31:55 2006 +0000
@@ -0,0 +1,14 @@
+# -*- coding: iso-8859-1 -*-
+"""
+    MoinMoin - CLI Script Package
+
+    @copyright: 2006 by Thomas Waldmann
+    @license: GNU GPL, see COPYING for details.
+"""
+
+from MoinMoin.util import pysupport
+
+# create a list of extension scripts from the subpackage directory
+migration_scripts = pysupport.getPackageModules(__file__)
+modules = migration_scripts
+
--- /dev/null	Thu Jan 01 00:00:00 1970 +0000
+++ b/MoinMoin/script/cli/show.py	Sat Mar 25 22:31:55 2006 +0000
@@ -0,0 +1,26 @@
+#!/usr/bin/env python
+# -*- coding: iso-8859-1 -*-
+"""
+    MoinMoin - cli show script
+
+    Just run a CLI request and show the output.
+    Currently, we require --page option for the pagename, this is ugly, but
+    matches the RequestCLI interface...
+               
+    @copyright: 2006 by Thomas Waldmann
+    @license: GNU GPL, see COPYING for details.
+"""
+
+from MoinMoin.request import RequestCLI
+from MoinMoin.script._util import MoinScript
+
+class PluginScript(MoinScript):
+    """ show page script class """
+
+    def __init__(self, argv, def_values):
+        MoinScript.__init__(self, argv, def_values)
+    
+    def mainloop(self):
+        self.init_request()
+        self.request.run()
+
--- /dev/null	Thu Jan 01 00:00:00 1970 +0000
+++ b/MoinMoin/script/export/__init__.py	Sat Mar 25 22:31:55 2006 +0000
@@ -0,0 +1,14 @@
+# -*- coding: iso-8859-1 -*-
+"""
+    MoinMoin - Export Script Package
+
+    @copyright: 2006 by Thomas Waldmann
+    @license: GNU GPL, see COPYING for details.
+"""
+
+from MoinMoin.util import pysupport
+
+# create a list of extension scripts from the subpackage directory
+export_scripts = pysupport.getPackageModules(__file__)
+modules = export_scripts
+
--- /dev/null	Thu Jan 01 00:00:00 1970 +0000
+++ b/MoinMoin/script/export/dump.py	Sat Mar 25 22:31:55 2006 +0000
@@ -0,0 +1,183 @@
+# -*- coding: iso-8859-1 -*-
+"""
+    MoinMoin - Dump a MoinMoin wiki to static pages
+
+    You must run this script as owner of the wiki files, usually this is the
+    web server user.
+
+    @copyright: 2002-2004 by Jürgen Hermann <jh@web.de>,
+                2005-2006 by Thomas Waldmann
+    @license: GNU GPL, see COPYING for details.
+
+"""
+
+import sys, os, time, StringIO, codecs, shutil, re, errno
+
+from MoinMoin import config, wikiutil, Page
+from MoinMoin.script import _util
+from MoinMoin.script._util import MoinScript
+from MoinMoin.action import AttachFile
+
+url_prefix = "."
+HTML_SUFFIX = ".html"
+
+page_template = u'''<!DOCTYPE HTML PUBLIC "-//W3C//DTD HTML 4.01//EN" "http://www.w3.org/TR/html4/strict.dtd">
+<html>
+<head>
+<meta http-equiv="content-type" content="text/html; charset=%(charset)s">
+<title>%(pagename)s</title>
+<link rel="stylesheet" type="text/css" media="all" charset="utf-8" href="%(theme)s/css/common.css">
+<link rel="stylesheet" type="text/css" media="screen" charset="utf-8" href="%(theme)s/css/screen.css">
+<link rel="stylesheet" type="text/css" media="print" charset="utf-8" href="%(theme)s/css/print.css">
+</head>
+<body>
+<table>
+<tr>
+<td>
+%(logo_html)s
+</td>
+<td>
+%(navibar_html)s
+</td>
+</tr>
+</table>
+<hr>
+<div id="page">
+<h1 id="title">%(pagename)s</h1>
+%(pagehtml)s
+</div>
+<hr>
+%(timestamp)s
+</body>
+</html>
+'''
+
+def _attachment(request, pagename, filename, outputdir):
+    source_dir = AttachFile.getAttachDir(request, pagename)
+    source_file = os.path.join(source_dir, filename)
+    dest_dir = os.path.join(outputdir, "attachments", wikiutil.quoteWikinameFS(pagename))
+    dest_file = os.path.join(dest_dir, filename)
+    dest_url = "attachments/%s/%s" % (wikiutil.quoteWikinameFS(pagename), filename)
+    if os.access(source_file, os.R_OK):
+        if not os.access(dest_dir, os.F_OK):
+            try:
+                os.makedirs(dest_dir)
+            except:
+                _util.fatal("Cannot create attachment directory '%s'" % dest_dir)
+        elif not os.path.isdir(dest_dir):
+            _util.fatal("'%s' is not a directory" % dest_dir)
+
+        shutil.copyfile(source_file, dest_file)
+        _util.log('Writing "%s"...' % dest_url)
+        return dest_url
+    else:
+        return ""
+  
+
+class PluginScript(MoinScript):
+    """ Dump script class """
+    
+    def __init__(self, argv=None, def_values=None):
+        MoinScript.__init__(self, argv, def_values)
+        self.parser.add_option(
+            "-t", "--target-dir", dest="target_dir",
+            help="Write html dump to DIRECTORY"
+        )
+
+    def mainloop(self):
+        """ moin-dump's main code. """
+
+        # Prepare output directory
+        outputdir = os.path.abspath(self.options.target_dir)
+        try:
+            os.mkdir(outputdir)
+            _util.log("Created output directory '%s'!" % outputdir)
+        except OSError, err:
+            if err.errno != errno.EEXIST:
+                _util.fatal("Cannot create output directory '%s'!" % outputdir)
+
+        # Insert config dir or the current directory to the start of the path.
+        config_dir = self.options.config_dir
+        if config_dir and os.path.isfile(config_dir):
+            config_dir = os.path.dirname(config_dir)
+        if config_dir and not os.path.isdir(config_dir):
+            _util.fatal("bad path given to --config-dir option")
+        sys.path.insert(0, os.path.abspath(config_dir or os.curdir))
+
+        self.init_request()
+        request = self.request
+
+        # fix url_prefix so we get relative paths in output html
+        original_url_prefix = request.cfg.url_prefix
+        request.cfg.url_prefix = url_prefix
+
+        if self.options.page:
+            pages = [self.options.page]
+        else:
+            # Get all existing pages in the wiki
+            pages = request.rootpage.getPageList(user='')
+            pages.sort()
+
+        wikiutil.quoteWikinameURL = lambda pagename, qfn=wikiutil.quoteWikinameFS: (qfn(pagename) + HTML_SUFFIX)
+
+        AttachFile.getAttachUrl = lambda pagename, filename, request, addts=0, escaped=0: (_attachment(request, pagename, filename, outputdir))
+
+        errfile = os.path.join(outputdir, 'error.log')
+        errlog = open(errfile, 'w')
+        errcnt = 0
+
+        page_front_page = wikiutil.getSysPage(request, request.cfg.page_front_page).page_name
+        page_title_index = wikiutil.getSysPage(request, 'TitleIndex').page_name
+        page_word_index = wikiutil.getSysPage(request, 'WordIndex').page_name
+        
+        navibar_html = ''
+        for p in [page_front_page, page_title_index, page_word_index]:
+            navibar_html += '&nbsp;[<a href="%s">%s</a>]' % (wikiutil.quoteWikinameURL(p), wikiutil.escape(p))
+
+        for pagename in pages:
+            # we have the same name in URL and FS
+            file = wikiutil.quoteWikinameURL(pagename) 
+            _util.log('Writing "%s"...' % file)
+            try:
+                pagehtml = ''
+                page = Page.Page(request, pagename)
+                request.page = page
+                try:
+                    request.reset()
+                    pagehtml = request.redirectedOutput(page.send_page, request, count_hit=0, content_only=1)
+                except:
+                    errcnt = errcnt + 1
+                    print >>sys.stderr, "*** Caught exception while writing page!"
+                    print >>errlog, "~" * 78
+                    print >>errlog, file # page filename
+                    import traceback
+                    traceback.print_exc(None, errlog)
+            finally:
+                logo_html = re.sub(original_url_prefix + "/?", "", request.cfg.logo_string)
+                timestamp = time.strftime("%Y-%m-%d %H:%M")
+                filepath = os.path.join(outputdir, file)
+                fileout = codecs.open(filepath, 'w', config.charset)
+                fileout.write(page_template % {
+                    'charset': config.charset,
+                    'pagename': pagename,
+                    'pagehtml': pagehtml,
+                    'logo_html': logo_html,
+                    'navibar_html': navibar_html,
+                    'timestamp': timestamp,
+                    'theme': request.cfg.theme_default,
+                })
+                fileout.close()
+
+        # copy FrontPage to "index.html"
+        indexpage = page_front_page
+        if self.options.page:
+            indexpage = self.options.page
+        shutil.copyfile(
+            os.path.join(outputdir, wikiutil.quoteWikinameFS(indexpage) + HTML_SUFFIX),
+            os.path.join(outputdir, 'index' + HTML_SUFFIX)
+        )
+
+        errlog.close()
+        if errcnt:
+            print >>sys.stderr, "*** %d error(s) occurred, see '%s'!" % (errcnt, errfile)
+
--- /dev/null	Thu Jan 01 00:00:00 1970 +0000
+++ b/MoinMoin/script/import/__init__.py	Sat Mar 25 22:31:55 2006 +0000
@@ -0,0 +1,14 @@
+# -*- coding: iso-8859-1 -*-
+"""
+    MoinMoin - Import Script Package
+
+    @copyright: 2006 by Thomas Waldmann
+    @license: GNU GPL, see COPYING for details.
+"""
+
+from MoinMoin.util import pysupport
+
+# create a list of extension scripts from the subpackage directory
+import_scripts = pysupport.getPackageModules(__file__)
+modules = import_scripts
+
--- /dev/null	Thu Jan 01 00:00:00 1970 +0000
+++ b/MoinMoin/script/import/irclog.py	Sat Mar 25 22:31:55 2006 +0000
@@ -0,0 +1,72 @@
+#!/usr/bin/env python
+"""
+    MoinMoin - Push files into the wiki.
+
+    This script pushes files from a directory into the wiki (to be exact: it
+    pushes all except the last file, as this is maybe still written to in
+    case of irc logs).
+    One application is to use it to store IRC logs into the wiki.
+
+    Usage:
+    moin --config-dir=... --wiki-url=... import irclog --author=IrcLogImporter --file-dir=.
+    
+    @copyright: 2005 by MoinMoin:AlexanderSchremmer
+                2006 by MoinMoin:ThomasWaldmann
+    @license: GNU GPL, see COPYING for details.
+"""
+
def filename_function(filename):
    """Derive a wiki (sub)page name from a log file name.

    Leading '#' characters (IRC channel prefix) are stripped, then the
    first two dot-separated parts are joined into a parent/child path,
    e.g. '#chan.2006-03-25.log' -> 'chan/2006-03-25'.
    """
    parts = filename.lstrip('#').split('.')
    return '/'.join(parts[:2])
+
+import os
+
+from MoinMoin.PageEditor import PageEditor
+from MoinMoin.script._util import MoinScript
+
def decodeLinewise(text):
    """Decode text line by line: utf-8 where possible, falling back to
    iso-8859-1 for lines that are not valid utf-8. Returns the decoded
    lines rejoined with '\\n'.
    """
    decoded = []
    for raw_line in text.splitlines():
        try:
            decoded.append(raw_line.decode("utf-8"))
        except UnicodeDecodeError:
            decoded.append(raw_line.decode("iso-8859-1"))
    return '\n'.join(decoded)
+
+
class PluginScript(MoinScript):
    """ irclog importer script class

    Walks --file-dir and pushes every file except the newest one into
    the wiki as a "#format plain" page (the newest file is skipped
    because it may still be written to, e.g. a live IRC log).
    """

    def __init__(self, argv, def_values):
        MoinScript.__init__(self, argv, def_values)
        self.parser.add_option(
            "--author", dest="author", default="IrcLogImporter",
            help="Use AUTHOR for edit history / RecentChanges"
        )
        self.parser.add_option(
            "--file-dir", dest="file_dir", default='.',
            help="read files from DIRECTORY"
        )
    
    def mainloop(self):
        """ Push all but the last file of each directory; pages that
        already exist are left untouched. """
        self.init_request()
        request = self.request
        for root, dirs, files in os.walk(self.options.file_dir):
            files.sort()
            for filename in files[:-1]: # do not push the last file as it is constantly written to
                # NOTE(review): self.options.page is not declared in this class;
                # presumably a common option added by MoinScript - confirm.
                pagename = self.options.page + filename_function(filename)
                print "Pushing %r as %r" % (filename, pagename)
                p = PageEditor(request, pagename, do_editor_backup=0, uid_override=self.options.author)
                if p.exists():
                    # never overwrite an already imported (possibly edited) page
                    continue
                fileObj = open(os.path.join(root, filename), 'rb')
                try:
                    p.saveText("#format plain\n" + decodeLinewise(fileObj.read()), 0)
                except PageEditor.SaveError, e:
                    # report save problems but keep importing the other files
                    print "Got %r" % (e, )
                fileObj.close()
        print "Finished."
+
--- /dev/null	Thu Jan 01 00:00:00 1970 +0000
+++ b/MoinMoin/script/lupy/__init__.py	Sat Mar 25 22:31:55 2006 +0000
@@ -0,0 +1,14 @@
# -*- coding: iso-8859-1 -*-
"""
    MoinMoin - Fullsearch Index Script Package

    @copyright: 2006 by Thomas Waldmann
    @license: GNU GPL, see COPYING for details.
"""

from MoinMoin.util import pysupport

# create a list of extension scripts from the subpackage directory
index_scripts = pysupport.getPackageModules(__file__)
# "modules" is the alias the plugin machinery expects (same pattern as
# the other script subpackages)
modules = index_scripts
+
--- /dev/null	Thu Jan 01 00:00:00 1970 +0000
+++ b/MoinMoin/script/lupy/build.py	Sat Mar 25 22:31:55 2006 +0000
@@ -0,0 +1,50 @@
+# -*- coding: iso-8859-1 -*-
+"""
+    MoinMoin - build lupy search engine's index
+
+    You must run this script as owner of the wiki files, usually this is the
+    web server user.
+
+    @copyright: 2005 by Florian Festi, Nir Soffer
+    @license: GNU GPL, see COPYING for details.
+"""
+
+import os
+
+from MoinMoin.script import _util
+from MoinMoin.script._util import MoinScript
+from MoinMoin.request import RequestCLI
+from MoinMoin.lupy import Index
+
+
class IndexScript(MoinScript):
    """ Lupy general index script class

    Common base class for the lupy subcommands: parses the
    --files/--update options, initializes the request and delegates the
    real work to self.command(), which subclasses must implement.
    """

    def __init__(self, argv, def_values):
        MoinScript.__init__(self, argv, def_values)
        self.parser.add_option(
            "--files", metavar="FILES", dest="file_list",
            help="filename of file list, e.g. files.lst (one file per line)"
        )
        self.parser.add_option(
            "--update", action="store_true", dest="update",
            help="when given, update an existing index"
        )
    
    def mainloop(self):
        # Do we have additional files to index?
        self.init_request()
        if self.options.file_list:
            # NOTE(review): the file handle is never explicitly closed;
            # it is consumed by command() and closed on interpreter cleanup.
            self.files = file(self.options.file_list)
        else:
            self.files = None
        self.command()
+
class PluginScript(IndexScript):
    """ Lupy index build script class """

    def command(self):
        # Index all wiki pages, plus any extra files given via --files;
        # with --update an existing index is updated instead of rebuilt.
        Index(self.request).indexPages(self.files, self.options.update)
        #Index(self.request).test(self.request)
+
+        
--- /dev/null	Thu Jan 01 00:00:00 1970 +0000
+++ b/MoinMoin/script/lupy/optimize.py	Sat Mar 25 22:31:55 2006 +0000
@@ -0,0 +1,23 @@
+# -*- coding: iso-8859-1 -*-
+"""
+    MoinMoin - optimize lupy search engine's index
+
+    You must run this script as owner of the wiki files, usually this is the
+    web server user.
+
+    @copyright: 2005 by Florian Festi, Nir Soffer,
+                2006 by Thomas Waldmann
+    @license: GNU GPL, see COPYING for details.
+"""
doit = 0 # safety switch: keep 0, optimizing is known to break the index

from MoinMoin.script.lupy.build import IndexScript
from MoinMoin.lupy import Index

class PluginScript(IndexScript):
    """ Lupy index optimize script class

    Currently a no-op (doit == 0) because optimizing breaks the index,
    see the bug page printed below.
    """
    def command(self):
        if doit:
            Index(self.request).optimize()
        else:
            print "See http://moinmoin.wikiwikiweb.de/MoinMoinBugs/LupyOptimizeBreaksIndex !"
+
--- /dev/null	Thu Jan 01 00:00:00 1970 +0000
+++ b/MoinMoin/script/migration/1050300.py	Sat Mar 25 22:31:55 2006 +0000
@@ -0,0 +1,28 @@
+# -*- coding: iso-8859-1 -*-
+"""
+    MoinMoin - migration from base rev 1050300
+
+    We add a filter plugin dir here.
+
+    @copyright: 2006 by Thomas Waldmann
+    @license: GNU GPL, see COPYING for details.
+"""
+from MoinMoin.script.migration.migutil import opj, listdir, copy_file, move_file, copy_dir, makedir
+
def execute(script, data_dir, rev):
    """Migration step for base rev 1050300: create the data/plugin/filter
    directory with an empty plugin __init__.py, then return the next
    data format revision.
    """
    init_body = '''\
# -*- coding: iso-8859-1 -*-

from MoinMoin.util import pysupport

modules = pysupport.getPackageModules(__file__)
'''
    plugindir = opj(data_dir, 'plugin')
    for subdir in ['filter', ]:
        target = opj(plugindir, subdir)
        makedir(target)
        init_file = open(opj(target, '__init__.py'), 'w')
        init_file.write(init_body)
        init_file.close()
    return rev + 1
+
--- /dev/null	Thu Jan 01 00:00:00 1970 +0000
+++ b/MoinMoin/script/migration/1050301.py	Sat Mar 25 22:31:55 2006 +0000
@@ -0,0 +1,13 @@
+# -*- coding: iso-8859-1 -*-
+"""
+    MoinMoin - dummy migration terminator script
+
+    This must be the last migration script.
+
+    @copyright: 2006 by Thomas Waldmann
+    @license: GNU GPL, see COPYING for details.
+"""
+
def execute(script, data_dir, rev):
    """Terminator step: returning None tells the migration driver
    (script.migration.data) that the data_dir is fully migrated.
    """
    return None
+
--- /dev/null	Thu Jan 01 00:00:00 1970 +0000
+++ b/MoinMoin/script/migration/__init__.py	Sat Mar 25 22:31:55 2006 +0000
@@ -0,0 +1,14 @@
# -*- coding: iso-8859-1 -*-
"""
    MoinMoin - Migration Script Package

    @copyright: 2006 by Thomas Waldmann
    @license: GNU GPL, see COPYING for details.
"""

from MoinMoin.util import pysupport

# create a list of extension scripts from the subpackage directory
migration_scripts = pysupport.getPackageModules(__file__)
# "modules" is the alias the plugin machinery expects (same pattern as
# the other script subpackages)
modules = migration_scripts
+
--- /dev/null	Thu Jan 01 00:00:00 1970 +0000
+++ b/MoinMoin/script/migration/data.py	Sat Mar 25 22:31:55 2006 +0000
@@ -0,0 +1,61 @@
+# -*- coding: iso-8859-1 -*-
+"""
+    MoinMoin - data_dir migration main script (new style)
+
+    You can use this script to migrate your wiki's data_dir to the format
+    expected by the current MoinMoin code. It will read data/meta to determine
+    what needs to be done and call other migration scripts as needed.
+
+    You must run this script as owner of the wiki files, usually this is the
+    web server user (like www-data).
+
+    Important: you must have run all 12_to_13* and the final 152_to_1050300
+               mig scripts ONCE and in correct order manually before attempting
+               to use the new style migration stuff.
+               
+    @copyright: 2006 by Thomas Waldmann
+    @license: GNU GPL, see COPYING for details.
+"""
+
+import os
+
+from MoinMoin import wikiutil
+from MoinMoin.script._util import MoinScript
+
class PluginScript(MoinScript):
    """ Migration script class

    Reads data/meta for the current data_format_revision and repeatedly
    runs the matching MoinMoin.script.migration.<rev> plugin; each
    script returns the next revision, the terminator returns None.
    """

    def __init__(self, argv, def_values):
        MoinScript.__init__(self, argv, def_values)
        # NOTE(review): --all is parsed but all_wikis is never used in
        # mainloop() - either unimplemented or handled elsewhere; confirm.
        self.parser.add_option(
            "--all", action="store_true", dest="all_wikis",
            help="when given, update all wikis that belong to this farm"
        )
    
    def mainloop(self):
        """ Run migration scripts, updating data/meta after each step,
        until the terminator script returns None. """
        self.init_request()
        request = self.request
        data_dir = request.cfg.data_dir
        meta_fname = os.path.join(data_dir, 'meta')
        while True:
            try:
                # re-open the meta file each round, a mig script may have changed it
                meta = wikiutil.MetaDict(meta_fname)
                try:
                    curr_rev = meta['data_format_revision']
                    mig_name = str(curr_rev)
                    execute = wikiutil.importBuiltinPlugin('script.migration', mig_name)
                    print "Calling migration script for %s, base revision %d" % (data_dir, curr_rev)
                    curr_rev = execute(self, data_dir, curr_rev)
                    if curr_rev is None:
                        print "Final mig script reached, migration is complete."
                        break
                    else:
                        print "Returned. New rev is %d." % curr_rev
                        meta['data_format_revision'] = curr_rev
                        meta.sync()
                except wikiutil.PluginMissingError:
                    print "Error: There is no script for %s." % mig_name
                    break
            finally:
                # NOTE(review): if MetaDict() itself raises, "del meta"
                # raises NameError here and masks the original error.
                del meta
+
--- /dev/null	Thu Jan 01 00:00:00 1970 +0000
+++ b/MoinMoin/script/migration/migutil.py	Sat Mar 25 22:31:55 2006 +0000
@@ -0,0 +1,111 @@
+# -*- coding: iso-8859-1 -*-
+"""
+    MoinMoin - utility functions used by the migration scripts
+
+    @copyright: 2005 by Thomas Waldmann (MoinMoin:ThomasWaldmann)
+    @license: GNU GPL, see COPYING for details.
+"""
+import os, sys, shutil
+
opj = os.path.join # yes, I am lazy
join = os.path.join # second alias for the same function, used by copy() below
+
+
+def fatalError(msg):
+    """ Exit with error message on fatal errors """
+    print "Fatal error:", msg
+    print "Stoping"
+    sys.exit(1)
+
+
def error(msg):
    """ Report minor error and continue """
    # non-fatal: the migration goes on, the user can check the output later
    print "Error:", msg
+
+
+def backup(src, dst):
+    """ Create a backup of src directory in dst, create empty src
+
+    @param src: source
+    @param dst: destination
+    """
+    print "Create backup of '%s' in '%s'" % (src, dst)
+
+    if not os.path.isdir(src):
+        fatalError("can't find '%s'. You must run this script from the directory where '%s' is located." % src)
+
+    try:
+        os.rename(src, dst)
+    except OSError:
+        fatalError("can't rename '%s' to '%s'" % (src, dst))
+
+    try:
+        os.mkdir(src)
+    except OSError:
+        fatalError("can't create '%s'" % src)
+
+    
def listdir(path):
    """ Return the entries of path, skipping dotfiles, pickle files
    and CVS bookkeeping directories """
    result = []
    for entry in os.listdir(path):
        if entry.startswith('.'):
            continue
        if entry.endswith('.pickle') or entry == 'CVS':
            continue
        result.append(entry)
    return result
+
+
def makedir(newdir):
    """ Create a directory, if it doesn't exist """
    try:
        os.mkdir(newdir)
    except OSError:
        # directory exists already (or can't be created) - ignored here,
        # later copy/move operations will report real problems
        pass
+
+def copy_dir(dir_from, dir_to):
+    """ Copy a complete directory """
+    print "%s/ -> %s/" % (dir_from, dir_to)
+    try:
+        shutil.copytree(dir_from, dir_to)
+    except:
+        error("can't copy '%s' to '%s'" % (dir_from, dir_to))
+
+
+def copy_file(fname_from, fname_to):
+    """ Copy a single file """
+    print "%s -> %s" % (fname_from, fname_to)
+    try:
+        data = open(fname_from).read()
+        open(fname_to, "w").write(data)
+        st=os.stat(fname_from)
+        os.utime(fname_to, (st.st_atime,st.st_mtime))
+    except:
+        error("can't copy '%s' to '%s'" % (fname_from, fname_to))
+
+
+def move_file(fname_from, fname_to):
+    """ Move a single file """
+    print "%s -> %s" % (fname_from, fname_to)
+    try:
+        os.rename(fname_from, fname_to)
+    except:
+        error("can't move '%s' to '%s'" % (fname_from, fname_to))
+
+
def copy(items, srcdir, dstdir):
    """ copy the named items from srcdir into dstdir

    Directories are copied recursively, plain files individually;
    missing entries are reported via error().

    @param items: names of the entries to copy
    @param srcdir: directory to copy items from
    @param dstdir: directory to copy items into
    """
    for name in items:
        src_path = join(srcdir, name)
        dst_path = join(dstdir, name)
        if os.path.isdir(src_path):
            copy_dir(src_path, dst_path)
        elif os.path.isfile(src_path):
            copy_file(src_path, dst_path)
        else:
            error("can't find '%s'" % src_path)
--- /dev/null	Thu Jan 01 00:00:00 1970 +0000
+++ b/MoinMoin/script/moin.py	Sat Mar 25 22:31:55 2006 +0000
@@ -0,0 +1,23 @@
+#!/usr/bin/env python
+# -*- coding: iso-8859-1 -*-
+"""
+    MoinMoin - "moin" is the main script command and calls other stuff as
+    a sub-command.
+
+    Usage: moin cmdmodule cmdname [options]
+               
+    @copyright: 2006 by Thomas Waldmann
+    @license: GNU GPL, see COPYING for details.
+"""
+
def run():
    """ Start the "moin" command line interface.

    The import is inside the function so that the sys.path setup in the
    __main__ section below takes effect before MoinMoin is loaded.
    """
    from MoinMoin.script import _util
    _util.MoinScript().run(showtime=0)
    
if __name__ == "__main__":
    # Insert the path to MoinMoin in the start of the path
    import sys, os
    sys.path.insert(0, os.path.join(os.path.dirname(sys.argv[0]), os.pardir, os.pardir))

    run()
+
--- /dev/null	Thu Jan 01 00:00:00 1970 +0000
+++ b/MoinMoin/script/old/accounts/.cvsignore	Sat Mar 25 22:31:55 2006 +0000
@@ -0,0 +1,3 @@
+{arch}
+.arch-ids
+
--- /dev/null	Thu Jan 01 00:00:00 1970 +0000
+++ b/MoinMoin/script/old/accounts/__init__.py	Sat Mar 25 22:31:55 2006 +0000
@@ -0,0 +1,8 @@
+# -*- coding: iso-8859-1 -*-
+"""
+    MoinMoin - account management Scripts
+
+    @copyright: 2004 by Thomas Waldmann
+    @license: GNU GPL, see COPYING for details.
+"""
+
--- /dev/null	Thu Jan 01 00:00:00 1970 +0000
+++ b/MoinMoin/script/old/accounts/moin_usercheck-jh-new.py	Sat Mar 25 22:31:55 2006 +0000
@@ -0,0 +1,289 @@
+#!/usr/bin/env python
+"""
+MoinMoin - check / process user accounts tool
+GPL code written by Thomas Waldmann, 20031005
+
+Why is this needed?
+===================
+When using ACLs, a wiki user name has to be unique, there must not be
+multiple accounts having the same username. The problem is, that this
+was possible before the introduction of ACLs and many users, who forgot
+their ID, simply created a new ID using the same user name.
+
+Because access rights (when using ACLs) depend on the NAME (not the ID),
+this must be cleaned up before using ACLs or users will have difficulties
+changing settings and saving their account data (system won't accept the
+save, if the user name and email is not unique).
+
+How to use this tool?
+=====================
+
+0. Check the settings at top of the code!
+   Making a backup of your wiki might be also a great idea.
+   
+1. Best is to first look at duplicate user names:
+    --usersunique
+
+   If everything looks OK there, you may save that to disk:
+    --usersunique --save
+
+2. Now, check also for duplicate email addresses:
+    --emailsunique
+
+   If everything looks OK, you may save that to disk:
+    --emailsunique --save
+
+3. If the announced action is incorrect, you may choose to better manually
+disable some accounts:
+    --disableuser 1234567.8.90 --save
+
+4. After cleaning up, do 1. and 2. again. There should be no output now, if
+   everything is OK.
+   
+5. Optionally you may want to make wikinames out of the user names
+    --wikinames
+    --wikinames --save
+    
+"""
+
+# ----------------------------------------------------------------------------
+# if a user subscribes to magicpages, it means that he wants to keep
+# exactly THIS account - this will avoid deleting it.
+magicpages = [
+    "ThisAccountIsCorrect", 
+    "DieserAccountIstRichtig",
+]
+
+# ----------------------------------------------------------------------------
+from MoinMoin.script import _util
+config = None
+
+
+#############################################################################
+### Main program
+#############################################################################
+
class MoinUserCheck(_util.Script):
    """ Command line tool to check / repair user accounts:
    find duplicate user names and emails, disable accounts, and turn
    account names into WikiNames. Changes are only written with --save.
    """

    def __init__(self):
        _util.Script.__init__(self, __name__, "[options]")

        # --config=DIR
        self.parser.add_option(
            "--config", metavar="DIR", dest="configdir",
            help="Path to wikiconfig.py (or its directory)"
        )

        # --disableuser=UID
        self.parser.add_option(
            "--disableuser", metavar="UID", dest="disableuser",
            help="Disable the user with user id UID;"
                " this can't be combined with options below!"
        )

        # Flags
        self._addFlag("usersunique",
            "Makes user names unique (by appending the ID to"
            " name and email, disabling subscribed pages and"
            " disabling all, but the latest saved user account);"
            " default is to SHOW what will be happening, you"
            " need to give the --save option to really do it."
        )
        self._addFlag("emailsunique",
            "Makes user emails unique;"
            " default is to show, use --save to save it."
        )
        self._addFlag("wikinames",
            "Convert user account names to wikinames (camel-case)."
        )
        self._addFlag("lastsaved",
            "Normally the account most recently USED will"
            " survive and the others will be disabled."
            " Using --lastsaved, the account most recently"
            " SAVED will survive."
        )
        self._addFlag("save",
            "If specified as LAST option, will allow the other"
            " options to save user accounts back to disk."
            " If not specified, no settings will be changed permanently."
        )


    def _addFlag(self, name, help):
        """ Shortcut: add a boolean --<name> option (default off). """
        self.parser.add_option("--" + name,
            action="store_true", dest=name, default=0, help=help)


    def mainloop(self):
        """ moin-usercheck's main code.

        Validates the option combination, loads the wiki configuration,
        builds the account indexes and runs the selected checks.
        """
        import os, sys

        # we don't expect non-option arguments
        if len(self.args) != 0:
            self.parser.error("incorrect number of arguments")

        # check for correct option combination
        flags_given = (
               self.options.usersunique 
            or self.options.emailsunique 
            or self.options.wikinames)

        if flags_given and self.options.disableuser:
            # XXX: why is this? only because the former option parser code was braindead?
            self.parser.error("--disableuser can't be combined with other options!")

        # no option given ==> show usage
        if not (flags_given or self.options.disableuser):
            self.parser.print_help()
            sys.exit(1)

        #
        # Load the configuration
        #
        configdir = self.options.configdir
        if configdir:
            if os.path.isfile(configdir): configdir = os.path.dirname(configdir)
            if not os.path.isdir(configdir):
                _util.fatal("Bad path %r given to --config parameter" % configdir)
            configdir = os.path.abspath(configdir)
            sys.path[0:0] = [configdir]
            os.chdir(configdir)

        global config
        from MoinMoin import config
        if config.default_config:
            _util.fatal("You have to be in the directory containing wikiconfig.py, "
                "or use the --config option!")

        # XXX: globals bad bad bad!
        #global users, names, emails, uids_noemail
        users = {} # uid : UserObject
        names = {} # name : [uid, uid, uid]
        emails = {} # email : [uid, uid, uid]
        uids_noemail = {} # uid : name

        # XXX: Refactor to methods!
        from MoinMoin import user, wikiutil

        def collect_data():
            """ Load every account and fill users/names/emails/uids_noemail. """
            import re

            for uid in user.getUserList():
                u = user.User(None, uid)
                users[uid] = u
        
                # collect name duplicates:
                if names.has_key(u.name):
                    names[u.name].append(uid)
                else:
                    names[u.name] = [uid]
        
                # collect email duplicates:
                if u.email:
                    if emails.has_key(u.email):
                        emails[u.email].append(uid)
                    else:
                        emails[u.email] = [uid]
        
                # collect account with no or invalid email address set:
                if not u.email or not re.match(".*@.*\..*", u.email):
                    uids_noemail[uid] = u.name
        
        
        def hasmagicpage(uid):
            """ True if the user subscribed one of the magic keep-me pages. """
            u = users[uid]
            return u.isSubscribedTo(magicpages)
        
        
        def disableUser(uid):
            """ Disable account uid unless it has a magic page subscription;
            only writes to disk when --save was given. """
            u = users[uid]
            print " %-20s %-25s %-35s" % (uid, u.name, u.email),
            keepthis = hasmagicpage(uid)
            if keepthis:
                print "- keeping (magicpage)!"
                u.save() # update timestamp, so this will be latest next run
            elif not u.disabled: # only disable once
                u.disabled = 1
                u.name = "%s-%s" % (u.name, uid)
                if u.email:
                    u.email = "%s-%s" % (u.email, uid)
                u.subscribed_pages = "" # avoid using email
                if self.options.save:
                    u.save()
                    print "- disabled."
                else:
                    print "- would be disabled."
        
        
        def getsortvalue(uid,user):
            """ Ranking timestamp for duplicates: last-saved time with
            --lastsaved, otherwise last-used (trail file mtime). """
            t_ls = float(user.last_saved) # when user did last SAVE of his account data
            if self.options.lastsaved:
                return t_ls
            else: # last USED (we check the page trail for that)
                try:
                    t_lu = float(os.path.getmtime(os.path.join(config.user_dir, uid+".trail")))
                except OSError:
                    t_lu = t_ls # better than having nothing
                return t_lu
        
        
        def process(uidlist):
            """ Keep the highest-ranking of the given duplicate uids,
            disable all others. """
            sortlist = []
            for uid in uidlist:
                u = users[uid]
                sortlist.append((getsortvalue(uid,u),uid))
            sortlist.sort()
            #print sortlist
            # disable all, but the last/latest one
            for t,uid in sortlist[:-1]:
                disableUser(uid)
            # show what will be kept
            uid = sortlist[-1][1]
            u = users[uid]
            print " %-20s %-25s %-35s - keeping%s!" % (uid, u.name, u.email, hasmagicpage(uid) and " (magicpage)" or "")
        
        
        def make_users_unique():
            """ Process every user name that is used by more than one account. """
            for name in names.keys():
                if len(names[name])>1:
                    process(names[name])
        
        
        def make_emails_unique():
            """ Process every email that is used by more than one account. """
            for email in emails.keys():
                if len(emails[email])>1:
                    process(emails[email])
        
        
        def make_WikiNames():
            """ Try to convert non-CamelCase account names to WikiNames;
            only saved with --save. """
            import string
            for uid in users.keys():
                u = users[uid]
                if u.disabled: continue
                if not wikiutil.isStrictWikiname(u.name):
                    newname = string.capwords(u.name).replace(" ","").replace("-","")
                    if not wikiutil.isStrictWikiname(newname):
                        print " %-20s %-25s - no WikiName, giving up" % (uid, u.name)
                    else:
                        print " %-20s %-25s - no WikiName -> %s" % (uid, u.name, newname)
                        if self.options.save:
                            u.name = newname
                            u.save()

        collect_data()
        if self.options.disableuser:
            disableUser(self.options.disableuser)
        else:
            if self.options.usersunique:
                make_users_unique()
            if self.options.emailsunique: 
                make_emails_unique()
            if self.options.wikinames:
                make_WikiNames()
+
+
def run():
    """ Script entry point: run the user check tool. """
    MoinUserCheck().run()

if __name__ == "__main__":
    run()
--- /dev/null	Thu Jan 01 00:00:00 1970 +0000
+++ b/MoinMoin/script/old/accounts/moin_usercheck.py	Sat Mar 25 22:31:55 2006 +0000
@@ -0,0 +1,252 @@
+#!/usr/bin/env python
+# -*- coding: iso-8859-1 -*-
+"""
+MoinMoin - check / process user accounts tool
+GPL code written by Thomas Waldmann, 20031005
+
+Why is this needed?
+===================
+When using ACLs, a wiki user name has to be unique, there must not be
+multiple accounts having the same username. The problem is, that this
+was possible before the introduction of ACLs and many users, who forgot
+their ID, simply created a new ID using the same user name.
+
+Because access rights (when using ACLs) depend on the NAME (not the ID),
+this must be cleaned up before using ACLs or users will have difficulties
+changing settings and saving their account data (system won't accept the
+save, if the user name and email is not unique).
+
+How to use this tool?
+=====================
+
+0. Check the settings at top of the code!
+   Making a backup of your wiki might be also a great idea.
+   
+1. Best is to first look at duplicate user names:
+    --usersunique
+
+   If everything looks OK there, you may save that to disk:
+    --usersunique --save
+
+2. Now, check also for duplicate email addresses:
+    --emailsunique
+
+   If everything looks OK, you may save that to disk:
+    --emailsunique --save
+
+3. If the announced action is incorrect, you may choose to better manually
+disable some accounts:
+    --disableuser 1234567.8.90 --save
+
+4. After cleaning up, do 1. and 2. again. There should be no output now, if
+   everything is OK.
+   
+5. Optionally you may want to make wikinames out of the user names
+    --wikinames
+    --wikinames --save
+    
+"""
+
import os
import re
import sys
+
+# ----------------------------------------------------------------------------
+# CHECK THESE SETTINGS, then remove or comment out the following line:
+#print "Check the settings in the script first, please!" ; sys.exit(1)
+
+# this is where your moinmoin code is (if you installed it using
+# setup.py into your python site-packages, then you don't need that setting):
+sys.path.insert(0, '/home/twaldmann/moincvs/moin--main')
+
+# this is where your wikiconfig.py is:
+sys.path.insert(0, '/org/org.linuxwiki/cgi-bin')
+
+# if you include other stuff in your wikiconfig, you might need additional
+# paths in your search path. Put them here:
+sys.path.insert(0, '/org/wiki')
+
+# if a user subscribes to magicpage, it means that he wants to keep
+# exactly THIS account - this will avoid deleting it.
+#magicpage = "ThisAccountIsCorrect"
+magicpage = "DieserAccountIstRichtig"
+
+# ----------------------------------------------------------------------------
+
+from MoinMoin.user import *
+from MoinMoin import config, wikiutil
+
def collect_data():
    """ Scan all user accounts and fill the module-global indexes:
    users (uid -> User), names (name -> [uid..]),
    emails (email -> [uid..]) and uids_noemail (uid -> name).
    """
    for uid in getUserList(request): # XXX FIXME make request object for getting config vars there
        u = User(None, uid)
        users[uid] = u

        # collect name duplicates (setdefault replaces the has_key dance):
        names.setdefault(u.name, []).append(uid)

        # collect email duplicates:
        if u.email:
            emails.setdefault(u.email, []).append(uid)

        # collect accounts with no or invalid email address set:
        if not u.email or not re.match(r".*@.*\..*", u.email):
            uids_noemail[uid] = u.name
+
def hasmagicpage(uid):
    """ True if the account's subscribed pages mention the magic
    keep-me page. """
    return magicpage in users[uid].subscribed_pages
+
def disableUser(uid):
    """ Disable account uid unless it subscribed the magic keep-me page;
    only written to disk when --save was given. """
    u = users[uid]
    print " %-20s %-25s %-35s" % (uid, u.name, u.email),
    keepthis = hasmagicpage(uid)
    if keepthis:
        print "- keeping (magicpage)!"
        u.save() # update timestamp, so this will be latest next run
    elif not u.disabled: # only disable once
        u.disabled = 1
        u.name = "%s-%s" % (u.name, uid)
        if u.email:
            u.email = "%s-%s" % (u.email, uid)
        u.subscribed_pages = "" # avoid using email
        if save:
            u.save()
            print "- disabled."
        else:
            print "- would be disabled."
+
def getsortvalue(uid,user):
    """ Ranking timestamp for duplicate accounts: last-saved time with
    --lastsaved, otherwise last-used time (mtime of the user's page
    trail file, falling back to last-saved if there is no trail). """
    t_ls = float(user.last_saved) # when user did last SAVE of his account data
    if lastsaved:
        return t_ls
    else: # last USED (we check the page trail for that)
        try:
            t_lu = float(os.path.getmtime(os.path.join(config.user_dir, uid+".trail")))
        except OSError:
            t_lu = t_ls # better than having nothing
        return t_lu
+
def process(uidlist):
    """ Given uids that share one name/email, keep the highest-ranking
    account (see getsortvalue) and disable all the others. """
    sortlist = []
    for uid in uidlist:
        u = users[uid]
        sortlist.append((getsortvalue(uid,u),uid))
    sortlist.sort()
    #print sortlist
    # disable all, but the last/latest one
    for t,uid in sortlist[:-1]:
        disableUser(uid)
    # show what will be kept
    uid = sortlist[-1][1]
    u = users[uid]
    print " %-20s %-25s %-35s - keeping%s!" % (uid, u.name, u.email, hasmagicpage(uid) and " (magicpage)" or "")
+
def make_users_unique():
    """ Run the duplicate handler for every user name that belongs to
    more than one account. """
    for uids in names.values():
        if len(uids) > 1:
            process(uids)
+        
def make_emails_unique():
    """ Run the duplicate handler for every email address that belongs
    to more than one account. """
    for uids in emails.values():
        if len(uids) > 1:
            process(uids)
+
+
def make_WikiNames():
    """ Try to turn non-CamelCase account names into WikiNames
    (capitalized words, spaces/dashes removed); only saved with --save. """
    import string
    for uid in users.keys():
        u = users[uid]
        if u.disabled: continue
        if not wikiutil.isStrictWikiname(u.name):
            newname = string.capwords(u.name).replace(" ","").replace("-","")
            if not wikiutil.isStrictWikiname(newname):
                print " %-20s %-25s - no WikiName, giving up" % (uid, u.name)
            else:
                print " %-20s %-25s - no WikiName -> %s" % (uid, u.name, newname)
                if save:
                    u.name = newname
                    u.save()
+            
def do_removepasswords():
    """ Re-save all accounts so that pre-1.1 cleartext passwords vanish
    from disk (loading already dropped them from the objects). """
    for uid in users.keys():
        u = users[uid]
        # user.User already clears the old cleartext passwords on loading,
        # so nothing to do here!
        if save:
            # we can't encrypt the cleartext password as it is cleared
            # already. and we would not trust it anyway, so we don't WANT
            # to do that either!
            # Just save the account data without cleartext password:
            print " %-20s %-25s - saving" % (uid, u.name)
            u.save()
+            
# here the main routine starts --------------------------------
# command line flags, set from sys.argv in run()
usersunique = emailsunique = lastsaved = save = 0
disableuser = wikinames = removepasswords = 0

# module-global account indexes, filled by collect_data()
users = {} # uid : UserObject
names = {} # name : [uid, uid, uid]
emails = {} # email : [uid, uid, uid]
uids_noemail = {} # uid : name
+
def run():
    """ Parse the command line flags from sys.argv, print usage when
    nothing to do, then run the selected account checks/repairs. """
    global usersunique, emailsunique, lastsaved, save, disableuser, wikinames
    global users, names, emails, uids_noemail, removepasswords
    
    if "--usersunique" in sys.argv:  usersunique = 1
    if "--emailsunique" in sys.argv: emailsunique = 1
    if "--lastsaved" in sys.argv:    lastsaved = 1
    if "--wikinames" in sys.argv:    wikinames = 1
    if "--removepasswords" in sys.argv:    removepasswords = 1
    if "--save" in sys.argv:         save = 1

    if "--disableuser" in sys.argv:  disableuser = 1

    if not usersunique and not emailsunique and not disableuser and \
       not wikinames and not removepasswords:
        print """%s
    Options:
        --usersunique       makes user names unique (by appending the ID to
                            name and email, disabling subscribed pages and
                            disabling all, but the latest saved user account)
                            default is to SHOW what will be happening, you
                            need to give the --save option to really do it.

        --emailsunique      makes user emails unique
                            default is to show, use --save to save it.

        --lastsaved         normally the account most recently USED will
                            survive and the others will be disabled.
                            using --lastsaved, the account most recently
                            SAVED will survive.

        --disableuser uid   disable the user with user id uid
                            this can't be combined with the options above!
                            
        --wikinames         try to make "WikiNames" out of "user names"
        --removepasswords   remove pre-1.1 cleartext passwords from accounts
        
        --save              if specified as LAST option, will allow the other
                            options to save user accounts back to disk.
                            if not specified, no settings will be permanently
                            changed.

    """ % sys.argv[0]
        return
        
    collect_data()
    if usersunique:  make_users_unique()
    if emailsunique: make_emails_unique()
    # NOTE(review): assumes the uid is sys.argv[2], i.e. that --disableuser
    # comes first on the command line - fragile if other flags precede it.
    if disableuser:  disableUser(sys.argv[2])
    if wikinames:    make_WikiNames()
    if removepasswords: do_removepasswords()

if __name__ == "__main__":
    run()
+
+# EOF
+
--- /dev/null	Thu Jan 01 00:00:00 1970 +0000
+++ b/MoinMoin/script/old/cachecleaner.py	Sat Mar 25 22:31:55 2006 +0000
@@ -0,0 +1,40 @@
+#!/usr/bin/env python
+# -*- coding: iso-8859-1 -*-
+"""
+    MoinMoin - clear the cache
+
+    @copyright: 2005 by Thomas Waldmann (MoinMoin:ThomasWaldmann)
+    @license: GNU GPL, see COPYING for details.
+
+    globally delete cache files in data/pages/PageName/cache/ directories
+    
+    Usage:
+    First change the base path and fname to match your needs.
+    Then do ./cachecleaner.py
+
+    You will usually do this after changing MoinMoin code, by either upgrading
+    version, installing or removing macros. This often makes the text_html
+    files invalid, so you have to remove them (the wiki will recreate them
+    automatically).
+    
+    text_html is the name of the cache file used for compiled pages formatted
+    by the wiki text to html formatter,
+"""
+
+base = "." # directory containing the data directory
+fnames = ['text_html', 'pagelinks', ] # cache filenames to delete
+
def run():
    """Delete the configured cache files below <base>/data/pages.

    Walks every page directory and removes each cache file listed in the
    module-level `fnames` list.  Missing cache files are expected (most
    pages will not have every cache file) and are silently skipped.
    """
    import os
    pagesdir = os.path.join(base, 'data', 'pages')
    for f in os.listdir(pagesdir):
        for fname in fnames:
            cachefile = os.path.join(pagesdir, f, 'cache', fname)
            try:
                os.remove(cachefile)
            except OSError:
                # was a bare except: - that also swallowed SystemExit and
                # KeyboardInterrupt; only a missing file is expected here
                pass

if __name__ == '__main__':
    run()
+
--- /dev/null	Thu Jan 01 00:00:00 1970 +0000
+++ b/MoinMoin/script/old/globaledit.py	Sat Mar 25 22:31:55 2006 +0000
@@ -0,0 +1,110 @@
+#!/usr/bin/env python
+"""
+    Script for doing global changes to all pages in a wiki.
+
+    You either need to have your wiki configs in sys.path or you
+    need to invoke this script from the same directory.
+
+    @copyright: 2004, Thomas Waldmann
+    @license: GPL licensed, see COPYING for details
+"""
+
+debug = False
+
+url = "moinmaster.wikiwikiweb.de/"
+
+import sys
+sys.path.insert(0, '/org/de.wikiwikiweb.moinmaster/bin15') # farmconfig/wikiconfig location
+sys.path.insert(0, '../..')
+
def do_edit(pagename, origtext):
    """Normalize the header of one wiki page and return the new text.

    Sorts header lines into acl / language / format / master / comment /
    pragma groups, fills in defaults for anything missing, refreshes the
    standard moinmaster editing notice, and reassembles the page.
    Spelling/spam lists and template pages are returned untouched.
    """
    if pagename in ['LocalSpellingWords','LocalBadContent',] or pagename.endswith('Template'):
        return origtext

    lang_line = fmt_line = master = None
    acl = []
    masters = []
    pragmas = []
    comments = []
    body = []

    in_header = True
    for line in origtext.splitlines():
        if not line.startswith('#'):
            in_header = False
        if not in_header:
            # once the header ended, everything (even '#' lines) is content
            body.append(line)
            continue
        if line.startswith('#acl '):
            acl.append(line)
        elif line.startswith('#language '):
            lang_line = line
        elif line.startswith('#format '):
            fmt_line = line
        elif line.startswith('##master-page:'):
            master = line.split(':',1)[1].strip()
            masters.append(line)
        elif line.startswith('##master-date:'):
            masters.append(line)
        elif line.startswith('##'):
            comments.append(line)
        elif line.startswith('#'):
            pragmas.append(line)

    if not lang_line:
        lang_line = '#language en'
    if not fmt_line:
        fmt_line = '#format wiki'
    # note: original operator precedence kept - A or (B and C)
    if not acl and (
        master is None or master not in ['FrontPage', 'WikiSandBox',] and not master.endswith('Template')):
        acl = ['#acl MoinPagesEditorGroup:read,write,delete,revert All:read']
    if not masters:
        masters = ['##master-page:Unknown-Page', '##master-date:Unknown-Date',]

    c1old = "## Please edit (or translate) system/help pages on the moinmaster wiki ONLY."
    c2old = "## For more information, please see MoinMaster:MoinPagesEditorGroup."
    c1 = "## Please edit system and help pages ONLY in the moinmaster wiki! For more"
    c2 = "## information, please see MoinMaster:MoinPagesEditorGroup."
    # drop one occurrence of any stale/duplicate notice, then prepend fresh
    for stale in (c1old, c2old, c1, c2):
        if stale in comments:
            comments.remove(stale)
    comments = [c1, c2, ] + comments

    if body and body[-1].strip(): # not an empty line at EOF
        body.append('')

    if master and master.endswith('Template'):
        parts = masters + [fmt_line, lang_line,] + pragmas + body
    else:
        parts = comments + masters + acl + [fmt_line, lang_line,] + pragmas + body
    return '\n'.join(parts)
+
if __name__ == '__main__':
    if debug:
        # Offline debug mode: read page source from ./origtext, run the
        # transformation, write the result to ./changedtext.
        import codecs
        origtext = codecs.open('origtext', 'r', 'utf-8').read()
        origtext = origtext.replace('\r\n','\n')
        changedtext = do_edit("", origtext)
        changedtext = changedtext.replace('\n','\r\n')
        f = codecs.open('changedtext', 'w', 'utf-8')
        f.write(changedtext)
        f.close()
    else:

        from MoinMoin import PageEditor, wikiutil
        from MoinMoin.request import RequestCLI

        request = RequestCLI(url=url)
        # Get all existing pages in the wiki
        pagelist = request.rootpage.getPageList(user='')

        for pagename in pagelist:
            # fresh request per page so per-request state can't leak between edits
            request = RequestCLI(url=url, pagename=pagename.encode('utf-8'))
            p = PageEditor.PageEditor(request, pagename, do_editor_backup=0)
            origtext = p.get_raw_body()
            changedtext = do_edit(pagename, origtext)
            if changedtext and changedtext != origtext:
                # _write_file saves directly, without an editor backup copy
                print "Writing %s ..." % repr(pagename)
                p._write_file(changedtext)
+
--- /dev/null	Thu Jan 01 00:00:00 1970 +0000
+++ b/MoinMoin/script/old/packages/__init__.py	Sat Mar 25 22:31:55 2006 +0000
@@ -0,0 +1,7 @@
+# -*- coding: iso-8859-1 -*-
+"""
+    MoinMoin - create language packages
+
+    @copyright: 2005 by Thomas Waldmann (MoinMoin:ThomasWaldmann)
+    @license: GNU GPL, see COPYING for details.
+"""
--- /dev/null	Thu Jan 01 00:00:00 1970 +0000
+++ b/MoinMoin/script/old/packages/create_pagepacks.py	Sat Mar 25 22:31:55 2006 +0000
@@ -0,0 +1,202 @@
+#!/usr/bin/env python
+# -*- coding: iso-8859-1 -*-
+"""
+    MoinMoin - Package Generator
+
+    @copyright: 2005 by Alexander Schremmer
+    @license: GNU GPL, see COPYING for details.
+"""
+
+import os, sys
+import zipfile
+import threading
+import xmlrpclib
+from sets import Set
+from datetime import datetime
+from time import sleep
+
+# your MoinMoin package path here
+sys.path.insert(0, r"../../..")
+sys.path.insert(0, r".")
+
+from MoinMoin import config, wikidicts, wikiutil
+from MoinMoin.Page import Page
+from MoinMoin.PageEditor import PageEditor
+from MoinMoin.request import RequestCLI
+from MoinMoin.packages import packLine, unpackLine, MOIN_PACKAGE_FILE
+
+master_url ="http://moinmaster.wikiwikiweb.de/?action=xmlrpc2"
+
+EXTRA = u'extra'
+NODIST = u'nodist'
+ALL = u'all_languages'
+COMPRESSION_LEVEL = zipfile.ZIP_STORED
+
def buildPageSets():
    """ Calculates which pages should go into which package.

    Reads the module globals `request` and `gd` (bound by run()).

    @return: dict mapping set name (language name, ALL, EXTRA, NODIST)
             to a Set of page names.
    """
    pageSets = {}

    #allPages = Set(xmlrpclib.ServerProxy(master_url).getAllPages())
    allPages = Set(request.rootpage.getPageList())

    systemPages = wikidicts.Group(request, "SystemPagesGroup").members()

    for pagename in systemPages:
        if pagename.endswith("Group"):
            #print x + " -> " + repr(wikidicts.Group(request, x).members())
            gd.addgroup(request, pagename)

    langPages = Set()
    for name, group in gd.dictdict.items():
        group.expandgroups(gd)
        groupPages = Set(group.members() + [name])
        # "SystemPagesInEnglishGroup" -> "English"
        name = name.replace("SystemPagesIn", "").replace("Group", "")
        pageSets[name] = groupPages
        langPages |= groupPages

    specialPages = Set(["SystemPagesGroup"])

    masterNonSystemPages = allPages - langPages - specialPages

    moinI18nPages = Set([x for x in masterNonSystemPages if x.startswith("MoinI18n")])

    nodistPages = moinI18nPages | Set(["InterWikiMap", ])

    extraPages = masterNonSystemPages - nodistPages

    pageSets[ALL] = langPages

    for name in pageSets.keys():
        # BUG FIX: was `name not in (u"English")` - a parenthesised string,
        # i.e. a *substring* test, not the intended one-element tuple.
        if name not in (u"English",):
            pageSets[name] -= pageSets[u"English"]
            pageSets[name] -= nodistPages

    pageSets[EXTRA] = extraPages   # stuff that maybe should be in some language group
    pageSets[NODIST] = nodistPages # we dont want to have them in dist archive
    return pageSets
+
def packagePages(pagelist, filename, function):
    """ Puts pages from pagelist into filename and calls function on them on installation. """
    # start from a clean slate - a stale archive must not survive
    try:
        os.remove(filename)
    except OSError:
        pass
    archive = zipfile.ZipFile(filename, "w", COMPRESSION_LEVEL)

    counter = 0
    script = [packLine(['MoinMoinPackage', '1']),
              ]

    for pagename in pagelist:
        pagename = pagename.strip()
        page = Page(request, pagename)
        if not page.exists():
            # page is missing on this wiki - silently skipped, as before
            continue
        counter += 1
        script.append(packLine([function, str(counter), pagename]))
        timestamp = wikiutil.version2timestamp(page.mtime_usecs())
        # archive member is named by its ordinal; keep the page's mtime
        info = zipfile.ZipInfo(filename=str(counter), date_time=datetime.fromtimestamp(timestamp).timetuple()[:6])
        info.compress_type = COMPRESSION_LEVEL
        archive.writestr(info, page.get_raw_body().encode("utf-8"))

    script += [packLine(['Print', 'Installed MoinMaster page bundle %s.' % os.path.basename(filename)])]

    archive.writestr(MOIN_PACKAGE_FILE, u"\n".join(script).encode("utf-8"))
    archive.close()
+
def removePages(pagelist):
    """ Pages from pagelist get removed from the underlay directory.

    Best-effort: pages whose base path cannot be resolved or whose tree
    cannot be removed are silently skipped, as before.
    """
    import shutil
    for pagename in pagelist:
        pagename = pagename.strip()
        page = Page(request, pagename)
        try:
            underlay, path = page.getPageBasePath(-1)
            shutil.rmtree(path)
        except Exception:
            # was a bare except: - keep the best-effort behaviour but no
            # longer swallow SystemExit/KeyboardInterrupt
            pass
+
def packageCompoundInstaller(bundledict, filename):
    """ Creates a package which installs all other packages. """
    # remove any stale archive first
    try:
        os.remove(filename)
    except OSError:
        pass
    archive = zipfile.ZipFile(filename, "w", COMPRESSION_LEVEL)

    lines = [packLine(['MoinMoinPackage', '1'])]
    skip = (NODIST, EXTRA, ALL, u"English")
    for name in bundledict.keys():
        if name in skip:
            continue
        lines.append(packLine(["InstallPackage", "SystemPagesSetup", name + ".zip"]))
    lines.append(packLine(['Print', 'Installed all MoinMaster page bundles.']))

    archive.writestr(MOIN_PACKAGE_FILE, u"\n".join(lines).encode("utf-8"))
    archive.close()
+
def getMasterPages():
    """ Leechezzz.

    Fetch every page from the master wiki via xmlrpc using a crude
    thread pool (at most maxThreads concurrent downloads) and write each
    page locally through PageEditor.

    NOTE(review): reads the module-global `request`; this is only bound
    if run() publishes it as a global - confirm before re-enabling the
    getMasterPages() call in run().
    """
    master = xmlrpclib.ServerProxy(master_url)
    maxThreads = 100

    def downloadpage(wiki, pagename):
        source = wiki.getPage(pagename)
        # tag the FrontPage master with a translation-help banner
        if source.find("##master-page:FrontPage") != -1:
            source += u"""\n\n||<tablestyle="background: lightyellow; width:100%; text-align:center">[[en]] If you want to add help pages in your favorite language, see '''SystemPagesSetup'''.||\n"""

        PageEditor(request, pagename, uid_override="Fetching ...")._write_file(source)
        #print "Fetched " + pagename.encode("utf-8")

    stopped = []  # threads created but not yet started
    running = []  # threads that have been started

    print "Loading master page list ..."
    pagelist = master.getAllPages()
    print "Preparing threads ..."
    for pagename in pagelist:
        t = threading.Thread(target=downloadpage, args=(master, pagename), name=pagename.encode("unicode_escape"))
        stopped.append(t)

    print "Starting scheduler ..."
    # Scheduler loop: reap finished threads, then start new ones until the
    # pool limit is reached; poll once per second.
    while len(running) > 0 or len(stopped) != 0:
        for x in running:
            # NOTE(review): removing from `running` while iterating it can
            # skip entries; benign only because skipped threads get reaped
            # on a later pass - consider iterating a copy.
            if not x.isAlive():
                #print "Found dead thread " + repr(x)
                running.remove(x)
        print "running %i| stopped %i" % (len(running), len(stopped))
        for i in xrange(min(maxThreads - len(running), len(stopped))):
            t = stopped.pop()
            running.append(t)
            t.start()
            #print "Scheduled %s." % repr(t)
        sleep(1)
+
+def run():
+    request = RequestCLI(url='localhost/')
+    request.form = request.args = request.setup_args()
+
+    gd = wikidicts.GroupDict(request)
+    gd.reset()
+
+    #getMasterPages()
+    print "Building page sets ..."
+    pageSets = buildPageSets()
+
+    print "Creating packages ..."
+    generate_filename = lambda name: os.path.join('testwiki', 'underlay', 'pages', 'SystemPagesSetup', 'attachments', '%s.zip' % name)
+
+    packageCompoundInstaller(pageSets, generate_filename(ALL))
+
+    [packagePages(list(pages), generate_filename(name), "ReplaceUnderlay") 
+        for name, pages in pageSets.items() if not name in (u'English', ALL, NODIST)]
+
+    [removePages(list(pages)) 
+        for name, pages in pageSets.items() if not name in (u'English', ALL)]
+
+    print "Finished."
+
+if __name__ == "__main__":
+    run()
+
--- /dev/null	Thu Jan 01 00:00:00 1970 +0000
+++ b/MoinMoin/script/old/pagescleaner.py	Sat Mar 25 22:31:55 2006 +0000
@@ -0,0 +1,64 @@
+#!/usr/bin/env python
+# -*- coding: iso-8859-1 -*-
+"""
+    MoinMoin - display unused or trash page directories in data/pages
+    
+    Usage:
+    First change the base path to match your needs.
+    Then do ./pagescleaner.py >cleanthem.sh
+    Then please review cleanthem.sh and run it, if it is OK.
+
+    @copyright: 2005 by Thomas Waldmann (MoinMoin:ThomasWaldmann)
+    @license: GNU GPL, see COPYING for details.
+"""
+
+import os
+
+base = "."
+pagebasedir = base + "/data/pages"
+
def qualify(p):
    """Classify a page directory and return a short status string.

    @param p: path of one page directory below data/pages
    @return: one of 'empty', 'trash', 'current-locked', 'current damaged',
             'deleted', 'no current', 'ok'
    """
    entries = os.listdir(p)  # renamed: 'dir' shadowed the builtin
    if not entries:
        return 'empty'

    # check if we have something of potential value
    revs = []
    if 'revisions' in entries:
        revs = os.listdir(os.path.join(p, 'revisions'))
    atts = []
    if 'attachments' in entries:
        atts = os.listdir(os.path.join(p, 'attachments'))

    if not revs and not atts:
        return 'trash'

    if 'current-locked' in entries:
        return 'current-locked'
    elif 'current' in entries:
        try:
            f = open(os.path.join(p, 'current'))
            current = f.read().strip()
            f.close()
            int(current)  # just validate it is a revision number
        except (IOError, OSError, ValueError):
            # was a bare except: - only read/parse failures are expected
            return 'current damaged'
        # 'current' must name an existing revision file, else the page
        # was deleted (its latest revision no longer exists)
        if current not in revs:
            return 'deleted'
    else:
        return 'no current'

    return 'ok'
+
def run():
    # Walk all page directories (module-global `pagebasedir`) and emit a
    # shell script on stdout: 'mv' commands for trash/empty/deleted pages,
    # plain comments for everything else, so the output can be reviewed
    # before being executed.
    for p in os.listdir(pagebasedir):
        pagedir = os.path.join(pagebasedir, p)
        status = qualify(pagedir)
        if status in ['trash', 'empty', ]:
            print "mv '%s' trash # %s" % (pagedir,status)
        elif status in ['deleted', ]:
            print "mv '%s' deleted # %s" % (pagedir,status)
        else:
            print "# %s: '%s'" % (status, pagedir)

if __name__ == "__main__":
    run()
+
--- /dev/null	Thu Jan 01 00:00:00 1970 +0000
+++ b/MoinMoin/script/old/print_stats.py	Sat Mar 25 22:31:55 2006 +0000
@@ -0,0 +1,38 @@
+#!/usr/bin/env python
+# -*- coding: iso-8859-1 -*-
+"""
+    MoinMoin - Print statistics gathered by hotshot profiler
+
+    Usage:
+        print_stats.py statsfile
+    
+    Typical usage:
+     1. Edit moin.py and activate the hotshot profiler, set profile file name
+     2. Run moin.py
+     3. Do some request, with a browser, script or ab
+     4. Stop moin.py
+     5. Run this tool: print_stats.py moin.prof
+
+    Currently CGI and twisted also have a hotshot profiler integration.
+    
+    @copyright: 2005 by Thomas Waldmann (MoinMoin:ThomasWaldmann)
+    @license: GNU GPL, see COPYING for details.
+"""
def run():
    # Load the hotshot profile named as the only CLI argument and print
    # the 40 most expensive entries plus their callers.
    import sys
    from hotshot import stats

    if len(sys.argv) != 2:
        print __doc__
        sys.exit()

    # Load and print stats 
    s = stats.load(sys.argv[1])
    s.strip_dirs()
    s.sort_stats('cumulative', 'time', 'calls')
    s.print_stats(40)
    s.print_callers(40)

if __name__ == "__main__":
    run()
+
--- /dev/null	Thu Jan 01 00:00:00 1970 +0000
+++ b/MoinMoin/script/old/reducewiki/__init__.py	Sat Mar 25 22:31:55 2006 +0000
@@ -0,0 +1,7 @@
+# -*- coding: iso-8859-1 -*-
+"""
+    MoinMoin - reduce a wiki to the latest revision of each page
+
+    @copyright: 2005 by Thomas Waldmann (MoinMoin:ThomasWaldmann)
+    @license: GNU GPL, see COPYING for details.
+"""
--- /dev/null	Thu Jan 01 00:00:00 1970 +0000
+++ b/MoinMoin/script/old/reducewiki/reducewiki.py	Sat Mar 25 22:31:55 2006 +0000
@@ -0,0 +1,91 @@
+#!/usr/bin/env python
+"""
+    Use this script to reduce a data/ directory to the latest page revision of
+    each non-deleted page (plus all attachments).
+    
+    This is used to make the distributed underlay directory, but can also be
+    used for other purposes.
+    
+    So we change like this:      
+        * data/pages/PageName/revisions/{1,2,3,4}
+          -> data/pages/revisions/1
+        * data/pages/PageName/current (pointing to e.g. 4)
+          -> same (pointing to 1)
+        * data/pages/PageName/edit-log and data/edit-log
+          -> do not copy
+        * data/pages/PageName/attachments/*
+          -> just copy
+
+    Steps for a successful conversion:
+
+        1. Stop your wiki and make a backup of old data and code
+
+        2. Make a copy of the wiki's "data" directory to your working dir
+
+        3. Run this script from your working dir
+
+        4. If there was no error, you will find:
+            data.pre-reduce - the script renames your data directory copy to that name
+            data - reduced data dir
+
+        5. Verify conversion results (number of pages, ...)
+
+        6. Test it - if something has gone wrong, you still have your backup.
+
+
+    @copyright: 2005 Thomas Waldmann
+    @license: GPL, see COPYING for details
+"""
+
+url = 'moinmaster.wikiwikiweb.de/'
+destdir = 'underlay'
+
+import sys
+sys.path.insert(0, '/org/de.wikiwikiweb.moinmaster/bin15') # farmconfig/wikiconfig location
+sys.path.insert(0, '../../..')
+
+import os, os.path, shutil, codecs
+from MoinMoin import config
+from MoinMoin import wikiutil
+from MoinMoin.request import RequestCLI
+from MoinMoin.Page import Page
+from MoinMoin.PageEditor import PageEditor
+from MoinMoin.action import AttachFile
+
def copypage(request, rootdir, pagename):
    """quick and dirty!

    Copy the current revision of `pagename` (plus its attachments) into
    `rootdir`, stored as revision 1.
    """
    pagedir = os.path.join(rootdir, 'pages', wikiutil.quoteWikinameFS(pagename))
    os.makedirs(pagedir)

    # the reduced wiki keeps exactly one revision, so 'current' is 1
    revstr = '%08d' % 1
    cf = os.path.join(pagedir, 'current')
    f = open(cf, 'w')
    f.write(revstr+'\n')
    f.close()  # close explicitly instead of relying on refcounting

    revdir = os.path.join(pagedir, 'revisions')
    os.makedirs(revdir)
    tf = os.path.join(revdir, revstr)
    p = Page(request, pagename)
    # on-disk page files use CRLF line ends
    text = p.get_raw_body().replace("\n","\r\n")
    f = codecs.open(tf, 'wb', config.charset)
    f.write(text)
    f.close()  # was codecs.open(...).write(...) - handle never closed

    source_dir = AttachFile.getAttachDir(request, pagename)
    if os.path.exists(source_dir):
        dest_dir = os.path.join(pagedir, "attachments")
        os.makedirs(dest_dir)
        for filename in os.listdir(source_dir):
            source_file = os.path.join(source_dir, filename)
            dest_file = os.path.join(dest_dir, filename)
            shutil.copyfile(source_file, dest_file)
+
def run():
    """Copy the latest revision of every page into `destdir`."""
    request = RequestCLI(url=url)
    request.form = request.args = request.setup_args()

    # materialize the page list first, then copy page by page
    for pagename in list(request.rootpage.getPageList(user='')):
        copypage(request, destdir, pagename)


if __name__ == "__main__":
    run()
+
--- /dev/null	Thu Jan 01 00:00:00 1970 +0000
+++ b/MoinMoin/script/old/repair_language.py	Sat Mar 25 22:31:55 2006 +0000
@@ -0,0 +1,167 @@
+#!/usr/bin/env python
+""" repair-language - repair page language setting.
+
+Usage:
+
+    repair-language option
+
+Options:
+
+    verify - verify pages, does not change anything, print page revision
+        that should be repaired.
+
+    repair - repair all page revisions.
+
+Step by step instructions:
+
+ 1. Stop your wiki.
+
+ 2. Make a backup of 'data' directory.
+
+ 3. Run this script from your wiki data directory, where your pages
+    directory lives.
+
+ 4. Fix permissions on the data directory, as explained in HelpOnInstalling.
+
+ 5. Verify that pages are fine after repair, if you find a problem,
+    restore your data directory from backup.
+
+Why run this script?
+
+    In patch-325 a new #language processing instruction has been added.
+    Pages that specify the language with it are displayed using correct
+    direction, even if language_default use different direction.
+
+    In the past, pages used to have ##language:xx comment. This comment
+    has no effect, and should be replaced with newer #language xx
+    processing instruction.
+
+    This script replaces ##language:xx with #language xx in page headers.
+    It converts all page revisions, so you can safely revert back to an old
+    revision and still get the correct page direction.
+
+    You can run the script multiple times if needed.
+
+@copyright: 2004 Nir Soffer <nirs AT freeshell DOT org>
+@license: GPL, see COPYING for details
+"""
+
+import codecs
+import os, sys
+
+# Insert THIS moin dir first into sys path, or you would run another
+# version of moin!
+sys.path.insert(0, '../..')
+
+from MoinMoin import i18n
+valid_languages = i18n.wikiLanguages()
+
+
def listdir(path):
    """ Return list of files in path, filtering certain files """
    def ignored(name):
        # hidden files, pickle caches and CVS bookkeeping are skipped
        return (name.startswith('.') or
                name.endswith('.pickle') or
                name == 'CVS')
    return [name for name in os.listdir(path) if not ignored(name)]
+
+
def repairText(text):
    """ Repair page text

    Rewrite old-style '##language:xx' comments in the page header as
    '#language xx' processing instructions.  Only header lines (leading
    '#') are examined; the first non-header line stops the scan.

    Warning: will not repair the language if there is more text on the
    same line, e.g. ##language:fr make it french!

    @param text: the page text, unicode
    @rtype: 2 tuple, (unicode, int)
    @return: text after replacement, lines changed
    """
    lineend = u'\r\n'
    needle = u'##language:'
    changed = 0

    lines = text.splitlines()
    for i, line in enumerate(lines):
        if not line.startswith(u'#'):
            break  # past the page header - nothing further can match
        if not line.startswith(needle):
            continue
        # Normalize language names: language files are named xx_yy, but
        # iso names use xx-yy.  This can confuse people.
        lang = line[len(needle):].strip().replace(u"_", u"-")
        # only emit the new-style instruction for a known language
        if lang in valid_languages:
            lines[i] = u'#language %s' % lang
            changed += 1

    if changed:
        # Join lines back, make sure there is a trailing line end
        text = lineend.join(lines) + lineend
    return text, changed
+
+
def processPages(path, repair):
    """ Process page directory
    
    Walk every page below `path`, run repairText() over each stored
    revision, and either report (verify mode) or write back (repair
    mode) the revisions that changed.

    @param repair: repair or just test
    """
    charset = 'utf-8'
    
    pages = [p for p in listdir(path) if os.path.isdir(os.path.join(path, p))]
    for page in pages:
        revdir = os.path.join(path, page, 'revisions')
        if not os.path.isdir(revdir):
            print 'Error: %s: missing revisions directory' % page
            continue
        
        for rev in listdir(revdir):
            revpath = os.path.join(revdir, rev)
            # Open file, read text
            f = codecs.open(revpath, 'rb', charset)
            text = f.read()
            f.close()
            text, changed = repairText(text)

            if changed and repair:
                # Save converted text
                f = codecs.open(revpath, 'wb', charset)
                f.write(text)
                f.close()
                print 'Repaired %s revision %s' % (page, rev)
            elif changed:
                # verify mode: only report what repair mode would rewrite
                print 'Should repair %s revision %s' % (page, rev)
+
+
if __name__ == '__main__':

    # Check for pages directory in current directory
    path = os.path.abspath('pages')
    if not os.path.isdir(path):
        print "Error: could not find 'pages' directory"
        print 'Run this script from your wiki data directory'
        print __doc__
        sys.exit(1)   
    
    # map the command line keyword to processPages' `repair` flag
    options = {'verify': 0, 'repair': 1,}
    
    if len(sys.argv) != 2 or sys.argv[1] not in options:
        print __doc__
        sys.exit(1)

    processPages(path, repair=options[sys.argv[1]])
+    
+
+
+
--- /dev/null	Thu Jan 01 00:00:00 1970 +0000
+++ b/MoinMoin/script/old/unicode/__init__.py	Sat Mar 25 22:31:55 2006 +0000
@@ -0,0 +1,7 @@
+# -*- coding: iso-8859-1 -*-
+"""
+    MoinMoin - build unicode char tables
+
+    @copyright: 2005 by Thomas Waldmann (MoinMoin:ThomasWaldmann)
+    @license: GNU GPL, see COPYING for details.
+"""
--- /dev/null	Thu Jan 01 00:00:00 1970 +0000
+++ b/MoinMoin/script/old/unicode/mk_chartypes.py	Sat Mar 25 22:31:55 2006 +0000
@@ -0,0 +1,39 @@
+"""
+    Build MoinMoin/util/chartypes.py with
+    UCS-2 character types (upper/lower/digits/spaces).
+    
+    @copyright: 2004 Thomas Waldmann
+    @license: GNU GPL, see COPYING for details
+"""
+
+uppercase = []
+lowercase = []
+digits = []
+space = []
+for code in range(1,65535):
+    c = unichr(code)
+    str = "\\u%04x" % code
+    if c.isupper():
+        uppercase.append(str)
+    elif c.islower():
+        lowercase.append(str)
+    elif c.isdigit():
+        digits.append(str)
+    elif c.isspace():
+        space.append(str)
+
+chars_upper = u''.join(uppercase)
+chars_lower = u''.join(lowercase+digits)
+chars_digits = u''.join(digits)
+chars_spaces = u''.join(space)
+
+print """
+_chartypes = {
+    'chars_upper': u"%(chars_upper)s",
+    'chars_lower': u"%(chars_lower)s",
+    'chars_digits': u"%(chars_digits)s",
+    'chars_spaces': u"%(chars_spaces)s",
+}
+
+""" % globals()
+
--- /dev/null	Thu Jan 01 00:00:00 1970 +0000
+++ b/MoinMoin/script/old/xmlrpc-tools/.cvsignore	Sat Mar 25 22:31:55 2006 +0000
@@ -0,0 +1,3 @@
+{arch}
+.arch-ids
+
--- /dev/null	Thu Jan 01 00:00:00 1970 +0000
+++ b/MoinMoin/script/old/xmlrpc-tools/HelloWorld.py	Sat Mar 25 22:31:55 2006 +0000
@@ -0,0 +1,17 @@
+#!/usr/bin/env python
+"""
+This script is a sample for xmlrpc calls.
+
+It calls the HelloWorld.py xmlrpc plugin.
+
+GPL software, 2003-08-10 Thomas Waldmann
+"""
+
+def run():
+    import xmlrpclib
+    srcwiki = xmlrpclib.ServerProxy("http://moinmaster.wikiwikiweb.de:8000/?action=xmlrpc2")
+    print srcwiki.HelloWorld("Hello Wiki User!\n")
+
+if __name__ == "__main__":
+    run()
+
--- /dev/null	Thu Jan 01 00:00:00 1970 +0000
+++ b/MoinMoin/script/old/xmlrpc-tools/UpdateGroupTest.py	Sat Mar 25 22:31:55 2006 +0000
@@ -0,0 +1,46 @@
+#!/usr/bin/env python
+# -*- coding: utf-8 -*-
+"""
+This script is just an example how to update a group definition page using xmlrpc.
+
+GPL software, 2005 Thomas Waldmann
+"""
def run():
    # Exercise the UpdateGroup xmlrpc call against a test wiki: create a
    # plain group, a group with an ACL, then retry with a different wrong
    # password to exercise the server's auth handling.  Results printed.
    import sys
    sys.path.insert(0, '../../..')

    import xmlrpclib
    from MoinMoin.support.BasicAuthTransport import BasicAuthTransport

    user = "XmlRpc"
    password = "wrong"
    dsttrans = BasicAuthTransport(user, password)
    mywiki = xmlrpclib.ServerProxy("http://enterprise.wikiwikiweb.de:8888/?action=xmlrpc2", transport=dsttrans)

    groupname = "TestGroup"
    groupdesc = "This is just a test."
    groupmembers = ["TestUser1", "TestUser2",]
    print mywiki.UpdateGroup(groupname, groupdesc, groupmembers)

    # the optional 4th argument sets an ACL on the group page
    groupname = "TestAclGroup"
    groupdesc = "This is just a test."
    groupmembers = ["TestUser3",]
    print mywiki.UpdateGroup(groupname, groupdesc, groupmembers, "All:read,write,delete,revert")

    del mywiki
    del dsttrans

    # second round with another (wrong) password
    user = "XmlRpc"
    password = "completelywrong"
    dsttrans = BasicAuthTransport(user, password)
    mywiki = xmlrpclib.ServerProxy("http://enterprise.wikiwikiweb.de:8888/?action=xmlrpc2", transport=dsttrans)

    groupname = "TestGroup"
    groupdesc = "This is just a test."
    groupmembers = ["WrongUser1", "WrongUser2",]
    print mywiki.UpdateGroup(groupname, groupdesc, groupmembers)


if __name__ == "__main__":
    run()
+
--- /dev/null	Thu Jan 01 00:00:00 1970 +0000
+++ b/MoinMoin/script/old/xmlrpc-tools/WhoAmI.py	Sat Mar 25 22:31:55 2006 +0000
@@ -0,0 +1,26 @@
+#!/usr/bin/env python
+"""
+This script checks whether the wiki authenticates and trusts you.
+
+It calls the TrustMe.py xmlrpc plugin. To use http auth, you need to configure
+the srcwiki with auth = [http, moin_cookie] in its wikiconfig.
+
+GPL software, 2005 Thomas Waldmann
+"""
+
+def run():
+    user = "ThomasWaldmann"
+    password = "wrong"
+
+    import sys, xmlrpclib
+    sys.path.insert(0, '../../..')
+    from MoinMoin.support.BasicAuthTransport import BasicAuthTransport
+
+    srctrans = BasicAuthTransport(user, password)
+    srcwiki = xmlrpclib.ServerProxy("http://moinmaster.wikiwikiweb.de/?action=xmlrpc2", transport=srctrans)
+
+    print srcwiki.WhoAmI()
+
+if __name__ == "__main__":
+    run()
+
--- /dev/null	Thu Jan 01 00:00:00 1970 +0000
+++ b/MoinMoin/script/old/xmlrpc-tools/__init__.py	Sat Mar 25 22:31:55 2006 +0000
@@ -0,0 +1,8 @@
+# -*- coding: iso-8859-1 -*-
+"""
+    MoinMoin - xmlrpc example Scripts
+
+    @copyright: 2004 by Thomas Waldmann
+    @license: GNU GPL, see COPYING for details.
+"""
+
--- /dev/null	Thu Jan 01 00:00:00 1970 +0000
+++ b/MoinMoin/script/old/xmlrpc-tools/get_es_pages.py	Sat Mar 25 22:31:55 2006 +0000
@@ -0,0 +1,32 @@
+""" get some pages from another wiki """
+
def run():
    """Fetch the pages listed in ./index from the remote wiki and write
    each one out as a MoinMoin page directory holding one revision.

    Each line of ./index is a wiki table row; the page name is taken
    from its 4th '||'-separated column.
    """
    import sys, os, xmlrpclib, codecs

    sys.path.insert(0, "..")
    from MoinMoin import wikiutil

    s = xmlrpclib.ServerProxy("http://wainu.ii.uned.es/wainuki/?action=xmlrpc2")
    index = open("index")
    try:
        for l in index:
            d = l.split('||')
            pn = d[3].strip()
            pd = s.getPage(pn)
            dn = wikiutil.quoteWikinameFS(pn.decode("utf-8"))
            os.mkdir(dn)
            cn = os.path.join(dn,'current')
            f = open(cn,'w')
            f.write('00000001\n')
            f.close()
            dn2 = os.path.join(dn, 'revisions')
            os.mkdir(dn2)
            fn = os.path.join(dn2,'00000001')
            f = codecs.open(fn,"wb","utf-8")
            # page files on disk use CRLF line ends
            pd = pd.replace('\n','\r\n')
            f.write(pd)
            f.close()
    finally:
        index.close()  # the original leaked this handle

if __name__ == "__main__":
    run()
+
--- /dev/null	Thu Jan 01 00:00:00 1970 +0000
+++ b/MoinMoin/script/old/xmlrpc-tools/getmasterpages2.py	Sat Mar 25 22:31:55 2006 +0000
@@ -0,0 +1,48 @@
+#!/usr/bin/env python
+"""
+This script is a hack because moinmaster wiki does not support
+xmlrpc due to unknown reasons. It gets all SystemPages from srcwiki
+via action=raw and stores them into dstwiki via xmlrpc.
+
+We use wiki rpc v2 here.
+
+GPL software, 2003-09-27 Thomas Waldmann
+"""
+
+import xmlrpclib, urllib
+from MoinMoin import wikiutil
+from MoinMoin.support.BasicAuthTransport import BasicAuthTransport
+
+srcurlformat = "http://moinmaster.wikiwikiweb.de/%s?action=raw"
+user = "YourWikiName"
+password = "yourbasicauthpassword"
+srcwiki = xmlrpclib.ServerProxy("http://moinmaster.wikiwikiweb.de/?action=xmlrpc2")
+dsttrans = BasicAuthTransport(user,password)
+dstwiki = xmlrpclib.ServerProxy("http://devel.linuxwiki.org/moin--main/__xmlrpc/?action=xmlrpc2", transport=dsttrans)
+
def rawGetPage(srcurl, pagename, encoding='iso-8859-1'):
    # Fetch a page via plain HTTP (?action=raw) and recode it to utf-8.
    # Currently unused - transferpage() fetches via xmlrpc instead.
    url = srcurl % wikiutil.quoteWikinameFS(pagename.encode(encoding))
    pagedata = urllib.urlopen(url).read()
    return unicode(pagedata, encoding).encode('utf-8')
+
def transferpage(srcurlformat, dstwiki, pagename):
    # Copy one page from the master wiki into dstwiki and report the result.
    # NOTE(review): fetches through the module-global `srcwiki`; the
    # srcurlformat parameter is only used by the commented-out raw-HTTP
    # fallback below.
    pagedata = srcwiki.getPage(pagename)
    #pagedata = rawGetPage(srcurlformat, pagename, 'iso-8859-1')
    rc = dstwiki.putPage(pagename, pagedata)
    print "Transferred %s. Len = %d, rc = %s" % (pagename.encode('ascii','replace'), len(pagedata), str(rc))
+
def run():
    # Transfer the AllSystemPagesGroup index page first, then every
    # language group page it links, then every page those groups link.
    allsystempagesgroup = 'AllSystemPagesGroup'
    transferpage(srcurlformat, dstwiki, allsystempagesgroup)
    # NOTE(review): link lists are read from the *destination* wiki, which
    # works because the group pages were just transferred there - verify.
    allgrouppages = dstwiki.listLinks(allsystempagesgroup)

    for langgrouppage in allgrouppages:
        pagename = langgrouppage['name']
        transferpage(srcurlformat, dstwiki, pagename)
        pages = dstwiki.listLinks(pagename)
        for page in pages:
            transferpage(srcurlformat, dstwiki, page['name'])

if __name__ == "__main__":
    run()
+
--- /dev/null	Thu Jan 01 00:00:00 1970 +0000
+++ b/MoinMoin/script/old/xmlrpc-tools/getsystempages.py	Sat Mar 25 22:31:55 2006 +0000
@@ -0,0 +1,35 @@
+#!/usr/bin/env python
+"""
+This script gets all SystemPages from srcwiki via xmlrpc and
+stores them into dstwiki via xmlrpc. We use wiki rpc v1 here.
+
+*** DO NOT USE, SEE getsystempages2.py ***
+
+GPL software, 2003-08-10 Thomas Waldmann
+"""
+
+from xmlrpclib import *
+
+srcwiki = ServerProxy("http://moinmaster.wikiwikiweb.de/?action=xmlrpc")
+#srcwiki = ServerProxy("http://moinmaster.wikiwikiweb.de/?action=xmlrpc")
+dstwiki = ServerProxy("http://devel.linuxwiki.org/moin--cvs?action=xmlrpc")
+
+def transferpage(srcwiki, dstwiki, pagename):
+    pagedata = srcwiki.getPage(pagename).data
+    dstwiki.putPage(pagename, Binary(pagedata))
+    print "Transferred %s." % pagename
+
def run():
    """ Walk AllSystemPagesGroup on srcwiki and mirror the group page,
    each language group page, and all pages they link into dstwiki. """
    grouppage = 'AllSystemPagesGroup'
    transferpage(srcwiki, dstwiki, grouppage)
    for langgroup in srcwiki.listLinks(grouppage):
        langpagename = langgroup['name']
        transferpage(srcwiki, dstwiki, langpagename)
        for linked in srcwiki.listLinks(langpagename):
            transferpage(srcwiki, dstwiki, linked['name'])
+
+if __name__ == "__main__":
+    run()
+
--- /dev/null	Thu Jan 01 00:00:00 1970 +0000
+++ b/MoinMoin/script/old/xmlrpc-tools/getsystempages2.py	Sat Mar 25 22:31:55 2006 +0000
@@ -0,0 +1,38 @@
+#!/usr/bin/env python
+"""
+This script gets all SystemPages from srcwiki via xmlrpc and
+stores them into dstwiki via xmlrpc. We use wiki rpc v2 here.
+
+GPL software, 2003-08-10 Thomas Waldmann
+"""
+
+import xmlrpclib
+from MoinMoin.support.BasicAuthTransport import BasicAuthTransport
+
+#srcwiki = xmlrpclib.ServerProxy("http://moinmaster.wikiwikiweb.de/FrontPage?action=xmlrpc")
+user = "YourWikiName"
+password = "yourbasicauthpassword"
+srctrans = BasicAuthTransport(user,password)
+dsttrans = BasicAuthTransport(user,password)
+srcwiki = xmlrpclib.ServerProxy("http://devel.linuxwiki.org/moin--cvs/__xmlrpc/?action=xmlrpc2", transport=srctrans)
+dstwiki = xmlrpclib.ServerProxy("http://devel.linuxwiki.org/moin--cvs/__xmlrpc/?action=xmlrpc2", transport=dsttrans)
+
+def transferpage(srcwiki, dstwiki, pagename):
+    pagedata = srcwiki.getPage(pagename)
+    dstwiki.putPage(pagename, pagedata)
+    print "Transferred %s." % pagename.encode('ascii', 'replace')
+
def run():
    """ Mirror AllSystemPagesGroup, the language group pages it links,
    and every page those group pages link, from srcwiki to dstwiki. """
    grouppage = 'AllSystemPagesGroup'
    transferpage(srcwiki, dstwiki, grouppage)
    for langgroup in srcwiki.listLinks(grouppage):
        langpagename = langgroup['name']
        transferpage(srcwiki, dstwiki, langpagename)
        for linked in srcwiki.listLinks(langpagename):
            transferpage(srcwiki, dstwiki, linked['name'])
+
+if __name__ == "__main__":
+    run()
+
--- /dev/null	Thu Jan 01 00:00:00 1970 +0000
+++ b/MoinMoin/script/old/xmlrpc-tools/putPageTest.py	Sat Mar 25 22:31:55 2006 +0000
@@ -0,0 +1,38 @@
+#!/usr/bin/env python
+# -*- coding: utf-8 -*-
+"""
+This script is just an example how to put data into a wiki using xmlrpc.
+We use wiki rpc v2 here.
+
+This script only works if you edited MoinMoin/wikirpc.py (see the comment
+in the putPage handler) to not require http auth (trusted user) and to
+really use the pagename we give.
+
+This can be done for migrating data into an offline moin wiki running on
+localhost - don't put a wiki configured like this on the internet!
+
+GPL software, 2005 Thomas Waldmann
+"""
def run():
    """ Demo: push two pages (one ascii, one utf-8) into a local wiki
    via xmlrpc v2.

    Only works against a wikirpc.py patched to skip http auth and honor
    the given pagename - i.e. an offline wiki on localhost (see module
    docstring for the warning).
    """
    import xmlrpclib
    mywiki = xmlrpclib.ServerProxy("http://localhost/mywiki/?action=xmlrpc2")

    # 1) a simple round trip in pure ascii
    mywiki.putPage("ApureAsciiPage", "My first test.")

    # 2) utf-8 encoded page name and content; this stuff will only look
    #    correct if you use utf-8 enabled equipment.
    pagename = "SomeUtf8Pagename-äöüÄÖÜߢ" # we use some german chars here
    pagedata = "Some UTF-8 content: äöü ÄÖÜ ß ¢"
    mywiki.putPage(pagename, pagedata)

    # data in iso-8859-1 (latin1) would first need recoding, e.g.:
    # pagename = latin1pagename.decode('iso-8859-1').encode('utf-8')
    # pagedata = latin1pagedata.decode('iso-8859-1').encode('utf-8')
    # mywiki.putPage(pagename, pagedata)
+
+if __name__ == "__main__":
+    run()
+
--- /dev/null	Thu Jan 01 00:00:00 1970 +0000
+++ b/MoinMoin/script/old/xmlrpc-tools/wikibackup.py	Sat Mar 25 22:31:55 2006 +0000
@@ -0,0 +1,52 @@
+#!/usr/bin/env python
+"""
+This script gets all Pages from a wiki via xmlrpc and
+stores them into a backup file. We use wiki rpc v2 here.
+
+Important note:
+
+This script ONLY handles the current versions of the wiki pages.
+
+It does NOT handle:
+    * event or edit logs (page history)
+    * old versions of pages
+    * attachments
+    * user account data
+    * MoinMoin code or config running the wiki
+    
+So this is definitely NOT a complete backup.
+
+GPL software, 2003-08-10 Thomas Waldmann
+"""
+def run():
+    import xmlrpclib
+    from MoinMoin.support.BasicAuthTransport import BasicAuthTransport
+
+    #user = "username"
+    #password = "xxxxxxxx"
+    #srctrans = BasicAuthTransport(user,password)
+    #srcwiki = xmlrpclib.ServerProxy("http://devel.linuxwiki.org/moin--cvs/__xmlrpc/?action=xmlrpc2", transport=srctrans)
+    srcwiki = xmlrpclib.ServerProxy("http://devel.linuxwiki.org/moin--cvs/?action=xmlrpc2")
+
+    try:
+        import cPickle as pickle
+    except ImportError:
+        import pickle
+
+    # Set pickle protocol, see http://docs.python.org/lib/node64.html
+    PICKLE_PROTOCOL = pickle.HIGHEST_PROTOCOL
+
+    backup={}
+    allpages = srcwiki.getAllPages()
+    for pagename in allpages:
+        pagedata = srcwiki.getPage(pagename)
+        print "Got %s." % pagename
+        backup[pagename]=pagedata
+
+    backupfile = open("wikibackup.pickle","w")
+    pickle.dump(backup, backupfile, PICKLE_PROTOCOL)
+    backupfile.close()
+
+if __name__ == "__main__":
+    run()
+
--- /dev/null	Thu Jan 01 00:00:00 1970 +0000
+++ b/MoinMoin/script/old/xmlrpc-tools/wikirestore.py	Sat Mar 25 22:31:55 2006 +0000
@@ -0,0 +1,49 @@
+#!/usr/bin/env python
+"""
+This script reads a wikibackup.pickle file and puts
+all Pages contained there into a wiki via xmlrpc.
+We use wiki rpc v2 here.
+
+Important note:
+
+This script ONLY handles the current versions of the wiki pages.
+
+It does NOT handle:
+    * event or edit logs (page history)
+    * old versions of pages
+    * attachments
+    * user account data
+    * MoinMoin code or config running the wiki
+    
+So this is definitely NOT a complete restore.
+
+GPL software, 2003-10-24 Thomas Waldmann
+"""
+def run():
+    import xmlrpclib
+    from MoinMoin.support.BasicAuthTransport import BasicAuthTransport
+
+    user = "ThomasWaldmann"
+    password = "xxxxxxxxxxxx"
+    dsttrans = BasicAuthTransport(user,password)
+    dstwiki = xmlrpclib.ServerProxy("http://devel.linuxwiki.org/moin--cvs/__xmlrpc/?action=xmlrpc2", transport=dsttrans)
+    #dstwiki = xmlrpclib.ServerProxy("http://devel.linuxwiki.org/moin--cvs/?action=xmlrpc2")
+
+    try:
+        import cPickle as pickle
+    except ImportError:
+        import pickle
+
+    backupfile = open("wikibackup.pickle","r")
+    backup = pickle.load(backupfile)
+    backupfile.close()
+
+    allpages = backup.keys()
+    for pagename in allpages:
+        pagedata = backup[pagename]
+        dstwiki.putPage(pagename, pagedata) # TODO: add error check
+        print "Put %s." % pagename
+
+if __name__ == "__main__":
+    run()
+
--- a/MoinMoin/scripts/.cvsignore	Sat Mar 25 11:18:58 2006 +0000
+++ /dev/null	Thu Jan 01 00:00:00 1970 +0000
@@ -1,4 +0,0 @@
-*.pyo
-*.pyc
-{arch}
-.arch-ids
--- a/MoinMoin/scripts/__init__.py	Sat Mar 25 11:18:58 2006 +0000
+++ /dev/null	Thu Jan 01 00:00:00 1970 +0000
@@ -1,8 +0,0 @@
-# -*- coding: iso-8859-1 -*-
-"""
-    MoinMoin - Scripts
-
-    @copyright: 2001 by Jrgen Hermann <jh@web.de>
-    @license: GNU GPL, see COPYING for details.
-"""
-
--- a/MoinMoin/scripts/_util.py	Sat Mar 25 11:18:58 2006 +0000
+++ /dev/null	Thu Jan 01 00:00:00 1970 +0000
@@ -1,93 +0,0 @@
-# -*- coding: iso-8859-1 -*-
-"""
-    MoinMoin - Command line utilities
-
-    @copyright: 2000, 2001, 2002 by Jrgen Hermann <jh@web.de>
-    @license: GNU GPL, see COPYING for details.
-"""
-
-import os, sys
-
-flag_quiet = 0
-script_module = '__main__'
-
-
-#############################################################################
-### Logging
-#############################################################################
-
-def fatal(msgtext, **kw):
-    """ Print error msg to stderr and exit.
-    """
-    sys.stderr.write("FATAL ERROR: " + msgtext + "\n")
-    if kw.get('usage', 0):
-        maindict = vars(sys.modules[script_module])
-        if maindict.has_key('usage'):
-            maindict['usage']()
-    sys.exit(1)
-
-
-def log(msgtext):
-    """ Optionally print error msg to stderr.
-    """
-    if not flag_quiet:
-        sys.stderr.write(msgtext + "\n")
-
-
-#############################################################################
-### Commandline Support
-#############################################################################
-
-class Script:
-
-    def __init__(self, script, usage):
-        import sys, time
-
-        self.script_module = sys.modules[script]
-
-        global _start_time
-        _start_time = time.clock()
-
-        import optparse
-        from MoinMoin import version
-
-        cmd = self.script_module.__name__.split('.')[-1].replace('_', '-')
-        rev = "%s %s [%s]" % (version.project, version.release, version.revision)
-        sys.argv[0] = cmd
-
-        self.parser = optparse.OptionParser(
-            usage="%(cmd)s %(usage)s\n\n" % {'cmd': cmd, 'usage': usage, },
-            version=rev)
-        self.parser.add_option(
-            "-q", "--quiet", 
-            action="store_true", dest="quiet",
-            help="Be quiet (no informational messages)"
-        )
-
-
-    def run(self):
-        """ Run the main function of a command.
-        """
-        global flag_quiet
-
-        showtime = 1
-        try:
-            try:
-                self.options, self.args = self.parser.parse_args()
-                flag_quiet = self.options.quiet
-                self.mainloop()
-            except KeyboardInterrupt:
-                log("*** Interrupted by user!")
-            except SystemExit:
-                showtime = 0
-                raise
-        finally:
-            if showtime: self.logRuntime()
-
-
-    def logRuntime(self):
-        """ Print the total command run time.
-        """
-        import time
-        log("Needed %.3f secs." % (time.clock() - _start_time,))
-
--- a/MoinMoin/scripts/accounts/.cvsignore	Sat Mar 25 11:18:58 2006 +0000
+++ /dev/null	Thu Jan 01 00:00:00 1970 +0000
@@ -1,3 +0,0 @@
-{arch}
-.arch-ids
-
--- a/MoinMoin/scripts/accounts/__init__.py	Sat Mar 25 11:18:58 2006 +0000
+++ /dev/null	Thu Jan 01 00:00:00 1970 +0000
@@ -1,8 +0,0 @@
-# -*- coding: iso-8859-1 -*-
-"""
-    MoinMoin - account managment Scripts
-
-    @copyright: 2004 by Thomas Waldmann
-    @license: GNU GPL, see COPYING for details.
-"""
-
--- a/MoinMoin/scripts/accounts/moin_usercheck-jh-new.py	Sat Mar 25 11:18:58 2006 +0000
+++ /dev/null	Thu Jan 01 00:00:00 1970 +0000
@@ -1,289 +0,0 @@
-#!/usr/bin/env python
-"""
-MoinMoin - check / process user accounts tool
-GPL code written by Thomas Waldmann, 20031005
-
-Why is this needed?
-===================
-When using ACLs, a wiki user name has to be unique, there must not be
-multiple accounts having the same username. The problem is, that this
-was possible before the introduction of ACLs and many users, who forgot
-their ID, simply created a new ID using the same user name.
-
-Because access rights (when using ACLs) depend on the NAME (not the ID),
-this must be cleaned up before using ACLs or users will have difficulties
-changing settings and saving their account data (system won't accept the
-save, if the user name and email is not unique).
-
-How to use this tool?
-=====================
-
-0. Check the settings at top of the code!
-   Making a backup of your wiki might be also a great idea.
-   
-1. Best is to first look at duplicate user names:
-    --usersunique
-
-   If everything looks OK there, you may save that to disk:
-    --usersunique --save
-
-2. Now, check also for duplicate email addresses:
-    --emailsunique
-
-   If everything looks OK, you may save that to disk:
-    --emailsunique --save
-
-3. If the announced action is incorrect, you may choose to better manually
-disable some accounts:
-    --disableuser 1234567.8.90 --save
-
-4. After cleaning up, do 1. and 2. again. There should be no output now, if
-   everything is OK.
-   
-5. Optionally you may want to make wikinames out of the user names
-    --wikinames
-    --wikinames --save
-    
-"""
-
-# ----------------------------------------------------------------------------
-# if a user subsribes to magicpages, it means that he wants to keep
-# exactly THIS account - this will avoid deleting it.
-magicpages = [
-    "ThisAccountIsCorrect", 
-    "DieserAccountIstRichtig",
-]
-
-# ----------------------------------------------------------------------------
-from MoinMoin.scripts import _util
-config = None
-
-
-#############################################################################
-### Main program
-#############################################################################
-
-class MoinUserCheck(_util.Script):
-    def __init__(self):
-        _util.Script.__init__(self, __name__, "[options]")
-
-        # --config=DIR
-        self.parser.add_option(
-            "--config", metavar="DIR", dest="configdir",
-            help="Path to wikiconfig.py (or its directory)"
-        )
-
-        # --disableuser=UID
-        self.parser.add_option(
-            "--disableuser", metavar="UID", dest="disableuser",
-            help="Disable the user with user id UID;"
-                " this can't be combined with options below!"
-        )
-
-        # Flags
-        self._addFlag("usersunique",
-            "Makes user names unique (by appending the ID to"
-            " name and email, disabling subscribed pages and"
-            " disabling all, but the latest saved user account);"
-            " default is to SHOW what will be happening, you"
-            " need to give the --save option to really do it."
-        )
-        self._addFlag("emailsunique",
-            "Makes user emails unique;"
-            " default is to show, use --save to save it."
-        )
-        self._addFlag("wikinames",
-            "Convert user account names to wikinames (camel-case)."
-        )
-        self._addFlag("lastsaved",
-            "Normally the account most recently USED will"
-            " survive and the others will be disabled."
-            " Using --lastsaved, the account most recently"
-            " SAVED will survive."
-        )
-        self._addFlag("save",
-            "If specified as LAST option, will allow the other"
-            " options to save user accounts back to disk."
-            " If not specified, no settings will be changed permanently."
-        )
-
-
-    def _addFlag(self, name, help):
-        self.parser.add_option("--" + name,
-            action="store_true", dest=name, default=0, help=help)
-
-
-    def mainloop(self):
-        """ moin-usercheck's main code.
-        """
-        import os, sys
-
-        # we don't expect non-option arguments
-        if len(self.args) != 0:
-            self.parser.error("incorrect number of arguments")
-
-        # check for correct option combination
-        flags_given = (
-               self.options.usersunique 
-            or self.options.emailsunique 
-            or self.options.wikinames)
-
-        if flags_given and self.options.disableuser:
-            # XXX: why is this? only because the former option parser code was braindead?
-            self.parser.error("--disableuser can't be combined with other options!")
-
-        # no option given ==> show usage
-        if not (flags_given or self.options.disableuser):
-            self.parser.print_help()
-            sys.exit(1)
-
-        #
-        # Load the configuration
-        #
-        configdir = self.options.configdir
-        if configdir:
-            if os.path.isfile(configdir): configdir = os.path.dirname(configdir)
-            if not os.path.isdir(configdir):
-                _util.fatal("Bad path %r given to --config parameter" % configdir)
-            configdir = os.path.abspath(configdir)
-            sys.path[0:0] = [configdir]
-            os.chdir(configdir)
-
-        global config
-        from MoinMoin import config
-        if config.default_config:
-            _util.fatal("You have to be in the directory containing wikiconfig.py, "
-                "or use the --config option!")
-
-        # XXX: globals bad bad bad!
-        #global users, names, emails, uids_noemail
-        users = {} # uid : UserObject
-        names = {} # name : [uid, uid, uid]
-        emails = {} # email : [uid, uid, uid]
-        uids_noemail = {} # uid : name
-
-        # XXX: Refactor to methods!
-        from MoinMoin import user, wikiutil
-
-        def collect_data():
-            import re
-
-            for uid in user.getUserList():
-                u = user.User(None, uid)
-                users[uid] = u
-        
-                # collect name duplicates:
-                if names.has_key(u.name):
-                    names[u.name].append(uid)
-                else:
-                    names[u.name] = [uid]
-        
-                # collect email duplicates:
-                if u.email:
-                    if emails.has_key(u.email):
-                        emails[u.email].append(uid)
-                    else:
-                        emails[u.email] = [uid]
-        
-                # collect account with no or invalid email address set:
-                if not u.email or not re.match(".*@.*\..*", u.email):
-                    uids_noemail[uid] = u.name
-        
-        
-        def hasmagicpage(uid):
-            u = users[uid]
-            return u.isSubscribedTo(magicpages)
-        
-        
-        def disableUser(uid):
-            u = users[uid]
-            print " %-20s %-25s %-35s" % (uid, u.name, u.email),
-            keepthis = hasmagicpage(uid)
-            if keepthis:
-                print "- keeping (magicpage)!"
-                u.save() # update timestamp, so this will be latest next run
-            elif not u.disabled: # only disable once
-                u.disabled = 1
-                u.name = "%s-%s" % (u.name, uid)
-                if u.email:
-                    u.email = "%s-%s" % (u.email, uid)
-                u.subscribed_pages = "" # avoid using email
-                if self.options.save:
-                    u.save()
-                    print "- disabled."
-                else:
-                    print "- would be disabled."
-        
-        
-        def getsortvalue(uid,user):
-            t_ls = float(user.last_saved) # when user did last SAVE of his account data
-            if self.options.lastsaved:
-                return t_ls
-            else: # last USED (we check the page trail for that)
-                try:
-                    t_lu = float(os.path.getmtime(os.path.join(config.user_dir, uid+".trail")))
-                except OSError:
-                    t_lu = t_ls # better than having nothing
-                return t_lu
-        
-        
-        def process(uidlist):
-            sortlist = []
-            for uid in uidlist:
-                u = users[uid]
-                sortlist.append((getsortvalue(uid,u),uid))
-            sortlist.sort()
-            #print sortlist
-            # disable all, but the last/latest one
-            for t,uid in sortlist[:-1]:
-                disableUser(uid)
-            # show what will be kept
-            uid = sortlist[-1][1]
-            u = users[uid]
-            print " %-20s %-25s %-35s - keeping%s!" % (uid, u.name, u.email, hasmagicpage(uid) and " (magicpage)" or "")
-        
-        
-        def make_users_unique():
-            for name in names.keys():
-                if len(names[name])>1:
-                    process(names[name])
-        
-        
-        def make_emails_unique():
-            for email in emails.keys():
-                if len(emails[email])>1:
-                    process(emails[email])
-        
-        
-        def make_WikiNames():
-            import string
-            for uid in users.keys():
-                u = users[uid]
-                if u.disabled: continue
-                if not wikiutil.isStrictWikiname(u.name):
-                    newname = string.capwords(u.name).replace(" ","").replace("-","")
-                    if not wikiutil.isStrictWikiname(newname):
-                        print " %-20s %-25s - no WikiName, giving up" % (uid, u.name)
-                    else:
-                        print " %-20s %-25s - no WikiName -> %s" % (uid, u.name, newname)
-                        if self.options.save:
-                            u.name = newname
-                            u.save()
-
-        collect_data()
-        if self.options.disableuser:
-            disableUser(self.options.disableuser)
-        else:
-            if self.options.usersunique:
-                make_users_unique()
-            if self.options.emailsunique: 
-                make_emails_unique()
-            if self.options.wikinames:
-                make_WikiNames()
-
-
-def run():
-    MoinUserCheck().run()
-
-if __name__ == "__main__":
-    run()
--- a/MoinMoin/scripts/accounts/moin_usercheck.py	Sat Mar 25 11:18:58 2006 +0000
+++ /dev/null	Thu Jan 01 00:00:00 1970 +0000
@@ -1,252 +0,0 @@
-#!/usr/bin/env python
-# -*- coding: iso-8859-1 -*-
-"""
-MoinMoin - check / process user accounts tool
-GPL code written by Thomas Waldmann, 20031005
-
-Why is this needed?
-===================
-When using ACLs, a wiki user name has to be unique, there must not be
-multiple accounts having the same username. The problem is, that this
-was possible before the introduction of ACLs and many users, who forgot
-their ID, simply created a new ID using the same user name.
-
-Because access rights (when using ACLs) depend on the NAME (not the ID),
-this must be cleaned up before using ACLs or users will have difficulties
-changing settings and saving their account data (system won't accept the
-save, if the user name and email is not unique).
-
-How to use this tool?
-=====================
-
-0. Check the settings at top of the code!
-   Making a backup of your wiki might be also a great idea.
-   
-1. Best is to first look at duplicate user names:
-    --usersunique
-
-   If everything looks OK there, you may save that to disk:
-    --usersunique --save
-
-2. Now, check also for duplicate email addresses:
-    --emailsunique
-
-   If everything looks OK, you may save that to disk:
-    --emailsunique --save
-
-3. If the announced action is incorrect, you may choose to better manually
-disable some accounts:
-    --disableuser 1234567.8.90 --save
-
-4. After cleaning up, do 1. and 2. again. There should be no output now, if
-   everything is OK.
-   
-5. Optionally you may want to make wikinames out of the user names
-    --wikinames
-    --wikinames --save
-    
-"""
-
-import sys, re
-
-# ----------------------------------------------------------------------------
-# CHECK THESE SETTINGS, then remove or comment out the following line:
-#print "Check the settings in the script first, please!" ; sys.exit(1)
-
-# this is where your moinmoin code is (if you installed it using
-# setup.py into your python site-packages, then you don't need that setting):
-sys.path.insert(0, '/home/twaldmann/moincvs/moin--main')
-
-# this is where your wikiconfig.py is:
-sys.path.insert(0, '/org/org.linuxwiki/cgi-bin')
-
-# if you include other stuff in your wikiconfig, you might need additional
-# pathes in your search path. Put them here:
-sys.path.insert(0, '/org/wiki')
-
-# if a user subsribes to magicpage, it means that he wants to keep
-# exactly THIS account - this will avoid deleting it.
-#magicpage = "ThisAccountIsCorrect"
-magicpage = "DieserAccountIstRichtig"
-
-# ----------------------------------------------------------------------------
-
-from MoinMoin.user import *
-from MoinMoin import config, wikiutil
-
-def collect_data():
-    for uid in getUserList(request): # XXX FIXME make request object for getting config vars there
-        u = User(None, uid)
-        users[uid] = u
-
-        # collect name duplicates:
-        if names.has_key(u.name):
-            names[u.name].append(uid)
-        else:
-            names[u.name] = [uid]
-
-        # collect email duplicates:
-        if u.email:
-            if emails.has_key(u.email):
-                emails[u.email].append(uid)
-            else:
-                emails[u.email] = [uid]
-
-        # collect account with no or invalid email address set:
-        if not u.email or not re.match(".*@.*\..*", u.email):
-            uids_noemail[uid] = u.name
-
-def hasmagicpage(uid):
-    u = users[uid]
-    return u.subscribed_pages.find(magicpage) >= 0
-
-def disableUser(uid):
-    u = users[uid]
-    print " %-20s %-25s %-35s" % (uid, u.name, u.email),
-    keepthis = hasmagicpage(uid)
-    if keepthis:
-        print "- keeping (magicpage)!"
-        u.save() # update timestamp, so this will be latest next run
-    elif not u.disabled: # only disable once
-        u.disabled = 1
-        u.name = "%s-%s" % (u.name, uid)
-        if u.email:
-            u.email = "%s-%s" % (u.email, uid)
-        u.subscribed_pages = "" # avoid using email
-        if save:
-            u.save()
-            print "- disabled."
-        else:
-            print "- would be disabled."
-
-def getsortvalue(uid,user):
-    t_ls = float(user.last_saved) # when user did last SAVE of his account data
-    if lastsaved:
-        return t_ls
-    else: # last USED (we check the page trail for that)
-        try:
-            t_lu = float(os.path.getmtime(os.path.join(config.user_dir, uid+".trail")))
-        except OSError:
-            t_lu = t_ls # better than having nothing
-        return t_lu
-
-def process(uidlist):
-    sortlist = []
-    for uid in uidlist:
-        u = users[uid]
-        sortlist.append((getsortvalue(uid,u),uid))
-    sortlist.sort()
-    #print sortlist
-    # disable all, but the last/latest one
-    for t,uid in sortlist[:-1]:
-        disableUser(uid)
-    # show what will be kept
-    uid = sortlist[-1][1]
-    u = users[uid]
-    print " %-20s %-25s %-35s - keeping%s!" % (uid, u.name, u.email, hasmagicpage(uid) and " (magicpage)" or "")
-
-def make_users_unique():
-    for name in names.keys():
-        if len(names[name])>1:
-            process(names[name])
-        
-def make_emails_unique():
-    for email in emails.keys():
-        if len(emails[email])>1:
-            process(emails[email])
-
-
-def make_WikiNames():
-    import string
-    for uid in users.keys():
-        u = users[uid]
-        if u.disabled: continue
-        if not wikiutil.isStrictWikiname(u.name):
-            newname = string.capwords(u.name).replace(" ","").replace("-","")
-            if not wikiutil.isStrictWikiname(newname):
-                print " %-20s %-25s - no WikiName, giving up" % (uid, u.name)
-            else:
-                print " %-20s %-25s - no WikiName -> %s" % (uid, u.name, newname)
-                if save:
-                    u.name = newname
-                    u.save()
-            
-def do_removepasswords():
-    for uid in users.keys():
-        u = users[uid]
-        # user.User already clears the old cleartext passwords on loading,
-        # so nothing to do here!
-        if save:
-            # we can't encrypt the cleartext password as it is cleared
-            # already. and we would not trust it anyway, so we don't WANT
-            # to do that either!
-            # Just save the account data without cleartext password:
-            print " %-20s %-25s - saving" % (uid, u.name)
-            u.save()
-            
-# here the main routine starts --------------------------------
-usersunique = emailsunique = lastsaved = save = 0
-disableuser = wikinames = removepasswords = 0
-
-users = {} # uid : UserObject
-names = {} # name : [uid, uid, uid]
-emails = {} # email : [uid, uid, uid]
-uids_noemail = {} # uid : name
-
-def run():
-    global usersunique, emailsunique, lastsaved, save, disableuser, wikinames
-    global users, names, emails, uids_noemail, removepasswords
-    
-    if "--usersunique" in sys.argv:  usersunique = 1
-    if "--emailsunique" in sys.argv: emailsunique = 1
-    if "--lastsaved" in sys.argv:    lastsaved = 1
-    if "--wikinames" in sys.argv:    wikinames = 1
-    if "--removepasswords" in sys.argv:    removepasswords = 1
-    if "--save" in sys.argv:         save = 1
-
-    if "--disableuser" in sys.argv:  disableuser = 1
-
-    if not usersunique and not emailsunique and not disableuser and \
-       not wikinames and not removepasswords:
-        print """%s
-    Options:
-        --usersunique       makes user names unique (by appending the ID to
-                            name and email, disabling subscribed pages and
-                            disabling all, but the latest saved user account)
-                            default is to SHOW what will be happening, you
-                            need to give the --save option to really do it.
-
-        --emailsunique      makes user emails unique
-                            default is to show, use --save to save it.
-
-        --lastsaved         normally the account most recently USED will
-                            survive and the others will be disabled.
-                            using --lastsaved, the account most recently
-                            SAVED will survive.
-
-        --disableuser uid   disable the user with user id uid
-                            this can't be combined with the options above!
-                            
-        --wikinames         try to make "WikiNames" out of "user names"
-        --removepasswords   remove pre-1.1 cleartext passwords from accounts
-        
-        --save              if specified as LAST option, will allow the other
-                            options to save user accounts back to disk.
-                            if not specified, no settings will be permanently
-                            changed.
-
-    """ % sys.argv[0]
-        return
-        
-    collect_data()
-    if usersunique:  make_users_unique()
-    if emailsunique: make_emails_unique()
-    if disableuser:  disableUser(sys.argv[2])
-    if wikinames:    make_WikiNames()
-    if removepasswords: do_removepasswords()
-
-if __name__ == "__main__":
-    run()
-
-# EOF
-
--- a/MoinMoin/scripts/cachecleaner.py	Sat Mar 25 11:18:58 2006 +0000
+++ /dev/null	Thu Jan 01 00:00:00 1970 +0000
@@ -1,40 +0,0 @@
-#!/usr/bin/env python
-# -*- coding: iso-8859-1 -*-
-"""
-    MoinMoin - clear the cache
-
-    @copyright: 2005 by Thomas Waldmann (MoinMoin:ThomasWaldmann)
-    @license: GNU GPL, see COPYING for details.
-
-    globally delete cache files in data/pages/PageName/cache/ directories
-    
-    Usage:
-    First change the base path and fname to match your needs.
-    Then do ./cachecleaner.py
-
-    You will usually do this after changing MoinMoin code, by either upgrading
-    version, installing or removing macros. This often makes the text_html
-    files invalid, so you have to remove them (the wiki will recreate them
-    automatically).
-    
-    text_html is the name of the cache file used for compiled pages formatted
-    by the wiki text to html formatter,
-"""
-
-base = "." # directory containing the data directory
-fnames = ['text_html', 'pagelinks', ] # cache filenames to delete
-
-def run():
-    import os
-    pagesdir = os.path.join(base, 'data', 'pages')
-    for f in os.listdir(pagesdir):
-        for fname in fnames:
-            cachefile = os.path.join(pagesdir, f, 'cache', fname)
-            try:
-                os.remove(cachefile)
-            except:
-                pass
-    
-if __name__ == '__main__':
-    run()
-
--- a/MoinMoin/scripts/globaledit.py	Sat Mar 25 11:18:58 2006 +0000
+++ /dev/null	Thu Jan 01 00:00:00 1970 +0000
@@ -1,110 +0,0 @@
-#!/usr/bin/env python
-"""
-    Script for doing global changes to all pages in a wiki.
-
-    You either need to have your wiki configs in sys.path or you
-    need to invoke this script from the same directory.
-
-    @copyright: 2004, Thomas Waldmann
-    @license: GPL licensed, see COPYING for details
-"""
-
-debug = False
-
-url = "moinmaster.wikiwikiweb.de/"
-
-import sys
-sys.path.insert(0, '/org/de.wikiwikiweb.moinmaster/bin15') # farmconfig/wikiconfig location
-sys.path.insert(0, '../..')
-
-def do_edit(pagename, origtext):
-    if pagename in ['LocalSpellingWords','LocalBadContent',] or pagename.endswith('Template'):
-        return origtext
-    language_line = format_line = masterpage = None
-    acl_lines = []
-    master_lines = []
-    pragma_lines = []
-    comment_lines = []
-    content_lines = []
-    lines = origtext.splitlines()
-    header = True
-    for l in lines:
-        if not l.startswith('#'):
-            header = False
-        if header:
-            if l.startswith('#acl '):
-                acl_lines.append(l)
-            elif l.startswith('#language '):
-                language_line = l
-            elif l.startswith('#format '):
-                format_line = l
-            elif l.startswith('##master-page:'):
-                masterpage = l.split(':',1)[1].strip()
-                master_lines.append(l)
-            elif l.startswith('##master-date:'):
-                master_lines.append(l)
-            elif l.startswith('##'):
-                comment_lines.append(l)
-            elif l.startswith('#'):
-                pragma_lines.append(l)
-        else:
-            content_lines.append(l)
-
-    if not language_line:
-        language_line = '#language en'
-    if not format_line:
-        format_line = '#format wiki'
-    if not acl_lines and (
-        masterpage is None or masterpage not in ['FrontPage', 'WikiSandBox',] and not masterpage.endswith('Template')):
-        acl_lines = ['#acl MoinPagesEditorGroup:read,write,delete,revert All:read']
-    if not master_lines:
-        master_lines = ['##master-page:Unknown-Page', '##master-date:Unknown-Date',]
-
-    c1old = "## Please edit (or translate) system/help pages on the moinmaster wiki ONLY."
-    c2old = "## For more information, please see MoinMaster:MoinPagesEditorGroup."
-    c1 = "## Please edit system and help pages ONLY in the moinmaster wiki! For more"
-    c2 = "## information, please see MoinMaster:MoinPagesEditorGroup."
-    for c in (c1old, c2old, c1, c2):
-        if c in comment_lines:
-            comment_lines.remove(c)
-        
-    comment_lines = [c1, c2, ] + comment_lines
-
-    if content_lines and content_lines[-1].strip(): # not an empty line at EOF
-        content_lines.append('')
-
-    if masterpage and masterpage.endswith('Template'):
-        changedtext = master_lines + [format_line, language_line,] + pragma_lines + content_lines
-    else:
-        changedtext = comment_lines + master_lines + acl_lines + [format_line, language_line,] + pragma_lines + content_lines
-    changedtext = '\n'.join(changedtext)
-    return changedtext
-
-if __name__ == '__main__':
-    if debug:
-        import codecs
-        origtext = codecs.open('origtext', 'r', 'utf-8').read()
-        origtext = origtext.replace('\r\n','\n')
-        changedtext = do_edit("", origtext)
-        changedtext = changedtext.replace('\n','\r\n')
-        f = codecs.open('changedtext', 'w', 'utf-8')
-        f.write(changedtext)
-        f.close()
-    else:
-
-        from MoinMoin import PageEditor, wikiutil
-        from MoinMoin.request import RequestCLI
-
-        request = RequestCLI(url=url)
-        # Get all existing pages in the wiki
-        pagelist = request.rootpage.getPageList(user='')
-
-        for pagename in pagelist:
-            request = RequestCLI(url=url, pagename=pagename.encode('utf-8'))
-            p = PageEditor.PageEditor(request, pagename, do_editor_backup=0)
-            origtext = p.get_raw_body()
-            changedtext = do_edit(pagename, origtext)
-            if changedtext and changedtext != origtext:
-                print "Writing %s ..." % repr(pagename)
-                p._write_file(changedtext)
-
--- a/MoinMoin/scripts/import/IrcLogImporter.py	Sat Mar 25 11:18:58 2006 +0000
+++ /dev/null	Thu Jan 01 00:00:00 1970 +0000
@@ -1,74 +0,0 @@
-#!/usr/bin/env python
-"""
-MoinMoin - Push files into the wiki.
-
-This script pushes files from a directory into the wiki. It is usable in order
-to mirror IRC logs for example.
-
-    @copyright: 2005 by MoinMoin:AlexanderSchremmer
-    @license: GNU GPL, see COPYING for details.
-"""
-
-### Configuration
-# the request URL, important for farm configurations
-url = "moinmoin.wikiwikiweb.de/"
-
-# the author, visible in RecentChanges etc.
-author = "IrcLogImporter"
-
-# the directory that should be pushed
-local_dir = '/home/aschremmer/channel-logging/logs/ChannelLogger/freenode/#moin-dev'
-
-# basepage of the pushed files
-base_page = 'MoinMoinChat/Logs/'
-
-# this function generates a pagename from the file name
-def filename_function(filename):
-    filename = filename.lstrip('#')
-    splitted = filename.split('.')
-    return '/'.join(splitted[0:2])
-### end of configuration
-
-import os, sys
-sys.path.insert(0, '/srv/moin_tw/moin--main--1.5')
-sys.path.insert(0, '/srv/de.wikiwikiweb.moinmaster/bin15')
-
-from MoinMoin import wikiutil
-from MoinMoin.request import RequestCLI
-from MoinMoin.PageEditor import PageEditor
-
-def decodeLinewise(text):
-    resultList = []
-    for line in text.splitlines():
-        try:
-            decoded_line = line.decode("utf-8")
-        except UnicodeDecodeError:
-            decoded_line = line.decode("iso-8859-1")
-        resultList.append(decoded_line)
-    return '\n'.join(resultList)
-
-def run():
-    request = RequestCLI(url=url) #pagename necessary here?
-
-    for root, dirs, files in os.walk(local_dir):
-        files.sort()
-        for filename in files[:-1]: # do not push the last file as it is constantly written to
-            pagename = base_page + filename_function(filename)
-            print "Pushing %r as %r" % (filename, pagename)
-            p = PageEditor(request, pagename,
-                           do_editor_backup=0, uid_override=author)
-            if p.exists():
-                continue
-                        
-            fileObj = open(os.path.join(root, filename), 'rb')
-            try:
-                p.saveText("#format plain\n" + decodeLinewise(fileObj.read()), 0)
-            except PageEditor.SaveError, e:
-                print "Got %r" % (e, )
-            fileObj.close()
-
-    print "Finished."
-
-if __name__ == "__main__":
-    run()
-
--- a/MoinMoin/scripts/import/__init__.py	Sat Mar 25 11:18:58 2006 +0000
+++ /dev/null	Thu Jan 01 00:00:00 1970 +0000
@@ -1,8 +0,0 @@
-# -*- coding: iso-8859-1 -*-
-"""
-    MoinMoin - Scripts
-
-    @copyright: 2001 by Jrgen Hermann <jh@web.de>
-    @license: GNU GPL, see COPYING for details.
-"""
-
--- a/MoinMoin/scripts/migration/12_to_13_mig01.py	Sat Mar 25 11:18:58 2006 +0000
+++ /dev/null	Thu Jan 01 00:00:00 1970 +0000
@@ -1,184 +0,0 @@
-#!/usr/bin/env python
-"""
-    12_to_13.py - migration from moin 1.2 to moin 1.3
-    * switch the wiki to utf-8 encoding
-    * switch quoting mechanism from _xx to (xx)
-    * switch timestamps from float secs to int usecs
-
-    Steps for a successful migration to utf-8:
-        1. stop your wiki and make a backup
-        2. make a copy of the wiki's "data" directory to your working dir
-        3. clean up your working copy of the data dir:
-            a. if you use CVS or GNU arch remove stuff like CVS/, .cvsignore
-               or .arch-ids/ etc.
-            b. remove *.pickle (used by moin for caching some information,
-               will be re-created automatically), especially:
-                   I. data/user/userdict.pickle
-                   II. data/dicts.pickle
-            c. if you used symlinks in data/text or elsewhere, remove them
-        4. make sure that from_encoding and to_encoding matches your needs (see
-           beginning of script below and config.charset in moin_config.py) and
-           run python2.3 12_to_13_mig1.py from your working dir
-        5. if there was no error, you will find:
-            data.pre-mig1 (the script renames your data directory copy to that name)
-            data (result, converted to utf-8)
-        6. verify conversion results (number of pages, size of logs, attachments,
-           number of backup copies) - everything should be reasonable before
-           you proceed. Usually the file size gets larger when converting from
-           iso8859-1 (or other non-unicode charset) to utf-8 except if your
-           content is ASCII-only, then it will keep its size.
-        7. copy additional files from data.pre-mig1 to data (maybe intermaps, logs,
-           etc.). Be aware that the file contents AND file names of wiki content
-           may have changed, so DO NOT copy the cache/ directory, but let
-           the wiki recreate it.
-        8. replace the data directory your wiki uses with the data directory
-           you created by previous steps. DO NOT simply copy the converted stuff
-           into the original or you will duplicate pages and create chaos!
-        9. test it. if something has gone wrong, you still have your backup.
-
-
-        10. if you use dictionaries for spellchecking, you have to convert them
-            to config.charset, too. Remove your dict.cache before re-starting
-            your wiki.
-
-    @copyright: 2004 Thomas Waldmann
-    @license: GPL, see COPYING for details
-"""
-
-from_encoding = 'iso8859-1'
-#from_encoding = 'utf-8'
-
-to_encoding = 'utf-8'
-
-import os.path, sys, shutil, urllib
-
-sys.path.insert(0, '../../..')
-from MoinMoin import wikiutil
-
-from migutil import opj, listdir, copy_file, copy_dir
-
-# this is a copy of the wikiutil.unquoteFilename of moin 1.2.1
-
-def unquoteFilename12(filename, encoding):
-    """
-    Return decoded original filename when given an encoded filename.
-    
-    @param filename: encoded filename
-    @rtype: string
-    @return: decoded, original filename
-    """
-    str = urllib.unquote(filename.replace('_', '%'))
-    try:
-        newstr = str.decode(encoding)
-    except UnicodeDecodeError: # try again with iso
-        newstr = str.decode('iso-8859-1')
-    return newstr
-
-unquoteWikiname12 = unquoteFilename12
-
-
-def convert_string(str, enc_from, enc_to):
-    try:
-        newstr = str.decode(enc_from)
-    except UnicodeDecodeError: # try again with iso
-        newstr = str.decode('iso-8859-1')
-    return newstr.encode(enc_to)
-    
-def qf_convert_string(str, enc_from, enc_to):
-    str = unquoteWikiname12(str, enc_from)
-    str = wikiutil.quoteWikinameFS(str, enc_to)
-    return str
-
-def convert_file(fname_from, fname_to, enc_from, enc_to):
-    print "%s -> %s" % (fname_from, fname_to)
-    file_from = open(fname_from, "rb")
-    if os.path.exists(fname_to):
-        raise "file exists %s" % fname_to
-    file_to = open(fname_to, "wb")
-    for line in file_from:
-        file_to.write(convert_string(line, enc_from, enc_to))
-    file_to.close()
-    file_from.close()
-    st=os.stat(fname_from)
-    os.utime(fname_to, (st.st_atime,st.st_mtime))
-
-def convert_textdir(dir_from, dir_to, enc_from, enc_to, is_backupdir=0):
-    os.mkdir(dir_to)
-    for fname_from in listdir(dir_from):
-        if is_backupdir:
-            fname, timestamp = fname_from.split('.',1)
-            timestamp = str(wikiutil.timestamp2version(float(timestamp)))
-        else:
-            fname = fname_from
-        fname = qf_convert_string(fname, enc_from, enc_to)
-        if is_backupdir:
-            fname_to = '.'.join([fname, timestamp])
-        else:
-            fname_to = fname
-        convert_file(opj(dir_from, fname_from), opj( dir_to, fname_to),
-                     enc_from, enc_to)
-
-def convert_pagedir(dir_from, dir_to, enc_from, enc_to):
-    os.mkdir(dir_to)
-    for dname_from in listdir(dir_from):
-        dname_to = qf_convert_string(dname_from, enc_from, enc_to)
-        print "%s -> %s" % (dname_from, dname_to)
-        shutil.copytree(opj(dir_from, dname_from), opj(dir_to, dname_to), 1)
-        try:
-            convert_editlog(opj(dir_from, dname_from, 'last-edited'),
-                            opj(dir_to, dname_to, 'last-edited'),
-                            enc_from, enc_to)
-        except IOError:
-            pass # we ignore if it doesnt exist
-
-def convert_userdir(dir_from, dir_to, enc_from, enc_to):
-    os.mkdir(dir_to)
-    for fname in listdir(dir_from):
-        convert_file(opj(dir_from, fname), opj(dir_to, fname),
-                     enc_from, enc_to)
-
-def convert_editlog(log_from, log_to, enc_from, enc_to):
-        file_from = open(log_from)
-        file_to = open(log_to, "w")
-        for line in file_from:
-            line = line.replace('\r','')
-            line = line.replace('\n','')
-            if not line.strip(): # skip empty lines
-                continue
-            fields = line.split('\t')
-            fields[0] = qf_convert_string(fields[0], enc_from, enc_to)
-            fields[2] = str(wikiutil.timestamp2version(float(fields[2])))
-            if len(fields) < 6:
-                fields.append('') # comment
-            if len(fields) < 7:
-                fields.append('SAVE') # action
-            fields[5] = convert_string(fields[5], enc_from, enc_to)
-            line = '\t'.join(fields) + '\n'
-            file_to.write(line)
-
-origdir = 'data.pre-mig1'
-
-try:
-    os.rename('data', origdir)
-    os.mkdir('data')
-except OSError:
-    print "You need to be in the directory where your copy of the 'data' directory is located."
-    sys.exit(1)
-
-convert_textdir(opj(origdir, 'text'), opj('data', 'text'), from_encoding, to_encoding)
-
-convert_textdir(opj(origdir, 'backup'), opj('data', 'backup'), from_encoding, to_encoding, 1)
-
-convert_pagedir(opj(origdir, 'pages'), opj('data', 'pages'), from_encoding, to_encoding)
-
-convert_userdir(opj(origdir, 'user'), opj('data', 'user'), from_encoding, to_encoding)
-
-convert_editlog(opj(origdir, 'editlog'), opj('data', 'editlog'), from_encoding, to_encoding)
-
-copy_file(opj(origdir, 'event.log'), opj('data', 'event.log'))
-
-copy_dir(opj(origdir, 'plugin'), opj('data', 'plugin'))
-
-copy_file(opj(origdir, 'intermap.txt'), opj('data', 'intermap.txt'))
-
-
--- a/MoinMoin/scripts/migration/12_to_13_mig02.py	Sat Mar 25 11:18:58 2006 +0000
+++ /dev/null	Thu Jan 01 00:00:00 1970 +0000
@@ -1,174 +0,0 @@
-#!/usr/bin/env python
-"""
-    migration from moin 1.3 < patch-78 to moin 1.3 >= patch-78
-    * switch quoting mechanism from (xx)(xx) to (xxxx)
-    * charset isn't changed, it was utf-8 before and will be utf-8 after
-    
-    Steps for a successful migration:
-        1. stop your wiki and make a backup
-        2. make a copy of the wiki's "data" directory to your working dir
-        3. run this script from your working dir
-        4. if there was no error, you will find:
-            data.pre-mig2 (the script renames your data directory copy to that name)
-            data (result, converted)
-        5. verify conversion results (number of pages, size of logs, attachments,
-           number of backup copies) - everything should be reasonable before
-           you proceed.
-        6. copy additional files from data.pre-mig2 to data (maybe intermaps, logs,
-           etc.). Be aware that the file contents AND file names of wiki content
-           may have changed, so DO NOT copy the cache/ directory, but let
-           the wiki recreate it.
-        7. replace the data directory your wiki uses with the data directory
-           you created by previous steps. DO NOT simply copy the converted stuff
-           into the original or you will duplicate pages and create chaos!
-        8. test it. if something has gone wrong, you still have your backup.
-        9. if you use dictionaries for spellchecking, you have to convert them
-           to config.charset, too. Remove your dict.cache before re-starting
-           your wiki.
-
-    @copyright: 2004 Thomas Waldmann
-    @license: GPL, see COPYING for details
-"""
-
-from_encoding = 'utf-8'
-to_encoding = 'utf-8'
-
-import os.path, sys, shutil, urllib
-
-sys.path.insert(0, '../../..')
-from MoinMoin import wikiutil
-
-from migutil import opj, listdir, copy_file, copy_dir
-
-# this is a copy of the wikiutil.unquoteWikiname of moin--main--1.3--patch-77
-def unquoteWikinameOld(filename, charsets=[from_encoding,]):
-    """
-    Return decoded original filename when given an encoded filename.
-    @param filename: encoded filename
-    @rtype: string
-    @return: decoded, original filename
-    """
-    if isinstance(filename, type(u'')): # from some places we get called with unicode
-        filename = filename.encode(from_encoding)
-    fn = ''
-    i = 0
-    while i < len(filename):
-        c = filename[i]
-        if c == '(':
-            c1 = filename[i+1]
-            c2 = filename[i+2]
-            close = filename[i+3]
-            if close != ')':
-                raise Exception('filename encoding invalid')
-            i+=4
-            fn = fn + chr( 16 * int(c1,16) + int(c2, 16) )
-        else:
-            fn = fn + c
-            i+=1
-    return wikiutil.decodeUserInput(fn, charsets)
-
-
-def convert_string(str, enc_from, enc_to):
-    return str.decode(enc_from).encode(enc_to)
-
-
-def qf_convert_string(str, enc_from, enc_to):
-    """ Convert filename from pre patch 78 quoting to new quoting 
-    
-    The old quoting function from patch 77 can convert name ONLY from 
-    the old way to the new, so if you have a partially converted 
-    directory, as it the situation as of moin--main--1.3--patch-86, 
-    it does not work.
-    
-    The new unquoting function is backward compatible, and can unquote
-    both post and pre patch 78 file names.
-    """
-    str = wikiutil.unquoteWikiname(str, [enc_from])
-    str = wikiutil.quoteWikinameFS(str, enc_to)
-    return str
-
-
-def convert_file(fname_from, fname_to, enc_from, enc_to):
-    print "%s -> %s" % (fname_from, fname_to)
-    file_from = open(fname_from)
-    file_to = open(fname_to, "w")
-    for line in file_from:
-        file_to.write(convert_string(line, enc_from, enc_to))
-    file_to.close()
-    file_from.close()
-    st=os.stat(fname_from)
-    os.utime(fname_to, (st.st_atime,st.st_mtime))
-
-
-def convert_textdir(dir_from, dir_to, enc_from, enc_to, is_backupdir=0):
-    os.mkdir(dir_to)
-    for fname_from in listdir(dir_from):
-        if is_backupdir:
-            fname, timestamp = fname_from.split('.')
-        else:
-            fname = fname_from
-        fname = qf_convert_string(fname, enc_from, enc_to)
-        if is_backupdir:
-            fname_to = '.'.join([fname, timestamp])
-        else:
-            fname_to = fname
-        convert_file(opj(dir_from, fname_from), opj(dir_to, fname_to),
-                     enc_from, enc_to)
-
-
-def convert_pagedir(dir_from, dir_to, enc_from, enc_to):
-    os.mkdir(dir_to)
-    for dname_from in listdir(dir_from):
-        dname_to = qf_convert_string(dname_from, enc_from, enc_to)
-        print "%s -> %s" % (dname_from, dname_to)
-        shutil.copytree(opj(dir_from, dname_from), opj(dir_to, dname_to), 1)
-        try:
-            convert_editlog(opj(dir_from, dname_from, 'last-edited'),
-                            opj(dir_to, dname_to, 'last-edited'),
-                            enc_from, enc_to)
-        except IOError:
-            pass # we ignore if it doesnt exist
-
-def convert_userdir(dir_from, dir_to, enc_from, enc_to):
-    os.mkdir(dir_to)
-    for fname in listdir(dir_from):
-        convert_file(opj(dir_from, fname), opj(dir_to, fname),
-                     enc_from, enc_to)
-
-
-def convert_editlog(log_from, log_to, enc_from, enc_to):
-        file_from = open(log_from)
-        file_to = open(log_to, "w")
-        for line in file_from:
-            fields = line.split('\t')
-            fields[0] = qf_convert_string(fields[0], enc_from, enc_to)
-            fields[5] = convert_string(fields[5], enc_from, enc_to)
-            line = '\t'.join(fields)
-            file_to.write(line)
-
-origdir = 'data.pre-mig2'
-
-# Backup original dir and create new empty dir
-try:
-    os.rename('data', origdir)
-    os.mkdir('data')
-except OSError:
-    print "You need to be in the directory where your copy of the 'data' directory is located."
-    sys.exit(1)
-
-convert_textdir(opj(origdir, 'text'), opj('data', 'text'), from_encoding, to_encoding)
-
-convert_textdir(opj(origdir, 'backup'), opj('data', 'backup'), from_encoding, to_encoding, 1)
-
-convert_pagedir(opj(origdir, 'pages'), opj('data', 'pages'), from_encoding, to_encoding)
-
-convert_userdir(opj(origdir, 'user'), opj('data', 'user'), from_encoding, to_encoding)
-
-convert_editlog(opj(origdir, 'editlog'), opj('data', 'editlog'), from_encoding, to_encoding)
-
-copy_file(opj(origdir, 'event.log'), opj('data', 'event.log'))
-
-copy_dir(opj(origdir, 'plugin'), opj('data', 'plugin'))
-
-copy_file(opj(origdir, 'intermap.txt'), opj('data', 'intermap.txt'))
-
--- a/MoinMoin/scripts/migration/12_to_13_mig03.py	Sat Mar 25 11:18:58 2006 +0000
+++ /dev/null	Thu Jan 01 00:00:00 1970 +0000
@@ -1,145 +0,0 @@
-#!/usr/bin/env python
-"""
-    migration from moin 1.3 < patch-101 to moin 1.3 >= patch-101
-    We heavily change the file system layout here:
-    * data/backup/PageName.<UTC timestamp> -> data/pages/PageName/backup/<UTC timestamp>
-    * data/text/PageName -> data/pages/PageName/text
-    * data/pages/PageName/edit-lock stays the same
-    * data/pages/PageName/last-edited isn't used any more as we have the same in last line of page edit-log
-    * data/pages/PageName/attachments/* stays the same
-    * data/editlog -> stays there (as edit-log), but also gets splitted into data/pages/PageName/edit-log
-    * data/event.log -> stays there (as event-log)
-
-    We will use this, but don't need to convert, as it will be recreated automatically:
-    * data/cache/Page.py/PageName.<formatter> -> data/pages/PageName/cache/<formatter>
-    * data/cache/pagelinks/PageName -> data/pages/PageName/cache/pagelinks
-    * data/cache/charts/hitcounts-PageName -> data/pages/PageName/cache/hitcounts
-
-    
-    Steps for a successful migration:
-
-        1. Stop your wiki and make a backup of old data and code
-
-        2. Make a copy of the wiki's "data" directory to your working dir
-
-        3. Run this script from your working dir
-
-        4. If there was no error, you will find:
-            data.pre-mig3 - the script renames your data directory copy to that name
-            data - converted data dir
-
-        5. Verify conversion results (number of pages, size of logs, attachments,
-           number of backup copies) - everything should be reasonable before
-           you proceed.
-
-        6. Copy additional files from data.pre-mig3 to data (maybe intermaps, logs,
-           etc.). Be aware that the file contents AND file names of wiki content
-           may have changed, so DO NOT copy the files inside the cache/ directory,
-           let the wiki refill it.
-
-        7. Replace the data directory your wiki uses with the data directory
-           you created by previous steps. DO NOT simply copy the converted stuff
-           into the original or you will duplicate pages and create chaos!
-
-        8. Test it - if something has gone wrong, you still have your backup.
-
-
-    @copyright: 2004 Thomas Waldmann
-    @license: GPL, see COPYING for details
-"""
-
-import os, sys, shutil, urllib
-
-sys.path.insert(0, '../../..')
-from MoinMoin import wikiutil
-
-from migutil import opj, copy_file, copy_dir, listdir
-
-origdir = 'data.pre-mig3'
-
-def convert_textdir(dir_from, dir_to, is_backupdir=0):
-    for fname_from in listdir(dir_from):
-        if is_backupdir:
-            fname, timestamp = fname_from.split('.')
-        else:
-            fname = fname_from
-        try:
-            os.mkdir(opj(dir_to, 'pages', fname))
-        except: pass
-        try:
-            os.mkdir(opj(dir_to, 'pages', fname, 'backup'))
-        except: pass
-        try:
-            os.mkdir(opj(dir_to, 'pages', fname, 'cache'))
-        except: pass
-        if is_backupdir:
-            fname_to = opj('pages', fname, 'backup', timestamp)
-        else:
-            fname_to = opj('pages', fname, 'text')
-        copy_file(opj(dir_from, fname_from), opj(dir_to, fname_to))
-
-        #we don't have cache, mig2 doesn't convert it
-        #try:
-        #    cache_from = opj(origdir,'cache','charts','hitcounts-%s' % fname)
-        #    cache_to = opj(dir_to, 'pages', fname, 'cache', 'hitcounts')
-        #    if os.path.exists(cache_from):
-        #        copy_file(cache_from, cache_to)
-        #except: pass
-
-
-def convert_pagedir(dir_from, dir_to):
-    os.mkdir(dir_to)
-    for dname_from in listdir(dir_from):
-        print "%s" % (dname_from,)
-        dname_to = dname_from
-        shutil.copytree(opj(dir_from, dname_from), opj(dir_to, dname_to), 1)
-        try:
-            os.remove(opj(dir_to, dname_to, 'last-edited'))
-        except: pass
-
-
-def convert_editlog(file_from, file_to, dir_to):
-    for l in open(file_from):
-        data = l.split('\t')
-        pagename = data[0]
-        timestamp = data[2]
-        data[2] = str(long(float(timestamp))) # we only want integer (must be long for py 2.2.x)
-        data = '\t'.join(data)
-        
-        f = open(file_to, 'a')
-        f.write(data)
-        f.close()
-        
-        try:
-            file_to2 = opj(dir_to, pagename, 'edit-log')
-            f = open(file_to2, 'a')
-            f.write(data)
-            f.close()
-        except: pass
-
-# Backup original dir and create new empty dir
-try:
-    os.rename('data', origdir)
-    os.mkdir('data')
-except OSError:
-    print "You need to be in the directory where your copy of the 'data' directory is located."
-    sys.exit(1)
-
-convert_pagedir(opj(origdir, 'pages'), opj('data', 'pages'))
-
-convert_textdir(opj(origdir,'text'), 'data')
-
-convert_textdir(opj(origdir, 'backup'), 'data', 1)
-
-convert_editlog(opj(origdir, 'editlog'),
-                opj('data', 'edit-log'),
-                opj('data', 'pages'))
-
-copy_file(opj(origdir, 'event.log'), opj('data', 'event.log'))
-
-copy_dir(opj(origdir, 'plugin'), opj('data', 'plugin'))
-
-copy_dir(opj(origdir, 'user'), opj('data', 'user'))
-
-copy_file(opj(origdir, 'intermap.txt'), opj('data', 'intermap.txt'))
-
--- a/MoinMoin/scripts/migration/12_to_13_mig04.py	Sat Mar 25 11:18:58 2006 +0000
+++ /dev/null	Thu Jan 01 00:00:00 1970 +0000
@@ -1,157 +0,0 @@
-#!/usr/bin/env python
-"""
-    migration from moin 1.3 < patch-196 to moin 1.3 >= patch-196
-    Because of trouble with float timestamps, we migrate to usec timestamp resolution here.
-    * data/pages/PageName/backup/<UTC timestamp> -> .../<UTC timestamp in usecs>
-    * data/user/<uid>.bookmark -> convert to usecs
-    * data/edit-log and data/pages/PageName/edit-log -> convert to usecs
-    * data/event-log -> convert to usecs
-    
-    Steps for a successful migration:
-
-        1. Stop your wiki and make a backup of old data and code
-
-        2. Make a copy of the wiki's "data" directory to your working dir
-
-        3. Run this script from your working dir
-
-        4. If there was no error, you will find:
-            data.pre-mig4 - the script renames your data directory copy to that name
-            data - converted data dir
-
-        5. Verify conversion results (number of pages, size of logs, attachments,
-           number of backup copies) - everything should be reasonable before
-           you proceed.
-
-        6. Copy additional files from data.pre-mig4 to data (maybe intermaps, logs,
-           etc.). Be aware that the file contents AND file names of wiki content
-           may have changed, so DO NOT copy the files inside the cache/ directory,
-           let the wiki refill it.
-
-        7. Replace the data directory your wiki uses with the data directory
-           you created by previous steps. DO NOT simply copy the converted stuff
-           into the original or you will duplicate pages and create chaos!
-
-        8. Test it - if something has gone wrong, you still have your backup.
-
-
-    @copyright: 2004 Thomas Waldmann
-    @license: GPL, see COPYING for details
-"""
-
-
-import os.path, sys, urllib
-
-sys.path.insert(0, '../../..')
-from MoinMoin import wikiutil
-
-from migutil import opj, listdir, copy_file, copy_dir
-
-def convert_ts(ts_from):
-    if ts_from > 5000000000: # far more than 32bits?
-        ts_to = ts_from # we already have usec kind of timestamp
-    else:
-        ts_to = wikiutil.timestamp2version(ts_from)
-    return long(ts_to) # must be long for py 2.2.x
-
-def convert_eventlog(file_from, file_to):
-    if not os.path.exists(file_from): 
-        return
-    f = open(file_to, 'a')
-    for l in open(file_from):
-        if not l.strip():
-            continue
-        data = l.split('\t')
-        data[0] = str(convert_ts(float(data[0]))) # we want usecs
-        data = '\t'.join(data)
-        f.write(data)
-    f.close()
-        
-def convert_editlog(file_from, file_to):
-    if not os.path.exists(file_from): 
-        return
-    f = open(file_to, 'a')
-    for l in open(file_from):
-        data = l.split('\t')
-        pagename = data[0]
-        timestamp = data[2]
-        data[2] = str(convert_ts(float(timestamp))) # we want usecs
-        data = '\t'.join(data)
-        f.write(data)
-    f.close()
-        
-def convert_pagedir(dir_from, dir_to, is_backupdir=0):
-    os.mkdir(dir_to)
-    for pagedir in listdir(dir_from):
-        text_from = opj(dir_from, pagedir, 'text')
-        text_to = opj(dir_to, pagedir, 'text')
-        os.mkdir(opj(dir_to, pagedir))
-        copy_file(text_from, text_to)
-        
-        backupdir_from = opj(dir_from, pagedir, 'backup')
-        backupdir_to = opj(dir_to, pagedir, 'backup')
-        if os.path.exists(backupdir_from):
-            os.mkdir(backupdir_to)
-            for ts in listdir(backupdir_from):
-                ts_usec = str(convert_ts(float(ts)))
-                backup_from = opj(backupdir_from, ts)
-                backup_to = opj(backupdir_to, ts_usec)
-                copy_file(backup_from, backup_to)
-        
-        editlog_from = opj(dir_from, pagedir, 'edit-log')
-        editlog_to = opj(dir_to, pagedir, 'edit-log')
-        convert_editlog(editlog_from, editlog_to)
-        
-        #cachedir_from = opj(dir_from, pagedir, 'cache')
-        #cachedir_to = opj(dir_to, pagedir, 'cache')
-        #if os.path.exists(cachedir_from):
-        #    os.mkdir(cachedir_to)
-        #    try:
-        #        copy_file(
-        #            opj(cachedir_from, 'hitcounts'),
-        #            opj(cachedir_to, 'hitcounts'))
-        #    except: pass
-
-        attachdir_from = opj(dir_from, pagedir, 'attachments')
-        attachdir_to = opj(dir_to, pagedir, 'attachments')
-        if os.path.exists(attachdir_from):
-            try:
-                copy_dir(attachdir_from, attachdir_to)
-            except: pass
-
-
-def convert_userdir(dir_from, dir_to):
-    os.mkdir(dir_to)
-    for fname in listdir(dir_from):
-        if fname.endswith('.bookmark'):
-            bm = open(opj(dir_from, fname)).read().strip()
-            bm = str(wikiutil.timestamp2version(float(bm)))
-            f = open(opj(dir_to, fname), 'w')
-            f.write(bm)
-            f.close()
-        else:
-            copy_file(opj(dir_from, fname), opj(dir_to, fname))
-
-
-origdir = 'data.pre-mig4'
-
-# Backup original dir and create new empty dir
-try:
-    os.rename('data', origdir)
-    os.mkdir('data')
-except OSError:
-    print "You need to be in the directory where your copy of the 'data' directory is located."
-    sys.exit(1)
-
-convert_pagedir(opj(origdir, 'pages'), opj('data', 'pages'))
-
-convert_editlog(opj(origdir, 'edit-log'), opj('data', 'edit-log'))
-
-convert_eventlog(opj(origdir, 'event.log'), opj('data', 'event-log'))
-
-convert_userdir(opj(origdir, 'user'), opj('data', 'user'))
-
-copy_dir(opj(origdir, 'plugin'), opj('data', 'plugin'))
-
-copy_file(opj(origdir, 'intermap.txt'), opj('data', 'intermap.txt'))
-
--- a/MoinMoin/scripts/migration/12_to_13_mig05.py	Sat Mar 25 11:18:58 2006 +0000
+++ /dev/null	Thu Jan 01 00:00:00 1970 +0000
@@ -1,331 +0,0 @@
-#!/usr/bin/env python
-"""
-    migration from moin 1.3 < patch-221 to moin 1.3 >= patch-221
-    We need to make versioning completely different. Problem:
-        * old versioning used UNIX timestamps (32bits), but had collisions due
-          to seconds resolution (on the FS, they were avoided by using floats
-          in early moin versions, but floats suck and xmlrpc only does ints).
-        * then we moved to usecs resolution, collision problem solved, but
-          xmlrpc broke because it can't handle long ints. Oh well ... 8-(
-        * So for the 3rd try, we now just enumerate versions 1,2,3,4,...
-          This makes xmlrpc happy again (and matches better how xmlrpc was
-          designed, as it has separate fields for timestamp and version),
-          but we now have to keep the timestamp somewhere else. The appropriate
-          place is of course the edit-log.
-    
-    So we change like this:      
-        * data/pages/PageName/backup/<UTC timestamp in usecs>
-          -> data/pages/PageName/revisions/<revno>
-    A page save is now done like that:
-        * mv 'current' 'notcurrent'
-        * if success ('current' was there):
-            * revno = read('notcurrent')
-            * revno++
-            * write('notcurrent', revno)
-            * save to revisions/<revno>
-            * mv 'notcurrent' 'current'
-        * else give error msg and let user retry save
-            
-    * data/user/<uid>.bookmark stays in usecs
-    * data/event-log stays in usecs
-    * data/edit-log and data/pages/PageName/edit-log stay in usecs and:
-        * old: PageName UserIp TimeUSecs UserHost UserId Comment Action
-        * new: TimeUSecs PageRev Action PageName UserIp UserHost UserId Extra Comment
-        *                =======                                        =====
-         * PageRev is identical to the filename in revisions/ directory
-         * Extra is used for some stuff formerly put into comment field, like
-           revert info or attach filename
-           
-    Steps for a successful migration:
-
-        1. Stop your wiki and make a backup of old data and code
-
-        2. Make a copy of the wiki's "data" directory to your working dir
-
-        3. Run this script from your working dir
-
-        4. If there was no error, you will find:
-            data.pre-mig5 - the script renames your data directory copy to that name
-            data - converted data dir
-
-        5. Verify conversion results (number of pages, size of logs, attachments,
-           number of backup copies) - everything should be reasonable before
-           you proceed.
-
-        6. Copy additional files from data.pre-mig5 to data (maybe intermaps, logs,
-           etc.). Be aware that the file contents AND file names of wiki content
-           may have changed, so DO NOT copy the files inside the cache/ directory,
-           let the wiki refill it.
-
-        7. Replace the data directory your wiki uses with the data directory
-           you created by previous steps. DO NOT simply copy the converted stuff
-           into the original or you will duplicate pages and create chaos!
-
-        8. Test it - if something has gone wrong, you still have your backup.
-
-
-    @copyright: 2004 Thomas Waldmann
-    @license: GPL, see COPYING for details
-"""
-
-
-import os.path, sys, urllib
-
-# Insert THIS moin dir first into sys path, or you would run another
-# version of moin!
-sys.path.insert(0, '../../..')
-from MoinMoin import wikiutil
-
-from migutil import opj, listdir, copy_file, move_file, copy_dir
-
-# info[pagename][timestamp_usecs] = (file_from, (...))
-# if file_from is None, we have just a log entry, but no associated file yet
-info = {}
-info2 = {}
-exists = {}
-pagelist = []
-
-def gather_editlog(dir_from, el_from):
-    """ this gathers everything that is in edit-log into internal
-        data structures, converting to the future format
-    """
-    if not os.path.exists(el_from): 
-        return
-    for l in open(el_from):
-        data = l.rstrip('\n').split('\t')
-        origlen = len(data)
-        while len(data) < 7: data.append('')
-        (pagename,ip,timestamp,host,id,comment,action) = data
-        if origlen == 6:
-            action = comment
-            comment = ''
-        
-        extra = ''
-        if action == 'SAVE/REVERT': # we missed to convert that in mig4
-            ts = long(comment) # must be long for py 2.2.x
-            if ts < 4000000000: # UNIX timestamp (secs)
-                extra = str(wikiutil.timestamp2version(ts))
-            else: # usecs timestamp
-                extra = str(ts)
-            # later we convert this timestamp to a revision number
-            comment = ''
-        if action in ['ATTNEW','ATTDRW','ATTDEL',]:
-            extra = comment # filename
-            comment = '' # so we can use comments on ATT* in future
-
-        timestamp = long(timestamp) # must be long for py 2.2.x
-        data = [timestamp,'',action,pagename,ip,host,id,extra,comment]
-        
-        entry = info.get(pagename, {})
-        entry[timestamp] = [None, data]
-        info[pagename] = entry
-        
-def gather_pagedirs(dir_from, is_backupdir=0):
-    """ this gathers information from the pagedirs, i.e. text and backup
-        files (and also the local editlog) and tries to merge/synchronize
-        with the informations gathered from editlog
-    """
-    global pagelist
-    pagelist = listdir(dir_from)
-    for pagename in pagelist:
-        editlog_from = opj(dir_from, pagename, 'edit-log')
-        gather_editlog(dir_from, editlog_from)
-         
-        entry = info.get(pagename, {})
-
-        loglist = [] # editlog timestamps of page revisions
-        for ts,data in entry.items():
-            if data[1][2] in ['SAVE','SAVENEW','SAVE/REVERT',]:
-                loglist.append(ts)
-        loglist.sort()
-        lleftover = loglist[:]
-        
-        # remember the latest log entry
-        if lleftover:
-            llatest = lleftover[-1]
-        else:
-            llatest = None
-            
-        backupdir_from = opj(dir_from, pagename, 'backup')
-        if os.path.exists(backupdir_from):
-            backuplist = listdir(backupdir_from)
-            bleftover = backuplist[:]
-            for bfile in backuplist:
-                backup_from = opj(backupdir_from, bfile)
-                ts = long(bfile)
-                if ts in loglist: # we have an editlog entry, exact match
-                    entry[ts][0] = backup_from
-                    lleftover.remove(ts)
-                    bleftover.remove(bfile)
-            
-        text_from = opj(dir_from, pagename, 'text')
-        found_text = False
-        if os.path.exists(text_from): # we have a text file, it should match latest log entry
-            exists[pagename] = True
-            mtime = os.path.getmtime(text_from)
-            if llatest and llatest in lleftover:
-                ts = llatest
-                if abs(wikiutil.timestamp2version(mtime) - ts) < 2000000: # less than a second diff
-                    entry[ts][0] = text_from
-                    lleftover.remove(ts)
-                    found_text = True
-            else: # we have no log entries left 8(
-                ts = wikiutil.timestamp2version(mtime)
-                data = [ts,'','SAVE', pagename,'','','','','missing editlog entry for this page version']
-                entry[ts] = [text_from, data]
-        else:
-            # this page was maybe deleted, so we remember for later:
-            exists[pagename] = False
-            if llatest in lleftover: # if a page is deleted, the last log entry has no file
-                entry[llatest][0] = None
-                lleftover.remove(llatest)
-                        
-        if os.path.exists(backupdir_from):
-            backuplist = listdir(backupdir_from)
-            for bfile in backuplist:
-                if not bfile in bleftover: continue
-                backup_from = opj(backupdir_from, bfile)
-                bts = long(bfile) # must be long for py 2.2.x
-                for ts in lleftover:
-                    tdiff = abs(bts-ts)
-                    if tdiff < 2000000: # editlog, inexact match
-                        entry[ts][0] = backup_from
-                        lleftover.remove(ts)
-                        bleftover.remove(bfile)
-                    elif 3599000000 <= tdiff <= 3601000000: # editlog, win32 daylight saving bug
-                        entry[ts][0] = backup_from
-                        lleftover.remove(ts)
-                        bleftover.remove(bfile)
-                        print "Warning: Win32 daylight saving bug encountered & fixed!"
-                        
-            if len(bleftover) == 1 and len(lleftover) == 1: # only 1 left, must be this
-                backup_from = opj(backupdir_from, bleftover[0])
-                entry[lleftover[0]][0] = backup_from
-                lleftover = []
-                bleftover = []
-            
-            # fake some log entries
-            for bfile in bleftover:
-                backup_from = opj(backupdir_from, bfile)
-                bts = long(bfile) # must be long py 2.2.x
-                data = [ts,'','SAVE',pagename,'','','','','missing editlog entry for this page version']
-                entry[bts] = [backup_from, data]
-                
-        # check if we still haven't matched the "text" file
-        if not found_text and os.path.exists(text_from):
-            if llatest in lleftover: # latest log entry still free
-                entry[llatest][0] = text_from # take it. do not care about mtime of file.
-                lleftover.remove(llatest)
-            else: # log for "text" file is missing or latest was taken by other rev 8(
-                mtime = os.path.getmtime(text_from)
-                ts = wikiutil.timestamp2version(mtime) # take mtime, we have nothing better
-                data = [ts,'','SAVE', pagename,'','','','','missing editlog entry for this page version']
-                entry[ts] = [text_from, data]
-                
-        # delete unmatching log entries
-        for ts in lleftover:
-            #print "XXX Deleting leftover log entry: %r" % entry[ts]
-            del entry[ts]
-        
-        info[pagename] = entry
-
-def remove_trash(dir_from):
-    for pagename in info:
-        # omit dead pages and MoinEditorBackup
-        if pagename in pagelist and (
-           os.path.exists(opj(dir_from, pagename, 'text')) or
-           os.path.exists(opj(dir_from, pagename, 'backup'))
-           ) and not pagename.endswith('MoinEditorBackup'):
-            info2[pagename] = info[pagename]
-
-def generate_pages(dir_from, dir_to):
-    for pagename in info2:
-        entry = info2.get(pagename, {})
-        tslist = entry.keys()
-        if tslist:
-            pagedir = opj(dir_to, 'pages', pagename)
-            os.makedirs(opj(pagedir, 'revisions'))
-            editlog_file = opj(pagedir, 'edit-log')
-            f = open(editlog_file, 'w')
-            rev = 0
-            tslist.sort()
-            for ts in tslist:
-                rev += 1
-                revstr = '%08d' % rev
-                file_from, data = entry[ts]
-                data[0] = str(ts)
-                data[1] = revstr
-                if data[2].endswith('/REVERT'):
-                    # replace the timestamp with the revision number
-                    revertts = long(data[7]) # must be long for py 2.2.x
-                    try:
-                        revertrev = int(entry[revertts][1][1])
-                    except KeyError:
-                        # never should trigger...
-                        print "********* KeyError %s entry[%d][1][1] **********" % (pagename, revertts)
-                        revertrev = 0
-                    data[7] = '%08d' % revertrev
-                f.write('\t'.join(data)+'\n')
-                if file_from is not None:
-                    file_to = opj(pagedir, 'revisions', revstr)
-                    copy_file(file_from, file_to)
-            f.close()
-                
-            curr_file = opj(pagedir, 'current')
-            f = open(curr_file, 'w')
-            f.write(revstr)
-            f.close()
-
-        att_from = opj(dir_from, 'pages', pagename, 'attachments')
-        if os.path.exists(att_from):
-            att_to = opj(pagedir, 'attachments')
-            copy_dir(att_from, att_to)
-        
-
-def generate_editlog(dir_from, dir_to):
-    editlog = {}
-    for pagename in info2:
-        entry = info2.get(pagename, {})
-        for ts in entry:
-            file_from, data = entry[ts]
-            editlog[ts] = data
-    
-    tslist = editlog.keys()
-    tslist.sort()
-    
-    editlog_file = opj(dir_to, 'edit-log')
-    f = open(editlog_file, 'w')
-    for ts in tslist:
-        data = editlog[ts]
-        f.write('\t'.join(data)+'\n')
-    f.close()
-
-        
-origdir = 'data.pre-mig5'
-
-# Backup original dir and create new empty dir
-try:
-    os.rename('data', origdir)
-    os.mkdir('data')
-except OSError:
-    print "You need to be in the directory where your copy of the 'data' directory is located."
-    sys.exit(1)
-
-gather_editlog(origdir, opj(origdir, 'edit-log'))
-gather_pagedirs(opj(origdir, 'pages'))
-
-remove_trash(opj(origdir, 'pages'))
-
-generate_pages(origdir, 'data')
-generate_editlog(origdir, 'data')
-
-
-copy_dir(opj(origdir, 'plugin'), opj('data', 'plugin'))
-
-copy_dir(opj(origdir, 'user'), opj('data', 'user'))
-
-copy_file(opj(origdir, 'event-log'), opj('data', 'event-log'))
-
-copy_file(opj(origdir, 'intermap.txt'), opj('data', 'intermap.txt'))
-
-
--- a/MoinMoin/scripts/migration/12_to_13_mig06.py	Sat Mar 25 11:18:58 2006 +0000
+++ /dev/null	Thu Jan 01 00:00:00 1970 +0000
@@ -1,101 +0,0 @@
-#!/usr/bin/env python
-"""
-    12_to_13.py - migration from < moin--main--patch-248 to >= patch 249
-    * convert event-log from iso8859-1 to config.charset (utf-8) encoding
-
-    Steps for a successful migration to utf-8:
-        1. stop your wiki and make a backup
-        2. make a copy of the wiki's "data" directory to your working dir
-        3. clean up your working copy of the data dir:
-            a. if you use CVS or GNU arch remove stuff like CVS/, .cvsignore
-               or .arch-ids/ etc.
-            b. remove *.pickle (used by moin for caching some information,
-               will be re-created automatically), especially:
-                   I. data/user/userdict.pickle
-                   II. data/dicts.pickle
-            c. if you used symlinks in data/text or elsewhere, remove them
-        4. make sure that from_encoding and to_encoding matches your needs (see
-           beginning of script below and config.charset in moin_config.py) and
-           run python2.3 12_to_13_mig6.py from your working dir
-        5. if there was no error, you will find:
-            data.pre-mig6 (the script renames your data directory copy to that name)
-            data (result, converted to utf-8)
-        6. verify conversion results (number of pages, size of logs, attachments,
-           number of backup copies) - everything should be reasonable before
-           you proceed. Usually the file size gets larger when converting from
-           iso8859-1 (or other non-unicode charset) to utf-8 except if your
-           content is ASCII-only, then it will keep its size.
-        7. copy additional files from data.pre-mig6 to data (maybe intermaps, logs,
-           etc.). Be aware that the file contents AND file names of wiki content
-           may have changed, so DO NOT copy the cache/ directory, but let
-           the wiki recreate it.
-        8. replace the data directory your wiki uses with the data directory
-           you created by previous steps. DO NOT simply copy the converted stuff
-           into the original or you will duplicate pages and create chaos!
-        9. test it. if something has gone wrong, you still have your backup.
-
-
-        10. if you use dictionaries for spellchecking, you have to convert them
-            to config.charset, too. Remove your dict.cache before re-starting
-            your wiki.
-
-    @copyright: 2004 Thomas Waldmann
-    @license: GPL, see COPYING for details
-"""
-
-from_encoding = 'iso8859-1'
-to_encoding = 'utf-8'
-
-import os.path, sys, shutil, urllib
-
-sys.path.insert(0, '../../..')
-from MoinMoin import wikiutil
-
-from migutil import opj, listdir, copy_file, copy_dir
-
-def convert_string(str, enc_from, enc_to):
-    return str.decode(enc_from).encode(enc_to)
-
-def convert_eventlog(fname_from, fname_to, enc_from, enc_to):
-    print "%s -> %s" % (fname_from, fname_to)
-    file_from = open(fname_from)
-    file_to = open(fname_to, "w")
-        
-    for line in file_from:
-        line = line.replace('\r','')
-        line = line.replace('\n','')
-        fields = line.split('\t')
-        kvpairs = fields[2]
-        kvpairs = kvpairs.split('&')
-        kvlist = []
-        for kvpair in kvpairs:
-            key, val = kvpair.split('=')
-            key = urllib.unquote(key)
-            val = urllib.unquote(val)
-            key = convert_string(key, enc_from, enc_to)
-            val = convert_string(val, enc_from, enc_to)
-            key = urllib.quote(key)
-            val = urllib.quote(val)
-            kvlist.append("%s=%s" % (key,val))
-        fields[2] = '&'.join(kvlist)
-        line = '\t'.join(fields) + '\n'
-        file_to.write(line)
-
-    file_to.close()
-    file_from.close()
-    st=os.stat(fname_from)
-    os.utime(fname_to, (st.st_atime,st.st_mtime))
-
-origdir = 'data.pre-mig6'
-
-try:
-    os.rename('data', origdir)
-except OSError:
-    print "You need to be in the directory where your copy of the 'data' directory is located."
-    sys.exit(1)
-
-copy_dir(origdir, 'data')
-os.remove(opj('data','event-log')) # old format
-convert_eventlog(opj(origdir, 'event-log'), opj('data', 'event-log'), from_encoding, to_encoding)
-
-
--- a/MoinMoin/scripts/migration/12_to_13_mig07.py	Sat Mar 25 11:18:58 2006 +0000
+++ /dev/null	Thu Jan 01 00:00:00 1970 +0000
@@ -1,85 +0,0 @@
-#!/usr/bin/env python
-"""
-    12_to_13.py - converting CRLF / LF style to the future standard
-    Use this to convert from 1.3 pre patch-275 to patch-275.
-    
-    Changes:
-    * use OS style for logs (== no change, same as it was)
-    * use CRLF for page files on any platform (text/* mandates it!) -
-      and we will use that MIME type soon.
-    * use LF only internally in moin, convert from/to CRLF early/late
-      where needed
-
-    @copyright: 2004 Thomas Waldmann
-    @license: GPL, see COPYING for details
-"""
-
-import os.path, sys, urllib
-
-# Insert THIS moin dir first into sys path, or you would run another
-# version of moin!
-sys.path.insert(0, '../../..')
-from MoinMoin import wikiutil
-
-from migutil import opj, listdir, copy_file, move_file, copy_dir
-
-def tocrlf(fni, fno):
-    """ rewrite a text file using CRLF for line endings, no matter what
-        it was before.
-    """
-    fi = open(fni, "rb")
-    data = fi.read()
-    fi.close()
-    data = data.replace("\r","")
-    lines = data.split("\n")
-    data = "\r\n".join(lines)
-    if data[-2:] != "\r\n":
-        data += "\r\n"
-    fo = open(fno, "wb")
-    fo.write(data)
-    fo.close()
-    st=os.stat(fni)
-    os.utime(fno, (st.st_atime,st.st_mtime))
-                
-def process_pagedirs(dir_from, dir_to):
-    pagelist = listdir(dir_from)
-    for pagename in pagelist:
-        pagedir_from = opj(dir_from, pagename)
-        pagedir_to = opj(dir_to, pagename)
-        
-        # first we copy all, even the stuff we convert later:
-        copy_dir(pagedir_from, pagedir_to)
-        
-        rev_from = opj(pagedir_from, 'revisions')
-        rev_to = opj(pagedir_to, 'revisions')
-        if os.path.exists(rev_from):
-            revlist = listdir(rev_from)
-            for rfile in revlist:
-                rev = int(rfile)
-                r_from = opj(rev_from, rfile)
-                r_to = opj(rev_to, rfile)
-                tocrlf(r_from, r_to)
-
-origdir = 'data.pre-mig7'
-
-try:
-    os.rename('data', origdir)
-except OSError:
-    print "You need to be in the directory where your copy of the 'data' directory is located."
-    sys.exit(1)
-
-os.makedirs(opj('data','pages'))
-
-process_pagedirs(opj(origdir, 'pages'), opj('data', 'pages'))
-
-copy_dir(opj(origdir, 'plugin'), opj('data', 'plugin'))
-
-copy_dir(opj(origdir, 'user'), opj('data', 'user'))
-
-copy_file(opj(origdir, 'edit-log'), opj('data', 'edit-log'))
-copy_file(opj(origdir, 'event-log'), opj('data', 'event-log'))
-
-copy_file(opj(origdir, 'intermap.txt'), opj('data', 'intermap.txt'))
-
-
-
--- a/MoinMoin/scripts/migration/12_to_13_mig08.py	Sat Mar 25 11:18:58 2006 +0000
+++ /dev/null	Thu Jan 01 00:00:00 1970 +0000
@@ -1,218 +0,0 @@
-#!/usr/bin/env python
-"""
-    migration from moin 1.3 < patch-305 to moin 1.3 >= patch-305
-    Here we fix 2 errors that crept in by use of mig1(?) and mig5:
-    * the edit-log misses 1 field (missing TAB) on faked "missing editlog
-      entry" entries
-    * we accidently gave ATTNEW/DRW/DEL an incremented revno (although
-      attaching a file doesn't change page content and revision), so we need
-      to convert those entries to use revno == 99999999 and renumber the
-      normal entries so we have no missing numbers in between
-    * edit-log's action field sometimes was empty (default: SAVE)
-    
-    Steps for a successful migration:
-
-        1. Stop your wiki and make a backup of old data and code
-
-        2. Make a copy of the wiki's "data" directory to your working dir
-
-        3. Run this script from your working dir
-
-        4. If there was no error, you will find:
-            data.pre-mig8 - the script renames your data directory copy to that name
-            data - converted data dir
-
-        5. Verify conversion results (number of pages, size of logs, attachments,
-           number of backup copies) - everything should be reasonable before
-           you proceed.
-
-        6. Copy additional files from data.pre-mig8 to data (maybe intermaps, logs,
-           etc.). Be aware that the file contents AND file names of wiki content
-           may have changed, so DO NOT copy the files inside the cache/ directory,
-           let the wiki refill it.
-
-        7. Replace the data directory your wiki uses with the data directory
-           you created by previous steps. DO NOT simply copy the converted stuff
-           into the original or you will duplicate pages and create chaos!
-
-        8. Test it - if something has gone wrong, you still have your backup.
-
-
-    @copyright: 2004 Thomas Waldmann
-    @license: GPL, see COPYING for details
-"""
-
-
-import os.path, sys, urllib
-
-# Insert THIS moin dir first into sys path, or you would run another
-# version of moin!
-sys.path.insert(0, '../../..')
-from MoinMoin import wikiutil
-
-from migutil import opj, listdir, copy_file, move_file, copy_dir
-
-# info[pagename][timestamp_usecs] = [revno_new, [...]]
-# if revno_new is 99999999, we haven't assigned a new revno to this entry
-info = {}
-
-def gather_editlog(el_from, forcepagename=None):
-    """ this gathers everything that is in edit-log into internal
-        data structures, converting to the future format
-    """
-    if not os.path.exists(el_from): 
-        return
-    for l in open(el_from):
-        data = l.rstrip('\n').rstrip('\r').split('\t')
-        while len(data) < 9:
-            data.append('')
-        (timestampstr,revstr,action,pagename,ip,host,id,extra,comment) = data
-        
-        if forcepagename: # we use this for edit-log in pagedirs (for renamed pages!)
-            pagename = forcepagename
-
-        if not action: # FIX: sometimes action is empty ...
-            action = 'SAVE'
-
-        if action in ['ATTNEW','ATTDRW','ATTDEL',]:
-            revstr = '99999999' # FIXES revno
-            # use reserved value, ATT action doesn't create new rev of anything
-
-        if (comment == '' and extra == '' and id == 'missing editlog entry for this page version') or \
-           (extra == '' and id == '' and comment == 'missing editlog entry for this page version'):
-            # FIX omitted field bug on fake entries
-            comment = 'missing edit-log entry for this revision' # more precise
-            extra = ''
-            id = ''
-            
-        rev = int(revstr)
-        data = [timestampstr,rev,action,pagename,ip,host,id,extra,comment]
-        
-        entry = info.get(pagename, {})
-        timestamp = long(timestampstr) # must be long for py 2.2.x
-        entry[timestamp] = [99999999, data] # new revno, data
-        info[pagename] = entry
-        
-def gather_pagedirs(dir_from):
-    """ this gathers edit-log information from the pagedirs, just to make sure
-    """
-    pagedir = opj(dir_from, 'pages')
-    pagelist = listdir(pagedir)
-    for pagename in pagelist:
-        editlog_from = opj(pagedir, pagename, 'edit-log')
-        gather_editlog(editlog_from, pagename)
-
-
-def generate_pages(dir_from, dir_to):
-    revactions = ['SAVE','SAVENEW','SAVE/REVERT',] # these actions create revisions
-    for pn in info:
-        entry = info.get(pn, {})
-        tslist = entry.keys()
-        if tslist:
-            pagedir = opj(dir_to, 'pages', pn)
-            revdir = opj(pagedir, 'revisions')
-            os.makedirs(revdir)
-            editlog_file = opj(pagedir, 'edit-log')
-            f = open(editlog_file, 'w')
-            revnew = 0
-            tslist.sort()
-            for ts in tslist:
-                data = entry[ts][1]
-                datanew = data[:]
-                (timestamp,rev,action,pagename,ip,host,id,extra,comment) = data
-                revstr = '%08d' % rev
-                if action in revactions:
-                    revnew += 1
-                    revnewstr = '%08d' % revnew
-                    entry[ts][0] = revnew # remember what new revno we chose
-                else: # ATTNEW,ATTDRW,ATTDEL
-                    revnewstr = '99999999'
-                if action.endswith('/REVERT'):
-                    # replace the old revno with the correct new revno
-                    revertrevold = int(extra)
-                    revertrevnew = 0
-                    for ts2 in tslist:
-                        data2 = entry[ts2][1]
-                        (timestamp2,rev2,action2,pagename2,ip2,host2,id2,extra2,comment2) = data2
-                        if rev2 == revertrevold:
-                            revertrevnew = entry[ts2][0]
-                    datanew[7] = '%08d' % revertrevnew
-                    
-                datanew[1] = revnewstr
-                f.write('\t'.join(datanew)+'\n') # does make a CRLF on win32 in the file
-                
-                if action in revactions: # we DO have a page rev for this one
-                    file_from = opj(dir_from, 'pages', pn, 'revisions', revstr)
-                    file_to = opj(revdir, revnewstr)
-                    copy_file(file_from, file_to)
-            f.close()
-            
-            # check if page exists or is deleted in orig dir
-            pagedir_from = opj(dir_from, 'pages', pn)
-            revdir_from = opj(pagedir_from, 'revisions')
-            try:
-                curr_file_from = opj(pagedir_from, 'current')
-                currentfrom = open(curr_file_from).read().strip() # try to access it
-                page_exists = 1
-            except:
-                page_exists = 0
-                
-            # re-make correct DELETED status!
-            if page_exists:
-                curr_file = opj(pagedir, 'current')
-                f = open(curr_file, 'w')
-                f.write("%08d\n" % revnew) # we add a \n, so it is easier to hack in there manually
-                f.close()
-
-        att_from = opj(dir_from, 'pages', pn, 'attachments')
-        if os.path.exists(att_from):
-            att_to = opj(pagedir, 'attachments')
-            copy_dir(att_from, att_to)
-        
-
-def generate_editlog(dir_from, dir_to):
-    editlog = {}
-    for pagename in info:
-        entry = info.get(pagename, {})
-        for ts in entry:
-            file_from, data = entry[ts]
-            editlog[ts] = data
-    
-    tslist = editlog.keys()
-    tslist.sort()
-    
-    editlog_file = opj(dir_to, 'edit-log')
-    f = open(editlog_file, 'w')
-    for ts in tslist:
-        datatmp = editlog[ts][:]
-        rev = datatmp[1]
-        datatmp[1] = '%08d' % rev
-        f.write('\t'.join(datatmp)+'\n')
-    f.close()
-
-        
-origdir = 'data.pre-mig8'
-
-# Backup original dir and create new empty dir
-try:
-    os.rename('data', origdir)
-    os.mkdir('data')
-except OSError:
-    print "You need to be in the directory where your copy of the 'data' directory is located."
-    sys.exit(1)
-
-#gather_editlog(opj(origdir, 'edit-log'))
-gather_pagedirs(origdir)
-
-generate_editlog(origdir, 'data')
-generate_pages(origdir, 'data')
-
-copy_dir(opj(origdir, 'plugin'), opj('data', 'plugin'))
-
-copy_dir(opj(origdir, 'user'), opj('data', 'user'))
-
-copy_file(opj(origdir, 'event-log'), opj('data', 'event-log'))
-
-copy_file(opj(origdir, 'intermap.txt'), opj('data', 'intermap.txt'))
-
-
--- a/MoinMoin/scripts/migration/12_to_13_mig09.py	Sat Mar 25 11:18:58 2006 +0000
+++ /dev/null	Thu Jan 01 00:00:00 1970 +0000
@@ -1,159 +0,0 @@
-#!/usr/bin/env python
-"""
-Migration from moin--main--1.3 pre patch-332 to post patch-332.
-
-In patch-332 we changed the format of page lists in user data file. They
-are now tab separated instead of comma separated, and page names are not
-quoted using file system quoting.
-
-You can run the script multiple times with no damage.
-
-
-Steps for a successful migration:
-
- 1. Stop your wiki
-
- 2. Make a backup of your wiki 'data' directory
-
-    WARNING: THIS SCRIPT MIGHT CORRUPT YOUR 'DATA' DIRECTORY. DON'T
-    COMPLAIN LATER, MAKE BACKUP NOW!
-
- 3. Move the wiki's 'data' directory to your working dir
-
- 4. Run this script from your working dir
-
- 5. If there was no error, you will find:
-    data.pre-mig9   - backup of original data directory
-    data            - converted data dir
-
- 6. Verify conversion results (number of pages, size of logs,
-    attachments, number of backup copies) - everything should be
-    reasonable before you proceed.
-
-    NOTE: THE CACHE DIRECTORY IS NOT COPIED - DO NOT COPY IT, IT WILL BE
-    CREATED AND FILLED BY THE WIKI AUTOMATICALLY.
-
- 7. Move the converted data directory into your wiki. Do not simply copy
-    the converted stuff into the original or you will duplicate pages
-    and create chaos!
-
- 8. Fix permissions on your data directory, see HelpOnInstalling.
-
- 9. Test it - if something has gone wrong, you still have your backup.
-
-
-@copyright: 2004 Thomas Waldmann
-@license: GPL, see COPYING for details
-"""
-
-import os, sys, codecs
-join = os.path.join
-
-# Insert THIS moin dir first into sys path, or you might run another
-# version of moin and get unpredicted results!
-sys.path.insert(0, '../../..')
-
-from MoinMoin import wikiutil, user
-from MoinMoin.scripts.migration import migutil
-
-
-def convert_quicklinks(string):
-    """ Convert quicklinks from pre patch-332 to new format """
-    # No need to convert new style list
-    if '\t' in string:
-        return string
-        
-    names = [name.strip() for name in string.split(',')]
-    names = [wikiutil.unquoteWikiname(name) for name in names if name != '']
-    string = user.encodeList(names)
-    return string
-
-
-def convert_subscribed_pages(string):
-    """ Convert subscribed pages from pre patch-332 to new format """
-    # No need to convert new style list
-    if '\t' in string:
-        return string
-
-    # This might break pages that contain ',' in the name, we can't do
-    # anything about it. This was the reason we changed the format.
-    names = [name.strip() for name in string.split(',')]
-    string = user.encodeList(names)
-    return string
-
-    
-def convertUserData(text):
-    """ Convert user data
-
-    @param text: text of user file, unicode
-    @rtype: unicode
-    @return: convected user data
-    """
-    lines = text.splitlines()
-    for i in range(len(lines)):
-        line = lines[i]
-        try:
-            key, value = line.split('=', 1)
-        except ValueError:
-            continue
-        if key == u'quicklinks':
-            value = convert_quicklinks(value)
-        elif key == u'subscribed_pages':
-            value = convert_subscribed_pages(value)
-        lines[i] = u'%s=%s' % (key, value)
-
-    # Join back, append newline to last line
-    text = u'\n'.join(lines) + u'\n'
-    return text
-        
-
-def convertUsers(srcdir, dstdir):
-    """ Convert users files
-
-    @param srcdir: old users dir
-    @param dstdir: new users dir
-    """
-    charset = 'utf-8'
-    
-    # Create dstdir
-    if not os.path.exists(dstdir):
-        try:
-            os.mkdir(dstdir)
-        except OSError:
-            migutil.fatalError("can't create user directory at '%s'" % dstdir)
-
-    if not os.path.isdir(srcdir):
-        migutil.fatalError("can't find user directory at '%s'" % srcdir)
-
-    for name in migutil.listdir(srcdir):
-        if name == 'README' or name.endswith('.trail'):
-            # Copy as is
-            migutil.copy_file(join(srcdir, name), join(dstdir, name))
-        else:
-            srcfile = join(srcdir, name)
-            f = codecs.open(srcfile, 'rb', charset)
-            text = f.read()
-            f.close()
-            text = convertUserData(text)
-            dstfile = join(dstdir, name)
-            f = codecs.open(dstfile, 'wb', charset)
-            f.write(text)
-            f.close()
-            print "Converted '%s' to '%s'" % (srcfile, dstfile)
-
-
-if __name__ == '__main__':
-          
-    # Backup original dir
-    datadir = 'data'
-    origdir = 'data.pre-mig9'
-    migutil.backup(datadir, origdir)
-
-    # Copy ALL stuff from original dir into new data dir. Don't change
-    # or drop anything from the original directory expect cache files.
-    names = ['edit-log', 'event-log', 'intermap.txt', 'pages', 'plugin']
-    migutil.copy(names, origdir, datadir)
-
-    # Convert user directory
-    convertUsers(join(origdir, 'user'), join(datadir, 'user'))
-
--- a/MoinMoin/scripts/migration/12_to_13_mig10.py	Sat Mar 25 11:18:58 2006 +0000
+++ /dev/null	Thu Jan 01 00:00:00 1970 +0000
@@ -1,94 +0,0 @@
-#!/usr/bin/env python
-"""
-    migration from moin 1.3 < patch-xxx to moin 1.3 >= patch-xxx
-    We fix 2 issues here:
-    * we forgot to handle edit-lock files. We simply delete them now.
-    * we convert attachment names to utf-8
-    
-    Steps for a successful migration:
-
-        1. Stop your wiki and make a backup of old data and code
-
-        2. Make a copy of the wiki's "data" directory to your working dir
-
-        3. make sure that from_encoding and to_encoding matches your needs (see
-           beginning of script below and config.charset in moin_config.py) and
-           run python2.3 12_to_13_mig10.py from your working dir
-        
-        4. If there was no error, you will find:
-            data.pre-mig10 - the script renames your data directory copy to that name
-            data - converted data dir
-
-        5. Verify conversion results (number of pages, size of logs, attachments,
-           number of backup copies) - everything should be reasonable before
-           you proceed.
-
-        6. Copy additional files from data.pre-mig10 to data (maybe intermaps, logs,
-           etc.). Be aware that the file contents AND file names of wiki content
-           may have changed, so DO NOT copy the files inside the cache/ directory,
-           let the wiki refill it.
-
-        7. Replace the data directory your wiki uses with the data directory
-           you created by previous steps. DO NOT simply copy the converted stuff
-           into the original or you will duplicate pages and create chaos!
-
-        8. Test it - if something has gone wrong, you still have your backup.
-
-
-    @copyright: 2005 Thomas Waldmann
-    @license: GPL, see COPYING for details
-"""
-
-from_encoding = 'iso8859-1'
-#from_encoding = 'utf-8'
-
-to_encoding = 'utf-8'
-
-import os, os.path, sys, urllib
-
-# Insert THIS moin dir first into sys path, or you would run another
-# version of moin!
-sys.path.insert(0, '../../..')
-from MoinMoin import wikiutil
-
-from migutil import opj, listdir, copy_file, move_file, copy_dir
-
-def migrate(dir_to):
-    """ this removes edit-lock files from the pagedirs and
-        converts attachment filenames
-    """
-    pagesdir = opj(dir_to, 'pages')
-    pagelist = listdir(pagesdir)
-    for pagename in pagelist:
-        pagedir = opj(pagesdir, pagename)
-        editlock = opj(pagedir, 'edit-lock')
-        try:
-            os.remove(editlock)
-        except:
-            pass
-
-        attachdir = os.path.join(pagedir, 'attachments')
-        for root, dirs, files in os.walk(attachdir):
-            for f in  files:
-                try:
-                    f.decode(to_encoding)
-                except UnicodeDecodeError:
-                    fnew = f.decode(from_encoding).encode(to_encoding)
-                    os.rename(os.path.join(root,f), os.path.join(root, fnew))
-                    print 'renamed', f, '\n ->', fnew, ' in dir:', root
-
-        
-origdir = 'data.pre-mig10'
-destdir = 'data'
-
-# Backup original dir and create new empty dir
-try:
-    os.rename(destdir, origdir)
-except OSError:
-    print "You need to be in the directory where your copy of the 'data' directory is located."
-    sys.exit(1)
-
-copy_dir(origdir, destdir)
-migrate(destdir)
-
-
--- a/MoinMoin/scripts/migration/12_to_13_mig11.py	Sat Mar 25 11:18:58 2006 +0000
+++ /dev/null	Thu Jan 01 00:00:00 1970 +0000
@@ -1,83 +0,0 @@
-#!/usr/bin/env python
-"""
-    migration from moin 1.2 to moin 1.3
-    For 1.3, the plugin module loader needs some __init__.py files.
-    Although we supply those files in the new "empty wiki template" in
-    wiki/data, many people forgot to update their plugin directories,
-    so we do that via this mig script now.
-    
-    Steps for a successful migration:
-
-        1. Stop your wiki and make a backup of old data and code
-
-        2. Make a copy of the wiki's "data" directory to your working dir
-
-        3. If there was no error, you will find:
-            data.pre-mig11 - the script renames your data directory copy to that name
-            data - converted data dir
-
-        4. Copy additional files from data.pre-mig11 to data (maybe intermaps, logs,
-           etc.). Be aware that the file contents AND file names of wiki content
-           may have changed, so DO NOT copy the files inside the cache/ directory,
-           let the wiki refill it.
-
-        5. Replace the data directory your wiki uses with the data directory
-           you created by previous steps. DO NOT simply copy the converted stuff
-           into the original or you will duplicate pages and create chaos!
-
-        6. Test it - if something has gone wrong, you still have your backup.
-
-
-    @copyright: 2005 Thomas Waldmann
-    @license: GPL, see COPYING for details
-"""
-
-
-import os.path, sys, urllib
-
-# Insert THIS moin dir first into sys path, or you would run another
-# version of moin!
-sys.path.insert(0, '../../..')
-from MoinMoin import wikiutil
-
-from migutil import opj, listdir, copy_file, move_file, copy_dir, makedir
-
-def migrate(destdir):
-    plugindir = opj(destdir, 'plugin')
-    makedir(plugindir)
-    fname = opj(plugindir, '__init__.py')
-    f = open(fname, 'w')
-    f.write('''\
-# *** Do not remove this! ***
-# Although being empty, the presence of this file is important for plugins
-# working correctly.
-''')
-    f.close()
-    for d in ['action', 'formatter', 'macro', 'parser', 'processor', 'theme', 'xmlrpc', ]:
-        thisdir = opj(plugindir, d)
-        makedir(thisdir)
-        fname = opj(thisdir, '__init__.py')
-        f = open(fname, 'w')
-        f.write('''\
-# -*- coding: iso-8859-1 -*-
-
-from MoinMoin.util import pysupport
-
-modules = pysupport.getPackageModules(__file__)
-''')
-        f.close()
-
-origdir = 'data.pre-mig11'
-destdir = 'data'
-
-# Backup original dir and create new empty dir
-try:
-    os.rename(destdir, origdir)
-except OSError:
-    print "You need to be in the directory where your copy of the 'data' directory is located."
-    sys.exit(1)
-
-copy_dir(origdir, destdir)
-migrate(destdir)
-
-
--- a/MoinMoin/scripts/migration/__init__.py	Sat Mar 25 11:18:58 2006 +0000
+++ /dev/null	Thu Jan 01 00:00:00 1970 +0000
@@ -1,7 +0,0 @@
-# -*- coding: iso-8859-1 -*-
-"""
-    MoinMoin - migration scripts
-
-    @copyright: 2005 by Thomas Waldmann (MoinMoin:ThomasWaldmann)
-    @license: GNU GPL, see COPYING for details.
-"""
--- a/MoinMoin/scripts/migration/migutil.py	Sat Mar 25 11:18:58 2006 +0000
+++ /dev/null	Thu Jan 01 00:00:00 1970 +0000
@@ -1,111 +0,0 @@
-# -*- coding: iso-8859-1 -*-
-"""
-    MoinMoin - utility functions used by the migration scripts
-
-    @copyright: 2005 by Thomas Waldmann (MoinMoin:ThomasWaldmann)
-    @license: GNU GPL, see COPYING for details.
-"""
-import os, sys, shutil
-
-opj = os.path.join # yes, I am lazy
-join = os.path.join
-
-
-def fatalError(msg):
-    """ Exit with error message on fatal errors """
-    print "Fatal error:", msg
-    print "Stoping"
-    sys.exit(1)
-
-
-def error(msg):
-    """ Report minor error and continue """
-    print "Error:", msg
-
-
-def backup(src, dst):
-    """ Create a backup of src directory in dst, create empty src
-
-    @param src: source
-    @param dst: destination
-    """
-    print "Create backup of '%s' in '%s'" % (src, dst)
-
-    if not os.path.isdir(src):
-        fatalError("can't find '%s'. You must run this script from the directory where '%s' is located." % src)
-
-    try:
-        os.rename(src, dst)
-    except OSError:
-        fatalError("can't rename '%s' to '%s'" % (src, dst))
-
-    try:
-        os.mkdir(src)
-    except OSError:
-        fatalError("can't create '%s'" % src)
-
-    
-def listdir(path):
-    """ Return list of files in path, filtering certain files """
-    names = [name for name in os.listdir(path)
-             if not name.startswith('.') and
-             not name.endswith('.pickle') and
-             name != 'CVS']
-    return names
-
-
-def makedir(newdir):
-    """ Create a directory, if it doesn't exist """
-    try:
-        os.mkdir(newdir)
-    except OSError:
-        pass
-
-def copy_dir(dir_from, dir_to):
-    """ Copy a complete directory """
-    print "%s/ -> %s/" % (dir_from, dir_to)
-    try:
-        shutil.copytree(dir_from, dir_to)
-    except:
-        error("can't copy '%s' to '%s'" % (dir_from, dir_to))
-
-
-def copy_file(fname_from, fname_to):
-    """ Copy a single file """
-    print "%s -> %s" % (fname_from, fname_to)
-    try:
-        data = open(fname_from).read()
-        open(fname_to, "w").write(data)
-        st=os.stat(fname_from)
-        os.utime(fname_to, (st.st_atime,st.st_mtime))
-    except:
-        error("can't copy '%s' to '%s'" % (fname_from, fname_to))
-
-
-def move_file(fname_from, fname_to):
-    """ Move a single file """
-    print "%s -> %s" % (fname_from, fname_to)
-    try:
-        os.rename(fname_from, fname_to)
-    except:
-        error("can't move '%s' to '%s'" % (fname_from, fname_to))
-
-
-def copy(items, srcdir, dstdir):
-    """ copy items from src dir into dst dir
-
-    @param items: list of items to copy
-    @param srcdir: source directory to copy items from
-    @param dstdir: destination directory to copy into
-    """
-    for item in items:
-        src = join(srcdir, item)
-        dst = join(dstdir, item)
-
-        # Copy directories
-        if os.path.isdir(src):
-            copy_dir(src, dst)
-        elif os.path.isfile(src):
-            copy_file(src, dst)
-        else:
-            error("can't find '%s'" % src)
--- a/MoinMoin/scripts/moin	Sat Mar 25 11:18:58 2006 +0000
+++ /dev/null	Thu Jan 01 00:00:00 1970 +0000
@@ -1,18 +0,0 @@
-#!/usr/bin/env python
-"""
-    MoinMoin CLI interface - not working yet!
-
-"""
-
-import sys
-sys.path.insert(0, '/home/twaldmann/moincvs/moin--main')
-sys.path.insert(0, '/org/wiki')
-sys.path.insert(0, '/org/org.linuxwiki/cgi-bin')
-
-from MoinMoin.request import RequestCLI
-
-def run():
-    page = sys.argv[1]
-    req = RequestCLI(page)
-    req.run()
-
--- a/MoinMoin/scripts/moin_build_index.py	Sat Mar 25 11:18:58 2006 +0000
+++ /dev/null	Thu Jan 01 00:00:00 1970 +0000
@@ -1,80 +0,0 @@
-#!/usr/bin/env python
-# -*- coding: iso-8859-1 -*-
-"""
-MoinMoin - build lupy search engine's index
-
-You must run this script as owner of the wiki files, usually this is the
-web server user.
-
-@copyright: 2005 by Florian Festi, Nir Soffer
-@license: GNU GPL, see COPYING for details.
-"""
-
-import os
-
-# Insert the path to MoinMoin in the start of the path
-import sys
-sys.path.insert(0, os.path.join(os.path.dirname(sys.argv[0]), 
-                                os.pardir, os.pardir))
-
-from MoinMoin.scripts import _util
-from MoinMoin.request import RequestCLI
-from MoinMoin.lupy import Index
-
-
-class IndexScript(_util.Script):
-    """ General index script class """
-
-    def __init__(self):
-        _util.Script.__init__(self, __name__, "[options]")
-        self.parser.add_option(
-            "--config-dir", metavar="DIR", dest="config_dir",
-            help=("Path to the directory containing the wiki "
-                  "configuration files. [default: current directory]")
-        )
-        self.parser.add_option(
-            "--wiki-url", metavar="WIKIURL", dest="wiki_url",
-            help="URL of wiki e.g. localhost/mywiki/ [default: CLI]"
-        )
-        self.parser.add_option(
-            "--files", metavar="FILES", dest="file_list",
-            help="filename of file list, e.g. files.lst (one file per line)"
-        )
-        self.parser.add_option(
-            "--update", action="store_true", dest="update",
-            help="when given, update an existing index"
-        )
-    
-    def mainloop(self):
-        # Insert config dir or the current directory to the start of the path.
-        config_dir = self.options.config_dir
-        if config_dir and not os.path.isdir(config_dir):
-            _util.fatal("bad path given to --config-dir option")
-        sys.path.insert(0, os.path.abspath(config_dir or os.curdir))
-
-        # Create request 
-        if self.options.wiki_url:
-            self.request = RequestCLI(self.options.wiki_url)
-        else:
-            self.request = RequestCLI()
-
-        # Do we have additional files to index?
-        if self.options.file_list:
-            self.files = file(self.options.file_list)
-        else:
-            self.files = None
-
-        self.command()
-
-class BuildIndex(IndexScript):
-    def command(self):
-        Index(self.request).indexPages(self.files, self.options.update)
-        #Index(self.request).test(self.request)
-
-
-def run():
-    BuildIndex().run()
-
-if __name__ == "__main__":
-    run()
-
--- a/MoinMoin/scripts/moin_dump.py	Sat Mar 25 11:18:58 2006 +0000
+++ /dev/null	Thu Jan 01 00:00:00 1970 +0000
@@ -1,211 +0,0 @@
-#!/usr/bin/env python
-# -*- coding: iso-8859-1 -*-
-"""
-MoinMoin - Dump a MoinMoin wiki to static pages
-
-You must run this script as owner of the wiki files, usually this is the
-web server user.
-
-@copyright: 2002-2004 by Jrgen Hermann <jh@web.de>
-@copyright: 2005 Thomas Waldmann
-@license: GNU GPL, see COPYING for details.
-"""
-
-import sys, os, time, StringIO, codecs, shutil, re, errno
-
-# Insert the path to MoinMoin in the start of the path
-sys.path.insert(0, os.path.join(os.path.dirname(sys.argv[0]), 
-                                os.pardir, os.pardir))
-
-from MoinMoin import config, wikiutil, Page
-from MoinMoin.scripts import _util
-from MoinMoin.request import RequestCLI
-from MoinMoin.action import AttachFile
-
-url_prefix = "."
-HTML_SUFFIX = ".html"
-
-page_template = u'''<!DOCTYPE HTML PUBLIC "-//W3C//DTD HTML 4.01//EN" "http://www.w3.org/TR/html4/strict.dtd">
-<html>
-<head>
-<meta http-equiv="content-type" content="text/html; charset=%(charset)s">
-<title>%(pagename)s</title>
-<link rel="stylesheet" type="text/css" media="all" charset="utf-8" href="%(theme)s/css/common.css">
-<link rel="stylesheet" type="text/css" media="screen" charset="utf-8" href="%(theme)s/css/screen.css">
-<link rel="stylesheet" type="text/css" media="print" charset="utf-8" href="%(theme)s/css/print.css">
-</head>
-<body>
-<table>
-<tr>
-<td>
-%(logo_html)s
-</td>
-<td>
-%(navibar_html)s
-</td>
-</tr>
-</table>
-<hr>
-<div id="page">
-<h1 id="title">%(pagename)s</h1>
-%(pagehtml)s
-</div>
-<hr>
-%(timestamp)s
-</body>
-</html>
-'''
-
-def _attachment(request, pagename, filename, outputdir):
-    source_dir = AttachFile.getAttachDir(request, pagename)
-    source_file = os.path.join(source_dir, filename)
-    dest_dir = os.path.join(outputdir, "attachments", wikiutil.quoteWikinameFS(pagename))
-    dest_file = os.path.join(dest_dir, filename)
-    dest_url = "attachments/%s/%s" % (wikiutil.quoteWikinameFS(pagename), filename)
-    if os.access(source_file, os.R_OK):
-        if not os.access(dest_dir, os.F_OK):
-            try:
-                os.makedirs(dest_dir)
-            except:
-                _util.fatal("Cannot create attachment directory '%s'" % dest_dir)
-        elif not os.path.isdir(dest_dir):
-            _util.fatal("'%s' is not a directory" % dest_dir)
-
-        shutil.copyfile(source_file, dest_file)
-        _util.log('Writing "%s"...' % dest_url)
-        return dest_url
-    else:
-        return ""
-  
-
-class MoinDump(_util.Script):
-    
-    def __init__(self):
-        _util.Script.__init__(self, __name__, "[options] <target-directory>")
-        self.parser.add_option(
-            "--config-dir", metavar="DIR", dest="config_dir",
-            help=("Path to the directory containing the wiki "
-                  "configuration files. [default: current directory]")
-        )
-        self.parser.add_option(
-            "--wiki-url", metavar="WIKIURL", dest="wiki_url",
-            help="URL of wiki e.g. localhost/mywiki/ [default: CLI]"
-        )
-        self.parser.add_option(
-            "--page", metavar="NAME", dest="page",
-            help="Dump a single page (with possibly broken links)"
-        )
-
-    def mainloop(self):
-        """ moin-dump's main code. """
-
-        if len(sys.argv) == 1:
-            self.parser.print_help()
-            sys.exit(1)
-
-        # Prepare output directory
-        outputdir = self.args[0]
-        outputdir = os.path.abspath(outputdir)
-        try:
-            os.mkdir(outputdir)
-            _util.log("Created output directory '%s'!" % outputdir)
-        except OSError, err:
-            if err.errno != errno.EEXIST:
-                _util.fatal("Cannot create output directory '%s'!" % outputdir)
-
-        # Insert config dir or the current directory to the start of the path.
-        config_dir = self.options.config_dir
-        if config_dir and os.path.isfile(config_dir):
-            config_dir = os.path.dirname(config_dir)
-        if config_dir and not os.path.isdir(config_dir):
-            _util.fatal("bad path given to --config-dir option")
-        sys.path.insert(0, os.path.abspath(config_dir or os.curdir))
-
-        # Create request 
-        if self.options.wiki_url:
-            request = RequestCLI(self.options.wiki_url)
-        else:
-            request = RequestCLI()
-
-        # fix url_prefix so we get relative paths in output html
-        original_url_prefix = request.cfg.url_prefix
-        request.cfg.url_prefix = url_prefix
-
-        if self.options.page:
-            pages = [self.options.page]
-        else:
-            # Get all existing pages in the wiki
-            pages = request.rootpage.getPageList(user='')
-            pages.sort()
-
-        wikiutil.quoteWikinameURL = lambda pagename, qfn=wikiutil.quoteWikinameFS: (qfn(pagename) + HTML_SUFFIX)
-
-        AttachFile.getAttachUrl = lambda pagename, filename, request, addts=0, escaped=0: (_attachment(request, pagename, filename, outputdir))
-
-        errfile = os.path.join(outputdir, 'error.log')
-        errlog = open(errfile, 'w')
-        errcnt = 0
-
-        page_front_page = wikiutil.getSysPage(request, request.cfg.page_front_page).page_name
-        page_title_index = wikiutil.getSysPage(request, 'TitleIndex').page_name
-        page_word_index = wikiutil.getSysPage(request, 'WordIndex').page_name
-        
-        navibar_html = ''
-        for p in [page_front_page, page_title_index, page_word_index]:
-            navibar_html += '&nbsp;[<a href="%s">%s</a>]' % (wikiutil.quoteWikinameURL(p), wikiutil.escape(p))
-
-        for pagename in pages:
-            # we have the same name in URL and FS
-            file = wikiutil.quoteWikinameURL(pagename) 
-            _util.log('Writing "%s"...' % file)
-            try:
-                pagehtml = ''
-                page = Page.Page(request, pagename)
-                request.page = page
-                try:
-                    request.reset()
-                    pagehtml = request.redirectedOutput(page.send_page, request, count_hit=0, content_only=1)
-                except:
-                    errcnt = errcnt + 1
-                    print >>sys.stderr, "*** Caught exception while writing page!"
-                    print >>errlog, "~" * 78
-                    print >>errlog, file # page filename
-                    import traceback
-                    traceback.print_exc(None, errlog)
-            finally:
-                logo_html = re.sub(original_url_prefix + "/?", "", request.cfg.logo_string)
-                timestamp = time.strftime("%Y-%m-%d %H:%M")
-                filepath = os.path.join(outputdir, file)
-                fileout = codecs.open(filepath, 'w', config.charset)
-                fileout.write(page_template % {
-                    'charset': config.charset,
-                    'pagename': pagename,
-                    'pagehtml': pagehtml,
-                    'logo_html': logo_html,
-                    'navibar_html': navibar_html,
-                    'timestamp': timestamp,
-                    'theme': request.cfg.theme_default,
-                })
-                fileout.close()
-
-        # copy FrontPage to "index.html"
-        indexpage = page_front_page
-        if self.options.page:
-            indexpage = self.options.page
-        shutil.copyfile(
-            os.path.join(outputdir, wikiutil.quoteWikinameFS(indexpage) + HTML_SUFFIX),
-            os.path.join(outputdir, 'index' + HTML_SUFFIX)
-        )
-
-        errlog.close()
-        if errcnt:
-            print >>sys.stderr, "*** %d error(s) occurred, see '%s'!" % (errcnt, errfile)
-
-
-def run():
-    MoinDump().run()
-
-
-if __name__ == "__main__":
-    run()
-
--- a/MoinMoin/scripts/moin_optimize_index.py	Sat Mar 25 11:18:58 2006 +0000
+++ /dev/null	Thu Jan 01 00:00:00 1970 +0000
@@ -1,48 +0,0 @@
-#!/usr/bin/env python
-# -*- coding: iso-8859-1 -*-
-"""
-MoinMoin - optimize lupy search engine's index
-
-You must run this script as owner of the wiki files, usually this is the
-web server user.
-
-@copyright: 2005 by Florian Festi, Nir Soffer
-@license: GNU GPL, see COPYING for details.
-"""
-doit = 1
-import os
-
-# Insert the path to MoinMoin in the start of the path
-import sys
-
-sys.path.insert(0, os.path.join(os.path.dirname(sys.argv[0]), 
-                                os.pardir, os.pardir))
-
-if not doit:
-    print """
-Until the following bug is closed, we avoid running this script:
-
-http://moinmoin.wikiwikiweb.de/MoinMoinBugs/LupyOptimizeBreaksIndex
-
-If you like, help us finding the problem.
-
-Terminating now, doing NOTHING...
-"""
-    sys.exit(1)
-
-from MoinMoin.scripts.moin_build_index import IndexScript
-from MoinMoin.request import RequestCLI
-from MoinMoin.lupy import Index
-
-
-class OptimizeIndex(IndexScript):
-    def command(self):
-        Index(self.request).optimize()
-
-
-def run():
-    OptimizeIndex().run()
-
-if __name__ == "__main__":
-    run()
-
--- a/MoinMoin/scripts/packages/__init__.py	Sat Mar 25 11:18:58 2006 +0000
+++ /dev/null	Thu Jan 01 00:00:00 1970 +0000
@@ -1,7 +0,0 @@
-# -*- coding: iso-8859-1 -*-
-"""
-    MoinMoin - create language packages
-
-    @copyright: 2005 by Thomas Waldmann (MoinMoin:ThomasWaldmann)
-    @license: GNU GPL, see COPYING for details.
-"""
--- a/MoinMoin/scripts/packages/create_pagepacks.py	Sat Mar 25 11:18:58 2006 +0000
+++ /dev/null	Thu Jan 01 00:00:00 1970 +0000
@@ -1,202 +0,0 @@
-#!/usr/bin/env python
-# -*- coding: iso-8859-1 -*-
-"""
-    MoinMoin - Package Generator
-
-    @copyright: 2005 by Alexander Schremmer
-    @license: GNU GPL, see COPYING for details.
-"""
-
-import os, sys
-import zipfile
-import threading
-import xmlrpclib
-from sets import Set
-from datetime import datetime
-from time import sleep
-
-# your MoinMoin package path here
-sys.path.insert(0, r"../../..")
-sys.path.insert(0, r".")
-
-from MoinMoin import config, wikidicts, wikiutil
-from MoinMoin.Page import Page
-from MoinMoin.PageEditor import PageEditor
-from MoinMoin.request import RequestCLI
-from MoinMoin.packages import packLine, unpackLine, MOIN_PACKAGE_FILE
-
-master_url ="http://moinmaster.wikiwikiweb.de/?action=xmlrpc2"
-
-EXTRA = u'extra'
-NODIST = u'nodist'
-ALL = u'all_languages'
-COMPRESSION_LEVEL = zipfile.ZIP_STORED
-
-def buildPageSets():
-    """ Calculates which pages should go into which package. """
-    pageSets = {}
-
-    #allPages = Set(xmlrpclib.ServerProxy(master_url).getAllPages())
-    allPages = Set(request.rootpage.getPageList())
-
-    systemPages = wikidicts.Group(request, "SystemPagesGroup").members()
-
-    for pagename in systemPages:
-        if pagename.endswith("Group"):
-            #print x + " -> " + repr(wikidicts.Group(request, x).members())
-            gd.addgroup(request, pagename)
-
-    langPages = Set()
-    for name, group in gd.dictdict.items():
-        group.expandgroups(gd)
-        groupPages = Set(group.members() + [name])
-        name = name.replace("SystemPagesIn", "").replace("Group", "")
-        pageSets[name] = groupPages
-        langPages |= groupPages
-
-    specialPages = Set(["SystemPagesGroup"])
-
-    masterNonSystemPages = allPages - langPages - specialPages
-
-    moinI18nPages = Set([x for x in masterNonSystemPages if x.startswith("MoinI18n")])
-    
-    nodistPages = moinI18nPages | Set(["InterWikiMap", ])
-
-    extraPages = masterNonSystemPages - nodistPages
-
-    pageSets[ALL] = langPages
-    
-    for name in pageSets.keys():
-        if name not in (u"English"):
-            pageSets[name] -= pageSets[u"English"]
-            pageSets[name] -= nodistPages
-
-    pageSets[EXTRA] = extraPages   # stuff that maybe should be in some language group
-    pageSets[NODIST] = nodistPages # we dont want to have them in dist archive
-    return pageSets
-
-def packagePages(pagelist, filename, function):
-    """ Puts pages from pagelist into filename and calls function on them on installation. """
-    try:
-        os.remove(filename)
-    except OSError:
-        pass
-    zf = zipfile.ZipFile(filename, "w", COMPRESSION_LEVEL)
-
-    cnt = 0
-    script = [packLine(['MoinMoinPackage', '1']),
-              ]
-
-    for pagename in pagelist:
-        pagename = pagename.strip()
-        page = Page(request, pagename)
-        if page.exists():
-            cnt += 1
-            script.append(packLine([function, str(cnt), pagename]))
-            timestamp = wikiutil.version2timestamp(page.mtime_usecs())
-            zi = zipfile.ZipInfo(filename=str(cnt), date_time=datetime.fromtimestamp(timestamp).timetuple()[:6])
-            zi.compress_type = COMPRESSION_LEVEL
-            zf.writestr(zi, page.get_raw_body().encode("utf-8"))
-        else:
-            #print >>sys.stderr, "Could not find the page %s." % pagename.encode("utf-8")
-            pass
-
-    script += [packLine(['Print', 'Installed MoinMaster page bundle %s.' % os.path.basename(filename)])]
-
-    zf.writestr(MOIN_PACKAGE_FILE, u"\n".join(script).encode("utf-8"))
-    zf.close()
-
-def removePages(pagelist):
-    """ Pages from pagelist get removed from the underlay directory. """
-    import shutil
-    for pagename in pagelist:
-        pagename = pagename.strip()
-        page = Page(request, pagename)
-        try:
-            underlay, path = page.getPageBasePath(-1)
-            shutil.rmtree(path)
-        except:
-            pass
-
-def packageCompoundInstaller(bundledict, filename):
-    """ Creates a package which installs all other packages. """
-    try:
-        os.remove(filename)
-    except OSError:
-        pass
-    zf = zipfile.ZipFile(filename, "w", COMPRESSION_LEVEL)
-
-    script = [packLine(['MoinMoinPackage', '1']),
-              ]
-
-    script += [packLine(["InstallPackage", "SystemPagesSetup", name + ".zip"])
-               for name in bundledict.keys() if name not in (NODIST, EXTRA, ALL, u"English")]
-    script += [packLine(['Print', 'Installed all MoinMaster page bundles.'])]
-
-    zf.writestr(MOIN_PACKAGE_FILE, u"\n".join(script).encode("utf-8"))
-    zf.close()
-
-def getMasterPages():
-    """ Leechezzz. """
-    master = xmlrpclib.ServerProxy(master_url)
-    maxThreads = 100
-
-    def downloadpage(wiki, pagename):
-        source = wiki.getPage(pagename)
-        if source.find("##master-page:FrontPage") != -1:
-            source += u"""\n\n||<tablestyle="background: lightyellow; width:100%; text-align:center">[[en]] If you want to add help pages in your favorite language, see '''SystemPagesSetup'''.||\n"""
-
-        PageEditor(request, pagename, uid_override="Fetching ...")._write_file(source)
-        #print "Fetched " + pagename.encode("utf-8")
-
-    stopped = []
-    running = []
-
-    print "Loading master page list ..."
-    pagelist = master.getAllPages()
-    print "Preparing threads ..."
-    for pagename in pagelist:
-        t = threading.Thread(target=downloadpage, args=(master, pagename), name=pagename.encode("unicode_escape"))
-        stopped.append(t)
-
-    print "Starting scheduler ..."
-    while len(running) > 0 or len(stopped) != 0:
-        for x in running:
-            if not x.isAlive():
-                #print "Found dead thread " + repr(x)
-                running.remove(x)
-        print "running %i| stopped %i" % (len(running), len(stopped))
-        for i in xrange(min(maxThreads - len(running), len(stopped))):
-            t = stopped.pop()
-            running.append(t)
-            t.start()
-            #print "Scheduled %s." % repr(t)
-        sleep(1)
-
-def run():
-    request = RequestCLI(url='localhost/')
-    request.form = request.args = request.setup_args()
-
-    gd = wikidicts.GroupDict(request)
-    gd.reset()
-
-    #getMasterPages()
-    print "Building page sets ..."
-    pageSets = buildPageSets()
-
-    print "Creating packages ..."
-    generate_filename = lambda name: os.path.join('testwiki', 'underlay', 'pages', 'SystemPagesSetup', 'attachments', '%s.zip' % name)
-
-    packageCompoundInstaller(pageSets, generate_filename(ALL))
-
-    [packagePages(list(pages), generate_filename(name), "ReplaceUnderlay") 
-        for name, pages in pageSets.items() if not name in (u'English', ALL, NODIST)]
-
-    [removePages(list(pages)) 
-        for name, pages in pageSets.items() if not name in (u'English', ALL)]
-
-    print "Finished."
-
-if __name__ == "__main__":
-    run()
-
--- a/MoinMoin/scripts/pagescleaner.py	Sat Mar 25 11:18:58 2006 +0000
+++ /dev/null	Thu Jan 01 00:00:00 1970 +0000
@@ -1,64 +0,0 @@
-#!/usr/bin/env python
-# -*- coding: iso-8859-1 -*-
-"""
-    MoinMoin - display unused or trash page directories in data/pages
-    
-    Usage:
-    First change the base path to match your needs.
-    Then do ./pagescleaner.py >cleanthem.sh
-    Then please review cleanthem.sh and run it, if it is OK.
-
-    @copyright: 2005 by Thomas Waldmann (MoinMoin:ThomasWaldmann)
-    @license: GNU GPL, see COPYING for details.
-"""
-
-import os
-
-base = "."
-pagebasedir = base + "/data/pages"
-
-def qualify(p):
-    dir = os.listdir(p)
-    if not dir:
-        return 'empty'
-
-    # check if we have something of potential value
-    revs = []
-    if 'revisions' in dir:
-        revs = os.listdir(os.path.join(p, 'revisions'))
-    atts = []
-    if 'attachments' in dir:
-        atts = os.listdir(os.path.join(p, 'attachments'))
-
-    if not revs and not atts:
-        return 'trash'
-    
-    if 'current-locked' in dir:
-        return 'current-locked'
-    elif 'current' in dir:
-        try:
-            current = open(os.path.join(p, 'current')).read().strip()
-            curr = int(current)
-        except:
-            return 'current damaged'
-        if current not in revs:
-            return 'deleted'
-    else:
-        return 'no current'
-
-    return 'ok'
-
-def run():
-    for p in os.listdir(pagebasedir):
-        pagedir = os.path.join(pagebasedir, p)
-        status = qualify(pagedir)
-        if status in ['trash', 'empty', ]:
-            print "mv '%s' trash # %s" % (pagedir,status)
-        elif status in ['deleted', ]:
-            print "mv '%s' deleted # %s" % (pagedir,status)
-        else:
-            print "# %s: '%s'" % (status, pagedir)
-
-if __name__ == "__main__":
-    run()
-
--- a/MoinMoin/scripts/print_stats.py	Sat Mar 25 11:18:58 2006 +0000
+++ /dev/null	Thu Jan 01 00:00:00 1970 +0000
@@ -1,38 +0,0 @@
-#!/usr/bin/env python
-# -*- coding: iso-8859-1 -*-
-"""
-    MoinMoin - Print statistics gathered by hotshot profiler
-
-    Usage:
-        print_stats.py statsfile
-    
-    Typical usage:
-     1. Edit moin.py and activate the hotshot profiler, set profile file name
-     2. Run moin.py
-     3. Do some request, with a browser, script or ab
-     4. Stop moin.py
-     5. Run this tool: print_stats.py moin.prof
-
-    Currently CGI and twisted also have a hotshot profiler integration.
-    
-    @copyright: 2005 by Thomas Waldmann (MoinMoin:ThomasWaldmann)
-    @license: GNU GPL, see COPYING for details.
-"""
-def run():
-    import sys
-    from hotshot import stats
-
-    if len(sys.argv) != 2:
-        print __doc__
-        sys.exit()
-        
-    # Load and print stats 
-    s = stats.load(sys.argv[1])
-    s.strip_dirs()
-    s.sort_stats('cumulative', 'time', 'calls')
-    s.print_stats(40)
-    s.print_callers(40)
-
-if __name__ == "__main__":
-    run()
-
--- a/MoinMoin/scripts/reducewiki/__init__.py	Sat Mar 25 11:18:58 2006 +0000
+++ /dev/null	Thu Jan 01 00:00:00 1970 +0000
@@ -1,7 +0,0 @@
-# -*- coding: iso-8859-1 -*-
-"""
-    MoinMoin - reduce a wiki to the latest revision of each page
-
-    @copyright: 2005 by Thomas Waldmann (MoinMoin:ThomasWaldmann)
-    @license: GNU GPL, see COPYING for details.
-"""
--- a/MoinMoin/scripts/reducewiki/reducewiki.py	Sat Mar 25 11:18:58 2006 +0000
+++ /dev/null	Thu Jan 01 00:00:00 1970 +0000
@@ -1,91 +0,0 @@
-#!/usr/bin/env python
-"""
-    Use this script to reduce a data/ directory to the latest page revision of
-    each non-deleted page (plus all attachments).
-    
-    This is used to make the distributed underlay directory, but can also be
-    used for other purposes.
-    
-    So we change like this:      
-        * data/pages/PageName/revisions/{1,2,3,4}
-          -> data/pages/revisions/1
-        * data/pages/PageName/current (pointing to e.g. 4)
-          -> same (pointing to 1)
-        * data/pages/PageName/edit-log and data/edit-log
-          -> do not copy
-        * data/pages/PageName/attachments/*
-          -> just copy
-
-    Steps for a successful conversion:
-
-        1. Stop your wiki and make a backup of old data and code
-
-        2. Make a copy of the wiki's "data" directory to your working dir
-
-        3. Run this script from your working dir
-
-        4. If there was no error, you will find:
-            data.pre-reduce - the script renames your data directory copy to that name
-            data - reduced data dir
-
-        5. Verify conversion results (number of pages, ...)
-
-        6. Test it - if something has gone wrong, you still have your backup.
-
-
-    @copyright: 2005 Thomas Waldmann
-    @license: GPL, see COPYING for details
-"""
-
-url = 'moinmaster.wikiwikiweb.de/'
-destdir = 'underlay'
-
-import sys
-sys.path.insert(0, '/org/de.wikiwikiweb.moinmaster/bin15') # farmconfig/wikiconfig location
-sys.path.insert(0, '../../..')
-
-import os, os.path, shutil, codecs
-from MoinMoin import config
-from MoinMoin import wikiutil
-from MoinMoin.request import RequestCLI
-from MoinMoin.Page import Page
-from MoinMoin.PageEditor import PageEditor
-from MoinMoin.action import AttachFile
-
-def copypage(request, rootdir, pagename):
-    """quick and dirty!"""
-    pagedir = os.path.join(rootdir, 'pages', wikiutil.quoteWikinameFS(pagename))
-    os.makedirs(pagedir)
-    
-    revstr = '%08d' % 1
-    cf = os.path.join(pagedir, 'current')
-    open(cf, 'w').write(revstr+'\n')
-    
-    revdir = os.path.join(pagedir, 'revisions')
-    os.makedirs(revdir)
-    tf = os.path.join(revdir, revstr)
-    p = Page(request, pagename)
-    text = p.get_raw_body().replace("\n","\r\n")
-    codecs.open(tf, 'wb', config.charset).write(text)
-
-    source_dir = AttachFile.getAttachDir(request, pagename)
-    if os.path.exists(source_dir):
-        dest_dir = os.path.join(pagedir, "attachments")
-        os.makedirs(dest_dir)
-        for filename in os.listdir(source_dir):
-            source_file = os.path.join(source_dir, filename)
-            dest_file = os.path.join(dest_dir, filename)
-            shutil.copyfile(source_file, dest_file)
-
-def run():
-    request = RequestCLI(url=url)
-    request.form = request.args = request.setup_args()
-
-    pagelist = list(request.rootpage.getPageList(user=''))
-    for pagename in pagelist:
-        copypage(request, destdir, pagename)
-        
-
-if __name__ == "__main__":
-    run()
-
--- a/MoinMoin/scripts/repair_language.py	Sat Mar 25 11:18:58 2006 +0000
+++ /dev/null	Thu Jan 01 00:00:00 1970 +0000
@@ -1,167 +0,0 @@
-#!/usr/bin/env python
-""" repair-language - repair page language setting.
-
-Usage:
-
-    repair-language option
-
-Options:
-
-    verify - verify pages, does not change anything, print page revision
-        that should be repaired.
-
-    repair - repair all page revisions.
-
-Step by step instructions:
-
- 1. Stop your wiki.
-
- 2. Make a backup of 'data' directory.
-
- 3. Run this script from your wiki data directory, where your pages
-    directory lives.
-
- 4. Fix permissions on the data directory, as explained in HelpOnInstalling.
-
- 5. Verify that pages are fine after repair, if you find a problem,
-    restore your data directory from backup.
-
-Why run this script?
-
-    In patch-325 a new #language processing instruction has been added.
-    Pages that specify the language with it are displayed using correct
-    direction, even if language_default use different direction.
-
-    In the past, pages used to have ##language:xx comment. This comment
-    has no effect, and should be replaced with newer #language xx
-    processing instruction.
-
-    This script replace ##language:xx to #language xx  in page headers.
-    It convert all page revisions, so you can safely revert back to old
-    revision and get correct page direction.
-
-    You can run the script multiple times if needed.
-
-@copyright: 2004 Nir Soffer <nirs AT freeshell DOT org>
-@license: GPL, see COPYING for details
-"""
-
-import codecs
-import os, sys
-
-# Insert THIS moin dir first into sys path, or you would run another
-# version of moin!
-sys.path.insert(0, '../..')
-
-from MoinMoin import i18n
-valid_languages = i18n.wikiLanguages()
-
-
-def listdir(path):
-    """ Return list of files in path, filtering certain files """
-    names = [name for name in os.listdir(path)
-             if not name.startswith('.') and
-             not name.endswith('.pickle') and
-             name != 'CVS']
-    return names
-
-
-def repairText(text):
-    """ Repair page text
-
-    We change only this type of lines that currently are in moinmaster
-    ##language:\s*xx
-
-    Warning: will not repair the language if there is more text on the
-    same line, e.g. ##language:fr make it french!
-
-    @param text: the page text, unicode
-    @rtype: 2 tuple, (unicode, int)
-    @return: text after replacement, lines changed
-    """
-    lineend = u'\r\n'
-    needle = u'##language:'
-    changed = 0
-
-    # Get text lines
-    lines = text.splitlines()
-    
-    # Look in page header
-    for i in range(len(lines)):
-        line = lines[i]
-        if not line.startswith(u'#'):
-            break # end of header
-        
-        if line.startswith(needle):
-            # Get language from rest of line
-            lang = line[len(needle):].strip()
-            # Normalize language names. Language files are named xx_yy,
-            # but iso names use xx-yy. This can confuse people.
-            lang = lang.replace(u"_", u"-")
-                
-            # Validate lang, make new style language processing
-            # instruction.
-            if lang in valid_languages:
-                line = u'#language %s' % lang
-                lines[i] = line
-                changed += 1
-
-    if changed:
-        # Join lines back, make sure there is trailing line end
-        text = lineend.join(lines) + lineend
-    return text, changed
-
-
-def processPages(path, repair):
-    """ Process page directory
-    
-    @param repair: repair or just test
-    """
-    charset = 'utf-8'
-    
-    pages = [p for p in listdir(path) if os.path.isdir(os.path.join(path, p))]
-    for page in pages:
-        revdir = os.path.join(path, page, 'revisions')
-        if not os.path.isdir(revdir):
-            print 'Error: %s: missing revisions directory' % page
-            continue
-        
-        for rev in listdir(revdir):
-            revpath = os.path.join(revdir, rev)
-            # Open file, read text
-            f = codecs.open(revpath, 'rb', charset)
-            text = f.read()
-            f.close()
-            text, changed = repairText(text)
-
-            if changed and repair:
-                # Save converted text
-                f = codecs.open(revpath, 'wb', charset)
-                f.write(text)
-                f.close()
-                print 'Repaired %s revision %s' % (page, rev)
-            elif changed:
-                print 'Should repair %s revision %s' % (page, rev)
-
-
-if __name__ == '__main__':
-
-    # Check for pages directory in current directory
-    path = os.path.abspath('pages')
-    if not os.path.isdir(path):
-        print "Error: could not find 'pages' directory"
-        print 'Run this script from your wiki data directory'
-        print __doc__
-        sys.exit(1)   
-    
-    options = {'verify': 0, 'repair': 1,}
-    
-    if len(sys.argv) != 2 or sys.argv[1] not in options:
-        print __doc__
-        sys.exit(1)
-
-    processPages(path, repair=options[sys.argv[1]])
-    
-
-
-
--- a/MoinMoin/scripts/unicode/__init__.py	Sat Mar 25 11:18:58 2006 +0000
+++ /dev/null	Thu Jan 01 00:00:00 1970 +0000
@@ -1,7 +0,0 @@
-# -*- coding: iso-8859-1 -*-
-"""
-    MoinMoin - build unicode char tables
-
-    @copyright: 2005 by Thomas Waldmann (MoinMoin:ThomasWaldmann)
-    @license: GNU GPL, see COPYING for details.
-"""
--- a/MoinMoin/scripts/unicode/mk_chartypes.py	Sat Mar 25 11:18:58 2006 +0000
+++ /dev/null	Thu Jan 01 00:00:00 1970 +0000
@@ -1,39 +0,0 @@
-"""
-    Build MoinMoin/util/chartypes.py with
-    UCS-2 character types (upper/lower/digits/spaces).
-    
-    @copyright: 2004 Thomas Waldmann
-    @license: GNU GPL, see COPYING for details
-"""
-
-uppercase = []
-lowercase = []
-digits = []
-space = []
-for code in range(1,65535):
-    c = unichr(code)
-    str = "\\u%04x" % code
-    if c.isupper():
-        uppercase.append(str)
-    elif c.islower():
-        lowercase.append(str)
-    elif c.isdigit():
-        digits.append(str)
-    elif c.isspace():
-        space.append(str)
-
-chars_upper = u''.join(uppercase)
-chars_lower = u''.join(lowercase+digits)
-chars_digits = u''.join(digits)
-chars_spaces = u''.join(space)
-
-print """
-_chartypes = {
-    'chars_upper': u"%(chars_upper)s",
-    'chars_lower': u"%(chars_lower)s",
-    'chars_digits': u"%(chars_digits)s",
-    'chars_spaces': u"%(chars_spaces)s",
-}
-
-""" % globals()
-
--- a/MoinMoin/scripts/xmlrpc-tools/.cvsignore	Sat Mar 25 11:18:58 2006 +0000
+++ /dev/null	Thu Jan 01 00:00:00 1970 +0000
@@ -1,3 +0,0 @@
-{arch}
-.arch-ids
-
--- a/MoinMoin/scripts/xmlrpc-tools/HelloWorld.py	Sat Mar 25 11:18:58 2006 +0000
+++ /dev/null	Thu Jan 01 00:00:00 1970 +0000
@@ -1,17 +0,0 @@
-#!/usr/bin/env python
-"""
-This script is a sample for xmlrpc calls.
-
-It calls the HelloWorld.py xmlrpc plugin.
-
-GPL software, 2003-08-10 Thomas Waldmann
-"""
-
-def run():
-    import xmlrpclib
-    srcwiki = xmlrpclib.ServerProxy("http://moinmaster.wikiwikiweb.de:8000/?action=xmlrpc2")
-    print srcwiki.HelloWorld("Hello Wiki User!\n")
-
-if __name__ == "__main__":
-    run()
-
--- a/MoinMoin/scripts/xmlrpc-tools/UpdateGroupTest.py	Sat Mar 25 11:18:58 2006 +0000
+++ /dev/null	Thu Jan 01 00:00:00 1970 +0000
@@ -1,46 +0,0 @@
-#!/usr/bin/env python
-# -*- coding: utf-8 -*-
-"""
-This script is just an example how to update a group definition page using xmlrpc.
-
-GPL software, 2005 Thomas Waldmann
-"""
-def run():
-    import sys
-    sys.path.insert(0, '../../..')
-
-    import xmlrpclib
-    from MoinMoin.support.BasicAuthTransport import BasicAuthTransport
-
-    user = "XmlRpc"
-    password = "wrong"
-    dsttrans = BasicAuthTransport(user, password)
-    mywiki = xmlrpclib.ServerProxy("http://enterprise.wikiwikiweb.de:8888/?action=xmlrpc2", transport=dsttrans)
-
-    groupname = "TestGroup"
-    groupdesc = "This is just a test."
-    groupmembers = ["TestUser1", "TestUser2",]
-    print mywiki.UpdateGroup(groupname, groupdesc, groupmembers)
-
-    groupname = "TestAclGroup"
-    groupdesc = "This is just a test."
-    groupmembers = ["TestUser3",]
-    print mywiki.UpdateGroup(groupname, groupdesc, groupmembers, "All:read,write,delete,revert")
-
-    del mywiki
-    del dsttrans
-
-    user = "XmlRpc"
-    password = "completelywrong"
-    dsttrans = BasicAuthTransport(user, password)
-    mywiki = xmlrpclib.ServerProxy("http://enterprise.wikiwikiweb.de:8888/?action=xmlrpc2", transport=dsttrans)
-
-    groupname = "TestGroup"
-    groupdesc = "This is just a test."
-    groupmembers = ["WrongUser1", "WrongUser2",]
-    print mywiki.UpdateGroup(groupname, groupdesc, groupmembers)
-
-
-if __name__ == "__main__":
-    run()
-
--- a/MoinMoin/scripts/xmlrpc-tools/WhoAmI.py	Sat Mar 25 11:18:58 2006 +0000
+++ /dev/null	Thu Jan 01 00:00:00 1970 +0000
@@ -1,26 +0,0 @@
-#!/usr/bin/env python
-"""
-This script checks whether the wiki authenticates and trusts you.
-
-It calls the TrustMe.py xmlrpc plugin. To use http auth, you need to configure
-the srcwiki with auth = [http, moin_cookie] in its wikiconfig.
-
-GPL software, 2005 Thomas Waldmann
-"""
-
-def run():
-    user = "ThomasWaldmann"
-    password = "wrong"
-
-    import sys, xmlrpclib
-    sys.path.insert(0, '../../..')
-    from MoinMoin.support.BasicAuthTransport import BasicAuthTransport
-
-    srctrans = BasicAuthTransport(user, password)
-    srcwiki = xmlrpclib.ServerProxy("http://moinmaster.wikiwikiweb.de/?action=xmlrpc2", transport=srctrans)
-
-    print srcwiki.WhoAmI()
-
-if __name__ == "__main__":
-    run()
-
--- a/MoinMoin/scripts/xmlrpc-tools/__init__.py	Sat Mar 25 11:18:58 2006 +0000
+++ /dev/null	Thu Jan 01 00:00:00 1970 +0000
@@ -1,8 +0,0 @@
-# -*- coding: iso-8859-1 -*-
-"""
-    MoinMoin - xmlrpc example Scripts
-
-    @copyright: 2004 by Thomas Waldmann
-    @license: GNU GPL, see COPYING for details.
-"""
-
--- a/MoinMoin/scripts/xmlrpc-tools/get_es_pages.py	Sat Mar 25 11:18:58 2006 +0000
+++ /dev/null	Thu Jan 01 00:00:00 1970 +0000
@@ -1,32 +0,0 @@
-""" get some pages from another wiki """
-
-def run():
-    import sys, os, xmlrpclib, codecs
-
-    sys.path.insert(0, "..")
-    from MoinMoin import wikiutil
-
-    s = xmlrpclib.ServerProxy("http://wainu.ii.uned.es/wainuki/?action=xmlrpc2")
-    index = open("index")
-
-    for l in index:
-        d = l.split('||')
-        pn = d[3].strip()
-        pd = s.getPage(pn)
-        dn = wikiutil.quoteWikinameFS(pn.decode("utf-8"))
-        os.mkdir(dn)
-        cn = os.path.join(dn,'current')
-        f = open(cn,'w')
-        f.write('00000001\n')
-        f.close()
-        dn2 = os.path.join(dn, 'revisions')
-        os.mkdir(dn2)
-        fn = os.path.join(dn2,'00000001')
-        f = codecs.open(fn,"wb","utf-8")
-        pd = pd.replace('\n','\r\n')
-        f.write(pd)
-        f.close()
-
-if __name__ == "__main__":
-    run()
-
--- a/MoinMoin/scripts/xmlrpc-tools/getmasterpages2.py	Sat Mar 25 11:18:58 2006 +0000
+++ /dev/null	Thu Jan 01 00:00:00 1970 +0000
@@ -1,48 +0,0 @@
-#!/usr/bin/env python
-"""
-This script is a hack because moinmaster wiki does not support
-xmlrpc due to unknown reasons. It gets all SystemPages from srcwiki
-via action=raw and stores them into dstwiki via xmlrpc.
-
-We use wiki rpc v2 here.
-
-GPL software, 2003-09-27 Thomas Waldmann
-"""
-
-import xmlrpclib, urllib
-from MoinMoin import wikiutil
-from MoinMoin.support.BasicAuthTransport import BasicAuthTransport
-
-srcurlformat = "http://moinmaster.wikiwikiweb.de/%s?action=raw"
-user = "YourWikiName"
-password = "yourbasicauthpassword"
-srcwiki = xmlrpclib.ServerProxy("http://moinmaster.wikiwikiweb.de/?action=xmlrpc2")
-dsttrans = BasicAuthTransport(user,password)
-dstwiki = xmlrpclib.ServerProxy("http://devel.linuxwiki.org/moin--main/__xmlrpc/?action=xmlrpc2", transport=dsttrans)
-
-def rawGetPage(srcurl, pagename, encoding='iso-8859-1'):
-    url = srcurl % wikiutil.quoteWikinameFS(pagename.encode(encoding))
-    pagedata = urllib.urlopen(url).read()
-    return unicode(pagedata, encoding).encode('utf-8')
-
-def transferpage(srcurlformat, dstwiki, pagename):
-    pagedata = srcwiki.getPage(pagename)
-    #pagedata = rawGetPage(srcurlformat, pagename, 'iso-8859-1')
-    rc = dstwiki.putPage(pagename, pagedata)
-    print "Transferred %s. Len = %d, rc = %s" % (pagename.encode('ascii','replace'), len(pagedata), str(rc))
-
-def run():
-    allsystempagesgroup = 'AllSystemPagesGroup'
-    transferpage(srcurlformat, dstwiki, allsystempagesgroup)
-    allgrouppages = dstwiki.listLinks(allsystempagesgroup)
-
-    for langgrouppage in allgrouppages:
-        pagename = langgrouppage['name']
-        transferpage(srcurlformat, dstwiki, pagename)
-        pages = dstwiki.listLinks(pagename)
-        for page in pages:
-            transferpage(srcurlformat, dstwiki, page['name'])
-
-if __name__ == "__main__":
-    run()
-
--- a/MoinMoin/scripts/xmlrpc-tools/getsystempages.py	Sat Mar 25 11:18:58 2006 +0000
+++ /dev/null	Thu Jan 01 00:00:00 1970 +0000
@@ -1,35 +0,0 @@
-#!/usr/bin/env python
-"""
-This script gets all SystemPages from srcwiki via xmlrpc and
-stores them into dstwiki via xmlrpc. We use wiki rpc v1 here.
-
-*** DO NOT USE, SEE getsystempages2.py ***
-
-GPL software, 2003-08-10 Thomas Waldmann
-"""
-
-from xmlrpclib import *
-
-srcwiki = ServerProxy("http://moinmaster.wikiwikiweb.de/?action=xmlrpc")
-#srcwiki = ServerProxy("http://moinmaster.wikiwikiweb.de/?action=xmlrpc")
-dstwiki = ServerProxy("http://devel.linuxwiki.org/moin--cvs?action=xmlrpc")
-
-def transferpage(srcwiki, dstwiki, pagename):
-    pagedata = srcwiki.getPage(pagename).data
-    dstwiki.putPage(pagename, Binary(pagedata))
-    print "Transferred %s." % pagename
-
-def run():
-    allsystempagesgroup = 'AllSystemPagesGroup'
-    transferpage(srcwiki, dstwiki, allsystempagesgroup)
-    allgrouppages = srcwiki.listLinks(allsystempagesgroup)
-    for langgrouppage in allgrouppages:
-        pagename = langgrouppage['name']
-        transferpage(srcwiki, dstwiki, pagename)
-        pages = srcwiki.listLinks(pagename)
-        for page in pages:
-            transferpage(srcwiki, dstwiki, page['name'])
-
-if __name__ == "__main__":
-    run()
-
--- a/MoinMoin/scripts/xmlrpc-tools/getsystempages2.py	Sat Mar 25 11:18:58 2006 +0000
+++ /dev/null	Thu Jan 01 00:00:00 1970 +0000
@@ -1,38 +0,0 @@
-#!/usr/bin/env python
-"""
-This script gets all SystemPages from srcwiki via xmlrpc and
-stores them into dstwiki via xmlrpc. We use wiki rpc v2 here.
-
-GPL software, 2003-08-10 Thomas Waldmann
-"""
-
-import xmlrpclib
-from MoinMoin.support.BasicAuthTransport import BasicAuthTransport
-
-#srcwiki = xmlrpclib.ServerProxy("http://moinmaster.wikiwikiweb.de/FrontPage?action=xmlrpc")
-user = "YourWikiName"
-password = "yourbasicauthpassword"
-srctrans = BasicAuthTransport(user,password)
-dsttrans = BasicAuthTransport(user,password)
-srcwiki = xmlrpclib.ServerProxy("http://devel.linuxwiki.org/moin--cvs/__xmlrpc/?action=xmlrpc2", transport=srctrans)
-dstwiki = xmlrpclib.ServerProxy("http://devel.linuxwiki.org/moin--cvs/__xmlrpc/?action=xmlrpc2", transport=dsttrans)
-
-def transferpage(srcwiki, dstwiki, pagename):
-    pagedata = srcwiki.getPage(pagename)
-    dstwiki.putPage(pagename, pagedata)
-    print "Transferred %s." % pagename.encode('ascii', 'replace')
-
-def run():
-    allsystempagesgroup = 'AllSystemPagesGroup'
-    transferpage(srcwiki, dstwiki, allsystempagesgroup)
-    allgrouppages = srcwiki.listLinks(allsystempagesgroup)
-    for langgrouppage in allgrouppages:
-        pagename = langgrouppage['name']
-        transferpage(srcwiki, dstwiki, pagename)
-        pages = srcwiki.listLinks(pagename)
-        for page in pages:
-            transferpage(srcwiki, dstwiki, page['name'])
-
-if __name__ == "__main__":
-    run()
-
--- a/MoinMoin/scripts/xmlrpc-tools/putPageTest.py	Sat Mar 25 11:18:58 2006 +0000
+++ /dev/null	Thu Jan 01 00:00:00 1970 +0000
@@ -1,38 +0,0 @@
-#!/usr/bin/env python
-# -*- coding: utf-8 -*-
-"""
-This script is just an example how to put data into a wiki using xmlrpc.
-We use wiki rpc v2 here.
-
-This script only works if you edited MoinMoin/wikirpc.py (see the comment
-in the putPage handler) to not require http auth (trusted user) and to
-really use the pagename we give.
-
-This can be done for migrating data into an offline moin wiki running on
-localhost - don't put a wiki configured like this on the internet!
-
-GPL software, 2005 Thomas Waldmann
-"""
-def run():
-    import xmlrpclib
-    mywiki = xmlrpclib.ServerProxy("http://localhost/mywiki/?action=xmlrpc2")
-
-    # first a simple test in pure ascii
-    pagename = "ApureAsciiPage"
-    pagedata = "My first test."
-    mywiki.putPage(pagename, pagedata)
-
-    # now let's use some utf-8 encoded pagename and text
-    # this stuff will only look correct if you use utf-8 enabled equipment.
-    pagename = "SomeUtf8Pagename-äöüÄÖÜߢ" # we use some german chars here
-    pagedata = "Some UTF-8 content: äöü ÄÖÜ ß ¢"
-    mywiki.putPage(pagename, pagedata)
-
-    # if you have data in iso-8859-1 (latin1) encoding, then use code similar to:
-    # pagename = latin1pagename.decode('iso-8859-1').encode('utf-8')
-    # pagedata = latin1pagedata.decode('iso-8859-1').encode('utf-8')
-    # mywiki.putPage(pagename, pagedata)
-
-if __name__ == "__main__":
-    run()
-
--- a/MoinMoin/scripts/xmlrpc-tools/wikibackup.py	Sat Mar 25 11:18:58 2006 +0000
+++ /dev/null	Thu Jan 01 00:00:00 1970 +0000
@@ -1,52 +0,0 @@
-#!/usr/bin/env python
-"""
-This script gets all Pages from a wiki via xmlrpc and
-stores them into a backup file. We use wiki rpc v2 here.
-
-Important note:
-
-This script ONLY handles the current versions of the wiki pages.
-
-It does NOT handle:
-    * event or edit logs (page history)
-    * old versions of pages
-    * attachments
-    * user account data
-    * MoinMoin code or config running the wiki
-    
-So this is definitely NOT a complete backup.
-
-GPL software, 2003-08-10 Thomas Waldmann
-"""
-def run():
-    import xmlrpclib
-    from MoinMoin.support.BasicAuthTransport import BasicAuthTransport
-
-    #user = "username"
-    #password = "xxxxxxxx"
-    #srctrans = BasicAuthTransport(user,password)
-    #srcwiki = xmlrpclib.ServerProxy("http://devel.linuxwiki.org/moin--cvs/__xmlrpc/?action=xmlrpc2", transport=srctrans)
-    srcwiki = xmlrpclib.ServerProxy("http://devel.linuxwiki.org/moin--cvs/?action=xmlrpc2")
-
-    try:
-        import cPickle as pickle
-    except ImportError:
-        import pickle
-
-    # Set pickle protocol, see http://docs.python.org/lib/node64.html
-    PICKLE_PROTOCOL = pickle.HIGHEST_PROTOCOL
-
-    backup={}
-    allpages = srcwiki.getAllPages()
-    for pagename in allpages:
-        pagedata = srcwiki.getPage(pagename)
-        print "Got %s." % pagename
-        backup[pagename]=pagedata
-
-    backupfile = open("wikibackup.pickle","w")
-    pickle.dump(backup, backupfile, PICKLE_PROTOCOL)
-    backupfile.close()
-
-if __name__ == "__main__":
-    run()
-
--- a/MoinMoin/scripts/xmlrpc-tools/wikirestore.py	Sat Mar 25 11:18:58 2006 +0000
+++ /dev/null	Thu Jan 01 00:00:00 1970 +0000
@@ -1,49 +0,0 @@
-#!/usr/bin/env python
-"""
-This script reads a wikibackup.pickle file and puts
-all Pages contained there into a wiki via xmlrpc.
-We use wiki rpc v2 here.
-
-Important note:
-
-This script ONLY handles the current versions of the wiki pages.
-
-It does NOT handle:
-    * event or edit logs (page history)
-    * old versions of pages
-    * attachments
-    * user account data
-    * MoinMoin code or config running the wiki
-    
-So this is definitely NOT a complete restore.
-
-GPL software, 2003-10-24 Thomas Waldmann
-"""
-def run():
-    import xmlrpclib
-    from MoinMoin.support.BasicAuthTransport import BasicAuthTransport
-
-    user = "ThomasWaldmann"
-    password = "xxxxxxxxxxxx"
-    dsttrans = BasicAuthTransport(user,password)
-    dstwiki = xmlrpclib.ServerProxy("http://devel.linuxwiki.org/moin--cvs/__xmlrpc/?action=xmlrpc2", transport=dsttrans)
-    #dstwiki = xmlrpclib.ServerProxy("http://devel.linuxwiki.org/moin--cvs/?action=xmlrpc2")
-
-    try:
-        import cPickle as pickle
-    except ImportError:
-        import pickle
-
-    backupfile = open("wikibackup.pickle","r")
-    backup = pickle.load(backupfile)
-    backupfile.close()
-
-    allpages = backup.keys()
-    for pagename in allpages:
-        pagedata = backup[pagename]
-        dstwiki.putPage(pagename, pagedata) # TODO: add error check
-        print "Put %s." % pagename
-
-if __name__ == "__main__":
-    run()
-
--- a/MoinMoin/userform.py	Sat Mar 25 11:18:58 2006 +0000
+++ b/MoinMoin/userform.py	Sat Mar 25 22:31:55 2006 +0000
@@ -138,7 +138,7 @@
                 return _("Please provide your email address. If you lose your"
                          " login information, you can get it by email.")
 
-            # Email should be unique - see also MoinMoin/scripts/moin_usercheck.py
+            # Email should be unique - see also MoinMoin/script/accounts/moin_usercheck.py
             if theuser.email and self.request.cfg.user_email_unique:
                 users = user.getUserList(self.request)
                 for uid in users:
@@ -230,7 +230,7 @@
                 return _("Please provide your email address. If you lose your"
                          " login information, you can get it by email.")
 
-            # Email should be unique - see also MoinMoin/scripts/moin_usercheck.py
+            # Email should be unique - see also MoinMoin/script/accounts/moin_usercheck.py
             if theuser.email and self.request.cfg.user_email_unique:
                 users = user.getUserList(self.request)
                 for uid in users:
--- a/MoinMoin/util/filesys.py	Sat Mar 25 11:18:58 2006 +0000
+++ b/MoinMoin/util/filesys.py	Sat Mar 25 22:31:55 2006 +0000
@@ -14,6 +14,13 @@
 ### Misc Helpers
 #############################################################################
 
+def chmod(name, mode, catchexception=True):
+    try:
+        return os.chmod(name, mode)
+    except OSError:
+        if not catchexception:
+            raise
+
 def makedirs(name, mode=0777):
     """ Super-mkdir; create a leaf directory and all intermediate ones.
     
--- a/MoinMoin/wikiutil.py	Sat Mar 25 11:18:58 2006 +0000
+++ b/MoinMoin/wikiutil.py	Sat Mar 25 22:31:55 2006 +0000
@@ -7,9 +7,10 @@
 """
     
 import os, re, difflib, urllib, cgi
+import codecs
 
 from MoinMoin import util, version, config
-from MoinMoin.util import pysupport
+from MoinMoin.util import pysupport, filesys
 
 # Exceptions
 class InvalidFileNameError(Exception):
@@ -37,7 +38,6 @@
     """
 
     import locale
-    import codecs
     cur_charset = locale.getdefaultlocale()[1]
     try:
         return unicode(text, 'utf-8')
@@ -393,7 +393,91 @@
         This must ONLY be used for display purposes.
     """
     return v/1000000.0
+
+
+# This is the list of meta attribute names to be treated as integers.
+# IMPORTANT: do not use any meta attribute names with "-" (or any other chars
+# invalid in python attribute names), use e.g. _ instead.
+INTEGER_METAS = ['current', 'revision', # for page storage (moin 2.0)
+                 'data_format_revision', # for data_dir format spec (use by mig scripts)
+                ]
+
+class MetaDict(dict):
+    """ store meta informations as a dict """
+    def __init__(self, metafilename):
+        """ create a MetaDict from metafilename """
+        dict.__init__(self)
+        self.metafilename = metafilename
+        self.dirty = False
+        self.loaded = False
+
+    def _get_meta(self):
+        """ get the meta dict from an arbitrary filename.
+            does not keep state, does uncached, direct disk access.
+            @param metafilename: the name of the file to read
+            @return: dict with all values or {} if empty or error
+        """
+        # XXX what does happen if the metafile is being written to in another process?
+        try:
+            metafile = codecs.open(self.metafilename, "r", "utf-8")
+            meta = metafile.read() # this is much faster than the file's line-by-line iterator
+            metafile.close()
+        except IOError:
+            meta = u''
+        for line in meta.splitlines():
+            key, value = line.split(':', 1)
+            value = value.strip()
+            if key in INTEGER_METAS:
+                value = int(value)
+            dict.__setitem__(self, key, value)
+        self.loaded = True
     
+    def _put_meta(self):
+        """ put the meta dict into an arbitrary filename.
+            does not keep or modify state, does uncached, direct disk access.
+            @param metafilename: the name of the file to write
+            @param metadata: dict of the data to write to the file
+        """
+        meta = []
+        for key, value in self.items():
+            if key in INTEGER_METAS:
+                value = str(value)
+            meta.append("%s: %s" % (key, value))
+        meta = '\n'.join(meta)
+        # XXX what does happen if the metafile is being read or written to in another process?
+        metafile = codecs.open(self.metafilename, "w", "utf-8")
+        metafile.write(meta)
+        metafile.close()
+        filesys.chmod(self.metafilename, 0666 & config.umask)
+        self.dirty = False
+
+    def sync(self, mtime_usecs=None):
+        """ sync the in-memory dict to disk (if dirty) """
+        if self.dirty:
+            if not mtime_usecs is None:
+                self.__setitem__('mtime', str(mtime_usecs))
+            self._put_meta()
+
+    def __getitem__(self, key):
+        try:
+            return dict.__getitem__(self, key)
+        except KeyError:
+            if not self.loaded:
+                self._get_meta() # lazy loading of metadata
+                return dict.__getitem__(self, key)
+            else:
+                raise
+
+    def __setitem__(self, key, value):
+        try:
+            oldvalue = dict.__getitem__(self, key)
+        except KeyError:
+            oldvalue = None
+        if value != oldvalue:
+            dict.__setitem__(self, key, value)
+            self.dirty = True
+
+
 #############################################################################
 ### InterWiki
 #############################################################################
@@ -719,7 +803,7 @@
         return importBuiltinPlugin(kind, name, function)
 
 
-def importWikiPlugin(cfg, kind, name, function):
+def importWikiPlugin(cfg, kind, name, function="execute"):
     """ Import plugin from the wiki data directory
     
     See importPlugin docstring.
@@ -730,7 +814,7 @@
     return importNameFromPlugin(moduleName, function)
 
 
-def importBuiltinPlugin(kind, name, function):
+def importBuiltinPlugin(kind, name, function="execute"):
     """ Import builtin plugin from MoinMoin package 
     
     See importPlugin docstring.
--- a/docs/CHANGES	Sat Mar 25 11:18:58 2006 +0000
+++ b/docs/CHANGES	Sat Mar 25 22:31:55 2006 +0000
@@ -36,6 +36,7 @@
 Version 1.5.current:
 
   New features:
+    * HINT: please read README.migration
     * Login and Logout are actions now, therefore you stay on the page where
       you were before.
     * UserPreferences is also an action now and moved from top border (in
@@ -69,8 +70,6 @@
     * Added ImageLink macro. Thanks to Jeff Kunce, Marcin Zalewski, Reimar
       Bauer and Chong-Dae Park for working on it.
     * Lupy stuff (still experimental, partly broken and disabled by default):
-      HINT: Please update your data/plugin/ directory with the "filter" directory
-            as you see in wiki/data/plugin/filter.
       * Attachment search using lupy (lupy_search = 1 in your config)
         Title search will also search attachment filenames.
         Full text search will also search attachment contents.
@@ -93,6 +92,22 @@
         If we succeed, we will replace Lupy stuff by some Xapian interface
         code in moin.
         But: the filters will likely stay, as we also need them with Xapian.
+      * A new MoinMoin script interface was introduced:
+        
+        Syntax: moin [options] <cmdmodule> <cmdname> [options]
+        
+        For example:
+        moin --config-dir=/my/cfgdir --wiki-url=wiki.example.org/ \
+             export dump --page=WikiSandBox
+        
+        This will call the "moin" script, which will use the --config-dir and
+        --wiki-url options to initialize, then it will go to the MoinMoin.script
+        module, import the export.dump module from there and run it, providing
+        the additional --page value to it.
+
+        The old scripts that have not been migrated to this new interface can
+        still be found in MoinMoin/script/old/ - including the old migration
+        scripts.
 
   Bugfixes:
     * cookie_lifetime didn't work comfortable for low values. The cookie was
--- a/docs/README.migration	Sat Mar 25 11:18:58 2006 +0000
+++ b/docs/README.migration	Sat Mar 25 22:31:55 2006 +0000
@@ -1,10 +1,42 @@
-1.3 to 1.5 migration
-====================
+Post 1.5.3 new style migration
+==============================
 
-Just read CHANGES, you don't need to read the stuff below.
+First you have to do all steps described in the sections below or it won't work.
 
-1.2 to 1.3 migration docs
-=========================
+After you have switched to new style migration, the procedure will be the same
+with every moin upgrade, simply run a command similar to this:
+
+sudo -u www-data moin --config-dir=/path/to/config_dir --wiki-url=wiki.example.org/ migration data 
+
+The new style mig stuff will then load the config for that wiki, get into its
+data_dir, read the meta file content and determine what it has to do internally.
+
+1.5.3 migration
+===============
+
+First make sure you have run all the old mig scripts ONCE (and only once) on
+your data dirs.
+
+The old style stuff moved here: MoinMoin/script/old/migration/
+
+In that directory, there is also a new 152_to_1050300.py mig script - you
+need to run it as the last mig script to switch to new style mig scripts.
+It puts a file "meta" into your data dirs that holds the data_format_revision
+value. The new style mig scripts use that value to make things much simpler for
+you in the future.
+
+After this, please continue in section "Post 1.5.3 new style migration".
+
+1.3.4/1.3.5 migration
+=====================
+We added some mig scripts in moin 1.3.4. So if you have done the 1.2 to 1.3
+migration with some earlier moin version (like 1.3.3), then please run the
+new scripts, too:
+ * 12_to_13_mig10.py
+ * 12_to_13_mig11.py
+
+1.2 to 1.3 migration
+====================
 
 Migration from 1.2 to 1.3 is done by those basic steps:
  1. make a backup
--- a/setup.py	Sat Mar 25 11:18:58 2006 +0000
+++ b/setup.py	Sat Mar 25 22:31:55 2006 +0000
@@ -103,7 +103,7 @@
         The scripts are created in an uniform scheme: they start the
         run() function in the module
 
-            <packagename>.scripts.<mangled_scriptname>
+            <packagename>.script.<mangled_scriptname>
 
         The mangling of script names replaces '-' and '/' characters
         with '-' and '.', so that they are valid module paths. 
@@ -145,12 +145,12 @@
             try:
                 if sys.platform == "win32":
                     file.write('@echo off\n'
-                        'if NOT "%%_4ver%%" == "" %(python)s -c "from %(package)s.scripts.%(module)s import run; run()" %%$\n'
-                        'if     "%%_4ver%%" == "" %(python)s -c "from %(package)s.scripts.%(module)s import run; run()" %%*\n'
+                        'if NOT "%%_4ver%%" == "" %(python)s -c "from %(package)s.script.%(module)s import run; run()" %%$\n'
+                        'if     "%%_4ver%%" == "" %(python)s -c "from %(package)s.script.%(module)s import run; run()" %%*\n'
                         % script_vars)
                 else:
                     file.write('#! %(python)s\n'
-                        'from %(package)s.scripts.%(module)s import run\n'
+                        'from %(package)s.script.%(module)s import run\n'
                         'run()\n'
                         % script_vars)
             finally:
@@ -173,7 +173,7 @@
     return script
 
 # build list of scripts from their implementation modules
-moin_scripts = map(scriptname, glob.glob('MoinMoin/scripts/[!_]*.py'))
+moin_scripts = map(scriptname, glob.glob('MoinMoin/script/[!_]*.py'))
 
 
 #############################################################################
@@ -198,20 +198,22 @@
         'MoinMoin',
         'MoinMoin.action',
         'MoinMoin.converter',
+        'MoinMoin.filter',
         'MoinMoin.formatter',
         'MoinMoin.i18n',
         'MoinMoin.logfile',
         'MoinMoin.macro',
         'MoinMoin.parser',
         'MoinMoin.processor',
-        'MoinMoin.scripts',
-        'MoinMoin.scripts.accounts',
-        'MoinMoin.scripts.migration',
-        'MoinMoin.scripts.reducewiki',
-        'MoinMoin.scripts.unicode',
-        'MoinMoin.scripts.xmlrpc-tools',
-        'MoinMoin.scripts.packages',
-        'MoinMoin.scripts.import',
+        'MoinMoin.script',
+        'MoinMoin.script.accounts',
+        'MoinMoin.script.cli',
+        'MoinMoin.script.migration',
+        'MoinMoin.script.reducewiki',
+        'MoinMoin.script.unicode',
+        'MoinMoin.script.xmlrpc-tools',
+        'MoinMoin.script.packages',
+        'MoinMoin.script.import',
         'MoinMoin.server',
         'MoinMoin.stats',
         'MoinMoin.support',