changeset 847:b2542a3cfefe storage-ng

remove current storage code
author Thomas Waldmann <tw AT waldmann-edv DOT de>
date Tue, 20 Sep 2011 23:52:38 +0200
parents 7f0a56769cbe
children ebd15cde707e
files MoinMoin/storage/__init__.py MoinMoin/storage/_tests/__init__.py MoinMoin/storage/_tests/test_backends.py MoinMoin/storage/_tests/test_backends_flatfile.py MoinMoin/storage/_tests/test_backends_fs.py MoinMoin/storage/_tests/test_backends_fs19.py MoinMoin/storage/_tests/test_backends_fs2.py MoinMoin/storage/_tests/test_backends_hg.py MoinMoin/storage/_tests/test_backends_memory.py MoinMoin/storage/_tests/test_backends_router.py MoinMoin/storage/_tests/test_backends_sqla.py MoinMoin/storage/_tests/test_indexing.py MoinMoin/storage/_tests/test_middleware_acl.py MoinMoin/storage/_tests/test_serialization.py MoinMoin/storage/_tests/tests_backend_api.py MoinMoin/storage/backends/__init__.py MoinMoin/storage/backends/_flatutils.py MoinMoin/storage/backends/_fsutils.py MoinMoin/storage/backends/fileserver.py MoinMoin/storage/backends/flatfile.py MoinMoin/storage/backends/fs.py MoinMoin/storage/backends/fs19.py MoinMoin/storage/backends/fs19_logfile.py MoinMoin/storage/backends/fs2.py MoinMoin/storage/backends/hg.py MoinMoin/storage/backends/memory.py MoinMoin/storage/backends/sqla.py MoinMoin/storage/middleware/__init__.py MoinMoin/storage/middleware/acl.py MoinMoin/storage/middleware/indexing.py MoinMoin/storage/middleware/router.py MoinMoin/storage/middleware/serialization.py
diffstat 32 files changed, 0 insertions(+), 9807 deletions(-) [+]
line wrap: on
line diff
--- a/MoinMoin/storage/__init__.py	Sat Sep 10 03:23:36 2011 +0200
+++ /dev/null	Thu Jan 01 00:00:00 1970 +0000
@@ -1,930 +0,0 @@
-# Copyright: 2008 MoinMoin:ChristopherDenter
-# Copyright: 2008 MoinMoin:JohannesBerg
-# Copyright: 2009-2010 MoinMoin:ThomasWaldmann
-# License: GNU GPL v2 (or any later version), see LICENSE.txt for details.
-
-"""
-    MoinMoin - Backends - Storage API Definition.
-
-    The storage API consists of the classes defined in this module. That is:
-    Backend, Item, Revision, NewRevision and StoredRevision.
-
-    A concrete backend implements the abstract methods defined by the API,
-    but also uses concrete methods that have already been defined in this
-    module.
-    A backend is a collection of items. Examples for backends include SQL,
-    mercurial or filesystem. All of those are means to store data.
-
-    Items are the units you store within those backends. You can store content
-    of arbitrary type in an item, e.g. text, images or even films.
-
-    An item itself has revisions and metadata. For instance, you can use that
-    to show a diff between two `versions` of a page, where the page "Foo" is
-    represented by an item and the two versions are represented by two
-    revisions of that item.
-
-    Metadata is data that describes other data. An item has metadata. Each
-    revision has metadata as well. E.g. "Which user created this revision?"
-    would be something stored in the metadata of a revision, while "Who created
-    this page in the first place?" would be answered by looking at the metadata
-    of the first revision. Thus, an item basically is a collection of revisions
-    which contain the content for the item. The last revision represents the most
-    recent contents. A stored item can have metadata or revisions, or both.
-
-    For normal operation, revision data and metadata are immutable as soon as the
-    revision is committed to storage (by calling the commit() method on the item
-    that holds the revision), thus making it a StoredRevision.
-    Item metadata, on the other hand, as infrequently used as it may be, is mutable.
-    Hence, it can only be modified under a read lock.
-"""
-
-
-import os
-import sys
-import shutil
-import time
-
-from MoinMoin import log
-logging = log.getLogger(__name__)
-
-from UserDict import DictMixin
-from MoinMoin.storage.error import RevisionNumberMismatchError, AccessError, \
-                                   BackendError, NoSuchItemError, \
-                                   RevisionAlreadyExistsError, ItemAlreadyExistsError
-
-from MoinMoin.config import SIZE, MTIME, HASH_ALGORITHM
-
-import hashlib
-
-
-class Backend(object):
-    """
-    This class abstracts access to backends. If you want to write a specific
-    backend, say a mercurial backend, you have to implement the methods below.
-    A backend knows of its items and can perform several item related operations
-    such as get_item, create_item, etc.
-    """
-    #
-    # If you need to write a backend it is sufficient
-    # to implement the methods of this class. That
-    # way you don't *have to* implement the other classes
-    # like Item and Revision as well. Though, if you want
-    # to, you can do it as well.
-    # Assuming my_item is instanceof(Item), when you call
-    # my_item.create_revision(42), internally the
-    # _create_revision() method of the item's backend is
-    # invoked and the item passes itself as parameter.
-    #
-    def __init__(self, *args, **kw):
-        """
-        Create the backend instance.
-        """
-        pass
-
-    def close(self):
-        """
-        Close all resources the backend is using.
-        """
-        pass
-
-    def get_item(self, itemname):
-        """
-        Returns item object or raises Exception if that item does not exist.
-
-        When implementing this, don't rely on has_item unless you've overridden it.
-
-        :type itemname: unicode
-        :param itemname: The name of the item we want to get.
-        :rtype: item object
-        :raises NoSuchItemError: No item with name 'itemname' is known to this backend.
-        """
-        raise NotImplementedError()
-
-    def has_item(self, itemname):
-        """
-        Override this method!
-
-        This method is added for convenience. With it you don't need to try get_item
-        and catch an exception that may be thrown if the item doesn't exist yet.
-
-        :type itemname: unicode
-        :param itemname: The name of the item of which we want to know whether it exists.
-        :rtype: bool
-        """
-        try:
-            self.get_item(itemname)
-            return True
-        except NoSuchItemError:
-            return False
-
-    def create_item(self, itemname):
-        """
-        Creates an item with a given itemname. If that item already exists,
-        raise an exception.
-
-        :type itemname: unicode
-        :param itemname: Name of the item we want to create.
-        :rtype: item object
-        :raises ItemAlreadyExistsError: The item you were trying to create already exists.
-        """
-        raise NotImplementedError()
-
-    def iteritems(self):
-        """
-        Iterate over all items.
-
-        May use an index internally to optimize.
-
-        :rtype: iterator of item objects
-        """
-        raise NotImplementedError()
-
-    def iter_items_noindex(self):
-        """
-        Iterate over all items.
-
-        Must not use an index as this method is used to *build* the index.
-
-        :rtype: iterator of item objects
-        """
-        raise NotImplementedError()
-
-    def _get_revision(self, item, revno):
-        """
-        For a given item and revision number, return the corresponding revision
-        of that item.
-        Note: If you pass -1 as revno, this shall return the latest revision of the item.
-
-        :type item: Object of class Item.
-        :param item: The Item on which we want to operate.
-        :type revno: int
-        :param revno: Indicate which revision is wanted precisely. If revno is
-        -1, return the most recent revision.
-        :rtype: Object of class Revision
-        :raises NoSuchRevisionError: No revision with that revno was found on item.
-        """
-        raise NotImplementedError()
-
-    def _list_revisions(self, item):
-        """
-        For a given item, return a list containing all revision numbers (as ints)
-        of the revisions the item has. The list must be ordered, starting with
-        the oldest revision number.
-        Since we allow to totally destroy certain revisions, list_revisions does
-        not need to return subsequent, but only monotone revision numbers.
-
-        :type item: Object of class Item.
-        :param item: The Item on which we want to operate.
-        :returns: list of ints (possibly empty)
-        """
-        raise NotImplementedError()
-
-    def _create_revision(self, item, revno):
-        """
-        Takes an item object and creates a new revision. Note that you need to pass
-        a revision number for concurrency reasons. The revno passed must be
-        greater than the revision number of the item's most recent revision.
-        The newly created revision object is returned to the caller.
-
-        :type item: Object of class Item.
-        :param item: The Item on which we want to operate.
-        :type revno: int
-        :param revno: Indicate which revision we want to create.
-        @precondition: item.get_revision(-1).revno < revno
-        :returns: Object of class Revision.
-        :raises RevisionAlreadyExistsError: Raised if a revision with that number
-        already exists on item.
-        :raises RevisionNumberMismatchError: Raised if precondition is not
-        fulfilled.
-        """
-        raise NotImplementedError()
-
-    def _destroy_revision(self, revision):
-        """
-        Similarly to self._destroy_item. The given revision is completely destroyed.
-        As this is an irreversible action, great care must be taken when performing it.
-
-        In case the revision has already been destroyed by someone else (e.g. another
-        process) this method should just pass silently as the job is already done.
-
-        If the revision cannot be destroyed for technical reasons (e.g. missing permissions
-        on disk), this method shall raise a CouldNotDestroyError.
-
-        Note: Again, backends not capable of really erasing something should at the very
-              least ignore the existence of the revision in question. (The only hint will
-              be the gap in item.list_revisions().
-
-        :type revision: Object of class StoredRevision
-        :param revision: The revision we want to destroy completely.
-        :raises CouldNotDestroyError: Raised in case the revision could not be destroyed.
-        """
-        raise NotImplementedError()
-
-    def _rename_item(self, item, newname):
-        """
-        Renames a given item. Raises Exception if the item you are trying to rename
-        does not exist or if the newname is already chosen by another item.
-
-        :type item: Object of class Item.
-        :param item: The Item on which we want to operate.
-        :type newname: string
-        :param newname: Name of item after this operation has succeeded.
-        @precondition: self.has_item(newname) == False
-        @postcondition: self.has_item(newname) == True
-        :raises ItemAlreadyExistsError: Raised if an item with name 'newname'
-        already exists.
-        :returns: None
-        """
-        raise NotImplementedError()
-
-    def _commit_item(self, revision):
-        """
-        Commits the changes that have been done to a given item. That is, after you
-        created a revision on that item and filled it with data you still need to
-        commit() it. You need to pass the revision you want to commit. The item
-        can be looked up by the revision's 'item' property.
-
-        :type revision: Object of class NewRevision.
-        :param revision: The revision we want to commit to  storage.
-        :returns: None
-        """
-        raise NotImplementedError()
-
-    def _rollback_item(self, revision):
-        """
-        This method is invoked when external events happen that cannot be handled in a
-        sane way and thus the changes that have been made must be rolled back.
-
-        :type revision: Object of class NewRevision.
-        :param revision: The revision we want to roll back.
-        :returns: None
-        """
-        raise NotImplementedError()
-
-    def _destroy_item(self, item):
-        """
-        Use this method carefully!
-
-        This method attempts to completely *destroy* an item with all its revisions and
-        metadata. After that, it will be impossible to access the item again via the
-        storage API. This is very different from the deletion a user can perform on
-        a wiki item, as such a deletion does not really delete anything from disk but
-        just hides the former existence of the item. Such a deletion is undoable, while
-        having destroyed an item is not.
-
-        In case the item has already been destroyed by someone else (e.g. another process)
-        this method should just pass silently as the job is already done.
-
-        If the item cannot be destroyed for technical reasons (e.g. missing permissions
-        on disk), this method shall raise a CouldNotDestroyError.
-
-        Note: Several backends (in particular those based on VCS) do not, by their nature,
-              support erasing any content that has been put into them at some point.
-              Those backends then need to emulate erasure as best they can. They should at
-              least ignore the former existence of the item completely.
-              A wiki admin must be aware that when using such a backend, he either needs
-              to invoke an erasure (clone old, dirtied backend to new, fresh backend) script
-              from time to time to get rid of the stuff, or not choose a backend of this
-              kind (in case disk space is limited and large items are uploaded).
-
-        :type item: Object of class Item
-        :param item: The item we want to destroy
-        :raises CouldNotDestroyError: Raised in case the revision could not be destroyed.
-        :returns: None
-        """
-        # XXX Should this perhaps return a bool indicating whether erasure was actually performed on disk or something like that?
-        raise NotImplementedError()
-
-    def _change_item_metadata(self, item):
-        """
-        This method is used to acquire a lock on an item. This is necessary to prevent
-        side effects caused by concurrency.
-
-        You need to call this method before altering the metadata of the item.
-        E.g.:   item.change_metadata()  # Invokes this method
-                item["metadata_key"] = "metadata_value"
-                item.publish_metadata()
-
-        As you can see, the lock acquired by this method is released by calling
-        the publish_metadata() method on the item.
-
-        :type item: Object of class Item.
-        :param item: The Item on which we want to operate.
-        @precondition: item not already locked
-        :returns: None
-        """
-        raise NotImplementedError()
-
-    def _publish_item_metadata(self, item):
-        """
-        This method tries to release a lock on the given item and put the newly
-        added Metadata of the item to storage.
-
-        You need to call this method after altering the metadata of the item.
-        E.g.:   item.change_metadata()
-                item["metadata_key"] = "metadata_value"
-                item.publish_metadata()  # Invokes this method
-
-        The lock this method releases is acquired by the _change_metadata method.
-
-        :type item: Object of class Item.
-        :param item: The Item on which we want to operate.
-        :raises AssertionError: item was not locked XXX use more special exception
-        :returns: None
-        """
-        raise NotImplementedError()
-
-    def _read_revision_data(self, revision, chunksize):
-        """
-        Called to read a given amount of bytes of a revision's data. By default, all
-        data is read.
-
-        :type revision: Object of class StoredRevision.
-        :param revision: The revision on which we want to operate.
-        :type chunksize: int
-        :param chunksize: amount of bytes to be read at a time
-        :returns: string
-        """
-        raise NotImplementedError()
-
-    def _write_revision_data(self, revision, data):
-        """
-        When this method is called, the passed data is written to the revision's data.
-
-        :type revision: Object of class NewRevision.
-        :param revision: The revision on which we want to operate.
-        :type data: str
-        :param data: The data to be written on the revision.
-        :returns: None
-        """
-        raise NotImplementedError()
-
-    def _get_item_metadata(self, item):
-        """
-        Load metadata for a given item, return dict.
-
-        :type item: Object of class Item.
-        :param item: The Item on which we want to operate.
-        :returns: dict of metadata key / value pairs.
-        """
-        raise NotImplementedError()
-
-    def _get_revision_metadata(self, revision):
-        """
-        Load metadata for a given revision, returns dict.
-
-        :type revision: Object of a subclass of Revision.
-        :param revision: The revision on which we want to operate.
-        :returns: dict of metadata key / value pairs.
-        """
-        raise NotImplementedError()
-
-    def _seek_revision_data(self, revision, position, mode):
-        """
-        Set the revision's cursor on the revision's data.
-
-        :type revision: Object of StoredRevision.
-        :param revision: The revision on which we want to operate.
-        :type position: int
-        :param position: Indicates where to position the cursor
-        :type mode: int
-        :param mode: 0 for 'absolute positioning', 1 to seek 'relatively to the
-        current position', 2 to seek 'relative to the files end'.
-        :returns: None
-        """
-        raise NotImplementedError()
-
-    def _tell_revision_data(self, revision):
-        """
-        Tell the revision's cursor's position on the revision's data.
-
-        :type revision: Object of type StoredRevision.
-        :param revision: The revision on which tell() was invoked.
-        :returns: int indicating the cursor's position.
-        """
-        raise NotImplementedError()
-
-    # item copying
-    def _copy_item_progress(self, verbose, st):
-        if verbose:
-            progress_char = dict(converts='.', skips='s', fails='F')
-            sys.stdout.write(progress_char[st])
-
-    def copy_item(self, item, verbose=False, name=None):
-        def same_revision(rev1, rev2):
-            for k, v in rev1.iteritems():
-                if rev2[k] != v:
-                    return False
-            return True
-
-        if name is None:
-            name = item.name
-
-        status = dict(converts={}, skips={}, fails={})
-        revisions = item.list_revisions()
-
-        try:
-            new_item = self.get_item(name)
-        except NoSuchItemError:
-            new_item = self.create_item(name)
-
-        # This only uses the metadata of the item that we clone.
-        # Arguments for doing this:
-        #   * If old stuff ends up in item after clone, that'd be counter intuitive
-        #   * When caching some data from the latest rev in the item, we don't want the old stuff.
-        new_item.change_metadata()
-        for k, v in item.iteritems():
-            new_item[k] = v
-        new_item.publish_metadata()
-
-        for revno in revisions:
-            revision = item.get_revision(revno)
-
-            try:
-                new_rev = new_item.create_revision(revision.revno)
-            except RevisionAlreadyExistsError:
-                existing_revision = new_item.get_revision(revision.revno)
-                st = same_revision(existing_revision, revision) and 'skips' or 'fails'
-            else:
-                for k, v in revision.iteritems():
-                    new_rev[k] = v
-                shutil.copyfileobj(revision, new_rev)
-                new_item.commit()
-                st = 'converts'
-            try:
-                status[st][name].append(revision.revno)
-            except KeyError:
-                status[st][name] = [revision.revno]
-            self._copy_item_progress(verbose, st)
-
-        return status['converts'], status['skips'], status['fails']
-
-    # cloning support
-    def _clone_before(self, source, verbose):
-        if verbose:
-            # reopen stdout file descriptor with write mode
-            # and 0 as the buffer size (unbuffered)
-            sys.stdout = os.fdopen(os.dup(sys.stdout.fileno()), 'w', 0)
-            sys.stdout.write("[converting %s to %s]: " % (source.__class__.__name__,
-                                                          self.__class__.__name__, ))
-
-    def _clone_after(self, source, verbose):
-        if verbose:
-            sys.stdout.write("\n")
-
-    def clone(self, source, verbose=False, only_these=[]):
-        """
-        Create exact copy of source Backend with all the Items into THIS
-        backend. If you don't want all items, you can give an item name list
-        in only_these.
-
-        Note: this is a generic implementation, you can maybe specialize it to
-              make it faster in your backend implementation (esp. if creating
-              new items is expensive).
-
-        Return a tuple consisting of three dictionaries (Item name:Revision
-        numbers list): converted, skipped and failed Items dictionary.
-        """
-        def item_generator(source, only_these):
-            if only_these:
-                for name in only_these:
-                    try:
-                        yield source.get_item(name)
-                    except NoSuchItemError:
-                        # TODO Find out why this fails sometimes.
-                        #sys.stdout.write("Unable to copy %s\n" % itemname)
-                        pass
-            else:
-                for item in source.iteritems():
-                    yield item
-
-        self._clone_before(source, verbose)
-
-        converts, skips, fails = {}, {}, {}
-        for item in item_generator(source, only_these):
-            c, s, f = self.copy_item(item, verbose)
-            converts.update(c)
-            skips.update(s)
-            fails.update(f)
-
-        self._clone_after(source, verbose)
-        return converts, skips, fails
-
-
-class Item(object, DictMixin):
-    """
-    An item object collects the information of an item (e.g. a page) that is
-    stored in persistent storage. It has metadata and revisions.
-    An item object is just a proxy to the information stored in the backend.
-    It doesn't necessarily live very long.
-
-    Do NOT create instances of this class directly, but use backend.get_item
-    or backend.create_item!
-    """
-    def __init__(self, backend, itemname):
-        """
-        Initialize an item. Memorize the backend to which it belongs.
-
-        :type backend: Object of a subclass of Backend.
-        :param backend: The backend that stores this item.
-        :type itemname: unicode
-        :param itemname: The name representing this item in the backend. Unique
-        within the backend.
-        """
-        self._backend = backend
-        self._name = itemname
-        self._locked = False
-        self._read_accessed = False
-        self._metadata = None  # Will be loaded lazily upon first real access.
-        self._uncommitted_revision = None
-
-    def get_name(self):
-        """
-        name is a read-only property of this class.
-        """
-        return self._name
-
-    name = property(get_name, doc="This is the name of this item. This attribute is read-only.")
-
-    @property
-    def next_revno(self):
-        """
-        The revno of the most recent committed revision + 1.
-        I.e., the next revision's revno.
-        """
-        revs = self.list_revisions()
-        try:
-            return revs[-1] + 1
-        except IndexError:
-            # No revisions yet (empty sequence)
-            return 0
-
-    def __setitem__(self, key, value):
-        """
-        In order to access the item's metadata you can use the well-known dict-like
-        semantics Python's dictionaries offer. If you want to set a value,
-        my_item["key"] = "value" will do the trick. Note that keys must be of the
-        type string (or unicode).
-        Values must be of the type str, unicode or tuple, in which case every element
-        of the tuple must be a string, unicode or tuple object.
-        You must wrap write accesses to metadata in change_metadata/publish_metadata calls.
-        Keys starting with two underscores are reserved and cannot be used.
-
-        :type key: str or unicode
-        :param key: The keyword that is used to look up the corresponding value.
-        :type value: str, unicode, int, long, float, bool, complex or a nested tuple thereof.
-        :param value: The value that is referenced by the keyword `key` in this
-        specific item's metadata dict.
-        """
-        if not self._locked:
-            raise AttributeError("Cannot write to unlocked metadata")
-        if not isinstance(key, (str, unicode)):
-            raise TypeError("Key must be string type")
-        if key.startswith('__'):
-            raise TypeError("Key must not begin with two underscores")
-        check_value_type_is_valid(value)
-        if self._metadata is None:
-            self._metadata = self._backend._get_item_metadata(self)
-        self._metadata[key] = value
-
-    def __delitem__(self, key):
-        """
-        Delete an item metadata key/value pair.
-
-        :type key: str or unicode
-        :param key: Key identifying a unique key/value pair in this item's metadata.
-        @postcondition: self[key] raises KeyError
-        """
-        if not self._locked:
-            raise AttributeError("Cannot write to unlocked metadata")
-        if key.startswith('__'):
-            raise KeyError(key)
-        if self._metadata is None:
-            self._metadata = self._backend._get_item_metadata(self)
-        del self._metadata[key]
-
-    def __getitem__(self, key):
-        """
-        See __setitem__.__doc__ -- You may use my_item["key"] to get the corresponding
-        metadata value. Note however, that the key you pass must be of type str or unicode.
-
-        :type key: str or unicode
-        :param key: The key refering to the value we want to return.
-        :returns: self._metadata[key]
-        """
-        self._read_accessed = True
-        if not isinstance(key, (unicode, str)):
-            raise TypeError("key must be string type")
-        if key.startswith('__'):
-            raise KeyError(key)
-        if self._metadata is None:
-            self._metadata = self._backend._get_item_metadata(self)
-
-        return self._metadata[key]
-
-    def keys(self):
-        """
-        This method returns a list of all metadata keys of this item (i.e., a list of Strings.)
-        That allows using Python's `for mdkey in itemobj: do_something` syntax.
-
-        :returns: list of metadata keys not starting with two leading underscores
-        """
-        if self._metadata is None:
-            self._metadata = self._backend._get_item_metadata(self)
-
-        return [key for key in self._metadata if not key.startswith("__")]
-
-    def change_metadata(self):
-        """
-        @see: Backend._change_item_metadata.__doc__
-        """
-        if self._uncommitted_revision is not None:
-            raise RuntimeError(("You tried to change the metadata of the item %r but there "
-                                "are uncommitted revisions on that item. Commit first.") % (self.name))
-        if self._read_accessed:
-            raise AccessError("Cannot lock after reading metadata")
-
-        self._backend._change_item_metadata(self)
-        self._locked = True
-
-    def publish_metadata(self):
-        """
-        @see: Backend._publish_item_metadata.__doc__
-        """
-        if not self._locked:
-            raise AccessError("cannot publish without change_metadata")
-        self._backend._publish_item_metadata(self)
-        self._read_accessed = False
-        self._locked = False
-
-    def get_revision(self, revno):
-        """
-        @see: Backend._get_revision.__doc__
-        """
-        return self._backend._get_revision(self, revno)
-
-    def list_revisions(self):
-        """
-        @see: Backend._list_revisions.__doc__
-        """
-        return self._backend._list_revisions(self)
-
-    def rename(self, newname):
-        """
-        @see: Backend._rename_item.__doc__
-        """
-        if not isinstance(newname, (str, unicode)):
-            raise TypeError("Item names must have string type, not %s" % (type(newname)))
-
-        self._backend._rename_item(self, newname)
-        self._name = newname
-
-    def commit(self):
-        """
-        @see: Backend._commit_item.__doc__
-        """
-        rev = self._uncommitted_revision
-        assert rev is not None
-        rev[HASH_ALGORITHM] = unicode(rev._rev_hash.hexdigest())
-        rev[SIZE] = rev._size
-        if MTIME not in rev:
-            rev[MTIME] = int(time.time())
-        self._backend._commit_item(rev)
-        self._uncommitted_revision = None
-
-    def rollback(self):
-        """
-        @see: Backend._rollback_item.__doc__
-        """
-        self._backend._rollback_item(self._uncommitted_revision)
-        self._uncommitted_revision = None
-
-    def create_revision(self, revno):
-        """
-        @see: Backend._create_revision.__doc__
-
-        Please note that we do not require the revnos to be subsequent, but they
-        need to be monotonic. I.e., a sequence like 0, 1, 5, 9, 10 is ok, but
-        neither 0, 1, 1, 2, 3 nor 0, 1, 3, 2, 9 are.
-        This is done so as to allow functionality like unserializing a backend
-        whose item's revisions have been subject to destroy().
-        """
-        if self._locked:
-            raise RuntimeError(("You tried to create revision #%d on the item %r, but there "
-                                "is unpublished metadata on that item. Publish first.") % (revno, self.name))
-        current_revno = self.next_revno - 1
-        if current_revno >= revno:
-            raise RevisionNumberMismatchError("You cannot create a revision with revno %s. Your revno must be greater than " % revno + \
-                                              "the item's last revision, which is %s." % current_revno)
-        if self._uncommitted_revision is not None:
-            return self._uncommitted_revision
-        else:
-            self._uncommitted_revision = self._backend._create_revision(self, revno)
-            return self._uncommitted_revision
-
-    def destroy(self):
-        """
-        @see: Backend._destroy_item.__doc__
-        """
-        return self._backend._destroy_item(self)
-
-
-class Revision(object, DictMixin):
-    """
-    This class serves as superclass for StoredRevision and NewRevision.
-    An object of either subclass represents a revision of an item. An item can have
-    several revisions at a time, one being the most recent revision.
-    This is a principle that is similar to the concepts used in Version Control
-    Systems.
-
-    Each revision object has a creation timestamp in the 'timestamp' property
-    that defaults to None for newly created revisions in which case it will be
-    assigned at commit() time. It is writable for use by converter backends, but
-    care must be taken in that case to create monotone timestamps!
-    """
-    def __init__(self, item, revno):
-        """
-        Initialize the revision.
-
-        :type item: Object of class Item.
-        :param item: The item to which this revision belongs.
-        :type revno: int
-        :param revno: The unique number identifying this revision on the item.
-        :type timestamp: int
-        :param timestamp: int representing the UNIX time this revision was
-        created. (UNIX time: seconds since the epoch, i.e. 1st of January 1970, 00:00 UTC)
-        """
-        self._revno = revno
-        self._item = item
-        self._backend = item._backend
-        self._metadata = None
-
-    def _get_item(self):
-        return self._item
-
-    item = property(_get_item)
-
-    def get_revno(self):
-        """
-        Getter for the read-only revno property.
-        """
-        return self._revno
-
-    revno = property(get_revno, doc=("This property stores the revno of the revision object. "
-                                     "Only read-only access is allowed."))
-
-    @property
-    def timestamp(self):
-        """This property returns the creation timestamp of the revision"""
-        return self[MTIME]
-
-    def _load_metadata(self):
-        self._metadata = self._backend._get_revision_metadata(self)
-
-    def __getitem__(self, key):
-        """
-        @see: Item.__getitem__.__doc__
-        """
-        if not isinstance(key, (unicode, str)):
-            raise TypeError("key must be string type")
-        if key.startswith('__'):
-            raise KeyError(key)
-        if self._metadata is None:
-            self._load_metadata()
-
-        return self._metadata[key]
-
-    def keys(self):
-        """
-        @see: Item.keys.__doc__
-        """
-        if self._metadata is None:
-            self._load_metadata()
-
-        return [key for key in self._metadata if not key.startswith("__")]
-
-    def read(self, chunksize=-1):
-        """
-        @see: Backend._read_revision_data.__doc__
-        """
-        return self._backend._read_revision_data(self, chunksize)
-
-    def seek(self, position, mode=0):
-        """
-        @see: StringIO.StringIO().seek.__doc__
-        """
-        self._backend._seek_revision_data(self, position, mode)
-
-    def tell(self):
-        """
-        @see: StringIO.StringIO().tell.__doc__
-        """
-        return self._backend._tell_revision_data(self)
-
-
-class StoredRevision(Revision):
-    """
-    This is the brother of NewRevision. It allows reading data from a revision
-    that has already been stored in storage. It doesn't allow data manipulation
-    and can only be used for information retrieval.
-
-    Do NOT create instances of this class directly, but use item.get_revision or
-    one of the other methods intended for getting stored revisions.
-    """
-    def __init__(self, item, revno):
-        """
-        Initialize the StoredRevision
-        """
-        Revision.__init__(self, item, revno)
-
-    def __setitem__(self, key, value):
-        """
-        Revision metadata cannot be altered, thus, we raise an Exception.
-        """
-        raise AttributeError("Metadata of already existing revisions may not be altered.")
-
-    def __delitem__(self, key):
-        """
-        Revision metadata cannot be altered, thus, we raise an Exception.
-        """
-        raise AttributeError("Metadata of already existing revisions may not be altered.")
-
-    def destroy(self):
-        """
-        @see: Backend._destroy_revision.__doc__
-        """
-        self._backend._destroy_revision(self)
-
-
-class NewRevision(Revision):
-    """
-    This is basically the same as Revision but with mutable metadata and data properties.
-
-    Do NOT create instances of this class directly, but use item.create_revision.
-    """
-    def __init__(self, item, revno):
-        """
-        Initialize the NewRevision
-        """
-        Revision.__init__(self, item, revno)
-        self._metadata = {}
-        # these values need to be kept uptodate to that item.commit() can
-        # use them to update the metadata of the rev before committing it:
-        self._size = 0
-        self._rev_hash = hashlib.new(HASH_ALGORITHM)
-
-    def __setitem__(self, key, value):
-        """
-        Internal method used for dict-like access to the NewRevisions metadata-dict.
-        Keys starting with two underscores are reserved and cannot be used.
-
-        :type key: str or unicode
-        :param key: The keyword that is used to look up the corresponding value.
-        :type value: str, unicode, int, long, float, bool, complex or a nested tuple thereof.
-        :param value: The value that is referenced by the keyword `key` in this
-        specific items metadata-dict.
-        """
-        if not isinstance(key, (str, unicode)):
-            raise TypeError("Key must be string type")
-        if key.startswith('__'):
-            raise TypeError("Key must not begin with two underscores")
-        check_value_type_is_valid(value)
-
-        self._metadata[key] = value
-
-    def __delitem__(self, key):
-        if key.startswith('__'):
-            raise KeyError(key)
-
-        del self._metadata[key]
-
-    def write(self, data):
-        """
-        @see: Backend._write_revision_data.__doc__
-        """
-        self._size += len(data)
-        self._rev_hash.update(data)
-        self._backend._write_revision_data(self, data)
-
-
-# Little helper function:
-def check_value_type_is_valid(value):
-    """
-    For metadata-values, we allow only immutable types, namely:
-    str, unicode, bool, int, long, float, complex and tuple.
-    Since tuples can contain other types, we need to check the types recursively.
-
-    :type value: str, unicode, int, long, float, complex, tuple
-    :param value: A value of which we want to know if it is a valid metadata value.
-    :returns: bool
-    """
-    accepted = (bool, str, unicode, int, long, float, complex)
-    if isinstance(value, accepted):
-        return True
-    elif isinstance(value, tuple):
-        for element in value:
-            if not check_value_type_is_valid(element):
-                raise TypeError("Value must be one of %s or a nested tuple thereof. Not %r" % (accepted, type(value)))
-        else:
-            return True
-
--- a/MoinMoin/storage/_tests/__init__.py	Sat Sep 10 03:23:36 2011 +0200
+++ /dev/null	Thu Jan 01 00:00:00 1970 +0000
@@ -1,7 +0,0 @@
-# Copyright: 2011 The MoinMoin development team
-# License: GNU GPL v2 (or any later version), see LICENSE.txt for details.
-
-"""
-    MoinMoin - Storage test package.
-"""
-
--- a/MoinMoin/storage/_tests/test_backends.py	Sat Sep 10 03:23:36 2011 +0200
+++ /dev/null	Thu Jan 01 00:00:00 1970 +0000
@@ -1,701 +0,0 @@
-# -*- coding: utf-8 -*-
-# Copyright: 2008 MoinMoin:PawelPacana
-# Copyright: 2008 MoinMoin:ChristopherDenter
-# Copyright: 2008 MoinMoin:JohannesBerg
-# License: GNU GPL v2 (or any later version), see LICENSE.txt for details.
-
-"""
-    MoinMoin - Backend tests
-
-    This module provides class for testing backends. This class tries
-    to cover sane backend usage examples.
-
-    This class should be inherited by descendant backend test classes.
-    Add tests general for all backends here. Your backend-specific tests
-    put in class inherited from this one.
-"""
-
-
-import pytest, re, time
-
-from flask import g as flaskg
-
-from MoinMoin.storage import Item, NewRevision
-from MoinMoin.storage.backends import memory
-from MoinMoin.storage.error import NoSuchItemError, ItemAlreadyExistsError, NoSuchRevisionError, RevisionAlreadyExistsError
-from MoinMoin.config import SIZE
-
-item_names = (u"quite_normal",
-              u"äöüßłóąćółąńśćżź",
-              u"with space",
-              u"name#special(characters?.\,",
-              u"very_long_name_" * 100 + u"ending_1",
-              u"very_long_name_" * 100 + u"ending_2", )
-
-invalid_names = (42, {}, (1, ), [1], )
-
-class BackendTest(object):
-    """
-    Generic class for backend tests.
-
-    Creates a new backend for each test so they can assume to be
-    sandboxed.
-    """
-
-    valid_names = item_names
-    invalid_names = invalid_names
-
-    def setup_method(self, method):
-        self.backend = self.create_backend()
-
-    def teardown_method(self, method):
-        self.kill_backend()
-        self.backend = None
-
-    def create_rev_item_helper(self, name):
-        item = self.backend.create_item(name)
-        item.create_revision(0)
-        item.commit()
-        return item
-
-    def create_meta_item_helper(self, name):
-        item = self.backend.create_item(name)
-        item.change_metadata()
-        item.publish_metadata()
-        return item
-
-    def get_item_check(self, name):
-        item = self.backend.get_item(name)
-        assert item.name == name
-
-    def rename_item_check(self, old_name, new_name):
-        item = self.backend.get_item(old_name)
-        item.rename(new_name)
-        assert item.name == new_name
-        assert self.backend.has_item(new_name)
-        assert not self.backend.has_item(old_name)
-
-    def test_create_get_rename_get_rev_item(self):
-        def create_rev_item(name):
-            item = self.backend.create_item(name)
-            assert item.name == name
-            item.create_revision(0)
-            item.commit()
-            assert self.backend.has_item(name)
-
-        for num, item_name in enumerate(self.valid_names):
-            yield create_rev_item, item_name
-            yield self.get_item_check, item_name
-            new_name = u"renamed_revitem_%d" % num
-            yield self.rename_item_check, item_name, new_name
-            yield self.get_item_check, new_name
-
-    def test_create_get_rename_get_meta_item(self):
-        def create_meta_item(name):
-            item = self.backend.create_item(name)
-            assert item.name == name
-            item.change_metadata()
-            item.publish_metadata()
-            assert self.backend.has_item(name)
-
-        for num, item_name in enumerate(self.valid_names):
-            yield create_meta_item, item_name
-            yield self.get_item_check, item_name
-            new_name = u"renamed_revitem_%d" % num
-            yield self.rename_item_check, item_name, new_name
-            yield self.get_item_check, new_name
-
-    def test_item_rename_to_existing(self):
-        item1 = self.create_rev_item_helper(u"fresh_item")
-        item2 = self.create_rev_item_helper(u"try to rename")
-        pytest.raises(ItemAlreadyExistsError, item1.rename, item2.name)
-
-    def rename_item_invalid_name(self, name, newname):
-        item = self.backend.create_item(name)
-        pytest.raises(TypeError, item.rename, newname)
-
-    def test_item_rename_to_invalid(self):
-        for num, invalid_name in enumerate(self.invalid_names):
-            yield self.rename_item_invalid_name, u"item_%s" % num, invalid_name
-
-    def test_item_rename_threesome(self):
-        item1 = self.create_rev_item_helper(u"item1")
-        item2 = self.create_rev_item_helper(u"item2")
-        item1.create_revision(1)
-        item1.commit()
-        item2.rename(u"item3")
-        item1.rename(u"item2")
-        assert len(item1.list_revisions()) == 2
-
-    def create_item_invalid_name(self, name):
-        pytest.raises(TypeError, self.backend.create_item, name)
-
-    def test_create_item_wrong_itemname(self):
-        for item_name in self.invalid_names:
-            yield self.create_item_invalid_name, item_name
-
-    def test_create_order(self):
-        item1 = self.backend.create_item(u'1')
-        item2 = self.backend.create_item(u'2')
-        revision1 = item1.create_revision(0)
-        revision2 = item2.create_revision(0)
-        revision1.write('1')
-        revision2.write('2')
-        item2.commit()
-        item1.commit()
-        item1 = self.backend.get_item(u'1')
-        item2 = self.backend.get_item(u'2')
-        revision1 = item1.get_revision(0)
-        revision2 = item2.get_revision(0)
-        assert revision1.read() == '1'
-        assert revision2.read() == '2'
-
-    def test_create_rev_item_again(self):
-        self.create_rev_item_helper(u"item1")
-        pytest.raises(ItemAlreadyExistsError, self.backend.create_item, u"item1")
-
-    def test_create_meta_item_again(self):
-        self.create_meta_item_helper(u"item2")
-        pytest.raises(ItemAlreadyExistsError, self.backend.create_item, u"item2")
-
-    def test_get_item_that_doesnt_exist(self):
-        pytest.raises(NoSuchItemError, self.backend.get_item, u"i_do_not_exist")
-
-    def test_has_item(self):
-        self.create_rev_item_helper(u"versioned")
-        self.create_meta_item_helper(u"unversioned")
-        assert self.backend.has_item(u"versioned")
-        assert self.backend.has_item(u"unversioned")
-
-    def test_has_item_that_doesnt_exist(self):
-        assert not self.backend.has_item(u"i_do_not_exist")
-
-    def test_iteritems_1(self):
-        for num in range(10, 20):
-            self.create_rev_item_helper(u"item_" + str(num).zfill(2))
-        for num in range(10):
-            self.create_meta_item_helper(u"item_" + str(num).zfill(2))
-        itemlist = sorted([item.name for item in self.backend.iteritems()])
-        for num, itemname in enumerate(itemlist):
-            assert itemname == u"item_" + str(num).zfill(2)
-        assert len(itemlist) == 20
-
-    def test_iteritems_2(self):
-        self.create_rev_item_helper(u'abcdefghijklmn')
-        count = 0
-        for item in self.backend.iteritems():
-            count += 1
-        assert count > 0
-
-    def test_iteritems_3(self):
-        self.create_rev_item_helper(u"without_meta")
-        self.create_rev_item_helper(u"with_meta")
-        item = self.backend.get_item(u"with_meta")
-        item.change_metadata()
-        item[u"meta"] = u"data"
-        item.publish_metadata()
-        itemlist = [item for item in self.backend.iteritems()]
-        assert len(itemlist) == 2
-
-    def test_existing_item_create_revision(self):
-        self.create_rev_item_helper(u"existing")
-        item = self.backend.get_item(u"existing")
-        old_rev = item.get_revision(-1)
-        rev = item.create_revision(old_rev.revno + 1)
-        item.rollback()
-        rev = item.get_revision(-1)
-        old_keys = old_rev.keys()
-        new_keys = rev.keys()
-        old_keys.sort()
-        new_keys.sort()
-        assert old_keys == new_keys
-        for key, value in old_rev.iteritems():
-            assert rev[key] == value
-        assert old_rev.read() == rev.read()
-
-    def test_new_item_create_revision(self):
-        item = self.backend.create_item(u'internal')
-        rev = item.create_revision(0)
-        item.rollback()
-        assert not self.backend.has_item(item.name)
-
-    def test_item_commit_revision(self):
-        item = self.backend.create_item(u"item#11")
-        rev = item.create_revision(0)
-        rev.write("python rocks")
-        item.commit()
-        rev = item.get_revision(0)
-        assert rev.read() == "python rocks"
-
-    def test_item_writing_data_multiple_times(self):
-        item = self.backend.create_item(u"multiple")
-        rev = item.create_revision(0)
-        rev.write("Alle ")
-        rev.write("meine ")
-        rev.write("Entchen")
-        item.commit()
-        rev = item.get_revision(0)
-        assert rev.read() == "Alle meine Entchen"
-
-    def test_item_write_seek_read(self):
-        item = self.backend.create_item(u"write_seek_read")
-        rev = item.create_revision(0)
-        write_data = "some data"
-        rev.write(write_data)
-        rev.seek(0)
-        read_data = rev.read()
-        assert read_data == write_data
-        item.commit()
-        rev = item.get_revision(0)
-        assert rev.read() == write_data
-
-    def test_item_seek_tell_read(self):
-        item = self.backend.create_item(u"write_seek_read")
-        rev = item.create_revision(0)
-        write_data = "0123456789"
-        rev.write(write_data)
-        rev.seek(0)
-        assert rev.tell() == 0
-        read_data = rev.read()
-        assert read_data == write_data
-        rev.seek(4)
-        assert rev.tell() == 4
-        read_data = rev.read()
-        assert read_data == write_data[4:]
-        item.commit()
-        rev = item.get_revision(0)
-        rev.seek(0)
-        assert rev.tell() == 0
-        read_data = rev.read()
-        assert read_data == write_data
-        rev.seek(4)
-        assert rev.tell() == 4
-        read_data = rev.read()
-        assert read_data == write_data[4:]
-
-    def test_item_reading_chunks(self):
-        item = self.backend.create_item(u"slices")
-        rev = item.create_revision(0)
-        rev.write("Alle meine Entchen")
-        item.commit()
-        rev = item.get_revision(0)
-        chunk = rev.read(1)
-        data = ""
-        while chunk != "":
-            data += chunk
-            chunk = rev.read(1)
-        assert data == "Alle meine Entchen"
-
-    def test_item_reading_negative_chunk(self):
-        item = self.backend.create_item(u"negative_chunk")
-        rev = item.create_revision(0)
-        rev.write("Alle meine Entchen" * 10)
-        item.commit()
-        rev = item.get_revision(0)
-        assert rev.read(-1) == "Alle meine Entchen" * 10
-        rev.seek(0)
-        assert rev.read(-123) == "Alle meine Entchen" * 10
-
-    def test_seek_and_tell(self):
-        item = self.backend.create_item(u"seek&tell")
-        rev = item.create_revision(0)
-        data = "wilhelm tell seekfried what time it is"
-        rev.write(data)
-        item.commit()
-
-        rev = item.get_revision(0)
-        offset = 5
-
-        # absolute
-        rev.seek(offset)
-        assert rev.tell() == offset
-        assert rev.read() == data[offset:]
-
-        # relative
-        rev.seek(offset)
-        rev.seek(offset, 1)
-        assert rev.tell() == 2 * offset
-        assert rev.read() == data[2*offset:]
-
-        # relative to EOF
-        rev.seek(-offset, 2)
-        assert rev.tell() == len(data) - offset
-        assert rev.read() == data[-offset:]
-
-    def test_item_get_revision(self):
-        item = self.backend.create_item(u"item#12")
-        rev = item.create_revision(0)
-        rev.write("jefferson airplane rocks")
-        item.commit()
-        another_rev = item.get_revision(0)
-        assert another_rev.read() == "jefferson airplane rocks"
-
-    def test_item_next_revno(self):
-        item = self.backend.create_item(u"next_revno")
-        assert item.next_revno == 0
-        item.create_revision(item.next_revno).write("foo")
-        item.commit()
-        assert item.next_revno == 1
-
-    def test_item_list_revisions_with_revmeta_changes(self):
-        item = self.backend.create_item(u"item_13")
-        for revno in range(0, 10):
-            rev = item.create_revision(revno)
-            rev[u"revno"] = u"%s" % revno
-            item.commit()
-        assert item.list_revisions() == range(0, 10)
-
-    def test_item_list_revisions_with_revdata_changes(self):
-        item = self.backend.create_item(u"item_13")
-        for revno in range(0, 10):
-            rev = item.create_revision(revno)
-            rev.write("%s" % revno)
-            item.commit()
-        assert item.list_revisions() == range(0, 10)
-
-    def test_item_list_revisions_without_changes(self):
-        item = self.backend.create_item(u"item_13")
-        for revno in range(0, 10):
-            item.create_revision(revno)
-            item.commit()
-        assert item.list_revisions() == range(0, 10)
-
-    def test_item_list_revisions_equality(self):
-        item = self.backend.create_item(u"new_item_15")
-        revs_before = item.list_revisions()
-        rev = item.create_revision(0)
-        assert item.list_revisions() == revs_before
-        item.rollback()
-
-    def test_item_list_revisions_equality_nonempty_revlist(self):
-        item = self.backend.create_item(u"new_item_16")
-        rev = item.create_revision(0)
-        rev.write("something interesting")
-        item.commit()
-        revs_before = item.list_revisions()
-        rev2 = item.create_revision(1)
-        assert item.list_revisions() == revs_before
-        item.rollback()
-
-    def test_item_list_revisions_without_committing(self):
-        item = self.backend.create_item(u"new_item_14")
-        assert item.list_revisions() == []
-
-    def test_mixed_commit_metadata1(self):
-        item = self.backend.create_item(u'mixed1')
-        item.create_revision(0)
-        pytest.raises(RuntimeError, item.change_metadata)
-        item.rollback()
-
-    def test_mixed_commit_metadata2(self):
-        item = self.backend.create_item(u'mixed2')
-        item.change_metadata()
-        pytest.raises(RuntimeError, item.create_revision, 0)
-
-    def test_item_metadata_change_and_publish(self):
-        item = self.backend.create_item(u"test item metadata change")
-        item.change_metadata()
-        item[u"creator"] = u"Vincent van Gogh"
-        item.publish_metadata()
-        item2 = self.backend.get_item(u"test item metadata change")
-        assert item2[u"creator"] == u"Vincent van Gogh"
-
-    def test_item_metadata_invalid_change(self):
-        item = self.backend.create_item(u"test item metadata invalid change")
-        try:
-            item[u"this should"] = "FAIL!"
-            assert False
-        except AttributeError:
-            pass
-
-    def test_item_metadata_without_publish(self):
-        item = self.backend.create_item(u"test item metadata invalid change")
-        item.change_metadata()
-        item[u"change but"] = u"don't publish"
-        pytest.raises(NoSuchItemError, self.backend.get_item, "test item metadata invalid change")
-
-    def test_item_create_existing_mixed_1(self):
-        item1 = self.backend.create_item(u'existing now 0')
-        item1.change_metadata()
-        item2 = self.backend.create_item(u'existing now 0')
-        item1.publish_metadata()
-        item2.create_revision(0)
-        pytest.raises(ItemAlreadyExistsError, item2.commit)
-
-    def test_item_create_existing_mixed_2(self):
-        item1 = self.backend.create_item(u'existing now 0')
-        item1.change_metadata()
-        item2 = self.backend.create_item(u'existing now 0')
-        item2.create_revision(0)
-        item2.commit()
-        pytest.raises(ItemAlreadyExistsError, item1.publish_metadata)
-
-    def test_item_multiple_change_metadata_after_create(self):
-        name = u"foo"
-        item1 = self.backend.create_item(name)
-        item2 = self.backend.create_item(name)
-        item1.change_metadata()
-        item2.change_metadata()
-        item1[u"a"] = u"a"
-        item2[u"a"] = u"b"
-        item1.publish_metadata()
-        pytest.raises(ItemAlreadyExistsError, item2.publish_metadata)
-        item = self.backend.get_item(name)
-        assert item[u"a"] == u"a"
-
-    def test_existing_item_change_metadata(self):
-        self.create_meta_item_helper(u"existing now 2")
-        item = self.backend.get_item(u'existing now 2')
-        item.change_metadata()
-        item[u'asdf'] = u'b'
-        item.publish_metadata()
-        item = self.backend.get_item(u'existing now 2')
-        assert item[u'asdf'] == u'b'
-
-    def test_metadata(self):
-        self.create_rev_item_helper(u'no metadata')
-        item = self.backend.get_item(u'no metadata')
-        pytest.raises(KeyError, item.__getitem__, u'asdf')
-
-    def test_revision(self):
-        self.create_meta_item_helper(u'no revision')
-        item = self.backend.get_item(u'no revision')
-        pytest.raises(NoSuchRevisionError, item.get_revision, -1)
-
-    def test_create_revision_change_meta(self):
-        item = self.backend.create_item(u"double")
-        rev = item.create_revision(0)
-        rev[u"revno"] = u"0"
-        item.commit()
-        item.change_metadata()
-        item[u"meta"] = u"data"
-        item.publish_metadata()
-        item = self.backend.get_item(u"double")
-        assert item[u"meta"] == u"data"
-        rev = item.get_revision(0)
-        assert rev[u"revno"] == u"0"
-
-    def test_create_revision_change_empty_meta(self):
-        item = self.backend.create_item(u"double")
-        rev = item.create_revision(0)
-        rev[u"revno"] = u"0"
-        item.commit()
-        item.change_metadata()
-        item.publish_metadata()
-        item = self.backend.get_item(u"double")
-        rev = item.get_revision(0)
-        assert rev[u"revno"] == u"0"
-
-    def test_change_meta_create_revision(self):
-        item = self.backend.create_item(u"double")
-        item.change_metadata()
-        item[u"meta"] = u"data"
-        item.publish_metadata()
-        rev = item.create_revision(0)
-        rev[u"revno"] = u"0"
-        item.commit()
-        item = self.backend.get_item(u"double")
-        assert item[u"meta"] == u"data"
-        rev = item.get_revision(0)
-        assert rev[u"revno"] == u"0"
-
-    def test_meta_after_rename(self):
-        item = self.backend.create_item(u"re")
-        item.change_metadata()
-        item[u"previous_name"] = u"re"
-        item.publish_metadata()
-        item.rename(u"er")
-        assert item[u"previous_name"] == u"re"
-
-    def test_long_names_back_and_forth(self):
-        item = self.backend.create_item(u"long_name_" * 100 + u"with_happy_end")
-        item.create_revision(0)
-        item.commit()
-        assert self.backend.has_item(u"long_name_" * 100 + u"with_happy_end")
-        item = self.backend.iteritems().next()
-        assert item.name == u"long_name_" * 100 + u"with_happy_end"
-
-    def test_revisions_after_rename(self):
-        item = self.backend.create_item(u"first one")
-        for revno in xrange(10):
-            rev = item.create_revision(revno)
-            rev[u"revno"] = unicode(revno)
-            item.commit()
-        assert item.list_revisions() == range(10)
-        item.rename(u"second one")
-        assert not self.backend.has_item(u"first one")
-        assert self.backend.has_item(u"second one")
-        item1 = self.backend.create_item(u"first_one")
-        item1.create_revision(0)
-        item1.commit()
-        assert len(item1.list_revisions()) == 1
-        item2 = self.backend.get_item(u"second one")
-        assert item2.list_revisions() == range(10)
-        for revno in xrange(10):
-            rev = item2.get_revision(revno)
-            assert rev[u"revno"] == unicode(revno)
-
-    def test_concurrent_create_revision(self):
-        self.create_rev_item_helper(u"concurrent")
-        item1 = self.backend.get_item(u"concurrent")
-        item2 = self.backend.get_item(u"concurrent")
-        item1.create_revision(1)
-        item2.create_revision(1)
-        item1.commit()
-        pytest.raises(RevisionAlreadyExistsError, item2.commit)
-
-    def test_timestamp(self):
-        tnow = int(time.time())
-        item = self.backend.create_item(u'ts1')
-        rev = item.create_revision(0)
-        item.commit()
-        item = self.backend.get_item(u'ts1')
-        ts = item.get_revision(0).timestamp
-        assert tnow <= ts <= ts + 60
-
-    def test_size(self):
-        item = self.backend.create_item(u'size1')
-        rev = item.create_revision(0)
-        rev.write('asdf')
-        rev.write('asdf')
-        item.commit()
-        rev = item.get_revision(0)
-        assert rev[SIZE] == 8
-
-    def test_size_2(self):
-        item = self.backend.create_item(u'size2')
-        rev0 = item.create_revision(0)
-        data0 = 'asdf'
-        rev0.write(data0)
-        item.commit()
-        rev1 = item.create_revision(1)
-        item.commit()
-        rev1 = item.get_revision(1)
-        assert rev1[SIZE] == 0
-        rev0 = item.get_revision(0)
-        assert rev0[SIZE] == len(data0)
-
-    def test_various_revision_metadata_values(self):
-        def test_value(value, no):
-            item = self.backend.create_item(u'valid_values_%d' % no)
-            rev = item.create_revision(0)
-            key = u"key%d" % no
-            rev[key] = value
-            item.commit()
-            rev = item.get_revision(0)
-            assert rev[key] == value
-
-        for no, value in enumerate(('string', 13, 42L, 3.14, 23+0j,
-                                       ('1', 1, 1L, 1+0j, (1, ), ), u'ąłć', (u'ó', u'żźć'), )):
-            yield test_value, value, no
-
-    def test_destroy_item(self):
-        itemname = u"I will be completely destroyed"
-        rev_data = "I will be completely destroyed, too, hopefully"
-        item = self.backend.create_item(itemname)
-        rev = item.create_revision(0)
-        rev.write(rev_data)
-        item.commit()
-
-        item.destroy()
-        assert not self.backend.has_item(itemname)
-        item_names = [item.name for item in self.backend.iteritems()]
-        assert not itemname in item_names
-
-    def test_destroy_revision(self):
-        itemname = u"I will see my children die"        # removed the smiley ':-(' temporarily as it slows the test in addition with a failure
-        rev_data = "I will die!"
-        persistent_rev = "I will see my sibling die :-("
-        item = self.backend.create_item(itemname)
-        rev = item.create_revision(0)
-        rev.write(rev_data)
-        item.commit()
-        rev = item.create_revision(1)
-        rev.write(persistent_rev)
-        item.commit()
-        assert item.list_revisions() == range(2)
-
-        rev = item.get_revision(0)
-        rev.destroy()
-        assert item.list_revisions() == [1]
-        assert self.backend.has_item(itemname)
-        assert item.get_revision(-1).read() == persistent_rev
-
-        third = "3rd revision"
-        rev = item.create_revision(2)
-        rev.write(third)
-        item.commit()
-        rev = item.get_revision(2)
-        assert item.get_revision(-1).read() == third
-        assert len(item.list_revisions()) == 2
-        rev.destroy()
-        assert len(item.list_revisions()) == 1
-        last = item.get_revision(-1)
-        assert last.revno == 1
-        last_data = last.read()
-        assert last_data != third
-        assert last_data == persistent_rev
-
-    def test_clone_backend(self):
-        src = flaskg.storage
-        dst = memory.MemoryBackend()
-
-        dollys_name = u"Dolly The Sheep"
-        item = src.create_item(dollys_name)
-        rev = item.create_revision(0)
-        rev.write("maeh")
-        rev[u'origin'] = u'reagenzglas'
-        item.commit()
-
-        brothers_name = u"Dolly's brother"
-        item = src.create_item(brothers_name)
-        item.change_metadata()
-        item[u'no revisions'] = True
-        item.publish_metadata()
-
-        dst.clone(src, verbose=False)
-
-        assert len(list(dst.iteritems())) == 2
-        assert dst.has_item(dollys_name)
-        rev = dst.get_item(dollys_name).get_revision(0)
-        data = rev.read()
-        assert data == "maeh"
-        meta = dict(rev.iteritems())
-        assert u'origin' in meta
-        assert meta[u'origin'] == u'reagenzglas'
-
-        assert dst.has_item(brothers_name)
-        item = dst.get_item(brothers_name)
-        meta = dict(item.iteritems())
-        assert u'no revisions' in meta
-        assert meta[u'no revisions'] is True
-
-    def test_iteritems_item_names_after_rename(self):
-        item = self.backend.create_item(u'first')
-        item.create_revision(0)
-        item.commit()
-        item.rename(u'second')
-        item.create_revision(1)
-        item.commit()
-        # iteritems provides actual name
-        items = [item for item in self.backend.iteritems()]
-        assert len(items) == 1
-        assert items[0].name == u'second'
-        rev0 = items[0].get_revision(0)
-        assert rev0.item.name == u'second'
-        rev1 = items[0].get_revision(1)
-        assert rev1.item.name == u'second'
-
-    def test_iteritems_after_destroy(self):
-        item = self.backend.create_item(u'first')
-        item.create_revision(0)
-        item.commit()
-        item.create_revision(1)
-        item.commit()
-        assert len([item for item in self.backend.iteritems()]) == 1
-        rev = item.get_revision(-1)
-        rev.destroy()
-        assert len([item for item in self.backend.iteritems()]) == 1
-        item.destroy()
-        assert len([item for item in self.backend.iteritems()]) == 0
-
--- a/MoinMoin/storage/_tests/test_backends_flatfile.py	Sat Sep 10 03:23:36 2011 +0200
+++ /dev/null	Thu Jan 01 00:00:00 1970 +0000
@@ -1,29 +0,0 @@
-# Copyright: 2009 MoinMoin:ThomasWaldmann
-# License: GNU GPL v2 (or any later version), see LICENSE.txt for details.
-
-"""
-    MoinMoin - Test - FlatFileBackend
-"""
-
-
-import pytest
-
-pytest.skip("BackendTest base class tests quite some stuff that this very simple backend does not provide")
-# e.g.: revisioning, extremely long item names, metadata support
-# TODO: either fix base class so that it is more useful even to test simple backends,
-#       or implement some specific, more simple tests here.
-
-import tempfile, shutil
-
-from MoinMoin.storage._tests.test_backends import BackendTest
-from MoinMoin.storage.backends.flatfile import FlatFileBackend
-
-class TestFlatFileBackend(BackendTest):
-
-    def create_backend(self):
-        self.tempdir = tempfile.mkdtemp('', 'moin-')
-        return FlatFileBackend(self.tempdir)
-
-    def kill_backend(self):
-        shutil.rmtree(self.tempdir)
-
--- a/MoinMoin/storage/_tests/test_backends_fs.py	Sat Sep 10 03:23:36 2011 +0200
+++ /dev/null	Thu Jan 01 00:00:00 1970 +0000
@@ -1,58 +0,0 @@
-# Copyright: 2008 MoinMoin:JohannesBerg
-# License: GNU GPL v2 (or any later version), see LICENSE.txt for details.
-
-"""
-    MoinMoin - Test - FSBackend
-"""
-
-
-import py, os, tempfile, shutil
-
-from MoinMoin.storage._tests.test_backends import BackendTest
-from MoinMoin.storage.backends.fs import FSBackend
-
-class TestFSBackend(BackendTest):
-
-    def create_backend(self):
-        self.tempdir = tempfile.mkdtemp('', 'moin-')
-        return FSBackend(self.tempdir)
-
-    def kill_backend(self):
-        try:
-            for root, dirs, files in os.walk(self.tempdir):
-                for d in dirs:
-                    assert not d.endswith('.lock')
-                for f in files:
-                    assert not f.endswith('.lock')
-                    assert not f.startswith('tmp-')
-        finally:
-            shutil.rmtree(self.tempdir)
-
-    def test_large(self):
-        i = self.backend.create_item(u'large')
-        r = i.create_revision(0)
-        r[u'0'] = u'x' * 100
-        r[u'1'] = u'y' * 200
-        r[u'2'] = u'z' * 300
-        for x in xrange(1000):
-            r.write('lalala! ' * 10)
-        i.commit()
-
-        i = self.backend.get_item(u'large')
-        r = i.get_revision(0)
-        assert r[u'0'] == u'x' * 100
-        assert r[u'1'] == u'y' * 200
-        assert r[u'2'] == u'z' * 300
-        for x in xrange(1000):
-            assert r.read(8 * 10) == 'lalala! ' * 10
-        assert r.read() == ''
-
-    def test_all_unlocked(self):
-        i1 = self.backend.create_item(u'existing now 1')
-        i1.create_revision(0)
-        i1.commit()
-        i2 = self.backend.get_item(u'existing now 1')
-        i2.change_metadata()
-        # if we leave out the latter line, it fails
-        i2.publish_metadata()
-
--- a/MoinMoin/storage/_tests/test_backends_fs19.py	Sat Sep 10 03:23:36 2011 +0200
+++ /dev/null	Thu Jan 01 00:00:00 1970 +0000
@@ -1,299 +0,0 @@
-# -*- coding: utf-8 -*-
-# Copyright: 2008-2010 MoinMoin:ThomasWaldmann
-# License: GNU GPL v2 (or any later version), see LICENSE.txt for details.
-
-"""
-    MoinMoin - fs19 read-only backend tests
-"""
-
-
-import os, re, tempfile, shutil
-
-import pytest
-
-from flask import current_app as app
-
-from MoinMoin.config import CONTENTTYPE, TAGS
-from MoinMoin.storage import Item
-from MoinMoin.storage.backends._fsutils import quoteWikinameFS, unquoteWikiname
-from MoinMoin.storage.backends.fs19 import FSPageBackend, regenerate_acl, process_categories
-from MoinMoin.storage.error import NoSuchItemError, NoSuchRevisionError
-
-item_data = "Foo Bar"
-item_name = "test_page"
-item_mtime = 12345678
-item_comment = "saved test item"
-item_revisions = 2
-
-deleted_item_acl = "All:"
-deleted_item_data = "#acl %s\r\nFoo bar" % deleted_item_acl
-deleted_item_name = "deleted_page"
-
-attachment_name = u"test.txt"
-attachment_data = "attachment"
-attachment_mtime1 = 12340000
-attachment_mtime2 = 12345000
-attachment_comment = "saved test attachment"
-
-logentry = lambda *items: "\t".join(items)
-item_editlog = "\r\n".join([
-    logentry(str(item_mtime * 1000000), '00000001', 'SAVE', item_name, '', '', '', '', item_comment),
-    logentry(str(attachment_mtime1 * 1000000), '99999999', 'ATTNEW', item_name, '', '', '', attachment_name, attachment_comment),
-    logentry(str(item_mtime * 1000000 + 1), '00000002', 'SAVE', item_name, '', '', '', '', item_comment),
-    logentry(str(attachment_mtime2 * 1000000), '99999999', 'ATTNEW', item_name, '', '', '', attachment_name, attachment_comment),
-])
-
-deleted_item_editlog = "\r\n".join([
-    logentry(str(item_mtime * 1000000), '00000001', 'SAVE', item_name, '', '', '', '', item_comment),
-    logentry(str(item_mtime * 1000000 + 1), '00000002', 'SAVE/DELETE', item_name, '', '', '', '', item_comment),
-])
-
-items = [# name, rev, data, logline, attachments
-         (item_name, 1, item_data, item_editlog, [attachment_name]),
-         (item_name, 2, item_data, item_editlog, []),
-         (u"äöüßłó ąćółąńśćżź", 1, item_data, '', []),
-         (ur"name#special(characters?.\,", 1, item_data, '', []),
-         (deleted_item_name, 1, deleted_item_data, '', [attachment_name]),
-         (deleted_item_name, 2, '', '', []), # no rev 2 data, no edit-log
-        ]
-
-class TestFS19Backend(object):
-    """
-    MoinMoin - fs19 read-only backend tests
-    """
-
-    def setup_method(self, method):
-        # create backend
-        self.tempdir = d = tempfile.mkdtemp('', 'moin-')
-        self.backend = FSPageBackend(self.tempdir, self.tempdir)
-        # populate it manually because the backend is just read-only
-        join = os.path.join
-        for name, revno, revdata, logdata, attachments in items:
-            pagedir = join(d, 'pages', quoteWikinameFS(name))
-            try:
-                os.makedirs(join(pagedir, 'revisions'))
-                os.makedirs(join(pagedir, 'attachments'))
-            except:
-                pass
-            f = file(join(pagedir, 'current'), 'w')
-            f.write('%08d' % revno)
-            f.close()
-            if revdata:
-                f = file(join(pagedir, 'revisions', '%08d' % revno), 'w')
-                f.write(revdata)
-                f.close()
-            if logdata:
-                f = file(join(pagedir, 'edit-log'), 'a')
-                f.write(logdata)
-                f.close()
-            for attachment in attachments:
-                f = file(join(pagedir, 'attachments', attachment.encode('utf-8')), 'w')
-                f.write(attachment_data)
-                f.close()
-
-    def teardown_method(self, method):
-        # remove backend data
-        shutil.rmtree(self.tempdir)
-        self.backend = None
-
-    def test_get_item_that_doesnt_exist(self):
-        pytest.raises(NoSuchItemError, self.backend.get_item, "i_do_not_exist")
-        pytest.raises(NoSuchItemError, self.backend.get_item, item_name + "/not_exist.txt")
-
-    def test_has_item_that_doesnt_exist(self):
-        assert not self.backend.has_item("i_do_not_exist")
-        assert not self.backend.has_item(item_name + "/not_exist.txt")
-
-    def test_get_item_that_exists(self):
-        for itemdata in items:
-            name = itemdata[0]
-            item = self.backend.get_item(name)
-            assert isinstance(item, Item)
-            assert item.name == name
-
-    def test_get_item_attachment(self):
-        name = item_name + '/' + attachment_name
-        item = self.backend.get_item(name)
-        assert isinstance(item, Item)
-        assert item.name == name
-
-    def test_has_item(self):
-        for itemdata in items:
-            name = itemdata[0]
-            exists = self.backend.has_item(name)
-            assert exists
-
-    def test_iteritems(self):
-        have_items = set([item.name for item in self.backend.iteritems()])
-        expected_items = set()
-        for itemdata in items:
-            itemname = itemdata[0]
-            attachments = itemdata[4]
-            expected_items |= set([itemname] + ['%s/%s' % (itemname, att) for att in attachments])
-        assert have_items == expected_items
-
-    def test_rev_reading_chunks(self):
-        item = self.backend.get_item(item_name)
-        rev = item.get_revision(0)
-        chunk = rev.read(1)
-        data = ""
-        while chunk != "":
-            data += chunk
-            chunk = rev.read(1)
-        assert data == item_data
-
-    def test_rev_reading_attachment(self):
-        name = item_name + '/' + attachment_name
-        item = self.backend.get_item(name)
-        rev = item.get_revision(0)
-        data = rev.read()
-        assert data == attachment_data
-
-    def test_deleted_rev_reading(self):
-        item = self.backend.get_item(deleted_item_name)
-        rev = item.get_revision(0)
-        data = rev.read()
-        assert data != ""
-        rev = item.get_revision(1)
-        data = rev.read()
-        assert data == ""
-
-    def test_metadata_that_doesnt_exist(self):
-        item = self.backend.get_item(item_name)
-        pytest.raises(KeyError, item.__getitem__, 'asdf')
-
-    def test_metadata_mtime(self):
-        item = self.backend.get_item(item_name)
-        rev = item.get_revision(0)
-        assert rev.timestamp == item_mtime
-
-    def test_metadata_mtime_attachment(self):
-        name = item_name + '/' + attachment_name
-        item = self.backend.get_item(name)
-        rev = item.get_revision(0)
-        rev_timestamp = rev.timestamp
-        assert rev_timestamp == attachment_mtime2
-
-    def test_item_revision_count(self):
-        item = self.backend.get_item(item_name)
-        revs = item.list_revisions()
-        assert revs == range(item_revisions)
-
-    def test_revision_that_doesnt_exist(self):
-        item = self.backend.get_item(item_name)
-        pytest.raises(NoSuchRevisionError, item.get_revision, 42)
-
-    def test_revision_attachment_that_doesnt_exist(self):
-        name = item_name + '/' + attachment_name
-        item = self.backend.get_item(name)
-        pytest.raises(NoSuchRevisionError, item.get_revision, 1) # attachment only has rev 0
-
-    def test_revision_attachment_acl(self):
-        name = deleted_item_name + '/' + attachment_name
-        item = self.backend.get_item(name)
-        rev = item.get_revision(0)
-        assert rev['acl'] == deleted_item_acl
-
-
-class TestAclRegeneration(object):
-    """
-    test ACL regeneration
-
-    We need to regenerate ACLs for moin 1.9 (fs19) -> 2.0 migration, because we need to cleanly
-    remove revert and delete rights.
-    """
-    def testAclRegeneration(self):
-        tests = [
-            (u'', u''),
-            (u'All:', u'All:'), # no rights, no change
-            (u'All:read', u'All:read'), # single entry, no change
-            (u'All:read,write,revert', u'All:read,write'), # single entry, remove 'revert'
-            (u'All:read,write,delete', u'All:read,write'), # single entry, remove 'delete'
-            (u'BadGuy: Default', u'BadGuy: Default'), # multiple entries, do not expand Default
-            (u'Known:read,delete,write,revert All:read',
-             u'Known:read,write All:read'), # multiple entries, remove 'delete'/'revert'
-            (u'Joe Doe,Jane Doe:delete,read,write All:',
-             u'Joe Doe,Jane Doe:read,write All:'), # multiple entries, blanks in names, remove 'delete'
-        ]
-        acl_rights_valid = app.cfg.acl_rights_contents
-        for acl, expected in tests:
-            result = regenerate_acl(acl, acl_rights_valid)
-            assert result == expected
-
-
-class TestTagsGeneration(object):
-    """
-    test tags generation from categories
-    """
-    def testTagsGeneration(self):
-        tests = [
-            (u'', u'', []),
-            (u"""1\r
-----\r
-""",
-             u"""1\r
-""",
-             []),
-            (u"""2\r
-----\r
-CategoryFoo\r
-""",
-             u"""2\r
-""",
-             [u'CategoryFoo']),
-            (u"""3\r
-----\r
-CategoryFoo CategoryBar\r
-""",
-             u"""3\r
-""",
-             [u'CategoryFoo', u'CategoryBar']),
-            (u"""4\r
-----\r
-CategoryFoo\r
-CategoryBar\r
-""",
-             u"""4\r
-""",
-             [u'CategoryFoo', u'CategoryBar']),
-            (u"""5\r
-----\r
-CategoryFoo\r
-CategoryBar\r
-\r
-what ever\r
-""",
-             u"""5\r
-\r
-what ever\r
-""",
-             [u'CategoryFoo', u'CategoryBar']),
-        ]
-        item_category_regex = re.compile(ur'(?P<all>Category(?P<key>(?!Template)\S+))', re.UNICODE)
-        for data, expected_data, expected_tags in tests:
-            meta = {CONTENTTYPE: 'text/x.moin.wiki'}
-            data = process_categories(meta, data, item_category_regex)
-            assert meta.get(TAGS, []) == expected_tags
-            assert data == expected_data
-
-def test__decode_list():
-    from MoinMoin.storage.backends.fs19 import _decode_list
-    test_line = "test_item1 \t test_item2\n \t test_item3 \t"
-    result = _decode_list(test_line)
-    expected = ('test_item1', 'test_item2', 'test_item3')
-    assert result == expected
-
-def test__decode_dict():
-    from MoinMoin.storage.backends.fs19 import _decode_dict
-    test_line = "test_item1: first item\n \t test_item: second item2 \t \ntest_item3: third item \t"
-    result = _decode_dict(test_line)
-    expected = {'test_item1': ' first item', 'test_item3': ' third item', 'test_item': ' second item2'}
-    assert result == expected
-
-def test_hash_hexdigest():
-    from MoinMoin.storage.backends.fs19 import hash_hexdigest
-    result = hash_hexdigest('test_content')
-    assert result[0] == 12
-    with pytest.raises(ValueError):
-        hash_hexdigest(u'test_content')
-
--- a/MoinMoin/storage/_tests/test_backends_fs2.py	Sat Sep 10 03:23:36 2011 +0200
+++ /dev/null	Thu Jan 01 00:00:00 1970 +0000
@@ -1,59 +0,0 @@
-# Copyright: 2008 MoinMoin:JohannesBerg
-# License: GNU GPL v2 (or any later version), see LICENSE.txt for details.
-
-"""
-    MoinMoin - Test - FS2Backend
-"""
-
-import py, os, tempfile, shutil
-
-from flask import current_app as app
-
-from MoinMoin.storage._tests.test_backends import BackendTest
-from MoinMoin.storage.backends.fs2 import FS2Backend
-
-class TestFS2Backend(BackendTest):
-
-    def create_backend(self):
-        self.tempdir = tempfile.mkdtemp('', 'moin-')
-        return FS2Backend(self.tempdir)
-
-    def kill_backend(self):
-        try:
-            for root, dirs, files in os.walk(self.tempdir):
-                for d in dirs:
-                    assert not d.endswith('.lock')
-                for f in files:
-                    assert not f.endswith('.lock')
-                    assert not f.startswith('tmp-')
-        finally:
-            shutil.rmtree(self.tempdir)
-
-    def test_large(self):
-        i = self.backend.create_item(u'large')
-        r = i.create_revision(0)
-        r['0'] = 'x' * 100
-        r['1'] = 'y' * 200
-        r['2'] = 'z' * 300
-        for x in xrange(1000):
-            r.write('lalala! ' * 10)
-        i.commit()
-
-        i = self.backend.get_item(u'large')
-        r = i.get_revision(0)
-        assert r['0'] == 'x' * 100
-        assert r['1'] == 'y' * 200
-        assert r['2'] == 'z' * 300
-        for x in xrange(1000):
-            assert r.read(8 * 10) == 'lalala! ' * 10
-        assert r.read() == ''
-
-    def test_all_unlocked(self):
-        i1 = self.backend.create_item(u'existing now 1')
-        i1.create_revision(0)
-        i1.commit()
-        i2 = self.backend.get_item(u'existing now 1')
-        i2.change_metadata()
-        # if we leave out the latter line, it fails
-        i2.publish_metadata()
-
--- a/MoinMoin/storage/_tests/test_backends_hg.py	Sat Sep 10 03:23:36 2011 +0200
+++ /dev/null	Thu Jan 01 00:00:00 1970 +0000
@@ -1,130 +0,0 @@
-# Copyright: 2008 MoinMoin:PawelPacana
-# License: GNU GPL v2 (or any later version), see LICENSE.txt for details.
-
-"""
-    MoinMoin - MercurialBackend tests
-
-    Testcases for MercurialBackend based on stable version
-    of Mercurial.
-"""
-
-from tempfile import mkdtemp, mkstemp, gettempdir
-import shutil
-import os
-import pytest
-
-try:
-    import mercurial
-except ImportError:
-    pytest.skip('Cannot test without Mercurial installed.')
-
-from MoinMoin.storage._tests.test_backends import BackendTest
-from MoinMoin.storage.backends.hg import MercurialBackend
-from MoinMoin.storage.error import BackendError
-
-class TestMercurialBackend(BackendTest):
-    #pytestmark = pytest.mark.xfail(reason='not maintained')
-
-    def create_backend(self):
-        self.test_dir = mkdtemp()
-        return MercurialBackend(self.test_dir)
-
-    def kill_backend(self):
-        shutil.rmtree(self.test_dir)
-
-    def test_backend_init(self):
-        emptydir, file = mkdtemp(), mkstemp()[1]
-        nonexisting = os.path.join(gettempdir(), 'to-be-created')
-        nonexisting_nested = os.path.join(gettempdir(), 'second-to-be-created/and-also-nested')
-        dirstruct = mkdtemp()
-        os.mkdir(os.path.join(dirstruct, "meta"))
-        os.mkdir(os.path.join(dirstruct, "rev"))
-        try:
-            assert isinstance(MercurialBackend(nonexisting), MercurialBackend)
-            assert isinstance(MercurialBackend(nonexisting_nested), MercurialBackend)
-            assert isinstance(MercurialBackend(emptydir), MercurialBackend)
-            assert isinstance(MercurialBackend(emptydir), MercurialBackend) # init on existing
-            pytest.raises(BackendError, MercurialBackend, file)
-            assert isinstance(MercurialBackend(dirstruct), MercurialBackend)
-        finally:
-            shutil.rmtree(emptydir)
-            shutil.rmtree(dirstruct)
-            shutil.rmtree(nonexisting)
-            os.unlink(file)
-
-    def test_permission(self):
-        import sys
-        if sys.platform == 'win32':
-            pytest.skip("Not much usable test on win32.")
-        no_perms = os.path.join("/", "permission-error-dir")
-        pytest.raises(BackendError, MercurialBackend, no_perms)
-
-    def test_backend_init_non_empty_datadir(self):
-        datadir = mkdtemp()
-        os.mkdir(os.path.join(datadir, "meta"))
-        os.mkdir(os.path.join(datadir, "rev"))
-        try:
-            revitem = mkstemp(dir=os.path.join(datadir, "rev"))[1]
-            assert isinstance(MercurialBackend(datadir), MercurialBackend)
-            os.unlink(revitem)
-            metaitem = mkstemp(dir=os.path.join(datadir, "meta"))[1]
-            assert isinstance(MercurialBackend(datadir), MercurialBackend)
-            os.unlink(metaitem)
-        finally:
-            shutil.rmtree(datadir)
-
-    def test_large_revision_meta(self):
-        item = self.backend.create_item(u'existing')
-        rev = item.create_revision(0)
-        for num in xrange(10000):
-            revval = "revision metadata value for key %d" % num
-            rev[u"%s" % num] = revval * 10
-        item.commit()
-        item = self.backend.get_item(u'existing')
-        rev = item.get_revision(-1)
-        assert len(dict(rev)) == 10000 + 3 # 'sha1', 'size', 'mtime' key is added automatically on commit
-        for num in xrange(10000):
-            revval = "revision metadata value for key %d" % num
-            assert rev[u"%s" % num] == revval * 10
-
-    def test_data_after_rename(self):
-        item = self.backend.create_item(u'before')
-        rev = item.create_revision(0)
-        rev.write("aaa")
-        item.commit()
-        item.rename(u'after')
-        rev = item.create_revision(1)
-        rev.write("bbb")
-        item.commit()
-        rev = item.get_revision(0)
-        assert rev.read() == "aaa"
-        rev = item.get_revision(1)
-        assert rev.read() == "bbb"
-
-    def test_revision_metadata_key_name(self):
-        item = self.backend.create_item(u'metakey')
-        rev = item.create_revision(0)
-        rev[u'_meta_'] = u"dummy"
-        item.commit()
-        item = self.backend.get_item(u'metakey')
-        rev = item.get_revision(-1)
-        assert rev[u'_meta_'] == u"dummy"
-
-    def test_index_files_in_repository(self):
-        item = self.backend.create_item(u'indexed')
-        rev = item.create_revision(0)
-        item.commit()
-        repo_items = [i for i in self.backend._repo['']]
-        assert len(repo_items) == 2
-        assert item._id in repo_items
-        assert u"%s.rev" % (item._id) in repo_items
-        rev = item.get_revision(-1)
-        rev.destroy()
-        repo_items = [i for i in self.backend._repo['']]
-        assert len(repo_items) == 3
-        assert u"%s.rip" % (item._id) in repo_items
-        item.destroy()
-        repo_items = [i for i in self.backend._repo['']]
-        assert len(repo_items) == 1
-        assert u"%s.rip" % (item._id) in repo_items
-
--- a/MoinMoin/storage/_tests/test_backends_memory.py	Sat Sep 10 03:23:36 2011 +0200
+++ /dev/null	Thu Jan 01 00:00:00 1970 +0000
@@ -1,40 +0,0 @@
-# Copyright: 2008 MoinMoin:ChristopherDenter
-# Copyright: 2008 MoinMoin:JohannesBerg
-# Copyright: 2008 MoinMoin:AlexanderSchremmer
-# License: GNU GPL v2 (or any later version), see LICENSE.txt for details.
-
-"""
-    MoinMoin - Test - MemoryBackend
-
-    This defines tests for the MemoryBackend.
-"""
-
-
-from MoinMoin.storage._tests.test_backends import BackendTest
-from MoinMoin.storage.backends.memory import MemoryBackend, TracingBackend
-
-class TestMemoryBackend(BackendTest):
-    """
-    Test the MemoryBackend
-    """
-
-    def create_backend(self):
-        return MemoryBackend()
-
-    def kill_backend(self):
-        pass
-
-class TestTracingBackend(BackendTest):
-    def create_backend(self):
-        import random
-        return TracingBackend()#"/tmp/codebuf%i.py" % random.randint(1, 2**30))
-
-    def kill_backend(self):
-        func = self.backend.get_func()
-        try:
-            func(MemoryBackend()) # should not throw any exc
-        except:
-            # I get exceptions here because py.test seems to handle setup/teardown incorrectly
-            # in generative tests
-            pass #print "EXC"
-
--- a/MoinMoin/storage/_tests/test_backends_router.py	Sat Sep 10 03:23:36 2011 +0200
+++ /dev/null	Thu Jan 01 00:00:00 1970 +0000
@@ -1,188 +0,0 @@
-# Copyright: 2009 MoinMoin:ChristopherDenter
-# License: GNU GPL v2 (or any later version), see LICENSE.txt for details.
-
-"""
-    MoinMoin - Test - RouterBackend
-
-    This defines tests for the RouterBackend
-"""
-
-import os
-import time
-
-import pytest
-
-from flask import current_app as app
-
-from whoosh.query import Term, And, Every
-
-from MoinMoin.config import NAME, MTIME
-from MoinMoin.error import ConfigurationError
-from MoinMoin.storage._tests.test_backends import BackendTest
-from MoinMoin.storage.backends.memory import MemoryBackend
-from MoinMoin.storage.middleware.router import RouterBackend
-from MoinMoin.search.indexing import WhooshIndex
-
-class TestRouterBackend(BackendTest):
-    """
-    Test the MemoryBackend
-    """
-
-    def create_backend(self):
-        self.root = MemoryBackend()
-        self.ns_user_profile = app.cfg.ns_user_profile
-        self.users = MemoryBackend()
-        self.child = MemoryBackend()
-        self.other = MemoryBackend()
-        self.mapping = [('child', self.child), ('other/', self.other), (self.ns_user_profile, self.users), ('/', self.root)]
-        return RouterBackend(self.mapping, cfg=app.cfg)
-
-    def kill_backend(self):
-        pass
-
-    def teardown_method(self, method):
-        # clean the index directory after each test as messes with the backend history
-        # XXX tests with backend.history should not be failing due to contents in index directory
-        # the contents of the directory and the way backend.history is handled should be implemented
-        # in a better way
-        index_dir = WhooshIndex()._index_dir
-        for values in os.walk(index_dir):
-            for index_file_name in values[2]:
-                index_file = index_dir + '/' + index_file_name
-                os.remove(index_file)
-
-    def test_correct_backend(self):
-        mymap = {u'rootitem': self.root,         # == /rootitem
-                 u'child/joe': self.child,       # Direct child of namespace.
-                 u'other/jane': self.other,      # Direct child of namespace.
-                 u'child/': self.child,          # Root of namespace itself (!= root)
-                 u'other/': self.other,          # Root of namespace
-                 u'': self.root,                 # Due to lack of any namespace info
-                }
-
-        assert not (self.root is self.child is self.other)
-        for itemname, backend in mymap.iteritems():
-            assert self.backend._get_backend(itemname)[0] is backend
-
-    def test_store_and_get(self):
-        itemname = u'child/foo'
-        item = self.backend.create_item(itemname)
-        assert item.name == itemname
-        # using item._backend to get the backend makes this test fail.
-        test_backend, child_name, root_name = item._get_backend(itemname)
-        assert test_backend is self.child
-        item.change_metadata()
-        item[u'just'] = u'testing'
-        item.publish_metadata()
-        # using item._backend to get the backend makes this test fail.
-        test_backend, child_name, root_name = item._get_backend(itemname)
-        assert test_backend is self.child
-        assert item[u'just'] == u'testing'
-        assert item.name == itemname
-
-    def test_traversal(self):
-        mymap = {u'rootitem': self.root,         # == /rootitem
-                 u'child/joe': self.child,       # Direct child of namespace.
-                 u'other/jane': self.other,      # Direct child of namespace.
-                 u'child/': self.child,          # Root of namespace itself (!= root)
-                 u'other/': self.other,          # Root of namespace
-                 u'': self.root,                 # Due to lack of any namespace info
-                }
-
-        items_in = []
-        for itemname, backend in mymap.iteritems():
-            item = self.backend.create_item(itemname)
-            assert item.name == itemname
-            rev = item.create_revision(0)
-            rev.write("This is %s" % itemname)
-            item.commit()
-            items_in.append(item)
-            assert self.backend.has_item(itemname)
-
-        items_out = list(self.backend.iteritems())
-
-        items_in = [item.name for item in items_in]
-        items_out = [item.name for item in items_out]
-        items_in.sort()
-        items_out.sort()
-
-        assert items_in == items_out
-
-    def test_user_in_traversal(self):
-        userid = u'1249291178.45.20407'
-        user = self.backend.create_item(self.ns_user_profile + userid)
-        user.change_metadata()
-        user[u"name"] = u"joe"
-        user.publish_metadata()
-
-        all_items = list(self.backend.iteritems())
-        all_items = [item.name for item in all_items]
-        assert (self.ns_user_profile + userid) in all_items
-        assert self.backend.has_item(self.ns_user_profile + userid)
-
-    def test_nonexisting_namespace(self):
-        itemname = u'nonexisting/namespace/somewhere/deep/below'
-        item = self.backend.create_item(itemname)
-        rev = item.create_revision(0)
-        item.commit()
-        assert self.root.has_item(itemname)
-
-    def test_cross_backend_rename(self):
-        itemname = u'i_will_be_moved'
-        item = self.backend.create_item(u'child/' + itemname)
-        item.create_revision(0)
-        item.commit()
-        assert self.child.has_item(itemname)
-        newname = u'i_was_moved'
-        item.rename(u'other/' + newname)
-        print [item.name for item in self.child.iteritems()]
-        assert not self.child.has_item(itemname)
-        assert not self.child.has_item(newname)
-        assert not self.child.has_item(u'other/' + newname)
-        assert self.other.has_item(newname)
-
-    def test_itemname_equals_namespace(self):
-        itemname = u'child'
-        backend, name, mountpoint = self.backend._get_backend(itemname)
-        assert backend is self.child
-        assert name == ''
-        assert mountpoint == 'child'
-
-    def test_search_item_history_order(self):
-        item_name = u'some item'
-        item = self.backend.create_item(item_name)
-        for rev_no in range(3):
-            rev = item.create_revision(rev_no)
-            item.commit()
-        query = Term("name_exact", item_name)
-        results = list(self.backend.search(query, all_revs=True, sortedby="rev_no"))
-        print results
-        assert results[0].get("rev_no") == 0
-        assert results[1].get("rev_no") == 1
-        assert results[2].get("rev_no") == 2
-        results = list(self.backend.search(query, all_revs=True, sortedby="rev_no", reverse=True))
-        print results
-        assert results[0].get("rev_no") == 2
-        assert results[1].get("rev_no") == 1
-        assert results[2].get("rev_no") == 0
-
-    def test_search_global_history_order(self):
-        names = [u'foo', u'bar', u'baz', ]
-        for item_name in names:
-            item = self.backend.create_item(item_name)
-            rev = item.create_revision(0)
-            item.commit()
-            time.sleep(1) # make sure we have different MTIME
-        query = Every()
-        results = list(self.backend.search(query, all_revs=True, sortedby=[MTIME, "rev_no"]))
-        print results
-        assert results[0].get(NAME) == names[0]
-        assert results[1].get(NAME) == names[1]
-        assert results[2].get(NAME) == names[2]
-        results = list(self.backend.search(query, all_revs=True, sortedby=[MTIME, "rev_no"], reverse=True))
-        print results
-        assert results[0].get(NAME) == names[2]
-        assert results[1].get(NAME) == names[1]
-        assert results[2].get(NAME) == names[0]
-
-
--- a/MoinMoin/storage/_tests/test_backends_sqla.py	Sat Sep 10 03:23:36 2011 +0200
+++ /dev/null	Thu Jan 01 00:00:00 1970 +0000
@@ -1,160 +0,0 @@
-# Copyright: 2009 MoinMoin:ChristopherDenter
-# License: GNU GPL v2 (or any later version), see LICENSE.txt for details.
-
-"""
-    MoinMoin - Test - SQLAlchemyBackend
-
-    This defines tests for the SQLAlchemyBackend.
-"""
-
-
-from StringIO import StringIO
-
-import py
-
-from MoinMoin.storage._tests.test_backends import BackendTest
-from MoinMoin.storage.backends.sqla import SQLAlchemyBackend, SQLARevision, Data
-from MoinMoin.search.indexing import WhooshIndex
-
-class TestSQLABackend(BackendTest):
-
-    def create_backend(self):
-        # when running py.test, all the index files are removed in index dir(please see teardown_method in test_backends_router)
-        # initializing WhooshIndex creates all_revisions_index and latest_revisions_index in there.
-        # without index files item.commit() raises EmptyIndexError
-        # SQLAlchemyBackend do not initializes revision files there
-        WhooshIndex()
-        return SQLAlchemyBackend(verbose=True)
-
-    def kill_backend(self):
-        pass
-
-
-class TestChunkedRevDataStorage(object):
-    raw_data = "This is a very long sentence so I can properly test my program. I hope it works."
-
-    def setup_method(self, meth):
-        self.sqlabackend = SQLAlchemyBackend('sqlite:///:memory:')
-        self.item = self.sqlabackend.create_item(u"test_item")
-        self.rev = self.item.create_revision(0)
-        self.rev.write(self.raw_data)
-        self.item.commit()
-        self.rev = self.item.get_revision(0)
-
-    def test_read_empty(self):
-        item = self.sqlabackend.create_item(u"empty_item")
-        rev = item.create_revision(0)
-        assert rev.read() == ''
-        item.commit()
-        rev = item.get_revision(0)
-        assert rev.read() == ''
-
-    def test_write_many_times(self):
-        item = self.sqlabackend.create_item(u"test_write_many_times")
-        rev = item.create_revision(0)
-        rev._data._last_chunk.chunksize = 4
-        rev.write("foo")
-        rev.write("b")
-        rev._data._last_chunk.chunksize = 4
-        rev.write("aaar")
-        item.commit()
-        rev = item.get_revision(0)
-        assert [chunk.data for chunk in rev._data._chunks] == ["foob", "aaar"]
-
-    def test_write_chunksize_special(self):
-        item = self.sqlabackend.create_item(u"test_write_chunksize_special")
-        rev = item.create_revision(0)
-        CHUNKSIZE = rev._data._last_chunk.chunksize
-        data = "x" * CHUNKSIZE
-        rev.write(data)
-        item.commit()
-        rev = item.get_revision(0)
-        # there should be exactly one chunk (if write() works correctly)
-        assert len(rev._data._chunks) == 1
-        # read all we have
-        read_data = rev.read()
-        assert read_data == data
-        # read CHUNKSIZE bytes
-        rev.seek(0, 0)
-        read_data = rev.read(CHUNKSIZE)
-        assert read_data == data
-        # start in middle and read up to CHUNK end
-        rev.seek(CHUNKSIZE/2, 0)
-        read_data = rev.read(CHUNKSIZE/2)
-        assert read_data == data[CHUNKSIZE/2:]
-        # create another, empty rev
-        rev = item.create_revision(1)
-        CHUNKSIZE = rev._data._last_chunk.chunksize
-        data = ""
-        rev.write(data)
-        item.commit()
-        # read 0 bytes at pos 0
-        rev = item.get_revision(1)
-        # there should be no chunks (if write() works correctly)
-        #assert len(rev._data._chunks) == 0
-        # read all we have (== nothing)
-        read_data = rev.read()
-        assert read_data == data
-
-    def test_read_more_than_is_there(self):
-        assert self.rev.read(len(self.raw_data) + 1) == self.raw_data
-
-    def test_full_read(self):
-        assert self.rev.read() == self.raw_data
-
-    def test_read_first_bytes(self):
-        assert self.rev.read(5) == self.raw_data[:5]
-
-    def test_read_successive(self):
-        assert self.rev.read(5) == self.raw_data[:5]
-        assert self.rev.read(5) == self.raw_data[5:10]
-        assert self.rev.read(5) == self.raw_data[10:15]
-        assert self.rev.read() == self.raw_data[15:]
-
-    def test_with_different_chunksizes(self):
-        # mainly a write() test
-        for chunksize in range(1, len(self.raw_data) + 2):
-            Data.chunksize = chunksize
-            data = Data()
-            data.write(self.raw_data)
-            data.close()
-            assert data.read() == self.raw_data
-
-    def test_with_different_offsets(self):
-        offsets = range(self.rev._data._last_chunk.chunksize)
-        for offset in offsets:
-            data = Data()
-            data.write(self.raw_data)
-            data.close()
-            assert data.read(offset) == self.raw_data[:offset]
-            assert data.read() == self.raw_data[offset:]
-
-    def test_seek_and_tell(self):
-        data_len = len(self.raw_data)
-        half = data_len / 2
-        tests = [
-            (0, 0),
-            (0, 1),
-            (0, data_len-1),
-            (0, data_len),
-            (0, data_len+1), # beyond EOF
-            (0, half),
-            (1, 0),
-            (1, half),
-            (1, -half),
-            (1, 0),
-            (2, 0),
-            (2, -1),
-            (2, -data_len+1),
-            (2, -data_len),
-        ]
-        sio = StringIO(self.raw_data)
-        for mode, pos in tests:
-            if mode == 1: # relative
-                sio.seek(half, 0)
-                self.rev._data.seek(half, 0)
-            sio.seek(pos, mode)
-            self.rev._data.seek(pos, mode)
-            assert sio.tell() == self.rev._data.tell()
-            assert sio.read() == self.rev._data.read()
-
--- a/MoinMoin/storage/_tests/test_indexing.py	Sat Sep 10 03:23:36 2011 +0200
+++ /dev/null	Thu Jan 01 00:00:00 1970 +0000
@@ -1,97 +0,0 @@
-# Copyright: 2011 MoinMoin:MichaelMayorov
-# License: GNU GPL v2 (or any later version), see LICENSE.txt for details.
-
-"""
-    MoinMoin - Test - indexing
-"""
-
-import py
-
-from MoinMoin._tests import update_item, nuke_item
-from MoinMoin._tests.wikiconfig import Config
-from MoinMoin.storage.middleware.indexing import ItemIndex
-from MoinMoin.config import NAME
-
-# Revisions for tests
-document_revs = [{"wikiname": u"Test",
-                  "name": u"DocumentOne",
-                  "uuid": u"68054804bd7141609b7c441143adf83d",
-                  "rev_no": 0,
-                  "mtime":  1172969203.1,
-                  "content": u"Some not very long content line",
-                  "contenttype": u"text/plain;charset=utf-8",
-                  "tags": [u"Rest", u"in", u"peace"],
-                  "itemlinks": [u"Home", u"Find"],
-                  "itemtransclusions": [u"Another", u"Stuff"],
-                  "language": u"en",
-                  "address": u"127.0.0.1",
-                  "hostname": u"localhost",
-                 },
-                 {"wikiname": u"Test",
-                  "name": u"DocumentOne",
-                  "uuid": u"68054804bd7141609b7c441143adf83d",
-                  "rev_no": 1,
-                  "mtime":  1172969203.9,
-                  "content": u"This line should be much better, but it isn't",
-                  "contenttype": u"text/plain;charset=utf-8",
-                  "tags": [u"first_tag", u"second_tag"],
-                  "itemlinks": [u"Home", u"Find"],
-                  "itemtransclusions": [u"Another", u"Stuff"],
-                  "language": u"en",
-                  "address": u"127.0.0.1",
-                  "hostname": u"localhost",
-                 },
-                ]
-
-class TestIndexing(object):
-
-    def setup_method(self, method):
-        self.wikiconfig = Config()
-        self.item_index = ItemIndex(self.wikiconfig, force_create=True)
-        self.all_revs_ix = self.item_index.index_object.all_revisions_index
-        self.latest_revs_ix = self.item_index.index_object.latest_revisions_index
-
-    def teardown_method(self, method):
-        self.item_index.remove_index()
-
-    def test_create_item(self):
-        """ Try to search for non-existent revision, add it to backend and then search again """
-        revision = document_revs[0]
-        with self.all_revs_ix.searcher() as searcher:
-            found_document = searcher.document(name_exact=revision[NAME])
-        assert found_document is None
-        with self.latest_revs_ix.searcher() as searcher:
-            found_document = searcher.document(name_exact=revision[NAME])
-        assert found_document is None
-        backend_rev = update_item(revision[NAME], revision["rev_no"],
-                                  revision, revision["content"])
-        with self.all_revs_ix.searcher() as searcher:
-            found_document = searcher.document(name_exact=revision[NAME])
-        assert found_document is not None and found_document[NAME] == revision[NAME]
-        with self.latest_revs_ix.searcher() as searcher:
-            found_document = searcher.document(name_exact=revision[NAME])
-        assert found_document is not None and found_document[NAME] == revision[NAME]
-
-    def test_create_rev(self):
-        """ Create 2 item revisions and try to search for them in backend """
-        revision1, revision2 = document_revs
-        backend_rev = update_item(revision1[NAME], revision1["rev_no"], revision1, revision1["content"])
-        backend_rev = update_item(revision2[NAME], revision2["rev_no"], revision2, revision2["content"])
-        with self.all_revs_ix.searcher() as searcher:
-            found_documents = list(searcher.documents(name_exact=revision1[NAME]))
-        assert len(found_documents) == 2
-        with self.latest_revs_ix.searcher() as searcher:
-            found_documents = list(searcher.documents(name_exact=revision2[NAME]))
-        assert len(found_documents) == 1 and found_documents[0]["rev_no"] == 1
-
-    def test_destroy(self):
-        """ Create & Destroy test for backend item """
-        py.test.skip("Anonymous can't destroy stuff from backend, thus we leave this test for now")
-        revision = document_revs[0]
-        backend_rev = update_item(revision[NAME], revision["rev_no"], revision, revision["content"])
-        with self.all_revs_ix.searcher() as searcher:
-            found_documents = list(searcher.documents(name_exact=revision[NAME]))
-            assert len(found_documents) == 1
-            nuke_item(revision[NAME])
-            found_document = searcher.document(name_exact=revision[NAME])
-            assert found_document is None
--- a/MoinMoin/storage/_tests/test_middleware_acl.py	Sat Sep 10 03:23:36 2011 +0200
+++ /dev/null	Thu Jan 01 00:00:00 1970 +0000
@@ -1,111 +0,0 @@
-# Copyright: 2009 MoinMoin:ChristopherDenter
-# License: GNU GPL v2 (or any later version), see LICENSE.txt for details.
-
-"""
-    MoinMoin - Test - ACLMiddleWare
-
-    This defines tests for the ACLMiddleWare
-"""
-
-
-import pytest
-
-from flask import g as flaskg
-
-from MoinMoin.config import ACL
-from MoinMoin.storage.error import AccessDeniedError
-from MoinMoin.storage._tests.test_backends import BackendTest
-from MoinMoin._tests import wikiconfig
-
-
-class TestACLMiddleware(BackendTest):
-    class Config(wikiconfig.Config):
-        content_acl = dict(default=u"All:admin,read,write,destroy,create")
-
-
-    def create_backend(self):
-        # Called before *each* testcase. Provides fresh backends every time.
-        return flaskg.storage
-
-    def kill_backend(self):
-        pass
-
-
-    def get_item(self, name):
-        # Just as a shortcut
-        return flaskg.storage.get_item(name)
-
-    def create_item_acl(self, name, acl):
-        item = flaskg.storage.create_item(name)
-        rev = item.create_revision(0)
-        rev[ACL] = acl
-        item.commit()
-        return item
-
-
-    def test_noaccess(self):
-        name = u"noaccess"
-        self.create_item_acl(name, u"All:")
-        assert pytest.raises(AccessDeniedError, self.get_item, name)
-
-    def test_create_item(self):
-        class Config(wikiconfig.Config):
-            # no create
-            content_acl = dict(default=u"All:admin,read,write,destroy")
-
-        backend = flaskg.storage
-        assert pytest.raises(AccessDeniedError, backend.create_item, u"I will never exist")
-
-        item = self.create_item_acl(u"i will exist!", u"All:read,write")
-        rev = item.create_revision(1)
-        data = "my very existent data"
-        rev.write(data)
-        item.commit()
-        assert item.get_revision(1).read() == data
-
-    def test_read_access_allowed(self):
-        name = u"readaccessallowed"
-        self.create_item_acl(name, u"All:read")
-        # Should simply pass...
-        item = self.get_item(name)
-
-        # Should not...
-        assert pytest.raises(AccessDeniedError, item.create_revision, 1)
-        assert pytest.raises(AccessDeniedError, item.change_metadata)
-
-    def test_write_after_create(self):
-        name = u"writeaftercreate"
-        item = self.create_item_acl(name, u"All:")
-        assert pytest.raises(AccessDeniedError, item.create_revision, 1)
-
-    def test_modify_without_acl_change(self):
-        name = u"copy_without_acl_change"
-        acl = u"All:read,write"
-        self.create_item_acl(name, acl)
-        item = self.get_item(name)
-        rev = item.create_revision(1)
-        # This should pass
-        rev[ACL] = acl
-        item.commit()
-
-    def test_copy_with_acl_change(self):
-        name = u"copy_with_acl_change"
-        acl = u"All:read,write"
-        self.create_item_acl(name, acl)
-        item = self.get_item(name)
-        rev = item.create_revision(1)
-        # without admin rights it is disallowed to change ACL
-        pytest.raises(AccessDeniedError, rev.__setitem__, ACL, acl + u",destroy")
-
-    def test_write_without_read(self):
-        name = u"write_but_not_read"
-        acl = u"All:write"
-        item = flaskg.storage.create_item(name)
-        rev = item.create_revision(0)
-        rev[ACL] = acl
-        rev.write("My name is " + name)
-        item.commit()
-
-        pytest.raises(AccessDeniedError, item.get_revision, -1)
-        pytest.raises(AccessDeniedError, item.get_revision, 0)
-
--- a/MoinMoin/storage/_tests/test_serialization.py	Sat Sep 10 03:23:36 2011 +0200
+++ /dev/null	Thu Jan 01 00:00:00 1970 +0000
@@ -1,168 +0,0 @@
-# Copyright: 2009-2010 MoinMoin:ThomasWaldmann
-# License: GNU GPL v2 (or any later version), see LICENSE.txt for details.
-
-"""
-    MoinMoin - Test - XML (de)serialization
-
-    TODO: provide fresh backend per test class (or even per test method?).
-    TODO: use xpath for testing (or any other way so sequence of metadata
-          keys does not matter)
-"""
-
-
-from StringIO import StringIO
-
-from flask import g as flaskg
-
-from MoinMoin._tests import become_trusted, update_item
-from MoinMoin.storage.middleware.serialization import Entry, create_value_object, serialize, unserialize
-
-XML_DECL = '<?xml version="1.0" encoding="UTF-8"?>\n'
-
-
-class TestSerializeRev(object):
-
-    def test_serialize_rev(self):
-        become_trusted()
-        params = (u'foo1', 0, dict(m1=u"m1", mtime=1234), 'bar1')
-        item = update_item(*params)
-        rev = item.get_revision(0)
-        xmlfile = StringIO()
-        serialize(rev, xmlfile)
-        xml = xmlfile.getvalue()
-        expected = (XML_DECL +
-                    '<revision revno="0">'
-                    '<meta>'
-                    '<entry key="mimetype"><str>application/octet-stream</str>\n</entry>\n'
-                    '<entry key="sha1"><str>763675d6a1d8d0a3a28deca62bb68abd8baf86f3</str>\n</entry>\n'
-                    '<entry key="m1"><str>m1</str>\n</entry>\n'
-                    '<entry key="uuid"><str>foo1</str>\n</entry>\n'
-                    '<entry key="name"><str>foo1</str>\n</entry>\n'
-                    '<entry key="mtime"><int>1234</int>\n</entry>\n'
-                    '<entry key="size"><int>4</int>\n</entry>\n'
-                    '</meta>\n'
-                    '<data coding="base64"><chunk>YmFyMQ==</chunk>\n</data>\n'
-                    '</revision>\n')
-        print expected
-        print xml
-        assert expected == xml
-
-
-class TestSerializeItem(object):
-
-    def test_serialize_item(self):
-        become_trusted()
-        testparams = [
-            (u'foo2', 0, dict(m1=u"m1r0", mtime=1234), 'bar2'),
-            (u'foo2', 1, dict(m1=u"m1r1", mtime=1235), 'baz2'),
-        ]
-        for params in testparams:
-            item = update_item(*params)
-        xmlfile = StringIO()
-        serialize(item, xmlfile)
-        xml = xmlfile.getvalue()
-        expected = (XML_DECL +
-                    '<item name="foo2">'
-                    '<meta></meta>\n'
-                    '<revision revno="0">'
-                    '<meta>'
-                    '<entry key="mimetype"><str>application/octet-stream</str>\n</entry>\n'
-                    '<entry key="sha1"><str>033c4846b506a4a48e32cdf54515c91d3499adb3</str>\n</entry>\n'
-                    '<entry key="m1"><str>m1r0</str>\n</entry>\n'
-                    '<entry key="uuid"><str>foo2</str>\n</entry>\n'
-                    '<entry key="name"><str>foo2</str>\n</entry>\n'
-                    '<entry key="mtime"><int>1234</int>\n</entry>\n'
-                    '<entry key="size"><int>4</int>\n</entry>\n'
-                    '</meta>\n'
-                    '<data coding="base64"><chunk>YmFyMg==</chunk>\n</data>\n'
-                    '</revision>\n'
-                    '<revision revno="1">'
-                    '<meta>'
-                    '<entry key="mimetype"><str>application/octet-stream</str>\n</entry>\n'
-                    '<entry key="sha1"><str>f91d8fc20a5de853e62105cc1ee0bf47fd7ded0f</str>\n</entry>\n'
-                    '<entry key="m1"><str>m1r1</str>\n</entry>\n'
-                    '<entry key="uuid"><str>foo2</str>\n</entry>\n'
-                    '<entry key="name"><str>foo2</str>\n</entry>\n'
-                    '<entry key="mtime"><int>1235</int>\n</entry>\n'
-                    '<entry key="size"><int>4</int>\n</entry>\n'
-                    '</meta>\n'
-                    '<data coding="base64"><chunk>YmF6Mg==</chunk>\n</data>\n'
-                    '</revision>\n'
-                    '</item>\n')
-        print expected
-        print xml
-        assert expected == xml
-
-class TestSerializeBackend(object):
-
-    def test_serialize_backend(self):
-        become_trusted()
-        testparams = [
-            (u'foo3', 0, dict(m1=u"m1r0foo3"), 'bar1'),
-            (u'foo4', 0, dict(m1=u"m1r0foo4"), 'bar2'),
-            (u'foo4', 1, dict(m1=u"m1r1foo4"), 'baz2'),
-        ]
-        for params in testparams:
-            update_item(*params)
-        xmlfile = StringIO()
-        serialize(flaskg.storage, xmlfile)
-        xml = xmlfile.getvalue()
-        assert xml.startswith(XML_DECL + '<backend>')
-        assert xml.endswith('</backend>\n')
-        # this is not very precise testing:
-        assert '<item name="foo3"><meta></meta>' in xml
-        assert '<revision revno="0"><meta>' in xml
-        assert '<entry key="mimetype"><str>application/octet-stream</str>\n</entry>' in xml
-        assert '<entry key="m1"><str>m1r0foo3</str>\n</entry>' in xml
-        assert '<entry key="name"><str>foo3</str>\n</entry>' in xml
-        assert '<data coding="base64"><chunk>YmFyMQ==</chunk>\n</data>' in xml
-        assert '<item name="foo4"><meta></meta>' in xml
-        assert '<entry key="m1"><str>m1r0foo4</str>\n</entry>' in xml
-        assert '<entry key="name"><str>foo4</str>\n</entry>' in xml
-        assert '<data coding="base64"><chunk>YmFyMg==</chunk>\n</data>' in xml
-        assert '<revision revno="1"><meta>' in xml
-        assert '<entry key="m1"><str>m1r1foo4</str>\n</entry>' in xml
-        assert '<entry key="name"><str>foo4</str>\n</entry>' in xml
-        assert '<data coding="base64"><chunk>YmF6Mg==</chunk>\n</data>' in xml
-
-
-class TestSerializer2(object):
-    def test_Entry(self):
-        test_data = [
-            ('foo', 'bar', '<entry key="foo"><bytes>bar</bytes>\n</entry>\n'),
-            (u'foo', u'bar', '<entry key="foo"><str>bar</str>\n</entry>\n'),
-            ('''<"a"&'b'>''', '<c&d>', '''<entry key="&lt;&quot;a&quot;&amp;'b'&gt;"><bytes>&lt;c&amp;d&gt;</bytes>\n</entry>\n'''),
-        ]
-        for k, v, expected_xml in test_data:
-            e = Entry(k, v)
-            xmlfile = StringIO()
-            serialize(e, xmlfile)
-            xml = xmlfile.getvalue()
-            assert xml == XML_DECL + expected_xml
-
-        for expected_k, expected_v, xml in test_data:
-            xmlfile = StringIO(xml)
-            result = {}
-            unserialize(Entry(attrs={'key': expected_k}, rev_or_item=result), xmlfile)
-            assert expected_k in result
-            assert result[expected_k] == expected_v
-
-    def test_Values(self):
-        test_data = [
-            ('bar', '<bytes>bar</bytes>\n'),
-            (u'bar', '<str>bar</str>\n'),
-            (42, '<int>42</int>\n'),
-            (True, '<bool>True</bool>\n'),
-            (23.42, '<float>23.42</float>\n'),
-            (complex(1.2, 2.3), '<complex>(1.2+2.3j)</complex>\n'),
-            ((1, 2), '<tuple><int>1</int>\n<int>2</int>\n</tuple>\n'),
-            ((1, u'bar'), '<tuple><int>1</int>\n<str>bar</str>\n</tuple>\n'),
-            ((1, (u'bar', u'baz')), '<tuple><int>1</int>\n<tuple><str>bar</str>\n<str>baz</str>\n</tuple>\n</tuple>\n'),
-        ]
-        for v, expected_xml in test_data:
-            v = create_value_object(v)
-            xmlfile = StringIO()
-            serialize(v, xmlfile)
-            xml = xmlfile.getvalue()
-            assert xml == XML_DECL + expected_xml
-
--- a/MoinMoin/storage/_tests/tests_backend_api.py	Sat Sep 10 03:23:36 2011 +0200
+++ /dev/null	Thu Jan 01 00:00:00 1970 +0000
@@ -1,75 +0,0 @@
-# Copyright: 2008 MoinMoin:JohannesBerg
-# License: GNU GPL v2 (or any later version), see LICENSE.txt for details.
-
-"""
-    MoinMoin - Test - storage API
-"""
-
-import pytest
-
-from MoinMoin.storage import Backend, Item, StoredRevision, NewRevision
-from MoinMoin.storage.error import NoSuchItemError
-
-class TestBackendAPI(object):
-    def test_has_item(self):
-        class HasNoItemsBackend(Backend):
-            def get_item(self, name):
-                raise NoSuchItemError('should not be visible')
-        be = HasNoItemsBackend()
-        assert not be.has_item('asdf')
-
-    def test_unicode_meta(self):
-        class HasAnyItemBackend(Backend):
-            def get_item(self, name):
-                return Item(self, name)
-            def _change_item_metadata(self, item):
-                pass
-            def _get_item_metadata(self, item):
-                return {}
-            def _publish_item_metadata(self, item):
-                pass
-        be = HasAnyItemBackend()
-        item = be.get_item('a')
-        item.change_metadata()
-        item[u'a'] = u'b'
-        item.publish_metadata()
-
-    def test_reserved_metadata(self):
-        class ReservedMetaDataBackend(Backend):
-            def get_item(self, name):
-                return Item(self, name)
-            def _change_item_metadata(self, item):
-                pass
-            def _get_item_metadata(self, item):
-                return {'__asdf': 'xx'}
-            def _publish_item_metadata(self, item):
-                pass
-            def _get_revision(self, item, revno):
-                assert revno == 0
-                return StoredRevision(item, revno)
-            def _create_revision(self, item, revno):
-                assert revno == 1
-                return NewRevision(item, revno)
-            def _rollback_item(self, item):
-                pass
-            def _get_revision_metadata(self, rev):
-                return {'__asdf': 'xx'}
-            def _list_revisions(self, item):
-                return [0]
-
-        be = ReservedMetaDataBackend()
-        item = be.get_item('a')
-        assert not item.keys()
-
-        oldrev = item.get_revision(0)
-        assert not oldrev.keys()
-
-        newrev = item.create_revision(1)
-        pytest.raises(TypeError, newrev.__setitem__, '__reserved')
-
-        assert not newrev.keys()
-
-        newrev['a'] = 'b'
-        assert newrev['a'] == 'b'
-
-        item.rollback()
--- a/MoinMoin/storage/backends/__init__.py	Sat Sep 10 03:23:36 2011 +0200
+++ /dev/null	Thu Jan 01 00:00:00 1970 +0000
@@ -1,130 +0,0 @@
-# Copyright: 2007 MoinMoin:HeinrichWendel
-# Copyright: 2008 MoinMoin:PawelPacana
-# Copyright: 2009 MoinMoin:ChristopherDenter
-# Copyright: 2009-2011 MoinMoin:ThomasWaldmann
-# License: GNU GPL v2 (or any later version), see LICENSE.txt for details.
-
-"""
-MoinMoin - Storage Backends
-"""
-
-
-from flask import current_app as app
-from flask import g as flaskg
-
-from MoinMoin.storage.error import NoSuchItemError, RevisionAlreadyExistsError
-from MoinMoin.error import ConfigurationError
-from MoinMoin.storage.backends import fs, fs2, fs19, memory
-from MoinMoin.storage.middleware import router
-from MoinMoin.storage.middleware.serialization import unserialize
-
-CONTENT = 'content'
-USERPROFILES = 'userprofiles'
-TRASH = 'trash'
-
-FS19_PREFIX = "fs19:"
-FS_PREFIX = "fs:"
-FS2_PREFIX = "fs2:"
-HG_PREFIX = "hg:"
-SQLA_PREFIX = "sqla:"
-MEMORY_PREFIX = "memory:"
-
-
-def create_simple_mapping(backend_uri='fs:instance', content_acl=None, user_profile_acl=None):
-    """
-    When configuring storage, the admin needs to provide a namespace_mapping.
-    To ease creation of such a mapping, this function provides sane defaults
-    for different types of backends.
-    The admin can just call this function, pass a hint on what type of backend
-    he wants to use and a proper mapping is returned.
-    If the user did not specify anything, we use three FSBackends with user/,
-    data/ and trash/ directories by default.
-    """
-    # XXX How to properly get these values from the users config?
-    ns_content = u'/'
-    ns_user_profile = u'UserProfile/'
-    ns_trash = u'Trash/'
-
-    if not content_acl:
-        content_acl = dict(
-            before=u'',
-            default=u'All:read,write,create', # mostly harmless by default
-            after=u'',
-            hierarchic=False,
-        )
-
-    if not user_profile_acl:
-        user_profile_acl = dict(
-            before=u'All:', # harmless by default
-            default=u'',
-            after=u'',
-            hierarchic=False,
-        )
-
-    def _create_backends(BackendClass, backend_uri):
-        backends = []
-        for name in [CONTENT, USERPROFILES, TRASH, ]:
-            parms = dict(nsname=name)
-            backend = BackendClass(backend_uri % parms)
-            backends.append(backend)
-        return backends
-
-    if backend_uri.startswith(FS_PREFIX):
-        instance_uri = backend_uri[len(FS_PREFIX):]
-        content, userprofile, trash = _create_backends(fs.FSBackend, instance_uri)
-
-    elif backend_uri.startswith(FS2_PREFIX):
-        instance_uri = backend_uri[len(FS2_PREFIX):]
-        content, userprofile, trash = _create_backends(fs2.FS2Backend, instance_uri)
-
-    elif backend_uri.startswith(HG_PREFIX):
-        # Due to external dependency that may not always be present, import hg backend here:
-        from MoinMoin.storage.backends import hg
-        instance_uri = backend_uri[len(HG_PREFIX):]
-        content, userprofile, trash = _create_backends(hg.MercurialBackend, instance_uri)
-
-    elif backend_uri.startswith(SQLA_PREFIX):
-        # XXX Move this import to the module level if we depend on sqlalchemy and it is in sys.path
-        from MoinMoin.storage.backends import sqla
-        instance_uri = backend_uri[len(SQLA_PREFIX):]
-        content, userprofile, trash = _create_backends(sqla.SQLAlchemyBackend, instance_uri)
-
-    elif backend_uri == MEMORY_PREFIX:
-        instance_uri = ''
-        content, userprofile, trash = _create_backends(memory.MemoryBackend, instance_uri)
-
-    elif backend_uri.startswith(FS19_PREFIX):
-        # special case: old moin19 stuff
-        from os import path
-        data_dir = backend_uri[len(FS19_PREFIX):]
-        userprofile = fs19.FSUserBackend(path.join(data_dir, 'user'), '/dev/shm') # assumes user below data_dir
-        content = fs19.FSPageBackend(data_dir, '/dev/shm', deleted_mode='keep', default_markup=u'wiki')
-        namespace_mapping = [
-                        # no trash
-                        (ns_user_profile, userprofile, user_profile_acl),
-                        (ns_content, content, content_acl),
-        ]
-        return namespace_mapping
-
-    else:
-        raise ConfigurationError("No proper backend uri provided. Given: %r" % backend_uri)
-
-    namespace_mapping = [
-                    (ns_trash, trash, content_acl),
-                    (ns_user_profile, userprofile, user_profile_acl),
-                    (ns_content, content, content_acl),
-    ]
-
-    return namespace_mapping
-
-
-def upgrade_sysitems(xmlfile):
-    """
-    Upgrade the wiki's system pages from an XML file.
-    """
-    tmp_backend = router.RouterBackend([('/', memory.MemoryBackend())], cfg=app.cfg)
-    unserialize(tmp_backend, xmlfile)
-
-    # clone to real backend from config WITHOUT checking ACLs!
-    flaskg.unprotected_storage.clone(tmp_backend)
-
--- a/MoinMoin/storage/backends/_flatutils.py	Sat Sep 10 03:23:36 2011 +0200
+++ /dev/null	Thu Jan 01 00:00:00 1970 +0000
@@ -1,74 +0,0 @@
-# Copyright: 2010 MoinMoin:ThomasWaldmann
-# License: GNU GPL v2 (or any later version), see LICENSE.txt for details.
-
-"""
-    MoinMoin - helpers for flatfile meta/data stores
-"""
-
-from MoinMoin.config import NAME, ACL, CONTENTTYPE, MTIME, LANGUAGE
-
-
-def split_body(body):
-    """ Extract the processing instructions / acl / etc. at the beginning of a page's body.
-
-        Hint: if you have a Page object p, you already have the result of this function in
-              p.meta and (even better) parsed/processed stuff in p.pi.
-
-        Returns a list of (pi, restofline) tuples and a string with the rest of the body.
-    """
-    pi = {}
-    while body.startswith('#'):
-        try:
-            line, body = body.split('\n', 1) # extract first line
-            line = line.rstrip('\r')
-        except ValueError:
-            line = body
-            body = ''
-
-        # end parsing on empty (invalid) PI
-        if line == "#":
-            body = line + '\n' + body
-            break
-
-        if line[1] == '#':# two hash marks are a comment
-            comment = line[2:]
-            if not comment.startswith(' '):
-                # we don't require a blank after the ##, so we put one there
-                comment = ' ' + comment
-                line = '##%s' % comment
-
-        verb, args = (line[1:] + ' ').split(' ', 1) # split at the first blank
-        pi.setdefault(verb.lower(), []).append(args.strip())
-
-    for key, value in pi.iteritems():
-        if key in ['#', ]:
-            # transform the lists to tuples:
-            pi[key] = tuple(value)
-        elif key in ['acl', ]:
-            # join the list of values to a single value
-            pi[key] = u' '.join(value)
-        else:
-            # for keys that can't occur multiple times, don't use a list:
-            pi[key] = value[-1] # use the last value to copy 1.9 parsing behaviour
-
-    return pi, body
-
-
-def add_metadata_to_body(metadata, data):
-    """
-    Adds the processing instructions to the data.
-    """
-    meta_keys = [NAME, ACL, CONTENTTYPE, MTIME, LANGUAGE, ]
-
-    metadata_data = ""
-    for key, value in metadata.iteritems():
-        if key not in meta_keys:
-            continue
-        # special handling for list metadata
-        if isinstance(value, (list, tuple)):
-            for line in value:
-                metadata_data += "#%s %s\n" % (key, line)
-        else:
-            metadata_data += "#%s %s\n" % (key, value)
-    return metadata_data + data
-
--- a/MoinMoin/storage/backends/_fsutils.py	Sat Sep 10 03:23:36 2011 +0200
+++ /dev/null	Thu Jan 01 00:00:00 1970 +0000
@@ -1,97 +0,0 @@
-# Copyright: 2010 MoinMoin:ThomasWaldmann
-# License: GNU GPL v2 (or any later version), see LICENSE.txt for details.
-
-"""
-    MoinMoin - helper for fs based backends
-"""
-
-
-import re
-
-from MoinMoin import config
-
-
-# Precompiled patterns for file name [un]quoting
-UNSAFE = re.compile(r'[^a-zA-Z0-9_]+')
-QUOTED = re.compile(r'\(([a-fA-F0-9]+)\)')
-
-
def quoteWikinameFS(wikiname, charset=config.charset):
    """ Return file system representation of a Unicode WikiName.

    Runs of unsafe characters (anything but [a-zA-Z0-9_]) are replaced by
    their hex byte values wrapped in parentheses, e.g. u'a b' -> 'a(20)b'.

    Warning: will raise UnicodeError if wikiname can not be encoded using
    charset. The default value of config.charset, 'utf-8' can encode any
    character.

    :param wikiname: wiki name [unicode]
    :param charset: charset to encode string (before quoting)
    :rtype: string
    :returns: quoted name, safe for any file system
    """
    encoded = wikiname.encode(charset)

    pieces = []
    pos = 0
    for unsafe in UNSAFE.finditer(encoded):
        # copy the safe run preceding this unsafe run verbatim
        pieces.append(encoded[pos:unsafe.start()])
        pos = unsafe.end()
        # replace the unsafe run by parenthesized lowercase hex bytes
        pieces.append('(')
        pieces.extend('%02x' % ord(ch) for ch in unsafe.group())
        pieces.append(')')

    # trailing safe run
    pieces.append(encoded[pos:])
    return ''.join(pieces)
-
-
class InvalidFileNameError(Exception):
    """ Raised by unquoteWikiname when a quoted file name cannot be decoded. """
    pass
-
-
def unquoteWikiname(filename, charset=config.charset):
    """ Return Unicode WikiName from quoted file name.

    raises an InvalidFileNameError in case of unquoting problems.

    :param filename: quoted wiki name
    :param charset: charset to use for decoding (after unquoting)
    :rtype: unicode
    :returns: WikiName
    """
    # From some places we get called with Unicode strings.
    # Fix: encode with the *same* charset we later decode with; previously
    # this hard-coded config.charset, which broke round-tripping whenever a
    # caller passed a non-default charset.
    if isinstance(filename, unicode):
        filename = filename.encode(charset)

    parts = []
    start = 0
    for needle in QUOTED.finditer(filename):
        # append leading unquoted stuff
        parts.append(filename[start:needle.start()])
        start = needle.end()
        # Append quoted stuff
        group = needle.group(1)
        # a quoted run encodes whole bytes, so it must have even length
        if len(group) % 2 != 0:
            raise InvalidFileNameError(filename)
        try:
            for i in range(0, len(group), 2):
                byte = group[i:i+2]
                character = chr(int(byte, 16))
                parts.append(character)
        except ValueError:
            # byte not in hex, e.g 'xy' (defensive; QUOTED only matches hex digits)
            raise InvalidFileNameError(filename)

    # append rest of string
    if start == 0:
        # no quoted runs at all: the name is already plain
        wikiname = filename
    else:
        parts.append(filename[start:len(filename)])
        wikiname = ''.join(parts)

    return wikiname.decode(charset)
-
--- a/MoinMoin/storage/backends/fileserver.py	Sat Sep 10 03:23:36 2011 +0200
+++ /dev/null	Thu Jan 01 00:00:00 1970 +0000
@@ -1,215 +0,0 @@
-# Copyright: 2008-2011 MoinMoin:ThomasWaldmann
-# License: GNU GPL v2 (or any later version), see LICENSE.txt for details.
-
-"""
-MoinMoin - file server backend
-
-You can use this backend to directly get read-only access to your
-wiki server's filesystem.
-
-TODO: nearly working, but needs more work at other places,
-      e.g. in the router backend, to be useful.
-"""
-
-
-import os, stat
-from StringIO import StringIO
-
-from MoinMoin import log
-logging = log.getLogger(__name__)
-
-from MoinMoin import config
-
-from MoinMoin.storage import Backend, Item, StoredRevision
-from MoinMoin.storage.error import NoSuchItemError, NoSuchRevisionError
-from MoinMoin.util.mimetype import MimeType
-
-from MoinMoin.config import NAME, ACL, CONTENTTYPE, ACTION, COMMENT, MTIME, SIZE, HASH_ALGORITHM
-
class FSError(Exception):
    """ file serving backend error (base class for this module's errors) """
-
class NoFileError(FSError):
    """ tried to create a FileItem for a path that is not a regular file """
-
class NoDirError(FSError):
    """ tried to create a DirItem for a path that is not a directory """
-
class FileServerBackend(Backend):
    """
    File Server Backend - serves files directly from host's filesystem

    For method docstrings, please see the "Backend" base class.
    """
    def __init__(self, root_dir):
        """
        Initialise file serving backend.

        :type root_dir: unicode
        :param root_dir: root directory below which we serve files
        """
        root_dir = root_dir.rstrip('/')
        assert root_dir
        self.root_dir = unicode(root_dir)

    def _item2path(self, itemname):
        """Map an item name to an absolute filesystem path below the root."""
        # XXX check whether ../.. precautions are needed,
        # looks like not, because moin sanitizes the item name before
        # calling the storage code
        return os.path.join(self.root_dir, itemname)

    def _path2item(self, path):
        """Map an absolute filesystem path below the root back to an item name."""
        root = self.root_dir
        assert path.startswith(root)
        return path[len(root)+1:]

    def iter_items_noindex(self):
        """Walk the tree and yield a DirItem/FileItem for everything found."""
        for dirpath, dirnames, filenames in os.walk(self.root_dir):
            name = self._path2item(dirpath)
            if name:
                # XXX currently there is an issue with whoosh indexing if fileserver
                # backend is mounted at / and the item name is empty, resulting in a
                # completely empty item name - avoid this for now.
                yield DirItem(self, name)
            for filename in filenames:
                try:
                    item = FileItem(self, self._path2item(os.path.join(dirpath, filename)))
                except (NoFileError, NoSuchItemError):
                    pass  # not a regular file, maybe socket or ...
                else:
                    yield item

    iteritems = iter_items_noindex

    def get_item(self, itemname):
        """Return a FileItem or DirItem; raise NoSuchItemError if neither exists."""
        try:
            return FileItem(self, itemname)
        except NoFileError:
            try:
                return DirItem(self, itemname)
            except NoDirError:
                raise NoSuchItemError()

    def _get_item_metadata(self, item):
        return item._fs_meta

    def _list_revisions(self, item):
        return item._fs_revisions

    def _get_revision(self, item, revno):
        if isinstance(item, FileItem):
            return FileRevision(item, revno)
        elif isinstance(item, DirItem):
            return DirRevision(item, revno)
        else:
            # fix: was a bare `raise` outside any except block, which is
            # invalid and crashed with an unrelated error; raise an explicit
            # error for unexpected item types instead
            raise TypeError("Unknown item type: %r" % (item, ))

    def _get_revision_metadata(self, rev):
        return rev._fs_meta

    def _open_revision_data(self, rev):
        """Lazily open the revision's data file (shared by read/seek/tell)."""
        if rev._fs_data_file is None:
            rev._fs_data_file = open(rev._fs_data_fname, 'rb') # XXX keeps file open as long as rev exists

    def _read_revision_data(self, rev, chunksize):
        self._open_revision_data(rev)
        return rev._fs_data_file.read(chunksize)

    def _seek_revision_data(self, rev, position, mode):
        self._open_revision_data(rev)
        return rev._fs_data_file.seek(position, mode)

    def _tell_revision_data(self, rev):
        self._open_revision_data(rev)
        return rev._fs_data_file.tell()
-
-
-# Specialized Items/Revisions
-
class FileDirItem(Item):
    """ Common base for filesystem-backed file and directory items. """
    def __init__(self, backend, name):
        Item.__init__(self, backend, name)
        path = backend._item2path(name)
        try:
            stat_result = os.stat(path)
        except OSError as err:
            raise NoSuchItemError("No such item, %r" % name)
        self._fs_stat = stat_result
        self._fs_filepath = path
        self._fs_revisions = [0]  # there is only 1 revision of each file/dir
        self._fs_meta = {}  # no item level metadata
-
class DirItem(FileDirItem):
    """ A filesystem directory """
    def __init__(self, backend, name):
        FileDirItem.__init__(self, backend, name)
        # reject anything that is not a directory (regular files, sockets, ...)
        if not stat.S_ISDIR(self._fs_stat.st_mode):
            raise NoDirError("Item is not a directory: %r" % name)
-
class FileItem(FileDirItem):
    """ A filesystem file """
    def __init__(self, backend, name):
        FileDirItem.__init__(self, backend, name)
        # reject anything that is not a regular file (directories, sockets, ...)
        if not stat.S_ISREG(self._fs_stat.st_mode):
            raise NoFileError("Item is not a regular file: %r" % name)
-
-
class FileDirRevision(StoredRevision):
    """ A filesystem file or directory.

    Filesystem items are unversioned: only revno 0 exists; -1 is accepted
    as "latest" and mapped to 0.
    """
    def __init__(self, item, revno):
        if revno > 0:
            raise NoSuchRevisionError('Item %r has no revision %d (filesystem items just have revno 0)!' %
                    (item.name, revno))
        if revno == -1:
            revno = 0
        StoredRevision.__init__(self, item, revno)
        filepath = item._fs_filepath
        st = item._fs_stat
        # synthesize revision metadata from the stat() result
        meta = { # make something up
            NAME: item.name,
            MTIME: int(st.st_mtime),
            ACTION: u'SAVE',
            SIZE: st.st_size,
            HASH_ALGORITHM: u'' # XXX fake it, send_file needs it for etag and crashes without the hash
        }
        self._fs_meta = meta
        self._fs_data_fname = filepath
        self._fs_data_file = None  # opened lazily by the backend
-
class DirRevision(FileDirRevision):
    """ A filesystem directory, rendered as a generated wiki page
    that lists the directory's contents (subdirectories first). """
    def __init__(self, item, revno):
        FileDirRevision.__init__(self, item, revno)
        self._fs_meta.update({
            CONTENTTYPE: u'text/x.moin.wiki;charset=utf-8',
        })
        # create a directory "page" in wiki markup:
        try:
            dirs = []
            files = []
            names = os.listdir(self._fs_data_fname)
            for name in names:
                filepath = os.path.join(self._fs_data_fname, name)
                if os.path.isdir(filepath):
                    dirs.append(name)
                else:
                    files.append(name)
            content = [
                u"= Directory contents =",
                u" * [[../]]",
            ]
            # subdirectories get a trailing slash in the link text
            content.extend(u" * [[/%s|%s/]]" % (name, name) for name in sorted(dirs))
            content.extend(u" * [[/%s|%s]]" % (name, name) for name in sorted(files))
            content = u'\r\n'.join(content)
        except OSError as err:
            # e.g. permission denied: present the error text as the page content
            content = unicode(err)
        self._fs_data_file = StringIO(content.encode(config.charset))
-
class FileRevision(FileDirRevision):
    """ A filesystem file """
    def __init__(self, item, revno):
        FileDirRevision.__init__(self, item, revno)
        # guess the content type from the file name (extension)
        contenttype = MimeType(filename=self._fs_data_fname).content_type()
        self._fs_meta.update({
            CONTENTTYPE: unicode(contenttype),
        })
-
--- a/MoinMoin/storage/backends/flatfile.py	Sat Sep 10 03:23:36 2011 +0200
+++ /dev/null	Thu Jan 01 00:00:00 1970 +0000
@@ -1,177 +0,0 @@
-# Copyright: 2008 MoinMoin:JohannesBerg
-# Copyright: 2009-2010 MoinMoin:ThomasWaldmann
-# License: GNU GPL v2 (or any later version), see LICENSE.txt for details.
-
-"""
-    MoinMoin - flat file backend
-
-    This backend is not useful for a wiki that you actually keep online.
-    Instead, it is intended to be used for MoinMoin internally to keep
-    the documentation that is part of the source tree editable via the
-    wiki server locally.
-
-    This backend stores no item metadata and no old revisions, as such
-    you cannot use it safely for a wiki. Inside the MoinMoin source tree,
-    however, the wiki content is additionally kept under source control,
-    therefore this backend is actually useful to edit documentation that
-    is part of MoinMoin.
-
-    The backend _does_ store some revision metadata, namely that which
-    used to traditionally be part of the page header.
-"""
-
-
-import os, re, errno
-from cStringIO import StringIO
-
-from MoinMoin import log
-logging = log.getLogger(__name__)
-
-from MoinMoin.storage import Backend, Item, StoredRevision, NewRevision
-from MoinMoin.storage.error import NoSuchItemError, NoSuchRevisionError, \
-                                   ItemAlreadyExistsError, \
-                                   RevisionAlreadyExistsError
-from MoinMoin.storage.backends._fsutils import quoteWikinameFS, unquoteWikiname
-from MoinMoin.storage.backends._flatutils import add_metadata_to_body, split_body
-from MoinMoin.config import ACTION, MTIME
-
-
class FlatFileBackend(Backend):
    """
    Flat file backend: each item is stored as exactly one file holding a
    single revision (#0) with its metadata embedded as "#key value" header
    lines. No item-level metadata, no revision history.
    """
    def __init__(self, path):
        """
        Initialise filesystem backend, creating initial files and some internal structures.

        :param path: storage path
        """
        self._path = path
        try:
            os.makedirs(path)
        except OSError as err:
            if err.errno != errno.EEXIST:
                # fix: BackendError was referenced without being imported, so a
                # real makedirs failure crashed with a NameError instead
                # (TODO confirm BackendError lives in MoinMoin.storage.error)
                from MoinMoin.storage.error import BackendError
                raise BackendError(str(err))

    def _quote(self, name):
        """Return the filesystem-safe encoding of an item name."""
        return quoteWikinameFS(name)

    def _unquote(self, name):
        """Return the item name for a quoted file name (inverse of _quote)."""
        return unquoteWikiname(name)

    def _rev_path(self, name):
        """Return the path of the single revision file of the named item."""
        return os.path.join(self._path, self._quote(name))

    def _exists(self, name):
        """True if a revision file for the named item exists."""
        revpath = self._rev_path(name)
        return os.path.exists(revpath)

    def get_item(self, itemname):
        """Return the Item; raise NoSuchItemError if it does not exist."""
        if not self._exists(itemname):
            raise NoSuchItemError("No such item, %r" % (itemname))
        return Item(self, itemname)

    def has_item(self, itemname):
        return self._exists(itemname)

    def create_item(self, itemname):
        """Return a new (not yet committed) Item; the name must be unused."""
        if not isinstance(itemname, (str, unicode)):
            raise TypeError("Item names must have string type, not %s" % (type(itemname)))
        elif self.has_item(itemname):
            raise ItemAlreadyExistsError("An Item with the name %r already exists!" % (itemname))
        return Item(self, itemname)

    def iter_items_noindex(self):
        """Yield an Item for every file in the storage directory."""
        for filename in os.listdir(self._path):
            yield Item(self, self._unquote(filename))

    iteritems = iter_items_noindex

    def _get_revision(self, item, revno):
        """Read the single stored revision; revno -1 and 0 both address it."""
        # fix: SIZE is used below but was never imported at module level,
        # so every revision read crashed with a NameError
        from MoinMoin.config import SIZE
        if revno > 0:
            raise NoSuchRevisionError("No Revision #%d on Item %s" % (revno, item.name))

        revpath = self._rev_path(item.name)
        if not os.path.exists(revpath):
            raise NoSuchRevisionError("No Revision #%d on Item %s" % (revno, item.name))

        rev = StoredRevision(item, 0)
        with open(revpath, 'rb') as f:
            data = f.read()
        # the "#key value" header lines become revision metadata
        rev._metadata, data = split_body(data)
        rev._metadata[ACTION] = 'SAVE'
        rev._metadata[SIZE] = len(data)
        rev._data = StringIO(data)
        return rev

    def _list_revisions(self, item):
        """Items have either exactly revision [0] or no revisions at all."""
        return [0] if self._exists(item.name) else []

    def _create_revision(self, item, revno):
        # only revision 0 is ever stored; tolerate 1 being requested as "next"
        assert revno <= 1
        rev = NewRevision(item, 0)
        rev._data = StringIO()
        return rev

    def _destroy_revision(self, revision):
        # fix: CouldNotDestroyError was never imported at module level
        from MoinMoin.storage.error import CouldNotDestroyError
        revpath = self._rev_path(revision.item.name)
        try:
            os.unlink(revpath)
        except OSError as err:
            if err.errno != errno.ENOENT:
                raise CouldNotDestroyError("Could not destroy revision #%d of item '%r' [errno: %d]" % (
                    revision.revno, revision.item.name, err.errno))
            #else:
            #    someone else already killed this revision, we silently ignore this error

    def _rename_item(self, item, newname):
        try:
            os.rename(self._rev_path(item.name), self._rev_path(newname))
        except OSError:
            raise ItemAlreadyExistsError('')

    def _commit_item(self, rev):
        """Write the metadata header plus data of rev into the item's file."""
        revpath = self._rev_path(rev.item.name)
        rev._data.seek(0)
        data = rev._data.read()
        # NOTE(review): rev itself (not a plain dict) is passed as the metadata
        # mapping; this relies on revisions being dict-like (iteritems) - confirm
        data = add_metadata_to_body(rev, data)
        with open(revpath, 'wb') as f:
            f.write(data)

    def _destroy_item(self, item):
        # fix: CouldNotDestroyError was never imported at module level
        from MoinMoin.storage.error import CouldNotDestroyError
        revpath = self._rev_path(item.name)
        try:
            os.unlink(revpath)
        except OSError as err:
            if err.errno != errno.ENOENT:
                raise CouldNotDestroyError("Could not destroy item '%r' [errno: %d]" % (
                    item.name, err.errno))
            #else:
            #    someone else already killed this item, we silently ignore this error

    def _rollback_item(self, rev):
        pass  # nothing is written before commit, so there is nothing to undo

    def _change_item_metadata(self, item):
        pass  # item-level metadata is not supported by this backend

    def _publish_item_metadata(self, item):
        pass  # item-level metadata is not supported by this backend

    def _read_revision_data(self, rev, chunksize):
        return rev._data.read(chunksize)

    def _write_revision_data(self, rev, data):
        rev._data.write(data)

    def _get_item_metadata(self, item):
        return {}  # this backend stores no item-level metadata

    def _seek_revision_data(self, rev, position, mode):
        rev._data.seek(position, mode)

    def _tell_revision_data(self, rev):
        return rev._data.tell()
-
-
--- a/MoinMoin/storage/backends/fs.py	Sat Sep 10 03:23:36 2011 +0200
+++ /dev/null	Thu Jan 01 00:00:00 1970 +0000
@@ -1,474 +0,0 @@
-# Copyright: 2008 MoinMoin:JohannesBerg
-# Copyright: 2009-2010 MoinMoin:ThomasWaldmann
-# License: GNU GPL v2 (or any later version), see LICENSE.txt for details.
-
-"""
-    MoinMoin - FS (filesystem) backend
-
-    XXX: Does NOT work on win32. some problems are documented below (see XXX),
-         some are maybe NOT.
-"""
-
-
-import os, struct, tempfile, random, errno, shutil
-import cPickle as pickle
-
-from MoinMoin import log
-logging = log.getLogger(__name__)
-
-from MoinMoin.util import pycdb as cdb
-
-from MoinMoin.util.lock import ExclusiveLock
-from MoinMoin.util import filesys
-
-from MoinMoin.storage import Backend as BackendBase
-from MoinMoin.storage import Item as ItemBase
-from MoinMoin.storage import StoredRevision as StoredRevisionBase
-from MoinMoin.storage import NewRevision as NewRevisionBase
-
-from MoinMoin.storage.error import NoSuchItemError, NoSuchRevisionError, \
-                                   ItemAlreadyExistsError, \
-                                   RevisionAlreadyExistsError, RevisionNumberMismatchError, \
-                                   CouldNotDestroyError
-
-PICKLEPROTOCOL = 1
-
-
class Item(ItemBase):
    # backend-local alias of the base Item class; adds no behavior
    pass
-
class StoredRevision(StoredRevisionBase):
    # backend-local alias of the base StoredRevision class; adds no behavior
    pass
-
class NewRevision(NewRevisionBase):
    # backend-local alias of the base NewRevision class; adds no behavior
    pass
-
class FSBackend(BackendBase):
    """
    Basic filesystem backend, described at
    http://moinmo.in/JohannesBerg/FilesystemStorage

    Layout: item names map to numeric item ids via a cdb 'name-mapping'
    database; each item is a directory (named by its id) containing
    'rev.N' files, an optional pickled 'meta' file and a 'name' file.
    Each 'rev.N' file starts with 4 bytes giving the data start offset,
    followed by the pickled revision metadata, then the revision data.
    """
    def __init__(self, path, reserved_metadata_space=508):
        """
        Initialise filesystem backend, creating initial files and
        some internal structures.

        :param path: storage path
        :param reserved_metadata_space: space reserved for revision metadata
                                        initially, increase if you expect a
                                        lot of very long ACL strings or so.
                                        We need four additional bookkeeping bytes
                                        so the default of 508 means data starts
                                        at byte 512 in the file by default.
        """
        self._path = path
        self._name_db = os.path.join(path, 'name-mapping')
        self._itemspace = 128
        self._revmeta_reserved_space = reserved_metadata_space

        try:
            os.makedirs(path)
        except OSError as err:
            if err.errno != errno.EEXIST:
                # NOTE(review): BackendError is not imported in this module,
                # so this raise would actually fail with a NameError - confirm
                # and import it where it is defined
                raise BackendError(str(err))

        # if no name-mapping db yet, create an empty one
        # (under lock, re-tests existence too)
        if not os.path.exists(self._name_db):
            self._do_locked(self._name_db + '.lock', self._create_new_cdb, None)

    def _create_new_cdb(self, arg):
        """
        Create new name-mapping if it doesn't exist yet,
        call this under the name-mapping.lock.
        """
        if not os.path.exists(self._name_db):
            # cdbmake with no entries written -> empty mapping db
            with cdb.cdbmake(self._name_db, self._name_db + '.tmp') as maker:
                pass

    def _get_item_id(self, itemname):
        """
        Get ID of item (or None if no such item exists)

        :param itemname: name of item (unicode)
        """
        with cdb.init(self._name_db) as c:
            return c.get(itemname.encode('utf-8'))

    def get_item(self, itemname):
        """Return the existing Item, raise NoSuchItemError otherwise."""
        item_id = self._get_item_id(itemname)
        if item_id is None:
            raise NoSuchItemError("No such item '%r'." % itemname)

        item = Item(self, itemname)
        item._fs_item_id = item_id
        item._fs_metadata = None  # loaded lazily by _get_item_metadata

        return item

    def has_item(self, itemname):
        return self._get_item_id(itemname) is not None

    def create_item(self, itemname):
        """Return a fresh Item (it gets its id on first commit/publish)."""
        if not isinstance(itemname, (str, unicode)):
            raise TypeError("Item names must be of str/unicode type, not %s." % type(itemname))

        elif self.has_item(itemname):
            raise ItemAlreadyExistsError("An item '%r' already exists!" % itemname)

        item = Item(self, itemname)
        item._fs_item_id = None  # None marks "not yet added internally"
        item._fs_metadata = {}

        return item

    def iter_items_noindex(self):
        """Yield an Item for every entry in the name-mapping db."""
        with cdb.init(self._name_db) as c:
            r = c.each()
            while r:
                item = Item(self, r[0].decode('utf-8'))
                item._fs_item_id = r[1]
                yield item
                r = c.each()

    iteritems = iter_items_noindex

    def _get_revision(self, item, revno):
        """Return StoredRevision revno of item; -1 means the latest revision."""
        item_id = item._fs_item_id

        if revno == -1:
            revs = item.list_revisions()
            if not revs:
                raise NoSuchRevisionError("Item has no revisions.")
            revno = max(revs)

        revpath = os.path.join(self._path, item_id, 'rev.%d' % revno)
        if not os.path.exists(revpath):
            raise NoSuchRevisionError("Item '%r' has no revision #%d." % (item.name, revno))

        rev = StoredRevision(item, revno)
        rev._fs_revpath = revpath
        rev._fs_file = None  # opened lazily by _get_revision_metadata
        rev._fs_metadata = None

        return rev

    def _list_revisions(self, item):
        """Return the sorted revision numbers found as 'rev.N' files."""
        if item._fs_item_id is None:
            return []
        p = os.path.join(self._path, item._fs_item_id)
        l = os.listdir(p)
        ret = sorted([int(i[4:]) for i in l if i.startswith('rev.')])
        return ret

    def _create_revision(self, item, revno):
        """Create NewRevision revno; it must be exactly last_revno + 1."""
        if item._fs_item_id is None:
            revs = []
        else:
            revs = self._list_revisions(item)
        last_rev = max(-1, -1, *revs)  # the duplicated -1 guards against empty revs

        if revno in revs:
            raise RevisionAlreadyExistsError("Item '%r' already has a revision #%d." % (item.name, revno))
        elif revno != last_rev + 1:
            raise RevisionNumberMismatchError("The latest revision of the item '%r' is #%d, thus you cannot create revision #%d. \
                                               The revision number must be latest_revision + 1." % (item.name, last_rev, revno))

        rev = NewRevision(item, revno)
        rev._revno = revno
        # write into a temp file first; _commit_item renames it into place
        fd, rev._fs_revpath = tempfile.mkstemp('-rev', 'tmp-', self._path)
        rev._fs_file = f = os.fdopen(fd, 'wb+') # XXX keeps file open as long as a rev exists
        # 4-byte offset header, then reserved metadata space, then data
        rev._datastart = self._revmeta_reserved_space + 4
        f.write(struct.pack('!I', rev._datastart))
        f.seek(rev._datastart)

        return rev

    def _destroy_revision(self, revision):
        if revision._fs_file is not None:
            revision._fs_file.close()
        try:
            os.unlink(revision._fs_revpath)
        except OSError as err:
            if err.errno != errno.ENOENT:
                raise CouldNotDestroyError("Could not destroy revision #%d of item '%r' [errno: %d]" % (
                    revision.revno, revision.item.name, err.errno))
            #else:
            #    someone else already killed this revision, we silently ignore this error

    def _do_locked(self, lockname, fn, arg):
        """Run fn(arg) while holding the exclusive lock at lockname."""
        l = ExclusiveLock(lockname, 30)
        l.acquire(30)
        try:
            ret = fn(arg)
        finally:
            l.release()

        return ret

    def _rename_item_locked(self, arg):
        """Rewrite the name-mapping db with the item renamed (runs under lock)."""
        item, newname = arg
        nn = newname.encode('utf-8')
        npath = os.path.join(self._path, item._fs_item_id, 'name')

        # copy the mapping entry by entry, substituting the new name
        with cdb.init(self._name_db) as c:
            with cdb.cdbmake(self._name_db + '.ndb', self._name_db + '.tmp') as maker:
                r = c.each()
                while r:
                    i, v = r
                    if i == nn:
                        raise ItemAlreadyExistsError("Target item '%r' already exists!" % newname)
                    elif v == item._fs_item_id:
                        maker.add(nn, v)
                    else:
                        maker.add(i, v)
                    r = c.each()

        filesys.rename(self._name_db + '.ndb', self._name_db)
        # update the item directory's 'name' file too
        with open(npath, mode='wb') as nf:
            nf.write(nn)

    def _rename_item(self, item, newname):
        self._do_locked(os.path.join(self._path, 'name-mapping.lock'),
                        self._rename_item_locked, (item, newname))

    def _add_item_internally_locked(self, arg):
        """
        See _add_item_internally, this is just internal for locked operation.
        """
        item, newrev, metadata = arg
        cntr = 0
        done = False
        while not done:
            # pick a random id; grow the id space when collisions pile up
            itemid = '%d' % random.randint(0, self._itemspace - 1)
            ipath = os.path.join(self._path, itemid)
            cntr += 1
            try:
                os.mkdir(ipath)
                done = True
            except OSError as err:
                if err.errno != errno.EEXIST:
                    raise
            if cntr > 2 and not done and self._itemspace <= 2 ** 31:
                self._itemspace *= 2
                cntr = 0
            elif cntr > 20:
                # XXX: UnexpectedBackendError() that propagates to user?
                raise Exception('Item space full!')

        nn = item.name.encode('utf-8')

        class DuplicateError(Exception):
            """ raise if we have a duplicate name """

        try:
            # copy the mapping, failing if the name is already present,
            # and append the new name -> id entry
            with cdb.init(self._name_db) as c:
                with cdb.cdbmake(self._name_db + '.ndb', self._name_db + '.tmp') as maker:
                    r = c.each()
                    while r:
                        i, v = r
                        if i == nn:
                            raise DuplicateError
                        else:
                            maker.add(i, v)
                        r = c.each()
                    maker.add(nn, itemid)
        except DuplicateError:
            # Oops. This item already exists! Clean up and error out.
            os.unlink(self._name_db + '.ndb')
            os.rmdir(ipath)
            if newrev is not None:
                os.unlink(newrev)
            raise ItemAlreadyExistsError("Item '%r' already exists!" % item.name)

        if newrev is not None:
            rp = os.path.join(self._path, itemid, 'rev.0')
            filesys.rename(newrev, rp)

        if metadata:
            # only write metadata file if we have any
            meta = os.path.join(self._path, itemid, 'meta')
            with open(meta, 'wb') as f:
                pickle.dump(metadata, f, protocol=PICKLEPROTOCOL)

        # write 'name' file of item
        npath = os.path.join(ipath, 'name')
        with open(npath, mode='wb') as nf:
            nf.write(nn)

        # make item retrievable (by putting the name-mapping in place)
        filesys.rename(self._name_db + '.ndb', self._name_db)

        item._fs_item_id = itemid

    def _add_item_internally(self, item, newrev=None, metadata=None):
        """
        This method adds a new item. It locks the name-mapping database to
        ensure putting the item into place and adding it to the name-mapping
        db is atomic.

        If the newrev or metadata arguments are given, then it also adds the
        revision or metadata to the item before making it discoverable.

        If the item's name already exists, it doesn't do anything but raise
        a ItemAlreadyExistsError; if the newrev was given the file is unlinked.

        :param newrev: new revision's temporary file path
        :param metadata: item metadata dict
        """
        self._do_locked(os.path.join(self._path, 'name-mapping.lock'),
                        self._add_item_internally_locked, (item, newrev, metadata))

    def _commit_item(self, rev):
        """Finalize rev: embed its metadata in the file and move it into place."""
        item = rev.item
        metadata = dict(rev)
        md = pickle.dumps(metadata, protocol=PICKLEPROTOCOL)

        hasdata = rev._fs_file.tell() > self._revmeta_reserved_space + 4

        if hasdata and len(md) > self._revmeta_reserved_space:
            # metadata overflows the reserved space: rewrite into a new temp
            # file with a big enough header, copying the data behind it
            oldrp = rev._fs_revpath
            oldf = rev._fs_file
            fd, rev._fs_revpath = tempfile.mkstemp('-rev', 'tmp-', self._path)
            rev._fs_file = f = os.fdopen(fd, 'wb+')
            f.write(struct.pack('!I', len(md) + 4))
            # write metadata
            f.write(md)
            # copy already written data
            oldf.seek(self._revmeta_reserved_space + 4)
            shutil.copyfileobj(oldf, f)
            oldf.close()
            os.unlink(oldrp)
        else:
            if not hasdata:
                # no data was written: metadata can start right after the
                # 4 offset bytes, no reserved gap is needed
                # (NOTE(review): '!L' here vs '!I' elsewhere - both are
                # 4 bytes in standard struct size, so equivalent)
                rev._fs_file.seek(0)
                rev._fs_file.write(struct.pack('!L', len(md) + 4))
            else:
                rev._fs_file.seek(4)
            rev._fs_file.write(md)
        rev._fs_file.close()

        if item._fs_item_id is None:
            # first revision of a brand-new item: create item + rev atomically
            self._add_item_internally(item, newrev=rev._fs_revpath)
        else:
            rp = os.path.join(self._path, item._fs_item_id, 'rev.%d' % rev.revno)
            try:
                filesys.rename_no_overwrite(rev._fs_revpath, rp, delete_old=True)
            except OSError as err:
                if err.errno != errno.EEXIST:
                    raise
                raise RevisionAlreadyExistsError("")

    def _rollback_item(self, rev):
        """Discard an uncommitted revision's temporary file."""
        rev._fs_file.close()
        os.unlink(rev._fs_revpath)

    def _destroy_item_locked(self, item):
        """Drop the item from the name-mapping and delete its directory (locked)."""
        with cdb.init(self._name_db) as c:
            with cdb.cdbmake(self._name_db + '.ndb', self._name_db + '.tmp') as maker:
                r = c.each()
                while r:
                    i, v = r
                    if v != item._fs_item_id:
                        maker.add(i, v)
                    r = c.each()

        filesys.rename(self._name_db + '.ndb', self._name_db)
        path = os.path.join(self._path, item._fs_item_id)
        try:
            shutil.rmtree(path)
        except OSError as err:
            raise CouldNotDestroyError("Could not destroy item '%r' [errno: %d]" % (
                item.name, err.errno))

    def _destroy_item(self, item):
        self._do_locked(os.path.join(self._path, 'name-mapping.lock'),
                        self._destroy_item_locked, item)

    def _change_item_metadata(self, item):
        """Begin a metadata change: take the item's metadata lock."""
        if not item._fs_item_id is None:
            lp = os.path.join(self._path, item._fs_item_id, 'meta.lock')
            item._fs_metadata_lock = ExclusiveLock(lp, 30)
            item._fs_metadata_lock.acquire(30)

    def _publish_item_metadata(self, item):
        """End a metadata change: write/remove the 'meta' file, drop the lock."""
        if item._fs_item_id is None:
            # new item: create it together with its metadata in one locked step
            self._add_item_internally(item, metadata=item._fs_metadata)
        else:
            assert item._fs_metadata_lock.isLocked()
            md = item._fs_metadata
            if md is None:
                # metadata unchanged
                pass
            elif not md:
                # metadata now empty, just rm the metadata file
                try:
                    os.unlink(os.path.join(self._path, item._fs_item_id, 'meta'))
                except OSError as err:
                    if err.errno != errno.ENOENT:
                        raise
                    # ignore, there might not have been metadata
            else:
                # write to a temp file, then rename into place
                tmp = os.path.join(self._path, item._fs_item_id, 'meta.tmp')
                with open(tmp, 'wb') as f:
                    pickle.dump(md, f, protocol=PICKLEPROTOCOL)

                filesys.rename(tmp, os.path.join(self._path, item._fs_item_id, 'meta'))
            item._fs_metadata_lock.release()
            del item._fs_metadata_lock

    def _read_revision_data(self, rev, chunksize):
        if rev._fs_file is None:
            # opening happens in _get_revision_metadata (it also sets _datastart)
            self._get_revision_metadata(rev)
        return rev._fs_file.read(chunksize)

    def _write_revision_data(self, rev, data):
        rev._fs_file.write(data)

    def _get_item_metadata(self, item):
        """Load (and cache) the item's pickled metadata; {} if none stored."""
        if item._fs_item_id is not None:
            p = os.path.join(self._path, item._fs_item_id, 'meta')
            try:
                with open(p, 'rb') as f:
                    metadata = pickle.load(f)
            except IOError as err:
                if err.errno != errno.ENOENT:
                    raise
                # no such file means no metadata was stored
                metadata = {}
            item._fs_metadata = metadata
        return item._fs_metadata

    def _get_revision_metadata(self, rev):
        """Open the revision file if needed and unpickle its metadata header."""
        if rev._fs_file is None:
            rev._fs_file = f = open(rev._fs_revpath, 'rb+') # XXX keeps file open as long as rev exists
                                                            # XXX further, this is easily triggered by accessing ANY
                                                            # XXX revision metadata (e.g. the timestamp or size or ACL)
            datastart = f.read(4)
            datastart = struct.unpack('!L', datastart)[0]
            rev._datastart = pos = datastart
        else:
            f = rev._fs_file
            pos = f.tell()
            f.seek(4)
        rev._fs_metadata = pickle.load(f)
        f.seek(pos)  # restore the previous file position
        return rev._fs_metadata

    def _seek_revision_data(self, rev, position, mode):
        """Seek in revision data; mode 0 positions are relative to data start."""
        if rev._fs_file is None:
            self._get_revision_metadata(rev)

        if mode == 0:
            rev._fs_file.seek(position + rev._datastart, mode)
        else:
            rev._fs_file.seek(position, mode)

    def _tell_revision_data(self, revision):
        """Return the current position relative to the start of the data."""
        if revision._fs_file is None:
            self._get_revision_metadata(revision)

        pos = revision._fs_file.tell()
        return pos - revision._datastart
-
-
--- a/MoinMoin/storage/backends/fs19.py	Sat Sep 10 03:23:36 2011 +0200
+++ /dev/null	Thu Jan 01 00:00:00 1970 +0000
@@ -1,822 +0,0 @@
-# Copyright: 2008 MoinMoin:JohannesBerg
-# Copyright: 2008-2010 MoinMoin:ThomasWaldmann
-# License: GNU GPL v2 (or any later version), see LICENSE.txt for details.
-
-"""
-    MoinMoin - Backend for moin 1.9 compatible filesystem data storage.
-
-    This backend is needed because we need to be able to read existing data
-    to convert them to the more powerful new backend(s).
-
-    This backend is neither intended for nor capable of being used for production.
-
-    Note: we do not support emulation of trashbin-like deletion, you have to
-          choose a deleted_mode (see below) when creating a FSPageBackend.
-"""
-
-
-import os
-from StringIO import StringIO
-import hashlib
-
-MAX_NAME_LEN = 1000 # max length of a page name, page+attach name, user name
-
-from sqlalchemy import create_engine, MetaData, Table, Column, String, Unicode, Integer
-
-try:
-    from sqlalchemy.exc import IntegrityError
-except ImportError:
-    from sqlalchemy.exceptions import IntegrityError
-
-from MoinMoin import log
-logging = log.getLogger(__name__)
-
-from MoinMoin import config
-from MoinMoin.config import ACL, CONTENTTYPE, UUID, NAME, NAME_OLD, REVERTED_TO, \
-                            ACTION, ADDRESS, HOSTNAME, USERID, MTIME, EXTRA, COMMENT, \
-                            IS_SYSITEM, SYSITEM_VERSION, \
-                            TAGS, SIZE, HASH_ALGORITHM
-from MoinMoin.storage import Backend, Item, StoredRevision
-from MoinMoin.storage.backends._fsutils import quoteWikinameFS, unquoteWikiname
-from MoinMoin.storage.backends._flatutils import split_body
-from MoinMoin.storage.error import NoSuchItemError, NoSuchRevisionError
-from MoinMoin.util.mimetype import MimeType
-from MoinMoin.util.crypto import make_uuid, UUID_LEN
-
-
-DELETED_MODE_KEEP = 'keep'
-DELETED_MODE_KILL = 'kill'
-
-CONTENTTYPE_DEFAULT = u'text/plain'
-CONTENTTYPE_MOINWIKI = u'text/x.moin.wiki'
-FORMAT_TO_CONTENTTYPE = {
-    'wiki': u'text/x.moin.wiki',
-    'text/wiki': CONTENTTYPE_MOINWIKI,
-    'text/moin-wiki': CONTENTTYPE_MOINWIKI,
-    'creole': u'text/x.moin.creole',
-    'text/creole': u'text/x.moin.creole',
-    'rst': u'text/rst',
-    'text/rst': u'text/rst',
-    'plain': u'text/plain',
-    'text/plain': u'text/plain',
-}
-
-class Index(object):
-    """
-    maintain mappings with names / old userid (for user profile items) / uuid
-    """
-    def __init__(self, path, username_unique=True):
-        engine = create_engine('sqlite:///%s/index.db' % path, echo=False)
-        metadata = MetaData()
-        metadata.bind = engine
-        self.users = Table('users', metadata,
-                           Column('uuid', Unicode, index=True, unique=True),
-                           Column('name', Unicode, index=True, unique=username_unique),
-                           Column('old_id', String, index=True, unique=True),
-                           Column('refcount', Integer), # reference count in edit-log
-                     )
-        self.content = Table('content', metadata,
-                             Column('uuid', Unicode, index=True, unique=True),
-                             Column('name', Unicode, index=True, unique=True),
-                       )
-        metadata.create_all()
-
-    def close(self):
-        engine = self.users.metadata.bind
-        engine.dispose()
-
-    def user_uuid(self, name='', old_id='', refcount=False):
-        """
-        Get uuid for user name, create a new uuid if we don't already have one.
-
-        :param name: name of user (unicode)
-        :param old_id: moin 1.x user id (str)
-        """
-        idx = self.users
-        if old_id:
-            results = idx.select(idx.c.old_id==old_id).execute()
-        elif name:
-            results = idx.select(idx.c.name==name).execute()
-        else:
-            raise ValueError("you need to give name or old_id")
-        row = results.fetchone()
-        results.close()
-        if row is not None:
-            uuid = row[idx.c.uuid]
-            if refcount:
-                refs = row[idx.c.refcount]
-                refs += 1
-                idx.update().where(idx.c.uuid==uuid).values(refcount=refs).execute()
-        else:
-            uuid = make_uuid()
-            if not name:
-                # if we don't have a name, we were called from EditLog with just a old_id
-                # an no name - to avoid non-unique name, assign uuid also to name
-                name = uuid
-            try:
-                refs = refcount and 1 or 0
-                idx.insert().values(name=name, uuid=uuid, old_id=old_id, refcount=refs).execute()
-            except IntegrityError as err:
-                # input maybe has duplicate names in user profiles
-                logging.warning("Multiple user profiles for name: %r" % name)
-        return uuid
-
-    def user_old_id(self, uuid):
-        """
-        Get old_id for some user with uuid <uuid>.
-
-        :param name: uuid - uuid of user (str)
-        """
-        idx = self.users
-        results = idx.select(idx.c.uuid==uuid).execute()
-        row = results.fetchone()
-        results.close()
-        if row is not None:
-            old_id = row[idx.c.old_id]
-            return old_id
-
-    def content_uuid(self, name):
-        """
-        Get uuid for a content name, create a new uuid if we don't already have one.
-
-        :param name: name of content item (page or page/attachment, unicode)
-        """
-        idx = self.content
-        results = idx.select(idx.c.name==name).execute()
-        row = results.fetchone()
-        results.close()
-        if row is not None:
-            uuid = row[idx.c.uuid]
-            return uuid
-        else:
-            uuid = make_uuid()
-            try:
-                idx.insert().values(name=name, uuid=uuid).execute()
-            except IntegrityError as err:
-                # shouldn't happen
-                logging.warning(str(err))
-            return uuid
-
-
-class FSPageBackend(Backend):
-    """
-    MoinMoin 1.9 compatible, read-only, "just for the migration" filesystem backend.
-
-    Everything not needed for the migration will likely just raise a NotImplementedError.
-    """
-    def __init__(self, path, idx_path, syspages=False, deleted_mode=DELETED_MODE_KEEP,
-                 default_markup=u'wiki',
-                 item_category_regex=ur'(?P<all>Category(?P<key>(?!Template)\S+))'):
-        """
-        Initialise filesystem backend.
-
-        :param path: storage path (data_dir)
-        :param idx_path: path for index storage
-        :param syspages: either False (not syspages) or revision number of syspages
-        :param deleted_mode: 'kill' - just ignore deleted pages (pages with
-                                      non-existing current revision) and their attachments
-                                      as if they were not there.
-                                      Non-deleted pages (pages with an existing current
-                                      revision) that have non-current deleted revisions
-                                      will be treated as for 'keep'.
-                             'keep' - keep deleted pages as items with empty revisions,
-                                      keep their attachments. (default)
-        :param default_markup: used if a page has no #format line, moin 1.9's default
-                               'wiki' and we also use this default here.
-        """
-        self._path = path
-        self._syspages = syspages
-        assert deleted_mode in (DELETED_MODE_KILL, DELETED_MODE_KEEP, )
-        self.deleted_mode = deleted_mode
-        self.format_default = default_markup
-        self.item_category_regex = re.compile(item_category_regex, re.UNICODE)
-        self.idx = Index(idx_path)
-
-    def close(self):
-        self.idx.close()
-
-    def _get_item_path(self, name, *args):
-        """
-        Returns the full path to the page directory.
-        """
-        name = quoteWikinameFS(name)
-        path = os.path.join(self._path, 'pages', name, *args)
-        return path
-
-    def _get_rev_path(self, itemname, revno):
-        """
-        Returns the full path to the revision's data file.
-
-        Revno 0 from API will get translated into "00000001" filename.
-        """
-        return self._get_item_path(itemname, "revisions", "%08d" % (revno + 1))
-
-    def _get_att_path(self, itemname, attachname):
-        """
-        Returns the full path to the attachment file.
-        """
-        return self._get_item_path(itemname, "attachments", attachname.encode('utf-8'))
-
-    def _current_path(self, itemname):
-        return self._get_item_path(itemname, "current")
-
-    def has_item(self, itemname):
-        return os.path.isfile(self._current_path(itemname))
-
-    def iter_items_noindex(self):
-        pages_dir = os.path.join(self._path, 'pages')
-        for f in os.listdir(pages_dir):
-            itemname = unquoteWikiname(f)
-            try:
-                item = FsPageItem(self, itemname)
-            except NoSuchItemError:
-                continue
-            else:
-                yield item
-                for attachitem in item.iter_attachments():
-                    yield attachitem
-
-    iteritems = iter_items_noindex
-
-    def get_item(self, itemname):
-        try:
-            # first try to get a page:
-            return FsPageItem(self, itemname)
-        except NoSuchItemError:
-            # do a second try, interpreting it as attachment:
-            return FsAttachmentItem(self, itemname)
-
-    def _get_item_metadata(self, item):
-        return item._fs_meta
-
-    def _list_revisions(self, item):
-        # we report ALL revision numbers:
-        # - zero-based (because the new storage api works zero based)
-        # - we even include deleted revisions' revnos
-        return range(item._fs_current + 1)
-
-    def _get_revision(self, item, revno):
-        if isinstance(item, FsPageItem):
-            return FsPageRevision(item, revno)
-        elif isinstance(item, FsAttachmentItem):
-            return FsAttachmentRevision(item, revno)
-        else:
-            raise
-
-    def _get_revision_metadata(self, rev):
-        return rev._fs_meta
-
-    def _read_revision_data(self, rev, chunksize):
-        if rev._fs_data_file is None:
-            rev._fs_data_file = open(rev._fs_data_fname, 'rb') # XXX keeps file open as long as rev exists
-        return rev._fs_data_file.read(chunksize)
-
-    def _seek_revision_data(self, rev, position, mode):
-        if rev._fs_data_file is None:
-            rev._fs_data_file = open(rev._fs_data_fname, 'rb') # XXX keeps file open as long as rev exists
-        return rev._fs_data_file.seek(position, mode)
-
-
-# Specialized Items/Revisions
-
-class FsPageItem(Item):
-    """ A moin 1.9 filesystem item (page) """
-    def __init__(self, backend, itemname):
-        Item.__init__(self, backend, itemname)
-        currentpath = self._backend._current_path(itemname)
-        editlogpath = self._backend._get_item_path(itemname, 'edit-log')
-        self._fs_meta = {} # 'current' is the only page metadata and handled elsewhere
-        try:
-            with open(currentpath, 'r') as f:
-                current = int(f.read().strip()) - 1 # new api is 0-based, old is 1-based
-        except (OSError, IOError):
-            # no current file means no item
-            raise NoSuchItemError("No such item, %r" % itemname)
-        except ValueError:
-            # we have a current file, but its content is damaged
-            raise # TODO: current = determine_current(revdir, editlog)
-        self._fs_current = current
-        self._fs_editlog = EditLog(editlogpath, idx=backend.idx)
-        self._syspages = backend._syspages
-        if backend.deleted_mode == DELETED_MODE_KILL:
-            try:
-                FsPageRevision(self, current)
-            except NoSuchRevisionError:
-                raise NoSuchItemError('deleted_mode wants killing/ignoring of page %r and its attachments' % itemname)
-        uuid = backend.idx.content_uuid(itemname)
-        self.uuid = self._fs_meta[UUID] = uuid
-        self._fs_meta[NAME] = itemname
-
-    def iter_attachments(self):
-        attachmentspath = self._backend._get_item_path(self.name, 'attachments')
-        try:
-            attachments = os.listdir(attachmentspath)
-        except OSError:
-            attachments = []
-        for f in attachments:
-            attachname = f.decode('utf-8')
-            try:
-                name = '%s/%s' % (self.name, attachname)
-                item = FsAttachmentItem(self._backend, name)
-            except NoSuchItemError:
-                continue
-            else:
-                yield item
-
-
-class FsPageRevision(StoredRevision):
-    """ A moin 1.9 filesystem item revision (page, combines meta+data) """
-    def __init__(self, item, revno):
-        StoredRevision.__init__(self, item, revno)
-        if revno == -1: # not used by converter, but nice to try a life wiki
-            revno = item._fs_current
-        backend = self._backend = item._backend
-        revpath = backend._get_rev_path(item.name, revno)
-        editlog = item._fs_editlog
-        # we just read the page and parse it here, makes the rest of the code simpler:
-        try:
-            with codecs.open(revpath, 'r', config.charset) as f:
-                content = f.read()
-        except (IOError, OSError):
-            if revno == item._fs_current and item._backend.deleted_mode == DELETED_MODE_KILL:
-                raise NoSuchRevisionError('deleted_mode wants killing/ignoring')
-            # handle deleted revisions (for all revnos with 0<=revno<=current) here
-            # we prepare some values for the case we don't find a better value in edit-log:
-            meta = {MTIME: -1, # fake, will get 0 in the end
-                    NAME: item.name, # will get overwritten with name from edit-log
-                                     # if we have an entry there
-                   }
-            try:
-                previous_meta = FsPageRevision(item, revno-1)._fs_meta
-                # if this page revision is deleted, we have no on-page metadata.
-                # but some metadata is required, thus we have to copy it from the
-                # (non-deleted) revision revno-1:
-                for key in [ACL, NAME, CONTENTTYPE, MTIME, ]:
-                    if key in previous_meta:
-                        meta[key] = previous_meta[key]
-            except NoSuchRevisionError:
-                pass # should not happen
-            meta[MTIME] += 1 # it is now either 0 or prev rev mtime + 1
-            data = u''
-            try:
-                editlog_data = editlog.find_rev(revno)
-            except KeyError:
-                if 0 <= revno <= item._fs_current:
-                    editlog_data = { # make something up
-                        ACTION: u'SAVE/DELETE',
-                    }
-                else:
-                    raise NoSuchRevisionError('Item %r has no revision %d (not even a deleted one)!' %
-                            (item.name, revno))
-        else:
-            try:
-                editlog_data = editlog.find_rev(revno)
-            except KeyError:
-                if 0 <= revno <= item._fs_current:
-                    editlog_data = { # make something up
-                        NAME: item.name,
-                        MTIME: int(os.path.getmtime(revpath)),
-                        ACTION: u'SAVE',
-                    }
-            meta, data = split_body(content)
-        meta.update(editlog_data)
-        format = meta.pop('format', backend.format_default)
-        meta[CONTENTTYPE] = FORMAT_TO_CONTENTTYPE.get(format, CONTENTTYPE_DEFAULT)
-        if item._syspages:
-            meta[IS_SYSITEM] = True
-            meta[SYSITEM_VERSION] = item._syspages
-        data = self._process_data(meta, data)
-        data = data.encode(config.charset)
-        size, hash_name, hash_digest = hash_hexdigest(data)
-        meta[hash_name] = hash_digest
-        meta[SIZE] = size
-        self._fs_meta = {}
-        for k, v in meta.iteritems():
-            if isinstance(v, list):
-                v = tuple(v)
-            self._fs_meta[k] = v
-        self._fs_data_fname = None # "file" is already opened here:
-        self._fs_data_file = StringIO(data)
-
-        acl_line = self._fs_meta.get(ACL)
-        if acl_line is not None:
-            self._fs_meta[ACL] = regenerate_acl(acl_line, config.ACL_RIGHTS_CONTENTS)
-
-    def _process_data(self, meta, data):
-        """ In moin 1.x markup, not all metadata is stored in the page's header.
-            E.g. categories are stored in the footer of the page content. For
-            moin2, we extract that stuff from content and put it into metadata.
-        """
-        if meta[CONTENTTYPE] == CONTENTTYPE_MOINWIKI:
-            data = process_categories(meta, data, self._backend.item_category_regex)
-        return data
-
-
-def process_categories(meta, data, item_category_regex):
-    # process categories to tags
-    # find last ---- in the data plus the categories below it
-    m = re.search(r'\n\r?\s*-----*', data[::-1])
-    if m:
-        start = m.start()
-        end = m.end()
-        # categories are after the ---- line
-        if start > 0:
-            categories = data[-start:]
-        else:
-            categories = u''
-        # remove the ---- line from the content
-        data = data[:-end]
-        if categories:
-            # for CategoryFoo, group 'all' matches CategoryFoo, group 'key' matches just Foo
-            # we use 'all' so we don't need to rename category items
-            matches = list(item_category_regex.finditer(categories))
-            if matches:
-                tags = [m.group('all') for m in matches]
-                meta.setdefault(TAGS, []).extend(tags)
-                # remove everything between first and last category from the content
-                start = matches[0].start()
-                end = matches[-1].end()
-                rest = categories[:start] + categories[end:]
-                data += u'\r\n' + rest.lstrip()
-        data = data.rstrip() + u'\r\n'
-    return data
-
-
-class FsAttachmentItem(Item):
-    """ A moin 1.9 filesystem item (attachment) """
-    def __init__(self, backend, name):
-        Item.__init__(self, backend, name)
-        try:
-            itemname, attachname = name.rsplit('/')
-        except ValueError: # no '/' in there
-            raise NoSuchItemError("No such attachment item, %r" % name)
-        editlogpath = self._backend._get_item_path(itemname, 'edit-log')
-        self._fs_current = 0 # attachments only have 1 revision with revno 0
-        self._fs_meta = {} # no attachment item level metadata
-        self._fs_editlog = EditLog(editlogpath, idx=backend.idx)
-        attachpath = self._backend._get_att_path(itemname, attachname)
-        if not os.path.isfile(attachpath):
-            # no attachment file means no item
-            raise NoSuchItemError("No such attachment item, %r" % name)
-        self._fs_attachname = attachname
-        self._fs_attachpath = attachpath
-        # fetch parent page's ACL as it protected the attachment also:
-        try:
-            parentpage = FsPageItem(backend, itemname)
-            parent_current_rev = parentpage.get_revision(-1)
-            acl = parent_current_rev._fs_meta.get(ACL)
-        except (NoSuchItemError, NoSuchRevisionError):
-            acl = None
-        self._fs_parent_acl = acl
-        self._syspages = backend._syspages
-        uuid = backend.idx.content_uuid(name)
-        self.uuid = self._fs_meta[UUID] = uuid
-        self._fs_meta[NAME] = name
-
-class FsAttachmentRevision(StoredRevision):
-    """ A moin 1.9 filesystem item revision (attachment) """
-    def __init__(self, item, revno):
-        if revno != 0:
-            raise NoSuchRevisionError('Item %r has no revision %d (attachments just have revno 0)!' %
-                    (item.name, revno))
-        StoredRevision.__init__(self, item, revno)
-        attpath = item._fs_attachpath
-        editlog = item._fs_editlog
-        try:
-            editlog_data = editlog.find_attach(item._fs_attachname)
-        except KeyError:
-            editlog_data = { # make something up
-                MTIME: int(os.path.getmtime(attpath)),
-                ACTION: u'SAVE',
-            }
-        meta = editlog_data
-        # attachments in moin 1.9 were protected by their "parent" page's acl
-        if item._fs_parent_acl is not None:
-            meta[ACL] = item._fs_parent_acl # XXX not needed for acl_hierarchic
-        meta[CONTENTTYPE] = unicode(MimeType(filename=item._fs_attachname).content_type())
-        with open(attpath, 'rb') as f:
-            size, hash_name, hash_digest = hash_hexdigest(f)
-        meta[hash_name] = hash_digest
-        meta[SIZE] = size
-        if item._syspages:
-            meta[IS_SYSITEM] = True
-            meta[SYSITEM_VERSION] = item._syspages
-        self._fs_meta = meta
-        self._fs_data_fname = attpath
-        self._fs_data_file = None
-
-
-from fs19_logfile import LogFile
-
-
-class EditLog(LogFile):
-    """ Access the edit-log and return metadata as the new api wants it. """
-    def __init__(self, filename, buffer_size=4096, idx=None):
-        LogFile.__init__(self, filename, buffer_size)
-        self._NUM_FIELDS = 9
-        self.idx = idx
-
-    def parser(self, line):
-        """ Parse edit-log line into fields """
-        fields = line.strip().split(u'\t')
-        fields = (fields + [u''] * self._NUM_FIELDS)[:self._NUM_FIELDS]
-        keys = (MTIME, '__rev', ACTION, NAME, ADDRESS, HOSTNAME, USERID, EXTRA, COMMENT)
-        result = dict(zip(keys, fields))
-        # do some conversions/cleanups/fallbacks:
-        result[MTIME] = int(long(result[MTIME] or 0) / 1000000) # convert usecs to secs
-        result['__rev'] = int(result['__rev']) - 1 # old storage is 1-based, we want 0-based
-        result[NAME] = unquoteWikiname(result[NAME])
-        action = result[ACTION]
-        extra = result[EXTRA]
-        if extra:
-            if action.startswith('ATT'):
-                result[NAME] += u'/' + extra # append filename to pagename
-                # keep EXTRA for find_attach
-            elif action == 'SAVE/RENAME':
-                if extra:
-                    result[NAME_OLD] = extra
-                del result[EXTRA]
-                result[ACTION] = u'RENAME'
-            elif action == 'SAVE/REVERT':
-                if extra:
-                    result[REVERTED_TO] = int(extra)
-                del result[EXTRA]
-                result[ACTION] = u'REVERT'
-        userid = result[USERID]
-        if userid:
-            result[USERID] = self.idx.user_uuid(old_id=userid, refcount=True)
-        return result
-
-    def find_rev(self, revno):
-        """ Find metadata for some revno revision in the edit-log. """
-        for meta in self:
-            if meta['__rev'] == revno:
-                break
-        else:
-            self.to_begin()
-            raise KeyError
-        del meta['__rev']
-        meta = dict([(k, v) for k, v in meta.items() if v]) # remove keys with empty values
-        if meta.get(ACTION) == u'SAVENEW':
-            # replace SAVENEW with just SAVE
-            meta[ACTION] = u'SAVE'
-        return meta
-
-    def find_attach(self, attachname):
-        """ Find metadata for some attachment name in the edit-log. """
-        for meta in self.reverse(): # use reverse iteration to get the latest upload's data
-            if (meta['__rev'] == 99999998 and  # 99999999-1 because of 0-based
-                meta[ACTION] == 'ATTNEW' and
-                meta[EXTRA] == attachname):
-                break
-        else:
-            self.to_end()
-            raise KeyError
-        del meta['__rev']
-        del meta[EXTRA] #  we have full name in NAME
-        meta[ACTION] = u'SAVE'
-        meta = dict([(k, v) for k, v in meta.items() if v]) # remove keys with empty values
-        return meta
-
-
-from MoinMoin import security
-
-def regenerate_acl(acl_string, acl_rights_valid):
-    """ recreate ACL string to remove invalid rights """
-    assert isinstance(acl_string, unicode)
-    result = []
-    for modifier, entries, rights in security.ACLStringIterator(acl_rights_valid, acl_string):
-        if (entries, rights) == (['Default'], []):
-            result.append("Default")
-        else:
-            result.append("%s%s:%s" % (
-                          modifier,
-                          u','.join(entries),
-                          u','.join(rights) # iterator has removed invalid rights
-                         ))
-    result = u' '.join(result)
-    logging.debug("regenerate_acl %r -> %r" % (acl_string, result))
-    return result
-
-
-import re, codecs
-from MoinMoin import config
-
-class FSUserBackend(Backend):
-    """
-    MoinMoin 1.9 compatible, read-only, "just for the migration" filesystem backend.
-
-    Everything not needed for the migration will likely just raise a NotImplementedError.
-    """
-    def __init__(self, path, idx_path, kill_save=False):
-        """
-        Initialise filesystem backend.
-
-        :param path: storage path (user_dir)
-        :param idx_path: path for index storage
-        :param data_path: storage path (data_dir) - only used for index storage
-        """
-        self._path = path
-        if kill_save:
-            # XXX dirty trick because this backend is read-only,
-            # XXX to be able to use the wiki logged-in
-            from MoinMoin.user import User
-            User.save = lambda x: None # do nothing, we can't save
-        self.idx = Index(idx_path)
-
-    def _get_item_path(self, name, *args):
-        """
-        Returns the full path to the user profile.
-        """
-        path = os.path.join(self._path, name, *args)
-        return path
-
-    def has_item(self, itemname):
-        return os.path.isfile(self._get_item_path(itemname))
-
-    def iter_items_noindex(self):
-        for old_id in os.listdir(self._path):
-            try:
-                item = FsUserItem(self, old_id=old_id)
-            except NoSuchItemError:
-                continue
-            else:
-                yield item
-
-    iteritems = iter_items_noindex
-
-    def get_item(self, itemname):
-        return FsUserItem(self, itemname)
-
-    def _get_item_metadata(self, item):
-        return item._fs_meta
-
-    def _list_revisions(self, item):
-        # user items have no revisions (storing everything in item metadata)
-        return []
-
-    def _get_revision(self, item, revno):
-        raise NoSuchRevisionError('Item %r has no revision %d (no revisions at all)!' %
-                (item.name, revno))
-
-
-# Specialized Items/Revisions
-
-class FsUserItem(Item):
-    """ A moin 1.9 filesystem item (user) """
-    user_re = re.compile(r'^\d+\.\d+(\.\d+)?$')
-
-    def __init__(self, backend, itemname=None, old_id=None):
-        if itemname is not None:
-            # get_item calls us with a new itemname (uuid)
-            uuid = str(itemname)
-            old_id = backend.idx.user_old_id(uuid=uuid)
-        if not self.user_re.match(old_id):
-            raise NoSuchItemError("userid does not match user_re")
-        Item.__init__(self, backend, itemname) # itemname might be None still
-        try:
-            meta = self._parse_userprofile(old_id)
-        except (OSError, IOError):
-            # no current file means no item
-            raise NoSuchItemError("No such item, %r" % itemname)
-        self._fs_meta = meta = self._process_usermeta(meta)
-        if itemname is None:
-            # iteritems calls us without itemname, just with old_id
-            uuid = backend.idx.user_uuid(name=meta['name'], old_id=old_id)
-            itemname = unicode(uuid)
-            Item.__init__(self, backend, itemname) # XXX init again, with itemname
-        self.uuid = meta[UUID] = uuid
-
-    def _parse_userprofile(self, old_id):
-        with codecs.open(self._backend._get_item_path(old_id), "r", config.charset) as meta_file:
-            metadata = {}
-            for line in meta_file:
-                if line.startswith('#') or line.strip() == "":
-                    continue
-                key, value = line.strip().split('=', 1)
-                # Decode list values
-                if key.endswith('[]'):
-                    key = key[:-2]
-                    value = _decode_list(value)
-
-                # Decode dict values
-                elif key.endswith('{}'):
-                    key = key[:-2]
-                    value = _decode_dict(value)
-
-                metadata[key] = value
-        return metadata
-
-    def _process_usermeta(self, metadata):
-        # stuff we want to have stored as boolean:
-        bool_defaults = [ # taken from cfg.checkbox_defaults
-            ('show_comments', 'False'),
-            ('edit_on_doubleclick', 'True'),
-            ('want_trivial', 'False'),
-            ('mailto_author', 'False'),
-            ('disabled', 'False'),
-        ]
-        for key, default in bool_defaults:
-            metadata[key] = metadata.get(key, default) in ['True', 'true', '1']
-
-        # stuff we want to have stored as integer:
-        int_defaults = [
-            ('edit_rows', '0'),
-        ]
-        for key, default in int_defaults:
-            metadata[key] = int(metadata.get(key, default))
-
-        # rename last_saved to MTIME, int MTIME should be enough:
-        metadata[MTIME] = int(float(metadata.get('last_saved', '0')))
-
-        # rename subscribed_pages to subscribed_items
-        metadata['subscribed_items'] = metadata.get('subscribed_pages', [])
-
-        # convert bookmarks from usecs (and str) to secs (int)
-        metadata['bookmarks'] = [(interwiki, int(long(bookmark)/1000000))
-                                 for interwiki, bookmark in metadata.get('bookmarks', {}).items()]
-
-        # stuff we want to get rid of:
-        kill = ['real_language', # crap (use 'language')
-                'wikiname_add_spaces', # crap magic (you get it like it is)
-                'recoverpass_key', # user can recover again if needed
-                'editor_default', # not used any more
-                'editor_ui', # not used any more
-                'external_target', # ancient, not used any more
-                'passwd', # ancient, not used any more (use enc_passwd)
-                'show_emoticons', # ancient, not used any more
-                'show_fancy_diff', # kind of diff display now depends on mimetype
-                'show_fancy_links', # not used any more (now link rendering depends on theme)
-                'show_toolbar', # not used any more
-                'show_topbottom', # crap
-                'show_nonexist_qm', # crap, can be done by css
-                'show_page_trail', # theme decides whether to show trail
-                'remember_last_visit', # we show trail, user can click there
-                'remember_me', # don't keep sessions open for a long time
-                'subscribed_pages', # renamed to subscribed_items
-                'edit_cols', # not used any more
-                'jid', # no jabber support
-                'tz_offset', # we have real timezone now
-                'date_fmt', # not used any more
-                'datetime_fmt', # not used any more
-                'last_saved', # renamed to MTIME
-                'email_subscribed_events', # XXX no support yet
-                'jabber_subscribed_events', # XXX no support yet
-               ]
-        for key in kill:
-            if key in metadata:
-                del metadata[key]
-
-        # finally, remove some empty values (that have empty defaults anyway or
-        # make no sense when empty):
-        empty_kill = ['aliasname', 'bookmarks', 'enc_password',
-                      'language', 'css_url', 'email', ] # XXX check subscribed_items, quicklinks
-        for key in empty_kill:
-            if key in metadata and metadata[key] in [u'', tuple(), {}, [], ]:
-                del metadata[key]
-
-        return metadata
-
-
-def _decode_list(line):
-    """
-    Decode list of items from user data file
-
-    :param line: line containing list of items, encoded with _encode_list
-    :rtype: list of unicode strings
-    :returns: list of items in encoded in line
-    """
-    items = [item.strip() for item in line.split('\t')]
-    items = [item for item in items if item]
-    return tuple(items)
-
-def _decode_dict(line):
-    """
-    Decode dict of key:value pairs from user data file
-
-    :param line: line containing a dict, encoded with _encode_dict
-    :rtype: dict
-    :returns: dict  unicode:unicode items
-    """
-    items = [item.strip() for item in line.split('\t')]
-    items = [item for item in items if item]
-    items = [item.split(':', 1) for item in items]
-    return dict(items)
-
-def hash_hexdigest(content, bufsize=4096):
-    size = 0
-    hash = hashlib.new(HASH_ALGORITHM)
-    if hasattr(content, "read"):
-        while True:
-            buf = content.read(bufsize)
-            hash.update(buf)
-            size += len(buf)
-            if not buf:
-                break
-    elif isinstance(content, str):
-        hash.update(content)
-        size = len(content)
-    else:
-        raise ValueError("unsupported content object: %r" % content)
-    return size, HASH_ALGORITHM, unicode(hash.hexdigest())
-
--- a/MoinMoin/storage/backends/fs19_logfile.py	Sat Sep 10 03:23:36 2011 +0200
+++ /dev/null	Thu Jan 01 00:00:00 1970 +0000
@@ -1,452 +0,0 @@
-# Copyright: 2005-2007 MoinMoin:ThomasWaldmann
-# License: GNU GPL v2 (or any later version), see LICENSE.txt for details.
-
-"""
-    MoinMoin - LogFile package
-
-    This module supports buffered log reads, iterating forward and backward line-by-line, etc.
-"""
-
-
-from MoinMoin import log
-logging = log.getLogger(__name__)
-
-import os, codecs, errno
-from MoinMoin import config, wikiutil
-
-class LogError(Exception):
-    """ Base class for log errors """
-
-class LogMissing(LogError):
-    """ Raised when the log is missing """
-
-
-class LineBuffer:
-    """
-    Reads lines from a file
-
-    :ivar len: number of lines in self.lines
-    :ivar lines: list of lines (unicode)
-    :ivar offsets: list of file offsets for each line. additionally the position
-                   after the last read line is stored into self.offsets[-1]
-    """
-    def __init__(self, file, offset, size, forward=True):
-        """
-
-        TODO: when this gets refactored, don't use "file" (is a builtin)
-
-        :param file: open file object
-        :param offset: position in file to start from
-        :param size: aproximate number of bytes to read
-        :param forward : read from offset on or from offset-size to offset
-        :type forward: boolean
-        """
-        self.loglevel = logging.NOTSET
-        if forward:
-            begin = offset
-            logging.log(self.loglevel, "LineBuffer.init: forward seek %d read %d" % (begin, size))
-            file.seek(begin)
-            lines = file.readlines(size)
-        else:
-            if offset < 2 * size:
-                begin = 0
-                size = offset
-            else:
-                begin = offset - size
-            logging.log(self.loglevel, "LineBuffer.init: backward seek %d read %d" % (begin, size))
-            file.seek(begin)
-            lines = file.read(size).splitlines(True)
-            if begin != 0:
-                # remove potentially incomplete first line
-                begin += len(lines[0])
-                lines = lines[1:]
-                # XXX check for min one line read
-
-        linecount = len(lines)
-
-        # now calculate the file offsets of all read lines
-        offsets = [len(line) for line in lines]
-        offsets.append(0) # later this element will have the file offset after the last read line
-
-        lengthpreviousline = 0
-        offset = begin
-        for i in xrange(linecount+1):
-            offset += lengthpreviousline
-            lengthpreviousline = offsets[i]
-            offsets[i] = offset
-
-        self.offsets = offsets
-        self.len = linecount
-        # Decode lines after offset in file is calculated
-        self.lines = [unicode(line, config.charset) for line in lines]
-
-
-class LogFile:
-    """
-    .filter: function that gets the values from .parser.
-             must return True to keep it or False to remove it
-
-    Overwrite .parser() and .add() to customize this class to special log files
-    """
-
-    def __init__(self, filename, buffer_size=4096):
-        """
-        :param filename: name of the log file
-        :param buffer_size: approx. size of one buffer in bytes
-        """
-        self.loglevel = logging.NOTSET
-        self.__filename = filename
-        self.__buffer = None # currently used buffer, points to one of the following:
-        self.__buffer1 = None
-        self.__buffer2 = None
-        self.buffer_size = buffer_size
-        self.__lineno = 0
-        self.filter = None
-
-    def __iter__(self):
-        return self
-
-    def reverse(self):
-        """ yield log entries in reverse direction starting from last one
-
-        :rtype: iterator
-        """
-        self.to_end()
-        while True:
-            try:
-                logging.log(self.loglevel, "LogFile.reverse %s" % self.__filename)
-                result = self.previous()
-            except StopIteration:
-                return
-            yield result
-
-    def sanityCheck(self):
-        """ Check for log file write access.
-
-        :rtype: string (error message) or None
-        """
-        if not os.access(self.__filename, os.W_OK):
-            return "The log '%s' is not writable!" % (self.__filename, )
-        return None
-
-    def __getattr__(self, name):
-        """
-        generate some attributes when needed
-        """
-        if name == "_LogFile__rel_index": # Python black magic: this is the real name of the __rel_index attribute
-            # starting iteration from begin
-            self.__buffer1 = LineBuffer(self._input, 0, self.buffer_size)
-            self.__buffer2 = LineBuffer(self._input,
-                                        self.__buffer1.offsets[-1],
-                                        self.buffer_size)
-            self.__buffer = self.__buffer1
-            self.__rel_index = 0
-            return 0
-        elif name == "_input":
-            try:
-                # Open the file (NOT using codecs.open, it breaks our offset calculation. We decode it later.).
-                # Use binary mode in order to retain \r - otherwise the offset calculation would fail.
-                self._input = file(self.__filename, "rb", )
-            except IOError as err:
-                if err.errno == errno.ENOENT: # "file not found"
-                    # XXX workaround if edit-log does not exist: just create it empty
-                    # if this workaround raises another error, we don't catch
-                    # it, so the admin will see it.
-                    f = file(self.__filename, "ab")
-                    f.write('')
-                    f.close()
-                    self._input = file(self.__filename, "rb", )
-                else:
-                    logging.error("logfile: %r IOERROR errno %d (%s)" % (self.__filename, err.errno, os.strerror(err.errno)))
-                    raise
-            return self._input
-        elif name == "_output":
-            self._output = codecs.open(self.__filename, 'a', config.charset)
-            return self._output
-        else:
-            raise AttributeError(name)
-
-    def size(self):
-        """ Return log size in bytes
-
-        Return 0 if the file does not exist. Raises other OSError.
-
-        :returns: size of log file in bytes
-        :rtype: Int
-        """
-        try:
-            return os.path.getsize(self.__filename)
-        except OSError as err:
-            if err.errno == errno.ENOENT:
-                return 0
-            raise
-
-    def lines(self):
-        """ Return number of lines in the log file
-
-        Return 0 if the file does not exist. Raises other OSError.
-
-        Expensive for big log files - O(n)
-
-        :returns: size of log file in lines
-        :rtype: Int
-        """
-        try:
-            f = file(self.__filename, 'r')
-            try:
-                count = 0
-                for line in f:
-                    count += 1
-                return count
-            finally:
-                f.close()
-        except (OSError, IOError) as err:
-            if err.errno == errno.ENOENT:
-                return 0
-            raise
-
-    def peek(self, lines):
-        """ Move position in file forward or backwards by "lines" count
-
-        It adjusts .__lineno if set.
-        This function is not aware of filters!
-
-        :param lines: number of lines, may be negative to move backward
-        :rtype: boolean
-        :returns: True if moving more than to the beginning and moving
-                 to the end or beyond
-        """
-        logging.log(self.loglevel, "LogFile.peek %s" % self.__filename)
-        self.__rel_index += lines
-        while self.__rel_index < 0:
-            if self.__buffer is self.__buffer2:
-                if self.__buffer.offsets[0] == 0:
-                    # already at the beginning of the file
-                    self.__rel_index = 0
-                    self.__lineno = 0
-                    return True
-                else:
-                    # change to buffer 1
-                    self.__buffer = self.__buffer1
-                    self.__rel_index += self.__buffer.len
-            else: # self.__buffer is self.__buffer1
-                if self.__buffer.offsets[0] == 0:
-                    # already at the beginning of the file
-                    self.__rel_index = 0
-                    self.__lineno = 0
-                    return True
-                else:
-                    # load previous lines
-                    self.__buffer2 = self.__buffer1
-                    self.__buffer1 = LineBuffer(self._input,
-                                                self.__buffer.offsets[0],
-                                                self.buffer_size,
-                                                forward=False)
-                    self.__buffer = self.__buffer1
-                    self.__rel_index += self.__buffer.len
-
-        while self.__rel_index >= self.__buffer.len:
-            if self.__buffer is self.__buffer1:
-                # change to buffer 2
-                self.__rel_index -= self.__buffer.len
-                self.__buffer = self.__buffer2
-            else: # self.__buffer is self.__buffer2
-                # try to load next buffer
-                tmpbuff = LineBuffer(self._input,
-                                     self.__buffer.offsets[-1],
-                                     self.buffer_size)
-                if tmpbuff.len == 0:
-                    # end of file
-                    if self.__lineno is not None:
-                        self.__lineno += (lines -
-                                         (self.__rel_index - self.__buffer.len))
-                    self.__rel_index = self.__buffer.len # point to after last read line
-                    return True
-                # shift buffers
-                self.__rel_index -= self.__buffer.len
-                self.__buffer1 = self.__buffer2
-                self.__buffer2 = tmpbuff
-                self.__buffer = self.__buffer2
-
-        if self.__lineno is not None:
-            self.__lineno += lines
-        return False
-
-    def __next(self):
-        """get next line already parsed"""
-        if self.peek(0):
-            raise StopIteration
-        result = self.parser(self.__buffer.lines[self.__rel_index])
-        self.peek(1)
-        return result
-
-    def next(self):
-        """get next line that passes through the filter
-        :returns: next entry
-        raises StopIteration at file end
-        """
-        result = None
-        while result is None:
-            while result is None:
-                logging.log(self.loglevel, "LogFile.next %s" % self.__filename)
-                result = self.__next()
-            if self.filter and not self.filter(result):
-                result = None
-        return result
-
-    def __previous(self):
-        """get previous line already parsed"""
-        if self.peek(-1):
-            raise StopIteration
-        return self.parser(self.__buffer.lines[self.__rel_index])
-
-    def previous(self):
-        """get previous line that passes through the filter
-        :returns: previous entry
-        raises StopIteration at file begin
-        """
-        result = None
-        while result is None:
-            while result is None:
-                logging.log(self.loglevel, "LogFile.previous %s" % self.__filename)
-                result = self.__previous()
-            if self.filter and not self.filter(result):
-                result = None
-        return result
-
-    def to_begin(self):
-        """moves file position to the begin"""
-        logging.log(self.loglevel, "LogFile.to_begin %s" % self.__filename)
-        if self.__buffer1 is None or self.__buffer1.offsets[0] != 0:
-            self.__buffer1 = LineBuffer(self._input,
-                                        0,
-                                        self.buffer_size)
-            self.__buffer2 = LineBuffer(self._input,
-                                        self.__buffer1.offsets[-1],
-                                        self.buffer_size)
-        self.__buffer = self.__buffer1
-        self.__rel_index = 0
-        self.__lineno = 0
-
-    def to_end(self):
-        """moves file position to the end"""
-        logging.log(self.loglevel, "LogFile.to_end %s" % self.__filename)
-        self._input.seek(0, 2) # to end of file
-        size = self._input.tell()
-        if self.__buffer2 is None or size > self.__buffer2.offsets[-1]:
-            self.__buffer2 = LineBuffer(self._input,
-                                        size,
-                                        self.buffer_size,
-                                        forward=False)
-
-            self.__buffer1 = LineBuffer(self._input,
-                                        self.__buffer2.offsets[0],
-                                        self.buffer_size,
-                                        forward=False)
-        self.__buffer = self.__buffer2
-        self.__rel_index = self.__buffer2.len
-        self.__lineno = None
-
-    def position(self):
-        """ Return the current file position
-
-        This can be converted into a String using back-ticks and then be rebuild.
-        For this plain file implementation position is an Integer.
-        """
-        return self.__buffer.offsets[self.__rel_index]
-
-    def seek(self, position, line_no=None):
-        """ moves file position to an value formerly gotten from .position().
-        To enable line counting line_no must be provided.
-        .seek is much more efficient for moving long distances than .peek.
-        raises ValueError if position is invalid
-        """
-        logging.log(self.loglevel, "LogFile.seek %s pos %d" % (self.__filename, position))
-        if self.__buffer1:
-            logging.log(self.loglevel, "b1 %r %r" % (self.__buffer1.offsets[0], self.__buffer1.offsets[-1]))
-        if self.__buffer2:
-            logging.log(self.loglevel, "b2 %r %r" % (self.__buffer2.offsets[0], self.__buffer2.offsets[-1]))
-        if self.__buffer1 and self.__buffer1.offsets[0] <= position < self.__buffer1.offsets[-1]:
-            # position is in .__buffer1
-            self.__rel_index = self.__buffer1.offsets.index(position)
-            self.__buffer = self.__buffer1
-        elif self.__buffer2 and self.__buffer2.offsets[0] <= position < self.__buffer2.offsets[-1]:
-            # position is in .__buffer2
-            self.__rel_index = self.__buffer2.offsets.index(position)
-            self.__buffer = self.__buffer2
-        elif self.__buffer1 and self.__buffer1.offsets[-1] == position:
-            # we already have one buffer directly before where we want to go
-            self.__buffer2 = LineBuffer(self._input,
-                                        position,
-                                        self.buffer_size)
-            self.__buffer = self.__buffer2
-            self.__rel_index = 0
-        elif self.__buffer2 and self.__buffer2.offsets[-1] == position:
-            # we already have one buffer directly before where we want to go
-            self.__buffer1 = self.__buffer2
-            self.__buffer2 = LineBuffer(self._input,
-                                        position,
-                                        self.buffer_size)
-            self.__buffer = self.__buffer2
-            self.__rel_index = 0
-        else:
-            # load buffers around position
-            self.__buffer1 = LineBuffer(self._input,
-                                        position,
-                                        self.buffer_size,
-                                        forward=False)
-            self.__buffer2 = LineBuffer(self._input,
-                                        position,
-                                        self.buffer_size)
-            self.__buffer = self.__buffer2
-            self.__rel_index = 0
-            # XXX test for valid position
-        self.__lineno = line_no
-
-    def line_no(self):
-        """:returns: the current line number or None if line number is unknown"""
-        return self.__lineno
-
-    def calculate_line_no(self):
-        """ Calculate the current line number from buffer offsets
-
-        If line number is unknown it is calculated by parsing the whole file.
-        This may be expensive.
-        """
-        self._input.seek(0, 0)
-        lines = self._input.read(self.__buffer.offsets[self.__rel_index])
-        self.__lineno = len(lines.splitlines())
-        return self.__lineno
-
-    def parser(self, line):
-        """
-        Converts the line from file to program representation.
-        This implementation uses TAB separated strings.
-        This method should be overwritten by the sub classes.
-
-        :param line: line as read from file
-        :returns: parsed line or None on error
-        """
-        return line.split("\t")
-
-    def add(self, *data):
-        """
-        add line to log file
-        This implementation save the values as TAB separated strings.
-        This method should be overwritten by the sub classes.
-        """
-        line = "\t".join(data)
-        self._add(line)
-
-    def _add(self, line):
-        """
-        :param line: flat line
-        :type line: String
-        write on entry in the log file
-        """
-        if line is not None:
-            if line[-1] != '\n':
-                line += '\n'
-            self._output.write(line)
-            self._output.close() # does this maybe help against the sporadic fedora wikis 160 \0 bytes in the edit-log?
-            del self._output # re-open the output file automagically
--- a/MoinMoin/storage/backends/fs2.py	Sat Sep 10 03:23:36 2011 +0200
+++ /dev/null	Thu Jan 01 00:00:00 1970 +0000
@@ -1,455 +0,0 @@
-# Copyright: 2008 MoinMoin:JohannesBerg ("fs2" is originally based on "fs" from JB)
-# Copyright: 2009-2010 MoinMoin:ThomasWaldmann
-# License: GNU GPL v2 (or any later version), see LICENSE.txt for details.
-
-"""
-    MoinMoin - FS2 backend
-
-    Features:
-    * store metadata and data separately
-    * use uuids for item storage names
-    * uses content hash addressing for revision data storage
-    * use sqlalchemy/sqlite (not cdb/self-made DBs like fs does)
-"""
-
-
-import os, tempfile, errno, shutil
-
-import cPickle as pickle
-
-from flask import current_app as app
-
-from sqlalchemy import create_engine, MetaData, Table, Column, String, Unicode, Integer
-
-try:
-    from sqlalchemy.exc import IntegrityError
-except ImportError:
-    from sqlalchemy.exceptions import IntegrityError
-
-from sqlalchemy.pool import NullPool
-
-from werkzeug import cached_property
-
-from MoinMoin import log
-logging = log.getLogger(__name__)
-
-from MoinMoin.util.lock import ExclusiveLock
-from MoinMoin.util import filesys
-from MoinMoin.util.crypto import make_uuid, UUID_LEN
-
-from MoinMoin.storage import Backend as BackendBase
-from MoinMoin.storage import Item as ItemBase
-from MoinMoin.storage import StoredRevision as StoredRevisionBase
-from MoinMoin.storage import NewRevision as NewRevisionBase
-
-from MoinMoin.storage.error import NoSuchItemError, NoSuchRevisionError, \
-                                   ItemAlreadyExistsError, \
-                                   RevisionAlreadyExistsError, RevisionNumberMismatchError, \
-                                   CouldNotDestroyError
-
-PICKLEPROTOCOL = 1
-
-MAX_NAME_LEN = 500
-from MoinMoin.config import HASH_ALGORITHM
-
-
-class Item(ItemBase):
-    def __init__(self, backend, item_name, _fs_item_id=None, _fs_metadata=None, *args, **kw):
-        self._fs_item_id = _fs_item_id
-        self._fs_metadata = _fs_metadata
-        super(Item, self).__init__(backend, item_name, *args, **kw)
-
-
-class StoredRevision(StoredRevisionBase):
-    def __init__(self, item, revno, *args, **kw):
-        self._fs_file_data = None
-        if revno == -1:
-            revs = item.list_revisions()
-            if not revs:
-                raise NoSuchRevisionError("Item '%r' has no revisions." % (item.name, ))
-            revno = max(revs)
-        super(StoredRevision, self).__init__(item, revno, *args, **kw)
-        # fail early if we don't have such a revision:
-        self._fs_path_meta = self._backend._make_path('meta', item._fs_item_id, '%d.rev' % revno)
-        if not os.path.exists(self._fs_path_meta):
-            raise NoSuchRevisionError("Item '%r' has no revision #%d." % (item.name, revno))
-
-    @cached_property
-    def _fs_metadata(self):
-        with open(self._fs_path_meta, 'rb') as f:
-            try:
-                metadata = pickle.load(f)
-            except EOFError:
-                metadata = {}
-        return metadata
-
-    @cached_property
-    def _fs_path_data(self):
-        data_hash = self._fs_metadata[HASH_ALGORITHM]
-        return self._backend._make_path('data', data_hash)
-
-
-class NewRevision(NewRevisionBase):
-    def __init__(self, item, revno, *args, **kw):
-        super(NewRevision, self).__init__(item, revno, *args, **kw)
-        def maketemp(kind):
-            tmp_dir = self._backend._make_path(kind)
-            fd, tmp_path = tempfile.mkstemp('.tmp', '', tmp_dir)
-            tmp_file = os.fdopen(fd, 'wb+') # XXX keeps file open as long a rev exists
-            return tmp_file, tmp_path
-
-        self._fs_file_meta, self._fs_path_meta = maketemp('meta')
-        self._fs_file_data, self._fs_path_data = maketemp('data')
-
-
-class FS2Backend(BackendBase):
-    """
-    FS2 backend
-    """
-    def __init__(self, path):
-        """
-        Initialise filesystem backend, creating initial files and some internal structures.
-
-        :param path: storage path
-        """
-        self._path = path
-
-        # create <path>, meta data and revision content data storage subdirs
-        meta_path = self._make_path('meta')
-        data_path = self._make_path('data')
-        for path in (self._path, meta_path, data_path):
-            try:
-                os.makedirs(path)
-            except OSError as err:
-                if err.errno != errno.EEXIST:
-                    raise BackendError(str(err))
-
-        engine = create_engine('sqlite:///%s' % self._make_path('index.db'), poolclass=NullPool, echo=False)
-        metadata = MetaData()
-        metadata.bind = engine
-
-        # item_name -> item_id mapping
-        self._name2id = Table('name2id', metadata,
-                            Column('item_name', Unicode(MAX_NAME_LEN), primary_key=True),
-                            Column('item_id', String(UUID_LEN), index=True, unique=True),
-                        )
-
-        metadata.create_all()
-
-    def close(self):
-        engine = self._name2id.metadata.bind
-        engine.dispose()
-
-    def _make_path(self, *args):
-        return os.path.join(self._path, *args)
-
-    def _get_item_id(self, itemname):
-        """
-        Get ID of item (or None if no such item exists)
-
-        :param itemname: name of item (unicode)
-        """
-        name2id = self._name2id
-        results = name2id.select(name2id.c.item_name==itemname).execute()
-        row = results.fetchone()
-        results.close()
-        if row is not None:
-            item_id = row[name2id.c.item_id]
-            item_id = str(item_id) # we get unicode
-            return item_id
-
-    def _get_item_name(self, itemid):
-        """
-        Get name of item (or None if no such item exists)
-
-        :param itemid: id of item (str)
-        """
-        name2id = self._name2id
-        results = name2id.select(name2id.c.item_id==itemid).execute()
-        row = results.fetchone()
-        results.close()
-        if row is not None:
-            item_name = row[name2id.c.item_name]
-            return item_name
-
-    def get_item(self, itemname):
-        item_id = self._get_item_id(itemname)
-        if item_id is None:
-            raise NoSuchItemError("No such item '%r'." % itemname)
-
-        return Item(self, itemname, _fs_item_id=item_id)
-
-    def has_item(self, itemname):
-        return self._get_item_id(itemname) is not None
-
-    def create_item(self, itemname):
-        if not isinstance(itemname, (str, unicode)):
-            raise TypeError("Item names must be of str/unicode type, not %s." % type(itemname))
-
-        elif self.has_item(itemname):
-            raise ItemAlreadyExistsError("An item '%r' already exists!" % itemname)
-
-        return Item(self, itemname, _fs_metadata={})
-
-    def iter_items_noindex(self):
-        name2id = self._name2id
-        results = name2id.select().execute()
-        for row in results:
-            item_name = row[name2id.c.item_name]
-            item_id = row[name2id.c.item_id]
-            item_id = str(item_id) # we get unicode!
-            item = Item(self, item_name, _fs_item_id=item_id)
-            yield item
-        results.close()
-
-    iteritems = iter_items_noindex
-
-    def _get_revision(self, item, revno):
-        return StoredRevision(item, revno)
-
-    def _list_revisions(self, item):
-        if item._fs_item_id is None:
-            return []
-        p = self._make_path('meta', item._fs_item_id)
-        l = os.listdir(p)
-        suffix = '.rev'
-        ret = sorted([int(i[:-len(suffix)]) for i in l if i.endswith(suffix)])
-        return ret
-
-    def _create_revision(self, item, revno):
-        if item._fs_item_id is None:
-            revs = []
-        else:
-            revs = self._list_revisions(item)
-        last_rev = max(-1, -1, *revs)
-
-        if revno in revs:
-            raise RevisionAlreadyExistsError("Item '%r' already has a revision #%d." % (item.name, revno))
-        elif revno != last_rev + 1:
-            raise RevisionNumberMismatchError("The latest revision of the item '%r' is #%d, thus you cannot create revision #%d. \
-                                               The revision number must be latest_revision + 1." % (item.name, last_rev, revno))
-
-        return NewRevision(item, revno)
-
-    def _destroy_revision(self, rev):
-        self._close_revision_data(rev)
-        try:
-            os.unlink(rev._fs_path_meta)
-            # XXX do refcount data files and if zero, kill it
-            #os.unlink(rev._fs_path_data)
-        except OSError as err:
-            if err.errno != errno.ENOENT:
-                raise CouldNotDestroyError("Could not destroy revision #%d of item '%r' [errno: %d]" % (
-                    rev.revno, rev.item.name, err.errno))
-            #else:
-            #    someone else already killed this revision, we silently ignore this error
-
-    def _do_locked(self, lockname, fn, arg):
-        l = ExclusiveLock(lockname, 30)
-        l.acquire(30)
-        try:
-            ret = fn(arg)
-        finally:
-            l.release()
-
-        return ret
-
-    def _rename_item_locked(self, arg):
-        item, newname = arg
-        item_id = item._fs_item_id
-
-        name2id = self._name2id
-        try:
-            results = name2id.update().where(name2id.c.item_id==item_id).values(item_name=newname).execute()
-            results.close()
-        except IntegrityError:
-            raise ItemAlreadyExistsError("Target item '%r' already exists!" % newname)
-
-    def _rename_item(self, item, newname):
-        self._do_locked(self._make_path('name-mapping.lock'),
-                        self._rename_item_locked, (item, newname))
-
-    def _add_item_internally_locked(self, arg):
-        """
-        See _add_item_internally, this is just internal for locked operation.
-        """
-        item, revmeta, revdata, revdata_target, itemmeta = arg
-        item_id = make_uuid()
-        item_name = item.name
-
-        name2id = self._name2id
-        try:
-            results = name2id.insert().values(item_id=item_id, item_name=item_name).execute()
-            results.close()
-        except IntegrityError:
-            raise ItemAlreadyExistsError("Item '%r' already exists!" % item_name)
-
-        os.mkdir(self._make_path('meta', item_id))
-
-        if revdata is not None:
-            filesys.rename(revdata, revdata_target)
-
-        if revmeta is not None:
-            rp = self._make_path('meta', item_id, '%d.rev' % 0)
-            filesys.rename(revmeta, rp)
-
-        if itemmeta:
-            # only write item level metadata file if we have any
-            mp = self._make_path('meta', item_id, 'item')
-            with open(mp, 'wb') as f:
-                pickle.dump(itemmeta, f, protocol=PICKLEPROTOCOL)
-
-        item._fs_item_id = item_id
-
-    def _add_item_internally(self, item, revmeta=None, revdata=None, revdata_target=None, itemmeta=None):
-        """
-        This method adds a new item. It locks the name-mapping database to
-        ensure putting the item into place and adding it to the name-mapping
-        db is atomic.
-
-        If the newrev or metadata arguments are given, then it also adds the
-        revision or metadata to the item before making it discoverable.
-
-        If the item's name already exists, it doesn't do anything but raise
-        a ItemAlreadyExistsError; if the newrev was given the file is unlinked.
-
-        :param revmeta: new revision's temporary meta file path
-        :param revdata: new revision's temporary data file path
-        :param itemmeta: item metadata dict
-        """
-        self._do_locked(self._make_path('name-mapping.lock'),
-                        self._add_item_internally_locked, (item, revmeta, revdata, revdata_target, itemmeta))
-
-    def _commit_item(self, rev):
-        item = rev.item
-        metadata = dict(rev)
-        md = pickle.dumps(metadata, protocol=PICKLEPROTOCOL)
-
-        rev._fs_file_meta.write(md)
-
-        self._close_revision_meta(rev)
-        self._close_revision_data(rev)
-
-        data_hash = metadata[HASH_ALGORITHM]
-
-        pd = self._make_path('data', data_hash)
-        if item._fs_item_id is None:
-            self._add_item_internally(item, revmeta=rev._fs_path_meta, revdata=rev._fs_path_data, revdata_target=pd)
-        else:
-            try:
-                filesys.rename_no_overwrite(rev._fs_path_data, pd, delete_old=True)
-            except OSError as err:
-                if err.errno != errno.EEXIST:
-                    raise
-
-            pm = self._make_path('meta', item._fs_item_id, '%d.rev' % rev.revno)
-            try:
-                filesys.rename_no_overwrite(rev._fs_path_meta, pm, delete_old=True)
-            except OSError as err:
-                if err.errno != errno.EEXIST:
-                    raise
-                raise RevisionAlreadyExistsError("")
-
-    def _rollback_item(self, rev):
-        self._close_revision_meta(rev)
-        self._close_revision_data(rev)
-        os.unlink(rev._fs_path_meta)
-        os.unlink(rev._fs_path_data)
-
-    def _destroy_item_locked(self, item):
-        item_id = item._fs_item_id
-
-        name2id = self._name2id
-        results = name2id.delete().where(name2id.c.item_id==item_id).execute()
-        results.close()
-
-        path = self._make_path('meta', item_id)
-        try:
-            shutil.rmtree(path)
-        except OSError as err:
-            raise CouldNotDestroyError("Could not destroy item '%r' [errno: %d]" % (
-                item.name, err.errno))
-        # XXX do refcount data files and if zero, kill it
-
-    def _destroy_item(self, item):
-        self._do_locked(self._make_path('name-mapping.lock'),
-                        self._destroy_item_locked, item)
-
-    def _change_item_metadata(self, item):
-        if not item._fs_item_id is None:
-            lp = self._make_path('meta', item._fs_item_id, 'item.lock')
-            item._fs_metadata_lock = ExclusiveLock(lp, 30)
-            item._fs_metadata_lock.acquire(30)
-
-    def _publish_item_metadata(self, item):
-        if item._fs_item_id is None:
-            self._add_item_internally(item, itemmeta=item._fs_metadata)
-        else:
-            assert item._fs_metadata_lock.isLocked()
-            md = item._fs_metadata
-            if md is None:
-                # metadata unchanged
-                pass
-            elif not md:
-                # metadata now empty, just rm the metadata file
-                try:
-                    os.unlink(self._make_path('meta', item._fs_item_id, 'item'))
-                except OSError as err:
-                    if err.errno != errno.ENOENT:
-                        raise
-                    # ignore, there might not have been metadata
-            else:
-                tmp = self._make_path('meta', item._fs_item_id, 'item.tmp')
-                with open(tmp, 'wb') as f:
-                    pickle.dump(md, f, protocol=PICKLEPROTOCOL)
-
-                filesys.rename(tmp, self._make_path('meta', item._fs_item_id, 'item'))
-            item._fs_metadata_lock.release()
-            del item._fs_metadata_lock
-
-    def _get_item_metadata(self, item):
-        if item._fs_item_id is not None:
-            p = self._make_path('meta', item._fs_item_id, 'item')
-            try:
-                with open(p, 'rb') as f:
-                    metadata = pickle.load(f)
-            except IOError as err:
-                if err.errno != errno.ENOENT:
-                    raise
-                # no such file means no metadata was stored
-                metadata = {}
-            item._fs_metadata = metadata
-        return item._fs_metadata
-
-    def _get_revision_metadata(self, rev):
-        return rev._fs_metadata
-
-    def _open_revision_data(self, rev, mode='rb'):
-        if rev._fs_file_data is None:
-            rev._fs_file_data = open(rev._fs_path_data, mode) # XXX keeps file open as long as rev exists
-
-    def _close_revision_data(self, rev):
-        if rev._fs_file_data is not None:
-            rev._fs_file_data.close()
-
-    def _close_revision_meta(self, rev):
-        if rev._fs_file_meta is not None:
-            rev._fs_file_meta.close()
-
-    def _seek_revision_data(self, rev, position, mode):
-        self._open_revision_data(rev)
-        rev._fs_file_data.seek(position, mode)
-
-    def _tell_revision_data(self, rev):
-        self._open_revision_data(rev)
-        return rev._fs_file_data.tell()
-
-    def _read_revision_data(self, rev, chunksize):
-        self._open_revision_data(rev)
-        return rev._fs_file_data.read(chunksize)
-
-    def _write_revision_data(self, rev, data):
-        # we assume that the file is already open for writing
-        rev._fs_file_data.write(data)
-
-
--- a/MoinMoin/storage/backends/hg.py	Sat Sep 10 03:23:36 2011 +0200
+++ /dev/null	Thu Jan 01 00:00:00 1970 +0000
@@ -1,718 +0,0 @@
-# Copyright: 2008 MoinMoin:PawelPacana
-# License: GNU GPL v2 (or any later version), see LICENSE.txt for details.
-
-"""
-    MoinMoin - MercurialBackend
-
-    This package contains code for MoinMoin storage backend using a
-    Mercurial (hg) distributed version control system. This backend provides
-    several advantages compared to MoinMoin's default filesystem backend:
-
-    - revisioning and concurrency issues handled using Mercurial's internal
-      mechanisms
-    - cloning of the page database, allowing easy backup, synchronization and
-      forking of wikis
-    - offline, commandline edits with support of custom mercurial extensions
-      for non-trivial tasks
-
-    Note: the related MoinMoin/action/GraphInfo.py code, which provided a
-          graphical history view for hg backend was removed at 2010-09-25,
-          because it needed refactoring for flask/jinja2, but was unmaintained.
-          If you'ld like to work on it, pull it from repo history.
-"""
-
-
-from __future__ import absolute_import, division
-
-import os
-import time
-import errno
-import weakref
-import tempfile
-import StringIO
-import itertools
-import cPickle as pickle
-from datetime import datetime
-import hashlib
-
-os.environ["HGENCODING"] = "utf-8" # must be set before importing mercurial
-os.environ["HGMERGE"] = "internal:fail"
-
-from mercurial import hg, ui, util, cmdutil, commands
-from mercurial.node import short, nullid
-from mercurial.revlog import LookupError
-
-try:
-    from mercurial.error import RepoError
-except ImportError:
-    from mercurial.repo import RepoError
-
-try:
-    import mercurial.match
-except ImportError:
-    pass
-
-from MoinMoin.util import pycdb as cdb
-
-from MoinMoin.config import USERID, COMMENT, MTIME
-from MoinMoin.storage import Backend, Item, StoredRevision, NewRevision
-from MoinMoin.storage.error import (BackendError, NoSuchItemError, NoSuchRevisionError,
-                                   RevisionNumberMismatchError, ItemAlreadyExistsError,
-                                   RevisionAlreadyExistsError)
-WINDOW_SIZE = 256
-PICKLE_PROTOCOL = 1
-DEFAULT_USER = 'storage'
-DEFAULT_COMMIT_MESSAGE = '...'
-WIKI_METADATA_PREFIX = '_meta_'
-BACKEND_METADATA_PREFIX = '_backend_'
-
-class MercurialBackend(Backend):
-    """Implements backend storage using Mercurial VCS."""
-
-    def __init__(self, path):
-        """
-        Create data directories and initialize mercurial repository.
-        If direcrories or repository exists, reuse it. Create name-mapping.
-        """
-        self._path = os.path.abspath(path)
-        self._rev_path = os.path.join(self._path, 'rev')
-        self._meta_path = os.path.join(self._path, 'meta')
-        self._meta_db = os.path.join(self._meta_path, 'name-mapping')
-        try:
-            self._ui = ui.ui(quiet=True, interactive=False)
-        except:
-            self._ui = ui.ui()
-            self._ui.setconfig('ui', 'quiet', 'true')
-            self._ui.setconfig('ui', 'interactive', 'false')
-        for path in (self._path, self._rev_path, self._meta_path):
-            try:
-                os.makedirs(path)
-            except OSError as err:
-                if err.errno != errno.EEXIST:
-                    raise BackendError(str(err))
-        try:
-            self._repo = hg.repository(self._ui, self._rev_path)
-        except RepoError:
-            self._repo = hg.repository(self._ui, self._rev_path, create=True)
-
-        self._rev_item_lockrefs = {}    # versioned items lock references
-        self._meta_item_lockrefs = {}   # non-versioned items lock references
-        self._create_cdb()
-
-    def get_item(self, itemname):
-        """
-        Return an Item with given name.
-        Raise NoSuchItemError if Item does not exist.
-        """
-        id = self._hash(itemname)
-        try:
-            self._repo.changectx('')[id]
-        except LookupError:
-            if not self._has_meta(id):
-                raise NoSuchItemError('Item does not exist: %s' % itemname)
-        item = Item(self, itemname)
-        item._id = id
-        return item
-
-    def has_item(self, itemname):
-        """Return True if Item with given name exists."""
-        id = self._hash(itemname)
-        return id in self._repo.changectx('') or self._has_meta(id)
-
-    def create_item(self, itemname):
-        """
-        Create Item with given name.
-        Raise ItemAlreadyExistsError if Item already exists.
-        Return Item object.
-        """
-        if not isinstance(itemname, (str, unicode)):
-            raise TypeError("Wrong Item name type: %s" % type(itemname))
-        if self.has_item(itemname):
-            raise ItemAlreadyExistsError("Item with that name already exists: %s" % itemname)
-        item = Item(self, itemname)
-        item._id = None
-        return item
-
-    def iter_items_noindex(self):
-        """
-        Return generator for iterating through collection of Items
-        in repository.
-        """
-        def filter(id):
-            return id.endswith(".rev") or id.endswith(".rip")
-
-        ctx = self._repo.changectx('')
-        for id in itertools.ifilterfalse(filter, ctx):
-            item = Item(self, self._name(id))
-            item._id = id
-            yield item
-        with cdb.init(self._meta_db) as c:
-            record = c.each()
-            while record:
-                item = Item(self, record[1])
-                item._id = record[0]
-                yield item
-                record = c.each()
-
-    iteritems = iter_items_noindex
-
-    def _get_revision(self, item, revno):
-        """
-        Return given Revision of an Item. Raise NoSuchRevisionError
-        if Revision does not exist.
-        Return MercurialStoredRevision object.
-        """
-        try:
-            with self._revisions_index(item) as index:
-                if revno == -1:
-                    revno = index.last_key
-                if revno not in index:
-                    raise NoSuchRevisionError("Item Revision does not exist: %s" % revno)
-        except IOError:
-            raise NoSuchRevisionError("Item Revision does not exist: %s" % revno)
-
-        revision = MercurialStoredRevision(item, revno)
-        revision._item_id = item._id
-        revision._metadata = None
-        revision._data = None
-        return revision
-
-    def _list_revisions(self, item):
-        """Return a list of Item Revision numbers."""
-        if not item._id:
-            return []
-        else:
-            try:
-                with self._revisions_index(item) as index:
-                    revs = [key for key in index]
-                return revs
-            except IOError:
-                return []
-
-    def _create_revision(self, item, revno):
-        """Create new Item Revision. Return NewRevision object."""
-        try:
-            with self._revisions_index(item) as index:
-                if revno in index:
-                    raise RevisionAlreadyExistsError("Item Revision already exists: %s" % revno)
-                if revno != index.last_key + 1:
-                    raise RevisionNumberMismatchError("Unable to create revision number %d. "
-                        "New Revision number must be next to latest Revision number." % revno)
-        except IOError:
-            if revno != 0:
-                raise RevisionNumberMismatchError("Unable to create revision number %d. "
-                        "New Revision number must be next to latest Revision number." % revno)
-
-        rev = NewRevision(item, revno)
-        rev._data = None
-        rev._revno = revno
-        rev._item_id = item._id
-        rev._tmp_fpath = tempfile.mkstemp("-rev", "tmp-", dir=self._rev_path)[1]
-        return rev
-
-    def _destroy_revision(self, revision):
-        item = revision.item
-        lock = self._lock_rev_item(item)
-        try:
-            with self._revisions_index(item) as revisions:
-                with self._destroyed_index(item, create=True) as destroyed:
-                    destroyed[revision.revno] = revisions[revision.revno]
-                    del revisions[revision.revno]
-                    if destroyed.empty:
-                        self._repo[None].add(["%s.rip" % item._id])
-            self._commit_files(["%s.rev" % item._id, "%s.rip" % item._id], message='(revision destroy)')
-        finally:
-            lock.release()
-
-    def _rename_item(self, item, newname):
-        """
-        Rename given Item name to newname.
-        Raise ItemAlreadyExistsError if destination exists.
-
-        Also rename versioned index file to follow new item name.
-        """
-        newid = self._hash(newname)
-        try:
-            lock = self._lock_rev_item(item)
-            try:
-                if self.has_item(newname):
-                    raise ItemAlreadyExistsError("Destination item already exists: %s" % newname)
-                self._repo.changectx('')[item._id]
-
-                src, dst = os.path.join(self._rev_path, item._id), os.path.join(self._rev_path, newid)
-                commands.rename(self._ui, self._repo, src, dst)
-                commands.rename(self._ui, self._repo, "%s.rev" % src, "%s.rev" % dst)
-                # this commit will update items filelog in repository
-                # we provide 'name' metadata to be able to use self._name from this internal revision too
-                meta = self._encode_metadata({'name': newname,
-                                              'renamed_to': (newid, newname),
-                                              'renamed_id': item._id}, BACKEND_METADATA_PREFIX)
-                self._commit_files(['%s.rev' % item._id, '%s.rev' % newid, item._id, newid], extra=meta,
-                        message='(renamed %s to %s)' % (item.name.encode('utf-8'), newname.encode('utf-8')))
-            finally:
-                lock.release()
-        except LookupError:
-            pass
-        if self._has_meta(item._id):
-            lock = self._lock_meta_item(item)
-            try:
-                src = os.path.join(self._meta_path, "%s.meta" % item._id)
-                dst = os.path.join(self._meta_path, "%s.meta" % newid)
-                try:
-                    util.rename(src, dst)
-                except OSError as err:
-                    if err == errno.EEXIST:
-                        pass  # if metadata is empty, there is no file, only entry in cdb
-                self._add_to_cdb(newid, newname, replace=item._id)
-            finally:
-                lock.release()
-        item._id = newid
-
-    def _commit_item(self, revision, second_parent=None):
-        """
-        Commit given Item Revision to repository. Update and commit Item index file.
-        If Revision already exists, raise RevisionAlreadyExistsError.
-        """
-        # If there hasn't been a timestamp already assigned, assign one.
-        # Note: this is done here primarily to avoid test breakage, the production
-        #       timestamps are generated by indexing, see update_index()
-        if MTIME not in revision:
-            revision[MTIME] = int(time.time())
-        item = revision.item
-        lock = self._lock_rev_item(item)
-        try:
-            if not item._id:
-                self._add_item(item)
-            elif revision.revno in self._list_revisions(item):
-                raise RevisionAlreadyExistsError("Item Revision already exists: %s" % revision.revno)
-
-            util.rename(revision._tmp_fpath, os.path.join(self._rev_path, item._id))
-            if revision.revno > 0:
-                parents = [self._get_changectx(self._get_revision(item, revision.revno - 1)).node()]
-                if second_parent:
-                    parents.append(second_parent)
-            else:
-                self._revisions_index(item, create=True).close()
-                self._repo[None].add([item._id, "%s.rev" % item._id])
-                parents = []
-            internal_meta = {'rev': revision.revno,
-                             'name': item.name,
-                             'id': item._id,
-                             'parents': " ".join(parents)}
-            meta = self._encode_metadata(internal_meta, BACKEND_METADATA_PREFIX)
-            meta.update(self._encode_metadata(revision, WIKI_METADATA_PREFIX))
-
-            date = datetime.fromtimestamp(revision[MTIME]).isoformat(sep=' ')
-            user = revision.get(USERID, DEFAULT_USER).encode("utf-8")
-            msg = revision.get(COMMENT, DEFAULT_COMMIT_MESSAGE).encode("utf-8")
-
-            self._commit_files([item._id], message=msg, user=user, extra=meta, date=date)
-            self._append_revision(item, revision)
-        finally:
-            lock.release()
-
-    def _rollback_item(self, revision):
-        pass
-
-    def _destroy_item(self, item):
-        self._repo[None].remove(['%s.rev' % item._id, item._id], unlink=True)
-        with self._destroyed_index(item, create=True) as index:
-            if index.empty:
-                self._repo[None].add(["%s.rip" % item._id])
-            index.truncate()
-        self._commit_files(['%s.rev' % item._id, '%s.rip' % item._id, item._id], message='(item destroy)')
-        try:
-            os.remove(os.path.join(self._meta_path, "%s.meta" % item._id))
-        except OSError as err:
-            if err.errno == errno.EACCES:
-                raise CouldNotDestroyError
-
-    def _change_item_metadata(self, item):
-        """Start Item Metadata transaction."""
-        if item._id:
-            item._lock = self._lock_meta_item(item)
-
-    def _publish_item_metadata(self, item):
-        """Dump Item Metadata to file and finish transaction."""
-        def write_meta_item(meta_path, metadata):
-            fd, fpath = tempfile.mkstemp("-meta", "tmp-", self._meta_path)
-            with os.fdopen(fd, 'wb') as f:
-                pickle.dump(metadata, f, protocol=PICKLE_PROTOCOL)
-            util.rename(fpath, meta_path)
-
-        if item._id:
-            if item._metadata is None:
-                pass
-            elif not item._metadata:
-                try:
-                    os.remove(os.path.join(self._meta_path, "%s.meta" % item._id))
-                except OSError:
-                    pass
-            else:
-                write_meta_item(os.path.join(self._meta_path, "%s.meta" % item._id), item._metadata)
-            item._lock.release()
-        else:
-            self._add_item(item)
-            self._add_to_cdb(item._id, item.name)
-            if item._metadata:
-                write_meta_item(os.path.join(self._meta_path, "%s.meta" % item._id), item._metadata)
-
-    def _open_revision_data(self, revision):
-        if revision._data is None:
-            revision._data = StringIO.StringIO(self._get_filectx(revision).data())
-            # More effective would be to read revision data from working copy if this is last revision,
-            # however this involves locking file: there may be read on write operation (_write_revision_data).
-            #
-            # if revision.revno == self._list_revisions(revision.item)[-1]:
-            #   revision._data = open(os.path.join(self._rev_path, revision._item_id))
-
-    def _read_revision_data(self, revision, chunksize):
-        """
-        Read given amount of bytes of Revision data.
-        By default, all data is read.
-        """
-        self._open_revision_data(revision)
-        return revision._data.read(chunksize)
-
-    def _write_revision_data(self, revision, data):
-        """Write data to the given Revision."""
-        # We can open file in create_revision and pass it here but this would lead
-        # to problems as in FSBackend with too many opened files.
-        with open(revision._tmp_fpath, 'a') as f:
-            f.write(data)
-
-    def _get_item_metadata(self, item):
-        """Load Item Metadata from file. Return metadata dictionary."""
-        if item._id:
-            try:
-                with open(os.path.join(self._meta_path, "%s.meta" % item._id), "rb") as f:
-                    item._metadata = pickle.load(f)
-            except IOError:
-                item._metadata = {}
-        else:
-            item._metadata = {}
-        return item._metadata
-
-    def _get_revision_metadata(self, revision):
-        """Return given Revision Metadata dictionary."""
-        extra = self._get_changectx(revision).extra()
-        return self._decode_metadata(extra, WIKI_METADATA_PREFIX)
-
-    def _seek_revision_data(self, revision, position, mode):
-        """Set the Revisions cursor on the Revisions data."""
-        self._open_revision_data(revision)
-        revision._data.seek(position, mode)
-
-    def _tell_revision_data(self, revision):
-        """Tell the Revision data cursor position."""
-        self._open_revision_data(revision)
-        return revision._data.tell()
-
-    def _hash(self, itemname):
-        """Compute Item ID from given name."""
-        return hashlib.new('md5', itemname.encode('utf-8')).hexdigest()
-
-    def _name(self, itemid):
-        """Resolve Item name by given ID."""
-        try:
-            # there is accurate link between fctx and ctx only if there was some change
-            # so therefore we take first filelog entry
-            fctx = self._repo.changectx('')[itemid].filectx(0)
-            meta = fctx.changectx().extra()
-            return self._decode_metadata(meta, BACKEND_METADATA_PREFIX)['name']
-        except LookupError:
-            with cdb.init(self._meta_db) as c:
-                return c.get(itemid)
-
-    def _iter_changelog(self, reverse=True, filter_id=None, start_rev=None, filter_meta=None):
-        """
-        Return generator fo iterating over repository changelog.
-        Yields Changecontext object.
-        """
-        def split_windows(start, end, windowsize=WINDOW_SIZE):
-            while start < end:
-                yield start, min(windowsize, end-start)
-                start += windowsize
-
-        def wanted(changerev):
-            ctx = self._repo.changectx(changerev)
-            try:
-                meta = self._decode_metadata(ctx.extra(), BACKEND_METADATA_PREFIX)
-                if filter_meta is None:
-                    item_id, item_rev, item_name = meta['id'], meta['rev'], meta['name']
-                    try:
-                        item = Item(self, item_name)
-                        item._id = item_id
-                        with self._destroyed_index(item) as destroyed:
-                            # item is destroyed when destroyed index exists, but is empty
-                            if destroyed.empty or item_rev in destroyed:
-                                check = False
-                            else:
-                                check = not filter_id or item_id == filter_id
-                        return check
-                    except IOError:
-                        return not filter_id or item_id == filter_id
-                else:
-                    return filter_meta in meta
-            except KeyError:
-                return False
-
-        start, end = start_rev or -1, 0
-        try:
-            size = len(self._repo.changelog)
-        except TypeError:
-            size = self._repo.changelog.count()
-        if not size:
-            change_revs = []
-        else:
-            if not reverse:
-                start, end = end, start
-            change_revs = cmdutil.revrange(self._repo, ['%d:%d' % (start, end, )])
-
-        for i, window in split_windows(0, len(change_revs)):
-            revs = [changerev for changerev in change_revs[i:i+window] if wanted(changerev)]
-            for revno in revs:
-                yield self._repo.changectx(revno)
-
-    def _get_filectx(self, revision):
-        """
-        Get Filecontext object corresponding to given Revision.
-        Retrieve necessary information from index file.
-        """
-        with self._revisions_index(revision.item) as index:
-            data = index[revision.revno]
-            fctx = self._repo.filectx(data['id'], fileid=data['filenode'])
-        return fctx
-
-    def _get_changectx(self, revision):
-        """
-        Get Changecontext object corresponding to given Revision.
-        Retrieve necessary information from index file.
-        """
-        with self._revisions_index(revision.item) as index:
-            ctx = self._repo.changectx(index[revision.revno]['node'])
-        return ctx
-
-    def _lock(self, lockpath, lockref):
-        """Acquire weak reference to lock object."""
-        if lockref and lockref():
-            return lockref()
-        lock = self._repo._lock(lockpath, wait=True, releasefn=None, acquirefn=None, desc='')
-        lockref = weakref.ref(lock)
-        return lock
-
-    def _lock_meta_item(self, item):
-        """Acquire Item Metadata lock."""
-        return self._lock_item(item, self._meta_path, self._meta_item_lockrefs)
-
-    def _lock_rev_item(self, item):
-        """Acquire versioned Item lock."""
-        return self._lock_item(item, self._rev_path, self._rev_item_lockrefs)
-
-    def _lock_item(self, item, root_path, lock_dict):
-        path = os.path.join(root_path, "%s.lock" % item._id)
-        return self._lock(path, lock_dict.setdefault(item._id, None))
-
-    def _add_item(self, item):
-        """Assign ID to given Item. Raise ItemAlreadyExistsError if Item exists."""
-        if self.has_item(item.name):
-            raise ItemAlreadyExistsError("Destination item already exists: %s" % item.name)
-        item._id = self._hash(item.name)
-
-    def _append_revision(self, item, revision):
-        """Add Item Revision to index file to speed up further lookups."""
-        fctx = self._repo.changectx('')[item._id]
-        with self._revisions_index(item, create=True) as index:
-            index[revision.revno] = {'node': short(fctx.node()), 'id': item._id, 'filenode': short(fctx.filenode())}
-        self._commit_files(['%s.rev' % item._id], message='(revision append)')
-
-    def _commit_files(self, files, message=DEFAULT_COMMIT_MESSAGE, user=DEFAULT_USER, extra={}, date=None, force=True):
-        try:
-            match = mercurial.match.exact(self._rev_path, '', files)
-            self._repo.commit(match=match, text=message, user=user, extra=extra, date=date, force=force)
-        except NameError:
-            self._repo.commit(files=files, text=message, user=user, extra=extra, date=date, force=force)
-
-    def _encode_metadata(self, dict, prefix):
-        meta = {}
-        for k, v in dict.iteritems():
-            meta["%s%s" % (prefix, k)] = pickle.dumps(v)
-        return meta
-
-    def _decode_metadata(self, dict, prefix):
-        meta = {}
-        for k, v in dict.iteritems():
-            if k.startswith(prefix):
-                meta[k[len(prefix):]] = pickle.loads(v)
-        return meta
-
-    def _has_meta(self, itemid):
-        """Return True if Item with given ID has Metadata. Otherwise return None."""
-        with cdb.init(self._meta_db) as c:
-            return c.get(itemid)
-
-    def _add_to_cdb(self, itemid, itemname, replace=None):
-        """Add Item Metadata file to name-mapping."""
-        class DuplicateError(Exception):
-            """ raise for duplicate item names """
-
-        try:
-            with cdb.init(self._meta_db) as c:
-                with cdb.cdbmake("%s.ndb" % self._meta_db, "%s.tmp" % self._meta_db) as maker:
-                    record = c.each()
-                    while record:
-                        id, name = record
-                        if id == itemid:
-                            raise DuplicateError
-                        elif id == replace:
-                            pass
-                        else:
-                            maker.add(id, name)
-                        record = c.each()
-                    maker.add(itemid, itemname.encode('utf-8'))
-                util.rename("%s.ndb" % self._meta_db, self._meta_db)
-        except DuplicateError:
-            os.unlink(self._meta_db + '.ndb')
-            raise ItemAlreadyExistsError("Destination item already exists: %s" % itemname)
-
-    def _create_cdb(self):
-        """Create name-mapping file for storing Item Metadata files mappings."""
-        if not os.path.exists(self._meta_db):
-            with cdb.cdbmake(self._meta_db, "%s.tmp" % self._meta_db) as maker:
-                pass
-
-    def _destroyed_index(self, item, create=False):
-        return Index(os.path.join(self._rev_path, "%s.rip" % item._id), create)
-
-    def _revisions_index(self, item, create=False):
-        return Index(os.path.join(self._rev_path, "%s.rev" % item._id), create)
-
-
-    # extended API below - needed for drawing revision graph
-    def _get_revision_node(self, revision):
-        """
-        Return tuple consisting of (SHA1, short SHA1) changeset (node) IDs
-        corresponding to given Revision.
-        """
-        try:
-            with self._open_item_index(revision.item) as revfile:
-                revs = revfile.read().splitlines()
-            node = revs[revision.revno].split()[1]
-            return node, short(node)
-        except IOError:
-            return nullid, short(nullid)
-
-    def _get_revision_parents(self, revision):
-        """Return parent revision numbers of Revision."""
-        def get_revision(node):
-            meta = self._repo.changectx(node).extra()
-            return self._decode_metadata(meta, BACKEND_METADATA_PREFIX)['rev']
-
-        meta = self._get_changectx(revision).extra()
-        parents = self._decode_metadata(meta, BACKEND_METADATA_PREFIX)['parents'].split()
-        return [get_revision(node) for node in parents]
-
-
-class MercurialStoredRevision(StoredRevision):
-
-    def __init__(self, item, revno):
-        StoredRevision.__init__(self, item, revno)
-        self._data = None
-
-    def get_parents(self):
-        return self._backend._get_revision_parents(self)
-
-    def get_node(self):
-        return self._backend._get_revision_node(self)
-
-
-class Index(object):
-    """
-    Keys are int, values are dictionaries with keys: 'id', 'node', 'filenode'.
-    Fixed record size to ease reverse file lookups. Record consists of (in order):
-    revno(6 chars), id(32 chars), node(12 chars), filenode(12 chars) separated by
-    whitespace.
-    """
-    RECORD_SAMPLE = '000001 cdfea0c03df2d58eeb8e509ffeab1c94 abfa65835085 b80de5d13875\n'
-
-    def __init__(self, fpath, create=False):
-        if create:
-            self._file = open(fpath, 'a+')
-        else:
-            self._file = open(fpath)
-        self._fpath = fpath
-
-    def __enter__(self):
-        return self
-
-    def __exit__(self, exc_type, exc_val, exc_tb):
-        self.close()
-
-    def __getitem__(self, key):
-        for record in self._iter_record():
-            if int(record[0]) == key:
-                return {'id': record[1], 'node': record[2], 'filenode': record[3]}
-        raise KeyError
-
-    def __setitem__(self, key, value):
-        record = "%s %s %s %s\n" % (str(key).zfill(6), value['id'], value['node'], value['filenode'])
-        self._file.write(record)
-        pass
-
-    def __delitem__(self, key):
-        tmp_fd, tmp_path = tempfile.mkstemp("-index", "tmp-", os.path.dirname(self._fpath))
-        with open(tmp_path, 'w') as tmp:
-            for record in self._iter_record(reverse=False):
-                if key != int(record[0]):
-                    tmp.write(' '.join(record) + os.linesep)
-        util.rename(tmp_path, self._fpath)
-
-    def __iter__(self):
-        for record in self._iter_record(reverse=False):
-            yield int(record[0])
-
-    def __contains__(self, key):
-        if self.empty:
-            return False
-        for record in self._iter_record():
-            if int(record[0]) == key:
-                return True
-        return False
-
-    @property
-    def last_key(self):
-        try:
-            last_record = self._iter_record().next()
-            return int(last_record[0])
-        except StopIteration:
-            return -1
-
-    @property
-    def empty(self):
-        return os.path.getsize(self._fpath) == 0
-
-    def close(self):
-        self._file.close()
-
-    def truncate(self):
-        self._file.seek(0)
-        self._file.truncate()
-
-    def _iter_record(self, reverse=True):
-        """Iterates forwards/backwards on file yielding records."""
-        record_size = len(self.RECORD_SAMPLE)
-        if reverse:
-            self._file.seek(0, 2)
-            pointer = self._file.tell()
-            pointer -= record_size
-            while pointer >= 0:
-                self._file.seek(pointer)
-                pointer -= record_size
-                line = self._file.read(record_size)
-                yield line.strip().split()
-        else:
-            self._file.seek(0)
-            for line in self._file:
-                yield line.split()
-
-
--- a/MoinMoin/storage/backends/memory.py	Sat Sep 10 03:23:36 2011 +0200
+++ /dev/null	Thu Jan 01 00:00:00 1970 +0000
@@ -1,437 +0,0 @@
-# Copyright: 2008 MoinMoin:ChristopherDenter
-# Copyright: 2008 MoinMoin:JohannesBerg
-# Copyright: 2008 MoinMoin:AlexanderSchremmer
-# License: GNU GPL v2 (or any later version), see LICENSE.txt for details.
-
-"""
-    MoinMoin - MemoryBackend + TracingBackend
-
-    This module contains a simple Backend that stores all data in memory
-    and a TracingBackend that can generate a python function that contains
-    all recorded operations.
-
-    This is mainly done for testing and documentation / demonstration purposes.
-    Thus, this backend IS NOT designed for concurrent use.
-
-    DO NOT (even for the smallest glimpse of a second) consider to use this
-    backend for any production site that needs persistent storage.
-
-    ---
-"""
-
-
-import StringIO
-from threading import Lock
-import time
-
-from MoinMoin.storage import Backend as BackendBase
-from MoinMoin.storage import Item as ItemBase
-from MoinMoin.storage import StoredRevision as StoredRevisionBase
-from MoinMoin.storage import NewRevision as NewRevisionBase
-from MoinMoin.storage import Revision as RevisionBase
-
-from MoinMoin.storage.error import NoSuchItemError, NoSuchRevisionError, \
-                                   ItemAlreadyExistsError, \
-                                   RevisionAlreadyExistsError, RevisionNumberMismatchError
-
-
-class Item(ItemBase):
-    pass
-
-class StoredRevision(StoredRevisionBase):
-    pass
-
-class NewRevision(NewRevisionBase):
-    pass
-
-class MemoryBackend(BackendBase):
-    Item = Item
-    StoredRevision = StoredRevision
-    NewRevision = NewRevision
-    """
-    Implementation of the MemoryBackend. All data is kept in attributes of this
-    class. As soon as the MemoryBackend-object goes out of scope, your data is LOST.
-
-    Docstrings for the methods can be looked up in the superclass Backend, found
-    in MoinMoin.storage.
-    """
-    def __init__(self, backend_uri=''):
-        """
-        Initialize this Backend.
-
-        We accept a (unused) uri parameter, because other backends have this, too.
-        """
-        self._last_itemid = 0
-        self._itemmap = {}                  # {itemname : itemid}   // names may change...
-        self._item_metadata = {}            # {id : {metadata}}
-        self._item_revisions = {}           # {id : {revision_id : (revision_data, {revision_metadata})}}
-        self._item_metadata_lock = {}       # {id : Lockobject}
-
-    def get_item(self, itemname):
-        """
-        @see: Backend.get_item.__doc__
-        """
-        if not self.has_item(itemname):
-            raise NoSuchItemError("No such item, %r" % (itemname))
-
-        item = self.Item(self, itemname)
-        item._item_id = self._itemmap[itemname]
-
-        if not item._item_id in self._item_metadata:  # Maybe somebody already got an instance of this Item and thus there already is a Lock for that Item.
-            self._item_metadata_lock[item._item_id] = Lock()
-
-        return item
-
-    def has_item(self, itemname):
-        """
-        @see: Backend.get_item.__doc__
-
-        Overriding the default has_item-method because we can simply look the name
-        up in our nice dictionary.
-        Whenever possible, you should aim to override the dummy has_item-method.
-        """
-        return itemname in self._itemmap
-
-    def create_item(self, itemname):
-        """
-        @see: Backend.create_item.__doc__
-
-        Note: DON'T rely on the dummy has_item-method here.
-        """
-        if not isinstance(itemname, (str, unicode)):
-            raise TypeError("Itemnames must have string type, not %s" % (type(itemname)))
-        elif self.has_item(itemname):
-            raise ItemAlreadyExistsError("An Item with the name %r already exists!" % (itemname))
-
-        item = self.Item(self, itemname)
-        item._item_id = None
-        return item
-
-    def _destroy_item(self, item):
-        """
-        @see: Backend._destroy_item.__doc__
-        """
-        item_map = self._itemmap
-        item_meta = self._item_metadata
-        item_revs = self._item_revisions
-        item_lock = self._item_metadata_lock
-
-        try:
-            item_id = item_map[item.name]
-            del item_map[item.name]
-        except KeyError:
-            # No need to proceed further. The item has already been destroyed by someone else.
-            return
-
-        for struct in (item_meta, item_revs, item_lock):
-            try:
-                del struct[item_id]
-            except KeyError:
-                pass
-
-    def iter_items_noindex(self):
-        """
-        @see: Backend.iter_items_noindex.__doc__
-        """
-        for itemname in self._itemmap.keys():
-            yield self.get_item(itemname)
-
-    iteritems = iter_items_noindex
-
-    def _get_revision(self, item, revno):
-        """
-        @see: Backend._get_revision.__doc__
-        """
-        item_id = item._item_id
-        revisions = item.list_revisions()
-
-        if revno == -1 and revisions:
-            revno = max(item.list_revisions())
-        try:
-            data = self._item_revisions[item_id][revno][0]
-            metadata = self._item_revisions[item_id][revno][1]
-        except KeyError:
-            raise NoSuchRevisionError("No Revision #%d on Item %s - Available revisions: %r" % (revno, item.name, revisions))
-        else:
-            revision = self.StoredRevision(item, revno)
-            revision._data = StringIO.StringIO(data)
-            revision._metadata = metadata
-            return revision
-
-    def _list_revisions(self, item):
-        """
-        @see: Backend._list_revisions.__doc__
-        """
-        try:
-            return self._item_revisions[item._item_id].keys()
-        except KeyError:
-            return []
-
-    def _create_revision(self, item, revno):
-        """
-        @see: Backend._create_revision.__doc__
-        """
-        try:
-            last_rev = max(self._item_revisions[item._item_id].iterkeys())
-        except (ValueError, KeyError):
-            last_rev = -1
-        if revno != last_rev + 1:
-            raise RevisionNumberMismatchError(("The latest revision of the item '%r' is %d, thus you cannot create revision number %d. "
-                                               "The revision number must be latest_revision + 1.") % (item.name, last_rev, revno))
-        try:
-            if revno in self._item_revisions[item._item_id]:
-                raise RevisionAlreadyExistsError("A Revision with the number %d already exists on the item %r" % (revno, item.name))
-        except KeyError:
-            pass  # First if-clause will raise an Exception if the Item has just
-                  # been created (and not committed), because there is no entry in self._item_revisions yet. Thus, silenced.
-
-        new_revision = self.NewRevision(item, revno)
-        new_revision._revno = revno
-        new_revision._data = StringIO.StringIO()
-        return new_revision
-
-    def _destroy_revision(self, revision):
-        """
-        @see: Backend._destroy_revision.__doc__
-        """
-        try:
-            item_id = self._itemmap[revision.item.name]
-            del self._item_revisions[item_id][revision.revno]
-        except KeyError:
-            # The revision has already been destroyed by someone else. No need to make our hands dirty.
-            return
-
-    def _rename_item(self, item, newname):
-        """
-        @see: Backend._rename_item.__doc__
-        """
-        if self.has_item(newname):
-            raise ItemAlreadyExistsError("Cannot rename Item %s to %s since there already is an Item with that name." % (item.name, newname))
-
-        name = None
-        for itemname, itemid in self._itemmap.iteritems():
-            if itemid == item._item_id:
-                name = itemname
-                break
-        assert name is not None
-
-        copy_me = self._itemmap[name]
-        self._itemmap[newname] = copy_me
-        del self._itemmap[name]
-
-    def _add_item_internally(self, item):
-        """
-        Given an item, store it persistently and initialize it. Please note
-        that this method takes care of the internal counter we use to give each
-        Item a unique ID.
-        Not defined by superclass.
-
-        :type item: Object of class Item.
-        :param item: Item we want to add.
-        """
-        item._item_id = self._last_itemid
-        self._itemmap[item.name] = item._item_id
-        self._item_metadata[item._item_id] = {}
-        self._item_revisions[item._item_id] = {}  # no revisions yet
-        self._item_metadata_lock[item._item_id] = Lock()
-        self._last_itemid += 1
-
-    def _commit_item(self, revision):
-        """
-        @see: Backend._commit_item.__doc__
-        """
-        item = revision.item
-        if item._item_id is None:
-            if self.has_item(item.name):
-                raise ItemAlreadyExistsError("You tried to commit an Item with the name %r, but there already is an Item with that name." % item.name)
-            self._add_item_internally(item)
-        elif self.has_item(item.name) and (revision.revno in self._item_revisions[item._item_id]):
-            item._uncommitted_revision = None  # Discussion-Log: http://moinmo.in/MoinMoinChat/Logs/moin-dev/2008-06-20 around 17:27
-            raise RevisionAlreadyExistsError("A Revision with the number %d already exists on the Item %r!" % (revision.revno, item.name))
-
-        revision._data.seek(0)
-
-        if revision._metadata is None:
-            revision._metadata = {}
-        self._item_revisions[item._item_id][revision.revno] = (revision._data.getvalue(), revision._metadata.copy())
-
-    def _rollback_item(self, rev):
-        """
-        @see: Backend._rollback_item.__doc__
-        """
-        # Since we have no temporary files or other things to deal with in this backend,
-        # we can just set the items uncommitted revision to None.
-        pass
-
-    def _change_item_metadata(self, item):
-        """
-        @see: Backend._change_item_metadata.__doc__
-        """
-        if item._item_id is None:
-            # If this is the case it means that we operate on an Item that has not been
-            # committed yet and thus we should not use a Lock in persistent storage.
-            pass
-        else:
-            self._item_metadata_lock[item._item_id].acquire()
-
-    def _publish_item_metadata(self, item):
-        """
-        @see: Backend._publish_item_metadata.__doc__
-        """
-        if item._item_id is None and self.has_item(item.name):
-            raise  ItemAlreadyExistsError("The Item whose metadata you tried to publish already exists.")
-        if item._item_id is None:
-            # not committed yet, no locking, store item
-            self._add_item_internally(item)
-        else:
-            self._item_metadata_lock[item._item_id].release()
-        if item._metadata is not None:
-            self._item_metadata[item._item_id] = item._metadata.copy()
-        else:
-            self._item_metadata[item._item_id] = {}
-
-    def _read_revision_data(self, revision, chunksize):
-        """
-        @see: Backend._read_revision_data.__doc__
-        """
-        return revision._data.read(chunksize)
-
-    def _write_revision_data(self, revision, data):
-        """
-        @see: Backend._write_revision_data.__doc__
-        """
-        revision._data.write(data)
-
-    def _get_item_metadata(self, item):
-        """
-        Load metadata for a given item, return dict.
-
-        :type item: Object of class Item.
-        :param item: Item for which we want to get the metadata dict.
-        :returns: dict
-        """
-        try:
-            return dict(self._item_metadata[item._item_id])
-        except KeyError:  # The Item we are operating on has not been committed yet.
-            return dict()
-
-    def _get_revision_metadata(self, revision):
-        """
-        Load metadata for a given Revision, returns dict.
-
-        :type revision: Object of subclass of Revision.
-        :param revision: Revision for which we want to get the metadata dict.
-        :returns: dict
-        """
-        item = revision._item
-        return self._item_revisions[item._item_id][revision.revno][1]
-
-    def _seek_revision_data(self, revision, position, mode):
-        """
-        @see: Backend._seek_revision_data.__doc__
-        """
-        revision._data.seek(position, mode)
-
-    def _tell_revision_data(self, revision):
-        """
-        @see: Backend._tell_revision_data.__doc__
-        """
-        return revision._data.tell()
-
-
-# ------ The tracing backend
-
-class TracingItem(Item):
-    pass
-
-class TracingNewRevision(NewRevision):
-    pass
-
-class TracingStoredRevision(StoredRevision):
-    pass
-
-
-class TracingBackend(MemoryBackend):
-    """ Records every operation. When you are finished calling things, run get_code or get_func."""
-    # XXX could use weakrefs to determine if objects are still alive and keep them alive according
-    # to the sampled info in order to emulate scalability issues
-    Item = TracingItem
-    StoredRevision = TracingStoredRevision
-    NewRevision = TracingNewRevision
-    codebuffer = []
-
-    def __init__(self, filename=None):
-        MemoryBackend.__init__(self)
-        self._backend = self # hehe, more uniform code :)
-        self.filename = filename
-
-    def log_expr(self, expr):
-        self.codebuffer.append(expr)
-
-    def get_code(self):
-        return "\n".join(["def run(backend, got_exc=lambda x: None):", "    pass"] + self.codebuffer)
-
-    def get_func(self):
-        if self.filename:
-            file(self.filename, "w").write(self.get_code())
-        l = {}
-        eval(compile(self.get_code(), self.filename or "not_on_disk", "exec"), l, l)
-        return l["run"]
-
-def _get_thingie_id(thingie, item):
-    """ Generates a unique id for item depending on its class of objects. """
-    if thingie == "backend":
-        return "backend"
-    return "%s_%i" % (thingie, id(item), )
-
-def _retval_to_expr(retval):
-    """ Determines whether we need to do an assignment and generates the assignment subexpr if necessary. """
-    for thingie, klass in (("item", Item), ("rev", RevisionBase)):
-        if isinstance(retval, klass):
-            return _get_thingie_id(thingie, retval) + " = "
-    return ""
-
-def _get_thingie_wrapper(thingie):
-    def wrap_thingie(func):
-        def wrapper(*args, **kwargs):
-            exc = None
-            log = args[0]._backend.log_expr
-            level = 4
-            retval = None
-            try:
-                try:
-                    retval = func(*args, **kwargs)
-                except Exception as e:
-                    exc = type(e).__name__ # yes, not very exact
-                    log(" " * level + "try:")
-                    level += 4
-                    raise
-            finally:
-                log(" " * level + "%s%s.%s(*%s, **%s)" % (_retval_to_expr(retval),
-                _get_thingie_id(thingie, args[0]), func.func_name, repr(args[1:]), repr(kwargs)))
-                if exc:
-                    level -= 4
-                    log(" " * level + "except Exception, e:")
-                    level += 4
-                    log(" " * level + "if type(e).__name__ != %r:" % (exc, ))
-                    level += 4
-                    log(" " * level + "got_exc(e)")
-            return retval
-        return wrapper
-    return wrap_thingie
-
-
-wrap_rev = _get_thingie_wrapper("rev")
-wrap_item = _get_thingie_wrapper("item")
-wrap_be = _get_thingie_wrapper("backend")
-
-def do_meta_patchery():
-    for fromclass, toclass, wrappergen in ((MemoryBackend, TracingBackend, wrap_be), (Item, TracingItem, wrap_item),
-                               (NewRevision, TracingNewRevision, wrap_rev), (StoredRevision, TracingStoredRevision, wrap_rev)):
-        for name, func in fromclass.__dict__.iteritems():
-            if not name.startswith("_") and hasattr(func, 'func_name'):
-                setattr(toclass, name, wrappergen(func))
-do_meta_patchery()
-
-del do_meta_patchery, wrap_rev, wrap_item, wrap_be, _get_thingie_wrapper
-
--- a/MoinMoin/storage/backends/sqla.py	Sat Sep 10 03:23:36 2011 +0200
+++ /dev/null	Thu Jan 01 00:00:00 1970 +0000
@@ -1,652 +0,0 @@
-# Copyright: 2009 MoinMoin:ChristopherDenter
-# Copyright: 2009 MoinMoin:ThomasWaldmann
-# License: GNU GPL v2 (or any later version), see LICENSE.txt for details.
-
-"""
-    MoinMoin - Backends - SQLAlchemy Backend
-
-    This backend utilizes the power of SQLAlchemy.
-    You can use it to store your wiki contents using any database supported by
-    SQLAlchemy. This includes SQLite, PostgreSQL and MySQL.
-
-    XXX Note that this backend is not currently ready for use! See the TODOs. XXX
-
-
-    Talking to the DB
-    =================
-
-    In order to communicate with the database, we need to establish a connection
-    by requesting a 'session'. `Session` is a class that was bound to the backend object.
-    When we create an instance of it, we can persist our objects, modify or delete them.
-    (Note that the SA docs suggest keeping the session class in the module scope. That
-    does not work for us as we need to be able to create multiple backend objects,
-    each potentially bound to a different database. If the session was global in the module,
-    all backends would maintain a connection to the database whose backend was created last.)
-    Usually a session is created at the beginning of a request and disposed after the request
-    has been processed. This is a bit difficult to realize as we are completely unaware of
-    requests on the storage layer.
-    Furthermore the backend may be used to store large amounts of data. In order to properly
-    deal with such BLOBs, we split them manually into smaller chunks that we can 'stream'
-    sequentially from and to the database. (Note that there is no such thing as a file-like
-    read(n) API for our DBMSs and we don't want to read a large BLOB into memory all at once).
-    It is also very important that we dispose of all sessions that we create properly and in
-    a 'timely' manner, because the number of concurrent connections that are allowed for
-    a database may be very limited (e.g., 5). A session is properly ended by invoking one of
-    the following methods: session.commit(), session.rollback() or session.close().
-    As some attributes on our mapped objects are loaded lazily, the mapped object must
-    be bound to a session obviously for the load operation to succeed. In order to accomplish
-    that, we currently add the mapped objects to a session and close that session after the object
-    has gone out of scope. This is a HACK, because we use __del__, which is very unreliable.
-    The proper way to deal with this would be adding revision.close() (and perhaps even item.close?)
-    to the storage API and free all resources that were acquired in that method. That of course
-    means that we need to adjust all storage-related code and add the close() calls.
-
-
-    TODO
-    ====
-    The following is a list of things that need to be done before this backend can be used productively
-    (not including beta tests):
-
-        * Data.read must be changed to operate on dynamically loaded chunks. I.e., the data._chunks must
-          be set to lazy='dynamic', which will then be a query instead of a collection.
-        * Find a proper solution for methods that issue many SQL queries.
-        * MetaData should definitely NOT simply be stored as a dict in a PickleType Column. Store that properly,
-          perhaps in (a) separate table(s).
-        * Find out why RC lists an item that was just written below Trash/ as well. (Likely a UI bug.)
-        * Add revision.close() (and perhaps item.close()?) to the storage API and make use of it.
-          With the help of __del__, find all places that do not properly close connections. Do NOT rely on
-          __del__. Use it only as a last resort to close connections.
-        * Perhaps restructure the code. (Move methods that don't have to be on the Item/Revision classes
-          into the backend, for example.)
-        * Make sure the sketched approach is threadsafe for our purposes. Perhaps use the contextual session
-          instead.
-        * Currently there only is SQLARevision. Make sure that operations that are not allowed (such as modifying
-          the data of an already stored revision) raise the appropriate exceptions.
-"""
-
-
-from threading import Lock
-
-from sqlalchemy import create_engine, Column, Unicode, Integer, Binary, PickleType, ForeignKey
-from sqlalchemy.exc import IntegrityError, DataError
-from sqlalchemy.orm import sessionmaker, relation, backref
-from sqlalchemy.orm.exc import NoResultFound
-from sqlalchemy.schema import UniqueConstraint
-from sqlalchemy.ext.declarative import declarative_base
-# Only used/needed for development/testing:
-from sqlalchemy.pool import StaticPool
-
-from MoinMoin.storage import Backend, Item, Revision, NewRevision, StoredRevision
-from MoinMoin.storage.error import ItemAlreadyExistsError, NoSuchItemError, NoSuchRevisionError, \
-                                   RevisionAlreadyExistsError, StorageError
-
-
-Base = declarative_base()
-
-NAME_LEN = 512
-
-
-class SQLAlchemyBackend(Backend):
-    """
-    The actual SQLAlchemyBackend. Take note that the session class is bound to
-    the individual backend it belongs to.
-    """
-    def __init__(self, db_uri=None, verbose=False):
-        """
-        :type db_uri: str
-        :param db_uri: The database uri that we pass on to SQLAlchemy.
-                       May contain user/password/host/port/etc.
-        :type verbose: bool
-        :param verbose: Verbosity setting. If set to True this will print all SQL queries
-                        to the console.
-        """
-        if db_uri is None:
-            # These are settings that apply only for development / testing only. The additional args are necessary
-            # due to some limitations of the in-memory sqlite database.
-            db_uri = 'sqlite:///:memory:'
-            self.engine = create_engine(db_uri, poolclass=StaticPool, connect_args={'check_same_thread': False})
-        else:
-            self.engine = create_engine(db_uri, echo=verbose, echo_pool=verbose)
-
-        # Our factory for sessions. Note: We do NOT define this module-level because then different SQLABackends
-        # using different engines (potentially different databases) would all use the same Session object with the
-        # same engine that the backend instance that was created last bound it to.
-        # XXX Should this perhaps use the scoped/contextual session instead?
-        self.Session = sessionmaker(bind=self.engine, expire_on_commit=False)
-
-        # Create the database schema (for all tables)
-        SQLAItem.metadata.bind = self.engine
-        SQLAItem.metadata.create_all()
-        # {id : Lockobject} -- lock registry for item metadata locks
-        self._item_metadata_lock = {}
-
-    def close(self):
-        self.engine.dispose()
-
-    def has_item(self, itemname):
-        """
-        @see: Backend.has_item.__doc__
-        """
-        try:
-            session = self.Session()
-            session.query(SQLAItem).filter_by(_name=itemname).one()
-            return True
-        except NoResultFound:
-            return False
-        finally:
-            # Since we simply return a bool, we can (and must) close the session here without problems.
-            session.close()
-
-    def get_item(self, itemname):
-        """
-        @see: Backend.get_item.__doc__
-        """
-        session = self.Session()
-        # The following fails if not EXACTLY one column is found, i.e., it also fails
-        # if MORE than one item is found, which should not happen since names should be
-        # unique.
-        try:
-            # Query for the item that matches the given itemname.
-            item = session.query(SQLAItem).filter_by(_name=itemname).one()
-            # SQLA doesn't call __init__, so we need to take care of that.
-            item.setup(self)
-            # Maybe somebody already got an instance of this Item and thus there already is a Lock for that Item.
-            if not item.id in self._item_metadata_lock:
-                self._item_metadata_lock[item.id] = Lock()
-            return item
-        except NoResultFound:
-            raise NoSuchItemError("The item '%s' could not be found." % itemname)
-        finally:
-            session.close()
-
-    def create_item(self, itemname):
-        """
-        @see: Backend.create_item.__doc__
-        """
-        if not isinstance(itemname, (str, unicode)):
-            raise TypeError("Itemnames must have string type, not %s" % (type(itemname)))
-
-        # This 'premature' check is ok since it may take some time until item.commit()
-        # is invoked and only there can the database raise an IntegrityError if the
-        # uniqueness-constraint for the item name is violated.
-        if self.has_item(itemname):
-            raise ItemAlreadyExistsError("An item with the name %s already exists." % itemname)
-
-        item = SQLAItem(self, itemname)
-        return item
-
-    def iter_items_noindex(self):
-        """
-        Returns an iterator over all items available in this backend.
-        (Like the dict method).
-        As iteritems() is used rather often while accessing *all* items in most cases,
-        we preload them all at once and then just iterate over them, yielding each
-        item individually to conform with the storage API.
-        The benefit is that we do not issue a query for each individual item, but
-        only a single query.
-
-        @see: Backend.iter_items_noindex.__doc__
-        """
-        session = self.Session()
-        all_items = session.query(SQLAItem).all()
-        session.close()
-        for item in all_items:
-            item.setup(self)
-            yield item
-
-    iteritems = iter_items_noindex
-
-    def _create_revision(self, item, revno):
-        """
-        @see: Backend._create_revision.__doc__
-        """
-        rev = SQLARevision(item, revno)
-        # Add a session to the object here so it can flush the written data to the
-        # database chunkwise. This is somewhat ugly.
-        rev.session = self.Session()
-        rev.session.add(rev)
-        return rev
-
-    def _rename_item(self, item, newname):
-        """
-        @see: Backend._rename_item.__doc__
-        """
-        if item.id is None:
-            raise AssertionError("Item not yet committed to storage. Cannot be renamed.")
-
-        session = self.Session()
-        item = session.query(SQLAItem).get(item.id)
-        item._name = newname
-        # No need to add the item since the session took note that its name was changed
-        # and so it's in session.dirty and will be changed when committed.
-        try:
-            session.commit()
-        except IntegrityError:
-            raise ItemAlreadyExistsError("Rename operation failed. There already is " + \
-                                         "an item named '%s'." % newname)
-        finally:
-            session.close()
-
-    def _commit_item(self, revision):
-        """
-        @see: Backend._commit_item.__doc__
-        """
-        item = revision.item
-        session = revision.session
-
-        # We need to distinguish between different types of uniqueness constraint violations.
-        # That is why we flush the item first, then we flush the revision and finally we commit.
-        # Flush would have failed if either of the two was already present (item with the same name
-        # or revision with the same revno on that item.)
-        try:
-            # try to flush item if it's not already persisted
-            if item.id is None:
-                session.add(item)
-                session.flush()
-        except IntegrityError:
-            raise ItemAlreadyExistsError("An item with that name already exists.")
-        except DataError:
-            raise StorageError("The item's name is too long for this backend. It must be less than %s." % NAME_LEN)
-        else:
-            # Flushing of item succeeded. That means we can try to flush the revision.
-            # Close the item's data container and add potentially pending chunks.
-            revision._data.close()
-            session.add(revision)
-            try:
-                session.flush()
-            except IntegrityError:
-                raise RevisionAlreadyExistsError("A revision with revno %d already exists on the item." \
-                                                  % (revision.revno))
-            else:
-                # Flushing of revision succeeded as well. All is fine. We can now commit()
-                session.commit()
-                # After committing, the Item has an id and we can create a metadata lock for it
-                self._item_metadata_lock[item.id] = Lock()
-        finally:
-            session.close()
-
-
-    def _rollback_item(self, revision):
-        """
-        @see: Backend._rollback_item.__doc__
-        """
-        session = revision.session
-        session.rollback()
-
-    def _change_item_metadata(self, item):
-        """
-        @see: Backend._change_item_metadata.__doc__
-        """
-        if item.id is None:
-            # If this is the case it means that we operate on an Item that has not been
-            # committed yet and thus we should not use a Lock in persistent storage.
-            pass
-        else:
-            self._item_metadata_lock[item.id].acquire()
-
-    def _publish_item_metadata(self, item):
-        """
-        @see: Backend._publish_item_metadata.__doc__
-        """
-        # XXX This should just be tried and the exception be caught
-        if item.id is None and self.has_item(item.name):
-            raise ItemAlreadyExistsError("The Item whose metadata you tried to publish already exists.")
-        session = self.Session()
-        session.add(item)
-        session.commit()
-        try:
-            lock = self._item_metadata_lock[item.id]
-        except KeyError:
-            # Item hasn't been committed before publish, hence no lock.
-            pass
-        else:
-            lock.release()
-
-    def _get_item_metadata(self, item):
-        """
-        @see: Backend._get_item_metadata.__doc__
-        """
-        # When the item is restored from the db, it's _metadata should already
-        # be populated. If not, it means there isn't any.
-        return {}
-
-
-class SQLAItem(Item, Base):
-    __tablename__ = 'items'
-
-    id = Column(Integer, primary_key=True)
-    # Since not all DBMSs support arbitrarily long item names, we must
-    # impose a limit. SQLite will ignore it, PostgreSQL will raise DataError
-    # and MySQL will simply truncate. Sweet.
-    # For faster lookup, index the item name.
-    _name = Column(Unicode(NAME_LEN), unique=True, index=True)
-    _metadata = Column(PickleType)
-
-    def __init__(self, backend, itemname):
-        self._name = itemname
-        self.setup(backend)
-
-    def setup(self, backend):
-        """
-        This is different from __init__ as it may be also invoked explicitly
-        when the object is returned from the database. We may as well call
-        __init__ directly, but having a separate method for that makes it clearer.
-        """
-        self._backend = backend
-        self._locked = False
-        self._read_accessed = False
-        self._uncommitted_revision = None
-
-    @property
-    def element_attrs(self):
-        return dict(name=self._name)
-
-    def list_revisions(self):
-        """
-        @see: Item.list_revisions.__doc__
-        """
-        # XXX Why does this not work?
-        # return [rev.revno for rev in self._revisions if rev.id is not None]
-        session = self._backend.Session()
-        revisions = session.query(SQLARevision).filter(SQLARevision._item_id==self.id).all()
-        revnos = [rev.revno for rev in revisions]
-        session.close()
-        return revnos
-
-    def get_revision(self, revno):
-        """
-        @see: Item.get_revision.__doc__
-        """
-        try:
-            session = self._backend.Session()
-            if revno == -1:
-                revnos = self.list_revisions()
-                try:
-                    # If there are no revisions we can list, then obviously we can't get the desired revision.
-                    revno = revnos[-1]
-                except IndexError:
-                    raise NoResultFound
-            rev = session.query(SQLARevision).filter(SQLARevision._item_id==self.id).filter(SQLARevision._revno==revno).one()
-            rev.setup(self._backend)
-            # Don't close the session here as it is needed for the revision to read the Data and access its attributes.
-            # This should be changed.
-            rev.session = session
-            rev.session.add(rev)
-            return rev
-        except NoResultFound:
-            raise NoSuchRevisionError("Item %s has no revision %d." % (self.name, revno))
-
-    def destroy(self):
-        """
-        @see: Item.destroy.__doc__
-        """
-        session = self._backend.Session()
-        session.delete(self)
-        session.commit()
-
-
-class Chunk(Base):
-    """
-    A chunk of data. This represents a piece of the BLOB we tried to save.
-    It is stored in one row in the database and can hence be retrieved independently
-    from the other chunks of the BLOB.
-    """
-    __tablename__ = 'rev_data_chunks'
-
-    id = Column(Integer, primary_key=True)
-    chunkno = Column(Integer)
-    _container_id = Column(Integer, ForeignKey('rev_data.id'))
-
-    # Maximum chunk size.
-    chunksize = 64 * 1024
-    _data = Column(Binary(chunksize))
-
-    def __init__(self, chunkno, data=''):
-        # We enumerate the chunks so as to keep track of their order.
-        self.chunkno = chunkno
-        assert len(data) <= self.chunksize
-        self._data = data
-
-    @property
-    def data(self):
-        """
-        Since we store the chunk's data internally as Binary type, we
-        get buffer objects back from the DB. We need to convert them
-        to str in order to work with them.
-        """
-        return str(self._data)
-
-    def write(self, data):
-        """
-        Write the given data to this chunk. If the data is longer than
-        what we can store (perhaps we were already filled a bit or it's
-        just too much data), we return the amount of bytes written.
-        """
-        if data:
-            remaining = self.chunksize - len(self.data)
-            data = data[:remaining]
-            self._data += data
-        #else:
-        #   # if data is empty, we do not need to do anything!
-        #   pass
-        return len(data)
-
-
-class Data(Base):
-    """
-    Data that is assembled from smaller chunks.
-    Bookkeeping is done here.
-    """
-    __tablename__ = 'rev_data'
-
-    id = Column(Integer, primary_key=True)
-    # We need to use the following cascade to add/delete the chunks from the database, if
-    # Data is added/deleted.
-    _chunks = relation(Chunk, order_by=Chunk.id, cascade='save-update, delete, delete-orphan')
-    _revision_id = Column(Integer, ForeignKey('revisions.id'))
-    size = Column(Integer)
-
-    def __init__(self):
-        self.setup()
-        self.size = 0
-
-    # XXX use sqla reconstructor
-    def setup(self):
-        """
-        @see: SQLAItem.setup.__doc__
-        """
-        self.chunkno = 0
-        # XXX if we keep the last chunk outside of _chunks, read() will not be
-        # able to read data from the last chunk
-        self._last_chunk = Chunk(self.chunkno)
-        self.cursor_pos = 0
-
-    def write(self, data):
-        """
-        The given data is split into chunks and stored in Chunk objects.
-        Each chunk is 'filled' until it is full (i.e., Chunk.chunksize == len(Chunk.data)).
-        Only the last chunk may not be filled completely.
-        This does *only* support sequential writing of data, because otherwise
-        we'd need to re-order potentially all chunks after the cursor position.
-
-        :type data: str
-        :param data: The data we want to split and write to the DB in chunks.
-        """
-        # XXX This currently relies on the autoflush feature of the session. It should ideally
-        #     flush after every chunk.
-        while data:
-            written = self._last_chunk.write(data)
-            if written:
-                self.size += written
-                data = data[written:]
-            else:
-                self.chunkno += 1
-                self._chunks.append(self._last_chunk)
-                self._last_chunk = Chunk(self.chunkno)
-
-    def read(self, amount=None):
-        """
-        The given amount of data is read from the smaller chunks that are contained in this
-        Data container. The caller is completely unaware of the existence of those chunks.
-
-        Behaves like file-API's read().
-
-        :type amount: int
-        :param amount: amount of bytes we want to read.
-        """
-        chunksize = self._last_chunk.chunksize
-
-        available = self.size - self.cursor_pos
-        if available < 0:
-            # cursor might be far beyond EOF, but that still just means 0
-            available = 0
-
-        if amount is None or amount < 0 or amount > available:
-            amount = available
-
-        chunkno_first, head_offset = divmod(self.cursor_pos, chunksize)
-        chunkno_last, tail_offset = divmod(self.cursor_pos + amount, chunksize)
-
-        if tail_offset == 0:
-            # This handles multiple special cases:
-            # any read that ends on a CHUNK boundary - we do not need to read
-            # chunkno_last because there is no data in it that we need to read.
-            # this includes the very special case of a 0 byte read at pos 0.
-            # this includes also the special case of a read ending at EOF and
-            # EOF being on a CHUNK boundary.
-            # We optimize that to not read the unneeded chunk (for the EOF case
-            # this chunk does not even exist),  but use all bytes up to the end
-            # of the previous chunk (if there is a previous chunk).
-            chunkno_last -= 1
-            tail_offset = chunksize
-
-        chunks = [chunk.data for chunk in self._chunks[chunkno_first:chunkno_last+1]]
-        if chunks:
-            # make sure that there is at least one chunk to operate on
-            # if there is no chunk at all, we have empty data
-            if chunkno_first != chunkno_last:
-                # more than 1 chunk, head and tail in different chunks
-                chunks[0] = chunks[0][head_offset:]
-                chunks[-1] = chunks[-1][:tail_offset]
-            else:
-                # only 1 chunk with head and tail inside it
-                chunks[0] = chunks[0][head_offset:tail_offset]
-        data = "".join(chunks)
-        assert len(data) == amount
-        self.cursor_pos += amount
-        return data
-
-    def seek(self, pos, mode=0):
-        """
-        @see: StringIO.seek.__doc__
-        """
-        if mode == 0:
-            if pos < 0:
-                raise IOError("invalid argument")
-            cursor = pos
-        elif mode == 1:
-            cursor = max(0, self.cursor_pos + pos)
-        elif mode == 2:
-            cursor = max(0, self.size + pos)
-        self.cursor_pos = cursor
-
-    def tell(self):
-        """
-        @see: StringIO.tell.__doc__
-        """
-        return self.cursor_pos
-
-    def close(self):
-        """
-        Close the Data container. Append the last chunk.
-        """
-        self._chunks.append(self._last_chunk)
-
-
-class SQLARevision(NewRevision, Base):
-    """
-    The SQLARevision. This is currently only based on NewRevision.
-    It does NOT currently check whether the operation performed is valid.
-    """
-    __tablename__ = 'revisions'
-    # Impose a UniqueConstraint so only one revision with a specific revno may exist on one item
-    __table_args__ = (UniqueConstraint('_item_id', '_revno'), {})
-
-    id = Column(Integer, primary_key=True)
-    # We need to add/delete the Data container of this revision when the revision is added/deleted
-    _data = relation(Data, uselist=False, lazy=False, cascade='save-update, delete, delete-orphan')
-    _item_id = Column(Integer, ForeignKey('items.id'), index=True)
-    # If the item is deleted, delete this revision as well.
-    _item = relation(SQLAItem, backref=backref('_revisions', cascade='delete, delete-orphan', lazy=True), cascade='', uselist=False, lazy=False)
-    _revno = Column(Integer, index=True)
-    _metadata = Column(PickleType)
-
-    def __init__(self, item, revno, *args, **kw):
-        super(SQLARevision, self).__init__(item, revno, *args, **kw)
-        self._revno = revno
-        self.setup(item._backend)
-        self._item = item
-        self._item_id = item.id
-
-    def __del__(self):
-        # XXX XXX XXX DO NOT RELY ON THIS
-        try:
-            self.session.close()
-        except AttributeError:
-            pass
-
-    @property
-    def element_attrs(self):
-        return dict(revno=str(self._revno))
-
-    def setup(self, backend):
-        if self._data is None:
-            self._data = Data()
-        if self._metadata is None:
-            self._metadata = {}
-        self._data.setup()
-        self._backend = backend
-
-    def write(self, data):
-        """
-        Write the given amount of data.
-        """
-        self._data.write(data)
-        self._size = self._data.size
-
-    def read(self, amount=None):
-        """
-        Read the given amount of data.
-        """
-        return self._data.read(amount)
-
-    def seek(self, pos, mode=0):
-        """
-        Seek to the given pos.
-        """
-        self._data.seek(pos, mode)
-
-    def tell(self):
-        """
-        Return the current cursor pos.
-        """
-        return self._data.tell()
-
-    def close(self):
-        """
-        Close all open sessions.
-        """
-        self.session.close()
-
-    def __setitem__(self, key, value):
-        NewRevision.__setitem__(self, key, value)
-
-    def destroy(self):
-        """
-        @see: Backend.Revision.destroy.__doc__
-        """
-        session = self._backend.Session.object_session(self)
-        if session is None:
-            session = self._backend.Session()
-        session.delete(self)
-        session.commit()
--- a/MoinMoin/storage/middleware/__init__.py	Sat Sep 10 03:23:36 2011 +0200
+++ /dev/null	Thu Jan 01 00:00:00 1970 +0000
@@ -1,7 +0,0 @@
-# Copyright: 2011 MoinMoin:ThomasWaldmann
-# License: GNU GPL v2 (or any later version), see LICENSE.txt for details.
-
-"""
-MoinMoin - Storage Middleware / Mixins
-"""
-
--- a/MoinMoin/storage/middleware/acl.py	Sat Sep 10 03:23:36 2011 +0200
+++ /dev/null	Thu Jan 01 00:00:00 1970 +0000
@@ -1,508 +0,0 @@
-# Copyright: 2003-2011 MoinMoin:ThomasWaldmann
-# Copyright: 2000-2004 Juergen Hermann <jh@web.de>
-# Copyright: 2003 Gustavo Niemeyer
-# Copyright: 2005 Oliver Graf
-# Copyright: 2007 Alexander Schremmer
-# Copyright: 2009 Christopher Denter
-# License: GNU GPL v2 (or any later version), see LICENSE.txt for details.
-
-"""
-MoinMoin - ACL Middleware
-
-This backend is a middleware implementing access control using ACLs (access
-control lists) and is referred to as AMW (ACL MiddleWare) hereafter.
-It does not store any data, but uses a given backend for this.
-This middleware is injected between the user of the storage API and the actual
-backend used for storage. It is independent of the backend being used.
-Instances of the AMW are bound to individual request objects. The user whose
-permissions the AMW checks is hence obtained by a lookup on the request object.
-The backend itself (and the objects it returns) need to be wrapped in order
-to make sure that no object of the real backend is (directly or indirectly)
-made accessible to the user of the API.
-The real backend is still available as an attribute of the request and can
-be used by conversion utilities or for similar tasks (flaskg.unprotected_storage).
-Regular users of the storage API, such as the views that modify an item,
-*MUST NOT*, in any way, use the real backend unless the author knows *exactly*
-what he's doing (as this may introduce security bugs without the code actually
-being broken).
-
-The classes wrapped are:
-    * AclWrapperBackend (wraps MoinMoin.storage.Backend)
-    * AclWrapperItem (wraps MoinMoin.storage.Item)
-    * AclWrapperRevision (wraps MoinMoin.storage.Revision)
-
-When an attribute is 'wrapped' it means that, in this context, the user's
-permissions are checked prior to attribute usage. If the user may not perform
-the action he intended to perform, an AccessDeniedError is raised.
-Otherwise the action is performed on the respective attribute of the real backend.
-It is important to note here that the outcome of such an action may need to
-be wrapped itself, as is the case when items or revisions are returned.
-
-All wrapped classes must, of course, adhere to the normal storage API.
-"""
-
-
-from UserDict import DictMixin
-
-from flask import current_app as app
-from flask import g as flaskg
-
-from MoinMoin.security import AccessControlList
-
-from MoinMoin.storage import Item, NewRevision, StoredRevision
-from MoinMoin.storage.error import NoSuchItemError, NoSuchRevisionError, AccessDeniedError
-
-from MoinMoin.config import ACL, ADMIN, READ, WRITE, CREATE, DESTROY
-
-
-class AclWrapperBackend(object):
-    """
-    The AMW is bound to a specific request. The actual backend is retrieved
-    from the config upon request initialization. Any method that is in some
-    way relevant to security needs to be wrapped in order to ensure the user
-    has the permissions necessary to perform the desired action.
-    Note: This may *not* inherit from MoinMoin.storage.Backend because that would
-    break our __getattr__ attribute 'redirects' (which are necessary because a backend
-    implementor may decide to use his own helper functions which the items and revisions
-    will still try to call).
-    """
-    def __init__(self, cfg, backend, hierarchic=False, before=u"", default=u"", after=u"", valid=None):
-        """
-        :type backend: Some object that implements the storage API.
-        :param backend: The unprotected backend that we want to protect.
-        :type hierarchic: bool
-        :param hierarchic: Indicate whether we want to process ACLs in hierarchic mode.
-        :type before: unicode
-        :param before: ACL to be applied before all the other ACLs.
-        :type default: unicode
-        :param default: If no ACL information is given on the item in question, use this default.
-        :type after: unicode
-        :param after: ACL to be applied after all the other ACLs.
-        :type valid: list of strings or None
-        :param valid: If a list is given, only strings in the list are treated as valid acl privilege descriptors.
-                      If None is give, the global wiki default is used.
-        """
-        self.cfg = cfg
-        self.backend = backend
-        self.hierarchic = hierarchic
-        self.valid = valid if valid is not None else cfg.acl_rights_contents
-        self.before = AccessControlList([before], default=default, valid=self.valid)
-        self.default = AccessControlList([default], default=default, valid=self.valid)
-        self.after = AccessControlList([after], default=default, valid=self.valid)
-
-    def __getattr__(self, attr):
-        # Attributes that this backend does not define itself are just looked
-        # up on the real backend.
-        return getattr(self.backend, attr)
-
-    def get_item(self, itemname):
-        """
-        @see: Backend.get_item.__doc__
-        """
-        if not self._may(itemname, READ):
-            raise AccessDeniedError(flaskg.user.name, READ, itemname)
-        real_item = self.backend.get_item(itemname)
-        # Wrap the item here as well.
-        wrapped_item = AclWrapperItem(real_item, self)
-        return wrapped_item
-
-    def has_item(self, itemname):
-        """
-        @see: Backend.has_item.__doc__
-        """
-        # We do not hide the sheer existance of items. When trying
-        # to create an item with the same name, the user would notice anyway.
-        return self.backend.has_item(itemname)
-
-    def create_item(self, itemname):
-        """
-        @see: Backend.create_item.__doc__
-        """
-        if not self._may(itemname, CREATE):
-            raise AccessDeniedError(flaskg.user.name, CREATE, itemname)
-        real_item = self.backend.create_item(itemname)
-        # Wrap item.
-        wrapped_item = AclWrapperItem(real_item, self)
-        return wrapped_item
-
-    def iter_items_noindex(self):
-        """
-        @see: Backend.iter_items_noindex.__doc__
-        """
-        for item in self.backend.iteritems():
-            if self._may(item.name, READ):
-                yield AclWrapperItem(item, self)
-
-    iteritems = iter_items_noindex
-
-    def _get_acl(self, itemname):
-        """
-        Get ACL strings from the last revision's metadata and return ACL object.
-        """
-        try:
-            item = self.backend.get_item(itemname)
-            # we always use the ACLs set on the latest revision:
-            current_rev = item.get_revision(-1)
-            acl = current_rev[ACL]
-            if not isinstance(acl, unicode):
-                raise TypeError("%s metadata has unsupported type: %r" % (ACL, acl))
-            acls = [acl, ]
-        except (NoSuchItemError, NoSuchRevisionError, KeyError):
-            # do not use default acl here
-            acls = []
-        default = self.default.default
-        return AccessControlList(tuple(acls), default=default, valid=self.valid)
-
-    def _may(self, itemname, right, username=None):
-        """ Check if username may have <right> access on item <itemname>.
-
-        For hierarchic=False we just check the item in question.
-
-        For hierarchic=True, we check each item in the hierarchy. We
-        start with the deepest item and recurse to the top of the tree.
-        If one of those permits, True is returned.
-        This is done *only* if there is *no ACL at all* (not even an empty one)
-        on the items we 'recurse over'.
-
-        For both configurations, we check `before` before the item/default
-        acl and `after` after the item/default acl, of course.
-
-        `default` is only used if there is no ACL on the item (and none on
-        any of the item's parents when using hierarchic.)
-
-        :param itemname: item to get permissions from
-        :param right: the right to check
-        :param username: username to use for permissions check (default is to
-                         use the username doing the current request)
-        :rtype: bool
-        :returns: True if you have permission or False
-        """
-        if username is None:
-            username = flaskg.user.name
-
-        allowed = self.before.may(username, right)
-        if allowed is not None:
-            return allowed
-
-        if self.hierarchic:
-            items = itemname.split('/') # create item hierarchy list
-            some_acl = False
-            for i in range(len(items), 0, -1):
-                # Create the next pagename in the hierarchy
-                # starting at the leaf, going to the root
-                name = '/'.join(items[:i])
-                acl = self._get_acl(name)
-                if acl.has_acl():
-                    some_acl = True
-                    allowed = acl.may(username, right)
-                    if allowed is not None:
-                        return allowed
-                    # If the item has an acl (even one that doesn't match) we *do not*
-                    # check the parents. We only check the parents if there's no acl on
-                    # the item at all.
-                    break
-            if not some_acl:
-                allowed = self.default.may(username, right)
-                if allowed is not None:
-                    return allowed
-        else:
-            acl = self._get_acl(itemname)
-            if acl.has_acl():
-                allowed = acl.may(username, right)
-                if allowed is not None:
-                    return allowed
-            else:
-                allowed = self.default.may(username, right)
-                if allowed is not None:
-                    return allowed
-
-        allowed = self.after.may(username, right)
-        if allowed is not None:
-            return allowed
-
-        return False
-
-
-class AclWrapperItem(Item):
-    """
-    Similar to AclWrapperBackend. Wrap a storage item and protect its
-    attributes by performing permission checks prior to performing the
-    action and raising AccessDeniedErrors if appropriate.
-    """
-    def __init__(self, item, aclbackend):
-        """
-        :type item: Object adhering to the storage item API.
-        :param item: The unprotected item we want to wrap.
-        :type aclbackend: Instance of AclWrapperBackend.
-        :param aclbackend: The AMW this item belongs to.
-        """
-        self._backend = aclbackend
-        self._item = item
-        self._may = aclbackend._may
-
-    @property
-    def name(self):
-        """
-        @see: Item.name.__doc__
-        """
-        return self._item.name
-
-    # needed by storage.serialization:
-    @property
-    def element_name(self):
-        return self._item.element_name
-    @property
-    def element_attrs(self):
-        return self._item.element_attrs
-
-    def require_privilege(*privileges):
-        """
-        This decorator is used in order to avoid code duplication
-        when checking a user's permissions. It allows providing arguments
-        that represent the permissions to check, such as READ and WRITE
-        (see module level constants; don't pass strings, please).
-
-        :type privileges: List of strings.
-        :param privileges: Represent the privileges to check.
-        """
-        def wrap(f):
-            def wrapped_f(self, *args, **kwargs):
-                for privilege in privileges:
-                    if not self._may(self.name, privilege):
-                        username = flaskg.user.name
-                        raise AccessDeniedError(username, privilege, self.name)
-                return f(self, *args, **kwargs)
-            return wrapped_f
-        return wrap
-
-
-    @require_privilege(WRITE)
-    def __setitem__(self, key, value):
-        """
-        @see: Item.__setitem__.__doc__
-        """
-        return self._item.__setitem__(key, value)
-
-    @require_privilege(WRITE)
-    def __delitem__(self, key):
-        """
-        @see: Item.__delitem__.__doc__
-        """
-        return self._item.__delitem__(key)
-
-    @require_privilege(READ)
-    def __getitem__(self, key):
-        """
-        @see: Item.__getitem__.__doc__
-        """
-        return self._item.__getitem__(key)
-
-    @require_privilege(READ)
-    def keys(self):
-        """
-        @see: Item.keys.__doc__
-        """
-        return self._item.keys()
-
-    @require_privilege(WRITE)
-    def change_metadata(self):
-        """
-        @see: Item.change_metadata.__doc__
-        """
-        return self._item.change_metadata()
-
-    @require_privilege(WRITE)
-    def publish_metadata(self):
-        """
-        @see: Item.publish_metadata.__doc__
-        """
-        return self._item.publish_metadata()
-
-    @require_privilege(READ)
-    def get_revision(self, revno):
-        """
-        @see: Item.get_revision.__doc__
-        """
-        return AclWrapperRevision(self._item.get_revision(revno), self)
-
-    @require_privilege(READ)
-    def list_revisions(self):
-        """
-        @see: Item.list_revisions.__doc__
-        """
-        return self._item.list_revisions()
-
-    @require_privilege(READ, WRITE)
-    def rename(self, newname):
-        """
-        Rename item from name (src) to newname (dst).
-        Note that there is no special rename privilege. By taking other
-        privileges into account, we implicitly perform the permission check here.
-        This checks R/W at src and W/C at dst. This combination was chosen for
-        the following reasons:
-         * It is the most intuitive of the possible solutions.
-         * If we'd only check for R at src, everybody would be able to rename even
-           ImmutablePages if there is a writable/creatable name somewhere else
-           (e.g., Trash/).
-         * 'delete' aka 'rename to trashbin' can be controlled with 'create':
-           Just don't provide create for the trash namespace.
-         * Someone without create in the target namespace cannot rename.
-
-        @see: Item.rename.__doc__
-        """
-        # Special case since we need to check newname as well. Easier to special-case than
-        # adjusting the decorator.
-        username = flaskg.user.name
-        if not self._may(newname, CREATE):
-            raise AccessDeniedError(username, CREATE, newname)
-        if not self._may(newname, WRITE):
-            raise AccessDeniedError(username, WRITE, newname)
-        return self._item.rename(newname)
-
-    @require_privilege(WRITE)
-    def commit(self):
-        """
-        @see: Item.commit.__doc__
-        """
-        return self._item.commit()
-
-    # This does not require a privilege as the item must have been obtained
-    # by either get_item or create_item already, which already check permissions.
-    def rollback(self):
-        """
-        @see: Item.rollback.__doc__
-        """
-        return self._item.rollback()
-
-    @require_privilege(DESTROY)
-    def destroy(self):
-        """
-        USE WITH GREAT CARE!
-
-        @see: Item.destroy.__doc__
-        """
-        return self._item.destroy()
-
-    @require_privilege(WRITE)
-    def create_revision(self, revno):
-        """
-        @see: Item.create_revision.__doc__
-        """
-        wrapped_revision = AclWrapperRevision(self._item.create_revision(revno), self)
-        return wrapped_revision
-
-
-class AclWrapperRevision(object, DictMixin):
-    """
-    Wrapper for revision classes. We need to wrap NewRevisions because they allow altering data.
-    We need to wrap StoredRevisions since they offer a destroy() method and access to their item.
-    The caller should know what kind of revision he gets. Hence, we just implement the methods of
-    both, StoredRevision and NewRevision. If a method is invoked that is not defined on the
-    kind of revision we wrap, we will see an AttributeError one level deeper anyway, so this is ok.
-    """
-    def __init__(self, revision, item):
-        """
-        :type revision: Object adhering to the storage revision API.
-        :param revision: The revision we want to protect.
-        :type item: Object adhering to the storage item API.
-        :param item: The item this revision belongs to
-        """
-        self._revision = revision
-        self._item = item
-        self._may = item._may
-
-    def __getattr__(self, attr):
-        # Pass through any call that is not subject to ACL protection (e.g. serialize)
-        return getattr(self._revision, attr)
-
-    @property
-    def item(self):
-        """
-        @see: Revision.item.__doc__
-        """
-        return self._item
-
-    @property
-    def timestamp(self):
-        """This property accesses the creation timestamp of the revision"""
-        return self._revision.timestamp
-
-    def __setitem__(self, key, value):
-        """
-        In order to change an ACL on an item you must have the ADMIN privilege.
-        We must allow the (unchanged) preceeding revision's ACL being stored
-        into the new revision, though.
-
-        TODO: the ACL specialcasing done here (requiring admin privilege for
-              changing ACLs) is only one case of a more generic problem:
-              Access (read,write,change) to some metadata must be checked.
-              ACL - changing needs ADMIN priviledge
-              userid, ip, hostname, etc. - writing them should be from system only
-              content hash - writing it should be from system only
-              For the metadata editing offered to the wiki user on the UI,
-              we should only offer metadata for which the wiki user has change
-              permissions. On save, we have to check the permissions.
-              Idea: have metadata key prefixes, classifying metadata entries:
-              security.* - security related
-                      .acl - content acl
-                      .insecure - allow insecure rendering (e.g. raw html)
-              system.* - internal stuff, only system may process this
-              user.* - user defined entries
-              (... needs more thinking ...)
-
-        @see: NewRevision.__setitem__.__doc__
-        """
-        if key == ACL:
-            try:
-                # This rev is not yet committed
-                last_rev = self._item.get_revision(-1)
-                last_acl = last_rev[ACL]
-            except (NoSuchRevisionError, KeyError):
-                last_acl = u''
-
-            acl_changed = value != last_acl
-
-            if acl_changed and not self._may(self._item.name, ADMIN):
-                username = flaskg.user.name
-                raise AccessDeniedError(username, ADMIN, self._item.name)
-        return self._revision.__setitem__(key, value)
-
-    def __getitem__(self, key):
-        """
-        @see: NewRevision.__getitem__.__doc__
-        """
-        return self._revision[key]
-
-    def __delitem__(self, key):
-        """
-        @see: NewRevision.__delitem__.__doc__
-        """
-        del self._revision[key]
-
-    def read(self, chunksize=-1):
-        """
-        @see: Backend._read_revision_data.__doc__
-        """
-        return self._revision.read(chunksize)
-
-    def seek(self, position, mode=0):
-        """
-        @see: StringIO.StringIO().seek.__doc__
-        """
-        return self._revision.seek(position, mode)
-
-    def destroy(self):
-        """
-        @see: Backend._destroy_revision.__doc__
-        """
-        if not self._may(self._item.name, DESTROY):
-            username = flaskg.user.name
-            raise AccessDeniedError(username, DESTROY + " revisions of", self._item.name)
-        return self._revision.destroy()
-
-    def write(self, data):
-        """
-        @see: Backend._write_revision_data.__doc__
-        """
-        return self._revision.write(data)
-
--- a/MoinMoin/storage/middleware/indexing.py	Sat Sep 10 03:23:36 2011 +0200
+++ /dev/null	Thu Jan 01 00:00:00 1970 +0000
@@ -1,407 +0,0 @@
-# Copyright: 2010-2011 MoinMoin:ThomasWaldmann
-# Copyright: 2011 MoinMoin:MichaelMayorov
-# License: GNU GPL v2 (or any later version), see LICENSE.txt for details.
-
-"""
-    MoinMoin - Indexing Mixin Classes
-
-    Other backends mix in the Indexing*Mixin classes into their Backend,
-    Item, Revision classes to support flexible metadata indexing and querying
-    for wiki items / revisions
-
-    Wiki items and revisions of same item are identified by same UUID.
-    The wiki item name is contained in the item revision's metadata.
-    If you rename an item, this is done by creating a new revision with a different
-    (new) name in its revision metadata.
-"""
-
-
-import os
-import time, datetime
-
-from flask import current_app as app
-from flask import g as flaskg
-from flask import request
-
-from MoinMoin.storage.error import NoSuchItemError, NoSuchRevisionError, \
-                                   AccessDeniedError
-from MoinMoin.config import ACL, CONTENTTYPE, UUID, NAME, NAME_OLD, MTIME, TAGS, \
-                            ADDRESS, HOSTNAME, USERID, ITEMLINKS, ITEMTRANSCLUSIONS, \
-                            REV_NO
-from MoinMoin.search.indexing import backend_to_index
-from MoinMoin.converter import default_registry
-from MoinMoin.util.iri import Iri
-from MoinMoin.util.mime import Type, type_moin_document
-from MoinMoin.util.tree import moin_page
-from MoinMoin.util.crypto import make_uuid
-from MoinMoin import wikiutil
-
-from MoinMoin import log
-logging = log.getLogger(__name__)
-
-
-def convert_to_indexable(rev, new_rev=False):
-    """
-    convert a revision to an indexable document
-
-    :param rev: item revision - please make sure that the content file is
-                ready to read all indexable content from it. if you have just
-                written that content or already read from it, you need to call
-                rev.seek(0) before calling convert_to_indexable(rev).
-    """
-    try:
-        # TODO use different converter mode?
-        # Maybe we want some special mode for the input converters so they emit
-        # different output than for normal rendering), esp. for the non-markup
-        # content types (images, etc.).
-        input_contenttype = rev[CONTENTTYPE]
-        output_contenttype = 'text/plain'
-        type_input_contenttype = Type(input_contenttype)
-        type_output_contenttype = Type(output_contenttype)
-        reg = default_registry
-        # first try a direct conversion (this could be useful for extraction
-        # of (meta)data from binary types, like from images or audio):
-        conv = reg.get(type_input_contenttype, type_output_contenttype)
-        if conv:
-            doc = conv(rev, input_contenttype)
-            return doc
-        # otherwise try via DOM as intermediate format (this is useful if
-        # input type is markup, to get rid of the markup):
-        input_conv = reg.get(type_input_contenttype, type_moin_document)
-        refs_conv = reg.get(type_moin_document, type_moin_document, items='refs')
-        output_conv = reg.get(type_moin_document, type_output_contenttype)
-        if input_conv and output_conv:
-            doc = input_conv(rev, input_contenttype)
-            # We do not convert smileys, includes, macros, links, because
-            # it does not improve search results or even makes results worse.
-            # We do run the referenced converter, though, to extract links and
-            # transclusions.
-            if new_rev:
-                # we only can modify new, uncommitted revisions, not stored revs
-                i = Iri(scheme='wiki', authority='', path='/' + rev[NAME])
-                doc.set(moin_page.page_href, unicode(i))
-                refs_conv(doc)
-                # side effect: we update some metadata:
-                rev[ITEMLINKS] = refs_conv.get_links()
-                rev[ITEMTRANSCLUSIONS] = refs_conv.get_transclusions()
-            doc = output_conv(doc)
-            return doc
-        # no way
-        raise TypeError("No converter for %s --> %s" % (input_contenttype, output_contenttype))
-    except Exception as e: # catch all exceptions, we don't want to break an indexing run
-        logging.exception("Exception happened in conversion of item %r rev %d contenttype %s:" % (rev[NAME], rev.revno, rev[CONTENTTYPE]))
-        doc = u'ERROR [%s]' % str(e)
-        return doc
-
-
-class IndexingBackendMixin(object):
-    """
-    Backend indexing support / functionality using the index.
-    """
-    def __init__(self, *args, **kw):
-        cfg = kw.pop('cfg')
-        super(IndexingBackendMixin, self).__init__(*args, **kw)
-        self._index = ItemIndex(cfg)
-
-    def close(self):
-        self._index.close()
-        super(IndexingBackendMixin, self).close()
-
-    def create_item(self, itemname):
-        """
-        intercept new item creation and make sure there is NAME / UUID in the item
-        """
-        item = super(IndexingBackendMixin, self).create_item(itemname)
-        item.change_metadata()
-        if NAME not in item:
-            item[NAME] = itemname
-        if UUID not in item:
-            item[UUID] = make_uuid()
-        item.publish_metadata()
-        return item
-
-    def query_parser(self, default_fields, all_revs=False):
-        return self._index.query_parser(default_fields, all_revs=all_revs)
-
-    def searcher(self, all_revs=False):
-        return self._index.searcher(all_revs=all_revs)
-
-    def search(self, q, all_revs=False, **kw):
-        return self._index.search(q, all_revs=all_revs, **kw)
-
-    def search_page(self, q, all_revs=False, pagenum=1, pagelen=10, **kw):
-        return self._index.search_page(q, all_revs=all_revs, pagenum=pagenum, pagelen=pagelen, **kw)
-
-    def documents(self, all_revs=False, **kw):
-        return self._index.documents(all_revs=all_revs, **kw)
-
-
-class IndexingItemMixin(object):
-    """
-    Item indexing support
-    """
-    def __init__(self, backend, *args, **kw):
-        super(IndexingItemMixin, self).__init__(backend, *args, **kw)
-        self._index = backend._index
-        self.__unindexed_revision = None
-
-    def create_revision(self, revno):
-        self.__unindexed_revision = super(IndexingItemMixin, self).create_revision(revno)
-        return self.__unindexed_revision
-
-    def commit(self):
-        self.__unindexed_revision.update_index()
-        self.__unindexed_revision = None
-        return super(IndexingItemMixin, self).commit()
-
-    def rollback(self):
-        self.__unindexed_revision = None
-        return super(IndexingItemMixin, self).rollback()
-
-    def publish_metadata(self):
-        self.update_index()
-        return super(IndexingItemMixin, self).publish_metadata()
-
-    def destroy(self):
-        self.remove_index()
-        return super(IndexingItemMixin, self).destroy()
-
-    def update_index(self):
-        """
-        update the index with metadata of this item
-
-        this is automatically called by item.publish_metadata() and can be used by a indexer script also.
-        """
-        logging.debug("item %r update index:" % (self.name, ))
-        for k, v in self.items():
-            logging.debug(" * item meta %r: %r" % (k, v))
-        self._index.update_item(metas=self)
-
-    def remove_index(self):
-        """
-        update the index, removing everything related to this item
-        """
-        uuid = self[UUID]
-        logging.debug("item %r %r remove index!" % (self.name, uuid))
-        self._index.remove_item(uuid)
-
-
-class IndexingRevisionMixin(object):
-    """
-    Revision indexing support
-    """
-    def __init__(self, item, *args, **kw):
-        super(IndexingRevisionMixin, self).__init__(item, *args, **kw)
-        self._index = item._index
-
-    def destroy(self):
-        self.remove_index()
-        return super(IndexingRevisionMixin, self).destroy()
-
-    def update_index(self):
-        """
-        update the index with metadata of this revision
-
-        this is automatically called by item.commit() and can be used by a indexer script also.
-        """
-        name = self.item.name
-        uuid = self.item[UUID]
-        revno = self.revno
-        logging.debug("Processing: name %s revno %s" % (name, revno))
-        if MTIME not in self:
-            self[MTIME] = int(time.time())
-        if NAME not in self:
-            self[NAME] = name
-        if UUID not in self:
-            self[UUID] = uuid # do we want the item's uuid in the rev's metadata?
-        if CONTENTTYPE not in self:
-            self[CONTENTTYPE] = u'application/octet-stream'
-
-        if app.cfg.log_remote_addr:
-            remote_addr = request.remote_addr
-            if remote_addr:
-                self[ADDRESS] = unicode(remote_addr)
-                hostname = wikiutil.get_hostname(remote_addr)
-                if hostname:
-                    self[HOSTNAME] = hostname
-        try:
-            if flaskg.user.valid:
-                self[USERID] = unicode(flaskg.user.uuid)
-        except:
-            # when loading xml via script, we have no flaskg.user
-            pass
-
-        self.seek(0) # for a new revision, file pointer points to EOF, rewind first
-        rev_content = convert_to_indexable(self, new_rev=True)
-
-        logging.debug("item %r revno %d update index:" % (name, revno))
-        for k, v in self.items():
-            logging.debug(" * rev meta %r: %r" % (k, v))
-        logging.debug("Indexable content: %r" % (rev_content[:250], ))
-        self._index.add_rev(uuid, revno, self, rev_content)
-
-    def remove_index(self):
-        """
-        update the index, removing everything related to this revision
-        """
-        name = self.item.name
-        uuid = self.item[UUID]
-        revno = self.revno
-        metas = self
-        logging.debug("item %r revno %d remove index!" % (name, revno))
-        self._index.remove_rev(metas[UUID], revno)
-
-    # TODO maybe use this class later for data indexing also,
-    # TODO by intercepting write() to index data written to a revision
-
-from whoosh.writing import AsyncWriter
-from whoosh.qparser import QueryParser, MultifieldParser
-
-from MoinMoin.search.indexing import WhooshIndex
-
-class ItemIndex(object):
-    """
-    Index for Items/Revisions
-    """
-    def __init__(self, cfg, force_create=False):
-        self.wikiname = cfg.interwikiname
-        self.index_object = WhooshIndex(force_create=force_create, cfg=cfg)
-
-    def close(self):
-        self.index_object.all_revisions_index.close()
-        self.index_object.latest_revisions_index.close()
-
-    def remove_index(self):
-        self.index_object.remove_index()
-
-    def update_item(self, metas):
-        """
-        update item (not revision!) metadata
-        """
-        # XXX we do not have an index for item metadata yet!
-
-    def remove_item(self, uuid):
-        """
-        remove all data related to this item and all its revisions from the index
-        """
-        with self.index_object.latest_revisions_index.searcher() as latest_revs_searcher:
-            doc_number = latest_revs_searcher.document_number(uuid=uuid,
-                                                              wikiname=self.wikiname
-                                                             )
-        if doc_number is not None:
-            with AsyncWriter(self.index_object.latest_revisions_index) as async_writer:
-                async_writer.delete_document(doc_number)
-
-        with self.index_object.all_revisions_index.searcher() as all_revs_searcher:
-            doc_numbers = list(all_revs_searcher.document_numbers(uuid=uuid,
-                                                                  wikiname=self.wikiname
-                                                                 ))
-        if doc_numbers:
-            with AsyncWriter(self.index_object.all_revisions_index) as async_writer:
-                for doc_number in doc_numbers:
-                    async_writer.delete_document(doc_number)
-
-    def add_rev(self, uuid, revno, rev, rev_content):
-        """
-        add a new revision <revno> for item <uuid> with metadata <metas>
-        """
-        with self.index_object.all_revisions_index.searcher() as all_revs_searcher:
-            all_found_document = all_revs_searcher.document(uuid=rev[UUID],
-                                                            rev_no=revno,
-                                                            wikiname=self.wikiname
-                                                           )
-        with self.index_object.latest_revisions_index.searcher() as latest_revs_searcher:
-            latest_found_document = latest_revs_searcher.document(uuid=rev[UUID],
-                                                                  wikiname=self.wikiname
-                                                                 )
-        if not all_found_document:
-            schema = self.index_object.all_revisions_index.schema
-            with AsyncWriter(self.index_object.all_revisions_index) as async_writer:
-                converted_rev = backend_to_index(rev, revno, schema, rev_content, self.wikiname)
-                logging.debug("All revisions: adding %s %s", converted_rev[NAME], converted_rev[REV_NO])
-                async_writer.add_document(**converted_rev)
-        if not latest_found_document or int(revno) > latest_found_document[REV_NO]:
-            schema = self.index_object.latest_revisions_index.schema
-            with AsyncWriter(self.index_object.latest_revisions_index) as async_writer:
-                converted_rev = backend_to_index(rev, revno, schema, rev_content, self.wikiname)
-                logging.debug("Latest revisions: updating %s %s", converted_rev[NAME], converted_rev[REV_NO])
-                async_writer.update_document(**converted_rev)
-
-    def remove_rev(self, uuid, revno):
-        """
-        remove a revision <revno> of item <uuid>
-        """
-        with self.index_object.latest_revisions_index.searcher() as latest_revs_searcher:
-            latest_doc_number = latest_revs_searcher.document_number(uuid=uuid,
-                                                                     rev_no=revno,
-                                                                     wikiname=self.wikiname
-                                                                    )
-        if latest_doc_number is not None:
-            with AsyncWriter(self.index_object.latest_revisions_index) as async_writer:
-                logging.debug("Latest revisions: removing %d", latest_doc_number)
-                async_writer.delete_document(latest_doc_number)
-
-        with self.index_object.all_revisions_index.searcher() as all_revs_searcher:
-            doc_number = all_revs_searcher.document_number(uuid=uuid,
-                                                           rev_no=revno,
-                                                           wikiname=self.wikiname
-                                                          )
-        if doc_number is not None:
-            with AsyncWriter(self.index_object.all_revisions_index) as async_writer:
-                logging.debug("All revisions: removing %d", doc_number)
-                async_writer.delete_document(doc_number)
-
-    def query_parser(self, default_fields, all_revs=False):
-        if all_revs:
-            schema = self.index_object.all_revisions_schema
-        else:
-            schema = self.index_object.latest_revisions_schema
-        if len(default_fields) > 1:
-            qp = MultifieldParser(default_fields, schema=schema)
-        elif len(default_fields) == 1:
-            qp = QueryParser(default_fields[0], schema=schema)
-        else:
-            raise ValueError("default_fields list must at least contain one field name")
-        return qp
-
-    def searcher(self, all_revs=False):
-        """
-        Get a searcher for the right index. Always use this with "with":
-
-        with storage.searcher(all_revs) as searcher:
-            # your code
-
-        If you do not need the searcher itself or the Result object, but rather
-        the found documents, better use search() or search_page(), see below.
-        """
-        if all_revs:
-            ix = self.index_object.all_revisions_index
-        else:
-            ix = self.index_object.latest_revisions_index
-        return ix.searcher()
-
-    def search(self, q, all_revs=False, **kw):
-        with self.searcher(all_revs) as searcher:
-            # Note: callers must consume everything we yield, so the for loop
-            # ends and the "with" is left to close the index files.
-            for hit in searcher.search(q, **kw):
-                yield hit.fields()
-
-    def search_page(self, q, all_revs=False, pagenum=1, pagelen=10, **kw):
-        with self.searcher(all_revs) as searcher:
-            # Note: callers must consume everything we yield, so the for loop
-            # ends and the "with" is left to close the index files.
-            for hit in searcher.search_page(q, pagenum, pagelen=pagelen, **kw):
-                yield hit.fields()
-
-    def documents(self, all_revs=False, **kw):
-        if all_revs:
-            ix = self.index_object.all_revisions_index
-        else:
-            ix = self.index_object.latest_revisions_index
-        with ix.searcher() as searcher:
-            # Note: callers must consume everything we yield, so the for loop
-            # ends and the "with" is left to close the index files.
-            for doc in searcher.documents(**kw):
-                yield doc
-
--- a/MoinMoin/storage/middleware/router.py	Sat Sep 10 03:23:36 2011 +0200
+++ /dev/null	Thu Jan 01 00:00:00 1970 +0000
@@ -1,457 +0,0 @@
-# Copyright: 2008-2010 MoinMoin:ThomasWaldmann
-# Copyright: 2009 MoinMoin:ChristopherDenter
-# License: GNU GPL v2 (or any later version), see LICENSE.txt for details.
-
-"""
-    MoinMoin - routing backend
-
-    You can use this backend to route requests to different backends
-    depending on the item name. I.e., you can specify mountpoints and
-    map them to different backends. E.g. you could route all your items
-    to an FSBackend and only items below hg/<youritemnamehere> go into
-    a MercurialBackend and similarly tmp/<youritemnamehere> is for
-    temporary items in a MemoryBackend() that are discarded when the
-    process terminates.
-"""
-
-
-import re
-
-from MoinMoin import log
-logging = log.getLogger(__name__)
-
-from MoinMoin.error import ConfigurationError
-from MoinMoin.storage.error import AccessDeniedError
-
-from MoinMoin.storage import Backend as BackendBase
-from MoinMoin.storage import Item as ItemBase
-from MoinMoin.storage import NewRevision as NewRevisionBase
-from MoinMoin.storage import StoredRevision as StoredRevisionBase
-
-from MoinMoin.storage.middleware.indexing import IndexingBackendMixin, IndexingItemMixin, IndexingRevisionMixin
-from MoinMoin.storage.middleware.serialization import SerializableRevisionMixin, SerializableItemMixin, SerializableBackendMixin
-
-
-class BareRouterBackend(BackendBase):
-    """
-    Router Backend - routes requests to different backends depending
-    on the item name.
-
-    For method docstrings, please see the "Backend" base class.
-    """
-    def __init__(self, mapping, *args, **kw):
-        """
-        Initialize router backend.
-
-        The mapping given must satisfy the following criteria:
-            * Order matters.
-            * Mountpoints are just item names, including the special '' (empty)
-              root item name. A trailing '/' of a mountpoint will be ignored.
-            * There *must* be a backend with mountpoint '' (or '/') at the very
-              end of the mapping. That backend is then used as root, which means
-              that all items that don't lie in the namespace of any other
-              backend are stored there.
-
-        :type mapping: list of tuples of mountpoint -> backend mappings
-        :param mapping: [(mountpoint, backend), ...]
-        """
-        super(BareRouterBackend, self).__init__(*args, **kw)
-        self.mapping = [(mountpoint.rstrip('/'), backend) for mountpoint, backend in mapping]
-
-    def close(self):
-        super(BareRouterBackend, self).close()
-        for mountpoint, backend in self.mapping:
-            backend.close()
-        self.mapping = []
-
-    def _get_backend(self, itemname):
-        """
-        For a given fully-qualified itemname (i.e. something like Company/Bosses/Mr_Joe)
-        find the backend it belongs to (given by this instance's mapping), the local
-        itemname inside that backend and the mountpoint of the backend.
-
-        Note: Internally (i.e. in all Router* classes) we always use the normalized
-              item name for consistency reasons.
-
-        :type itemname: str
-        :param itemname: fully-qualified itemname
-        :returns: tuple of (backend, itemname, mountpoint)
-        """
-        if not isinstance(itemname, (str, unicode)):
-            raise TypeError("Item names must have string type, not %s" % (type(itemname)))
-
-        for mountpoint, backend in self.mapping:
-            if itemname == mountpoint or itemname.startswith(mountpoint and mountpoint + '/' or ''):
-                lstrip = mountpoint and len(mountpoint)+1 or 0
-                return backend, itemname[lstrip:], mountpoint
-        raise AssertionError("No backend found for %r. Available backends: %r" % (itemname, self.mapping))
-
-    def get_backend(self, namespace):
-        """
-        Given a namespace, return the backend mounted there.
-
-        :type namespace: basestring
-        :param namespace: The namespace of which we look the backend up.
-        """
-        return self._get_backend(namespace)[0]
-
-    def iter_items_noindex(self):
-        """
-        Iterate over all items.
-
-        Must not use the index as this method is used to *build* the index.
-
-        @see: Backend.iter_items_noindex.__doc__
-        """
-        for mountpoint, backend in self.mapping:
-            for item in backend.iter_items_noindex():
-                yield RouterItem(self, item.name, item, mountpoint)
-
-    # TODO: implement a faster iteritems using the index
-    iteritems = iter_items_noindex
-
-    def has_item(self, itemname):
-        """
-        @see: Backend.has_item.__doc__
-        """
-        # While we could use the inherited, generic implementation
-        # it is generally advised to override this method.
-        # Thus, we pass the call down.
-        logging.debug("has_item: %r" % itemname)
-        backend, itemname, mountpoint = self._get_backend(itemname)
-        return backend.has_item(itemname)
-
-    def get_item(self, itemname):
-        """
-        @see: Backend.get_item.__doc__
-        """
-        logging.debug("get_item: %r" % itemname)
-        backend, itemname, mountpoint = self._get_backend(itemname)
-        return RouterItem(self, itemname, backend.get_item(itemname), mountpoint)
-
-    def create_item(self, itemname):
-        """
-        @see: Backend.create_item.__doc__
-        """
-        logging.debug("create_item: %r" % itemname)
-        backend, itemname, mountpoint = self._get_backend(itemname)
-        return RouterItem(self, itemname, backend.create_item(itemname), mountpoint)
-
-
-class RouterBackend(SerializableBackendMixin, IndexingBackendMixin, BareRouterBackend):
-    pass
-
-
-class BareRouterItem(ItemBase):
-    """
-    Router Item - Wraps 'real' storage items to make them aware of their full name.
-
-    Items stored in the backends managed by the RouterBackend do not know their full
-    name since the backend they belong to is looked up from a list for a given
-    mountpoint and only the itemname itself (without leading mountpoint) is given to
-    the specific backend.
-    This is done so as to allow mounting a given backend at a different mountpoint.
-    The problem with that is, of course, that items do not know their full name if they
-    are retrieved via the specific backends directly. Thus, it is neccessary to wrap the
-    items returned from those specific backends in an instance of this RouterItem class.
-    This makes sure that an item in a specific backend only knows its local name (as it
-    should be; this allows mounting at a different place without renaming all items) but
-    items that the RouterBackend creates or gets know their fully qualified name.
-
-    In order to achieve this, we must mimic the Item interface here. In addition to that,
-    a backend implementor may have decided to provide additional methods on his Item class.
-    We can not know that here, ahead of time. We must redirect any attribute lookup to the
-    encapsulated item, hence, and only intercept calls that are related to the item name.
-    To do this, we store the wrapped item and redirect all calls via this classes __getattr__
-    method. For this to work, RouterItem *must not* inherit from Item, because otherwise
-    the attribute would be looked up on the abstract base class, which certainly is not what
-    we want.
-    Furthermore there's a problem with __getattr__ and new-style classes' special methods
-    which can be looked up here:
-    http://docs.python.org/reference/datamodel.html#special-method-lookup-for-new-style-classes
-    """
-    def __init__(self, backend, item_name, item, mountpoint, *args, **kw):
-        """
-        :type backend: Object adhering to the storage API.
-        :param backend: The backend this item belongs to.
-        :type itemname: basestring.
-        :param itemname: The name of the item (not the FQIN).
-        :type item: Object adhering to the storage item API.
-        :param item: The item we want to wrap.
-        :type mountpoint: basestring.
-        :param mountpoint: The mountpoint where this item is located.
-        """
-        self._get_backend = backend._get_backend
-        self._itemname = item_name
-        self._item = item
-        self._mountpoint = mountpoint
-        super(BareRouterItem, self).__init__(backend, item_name, *args, **kw)
-
-    def __getattr__(self, attr):
-        """
-        Redirect all attribute lookups to the item that is proxied by this instance.
-
-        Note: __getattr__ only deals with stuff that is not found in instance,
-              this class and base classes, so be careful!
-        """
-        return getattr(self._item, attr)
-
-    @property
-    def name(self):
-        """
-        :rtype: str
-        :returns: the item's fully-qualified name
-        """
-        mountpoint = self._mountpoint
-        if mountpoint:
-            mountpoint += '/'
-        return mountpoint + self._itemname
-
-    def __setitem__(self, key, value):
-        """
-        @see: Item.__setitem__.__doc__
-        """
-        return self._item.__setitem__(key, value)
-
-    def __delitem__(self, key):
-        """
-        @see: Item.__delitem__.__doc__
-        """
-        return self._item.__delitem__(key)
-
-    def __getitem__(self, key):
-        """
-        @see: Item.__getitem__.__doc__
-        """
-        return self._item.__getitem__(key)
-
-    def keys(self):
-        return self._item.keys()
-
-    def change_metadata(self):
-        return self._item.change_metadata()
-
-    def publish_metadata(self):
-        return self._item.publish_metadata()
-
-    def rollback(self):
-        return self._item.rollback()
-
-    def commit(self):
-        return self._item.commit()
-
-    def rename(self, newname):
-        """
-        For intra-backend renames, this is the same as the normal Item.rename
-        method.
-        For inter-backend renames, this *moves* the complete item over to the
-        new backend, possibly with a new item name.
-        In order to avoid content duplication, the old item is destroyed after
-        having been copied (in inter-backend scenarios only, of course).
-
-        @see: Item.rename.__doc__
-        """
-        old_name = self._item.name
-        backend, itemname, mountpoint = self._get_backend(newname)
-        if mountpoint != self._mountpoint:
-            # Mountpoint changed! That means we have to copy the item over.
-            converts, skips, fails = backend.copy_item(self._item, verbose=False, name=itemname)
-            assert len(converts) == 1
-
-            new_item = backend.get_item(itemname)
-            old_item = self._item
-            self._item = new_item
-            self._mountpoint = mountpoint
-            self._itemname = itemname
-            # We destroy the old item in order not to duplicate data.
-            # It may be the case that the item we want to destroy is ACL protected. In that case,
-            # the destroy() below doesn't irreversibly kill the item because at this point it is already
-            # guaranteed that it lives on at another place and we do not require 'destroy' hence.
-            try:
-                # Perhaps we don't deal with acl protected items anyway.
-                old_item.destroy()
-            except AccessDeniedError:
-                # OK, we're indeed routing to an ACL protected backend. Use unprotected item.
-                old_item._item.destroy()
-
-        else:
-            # Mountpoint didn't change
-            self._item.rename(itemname)
-            self._itemname = itemname
-
-    def list_revisions(self):
-        return self._item.list_revisions()
-
-    def create_revision(self, revno):
-        """
-        In order to make item name lookups via revision.item.name work, we need
-        to wrap the revision here.
-
-        @see: Item.create_revision.__doc__
-        """
-        rev = self._item.create_revision(revno)
-        return NewRouterRevision(self, revno, rev)
-
-    def get_revision(self, revno):
-        """
-        In order to make item name lookups via revision.item.name work, we need
-        to wrap the revision here.
-
-        @see: Item.get_revision.__doc__
-        """
-        rev = self._item.get_revision(revno)
-        return StoredRouterRevision(self, revno, rev)
-
-    def destroy(self):
-        """
-        ATTENTION!
-        This method performs an irreversible operation and deletes potentially important
-        data. Use with great care.
-
-        @see: Item.destroy.__doc__
-        """
-        return self._item.destroy()
-
-
-class RouterItem(SerializableItemMixin, IndexingItemMixin, BareRouterItem):
-    pass
-
-
-class BareNewRouterRevision(NewRevisionBase):
-    """
-    """
-    def __init__(self, item, revno, revision, *args, **kw):
-        self._item = item
-        self._revision = revision
-        super(BareNewRouterRevision, self).__init__(item, revno, *args, **kw)
-
-    def __getattr__(self, attr):
-        """
-        Redirect all attribute lookups to the revision that is proxied by this instance.
-
-        Note: __getattr__ only deals with stuff that is not found in instance,
-              this class and base classes, so be careful!
-        """
-        return getattr(self._revision, attr)
-
-    @property
-    def item(self):
-        """
-        Here we have to return the RouterItem, which in turn wraps the real item
-        and provides it with its full name that we need for the rev.item.name lookup.
-
-        @see: Revision.item.__doc__
-        """
-        assert isinstance(self._item, RouterItem)
-        return self._item
-
-    @property
-    def revno(self):
-        return self._revision.revno
-
-    @property
-    def timestamp(self):
-        return self._revision.timestamp
-
-    def __setitem__(self, key, value):
-        """
-        We only need to redirect this manually here because python doesn't do that
-        in combination with __getattr__. See RouterBackend.__doc__ for an explanation.
-
-        As this class wraps generic Revisions, this may very well result in an exception
-        being raised if the wrapped revision is a StoredRevision.
-        """
-        return self._revision.__setitem__(key, value)
-
-    def __delitem__(self, key):
-        """
-        @see: RouterRevision.__setitem__.__doc__
-        """
-        return self._revision.__delitem__(key)
-
-    def __getitem__(self, key):
-        """
-        @see: RouterRevision.__setitem__.__doc__
-        """
-        return self._revision.__getitem__(key)
-
-    def keys(self):
-        return self._revision.keys()
-
-    def read(self, chunksize=-1):
-        return self._revision.read(chunksize)
-
-    def seek(self, position, mode=0):
-        return self._revision.seek(position, mode)
-
-    def tell(self):
-        return self._revision.tell()
-
-    def write(self, data):
-        self._revision.write(data)
-
-    def destroy(self):
-        return self._revision.destroy()
-
-
-class NewRouterRevision(SerializableRevisionMixin, IndexingRevisionMixin, BareNewRouterRevision):
-    pass
-
-class BareStoredRouterRevision(StoredRevisionBase):
-    """
-    """
-    def __init__(self, item, revno, revision, *args, **kw):
-        self._item = item
-        self._revision = revision
-        super(BareStoredRouterRevision, self).__init__(item, revno, *args, **kw)
-
-    def __getattr__(self, attr):
-        """
-        Redirect all attribute lookups to the revision that is proxied by this instance.
-
-        Note: __getattr__ only deals with stuff that is not found in instance,
-              this class and base classes, so be careful!
-        """
-        return getattr(self._revision, attr)
-
-    @property
-    def item(self):
-        """
-        Here we have to return the RouterItem, which in turn wraps the real item
-        and provides it with its full name that we need for the rev.item.name lookup.
-
-        @see: Revision.item.__doc__
-        """
-        assert isinstance(self._item, RouterItem)
-        return self._item
-
-    @property
-    def revno(self):
-        return self._revision.revno
-
-    @property
-    def timestamp(self):
-        return self._revision.timestamp
-
-    def __getitem__(self, key):
-        return self._revision.__getitem__(key)
-
-    def keys(self):
-        return self._revision.keys()
-
-    def read(self, chunksize=-1):
-        return self._revision.read(chunksize)
-
-    def seek(self, position, mode=0):
-        return self._revision.seek(position, mode)
-
-    def tell(self):
-        return self._revision.tell()
-
-    def destroy(self):
-        return self._revision.destroy()
-
-
-class StoredRouterRevision(SerializableRevisionMixin, IndexingRevisionMixin, BareStoredRouterRevision):
-    pass
-
--- a/MoinMoin/storage/middleware/serialization.py	Sat Sep 10 03:23:36 2011 +0200
+++ /dev/null	Thu Jan 01 00:00:00 1970 +0000
@@ -1,673 +0,0 @@
-# Copyright: 2009-2010 MoinMoin:ThomasWaldmann
-# License: GNU GPL v2 (or any later version), see LICENSE.txt for details.
-
-"""
-    MoinMoin - XML serialization support
-
-    This module contains mixin classes to support xml serialization / unserialization.
-    It uses the sax xml parser / xml generator from the stdlib.
-
-    Applications include wiki backup/restore, wiki item packages, ...
-
-    Examples
-    --------
-
-    a) serialize all items of a storage backend to a file:
-    backend = ... (some storage backend)
-    serialize(backend, "items.xml")
-
-    b) unserialize all items from a file to a storage backend:
-    backend = ... (some storage backend)
-    unserialize(backend, "items.xml")
-
-    c) serialize just some items:
-    some_items = [u'FrontPage', u'HelpOnLinking', u'HelpOnMoinWikiSyntax', ]
-    serialize(backend, 'some_items.xml', ItemNameList, some_items)
-"""
-
-
-from MoinMoin import log
-logging = log.getLogger(__name__)
-
-import base64
-
-from xml.sax import parse as xml_parse
-from xml.sax.saxutils import XMLGenerator
-from xml.sax.handler import ContentHandler
-
-from MoinMoin.storage.error import ItemAlreadyExistsError
-
-class MoinContentHandler(ContentHandler):
-    """
-    ContentHandler that handles sax parse events and feeds them into the
-    unserializer stack.
-    """
-    def __init__(self, handler, context):
-        ContentHandler.__init__(self)
-        self.unserializer = handler.make_unserializer(context)
-
-    def unserialize(self, *args):
-        try:
-            self.unserializer.send(args)
-        except StopIteration:
-            pass
-
-    def startElement(self, name, attrs):
-        self.unserialize('startElement', name, attrs)
-
-    def endElement(self, name):
-        self.unserialize('endElement', name)
-
-    def characters(self, data):
-        self.unserialize('characters', data)
-
-
-class XMLSelectiveGenerator(XMLGenerator):
-    """
-    Manage xml output writing (by XMLGenerator base class)
-    and selection of output (by shall_serialize method)
-
-    You are expected to subclass this class and overwrite the shall_serialize method.
-    """
-    def __init__(self, out, encoding='UTF-8'):
-        # note: we have UTF-8 as default, base class has iso-8859-1
-        if out is not None and not hasattr(out, 'write'):
-            # None is OK (will become stdout by XMLGenerator.__init__)
-            # file-like is also OK
-            # for everything else (filename?), we try to open it first:
-            out = open(out, 'w')
-        XMLGenerator.__init__(self, out, encoding)
-
-    def shall_serialize(self, item=None, rev=None,
-                        revno=None, current_revno=None):
-        # shall be called by serialization code before starting to write
-        # the element to decide whether it shall be serialized.
-        return True
-
-
-class NLastRevs(XMLSelectiveGenerator):
-    def __init__(self, out, nrevs, invert):
-        self.nrevs = nrevs
-        self.invert = invert
-        XMLSelectiveGenerator.__init__(self, out)
-
-    def shall_serialize(self, item=None, rev=None,
-                        revno=None, current_revno=None):
-        if revno is None:
-            return True
-        else:
-            return self.invert ^ (revno > current_revno - self.nrevs)
-
-
-class SinceTime(XMLSelectiveGenerator):
-    def __init__(self, out, ts, invert):
-        self.ts = ts
-        self.invert = invert
-        XMLSelectiveGenerator.__init__(self, out)
-
-    def shall_serialize(self, item=None, rev=None,
-                        revno=None, current_revno=None):
-        if rev is None:
-            return True
-        else:
-            return self.invert ^ (rev.timestamp >= self.ts)
-
-
-class NRevsOrSinceTime(XMLSelectiveGenerator):
-    def __init__(self, out, nrevs, ts, invert):
-        self.nrevs = nrevs
-        self.ts = ts
-        self.invert = invert
-        XMLSelectiveGenerator.__init__(self, out)
-
-    def shall_serialize(self, item=None, rev=None,
-                        revno=None, current_revno=None):
-        if revno is None:
-            return True
-        else:
-            return self.invert ^ (
-                   (revno > current_revno - self.nrevs) | (rev.timestamp >= self.ts))
-
-
-class NRevsAndSinceTime(XMLSelectiveGenerator):
-    def __init__(self, out, nrevs, ts, invert):
-        self.nrevs = nrevs
-        self.ts = ts
-        self.invert = invert
-        XMLSelectiveGenerator.__init__(self, out)
-
-    def shall_serialize(self, item=None, rev=None,
-                        revno=None, current_revno=None):
-        if revno is None:
-            return True
-        else:
-            return self.invert ^ (
-                   (revno > current_revno - self.nrevs) & (rev.timestamp >= self.ts))
-
-
-class ItemNameList(XMLSelectiveGenerator):
-    def __init__(self, out, item_names):
-        self.item_names = item_names
-        XMLSelectiveGenerator.__init__(self, out)
-
-    def shall_serialize(self, item=None, rev=None,
-                        revno=None, current_revno=None):
-        return item is not None and item.name in self.item_names
-
-
-def serialize(obj, xmlfile, xmlgen_cls=XMLSelectiveGenerator, *args, **kwargs):
-    """
-    Serialize <obj> to <xmlfile>.
-
-    The default value of <xmlgen_cls> will just serialize everything. Alternatively,
-    use some of XMLSelectiveGenerator child classes to do selective serialization,
-    e.g. of just a list of items or just a subset of the revisions.
-
-    :arg obj: object to serialize (must mix in Serializable)
-    :arg xmlfile: output file (file-like or filename)
-    :arg xmlgen_cls: XMLSelectiveGenerator (sub)class instance (all args/kwargs
-                     given after this will be given to xmlgen_cls.__init__()
-    """
-    xg = xmlgen_cls(xmlfile, *args, **kwargs)
-    xg.startDocument() # <?xml version="1.0" encoding="UTF-8"?>
-    obj.serialize(xg)
-
-
-class XMLUnserializationContext(object):
-    """
-    Provides context information for xml unserialization.
-    """
-    def __init__(self, xmlfile, encoding='utf-8', revno_mode='next'):
-        if xmlfile is not None and not hasattr(xmlfile, 'read'):
-            # for everything not file-like (filename?), we try to open it first:
-            xmlfile = open(xmlfile, 'r')
-        self.xmlfile = xmlfile
-        self.revno_mode = revno_mode
-
-
-def unserialize(obj, xmlfile, context_cls=XMLUnserializationContext, *args, **kwargs):
-    """
-    Unserialize <xmlfile> to <obj>.
-
-    :arg obj: object to write unserialized data to (must mix in Serializable)
-    :arg xmlfile: input file (file-like or filename)
-    """
-    context = context_cls(xmlfile, *args, **kwargs)
-    obj.unserialize(context)
-
-
-class Serializable(object):
-    element_name = None # override with xml element name
-    element_attrs = None # override with xml element attributes
-
-    @classmethod
-    def _log(cls, text):
-        logging.warning(text)
-
-    # serialization support:
-    def serialize(self, xmlgen):
-        # works for simple elements, please override for complex elements
-        # xmlgen.shall_serialize should be called by elements supporting selection
-        xmlgen.startElement(self.element_name, self.element_attrs or {})
-        self.serialize_value(xmlgen)
-        xmlgen.endElement(self.element_name)
-        xmlgen.ignorableWhitespace('\n')
-
-    def serialize_value(self, xmlgen):
-        # works for simple values, please override for complex values
-        xmlgen.characters(str(self.value))
-
-    # unserialization support:
-    def get_unserializer(self, context, name, attrs):
-        """
-        returns a unserializer instance for child element <name>, usually
-        a instance of some other class derived from UnserializerBase
-        """
-        raise NotImplementedError()
-
-    def startElement(self, attrs):
-        """ called when this element is opened """
-
-    def characters(self, data):
-        """ called for character data within this element """
-
-    def endElement(self):
-        """ called when this element is closed """
-
-    def noHandler(self, name):
-        self._log("No unserializer for element name: %s, not handled by %s" % (
-                  name, self.__class__))
-
-    def unexpectedEnd(self, name):
-        self._log("Unexpected end element: %s (expected: %s)" % (
-                  name, self.element_name))
-
-    def make_unserializer(self, context):
-        """
-        convenience wrapper that creates the unserializing generator and
-        automatically does the first "nop" generator call.
-        """
-        gen = self._unserialize(context)
-        gen.next()
-        return gen
-
-    def _unserialize(self, context):
-        """
-        Generator that gets fed with event data from the sax parser, e.g.:
-            ('startElement', name, attrs)
-            ('endElement', name)
-            ('characters', data)
-
-        It only handles stuff for name == self.element_name, everything else gets
-        delegated to a lower level generator, that is found by self.get_unserializer().
-        """
-        while True:
-            d = yield
-            fn = d[0]
-            if fn == 'startElement':
-                name, attrs = d[1:]
-                if name == self.element_name:
-                    self.startElement(attrs)
-                else:
-                    unserializer_instance = self.get_unserializer(context, name, attrs)
-                    if unserializer_instance is not None:
-                        unserializer = unserializer_instance.make_unserializer(context)
-                        try:
-                            while True:
-                                d = yield unserializer.send(d)
-                        except StopIteration:
-                            pass
-                    else:
-                        self.noHandler(name)
-
-            elif fn == 'endElement':
-                name = d[1]
-                if name == self.element_name:
-                    self.endElement()
-                    return # end generator
-                else:
-                    self.unexpectedEnd(name)
-
-            elif fn == 'characters':
-                self.characters(d[1])
-
-    def unserialize(self, context):
-        xml_parse(context.xmlfile, MoinContentHandler(self, context))
-
-
-def create_value_object(v):
-    if isinstance(v, tuple):
-        return TupleValue(v)
-    elif isinstance(v, list):
-        return ListValue(v)
-    elif isinstance(v, dict):
-        return DictValue(v)
-    elif isinstance(v, unicode):
-        return UnicodeValue(v)
-    elif isinstance(v, str):
-        return StrValue(v)
-    elif isinstance(v, bool):
-        return BoolValue(v)
-    elif isinstance(v, int):
-        return IntValue(v)
-    elif isinstance(v, long):
-        return LongValue(v)
-    elif isinstance(v, float):
-        return FloatValue(v)
-    elif isinstance(v, complex):
-        return ComplexValue(v)
-    else:
-        raise TypeError("unsupported type %r (value: %r)" % (type(v), v))
-
-
-class Value(Serializable):
-    element_name = None # override in child class
-
-    def __init__(self, value=None, attrs=None, setter_fn=None):
-        self.value = value
-        self.element_attrs = attrs
-        self.setter_fn = setter_fn
-        self.data = u''
-
-    def characters(self, data):
-        self.data += data
-
-    def endElement(self):
-        value = self.element_decode(self.data)
-        self.setter_fn(value)
-
-    def serialize_value(self, xmlgen):
-        xmlgen.characters(self.element_encode(self.value))
-
-    def element_decode(self, x):
-        return x # override in child class
-
-    def element_encode(self, x):
-        return x # override in child class
-
-class UnicodeValue(Value):
-    element_name = 'str' # py3-style (and shorter)
-
-class StrValue(Value):
-    element_name = 'bytes' # py3-style (rarely used)
-
-    def element_decode(self, x):
-        return x.encode('utf-8')
-
-    def element_encode(self, x):
-        return x.decode('utf-8')
-
-class IntValue(Value):
-    element_name = 'int'
-
-    def element_decode(self, x):
-        return int(x)
-
-    def element_encode(self, x):
-        return str(x)
-
-class LongValue(Value):
-    element_name = 'long'
-
-    def element_decode(self, x):
-        return long(x)
-
-    def element_encode(self, x):
-        return str(x)
-
-class FloatValue(Value):
-    element_name = 'float'
-
-    def element_decode(self, x):
-        return float(x)
-
-    def element_encode(self, x):
-        return str(x)
-
-class ComplexValue(Value):
-    element_name = 'complex'
-
-    def element_decode(self, x):
-        return complex(x)
-
-    def element_encode(self, x):
-        return str(x)
-
-class BoolValue(Value):
-    element_name = 'bool'
-
-    def element_decode(self, x):
-        if x == 'False':
-            return False
-        if x == 'True':
-            return True
-        raise ValueError("boolean serialization must be 'True' or 'False', no %r" % x)
-
-    def element_encode(self, x):
-        return str(x)
-
-class TupleValue(Serializable):
-    element_name = 'tuple'
-
-    def __init__(self, value=None, attrs=None, setter_fn=None):
-        self.value = value
-        self.element_attrs = attrs
-        self._result_fn = setter_fn
-        self._data = []
-
-    def get_unserializer(self, context, name, attrs):
-        mapping = {
-            'bytes': StrValue, # py3-style
-            'str': UnicodeValue, # py3-style
-            'bool': BoolValue,
-            'int': IntValue,
-            'long': LongValue,
-            'float': FloatValue,
-            'complex': ComplexValue,
-            'list': ListValue,
-            'tuple': TupleValue,
-            'dict': DictValue,
-        }
-        cls = mapping.get(name)
-        if cls:
-            return cls(attrs=attrs, setter_fn=self.setter_fn)
-        else:
-            raise TypeError("unsupported element: %s", name)
-
-    def setter_fn(self, value):
-        self._data.append(value)
-
-    def endElement(self):
-        value = tuple(self._data)
-        self._result_fn(value)
-
-    def serialize_value(self, xmlgen):
-        for e in self.value:
-            e = create_value_object(e)
-            e.serialize(xmlgen)
-
-
-class ListValue(TupleValue):
-    element_name = 'list'
-
-    def endElement(self):
-        value = list(self._data)
-        self._result_fn(value)
-
-
-class DictValue(Serializable):
-    element_name = 'dict'
-
-    def __init__(self, value=None, attrs=None, setter_fn=None):
-        self.value = value
-        self.element_attrs = attrs
-        self._result_fn = setter_fn
-        self._data = []
-
-    def get_unserializer(self, context, name, attrs):
-        mapping = {
-            'tuple': TupleValue,
-        }
-        cls = mapping.get(name)
-        if cls:
-            return cls(attrs=attrs, se