changeset 4609:246ba4eecab2

updated werkzeug to 0.5.pre20090228
author Thomas Waldmann <tw AT waldmann-edv DOT de>
date Sat, 28 Feb 2009 00:08:31 +0100
parents 3a79d3ca5a83
children 79480a001506
files MoinMoin/support/werkzeug/__init__.py MoinMoin/support/werkzeug/_internal.py MoinMoin/support/werkzeug/contrib/__init__.py MoinMoin/support/werkzeug/contrib/atom.py MoinMoin/support/werkzeug/contrib/cache.py MoinMoin/support/werkzeug/contrib/fixers.py MoinMoin/support/werkzeug/contrib/iterio.py MoinMoin/support/werkzeug/contrib/jsrouting.py MoinMoin/support/werkzeug/contrib/kickstart.py MoinMoin/support/werkzeug/contrib/limiter.py MoinMoin/support/werkzeug/contrib/profiler.py MoinMoin/support/werkzeug/contrib/reporterstream.py MoinMoin/support/werkzeug/contrib/securecookie.py MoinMoin/support/werkzeug/contrib/sessions.py MoinMoin/support/werkzeug/contrib/testtools.py MoinMoin/support/werkzeug/contrib/viewdecorators.py MoinMoin/support/werkzeug/contrib/wrappers.py MoinMoin/support/werkzeug/datastructures.py MoinMoin/support/werkzeug/debug/__init__.py MoinMoin/support/werkzeug/debug/console.py MoinMoin/support/werkzeug/debug/render.py MoinMoin/support/werkzeug/debug/repr.py MoinMoin/support/werkzeug/debug/shared/debugger.js MoinMoin/support/werkzeug/debug/tbtools.py MoinMoin/support/werkzeug/debug/templates/help_command.html MoinMoin/support/werkzeug/debug/templates/traceback_full.html MoinMoin/support/werkzeug/debug/utils.py MoinMoin/support/werkzeug/exceptions.py MoinMoin/support/werkzeug/http.py MoinMoin/support/werkzeug/local.py MoinMoin/support/werkzeug/routing.py MoinMoin/support/werkzeug/script.py MoinMoin/support/werkzeug/serving.py MoinMoin/support/werkzeug/templates.py MoinMoin/support/werkzeug/test.py MoinMoin/support/werkzeug/testapp.py MoinMoin/support/werkzeug/useragents.py MoinMoin/support/werkzeug/utils.py MoinMoin/support/werkzeug/wrappers.py
diffstat 39 files changed, 5539 insertions(+), 2787 deletions(-)
--- a/MoinMoin/support/werkzeug/__init__.py	Fri Feb 27 23:30:37 2009 +0100
+++ b/MoinMoin/support/werkzeug/__init__.py	Sat Feb 28 00:08:31 2009 +0100
@@ -7,11 +7,11 @@
 
     It provides useful classes and functions for any WSGI application to make
     the life of a python web developer much easier.  All of the provided
-    classes are independed from each other so you can mix it with any other
+    classes are independent of each other so you can mix them with any other
     library.
 
 
-    :copyright: 2007-2008 by Armin Ronacher.
+    :copyright: (c) 2009 by the Werkzeug Team, see AUTHORS for more details.
     :license: BSD, see LICENSE for more details.
 """
 from types import ModuleType
@@ -23,36 +23,49 @@
     'werkzeug.local':       ['Local', 'LocalManager', 'LocalProxy'],
     'werkzeug.templates':   ['Template'],
     'werkzeug.serving':     ['run_simple'],
-    'werkzeug.test':        ['Client'],
+    'werkzeug.test':        ['Client', 'EnvironBuilder', 'create_environ',
+                             'run_wsgi_app'],
     'werkzeug.testapp':     ['test_app'],
     'werkzeug.exceptions':  ['abort', 'Aborter'],
-    'werkzeug.utils':       ['escape', 'create_environ', 'url_quote',
+    'werkzeug.utils':       ['escape', 'url_quote',
                              'environ_property', 'cookie_date', 'http_date',
-                             'url_encode', 'url_quote_plus', 'Headers',
-                             'EnvironHeaders', 'CombinedMultiDict', 'url_fix',
-                             'run_wsgi_app', 'get_host', 'responder',
+                             'url_encode', 'url_quote_plus', 'url_fix',
+                             'get_host', 'responder',
                              'SharedDataMiddleware', 'ClosingIterator',
                              'FileStorage', 'url_unquote_plus', 'url_decode',
                              'url_unquote', 'get_current_url', 'redirect',
                              'append_slash_redirect',
-                             'cached_property', 'MultiDict', 'import_string',
+                             'cached_property', 'import_string',
                              'dump_cookie', 'parse_cookie', 'unescape',
                              'format_string', 'Href', 'DispatcherMiddleware',
                              'find_modules', 'header_property', 'html',
                              'xhtml', 'HTMLBuilder', 'parse_form_data',
                              'validate_arguments', 'ArgumentValidationError',
-                             'bind_arguments'],
+                             'bind_arguments', 'FileWrapper', 'wrap_file',
+                             'pop_path_info', 'peek_path_info',
+                             'LimitedStream'],
+    'werkzeug.datastructures': ['MultiDict', 'CombinedMultiDict', 'Headers',
+                             'EnvironHeaders', 'ImmutableList',
+                             'ImmutableDict', 'ImmutableMultiDict',
+                             'TypeConversionDict', 'ImmutableTypeConversionDict',
+                             'Accept', 'MIMEAccept', 'CharsetAccept',
+                             'LanguageAccept', 'RequestCacheControl',
+                             'ResponseCacheControl', 'ETags', 'HeaderSet',
+                             'WWWAuthenticate', 'Authorization',
+                             'CacheControl', 'FileMultiDict'],
     'werkzeug.useragents':  ['UserAgent'],
-    'werkzeug.http':        ['Accept', 'CacheControl', 'ETags', 'parse_etags',
-                             'parse_date', 'parse_cache_control_header',
+    'werkzeug.http':        ['parse_etags', 'parse_date', 'parse_cache_control_header',
                              'is_resource_modified', 'parse_accept_header',
                              'parse_set_header', 'quote_etag', 'unquote_etag',
                              'generate_etag', 'dump_header',
                              'parse_list_header', 'parse_dict_header',
-                             'HeaderSet', 'parse_authorization_header',
+                             'parse_authorization_header',
                              'parse_www_authenticate_header',
-                             'WWWAuthenticate', 'Authorization',
-                             'HTTP_STATUS_CODES'],
+                             'remove_entity_headers', 'is_entity_header',
+                             'remove_hop_by_hop_headers', 'parse_options_header',
+                             'dump_options_header', 'is_hop_by_hop_header',
+                             'unquote_header_value',
+                             'quote_header_value', 'HTTP_STATUS_CODES'],
     'werkzeug.wrappers':    ['BaseResponse', 'BaseRequest', 'Request',
                              'Response', 'AcceptMixin', 'ETagRequestMixin',
                              'ETagResponseMixin', 'ResponseStreamMixin',
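
The hunk above reorganizes Werkzeug's lazy-import table: the data-structure classes move out of `werkzeug.utils` and `werkzeug.http` into the new `werkzeug.datastructures` module, while the table keeps exposing them on the top-level `werkzeug` package. A small, non-authoritative sketch of what that means for importing code, based only on the table above::

    # The classes now live in werkzeug.datastructures ...
    from werkzeug.datastructures import MultiDict, Headers

    # ... but the lazy-import table above should keep the flat spelling working:
    from werkzeug import MultiDict as FlatMultiDict

    data = MultiDict([('key', 'a'), ('key', 'b')])
    assert data.getlist('key') == ['a', 'b']
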
--- a/MoinMoin/support/werkzeug/_internal.py	Fri Feb 27 23:30:37 2009 +0100
+++ b/MoinMoin/support/werkzeug/_internal.py	Sat Feb 28 00:08:31 2009 +0100
@@ -5,15 +5,14 @@
 
     This module provides internally used helpers and constants.
 
-    :copyright: Copyright 2008 by Armin Ronacher.
-    :license: GNU GPL.
+    :copyright: (c) 2009 by the Werkzeug Team, see AUTHORS for more details.
+    :license: BSD, see LICENSE for more details.
 """
-import cgi
 import inspect
 from weakref import WeakKeyDictionary
 from cStringIO import StringIO
 from Cookie import BaseCookie, Morsel, CookieError
-from time import asctime, gmtime, time
+from time import gmtime
 from datetime import datetime
 
 
@@ -60,6 +59,7 @@
     415:    'Unsupported Media Type',
     416:    'Requested Range Not Satisfiable',
     417:    'Expectation Failed',
+    418:    'I\'m a teapot',        # see RFC 2324
     422:    'Unprocessable Entity',
     423:    'Locked',
     424:    'Failed Dependency',
@@ -76,15 +76,33 @@
 }
 
 
+class _Missing(object):
+
+    def __repr__(self):
+        return 'no value'
+
+    def __reduce__(self):
+        return '_missing'
+
+_missing = _Missing()
+
+
+def _proxy_repr(cls):
+    def proxy_repr(self):
+        return '%s(%s)' % (self.__class__.__name__, cls.__repr__(self))
+    return proxy_repr
+
+
 def _log(type, message, *args, **kwargs):
     """Log into the internal werkzeug logger."""
     global _logger
     if _logger is None:
         import logging
-        handler = logging.StreamHandler()
         _logger = logging.getLogger('werkzeug')
-        _logger.addHandler(handler)
-        _logger.setLevel(logging.INFO)
+        if _logger.level == logging.NOTSET:
+            _logger.setLevel(logging.INFO)
+            handler = logging.StreamHandler()
+            _logger.addHandler(handler)
     getattr(_logger, type)(message.rstrip(), *args, **kwargs)
 
 
@@ -183,6 +201,7 @@
 
 def _iter_modules(path):
     """Iterate over all modules in a package."""
+    import os
     import pkgutil
     if hasattr(pkgutil, 'iter_modules'):
         for importer, modname, ispkg in pkgutil.iter_modules(path):
@@ -235,35 +254,6 @@
         return result
 
 
-class _StorageHelper(cgi.FieldStorage):
-    """Helper class used by `parse_form_data` to parse submitted file and
-    form data.  Don't use this class directly.  This also defines a simple
-    repr that prints just the filename as the default repr reads the
-    complete data of the stream.
-    """
-
-    FieldStorageClass = cgi.FieldStorage
-
-    def __init__(self, environ, stream_factory):
-        if stream_factory is not None:
-            self.make_file = lambda binary=None: stream_factory()
-        cgi.FieldStorage.__init__(self,
-            fp=environ['wsgi.input'],
-            environ={
-                'REQUEST_METHOD':   environ['REQUEST_METHOD'],
-                'CONTENT_TYPE':     environ['CONTENT_TYPE'],
-                'CONTENT_LENGTH':   environ['CONTENT_LENGTH']
-            },
-            keep_blank_values=True
-        )
-
-    def __repr__(self):
-        return '<%s %r>' % (
-            self.__class__.__name__,
-            self.name
-        )
-
-
 class _ExtendedCookie(BaseCookie):
     """Form of the base cookie that doesn't raise a `CookieError` for
     malformed keys.  This has the advantage that broken cookies submitted
@@ -326,33 +316,8 @@
         )
 
 
-class _UpdateDict(dict):
-    """A dict that calls `on_update` on modifications."""
-
-    def __init__(self, data, on_update):
-        dict.__init__(self, data)
-        self.on_update = on_update
-
-    def calls_update(f):
-        def oncall(self, *args, **kw):
-            rv = f(self, *args, **kw)
-            if self.on_update is not None:
-                self.on_update(self)
-            return rv
-        return _patch_wrapper(f, oncall)
-
-    __setitem__ = calls_update(dict.__setitem__)
-    __delitem__ = calls_update(dict.__delitem__)
-    clear = calls_update(dict.clear)
-    pop = calls_update(dict.pop)
-    popitem = calls_update(dict.popitem)
-    setdefault = calls_update(dict.setdefault)
-    update = calls_update(dict.update)
-
-
-
 def _easteregg(app):
-    """Like the name says."""
+    """Like the name says.  But who knows how it works?"""
     gyver = '\n'.join([x + (77 - len(x)) * ' ' for x in '''
 eJyFlzuOJDkMRP06xRjymKgDJCDQStBYT8BCgK4gTwfQ2fcFs2a2FzvZk+hvlcRvRJD148efHt9m
 9Xz94dRY5hGt1nrYcXx7us9qlcP9HHNh28rz8dZj+q4rynVFFPdlY4zH873NKCexrDM6zxxRymzz
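
The most behaviour-relevant `_internal.py` change above is in `_log`: the default `INFO` level and `StreamHandler` are now only installed when the `'werkzeug'` logger is still at `NOTSET`. A hedged sketch of how an embedding application such as MoinMoin could rely on that; the level and file name are just examples::

    import logging

    # Configure the 'werkzeug' logger before Werkzeug logs anything.  Since
    # its level is then no longer NOTSET, the patched _log() should leave the
    # handler setup alone instead of adding its own StreamHandler.
    log = logging.getLogger('werkzeug')
    log.setLevel(logging.WARNING)
    log.addHandler(logging.FileHandler('werkzeug.log'))
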
--- a/MoinMoin/support/werkzeug/contrib/__init__.py	Fri Feb 27 23:30:37 2009 +0100
+++ b/MoinMoin/support/werkzeug/contrib/__init__.py	Sat Feb 28 00:08:31 2009 +0100
@@ -1,19 +1,16 @@
 # -*- coding: utf-8 -*-
 """
-    `werkzeug.contrib`
-    ~~~~~~~~~~~~~~~~~~
+    werkzeug.contrib
+    ~~~~~~~~~~~~~~~~
 
-    Contains user-submitted code that other users may find useful,
-    but which are not part of Werkzeug core. Anyone can write
-    code for inclusion in the `contrib` package.  All the modules
-    in this package are distributed as an addon library and thus not
-    part of `werkzeug` itself.
+    Contains user-submitted code that other users may find useful, but which
+    is not part of the Werkzeug core.  Anyone can write code for inclusion in
+    the `contrib` package.  All modules in this package are distributed as an
+    add-on library and thus are not part of Werkzeug itself.
 
-    This file itself is mostly for informational purposes and to
-    tell the Python interpreter that `contrib` is a package.
+    This file itself is mostly for informational purposes and to tell the
+    Python interpreter that `contrib` is a package.
 
-
-    :copyright: 2007-2008 by Marek Kubica, Marian Sigler, Armin Ronacher,
-                Leif K-Brooks, Ronny Pfannschmid, Thomas Johansson.
+    :copyright: (c) 2009 by the Werkzeug Team, see AUTHORS for more details.
     :license: BSD, see LICENSE for more details.
 """
--- a/MoinMoin/support/werkzeug/contrib/atom.py	Fri Feb 27 23:30:37 2009 +0100
+++ b/MoinMoin/support/werkzeug/contrib/atom.py	Sat Feb 28 00:08:31 2009 +0100
@@ -3,11 +3,23 @@
     werkzeug.contrib.atom
     ~~~~~~~~~~~~~~~~~~~~~
 
-    This module provides a class called `AtomFeed` which can be used
-    to generate Atom feeds.
+    This module provides a class called :class:`AtomFeed` which can be
+    used to generate feeds in the Atom syndication format (see :rfc:`4287`).
 
-    :copyright: Copyright 2007 by Armin Ronacher, Marian Sigler.
-    :license: GNU GPL.
+    Example::
+
+        def atom_feed(request):
+            feed = AtomFeed("My Blog", feed_url=req.url,
+                            url=req.host_url,
+                            subtitle="My example blog for a feed test.")
+            for post in Post.query.limit(10).all():
+                feed.add(post.title, post.body, content_type='html',
+                         author=post.author, url=post.url, id=post.uid,
+                         updated=post.last_update, published=post.pub_date)
+            return feed.get_response()
+
+    :copyright: (c) 2009 by the Werkzeug Team, see AUTHORS for more details.
+    :license: BSD, see LICENSE for more details.
 """
 from datetime import datetime
 from werkzeug.utils import escape
@@ -34,64 +46,55 @@
 
 
 class AtomFeed(object):
-    """A helper class that creates Atom feeds."""
+    """A helper class that creates Atom feeds.
+
+    :param title: the title of the feed. Required.
+    :param title_type: the type attribute for the title element.  One of
+                       ``'html'``, ``'text'`` or ``'xhtml'``.
+    :param url: the url for the feed (not the url *of* the feed)
+    :param id: a globally unique id for the feed.  Must be a URI.  If
+               not present the `feed_url` is used, but one of the two is
+               required.
+    :param updated: the time the feed was modified the last time.  Must
+                    be a :class:`datetime.datetime` object.  If not
+                    present the latest entry's `updated` is used.
+    :param feed_url: the URL to the feed.  Should be the URL that was
+                     requested.
+    :param author: the author of the feed.  Must be either a string (the
+                   name) or a dict with name (required) and uri or
+                   email (both optional).  Can also be a list of mixed
+                   strings and dicts if there are multiple authors.
+                   Required if not every entry has an
+                   author element.
+    :param icon: an icon for the feed.
+    :param logo: a logo for the feed.
+    :param rights: copyright information for the feed.
+    :param rights_type: the type attribute for the rights element.  One of
+                        ``'html'``, ``'text'`` or ``'xhtml'``.  Default is
+                        ``'text'``.
+    :param subtitle: a short description of the feed.
+    :param subtitle_type: the type attribute for the subtitle element.
+                          One of ``'text'``, ``'html'`` or ``'xhtml'``.
+                          Default is ``'text'``.
+    :param links: additional links.  Must be a list of dictionaries with
+                  href (required) and rel, type, hreflang, title, length
+                  (all optional)
+    :param generator: the software that generated this feed.  This must be
+                      a tuple in the form ``(name, url, version)``.  If
+                      you don't want to specify one of them, set the item
+                      to `None`.
+    :param entries: a list with the entries for the feed. Entries can also
+                    be added later with :meth:`add`.
+
+    For more information on the elements see
+    http://www.atomenabled.org/developers/syndication/
+
+    Wherever a list is demanded, any iterable can be used.
+    """
+
     default_generator = ('Werkzeug', None, None)
 
     def __init__(self, title=None, entries=None, **kwargs):
-        """Create an Atom feed.
-
-        :Parameters:
-          title
-            the title of the feed. Required.
-          title_type
-            the type attribute for the title element. One of html, text,
-            xhtml. Default is text.
-          url
-            the url for the feed (not the url *of* the feed)
-          id
-            a globally unique id for the feed. Must be an URI. If not present
-            the `feed_url` is used, but one of both is required.
-          updated
-            the time the feed was modified the last time. Must be a `datetime`
-            object. If not present the latest entry's `updated` is used.
-          feed_url
-            the url to the feed. Should be the URL that was requested.
-          author
-            the author of the feed. Must be either a string (the name) or a
-            dict with name (required) and uri or email (both optional). Can be
-            a list of (may be mixed, too) strings and dicts, too, if there are
-            multiple authors. Required if not every entry has an author
-            element.
-          icon
-            an icon for the feed.
-          logo
-            a logo for the feed.
-          rights
-            copyright information for the feed.
-          rights_type
-            the type attribute for the rights element. One of html, text,
-            xhtml. Default is text.
-          subtitle
-            a short description of the feed.
-          subtitle_type
-            the type attribute for the subtitle element. One of html, text,
-            xhtml. Default is text.
-          links
-            additional links. Must be a list of dictionaries with href
-            (required) and rel, type, hreflang, title, length (all optional)
-          generator
-            the software that generated this feed.  This must be a tuple in
-            the form ``(name, url, version)``.  If you don't want to specify
-            one of them, set the item to None.
-          entries
-            a list with the entries for the feed. Entries can also be added
-            later with add().
-
-        For more information on the elements see
-        http://www.atomenabled.org/developers/syndication/
-
-        Everywhere where a list is demanded, any iterable can be used.
-        """
         self.title = title
         self.title_type = kwargs.get('title_type', 'text')
         self.url = kwargs.get('url')
@@ -127,7 +130,10 @@
                 raise TypeError('author must contain at least a name')
 
     def add(self, *args, **kwargs):
-        """add a new entry to the feed"""
+        """Add a new entry to the feed.  This function can either be called
+        with a :class:`FeedEntry` or some keyword and positional arguments
+        that are forwarded to the :class:`FeedEntry` constructor.
+        """
         if len(args) == 1 and not kwargs and isinstance(args[0], FeedEntry):
             self.entries.append(args[0])
         else:
@@ -207,7 +213,7 @@
 
     def __call__(self, environ, start_response):
         """Use the class as WSGI response object."""
-        return self.get_response(environ, start_response)
+        return self.get_response()(environ, start_response)
 
     def __unicode__(self):
         return self.to_string()
@@ -217,61 +223,47 @@
 
 
 class FeedEntry(object):
-    """Represents a single entry in a feed."""
+    """Represents a single entry in a feed.
+
+    :param title: the title of the entry. Required.
+    :param title_type: the type attribute for the title element.  One of
+                       ``'html'``, ``'text'`` or ``'xhtml'``.
+    :param content: the content of the entry.
+    :param content_type: the type attribute for the content element.  One
+                         of ``'html'``, ``'text'`` or ``'xhtml'``.
+    :param summary: a summary of the entry's content.
+    :param summary_type: the type attribute for the summary element.  One
+                         of ``'html'``, ``'text'`` or ``'xhtml'``.
+    :param url: the url for the entry.
+    :param id: a globally unique id for the entry.  Must be a URI.  If
+               not present the URL is used, but one of the two is required.
+    :param updated: the time the entry was modified the last time.  Must
+                    be a :class:`datetime.datetime` object. Required.
+    :param author: the author of the entry.  Must be either a string (the
+                   name) or a dict with name (required) and uri or
+                   email (both optional).  Can also be a list of mixed
+                   strings and dicts if there are multiple authors.
+                   Required if the feed itself does not have an author
+                   element.
+    :param published: the time the entry was initially published.  Must
+                      be a :class:`datetime.datetime` object.
+    :param rights: copyright information for the entry.
+    :param rights_type: the type attribute for the rights element.  One of
+                        ``'html'``, ``'text'`` or ``'xhtml'``.  Default is
+                        ``'text'``.
+    :param links: additional links.  Must be a list of dictionaries with
+                  href (required) and rel, type, hreflang, title, length
+                  (all optional)
+    :param xml_base: The xml base (url) for this feed item.  If not provided
+                     it will default to the item url.
+
+    For more information on the elements see
+    http://www.atomenabled.org/developers/syndication/
+
+    Wherever a list is demanded, any iterable can be used.
+    """
 
     def __init__(self, title=None, content=None, feed_url=None, **kwargs):
-        """Holds an Atom feed entry.
-
-        :Parameters:
-          title
-            the title of the entry. Required.
-          title_type
-            the type attribute for the title element. One of html, text,
-            xhtml. Default is text.
-          content
-            the content of the entry.
-          content_type
-            the type attribute for the content element. One of html, text,
-            xhtml. Default is text.
-          summary
-            a summary of the entry's content.
-          summary_type
-            a type attribute for the summary element. One of html, text,
-            xhtml. Default is text.
-          url
-            the url for the entry.
-          id
-            a globally unique id for the entry. Must be an URI. If not present
-            the URL is used, but one of both is required.
-          updated
-            the time the entry was modified the last time. Must be a
-            `datetime` object. Required.
-          author
-            the author of the entry. Must be either a string (the name) or a
-            dict with name (required) and uri or email (both optional). Can
-            be a list of (may be mixed, too) strings and dicts, too, if there
-            are multiple authors. Required if there is no author for the
-            feed.
-          published
-            the time the entry was initially published. Must be a `datetime`
-            object.
-          rights
-            copyright information for the entry.
-          rights_type
-            the type attribute for the rights element. One of html, text,
-            xhtml. Default is text.
-          links
-            additional links. Must be a list of dictionaries with href
-            (required) and rel, type, hreflang, title, length (all optional)
-          xml_base
-            The xml base (url) for this feed item.  If not provided it will
-            default to the item url.
-
-        For more information on the elements see
-        http://www.atomenabled.org/developers/syndication/
-
-        Everywhere where a list is demanded, any iterable can be used.
-        """
         self.title = title
         self.title_type = kwargs.get('title_type', 'text')
         self.content = content
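
Besides moving the parameter documentation into the class docstrings, the atom.py hunks fix `AtomFeed.__call__` to delegate to `get_response()`, so a feed object can be used directly as a WSGI application. A minimal sketch of that usage; the URLs, id and entry content are placeholders::

    from datetime import datetime
    from werkzeug.contrib.atom import AtomFeed, FeedEntry

    def feed_app(environ, start_response):
        feed = AtomFeed('Example feed', feed_url='http://example.org/feed',
                        url='http://example.org/')
        feed.add(FeedEntry('Hello', 'First post', content_type='text',
                           url='http://example.org/posts/1',
                           id='urn:example:post:1',
                           updated=datetime.utcnow()))
        # __call__ now runs self.get_response()(environ, start_response):
        return feed(environ, start_response)
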
--- a/MoinMoin/support/werkzeug/contrib/cache.py	Fri Feb 27 23:30:37 2009 +0100
+++ b/MoinMoin/support/werkzeug/contrib/cache.py	Sat Feb 28 00:08:31 2009 +0100
@@ -3,14 +3,57 @@
     werkzeug.contrib.cache
     ~~~~~~~~~~~~~~~~~~~~~~
 
-    Small helper module that provides a simple interface to memcached, a
-    simple django-inspired in-process cache and a file system based cache.
+    The main problem with dynamic Web sites is, well, they're dynamic.  Each
+    time a user requests a page, the webserver executes a lot of code, queries
+    the database, renders templates until the visitor gets the page he sees.
 
-    The idea is that it's possible to switch caching systems without changing
-    much code in the application.
+    This is a lot more expensive than just loading a file from the file system
+    and sending it to the visitor.
 
+    For most Web applications, this overhead isn't a big deal, but once it
+    becomes one, you will be glad to have a cache system in place.
 
-    :copyright: 2007-2008 by Armin Ronacher.
+    How Caching Works
+    =================
+
+    Caching is pretty simple.  Basically you have a cache object lurking around
+    somewhere that is connected to a remote cache or the file system or
+    something else.  When the request comes in, you check if the current page
+    is already in the cache and, if so, you return it.  Otherwise you generate
+    the page and put it into the cache.  (Or a fragment of the page; you don't
+    have to cache the full thing.)
+
+    Here is a simple example of how to cache a sidebar for a template::
+
+        def get_sidebar(user):
+            identifier = 'sidebar_for/user%d' % user.id
+            value = cache.get(identifier)
+            if value is not None:
+                return value
+            value = generate_sidebar_for(user=user)
+            cache.set(identifier, value, timeout=60 * 5)
+            return value
+
+    Creating a Cache Object
+    =======================
+
+    To create a cache object you just import the cache system of your choice
+    from the cache module and instantiate it.  Then you can start working
+    with that object:
+
+    >>> from werkzeug.contrib.cache import SimpleCache
+    >>> c = SimpleCache()
+    >>> c.set("foo", "value")
+    >>> c.get("foo")
+    'value'
+    >>> c.get("missing") is None
+    True
+
+    Please keep in mind that you have to create the cache and put it somewhere
+    you have access to it (either as a module global you can import or by
+    putting it on your WSGI application).
+
+    :copyright: (c) 2009 by the Werkzeug Team, see AUTHORS for more details.
     :license: BSD, see LICENSE for more details.
 """
 import os
@@ -23,58 +66,135 @@
 from time import time
 from cPickle import loads, dumps, load, dump, HIGHEST_PROTOCOL
 
-have_memcache = True
-try:
-    import cmemcache as memcache
-    is_cmemcache = True
-except ImportError:
-    try:
-        import memcache
-        is_cmemcache = False
-    except ImportError:
-        have_memcache = False
-
 
 class BaseCache(object):
-    """Baseclass for the cache systems."""
+    """Baseclass for the cache systems.  All the cache systems implement this
+    API or a superset of it.
+
+    :param default_timeout: the default timeout that is used if no timeout is
+                            specified on :meth:`set`.
+    """
 
     def __init__(self, default_timeout=300):
         self.default_timeout = default_timeout
 
     def get(self, key):
+        """Looks up key in the cache and returns it.  If the key does not
+        exist `None` is returned instead.
+
+        :param key: the key to be looked up.
+        """
         return None
-    delete = get
+
+    def delete(self, key):
+        """Deletes `key` from the cache.  If it does not exist in the cache
+        nothing happens.
+
+        :param key: the key to delete.
+        """
+        pass
 
     def get_many(self, *keys):
+        """Returns a list of keys.  For each key a item in the list is
+        created.  Example::
+
+            foo, bar = cache.get_many("foo", "bar")
+
+        If a key can't be looked up `None` is returned for that key
+        instead.
+
+        :param keys: The function accepts multiple keys as positional
+                     arguments.
+        """
         return map(self.get, keys)
 
     def get_dict(self, *keys):
-        return dict(izip(keys, self.get_many(keys)))
+        """Works like :meth:`get_many` but returns a dict::
+
+            d = cache.get_dict("foo", "bar")
+            foo = d["foo"]
+            bar = d["bar"]
+
+        :param keys: The function accepts multiple keys as positional
+                     arguments.
+        """
+        return dict(izip(keys, self.get_many(*keys)))
 
     def set(self, key, value, timeout=None):
+        """Adds or overrides a key in the cache.
+
+        :param key: the key to set
+        :param value: the value for the key
+        :param timeout: the cache timeout for the key or the default
+                        timeout if not specified.
+        """
         pass
-    add = set
+
+    def add(self, key, value, timeout=None):
+        """Works like :meth:`set` but does not override already existing
+        values.
+
+        :param key: the key to set
+        :param value: the value for the key
+        :param timeout: the cache timeout for the key or the default
+                        timeout if not specified.
+        """
+        pass
 
     def set_many(self, mapping, timeout=None):
+        """Sets multiple keys and values from a dict.
+
+        :param mapping: a dict with the values to set.
+        :param timeout: the cache timeout for the key or the default
+                        timeout if not specified.
+        """
         for key, value in mapping.iteritems():
             self.set(key, value, timeout)
 
     def delete_many(self, *keys):
+        """Deletes multiple keys at once.
+
+        :param keys: The function accepts multiple keys as positional
+                     arguments.
+        """
         for key in keys:
             self.delete(key)
 
     def clear(self):
+        """Clears the cache.  Keep in mind that not all caches support
+        clearing of the full cache.
+        """
         pass
 
     def inc(self, key, delta=1):
+        """Increments the value of a key by `delta`.  If the key does
+        not yet exist it is initialized with `delta`.
+
+        For supporting caches this is an atomic operation.
+
+        :param key: the key to increment.
+        :param delta: the delta to add.
+        """
         self.set(key, (self.get(key) or 0) + delta)
 
     def dec(self, key, delta=1):
+        """Decrements the value of a key by `delta`.  If the key does
+        not yet exist it is initialized with `-delta`.
+
+        For supporting caches this is an atomic operation.
+
+        :param key: the key to decrement.
+        :param delta: the delta to subtract.
+        """
         self.set(key, (self.get(key) or 0) - delta)
 
 
 class NullCache(BaseCache):
-    """A cache that doesn't cache."""
+    """A cache that doesn't cache.  This can be useful for unit testing.
+
+    :param default_timeout: a dummy parameter that is ignored but exists
+                            for API compatibility with other caches.
+    """
 
 
 class SimpleCache(BaseCache):
@@ -82,6 +202,11 @@
     mainly for the development server and is not 100% thread safe.  It tries
     to use as many atomic operations as possible and no locks for simplicity
     but it could happen under heavy load that keys are added multiple times.
+
+    :param threshold: the maximum number of items the cache stores before
+                      it starts deleting some.
+    :param default_timeout: the default timeout that is used if no timeout is
+                            specified on :meth:`~BaseCache.set`.
     """
 
     def __init__(self, threshold=500, default_timeout=300):
@@ -126,33 +251,62 @@
 class MemcachedCache(BaseCache):
     """A cache that uses memcached as backend.
 
+    The first argument can either be a list or tuple of server addresses
+    in which case Werkzeug tries to import the memcache module and connect
+    to it, or an object that resembles the API of a :class:`memcache.Client`.
+
     Implementation notes:  This cache backend works around some limitations in
     memcached to simplify the interface.  For example unicode keys are encoded
-    to utf-8 on the fly.  Methods such as `get_dict` return the keys in the
-    same format as passed.  Furthermore all get methods silently ignore key
-    errors to not cause problems when untrusted user data is passed to the get
-    methods which is often the case in web applications.
+    to utf-8 on the fly.  Methods such as :meth:`~BaseCache.get_dict` return
+    the keys in the same format as passed.  Furthermore all get methods
+    silently ignore key errors to not cause problems when untrusted user data
+    is passed to the get methods which is often the case in web applications.
+
+    :param servers: a list or tuple of server addresses or alternatively
+                    a :class:`memcache.Client` or a compatible client.
+    :param default_timeout: the default timeout that is used if no timeout is
+                            specified on :meth:`~BaseCache.set`.
+    :param key_prefix: a prefix that is added before all keys.  This makes it
+                       possible to use the same memcached server for different
+                       applications.  Keep in mind that
+                       :meth:`~BaseCache.clear` will also clear keys with a
+                       different prefix.
     """
 
-    def __init__(self, servers, default_timeout=300):
+    def __init__(self, servers, default_timeout=300, key_prefix=None):
         BaseCache.__init__(self, default_timeout)
-        if not have_memcache:
-            raise RuntimeError('no memcache module found')
+        if isinstance(servers, (list, tuple)):
+            try:
+                import cmemcache as memcache
+                is_cmemcache = True
+            except ImportError:
+                try:
+                    import memcache
+                    is_cmemcache = False
+                except ImportError:
+                    raise RuntimeError('no memcache module found')
 
-        # cmemcache has a bug that debuglog is not defined for the
-        # client.  Whenever pickle fails you get a weird AttributError.
-        if is_cmemcache:
-            self._client = memcache.Client(map(str, servers))
-            try:
-                self._client.debuglog = lambda *a: None
-            except:
-                pass
+            # cmemcache has a bug that debuglog is not defined for the
+            # client.  Whenever pickle fails you get a weird AttributError.
+            if is_cmemcache:
+                client = memcache.Client(map(str, servers))
+                try:
+                    client.debuglog = lambda *a: None
+                except:
+                    pass
+            else:
+                client = memcache.Client(servers, False, HIGHEST_PROTOCOL)
         else:
-            self._client = memcache.Client(servers, False, HIGHEST_PROTOCOL)
+            client = servers
+
+        self._client = client
+        self.key_prefix = key_prefix
 
     def get(self, key):
         if isinstance(key, unicode):
             key = key.encode('utf-8')
+        if self.key_prefix:
+            key = self.key_prefix + key
         # memcached doesn't support keys longer than that.  Because checks
         # for such long keys can occur when user submitted data is tested,
         # we fail silently for getting.
@@ -168,13 +322,15 @@
                 have_encoded_keys = True
             else:
                 encoded_key = key
+            if self.key_prefix:
+                encoded_key = self.key_prefix + encoded_key
             if _test_memcached_key(key):
                 key_mapping[encoded_key] = key
         # the keys call here is important because otherwise cmemcache
         # does ugly things.  What exactly I don't know, I think it does
         # Py_DECREF but quite frankly I don't care.
         d = rv = self._client.get_multi(key_mapping.keys())
-        if have_encoded_keys:
+        if have_encoded_keys or self.key_prefix:
             rv = {}
             for key, value in d.iteritems():
                 rv[key_mapping[key]] = value
@@ -189,6 +345,8 @@
             timeout = self.default_timeout
         if isinstance(key, unicode):
             key = key.encode('utf-8')
+        if self.key_prefix:
+            key = self.key_prefix + key
         self._client.add(key, value, timeout)
 
     def set(self, key, value, timeout=None):
@@ -196,6 +354,8 @@
             timeout = self.default_timeout
         if isinstance(key, unicode):
             key = key.encode('utf-8')
+        if self.key_prefix:
+            key = self.key_prefix + key
         self._client.set(key, value, timeout)
 
     def get_many(self, *keys):
@@ -209,20 +369,29 @@
         for key, value in mapping.iteritems():
             if isinstance(key, unicode):
                 key = key.encode('utf-8')
+            if self.key_prefix:
+                key = self.key_prefix + key
             new_mapping[key] = value
         self._client.set_multi(new_mapping, timeout)
 
     def delete(self, key):
         if isinstance(key, unicode):
             key = key.encode('utf-8')
-        self._client.delete(key)
+        if self.key_prefix:
+            key = self.key_prefix + key
+        if _test_memcached_key(key):
+            self._client.delete(key)
 
     def delete_many(self, *keys):
-        keys = list(keys)
-        for idx, key in enumerate(keys):
+        new_keys = []
+        for key in keys:
             if isinstance(key, unicode):
-                keys[idx] = key.encode('utf-8')
-        self._client.delete_multi(keys)
+                key = key.encode('utf-8')
+            if self.key_prefix:
+                key = self.key_prefix + key
+            if _test_memcached_key(key):
+                new_keys.append(key)
+        self._client.delete_multi(new_keys)
 
     def clear(self):
         self._client.flush_all()
@@ -230,16 +399,48 @@
     def inc(self, key, delta=1):
         if isinstance(key, unicode):
             key = key.encode('utf-8')
-        self._client.incr(key, key, delta)
+        if self.key_prefix:
+            key = self.key_prefix + key
+        self._client.incr(key, delta)
 
     def dec(self, key, delta=1):
         if isinstance(key, unicode):
             key = key.encode('utf-8')
-        self._client.decr(key, key, delta)
+        if self.key_prefix:
+            key = self.key_prefix + key
+        self._client.decr(key, delta)
+
+
+class GAEMemcachedCache(MemcachedCache):
+    """Connects to the Google appengine memcached Cache.
+
+    :param default_timeout: the default timeout that is used if no timeout is
+                            specified on :meth:`~BaseCache.set`.
+    :param key_prefix: a prefix that is added before all keys.  This makes it
+                       possible to use the same memcached server for different
+                       applications.  Keep in mind that
+                       :meth:`~BaseCache.clear` will also clear keys with a
+                       different prefix.
+    """
+
+    def __init__(self, default_timeout=300, key_prefix=None):
+        from google.appengine.api import memcache
+        MemcachedCache.__init__(self, memcache.Client(),
+                                default_timeout, key_prefix)
 
 
 class FileSystemCache(BaseCache):
-    """A cache that stores the items on the file system."""
+    """A cache that stores the items on the file system.  This cache depends
+    on being the only user of the `cache_dir`.  Make absolutely sure that
+    nobody but this cache stores files there or otherwise the cache will
+    randomly delete files therein.
+
+    :param cache_dir: the directory where cached files are stored.
+    :param threshold: the maximum number of items the cache stores before
+                      it starts deleting some.
+    :param default_timeout: the default timeout that is used if no timeout is
+                            specified on :meth:`~BaseCache.set`.
+    """
 
     def __init__(self, cache_dir, threshold=500, default_timeout=300):
         BaseCache.__init__(self, default_timeout)
@@ -255,7 +456,7 @@
             for idx, key in enumerate(entries):
                 try:
                     f = file(self._get_filename(key))
-                    if pickle.load(f) > now and idx % 3 != 0:
+                    if load(f) > now and idx % 3 != 0:
                         f.close()
                         continue
                 except:
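
The cache.py changes give `MemcachedCache` a `key_prefix` argument, let it accept an already constructed client object instead of a server list, and add a `GAEMemcachedCache` subclass for App Engine. A hedged sketch of the prefix feature; the server address and prefix are placeholders::

    from werkzeug.contrib.cache import MemcachedCache

    # Two applications can share one memcached instance as long as their
    # prefixes differ; keys are prefixed transparently on get/set/delete.
    cache = MemcachedCache(['127.0.0.1:11211'], key_prefix='moin/')
    cache.set('frontpage', '<rendered html>', timeout=60 * 5)
    page = cache.get('frontpage')    # stored and looked up as 'moin/frontpage'
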
--- /dev/null	Thu Jan 01 00:00:00 1970 +0000
+++ b/MoinMoin/support/werkzeug/contrib/fixers.py	Sat Feb 28 00:08:31 2009 +0100
@@ -0,0 +1,96 @@
+# -*- coding: utf-8 -*-
+"""
+    werkzeug.contrib.fixers
+    ~~~~~~~~~~~~~~~~~~~~~~~
+
+    .. versionadded:: 0.5
+
+    This module includes various helpers that fix bugs in web servers.  They may
+    be necessary for some versions of a buggy web server but not others.  We try
+    to stay up to date with the status of the bugs as well as possible, but you
+    have to check whether they fix the problem you encounter.
+
+    If you notice bugs in webservers not fixed in this module consider
+    contributing a patch.
+
+    :copyright: Copyright 2009 by the Werkzeug Team, see AUTHORS for more details.
+    :license: BSD, see LICENSE for more details.
+"""
+from urllib import unquote
+
+
+class LighttpdCGIRootFix(object):
+    """Wrap the application in this middleware if you are using lighttpd
+    with FastCGI or CGI and the application is mounted on the URL root.
+    """
+
+    def __init__(self, app):
+        self.app = app
+
+    def __call__(self, environ, start_response):
+        environ['PATH_INFO'] = environ.get('SCRIPT_NAME', '') + \
+                               environ.get('PATH_INFO', '')
+        environ['SCRIPT_NAME'] = ''
+        return self.app(environ, start_response)
+
+
+class PathInfoFromRequestUriFix(object):
+    """On windows environment variables are limited to the system charset
+    which makes it impossible to store the `PATH_INFO` variable in the
+    environment without loss of information on some systems.
+
+    This is for example a problem for CGI scripts on a Windows Apache.
+
+    This fixer works by recreating the `PATH_INFO` from `REQUEST_URI`,
+    `REQUEST_URL`, or `UNENCODED_URL` (whatever is available).  Thus the
+    fix can only be applied if the webserver supports any of these
+    variables.
+    """
+
+    def __init__(self, app):
+        self.app = app
+
+    def __call__(self, environ, start_response):
+        for key in 'REQUEST_URL', 'REQUEST_URI', 'UNENCODED_URL':
+            if key not in environ:
+                continue
+            request_uri = unquote(environ[key])
+            script_name = unquote(environ.get('SCRIPT_NAME', ''))
+            if request_uri.startswith(script_name):
+                environ['PATH_INFO'] = request_uri[len(script_name):] \
+                    .split('?', 1)[0]
+                break
+        return self.app(environ, start_response)
+
+
+class ProxyFix(object):
+    """This middleware can be applied to add HTTP proxy support to an
+    application that was not designed with HTTP proxies in mind.  It
+    sets `REMOTE_ADDR` and `HTTP_HOST` from the `X-Forwarded-*` headers.
+
+    Werkzeug wrappers have builtin support for this by setting the
+    :attr:`~werkzeug.BaseRequest.is_behind_proxy` attribute to `True`.
+
+    Do not use this middleware in non-proxy setups for security reasons.
+
+    The original values of `REMOTE_ADDR` and `HTTP_HOST` are stored in
+    the WSGI environment as `werkzeug.proxy_fix.orig_remote_addr` and
+    `werkzeug.proxy_fix.orig_http_host`.
+    """
+
+    def __init__(self, app):
+        self.app = app
+
+    def __call__(self, environ, start_response):
+        getter = environ.get
+        forwarded_for = getter('HTTP_X_FORWARDED_FOR', '').split(',')
+        forwarded_host = getter('HTTP_X_FORWARDED_HOST', '')
+        environ.update({
+            'werkzeug.proxy_fix.orig_remote_addr':  getter('REMOTE_ADDR'),
+            'werkzeug.proxy_fix.orig_http_host':    getter('HTTP_HOST')
+        })
+        if forwarded_for:
+            environ['REMOTE_ADDR'] = forwarded_for[0].strip()
+        if forwarded_host:
+            environ['HTTP_HOST'] = forwarded_host
+        return self.app(environ, start_response)
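
`fixers.py` is new in this update; its middlewares are ordinary WSGI wrappers, so they compose by simple nesting. A minimal sketch (the inner application is a placeholder, and `ProxyFix` should only be used behind a trusted proxy, as its docstring warns)::

    from werkzeug.contrib.fixers import LighttpdCGIRootFix, ProxyFix

    def application(environ, start_response):
        start_response('200 OK', [('Content-Type', 'text/plain')])
        return ['client: %s\n' % environ.get('REMOTE_ADDR')]

    # Trust X-Forwarded-For / X-Forwarded-Host from the front-end proxy.
    application = ProxyFix(application)

    # Only needed when lighttpd serves the app from the URL root via (Fast)CGI.
    application = LighttpdCGIRootFix(application)
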
--- a/MoinMoin/support/werkzeug/contrib/iterio.py	Fri Feb 27 23:30:37 2009 +0100
+++ b/MoinMoin/support/werkzeug/contrib/iterio.py	Sat Feb 28 00:08:31 2009 +0100
@@ -1,16 +1,15 @@
 # -*- coding: utf-8 -*-
-"""
+r"""
     werkzeug.contrib.iterio
     ~~~~~~~~~~~~~~~~~~~~~~~
 
-    This module implements a `IterIO` that converts an iterator into a stream
-    object and the other way round.  Converting streams into iterators
-    requires the `greenlet`_ module.
-
+    This module implements a :class:`IterIO` that converts an iterator into
+    a stream object and the other way round.  Converting streams into
+    iterators requires the `greenlet`_ module.
 
     To convert an iterator into a stream all you have to do is to pass it
-    directly to the `IterIO` constructor.  In this example we pass it a newly
-    created generator::
+    directly to the :class:`IterIO` constructor.  In this example we pass it
+    a newly created generator::
 
         def foo():
             yield "something\n"
@@ -19,10 +18,11 @@
         print stream.read()         # read the whole iterator
 
     The other way round works a bit differently because we have to ensure that
-    the code execution doesn't take place yet.  An `IterIO` call with a
+    the code execution doesn't take place yet.  An :class:`IterIO` call with a
     callable as first argument does two things.  The function itself is passed
-    an `IterI` stream it can feed.  The object returned by the `IterIO`
-    constructor on the other hand is not an stream object but an iterator::
+    an :class:`IterIO` stream it can feed.  The object returned by the
+    :class:`IterIO` constructor on the other hand is not a stream object but
+    an iterator::
 
         def foo(stream):
             stream.write("some")
@@ -34,21 +34,21 @@
         print iterator.next()       # prints otherthing
         iterator.next()             # raises StopIteration
 
-
     .. _greenlet: http://codespeak.net/py/dist/greenlet.html
 
-    :copyright: 2007 by Armin Ronacher.
+    :copyright: (c) 2009 by the Werkzeug Team, see AUTHORS for more details.
     :license: BSD, see LICENSE for more details.
 """
 try:
     from py.magic import greenlet
-except (RuntimeError, ImportError):
+except:
     greenlet = None
 
 
 class IterIO(object):
-    """
-    Baseclass for iterator IOs.
+    """Instances of this object implement an interface compatible with the
+    standard Python :class:`file` object.  Streams are either read-only or
+    write-only depending on how the object is created.
     """
 
     def __new__(cls, obj):
@@ -121,9 +121,7 @@
 
 
 class IterI(IterIO):
-    """
-    Convert an stream into an iterator.
-    """
+    """Convert an stream into an iterator."""
 
     def __new__(cls, func):
         if greenlet is None:
@@ -158,7 +156,7 @@
         self.pos += len(s)
         self._buffer.append(s)
 
-    def writelines(slf, list):
+    def writelines(self, list):
         self.write(''.join(list))
 
     def flush(self):
@@ -170,11 +168,10 @@
 
 
 class IterO(IterIO):
-    """
-    Iter output.  Wrap an iterator and give it a stream like interface.
-    """
+    """Iter output.  Wrap an iterator and give it a stream like interface."""
 
-    __new__ = object.__new__
+    def __new__(cls, gen):
+        return object.__new__(cls)
 
     def __init__(self, gen):
         self._gen = gen
@@ -197,18 +194,22 @@
         if mode == 1:
             pos += self.pos
         elif mode == 2:
-            pos += len(self._buf)
+            self.read()
+            self.pos = min(self.pos, self.pos + pos)
+            return
+        elif mode != 0:
+            raise IOError('Invalid argument')
+        buf = []
         try:
-            buf = []
             tmp_end_pos = len(self._buf)
             while pos > tmp_end_pos:
                 item = self._gen.next()
                 tmp_end_pos += len(item)
                 buf.append(item)
-            if buf:
-                self._buf += ''.join(buf)
         except StopIteration:
             pass
+        if buf:
+            self._buf += ''.join(buf)
         self.pos = max(0, pos)
 
     def read(self, n=-1):
@@ -216,24 +217,26 @@
             raise ValueError('I/O operation on closed file')
         if n < 0:
             self._buf += ''.join(self._gen)
-            return self._buf[self.pos:]
+            result = self._buf[self.pos:]
+            self.pos += len(result)
+            return result
         new_pos = self.pos + n
+        buf = []
         try:
-            buf = []
             tmp_end_pos = len(self._buf)
             while new_pos > tmp_end_pos:
                 item = self._gen.next()
                 tmp_end_pos += len(item)
                 buf.append(item)
-            if buf:
-                self._buf += ''.join(buf)
         except StopIteration:
             pass
+        if buf:
+            self._buf += ''.join(buf)
         new_pos = max(0, new_pos)
         try:
             return self._buf[self.pos:new_pos]
         finally:
-            self.pos = new_pos
+            self.pos = min(new_pos, len(self._buf))
 
     def readline(self, length=None):
         if self.closed:
@@ -244,10 +247,10 @@
             pos = self.pos
             while nl_pos < 0:
                 item = self._gen.next()
-                pos2 = item.find('\n', pos)
+                local_pos = item.find('\n')
                 buf.append(item)
-                if pos2 >= 0:
-                    nl_pos = pos
+                if local_pos >= 0:
+                    nl_pos = pos + local_pos
                     break
                 pos += len(item)
         except StopIteration:
@@ -263,7 +266,7 @@
         try:
             return self._buf[self.pos:new_pos]
         finally:
-            self.pos = new_pos
+            self.pos = min(new_pos, len(self._buf))
 
     def readlines(self, sizehint=0):
         total = 0
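
The `IterO` fixes above tighten the `seek`, `read` and `readline` bookkeeping (and give `writelines` its `self` argument back) without changing the basic usage shown in the module docstring. A short sketch of the read-only direction, mirroring that docstring example::

    from werkzeug.contrib.iterio import IterIO

    def produce():
        yield 'first line\n'
        yield 'second line\n'

    stream = IterIO(produce())       # IterIO picks the IterO implementation
    assert stream.readline() == 'first line\n'
    assert stream.read() == 'second line\n'
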
--- a/MoinMoin/support/werkzeug/contrib/jsrouting.py	Fri Feb 27 23:30:37 2009 +0100
+++ b/MoinMoin/support/werkzeug/contrib/jsrouting.py	Sat Feb 28 00:08:31 2009 +0100
@@ -6,17 +6,17 @@
     Addon module that allows to create a JavaScript function from a map
     that generates rules.
 
-    :copyright: 2007 by Armin Ronacher, Leif K-Brooks.
+    :copyright: (c) 2009 by the Werkzeug Team, see AUTHORS for more details.
     :license: BSD, see LICENSE for more details.
 """
 try:
     from simplejson import dumps
 except ImportError:
     def dumps(*args):
-        raise RuntimeErrr('simplejson required for jsrouting')
+        raise RuntimeError('simplejson required for jsrouting')
 
 from inspect import getmro
-from werkzeug.minitmpl import Template
+from werkzeug.templates import Template
 from werkzeug.routing import NumberConverter
 
 
--- a/MoinMoin/support/werkzeug/contrib/kickstart.py	Fri Feb 27 23:30:37 2009 +0100
+++ b/MoinMoin/support/werkzeug/contrib/kickstart.py	Sat Feb 28 00:08:31 2009 +0100
@@ -3,23 +3,26 @@
     werkzeug.contrib.kickstart
     ~~~~~~~~~~~~~~~~~~~~~~~~~~
 
-    This module provides some simple shortcuts to make using Werkzeug
-    simpler for small scripts.
+    This module provides some simple shortcuts to make using Werkzeug simpler
+    for small scripts.
 
-    These improvements include predefied Request and Response objects as well
-    as a pre-defined Application object which all can be customized in child
-    classes of course. The Request and Reponse objects handle URL generation
-    as well as sessions via the werkzeug.contrib.sessions and is purely
-    optional.
+    These improvements include predefined `Request` and `Response` objects as
+    well as a predefined `Application` object which can be customized in child
+    classes, of course.  The `Request` and `Response` objects handle URL
+    generation as well as sessions via `werkzeug.contrib.sessions` and are
+    purely optional.
 
-    There is also some integration of template engines. The template loaders
-    are of course not neccessary to use the template engines in Werkzeug, but
-    they provide a common interface. Currently supported template engines
-    include Werkzeug's minitmpl an Genshi. Support for other engines can be
-    added in a trivial way. These loaders provide a template interface similar
-    to the one that Django uses.
+    There is also some integration of template engines.  The template loaders
+    are, of course, not necessary to use the template engines in Werkzeug,
+    but they provide a common interface.  Currently supported template engines
+    include Werkzeug's minitmpl and Genshi_.  Support for other engines can be
+    added in a trivial way.  These loaders provide a template interface
+    similar to the one used by Django_.
 
-    :copyright: 2007-2008 by Marek Kubica, Armin Ronacher.
+    .. _Genshi: http://genshi.edgewall.org/
+    .. _Django: http://www.djangoproject.com/
+
+    :copyright: (c) 2009 by the Werkzeug Team, see AUTHORS for more details.
     :license: BSD, see LICENSE for more details.
 """
 from os import path
@@ -27,7 +30,6 @@
 from werkzeug.templates import Template
 from werkzeug.exceptions import HTTPException
 from werkzeug.routing import RequestRedirect
-from werkzeug.contrib.sessions import FilesystemSessionStore
 
 __all__ = ['Request', 'Response', 'TemplateNotFound', 'TemplateLoader',
            'GenshiTemplateLoader', 'Application']
--- a/MoinMoin/support/werkzeug/contrib/limiter.py	Fri Feb 27 23:30:37 2009 +0100
+++ b/MoinMoin/support/werkzeug/contrib/limiter.py	Sat Feb 28 00:08:31 2009 +0100
@@ -3,53 +3,40 @@
     werkzeug.contrib.limiter
     ~~~~~~~~~~~~~~~~~~~~~~~~
 
-    A middleware that limits incoming data.  This works around problems
-    with trac or django because those stream into the memory directly.
+    A middleware that limits incoming data.  This works around problems with
+    Trac_ or Django_ because those stream request data directly into memory.
+
+    .. _Trac: http://trac.edgewall.org/
+    .. _Django: http://www.djangoproject.com/
+
+    :copyright: (c) 2009 by the Werkzeug Team, see AUTHORS for more details.
+    :license: BSD, see LICENSE for more details.
+"""
+from warnings import warn
+from werkzeug.utils import LimitedStream as LimitedStreamBase
 
 
-    :copyright: 2007 by Armin Ronacher.
-    :license: BSD, see LICENSE for more details.
-"""
-
-
-class LimitedStream(object):
-    """
-    Wraps a stream and doesn't read more than n bytes.
-    """
+class _SilentLimitedStream(LimitedStreamBase):
 
     def __init__(self, environ, limit):
-        self._environ = environ
-        self._stream = environ['wsgi.input']
-        self._limit = min(limit, int(environ.get('CONTENT_LENGTH') or 0))
-        self._pos = 0
+        LimitedStreamBase.__init__(self,
+            environ['wsgi.input'],
+            min(limit, int(environ.get('CONTENT_LENGTH') or 0)),
+            silent=True
+        )
 
-    def read(self, size=None):
-        if self._pos >= self._limit:
-            return ''
-        if size is None:
-            size = self._limit
-        read = self._stream.read(min(self._limit - self._pos, size))
-        self._pos += len(read)
-        return read
 
-    def readline(self, *args):
-        if self._pos >= self._limit:
-            return ''
-        line = self._stream.readline(*args)
-        self.pos += len(line)
-        self.processed()
-        return line
+class LimitedStream(_SilentLimitedStream):
 
-    def readlines(self, hint=None):
-        result = []
-        while self.pos < self._limit:
-            result.append(self.readline())
-        return result
+    def __init__(self, environ, limit):
+        _SilentLimitedStream.__init__(self, environ, limit)
+        warn(DeprecationWarning('contrib limited stream is deprecated, use '
+                                'werkzeug.LimitedStream instead.'),
+             stacklevel=2)
 
 
 class StreamLimitMiddleware(object):
-    """
-    Limits the input stream to a given number of bytes.  This is useful if
+    """Limits the input stream to a given number of bytes.  This is useful if
     you have a WSGI application that reads form data into memory (django for
     example) and you don't want users to harm the server by uploading tons of
     data.
@@ -62,5 +49,5 @@
         self.maximum_size = maximum_size
 
     def __call__(self, environ, start_response):
-        environ['wsgi.input'] = LimitedStream(environ, self.maximum_size)
+        environ['wsgi.input'] = _SilentLimitedStream(environ, self.maximum_size)
         return self.app(environ, start_response)
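
A minimal sketch of how the limiter above is typically wired into a WSGI stack; the application callable and the 10 MB limit are illustrative assumptions, not part of the module:

    from werkzeug.contrib.limiter import StreamLimitMiddleware

    def application(environ, start_response):
        # hypothetical WSGI app that reads the whole body into memory
        body = environ['wsgi.input'].read()
        start_response('200 OK', [('Content-Type', 'text/plain')])
        return ['received %d bytes' % len(body)]

    # cap request bodies at roughly 10 MB (illustrative value)
    application = StreamLimitMiddleware(application, maximum_size=10 * 1024 * 1024)
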
--- a/MoinMoin/support/werkzeug/contrib/profiler.py	Fri Feb 27 23:30:37 2009 +0100
+++ b/MoinMoin/support/werkzeug/contrib/profiler.py	Sat Feb 28 00:08:31 2009 +0100
@@ -1,11 +1,19 @@
 # -*- coding: utf-8 -*-
 """
-    inyoka.middlewares.profiler
-    ~~~~~~~~~~~~~~~~~~~~~~~~~~~
+    werkzeug.contrib.profiler
+    ~~~~~~~~~~~~~~~~~~~~~~~~~
 
-    Provides a WSGI Profiler middleware for finding bottlenecks.
+    This module provides a simple WSGI profiler middleware for finding
+    bottlenecks in web applications.  It uses the :mod:`profile` or
+    :mod:`cProfile` module to do the profiling and writes the stats to the
+    stream provided (defaults to stderr).
 
-    :copyright: 2006-2007 by Armin Ronacher.
+    Example usage::
+
+        from werkzeug.contrib.profiler import ProfilerMiddleware
+        app = ProfilerMiddleware(app)
+
+    :copyright: (c) 2009 by the Werkzeug Team, see AUTHORS for more details.
     :license: BSD, see LICENSE for more details.
 """
 import sys
@@ -24,7 +32,7 @@
     """An object that redirects `write` calls to multiple streams.
     Use this to log to both `sys.stdout` and a file::
 
-        f = file('profiler.log')
+        f = open('profiler.log', 'w')
         stream = MergeStream(sys.stdout, f)
         profiler = ProfilerMiddleware(app, stream)
     """
@@ -40,7 +48,18 @@
 
 
 class ProfilerMiddleware(object):
-    """Simple profiler middleware."""
+    """Simple profiler middleware.  Wraps a WSGI application and profiles
+    a request.  This intentionally buffers the response so that timings are
+    more exact.
+
+    For the exact meaning of `sort_by` and `restrictions` consult the
+    :mod:`profile` documentation.
+
+    :param app: the WSGI application to profile.
+    :param stream: the stream for the profiled stats.  Defaults to stderr.
+    :param sort_by: a tuple of columns to sort the result by.
+    :param restrictions: a tuple of profiling restrictions.
+    """
 
     def __init__(self, app, stream=None,
                  sort_by=('time', 'calls'), restrictions=()):
@@ -82,8 +101,11 @@
 def make_action(app_factory, hostname='localhost', port=5000,
                 threaded=False, processes=1, stream=None,
                 sort_by=('time', 'calls'), restrictions=()):
-    """Return a new callback for werkzeug scripts that starts a local
-    server for profiling.
+    """Return a new callback for :mod:`werkzeug.script` that starts a local
+    server with the profiler enabled::
+
+        from werkzeug.contrib import profiler
+        action_profile = profiler.make_action(make_app)
     """
     def action(hostname=('h', hostname), port=('p', port),
                threaded=threaded, processes=processes):
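
A slightly fuller sketch of the profiler middleware described above, combining MergeStream with the development server; the application body and the chosen sort/restriction values are assumptions made for illustration:

    import sys
    from werkzeug import run_simple
    from werkzeug.contrib.profiler import MergeStream, ProfilerMiddleware

    def application(environ, start_response):
        # hypothetical app to profile
        start_response('200 OK', [('Content-Type', 'text/plain')])
        return ['Hello World!']

    # write the stats to stderr and to a log file at the same time
    log = open('profiler.log', 'w')
    application = ProfilerMiddleware(application,
                                     stream=MergeStream(sys.stderr, log),
                                     sort_by=('cumulative',),
                                     restrictions=(10,))

    if __name__ == '__main__':
        run_simple('localhost', 5000, application)
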
--- a/MoinMoin/support/werkzeug/contrib/reporterstream.py	Fri Feb 27 23:30:37 2009 +0100
+++ b/MoinMoin/support/werkzeug/contrib/reporterstream.py	Sat Feb 28 00:08:31 2009 +0100
@@ -3,13 +3,18 @@
     werkzeug.contrib.reporterstream
     ~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~
 
-    This module implements a class that can wrap `wsgi.input` in order to
-    be informed about changes of the stream.  This is useful if you want
-    to display a progress bar for the upload.
+    This module implements a class that can wrap `wsgi.input` in order to be
+    informed about changes of the stream.  This is useful if you want to
+    display a progress bar for the upload.
 
-    :copyright: 2007 by Armin Ronacher.
+    :copyright: (c) 2009 by the Werkzeug Team, see AUTHORS for more details.
     :license: BSD, see LICENSE for more details.
 """
+from warnings import warn
+warn(DeprecationWarning('reporter stream is deprecated.  If you want to continue '
+                        'using this class copy the module code from the '
+                        'werkzeug wiki: http://dev.pocoo.org/projects/werkzeug/'
+                        'wiki/ReporterStream'), stacklevel=2)
 
 
 class BaseReporterStream(object):
--- a/MoinMoin/support/werkzeug/contrib/securecookie.py	Fri Feb 27 23:30:37 2009 +0100
+++ b/MoinMoin/support/werkzeug/contrib/securecookie.py	Sat Feb 28 00:08:31 2009 +0100
@@ -14,18 +14,18 @@
 
     Example usage:
 
-        >>> from werkzeug.contrib.securecookie import SecureCookie
-        >>> x = SecureCookie({"foo": 42, "baz": (1, 2, 3)}, "deadbeef")
+    >>> from werkzeug.contrib.securecookie import SecureCookie
+    >>> x = SecureCookie({"foo": 42, "baz": (1, 2, 3)}, "deadbeef")
 
     Dumping into a string so that one can store it in a cookie:
 
-        >>> value = x.serialize()
+    >>> value = x.serialize()
 
     Loading from that string again:
 
-        >>> x = SecureCookie.unserialize(value, "deadbeef")
-        >>> x["baz"]
-        (1, 2, 3)
+    >>> x = SecureCookie.unserialize(value, "deadbeef")
+    >>> x["baz"]
+    (1, 2, 3)
 
     If someone modifies the cookie and the checksum is wrong the unserialize
     method will fail silently and return a new empty `SecureCookie` object.
@@ -42,7 +42,7 @@
         from werkzeug import BaseRequest, cached_property
         from werkzeug.contrib.securecookie import SecureCookie
 
-        # don' use this key but a different one.  you could just use
+        # don't use this key but a different one; you could just use
+        # os.urandom(20) to get something random
         SECRET_KEY = '\xfa\xdd\xb8z\xae\xe0}4\x8b\xea'
 
@@ -67,45 +67,54 @@
                                     httponly=True)
             return response(environ, start_response)
 
+    A less verbose integration can be achieved by using shorthand methods::
 
-    :copyright: 2007 by Armin Ronacher, Thomas Johansson.
+        class Request(BaseRequest):
+
+            @cached_property
+            def client_session(self):
+                return SecureCookie.load_cookie(self, secret_key=COOKIE_SECRET)
+
+        def application(environ, start_response):
+            request = Request(environ)
+
+            # get a response object here
+            response = ...
+
+            request.client_session.save_cookie(response)
+            return response(environ, start_response)
+
+    :copyright: (c) 2009 by the Werkzeug Team, see AUTHORS for more details.
     :license: BSD, see LICENSE for more details.
 """
-try:
-    from hashlib import sha1
-except ImportError:
-    import sha as sha1
+import sys
+import cPickle as pickle
 from hmac import new as hmac
 from datetime import datetime
 from time import time, mktime, gmtime
-from random import Random
-from cPickle import loads, dumps, HIGHEST_PROTOCOL
 from werkzeug import url_quote_plus, url_unquote_plus
-from werkzeug.contrib.sessions import ModificationTrackingDict, generate_key
+from werkzeug.contrib.sessions import ModificationTrackingDict
+
+
+# rather ugly way to import the correct hash method.  Because
+# hmac either accepts modules with a new method (sha, md5 etc.)
+# or a hashlib factory function we have to figure out what to
+# pass to it.  If we have 2.5 or higher (so not 2.4 with a
+# custom hashlib) we import from hashlib and fail if it does
+# not exist (have seen that in old OS X versions).
+# in all other cases the now deprecated sha module is used.
+_default_hash = None
+if sys.version_info >= (2, 5):
+    try:
+        from hashlib import sha1 as _default_hash
+    except ImportError:
+        pass
+if _default_hash is None:
+    import sha as _default_hash
 
 
 class UnquoteError(Exception):
-    pass
-
-
-def pickle_quote(value):
-    """Pickle and url encode a value."""
-    result = None
-    for protocol in xrange(HIGHEST_PROTOCOL + 1):
-        data = ''.join(dumps(value, protocol).encode('base64').splitlines()).strip()
-        if result is None or len(result) > len(data):
-            result = data
-    return result
-
-
-def pickle_unquote(string):
-    """URL decode a string and load it into pickle"""
-    try:
-        return loads(string.decode('base64'))
-    # unfortunately pickle can cause pretty every error here.
-    # if we get one we catch it and convert it into an UnquoteError
-    except Exception, e:
-        raise UnquoteError(str(e))
+    """Internal exception used to signal failures on quoting."""
 
 
 class SecureCookie(ModificationTrackingDict):
@@ -113,16 +122,43 @@
     an alternative mac method.  The important thing is that the mac method
     is a function with a similar interface to the hashlib.  Required
     methods are update() and digest().
+
+    Example usage:
+
+    >>> x = SecureCookie({"foo": 42, "baz": (1, 2, 3)}, "deadbeef")
+    >>> x["foo"]
+    42
+    >>> x["baz"]
+    (1, 2, 3)
+    >>> x["blafasel"] = 23
+    >>> x.should_save
+    True
+
+    :param data: the initial data.  Either a dict, list of tuples or `None`.
+    :param secret_key: the secret key.  If not set `None` or not specified
+                       it has to be set before :meth:`serialize` is called.
+    :param new: The initial value of the `new` flag.
     """
 
-    # the hash method to use.  In python 2.5 and higher this is a callable
-    # that returns a new hashlib object or a module with a new method that
-    # creates such an object.  In python 2.4 and earlier only the module
-    # is supported.
-    hash_method = sha1
+    #: The hash method to use.  This has to be a module with a `new` function
+    #: or a function that creates a hashlib object, such as `hashlib.md5`.
+    #: Subclasses can override this attribute.  The default hash is sha1.
+    hash_method = _default_hash
+
+    #: the module used for serialization.  Unless overridden by subclasses
+    #: the standard pickle module is used.
+    serialization_method = pickle
+
+    #: if the contents should be base64 quoted.  This can be disabled if the
+    #: serialization process returns cookie safe strings only.
+    quote_base64 = True
 
     def __init__(self, data=None, secret_key=None, new=True):
         ModificationTrackingDict.__init__(self, data or ())
+        # explicitly convert it into a bytestring because python 2.6
+        # no longer performs an implicit string conversion on hmac
+        if secret_key is not None:
+            secret_key = str(secret_key)
         self.secret_key = secret_key
         self.new = new
 
@@ -134,9 +170,43 @@
         )
 
     def should_save(self):
-        """True if the session should be saved."""
+        """True if the session should be saved.  By default this is only true
+        for :attr:`modified` cookies, not :attr:`new`.
+        """
         return self.modified
-    should_save = property(should_save)
+    should_save = property(should_save, doc=should_save.__doc__)
+
+    @classmethod
+    def quote(cls, value):
+        """Quote the value for the cookie.  This can be any object supported
+        by :attr:`serialization_method`.
+
+        :param value: the value to quote.
+        """
+        if cls.serialization_method is not None:
+            value = cls.serialization_method.dumps(value)
+        if cls.quote_base64:
+            value = ''.join(value.encode('base64').splitlines()).strip()
+        return value
+
+    @classmethod
+    def unquote(cls, value):
+        """Unquote the value for the cookie.  If unquoting does not work a
+        :exc:`UnquoteError` is raised.
+
+        :param value: the value to unquote.
+        """
+        try:
+            if cls.quote_base64:
+                value = value.decode('base64')
+            if cls.serialization_method is not None:
+                value = cls.serialization_method.loads(value)
+            return value
+        except:
+            # unfortunately pickle and other serialization modules can
+            # cause pretty every error here.  if we get one we catch it
+            # and convert it into an UnquoteError
+            raise UnquoteError()
 
     def serialize(self, expires=None):
         """Serialize the secure cookie into a string.
@@ -144,6 +214,9 @@
         If expires is provided, the session will be automatically invalidated
         after expiration when you unserialize it. This provides better
         protection against session cookie theft.
+
+        :param expires: an optional expiration date for the cookie (a
+                        :class:`datetime.datetime` object)
         """
         if self.secret_key is None:
             raise RuntimeError('no secret key defined')
@@ -155,10 +228,10 @@
             self['_expires'] = int(mktime(expires))
         result = []
         mac = hmac(self.secret_key, None, self.hash_method)
-        for key, value in self.iteritems():
+        for key, value in sorted(self.items()):
             result.append('%s=%s' % (
                 url_quote_plus(key),
-                pickle_quote(value)
+                self.quote(value)
             ))
             mac.update('|' + result[-1])
         return '%s?%s' % (
@@ -166,8 +239,14 @@
             '&'.join(result)
         )
 
+    @classmethod
     def unserialize(cls, string, secret_key):
-        """Load the secure cookie from a serialized string."""
+        """Load the secure cookie from a serialized string.
+
+        :param string: the cookie value to unserialize.
+        :param secret_key: the secret key used to serialize the cookie.
+        :return: a new :class:`SecureCookie`.
+        """
         if isinstance(string, unicode):
             string = string.encode('utf-8', 'ignore')
         try:
@@ -200,7 +279,7 @@
             if items is not None and client_hash == mac.digest():
                 try:
                     for key, value in items.iteritems():
-                        items[key] = pickle_unquote(value)
+                        items[key] = cls.unquote(value)
                 except UnquoteError:
                     items = ()
                 else:
@@ -212,22 +291,39 @@
             else:
                 items = ()
         return cls(items, secret_key, False)
-    unserialize = classmethod(unserialize)
 
+    @classmethod
     def load_cookie(cls, request, key='session', secret_key=None):
-        """Loads a SecureCookie from a cookie in request. If the cookie is not
-        set, a new SecureCookie instanced is returned.
+        """Loads a :class:`SecureCookie` from a cookie in request.  If the
+        cookie is not set, a new :class:`SecureCookie` instance is
+        returned.
+
+        :param request: a request object that has a `cookies` attribute
+                        which is a dict of all cookie values.
+        :param key: the name of the cookie.
+        :param secret_key: the secret key used to unquote the cookie.
+                           Always provide the value even though it has
+                           no default!
         """
         data = request.cookies.get(key)
         if not data:
             return SecureCookie(secret_key=secret_key)
         return SecureCookie.unserialize(data, secret_key)
-    load_cookie = classmethod(load_cookie)
 
     def save_cookie(self, response, key='session', expires=None,
                     session_expires=None, max_age=None, path='/', domain=None,
                     secure=None, httponly=False, force=False):
-        """Saves the SecureCookie in a cookie on response."""
+        """Saves the SecureCookie in a cookie on response object.  All
+        parameters that are not described here are forwarded directly
+        to :meth:`~BaseResponse.set_cookie`.
+
+        :param response: a response object that has a
+                         :meth:`~BaseResponse.set_cookie` method.
+        :param key: the name of the cookie.
+        :param session_expires: the expiration date of the secure cookie
+                                stored information.  If this is not provided
+                                the cookie `expires` date is used instead.
+        """
         if force or self.should_save:
             data = self.serialize(session_expires or expires)
             response.set_cookie(key, data, expires=expires, max_age=max_age,
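
A short sketch of the new serialization hooks above: a subclass that stores JSON instead of pickled data.  The class name and secret key are made up for the example, and it assumes simplejson (or the Python 2.6 json module) is importable:

    from werkzeug.contrib.securecookie import SecureCookie

    try:
        import simplejson as json
    except ImportError:
        import json

    class JSONSecureCookie(SecureCookie):
        # illustrative subclass: serialize values with JSON instead of the
        # default pickle module
        serialization_method = json

    SECRET_KEY = 'replace me with os.urandom(20)'   # placeholder secret
    cookie = JSONSecureCookie({'username': u'admin'}, SECRET_KEY)
    value = cookie.serialize()

    restored = JSONSecureCookie.unserialize(value, SECRET_KEY)
    assert restored['username'] == u'admin'
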
--- a/MoinMoin/support/werkzeug/contrib/sessions.py	Fri Feb 27 23:30:37 2009 +0100
+++ b/MoinMoin/support/werkzeug/contrib/sessions.py	Sat Feb 28 00:08:31 2009 +0100
@@ -3,7 +3,7 @@
     werkzeug.contrib.sessions
     ~~~~~~~~~~~~~~~~~~~~~~~~~
 
-    This module contains some helper classes that helps one to add session
+    This module contains some helper classes that help one to add session
     support to a python WSGI application.
 
     Example::
@@ -42,15 +42,14 @@
                 response.set_cookie('cookie_name', request.session.sid)
             return response(environ, start_response)
 
-
-    :copyright: 2007 by Armin Ronacher.
+    :copyright: (c) 2009 by the Werkzeug Team, see AUTHORS for more details.
     :license: BSD, see LICENSE for more details.
 """
 import re
 import os
-from os import path, unlink
+from os import path
 from time import time
-from random import Random, random
+from random import random
 try:
     from hashlib import sha1
 except ImportError:
@@ -142,15 +141,15 @@
             self.should_save and '*' or ''
         )
 
+    @property
     def should_save(self):
         """True if the session should be saved."""
         return self.modified or self.new
-    should_save = property(should_save)
 
 
 class SessionStore(object):
     """Baseclass for all session stores.  The Werkzeug contrib module does not
-    implement any useful stores beside the filesystem store, application
+    implement any useful stores besides the filesystem store, application
     developers are encouraged to create their own stores.
     """
 
@@ -191,7 +190,7 @@
 
 
 class FilesystemSessionStore(SessionStore):
-    """Simple example session store that saves session on the filesystem like
+    """Simple example session store that saves sessions in the filesystem like
     PHP does.
     """
 
@@ -217,6 +216,8 @@
     def delete(self, session):
         fn = self.get_session_filename(session.sid)
         try:
+            # Late import because Google App Engine won't allow os.unlink
+            from os import unlink
             unlink(fn)
         except OSError:
             pass
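
To make the store life cycle concrete, a small sketch using the filesystem store above; the temporary directory and the visited counter are assumptions for the example:

    import tempfile
    from werkzeug.contrib.sessions import FilesystemSessionStore

    # one file per session, PHP-style; a fresh temp directory keeps the
    # example self-contained
    store = FilesystemSessionStore(tempfile.mkdtemp())

    session = store.new()
    session['visited'] = session.get('visited', 0) + 1
    if session.should_save:
        store.save(session)

    # later (e.g. on the next request) the sid comes back in a cookie
    restored = store.get(session.sid)
    assert restored['visited'] == 1
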
--- a/MoinMoin/support/werkzeug/contrib/testtools.py	Fri Feb 27 23:30:37 2009 +0100
+++ b/MoinMoin/support/werkzeug/contrib/testtools.py	Sat Feb 28 00:08:31 2009 +0100
@@ -1,15 +1,15 @@
+# -*- coding: utf-8 -*-
 """
     werkzeug.contrib.testtools
-    ~~~~~~~~~~~~~~~~~~~~~~~~~~~~~
+    ~~~~~~~~~~~~~~~~~~~~~~~~~~
 
-    This Module implements a extended wrappers for simplified Testing
+    This module implements extended wrappers for simplified testing.
 
     `TestResponse`
-        a response wrapper wich adds various cached attributes for
-        simplified assertions on various contenttypes
+        A response wrapper which adds various cached attributes for
+        simplified assertions on various content types.
 
-
-    :copyright: 2007 by Ronny Pfannschmidt.
+    :copyright: (c) 2009 by the Werkzeug Team, see AUTHORS for more details.
     :license: BSD, see LICENSE for more details.
 """
 from werkzeug import Response, cached_property, import_string
@@ -54,7 +54,10 @@
         """Get the result of simplejson.loads if possible."""
         if 'json' not in self.mimetype:
             raise AttributeError('Not a JSON response')
-        from simplejson import loads
+        try:
+            from simplejson import loads
+        except ImportError:
+            from json import loads
         return loads(self.data)
     json = cached_property(json)
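
In practice the wrapper is handed to werkzeug's test Client as the response wrapper; the tiny application below is an assumption made up for the example:

    from werkzeug import Client
    from werkzeug.contrib.testtools import TestResponse

    def application(environ, start_response):
        # hypothetical app under test
        start_response('200 OK', [('Content-Type', 'application/json')])
        return ['{"status": "ok"}']

    client = Client(application, response_wrapper=TestResponse)
    response = client.get('/')
    assert response.json == {'status': 'ok'}
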
 
--- /dev/null	Thu Jan 01 00:00:00 1970 +0000
+++ b/MoinMoin/support/werkzeug/contrib/viewdecorators.py	Sat Feb 28 00:08:31 2009 +0100
@@ -0,0 +1,58 @@
+# -*- coding: utf-8 -*-
+"""
+    werkzeug.contrib.viewdecorators
+    ~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~
+
+    Convenience decorators for view callables to return common responses.
+
+    For details on HTTP status codes, see `RFC 2616`_.
+
+    .. _RFC 2616: http://tools.ietf.org/html/rfc2616
+
+    :copyright: (c) 2006-2009 Jochen Kupperschmidt
+    :license: BSD, see LICENSE for more details.
+"""
+
+from functools import wraps
+
+try:
+    import simplejson as json
+except ImportError:
+    import json
+
+from werkzeug.wrappers import Response
+
+
+def jsonify(func):
+    """Return data as JSON response.
+
+    Data returned by the decorated callable is transformed to JSON and wrapped
+    in a response with the appropriate MIME type (as defined in `RFC 4627`_).
+
+    .. _RFC 4627: http://tools.ietf.org/html/rfc4627
+    """
+    @wraps(func)
+    def wrapper(*args, **kwargs):
+        data = json.dumps(func(*args, **kwargs))
+        return Response(data, mimetype='application/json')
+    return wrapper
+
+def respond_created(func):
+    """Return a ``201 Created`` response.
+
+    The decorated callable is expected to return the URL of the newly created
+    resource.  That URL is then added to the response as the ``Location:`` header.
+    """
+    @wraps(func)
+    def wrapper(*args, **kwargs):
+        url = func(*args, **kwargs)
+        return Response(status=201, headers=[('Location', url)])
+    return wrapper
+
+def respond_no_content(func):
+    """Send a ``204 No Content`` response."""
+    @wraps(func)
+    def wrapper(*args, **kwargs):
+        func(*args, **kwargs)
+        return Response(status=204)
+    return wrapper
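
A short sketch of the decorators above in use; the view functions and their return values are made up for the example:

    from werkzeug.contrib.viewdecorators import jsonify, respond_created

    @jsonify
    def get_status(request):
        # the returned dict is serialized and wrapped in an
        # application/json response by the decorator
        return {'status': 'ok'}

    @respond_created
    def create_item(request):
        # a real view would persist the data; here we only compute the
        # URL of the (hypothetical) new resource
        return '/items/%s' % request.form['name']
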
--- /dev/null	Thu Jan 01 00:00:00 1970 +0000
+++ b/MoinMoin/support/werkzeug/contrib/wrappers.py	Sat Feb 28 00:08:31 2009 +0100
@@ -0,0 +1,162 @@
+# -*- coding: utf-8 -*-
+"""
+    werkzeug.contrib.wrappers
+    ~~~~~~~~~~~~~~~~~~~~~~~~~
+
+    Extra wrappers or mixins contributed by the community.  These wrappers can
+    be mixed into request objects to add extra functionality.
+
+    Example::
+
+        from werkzeug import Request as RequestBase
+        from werkzeug.contrib.wrappers import JSONRequestMixin
+
+        class Request(RequestBase, JSONRequestMixin):
+            pass
+
+    Afterwards this request object provides the extra functionality of the
+    :class:`JSONRequestMixin`.
+
+    :copyright: (c) 2009 by the Werkzeug Team, see AUTHORS for more details.
+    :license: BSD, see LICENSE for more details.
+"""
+from werkzeug.exceptions import BadRequest
+from werkzeug.utils import cached_property
+from werkzeug._internal import _decode_unicode
+try:
+    from simplejson import loads
+except ImportError:
+    from json import loads
+
+
+class JSONRequestMixin(object):
+    """Add json method to a request object.  This will parse the input data
+    through simplejson if possible.
+
+    :exc:`~werkzeug.exceptions.BadRequest` will be raised if the content-type
+    is not json or if the data itself cannot be parsed as json.
+    """
+
+    @cached_property
+    def json(self):
+        """Get the result of simplejson.loads if possible."""
+        if 'json' not in self.environ.get('CONTENT_TYPE', ''):
+            raise BadRequest('Not a JSON request')
+        try:
+            return loads(self.data)
+        except Exception:
+            raise BadRequest('Unable to read JSON request')
+
+
+class ProtobufRequestMixin(object):
+    """Add protobuf parsing method to a request object.  This will parse the
+    input data through `protobuf`_ if possible.
+
+    :exc:`~werkzeug.exceptions.BadRequest` will be raised if the content-type
+    is not protobuf or if the data itself cannot be parsed properly.
+
+    .. _protobuf: http://code.google.com/p/protobuf/
+    """
+
+    #: by default the :class:`ProtobufRequestMixin` will raise a
+    #: :exc:`~werkzeug.exceptions.BadRequest` if the object is not
+    #: initialized.  You can bypass that check by setting this
+    #: attribute to `False`.
+    protobuf_check_initialization = True
+
+    def parse_protobuf(self, proto_type):
+        """Parse the data into an instance of proto_type."""
+        if 'protobuf' not in self.environ.get('CONTENT_TYPE', ''):
+            raise BadRequest('Not a Protobuf request')
+
+        obj = proto_type()
+        try:
+            obj.ParseFromString(self.data)
+        except Exception:
+            raise BadRequest("Unable to parse Protobuf request")
+
+        # Fail if not all required fields are set
+        if self.protobuf_check_initialization and not obj.IsInitialized():
+            raise BadRequest("Partial Protobuf request")
+
+        return obj
+
+
+class RoutingArgsRequestMixin(object):
+    """This request mixin adds support for the wsgiorg routing args
+    `specification`_.
+
+    .. _specification: http://www.wsgi.org/wsgi/Specifications/routing_args
+    """
+
+    def _get_routing_args(self):
+        return self.environ.get('wsgiorg.routing_args', ((), {}))[0]
+
+    def _set_routing_args(self, value):
+        if self.shallow:
+            raise RuntimeError('A shallow request tried to modify the WSGI '
+                               'environment.  If you really want to do that, '
+                               'set `shallow` to False.')
+        self.environ['wsgiorg.routing_args'] = (value, self.routing_vars)
+
+    routing_args = property(_get_routing_args, _set_routing_args, doc='''
+        The positional URL arguments as `tuple`.''')
+    del _get_routing_args, _set_routing_args
+
+    def _get_routing_vars(self):
+        rv = self.environ.get('wsgiorg.routing_args')
+        if rv is not None:
+            return rv[1]
+        rv = {}
+        if not self.shallow:
+            self.routing_vars = rv
+        return rv
+
+    def _set_routing_vars(self, value):
+        if self.shallow:
+            raise RuntimeError('A shallow request tried to modify the WSGI '
+                               'environment.  If you really want to do that, '
+                               'set `shallow` to False.')
+        self.environ['wsgiorg.routing_args'] = (self.routing_args, value)
+
+    routing_vars = property(_get_routing_vars, _set_routing_vars, doc='''
+        The keyword URL arguments as `dict`.''')
+    del _get_routing_vars, _set_routing_vars
+
+
+class ReverseSlashBehaviorRequestMixin(object):
+    """This mixin reverses the trailing slash behavior of :attr:`script_root`
+    and :attr:`path`.  This makes it possible to use :func:`~urlparse.urljoin`
+    directly on the paths.
+
+    Because it changes the behavior of :class:`Request` this class has to be
+    mixed in *before* the actual request class::
+
+        class MyRequest(ReverseSlashBehaviorRequestMixin, Request):
+            pass
+
+    This example shows the differences (for an application mounted on
+    `/application` and the request going to `/application/foo/bar`):
+
+        +---------------+-------------------+---------------------+
+        |               | normal behavior   | reverse behavior    |
+        +===============+===================+=====================+
+        | `script_root` | ``/application``  | ``/application/``   |
+        +---------------+-------------------+---------------------+
+        | `path`        | ``/foo/bar``      | ``foo/bar``         |
+        +---------------+-------------------+---------------------+
+    """
+
+    @cached_property
+    def path(self):
+        """Requested path as unicode.  This works a bit like the regular path
+        info in the WSGI environment but will not include a leading slash.
+        """
+        path = (self.environ.get('PATH_INFO') or '').lstrip('/')
+        return _decode_unicode(path, self.charset, self.encoding_errors)
+
+    @cached_property
+    def script_root(self):
+        """The root path of the script includling a trailing slash."""
+        path = (self.environ.get('SCRIPT_NAME') or '').rstrip('/') + '/'
+        return _decode_unicode(path, self.charset, self.encoding_errors)
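
To see the JSON mixin from the module docstring in action, a sketch that fakes a JSON POST with create_environ from the reworked test module; the path and payload are made up for the example:

    from werkzeug import Request as RequestBase, create_environ
    from werkzeug.contrib.wrappers import JSONRequestMixin

    class Request(RequestBase, JSONRequestMixin):
        pass

    # build a synthetic WSGI environment carrying a JSON body
    environ = create_environ('/answer', method='POST',
                             data='{"answer": 42}',
                             content_type='application/json')

    request = Request(environ)
    assert request.json == {'answer': 42}
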
--- /dev/null	Thu Jan 01 00:00:00 1970 +0000
+++ b/MoinMoin/support/werkzeug/datastructures.py	Sat Feb 28 00:08:31 2009 +0100
@@ -0,0 +1,1878 @@
+# -*- coding: utf-8 -*-
+"""
+    werkzeug.datastructures
+    ~~~~~~~~~~~~~~~~~~~~~~~
+
+    This module provides mixins and classes with an immutable interface.
+
+    :copyright: Copyright 2009 by the Werkzeug Team, see AUTHORS for more details.
+    :license: BSD, see LICENSE for more details.
+"""
+import re
+import codecs
+from werkzeug._internal import _proxy_repr, _missing
+
+
+_locale_delim_re = re.compile(r'[_-]')
+
+
+def is_immutable(self):
+    raise TypeError('%r objects are immutable' % self.__class__.__name__)
+
+
+class ImmutableListMixin(object):
+    """Makes a :class:`list` immutable.
+
+    .. versionadded:: 0.5
+
+    :private:
+    """
+
+    def __delitem__(self, key):
+        is_immutable(self)
+
+    def __delslice__(self, i, j):
+        is_immutable(self)
+
+    def __iadd__(self, other):
+        is_immutable(self)
+    __imul__ = __iadd__
+
+    def __setitem__(self, key, value):
+        is_immutable(self)
+
+    def __setslice__(self, i, j, value):
+        is_immutable(self)
+
+    def append(self, item):
+        is_immutable(self)
+    remove = append
+
+    def extend(self, iterable):
+        is_immutable(self)
+
+    def insert(self, pos, value):
+        is_immutable(self)
+
+    def pop(self, index=-1):
+        is_immutable(self)
+
+    def reverse(self):
+        is_immutable(self)
+
+    def sort(self, cmp=None, key=None, reverse=None):
+        is_immutable(self)
+
+
+class ImmutableList(ImmutableListMixin, list):
+    """An immutable :class:`list`.
+
+    .. versionadded:: 0.5
+
+    :private:
+    """
+
+    __repr__ = _proxy_repr(list)
+
+
+class ImmutableDictMixin(object):
+    """Makes a :class:`dict` immutable.
+
+    .. versionadded:: 0.5
+
+    :private:
+    """
+
+    def setdefault(self, key, default=None):
+        is_immutable(self)
+
+    def update(self, *args, **kwargs):
+        is_immutable(self)
+
+    def pop(self, key, default=None):
+        is_immutable(self)
+
+    def popitem(self):
+        is_immutable(self)
+
+    def __setitem__(self, key, value):
+        is_immutable(self)
+
+    def __delitem__(self, key):
+        is_immutable(self)
+
+    def clear(self):
+        is_immutable(self)
+
+
+class ImmutableMultiDictMixin(ImmutableDictMixin):
+    """Makes a :class:`MultiDict` immutable.
+
+    .. versionadded:: 0.5
+
+    :private:
+    """
+
+    def popitemlist(self):
+        is_immutable(self)
+
+    def poplist(self, key):
+        is_immutable(self)
+
+    def setlist(self, key, new_list):
+        is_immutable(self)
+
+    def setlistdefault(self, key, default_list=None):
+        is_immutable(self)
+
+
+class UpdateDictMixin(object):
+    """Makes dicts call `self.on_update` on modifications."""
+
+    on_update = None
+
+    def calls_update(name):
+        def oncall(self, *args, **kw):
+            rv = getattr(super(UpdateDictMixin, self), name)(*args, **kw)
+            if self.on_update is not None:
+                self.on_update(self)
+            return rv
+        oncall.__name__ = name
+        return oncall
+
+    __setitem__ = calls_update('__setitem__')
+    __delitem__ = calls_update('__delitem__')
+    clear = calls_update('clear')
+    popitem = calls_update('popitem')
+    setdefault = calls_update('setdefault')
+    update = calls_update('update')
+    del calls_update
+
+
+class TypeConversionDict(dict):
+    """Works like a regular dict but the :meth:`get` method can perform
+    type conversions.  :class:`MultiDict` and :class:`CombinedMultiDict`
+    are subclasses of this class and provide the same feature.
+
+    .. versionadded:: 0.5
+    """
+
+    def get(self, key, default=None, type=None):
+        """Return the default value if the requested data doesn't exist.
+        If `type` is provided and is a callable it should convert the value,
+        return it or raise a :exc:`ValueError` if that is not possible.  In
+        this case the function will return the default as if the value was not
+        found:
+
+        >>> d = TypeConversionDict(foo='42', bar='blub')
+        >>> d.get('foo', type=int)
+        42
+        >>> d.get('bar', -1, type=int)
+        -1
+
+        :param key: The key to be looked up.
+        :param default: The default value to be returned if the key can't
+                        be looked up.  If not further specified `None` is
+                        returned.
+        :param type: A callable that is used to cast the value in the
+                     :class:`MultiDict`.  If a :exc:`ValueError` is raised
+                     by this callable the default value is returned.
+        """
+        try:
+            rv = self[key]
+            if type is not None:
+                rv = type(rv)
+        except (KeyError, ValueError):
+            rv = default
+        return rv
+
+
+class ImmutableTypeConversionDict(ImmutableDictMixin, TypeConversionDict):
+    """Works like a :class:`TypeConversionDict` but does not support
+    modifications.
+
+    .. versionadded:: 0.5
+    """
+
+
+class MultiDict(TypeConversionDict):
+    """A :class:`MultiDict` is a dictionary subclass customized to deal with
+    multiple values for the same key which is for example used by the parsing
+    functions in the wrappers.  This is necessary because some HTML form
+    elements pass multiple values for the same key.
+
+    :class:`MultiDict` implements all standard dictionary methods.
+    Internally, it saves all values for a key as a list, but the standard dict
+    access methods will only return the first value for a key. If you want to
+    gain access to the other values, too, you have to use the `list` methods as
+    explained below.
+
+    Basic Usage:
+
+    >>> d = MultiDict([('a', 'b'), ('a', 'c')])
+    >>> d
+    MultiDict([('a', 'b'), ('a', 'c')])
+    >>> d['a']
+    'b'
+    >>> d.getlist('a')
+    ['b', 'c']
+    >>> 'a' in d
+    True
+
+    It behaves like a normal dict; thus all dict functions will only return the
+    first value when multiple values for one key are found.
+
+    From Werkzeug 0.3 onwards, the `KeyError` raised by this class is also a
+    subclass of the :exc:`~exceptions.BadRequest` HTTP exception and will
+    render a page for a ``400 BAD REQUEST`` if caught in a catch-all for HTTP
+    exceptions.
+
+    A :class:`MultiDict` can be constructed from an iterable of
+    ``(key, value)`` tuples, a dict, a :class:`MultiDict` or from Werkzeug 0.2
+    onwards some keyword parameters.
+
+    :param mapping: the initial value for the :class:`MultiDict`.  Either a
+                    regular dict, an iterable of ``(key, value)`` tuples
+                    or `None`.
+    """
+
+    # internal list type.  This is an internal interface!  do not use.
+    # it's only used in methods that do not modify the multi dict so that
+    # ImmutableMultiDict can use it without much hassle.
+    _list_type = list
+
+    # the key error this class raises.  Because of circular dependencies
+    # with the http exception module this class is created at the end of
+    # this module.
+    KeyError = None
+
+    def __init__(self, mapping=None):
+        if isinstance(mapping, MultiDict):
+            dict.__init__(self, ((k, self._list_type(v))
+                          for k, v in mapping.lists()))
+        elif isinstance(mapping, dict):
+            tmp = {}
+            for key, value in mapping.iteritems():
+                if isinstance(value, (tuple, list)):
+                    value = self._list_type(value)
+                else:
+                    value = self._list_type([value])
+                tmp[key] = value
+            dict.__init__(self, tmp)
+        else:
+            tmp = {}
+            for key, value in mapping or ():
+                tmp.setdefault(key, []).append(value)
+            dict.__init__(self, (dict((k, self._list_type(v))
+                                 for k, v in tmp.iteritems())))
+
+    def __getitem__(self, key):
+        """Return the first data value for this key;
+        raises KeyError if not found.
+
+        :param key: The key to be looked up.
+        :raise KeyError: if the key does not exist.
+        """
+        if key in self:
+            return dict.__getitem__(self, key)[0]
+        raise self.KeyError(key)
+
+    def __setitem__(self, key, value):
+        """Set an item as list."""
+        dict.__setitem__(self, key, [value])
+
+    def getlist(self, key, type=None):
+        """Return the list of items for a given key. If that key is not in the
+        `MultiDict`, the return value will be an empty list.  Just as `get`
+        `getlist` accepts a `type` parameter.  All items will be converted
+        with the callable defined there.
+
+        :param key: The key to be looked up.
+        :param type: A callable that is used to cast the value in the
+                     :class:`MultiDict`.  If a :exc:`ValueError` is raised
+                     by this callable the value will be removed from the list.
+        :return: a :class:`list` of all the values for the key.
+        """
+        try:
+            rv = dict.__getitem__(self, key)
+        except KeyError:
+            return self._list_type()
+        if type is None:
+            return rv
+        result = []
+        for item in rv:
+            try:
+                result.append(type(item))
+            except ValueError:
+                pass
+        return self._list_type(result)
+
+    def setlist(self, key, new_list):
+        """Remove the old values for a key and add new ones.  Note that the list
+        you pass in will be shallow-copied before it is inserted in
+        the dictionary.
+
+        >>> d = MultiDict()
+        >>> d.setlist('foo', ['1', '2'])
+        >>> d['foo']
+        '1'
+        >>> d.getlist('foo')
+        ['1', '2']
+
+        :param key: The key for which the values are set.
+        :param new_list: An iterable with the new values for the key.  Old values
+                         are removed first.
+        """
+        dict.__setitem__(self, key, list(new_list))
+
+    def setdefault(self, key, default=None):
+        """Returns the value for the key if it is in the dict, otherwise it
+        returns `default` and sets that value for `key`.
+
+        :param key: The key to be looked up.
+        :param default: The default value to be returned if the key is not
+                        in the dict.  If not further specified it's `None`.
+        """
+        if key not in self:
+            self[key] = default
+        else:
+            default = self[key]
+        return default
+
+    def setlistdefault(self, key, default_list=()):
+        """Like `setdefault` but sets multiple values.
+
+        :param key: The key to be looked up.
+        :param default_list: An iterable of default values.  It is either
+                             copied (in case it was a list) or converted
+                             into a list before being returned.
+        :return: a :class:`list`
+        """
+        if key not in self:
+            default_list = list(default_list)
+            dict.__setitem__(self, key, default_list)
+        else:
+            default_list = self.getlist(key)
+        return default_list
+
+    def items(self):
+        """Return a list of ``(key, value)`` pairs, where value is the first
+        item in the list associated with the key.
+
+        :return: a :class:`list`
+        """
+        return [(key, self[key]) for key in self.iterkeys()]
+
+    #: Return a list of ``(key, values)`` pairs, where values is the list of
+    #: all values associated with the key.
+    #:
+    #: :return: a :class:`list`
+    lists = dict.items
+
+    def values(self):
+        """Returns a list of the first value on every key's value list.
+
+        :return: a :class:`list`.
+        """
+        return [self[key] for key in self.iterkeys()]
+
+    #: Return a list of all values associated with a key.  Zipping
+    #: :meth:`keys` and this is the same as calling :meth:`lists`:
+    #:
+    #: >>> d = MultiDict({"foo": [1, 2, 3]})
+    #: >>> zip(d.keys(), d.listvalues()) == d.lists()
+    #: True
+    #:
+    #: :return: a :class:`list`
+    listvalues = dict.values
+
+    def iteritems(self):
+        """Like :meth:`items` but returns an iterator."""
+        for key, values in dict.iteritems(self):
+            yield key, values[0]
+
+    #: Like :meth:`lists` but returns an iterator of ``(key, values)``
+    #: pairs.
+    iterlists = dict.iteritems
+
+    def itervalues(self):
+        """Like :meth:`values` but returns an iterator."""
+        for values in dict.itervalues(self):
+            yield values[0]
+
+    #: Like :meth:`listvalues` but returns an iterator.
+    iterlistvalues = dict.itervalues
+
+    def copy(self):
+        """Return a shallow copy of this object."""
+        return self.__class__(self)
+
+    def to_dict(self, flat=True):
+        """Return the contents as regular dict.  If `flat` is `True` the
+        returned dict will only have the first item present, if `flat` is
+        `False` all values will be returned as lists.
+
+        :param flat: If set to `False` the dict returned will have lists
+                     with all the values in it.  Otherwise it will only
+                     contain the first value for each key.
+        :return: a :class:`dict`
+        """
+        if flat:
+            return dict(self.iteritems())
+        return dict(self)
+
+    def update(self, other_dict):
+        """update() extends rather than replaces existing key lists."""
+        if isinstance(other_dict, MultiDict):
+            for key, value_list in other_dict.iterlists():
+                self.setlistdefault(key, []).extend(value_list)
+        elif isinstance(other_dict, dict):
+            for key, value in other_dict.items():
+                self.setlistdefault(key, []).append(value)
+        else:
+            for key, value in other_dict:
+                self.setlistdefault(key, []).append(value)
+
+    def pop(self, key, default=_missing):
+        """Pop the first item for a list on the dict.  Afterwards the
+        key is removed from the dict, so additional values are discarded:
+
+        >>> d = MultiDict({"foo": [1, 2, 3]})
+        >>> d.pop("foo")
+        1
+        >>> "foo" in d
+        False
+
+        :param key: the key to pop.
+        :param default: if provided the value to return if the key was
+                        not in the dictionary.
+        """
+        if default is not _missing:
+            return dict.pop(self, key, default)
+        try:
+            return dict.pop(self, key)[0]
+        except KeyError, e:
+            raise self.KeyError(str(e))
+
+    def popitem(self):
+        """Pop an item from the dict."""
+        try:
+            item = dict.popitem(self)
+            return (item[0], item[1][0])
+        except KeyError, e:
+            raise self.KeyError(str(e))
+
+    def poplist(self, key):
+        """Pop the list for a key from the dict.  If the key is not in the dict
+        an empty list is returned.
+
+        .. versionchanged:: 0.5
+           If the key no longer exists an empty list is returned instead of
+           raising an error.
+        """
+        return dict.pop(self, key, [])
+
+    def popitemlist(self):
+        """Pop a ``(key, list)`` tuple from the dict."""
+        try:
+            return dict.popitem(self)
+        except KeyError, e:
+            raise self.KeyError(str(e))
+
+    def __repr__(self):
+        tmp = []
+        for key, values in self.iterlists():
+            for value in values:
+                tmp.append((key, value))
+        return '%s(%r)' % (self.__class__.__name__, tmp)
+
+
+class Headers(object):
+    """An object that stores some headers.  It has a dict-like interface
+    but is ordered and can store the same keys multiple times.
+
+    This data structure is useful if you want a nicer way to handle WSGI
+    headers which are stored as tuples in a list.
+
+    From Werkzeug 0.3 onwards, the :exc:`KeyError` raised by this class is
+    also a subclass of the :class:`~exceptions.BadRequest` HTTP exception
+    and will render a page for a ``400 BAD REQUEST`` if caught in a
+    catch-all for HTTP exceptions.
+
+    Headers is mostly compatible with the Python :class:`wsgiref.headers.Headers`
+    class, with the exception of `__getitem__`.  :mod:`wsgiref` will return
+    `None` for ``headers['missing']``, whereas :class:`Headers` will raise
+    a :class:`KeyError`.
+
+    To create a new :class:`Headers` object pass it a list or dict of headers
+    which are used as default values.  This does not reuse the list passed
+    to the constructor for internal usage.  To create a :class:`Headers`
+    object that uses a list or list-like object as internal storage, you
+    can use the :meth:`linked` class method.
+
+    :param defaults: The list of default values for the :class:`Headers`.
+    """
+
+    # the key error this class raises.  Because of circular dependencies
+    # with the http exception module this class is created at the end of
+    # this module.
+    KeyError = None
+
+    def __init__(self, defaults=None, _list=None):
+        if _list is None:
+            _list = []
+        self._list = _list
+        if defaults is not None:
+            self.extend(defaults)
+
+    @classmethod
+    def linked(cls, headerlist):
+        """Create a new :class:`Headers` object that uses the list of headers
+        passed as internal storage:
+
+        >>> headerlist = [('Content-Length', '40')]
+        >>> headers = Headers.linked(headerlist)
+        >>> headers.add('Content-Type', 'text/html')
+        >>> headerlist
+        [('Content-Length', '40'), ('Content-Type', 'text/html')]
+
+        :param headerlist: The list of headers the class is linked to.
+        :return: new linked :class:`Headers` object.
+        """
+        return cls(_list=headerlist)
+
+    def __getitem__(self, key, _index_operation=True):
+        if _index_operation:
+            if isinstance(key, (int, long)):
+                return self._list[key]
+            elif isinstance(key, slice):
+                return self.__class__(self._list[key])
+        ikey = key.lower()
+        for k, v in self._list:
+            if k.lower() == ikey:
+                return v
+        raise self.KeyError(key)
+
+    def __eq__(self, other):
+        return other.__class__ is self.__class__ and \
+               set(other._list) == set(self._list)
+
+    def __ne__(self, other):
+        return not self.__eq__(other)
+
+    def get(self, key, default=None, type=None):
+        """Return the default value if the requested data doesn't exist.
+        If `type` is provided and is a callable it should convert the value,
+        return it or raise a :exc:`ValueError` if that is not possible.  In
+        this case the function will return the default as if the value was not
+        found:
+
+        >>> d = Headers([('Content-Length', '42')])
+        >>> d.get('Content-Length', type=int)
+        42
+
+        If a headers object is bound you must not add unicode strings
+        because no encoding takes place.
+
+        :param key: The key to be looked up.
+        :param default: The default value to be returned if the key can't
+                        be looked up.  If not further specified `None` is
+                        returned.
+        :param type: A callable that is used to cast the value in the
+                     :class:`Headers`.  If a :exc:`ValueError` is raised
+                     by this callable the default value is returned.
+        """
+        try:
+            rv = self.__getitem__(key, _index_operation=False)
+        except KeyError:
+            return default
+        if type is None:
+            return rv
+        try:
+            return type(rv)
+        except ValueError:
+            return default
+
+    def getlist(self, key, type=None):
+        """Return the list of items for a given key. If that key is not in the
+        :class:`Headers`, the return value will be an empty list.  Just as
+        :meth:`get`, :meth:`getlist` accepts a `type` parameter.  All items will
+        be converted with the callable defined there.
+
+        :param key: The key to be looked up.
+        :param type: A callable that is used to cast the value in the
+                     :class:`Headers`.  If a :exc:`ValueError` is raised
+                     by this callable the value will be removed from the list.
+        :return: a :class:`list` of all the values for the key.
+        """
+        ikey = key.lower()
+        result = []
+        for k, v in self:
+            if k.lower() == ikey:
+                if type is not None:
+                    try:
+                        v = type(v)
+                    except ValueError:
+                        continue
+                result.append(v)
+        return result
+
+    def get_all(self, name):
+        """Return a list of all the values for the named field.
+
+        This method is compatible with the :mod:`wsgiref`
+        :meth:`~wsgiref.headers.Headers.get_all` method.
+        """
+        return self.getlist(name)
+
+    def iteritems(self, lower=False):
+        for key, value in self:
+            if lower:
+                key = key.lower()
+            yield key, value
+
+    def iterkeys(self, lower=False):
+        for key, _ in self.iteritems(lower):
+            yield key
+
+    def itervalues(self):
+        for _, value in self.iteritems():
+            yield value
+
+    def keys(self, lower=False):
+        return list(self.iterkeys(lower))
+
+    def values(self):
+        return list(self.itervalues())
+
+    def items(self, lower=False):
+        return list(self.iteritems(lower))
+
+    def extend(self, iterable):
+        """Extend the headers with a dict or an iterable yielding keys and
+        values.
+        """
+        if isinstance(iterable, dict):
+            for key, value in iterable.iteritems():
+                if isinstance(value, (tuple, list)):
+                    for v in value:
+                        self.add(key, v)
+                else:
+                    self.add(key, value)
+        else:
+            for key, value in iterable:
+                self.add(key, value)
+
+    def __delitem__(self, key, _index_operation=True):
+        if _index_operation and isinstance(key, (int, long, slice)):
+            del self._list[key]
+            return
+        key = key.lower()
+        new = []
+        for k, v in self._list:
+            if k.lower() != key:
+                new.append((k, v))
+        self._list[:] = new
+
+    def remove(self, key):
+        """Remove a key.
+
+        :param key: The key to be removed.
+        """
+        return self.__delitem__(key, _index_operation=False)
+
+    def pop(self, key=None, default=_missing):
+        """Removes and returns a key or index.
+
+        :param key: The key to be popped.  If this is an integer the item at
+                    that position is removed; if it's a string the value
+                    for that key is removed.  If the key is omitted or
+                    `None` the last item is removed.
+        :return: an item.
+        """
+        if key is None:
+            return self._list.pop()
+        if isinstance(key, (int, long)):
+            return self._list.pop(key)
+        try:
+            rv = self[key]
+            self.remove(key)
+        except KeyError:
+            if default is not _missing:
+                return default
+            raise
+        return rv
+
+    def popitem(self):
+        """Removes a key or index and returns a (key, value) item."""
+        return self.pop()
+
+    def __contains__(self, key):
+        """Check if a key is present."""
+        try:
+            self.__getitem__(key, _index_operation=False)
+        except KeyError:
+            return False
+        return True
+
+    has_key = __contains__
+
+    def __iter__(self):
+        """Yield ``(key, value)`` tuples."""
+        return iter(self._list)
+
+    def __len__(self):
+        return len(self._list)
+
+    def add(self, _key, _value, **kw):
+        """Add a new header tuple to the list.
+
+        Keyword arguments can specify additional parameters for the header
+        value, with underscores converted to dashes::
+
+        >>> d = Headers()
+        >>> d.add('Content-Type', 'text/plain')
+        >>> d.add('Content-Disposition', 'attachment', filename='foo.png')
+
+        The keyword argument dumping uses :func:`dump_options_header`
+        behind the scenes.
+
+        .. versionadded:: 0.4.1
+            keyword arguments were added for :mod:`wsgiref` compatibility.
+        """
+        if kw:
+            _value = dump_options_header(_value, dict((k.replace('_', '-'), v)
+                                                      for k, v in kw.items()))
+        self._list.append((_key, _value))
+
+    def add_header(self, _key, _value, **_kw):
+        """Add a new header tuple to the list.
+
+        An alias for :meth:`add` for compatibility with the :mod:`wsgiref`
+        :meth:`~wsgiref.headers.Headers.add_header` method.
+        """
+        self.add(_key, _value, **_kw)
+
+    def clear(self):
+        """Clears all headers."""
+        del self._list[:]
+
+    def set(self, key, value):
+        """Remove all header tuples for `key` and add a new one.  The newly
+        added key either appears at the end of the list if there was no
+        entry or replaces the first one.
+
+        :param key: The key to be inserted.
+        :param value: The value to be inserted.
+        """
+        lc_key = key.lower()
+        for idx, (old_key, old_value) in enumerate(self._list):
+            if old_key.lower() == lc_key:
+                # replace first occurrence
+                self._list[idx] = (key, value)
+                break
+        else:
+            return self.add(key, value)
+        self._list[idx + 1:] = [(k, v) for k, v in self._list[idx + 1:]
+                                if k.lower() != lc_key]
+
+    def setdefault(self, key, value):
+        """Returns the value for the key if it is in the dict, otherwise it
+        returns `default` and sets that value for `key`.
+
+        :param key: The key to be looked up.
+        :param default: The default value to be returned if the key is not
+                        in the dict.  If not further specified it's `None`.
+        """
+        if key in self:
+            return self[key]
+        self.set(key, value)
+        return value
+
+    def __setitem__(self, key, value):
+        """Like :meth:`set` but also supports index/slice based setting."""
+        if isinstance(key, (slice, int, long)):
+            self._list[key] = value
+        else:
+            self.set(key, value)
+
+    def to_list(self, charset='utf-8'):
+        """Convert the headers into a list and converts the unicode header
+        items to the specified charset.
+
+        :return: list
+        """
+        result = []
+        for k, v in self:
+            if isinstance(v, unicode):
+                v = v.encode(charset)
+            else:
+                v = str(v)
+            result.append((k, v))
+        return result
+
+    def copy(self):
+        return self.__class__(self._list)
+
+    def __copy__(self):
+        return self.copy()
+
+    def __str__(self, charset='utf-8'):
+        """Returns formatted headers suitable for HTTP transmission."""
+        strs = []
+        for key, value in self.to_list(charset):
+            strs.append('%s: %s' % (key, value))
+        strs.append('\r\n')
+        return '\r\n'.join(strs)
+
+    def __repr__(self):
+        return '%s(%r)' % (
+            self.__class__.__name__,
+            list(self)
+        )
+
+
+class ImmutableHeadersMixin(object):
+    """Makes a :class:`Headers` immutable.
+
+    .. versionadded:: 0.5
+    """
+
+    def __delitem__(self, key):
+        is_immutable(self)
+
+    def __setitem__(self, key, value):
+        is_immutable(self)
+    set = __setitem__
+
+    def add(self, item):
+        is_immutable(self)
+    remove = add_header = add
+
+    def extend(self, iterable):
+        is_immutable(self)
+
+    def insert(self, pos, value):
+        is_immutable(self)
+
+    def pop(self, index=-1):
+        is_immutable(self)
+
+    def popitem(self):
+        is_immutable(self)
+
+    def setdefault(self, key, default):
+        is_immutable(self)
+
+
+class EnvironHeaders(ImmutableHeadersMixin, Headers):
+    """Read only version of the headers from a WSGI environment.  This
+    provides the same interface as `Headers` and is constructed from
+    a WSGI environment.
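+
+    A small illustrative example with a hand-built environ dict:
+
+    >>> headers = EnvironHeaders({'CONTENT_TYPE': 'text/html',
+    ...                           'HTTP_X_FOO': 'bar'})
+    >>> headers['Content-Type']
+    'text/html'
+    >>> headers['X-Foo']
+    'bar'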
+
+    From Werkzeug 0.3 onwards, the `KeyError` raised by this class is also a
+    subclass of the :exc:`~exceptions.BadRequest` HTTP exception and will
+    render a page for a ``400 BAD REQUEST`` if caught in a catch-all for
+    HTTP exceptions.
+    """
+
+    def __init__(self, environ):
+        self.environ = environ
+
+    @classmethod
+    def linked(cls, environ):
+        raise TypeError('%r object is always linked to environment, '
+                        'no separate initializer' % cls.__name__)
+
+    def __eq__(self, other):
+        return self is other
+
+    def __getitem__(self, key, _index_operation=False):
+        # _index_operation is a no-op for this class as there is no index,
+        # but it is accepted because get() passes it.
+        key = key.upper().replace('-', '_')
+        if key in ('CONTENT_TYPE', 'CONTENT_LENGTH'):
+            return self.environ[key]
+        return self.environ['HTTP_' + key]
+
+    def __iter__(self):
+        for key, value in self.environ.iteritems():
+            if key.startswith('HTTP_'):
+                yield key[5:].replace('_', '-').title(), value
+            elif key in ('CONTENT_TYPE', 'CONTENT_LENGTH'):
+                yield key.replace('_', '-').title(), value
+
+    def copy(self):
+        raise TypeError('cannot create %r copies' % self.__class__.__name__)
+
+
+class CombinedMultiDict(ImmutableMultiDictMixin, MultiDict):
+    """A read only :class:`MultiDict` that you can pass multiple :class:`MultiDict`
+    instances as sequence and it will combine the return values of all wrapped
+    dicts:
+
+    >>> from werkzeug import MultiDict, CombinedMultiDict
+    >>> post = MultiDict([('foo', 'bar')])
+    >>> get = MultiDict([('blub', 'blah')])
+    >>> combined = CombinedMultiDict([get, post])
+    >>> combined['foo']
+    'bar'
+    >>> combined['blub']
+    'blah'
+
+    This works for all read operations and will raise a `TypeError` for
+    methods that usually change data, which is not possible for this class.
+
+    From Werkzeug 0.3 onwards, the `KeyError` raised by this class is also a
+    subclass of the :exc:`~exceptions.BadRequest` HTTP exception and will
+    render a page for a ``400 BAD REQUEST`` if caught in a catch-all for HTTP
+    exceptions.
+    """
+
+    _list_type = ImmutableList
+
+    def __init__(self, dicts=None):
+        self.dicts = dicts or []
+
+    @classmethod
+    def fromkeys(cls):
+        raise TypeError('cannot create %r instances by fromkeys' %
+                        cls.__name__)
+
+    def __getitem__(self, key):
+        for d in self.dicts:
+            if key in d:
+                return d[key]
+        raise self.KeyError(key)
+
+    def get(self, key, default=None, type=None):
+        for d in self.dicts:
+            if key in d:
+                if type is not None:
+                    try:
+                        return type(d[key])
+                    except ValueError:
+                        continue
+                return d[key]
+        return default
+
+    def getlist(self, key, type=None):
+        rv = []
+        for d in self.dicts:
+            rv.extend(d.getlist(key, type))
+        return self._list_type(rv)
+
+    def keys(self):
+        rv = set()
+        for d in self.dicts:
+            rv.update(d.keys())
+        return list(rv)
+
+    def iteritems(self):
+        found = set()
+        for d in self.dicts:
+            for key, value in d.iteritems():
+                if key not in found:
+                    found.add(key)
+                    yield key, value
+
+    def itervalues(self):
+        for key, value in self.iteritems():
+            yield value
+
+    def values(self):
+        return list(self.itervalues())
+
+    def items(self):
+        return list(self.iteritems())
+
+    def iterlists(self):
+        rv = {}
+        for d in self.dicts:
+            for key, values in d.iterlists():
+                rv.setdefault(key, []).extend(values)
+        for key, values in rv.iteritems():
+            yield key, ImmutableList(values)
+
+    def lists(self):
+        return list(self.iterlists())
+
+    def iterlistvalues(self):
+        # lists() yields (key, values) pairs; the value lists are item 1
+        return (x[1] for x in self.lists())
+
+    def listvalues(self):
+        return list(self.iterlistvalues())
+
+    def iterkeys(self):
+        return iter(self.keys())
+
+    __iter__ = iterkeys
+
+    def copy(self):
+        """Return a shallow copy of this object."""
+        return self.__class__(self.dicts[:])
+
+    def to_dict(self, flat=True):
+        """Return the contents as regular dict.  If `flat` is `True` the
+        returned dict will only have the first item present, if `flat` is
+        `False` all values will be returned as lists.
+
+        :param flat: If set to `False` the dict returned will have lists
+                     with all the values in it.  Otherwise it will only
+                     contain the first item for each key.
+        :return: a :class:`dict`
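+
+        A small sketch (this assumes the usual :class:`MultiDict`
+        ``to_dict`` semantics for the wrapped dicts):
+
+        >>> d = CombinedMultiDict([MultiDict([('a', '1'), ('a', '2')])])
+        >>> d.to_dict()
+        {'a': '1'}
+        >>> d.to_dict(flat=False)
+        {'a': ['1', '2']}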
+        """
+        rv = {}
+        for d in reversed(self.dicts):
+            rv.update(d.to_dict(flat))
+        return rv
+
+    def __len__(self):
+        return len(self.keys())
+
+    def __contains__(self, key):
+        for d in self.dicts:
+            if key in d:
+                return True
+        return False
+
+    has_key = __contains__
+
+    def __repr__(self):
+        return '%s(%r)' % (self.__class__.__name__, self.dicts)
+
+
+class FileMultiDict(MultiDict):
+    """A special :class:`MultiDict` that has convenience methods to add
+    files to it.  This is used for :class:`EnvironBuilder` and generally
+    useful for unit testing.
+
+    .. versionadded:: 0.5
+    """
+
+    def add_file(self, name, file, filename=None, content_type=None):
+        """Adds a new file to the dict.  `file` can be a file name or
+        a :class:`file`-like or a :class:`FileStorage` object.
+
+        :param name: the name of the field.
+        :param file: a filename or :class:`file`-like object
+        :param filename: an optional filename
+        :param content_type: an optional content type
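+
+        Illustrative usage (the field name and path are made up)::
+
+            files = FileMultiDict()
+            files.add_file('avatar', '/tmp/avatar.png',
+                           content_type='image/png')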
+        """
+        from werkzeug.utils import FileStorage
+        if isinstance(file, FileStorage):
+            self[name] = file
+            return
+        if isinstance(file, basestring):
+            if filename is None:
+                filename = file
+            file = open(file, 'rb')
+        if filename and content_type is None:
+            from mimetypes import guess_type
+            content_type = guess_type(filename)[0] or \
+                           'application/octet-stream'
+        self[name] = FileStorage(file, filename, name, content_type)
+
+
+class ImmutableDict(ImmutableDictMixin, dict):
+    """An immutable :class:`dict`.
+
+    .. versionadded:: 0.5
+    """
+
+    __repr__ = _proxy_repr(dict)
+
+
+class ImmutableMultiDict(ImmutableMultiDictMixin, MultiDict):
+    """An immutable :class:`MultiDict`.  The methods that return the internal
+    lists return :class:`ImmutableList` objects.
+
+    .. versionadded:: 0.5
+    """
+
+    _list_type = ImmutableList
+
+
+class Accept(ImmutableList):
+    """An :class:`Accept` object is just a list subclass for lists of
+    ``(value, quality)`` tuples.  It is automatically sorted by quality.
+
+    All :class:`Accept` objects work similarly to a list but provide extra
+    functionality for working with the data.  Containment checks are
+    normalized to the rules of that header:
+
+    >>> a = CharsetAccept([('ISO-8859-1', 1), ('utf-8', 0.7)])
+    >>> a.best
+    'ISO-8859-1'
+    >>> 'iso-8859-1' in a
+    True
+    >>> 'UTF8' in a
+    True
+    >>> 'utf7' in a
+    False
+
+    To get the quality for an item you can use normal item lookup:
+
+    >>> print a['utf-8']
+    0.7
+    >>> a['utf7']
+    0
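+
+    And the whole set can be serialized back into a header value:
+
+    >>> a.to_header()
+    'ISO-8859-1,utf-8;q=0.7'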
+
+    .. versionchanged:: 0.5
+       :class:`Accept` objects are forced immutable now.
+    """
+
+    def __init__(self, values=()):
+        if values is None:
+            list.__init__(self)
+            self.provided = False
+        elif isinstance(values, Accept):
+            self.provided = values.provided
+            list.__init__(self, values)
+        else:
+            self.provided = True
+            values = [(a, b) for b, a in values]
+            values.sort()
+            values.reverse()
+            list.__init__(self, [(a, b) for b, a in values])
+
+    def _value_matches(self, value, item):
+        """Check if a value matches a given accept item."""
+        return item == '*' or item.lower() == value.lower()
+
+    def __getitem__(self, key):
+        """Besides index lookup (getting item n) you can also pass it a string
+        to get the quality for the item.  If the item is not in the list, the
+        returned quality is ``0``.
+        """
+        if isinstance(key, basestring):
+            for item, quality in self:
+                if self._value_matches(key, item):
+                    return quality
+            return 0
+        return list.__getitem__(self, key)
+
+    def __contains__(self, value):
+        for item, quality in self:
+            if self._value_matches(value, item):
+                return True
+        return False
+
+    def __repr__(self):
+        return '%s([%s])' % (
+            self.__class__.__name__,
+            ', '.join('(%r, %s)' % (x, y) for x, y in self)
+        )
+
+    def index(self, key):
+        """Get the position of an entry or raise :exc:`ValueError`.
+
+        :param key: The key to be looked up.
+
+        .. versionchanged:: 0.5
+           This used to raise :exc:`IndexError`, which was inconsistent
+           with the list API.
+        """
+        if isinstance(key, basestring):
+            for idx, (item, quality) in enumerate(self):
+                if self._value_matches(key, item):
+                    return idx
+            raise ValueError(key)
+        return list.index(self, key)
+
+    def find(self, key):
+        """Get the position of an entry or return -1.
+
+        :param key: The key to be looked up.
+        """
+        try:
+            return self.index(key)
+        except ValueError:
+            return -1
+
+    def values(self):
+        """Return a list of the values, not the qualities."""
+        return list(self.itervalues())
+
+    def itervalues(self):
+        """Iterate over all values."""
+        for item in self:
+            yield item[0]
+
+    def to_header(self):
+        """Convert the header set into an HTTP header string."""
+        result = []
+        for value, quality in self:
+            if quality != 1:
+                value = '%s;q=%s' % (value, quality)
+            result.append(value)
+        return ','.join(result)
+
+    def __str__(self):
+        return self.to_header()
+
+    @property
+    def best(self):
+        """The best match as value."""
+        if self:
+            return self[0][0]
+
+
+class MIMEAccept(Accept):
+    """Like :class:`Accept` but with special methods and behavior for
+    mimetypes.
+    """
+
+    def _value_matches(self, value, item):
+        def _normalize(x):
+            x = x.lower()
+            return x == '*' and ('*', '*') or x.split('/', 1)
+
+        # this is from the application which is trusted.  to avoid developer
+        # frustration we actually check these for valid values
+        if '/' not in value:
+            raise ValueError('invalid mimetype %r' % value)
+        value_type, value_subtype = _normalize(value)
+        if value_type == '*' and value_subtype != '*':
+            raise ValueError('invalid mimetype %r' % value)
+
+        if '/' not in item:
+            return False
+        item_type, item_subtype = _normalize(item)
+        if item_type == '*' and item_subtype != '*':
+            return False
+        return (
+            (item_type == item_subtype == '*' or
+             value_type == value_subtype == '*') or
+            (item_type == value_type and (item_subtype == '*' or
+                                          value_subtype == '*' or
+                                          item_subtype == value_subtype))
+        )
+
+    @property
+    def accept_html(self):
+        """True if this object accepts HTML."""
+        return (
+            'text/html' in self or
+            'application/xhtml+xml' in self or
+            self.accept_xhtml
+        )
+
+    @property
+    def accept_xhtml(self):
+        """True if this object accepts XHTML."""
+        return (
+            'application/xhtml+xml' in self or
+            'application/xml' in self
+        )
+
+
+class LanguageAccept(Accept):
+    """Like :class:`Accept` but with normalization for languages."""
+
+    def _value_matches(self, value, item):
+        def _normalize(language):
+            return _locale_delim_re.split(language.lower())
+        return item == '*' or _normalize(value) == _normalize(item)
+
+
+class CharsetAccept(Accept):
+    """Like :class:`Accept` but with normalization for charsets."""
+
+    def _value_matches(self, value, item):
+        def _normalize(name):
+            try:
+                return codecs.lookup(name).name
+            except LookupError:
+                return name.lower()
+        return item == '*' or _normalize(value) == _normalize(item)
+
+
+def cache_property(key, empty, type):
+    """Return a new property object for a cache header.  Useful if you
+    want to add support for a cache extension in a subclass."""
+    return property(lambda x: x._get_cache_value(key, empty, type),
+                    lambda x, v: x._set_cache_value(key, v, type),
+                    lambda x: x._del_cache_value(key),
+                    'accessor for %r' % key)
+
+
+class _CacheControl(UpdateDictMixin, dict):
+    """Subclass of a dict that stores values for a Cache-Control header.  It
+    has accessors for all the cache-control directives specified in RFC 2616.
+    The class does not differentiate between request and response directives.
+
+    Because the cache-control directives in the HTTP header use dashes, the
+    Python descriptors use underscores instead.
+
+    To get the header value of a :class:`CacheControl` object again you can
+    convert the object into a string or call the :meth:`to_header` method.  If
+    you plan to subclass it and add your own items, have a look at the source
+    code for that class.
+
+    The following attributes are exposed:
+
+    `no_cache`, `no_store`, `max_age`, `max_stale`, `min_fresh`,
+    `no_transform`, `only_if_cached`, `public`, `private`, `must_revalidate`,
+    `proxy_revalidate`, and `s_maxage`
+
+    .. versionchanged:: 0.4
+
+       setting `no_cache` or `private` to boolean `True` will set the implicit
+       none-value which is ``*``:
+
+       >>> cc = ResponseCacheControl()
+       >>> cc.no_cache = True
+       >>> cc
+       <ResponseCacheControl 'no-cache'>
+       >>> cc.no_cache
+       '*'
+       >>> cc.no_cache = None
+       >>> cc
+       <ResponseCacheControl ''>
+    """
+
+    no_cache = cache_property('no-cache', '*', None)
+    no_store = cache_property('no-store', None, bool)
+    max_age = cache_property('max-age', -1, int)
+    no_transform = cache_property('no-transform', None, None)
+
+    def __init__(self, values=(), on_update=None):
+        dict.__init__(self, values or ())
+        self.on_update = on_update
+        self.provided = values is not None
+
+    def _get_cache_value(self, key, empty, type):
+        """Used internally by the accessor properties."""
+        if type is bool:
+            return key in self
+        if key in self:
+            value = self[key]
+            if value is None:
+                return empty
+            elif type is not None:
+                try:
+                    value = type(value)
+                except ValueError:
+                    pass
+            return value
+
+    def _set_cache_value(self, key, value, type):
+        """Used internally by the accessor properties."""
+        if type is bool:
+            if value:
+                self[key] = None
+            else:
+                self.pop(key, None)
+        else:
+            if value is None:
+                self.pop(key, None)
+            elif value is True:
+                self[key] = None
+            else:
+                self[key] = value
+
+    def _del_cache_value(self, key):
+        """Used internally by the accessor properties."""
+        if key in self:
+            del self[key]
+
+    def to_header(self):
+        """Convert the stored values into a cache control header."""
+        return dump_header(self)
+
+    def __str__(self):
+        return self.to_header()
+
+    def __repr__(self):
+        return '<%s %r>' % (
+            self.__class__.__name__,
+            self.to_header()
+        )
+
+
+class RequestCacheControl(ImmutableDictMixin, _CacheControl):
+    """A cache control for requests.  This is immutable and gives access
+    to all the request-relevant cache control headers.
+
+    .. versionadded:: 0.5
+       In previous versions a `CacheControl` class existed that was used
+       both for request and response.
+    """
+
+    max_stale = cache_property('max-stale', '*', int)
+    min_fresh = cache_property('min-fresh', '*', int)
+    no_transform = cache_property('no-transform', None, None)
+    only_if_cached = cache_property('only-if-cached', None, bool)
+
+
+class ResponseCacheControl(_CacheControl):
+    """A cache control for responses.  Unlike :class:`RequestCacheControl`
+    this is mutable and gives access to response-relevant cache control
+    headers.
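+
+    A small sketch (a single directive keeps the generated header
+    deterministic; this assumes :func:`dump_header` renders ``key=value``
+    pairs):
+
+    >>> cc = ResponseCacheControl()
+    >>> cc.max_age = 3600
+    >>> cc.to_header()
+    'max-age=3600'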
+
+    .. versionadded:: 0.5
+       In previous versions a `CacheControl` class existed that was used
+       both for request and response.
+    """
+
+    public = cache_property('public', None, bool)
+    private = cache_property('private', '*', None)
+    must_revalidate = cache_property('must-revalidate', None, bool)
+    proxy_revalidate = cache_property('proxy-revalidate', None, bool)
+    s_maxage = cache_property('s-maxage', None, None)
+
+
+class CacheControl(ResponseCacheControl):
+    """Deprecated."""
+    max_stale = cache_property('max-stale', '*', int)
+    min_fresh = cache_property('min-fresh', '*', int)
+    no_transform = cache_property('no-transform', None, None)
+    only_if_cached = cache_property('only-if-cached', None, bool)
+
+    def __init__(self, values=(), on_update=None):
+        from warnings import warn
+        warn(DeprecationWarning('CacheControl is deprecated in favor of '
+                                'RequestCacheControl and ResponseCacheControl.'))
+        ResponseCacheControl.__init__(self, values, on_update)
+
+
+# attach cache_property to the _CacheControl as staticmethod
+# so that others can reuse it.
+_CacheControl.cache_property = staticmethod(cache_property)
+
+
+class HeaderSet(object):
+    """Similar to the :class:`ETags` class this implements a set-like structure.
+    Unlike :class:`ETags` this is case insensitive and used for vary, allow, and
+    content-language headers.
+
+    If not constructed using the :func:`parse_set_header` function the
+    instantiation works like this:
+
+    >>> hs = HeaderSet(['foo', 'bar', 'baz'])
+    >>> hs
+    HeaderSet(['foo', 'bar', 'baz'])
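+
+    The set can be turned back into a header value (assuming plain token
+    values are not quoted by :func:`quote_header_value`):
+
+    >>> hs.to_header()
+    'foo, bar, baz'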
+    """
+
+    def __init__(self, headers=None, on_update=None):
+        self._headers = list(headers or ())
+        self._set = set([x.lower() for x in self._headers])
+        self.on_update = on_update
+
+    def add(self, header):
+        """Add a new header to the set."""
+        self.update((header,))
+
+    def remove(self, header):
+        """Remove a layer from the set.  This raises an :exc:`KeyError` if the
+        header is not in the set.
+
+        .. versionchanged:: 0.5
+            In older versions a :exc:`IndexError` was raised instead of a
+            :exc:`KeyError` if the object was missing.
+
+        :param header: the header to be removed.
+        """
+        key = header.lower()
+        if key not in self._set:
+            raise KeyError(header)
+        self._set.remove(key)
+        # compare against the lowercased key (mixed-case arguments
+        # would otherwise never match the stored header names)
+        for idx, item in enumerate(self._headers):
+            if item.lower() == key:
+                del self._headers[idx]
+                break
+        if self.on_update is not None:
+            self.on_update(self)
+
+    def update(self, iterable):
+        """Add all the headers from the iterable to the set.
+
+        :param iterable: updates the set with the items from the iterable.
+        """
+        inserted_any = False
+        for header in iterable:
+            key = header.lower()
+            if key not in self._set:
+                self._headers.append(header)
+                self._set.add(key)
+                inserted_any = True
+        if inserted_any and self.on_update is not None:
+            self.on_update(self)
+
+    def discard(self, header):
+        """Like :meth:`remove` but ignores errors.
+
+        :param header: the header to be discarded.
+        """
+        try:
+            return self.remove(header)
+        except KeyError:
+            pass
+
+    def find(self, header):
+        """Return the index of the header in the set or return -1 if not found.
+
+        :param header: the header to be looked up.
+        """
+        header = header.lower()
+        for idx, item in enumerate(self._headers):
+            if item.lower() == header:
+                return idx
+        return -1
+
+    def index(self, header):
+        """Return the index of the header in the set or raise an
+        :exc:`IndexError`.
+
+        :param header: the header to be looked up.
+        """
+        rv = self.find(header)
+        if rv < 0:
+            raise IndexError(header)
+        return rv
+
+    def clear(self):
+        """Clear the set."""
+        self._set.clear()
+        del self._headers[:]
+        if self.on_update is not None:
+            self.on_update(self)
+
+    def as_set(self, preserve_casing=False):
+        """Return the set as real python set type.  When calling this, all
+        the items are converted to lowercase and the ordering is lost.
+
+        :param preserve_casing: if set to `True` the items in the set returned
+                                will have the original case like in the
+                                :class:`HeaderSet`, otherwise they will
+                                be lowercase.
+        """
+        if preserve_casing:
+            return set(self._headers)
+        return set(self._set)
+
+    def to_header(self):
+        """Convert the header set into an HTTP header string."""
+        return ', '.join(map(quote_header_value, self._headers))
+
+    def __getitem__(self, idx):
+        return self._headers[idx]
+
+    def __delitem__(self, idx):
+        rv = self._headers.pop(idx)
+        self._set.remove(rv.lower())
+        if self.on_update is not None:
+            self.on_update(self)
+
+    def __setitem__(self, idx, value):
+        old = self._headers[idx]
+        self._set.remove(old.lower())
+        self._headers[idx] = value
+        self._set.add(value.lower())
+        if self.on_update is not None:
+            self.on_update(self)
+
+    def __contains__(self, header):
+        return header.lower() in self._set
+
+    def __len__(self):
+        return len(self._set)
+
+    def __iter__(self):
+        return iter(self._headers)
+
+    def __nonzero__(self):
+        return bool(self._set)
+
+    def __str__(self):
+        return self.to_header()
+
+    def __repr__(self):
+        return '%s(%r)' % (
+            self.__class__.__name__,
+            self._headers
+        )
+
+
+class ETags(object):
+    """A set that can be used to check if one etag is present in a collection
+    of etags.
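+
+    A small illustrative example:
+
+    >>> etags = ETags(['0665a43a4321', 'badc0ffee'])
+    >>> '0665a43a4321' in etags
+    True
+    >>> 'missing-etag' in etags
+    False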
+    """
+
+    def __init__(self, strong_etags=None, weak_etags=None, star_tag=False):
+        self._strong = frozenset(not star_tag and strong_etags or ())
+        self._weak = frozenset(weak_etags or ())
+        self.star_tag = star_tag
+
+    def as_set(self, include_weak=False):
+        """Convert the `ETags` object into a python set.  Per default all the
+        weak etags are not part of this set."""
+        rv = set(self._strong)
+        if include_weak:
+            rv.update(self._weak)
+        return rv
+
+    def is_weak(self, etag):
+        """Check if an etag is weak."""
+        return etag in self._weak
+
+    def contains_weak(self, etag):
+        """Check if an etag is part of the set including weak and strong tags."""
+        return self.is_weak(etag) or self.contains(etag)
+
+    def contains(self, etag):
+        """Check if an etag is part of the set ignoring weak tags."""
+        if self.star_tag:
+            return True
+        return etag in self._strong
+
+    def contains_raw(self, etag):
+        """When passed a quoted tag it will check if this tag is part of the
+        set.  If the tag is weak it is checked against weak and strong tags,
+        otherwise strong only."""
+        etag, weak = unquote_etag(etag)
+        if weak:
+            return self.contains_weak(etag)
+        return self.contains(etag)
+
+    def to_header(self):
+        """Convert the etags set into a HTTP header string."""
+        if self.star_tag:
+            return '*'
+        return ', '.join(
+            ['"%s"' % x for x in self._strong] +
+            ['w/"%s"' % x for x in self._weak]
+        )
+
+    def __call__(self, etag=None, data=None, include_weak=False):
+        if [etag, data].count(None) != 1:
+            raise TypeError('exactly one of etag or data must be provided')
+        if etag is None:
+            etag = generate_etag(data)
+        if include_weak:
+            if etag in self._weak:
+                return True
+        return etag in self._strong
+
+    def __nonzero__(self):
+        return bool(self.star_tag or self._strong)
+
+    def __str__(self):
+        return self.to_header()
+
+    def __iter__(self):
+        return iter(self._strong)
+
+    def __contains__(self, etag):
+        return self.contains(etag)
+
+    def __repr__(self):
+        return '<%s %r>' % (self.__class__.__name__, str(self))
+
+
+class Authorization(ImmutableDictMixin, dict):
+    """Represents an `Authorization` header sent by the client.  You should
+    not create this kind of object yourself but use it when it's returned by
+    the `parse_authorization_header` function.
+
+    This object is a dict subclass, but since Werkzeug 0.5 it is immutable:
+    it is provided by the client and not meant to be modified by the
+    application.
+
+    .. versionchanged:: 0.5
+       This object became immutable.
+    """
+
+    def __init__(self, auth_type, data=None):
+        dict.__init__(self, data or {})
+        self.type = auth_type
+
+    username = property(lambda x: x.get('username'), doc='''
+        The username transmitted.  This is set for both basic and digest
+        auth all the time.''')
+    password = property(lambda x: x.get('password'), doc='''
+        When the authentication type is basic this is the password
+        transmitted by the client, else `None`.''')
+    realm = property(lambda x: x.get('realm'), doc='''
+        This is the server realm sent back for HTTP digest auth.''')
+    nonce = property(lambda x: x.get('nonce'), doc='''
+        The nonce the server sent for digest auth, sent back by the client.
+        A nonce should be unique for every 401 response for HTTP digest
+        auth.''')
+    uri = property(lambda x: x.get('uri'), doc='''
+        The URI from Request-URI of the Request-Line; duplicated because
+        proxies are allowed to change the Request-Line in transit.  HTTP
+        digest auth only.''')
+    nc = property(lambda x: x.get('nc'), doc='''
+        The nonce count value transmitted by clients if a qop-header is
+        also transmitted.  HTTP digest auth only.''')
+    cnonce = property(lambda x: x.get('cnonce'), doc='''
+        If the server sent a qop-header in the ``WWW-Authenticate``
+        header, the client has to provide this value for HTTP digest auth.
+        See the RFC for more details.''')
+    response = property(lambda x: x.get('response'), doc='''
+        A string of 32 hex digits computed as defined in RFC 2617, which
+        proves that the user knows a password.  Digest auth only.''')
+    opaque = property(lambda x: x.get('opaque'), doc='''
+        The opaque header from the server returned unchanged by the client.
+        It is recommended that this string be base64 or hexadecimal data.
+        Digest auth only.''')
+
+    @property
+    def qop(self):
+        """Indicates what "quality of protection" the client has applied to
+        the message for HTTP digest auth."""
+        def on_update(header_set):
+            if not header_set and 'qop' in self:
+                del self['qop']
+            elif header_set:
+                self['qop'] = header_set.to_header()
+        return parse_set_header(self.get('qop'), on_update)
+
+
+class WWWAuthenticate(UpdateDictMixin, dict):
+    """Provides simple access to `WWW-Authenticate` headers."""
+
+    #: list of keys that require quoting in the generated header
+    _require_quoting = frozenset(['domain', 'nonce', 'opaque', 'realm'])
+
+    def __init__(self, auth_type=None, values=None, on_update=None):
+        dict.__init__(self, values or ())
+        if auth_type:
+            self['__auth_type__'] = auth_type
+        self.on_update = on_update
+
+    def set_basic(self, realm='authentication required'):
+        """Clear the auth info and enable basic auth."""
+        dict.clear(self)
+        dict.update(self, {'__auth_type__': 'basic', 'realm': realm})
+        if self.on_update:
+            self.on_update(self)
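+
+    # Illustrative usage of the basic-auth helper (a sketch; the realm
+    # value is made up and the exact header string depends on
+    # ``quote_header_value``):
+    #
+    #   wa = WWWAuthenticate()
+    #   wa.set_basic(realm='My Site')
+    #   wa.to_header()        # -> 'Basic realm="My Site"'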
+
+    def set_digest(self, realm, nonce, qop=('auth',), opaque=None,
+                   algorithm=None, stale=False):
+        """Clear the auth info and enable digest auth."""
+        d = {
+            '__auth_type__':    'digest',
+            'realm':            realm,
+            'nonce':            nonce,
+            'qop':              dump_header(qop)
+        }
+        if stale:
+            d['stale'] = 'TRUE'
+        if opaque is not None:
+            d['opaque'] = opaque
+        if algorithm is not None:
+            d['algorithm'] = algorithm
+        dict.clear(self)
+        dict.update(self, d)
+        if self.on_update:
+            self.on_update(self)
+
+    def to_header(self):
+        """Convert the stored values into a WWW-Authenticate header."""
+        d = dict(self)
+        auth_type = d.pop('__auth_type__', None) or 'basic'
+        return '%s %s' % (auth_type.title(), ', '.join([
+            '%s=%s' % (key, quote_header_value(value,
+                       allow_token=key not in self._require_quoting))
+            for key, value in d.iteritems()
+        ]))
+
+    def __str__(self):
+        return self.to_header()
+
+    def __repr__(self):
+        return '<%s %r>' % (
+            self.__class__.__name__,
+            self.to_header()
+        )
+
+    def auth_property(name, doc=None):
+        """A static helper function for subclasses to add extra authentication
+        system properties onto a class::
+
+            class FooAuthenticate(WWWAuthenticate):
+                special_realm = auth_property('special_realm')
+
+        For more information have a look at the source code to see how the
+        regular properties (:attr:`realm` etc.) are implemented.
+        """
+        def _set_value(self, value):
+            if value is None:
+                self.pop(name, None)
+            else:
+                self[name] = str(value)
+        return property(lambda x: x.get(name), _set_value, doc=doc)
+
+    def _set_property(name, doc=None):
+        def fget(self):
+            def on_update(header_set):
+                if not header_set and name in self:
+                    del self[name]
+                elif header_set:
+                    self[name] = header_set.to_header()
+            return parse_set_header(self.get(name), on_update)
+        return property(fget, doc=doc)
+
+    type = auth_property('__auth_type__', doc='''
+        The type of the auth mechanism.  HTTP currently specifies
+        `Basic` and `Digest`.''')
+    realm = auth_property('realm', doc='''
+        A string to be displayed to users so they know which username and
+        password to use.  This string should contain at least the name of
+        the host performing the authentication and might additionally
+        indicate the collection of users who might have access.''')
+    domain = _set_property('domain', doc='''
+        A list of URIs that define the protection space.  If a URI is an
+        absolute path, it is relative to the canonical root URL of the
+        server being accessed.''')
+    nonce = auth_property('nonce', doc='''
+        A server-specified data string which should be uniquely generated
+        each time a 401 response is made.  It is recommended that this
+        string be base64 or hexadecimal data.''')
+    opaque = auth_property('opaque', doc='''
+        A string of data, specified by the server, which should be returned
+        by the client unchanged in the Authorization header of subsequent
+        requests with URIs in the same protection space.  It is recommended
+        that this string be base64 or hexadecimal data.''')
+    algorithm = auth_property('algorithm', doc='''
+        A string indicating a pair of algorithms used to produce the digest
+        and a checksum.  If this is not present it is assumed to be "MD5".
+        If the algorithm is not understood, the challenge should be ignored
+        (and a different one used, if there is more than one).''')
+    qop = _set_property('qop', doc='''
+        A set of quality-of-protection modifiers such as auth and auth-int.''')
+
+    def _get_stale(self):
+        val = self.get('stale')
+        if val is not None:
+            return val.lower() == 'true'
+    def _set_stale(self, value):
+        if value is None:
+            self.pop('stale', None)
+        else:
+            self['stale'] = value and 'TRUE' or 'FALSE'
+    stale = property(_get_stale, _set_stale, doc='''
+        A flag, indicating that the previous request from the client was
+        rejected because the nonce value was stale.''')
+    del _get_stale, _set_stale
+
+    # make auth_property a staticmethod so that subclasses of
+    # `WWWAuthenticate` can use it for new properties.
+    auth_property = staticmethod(auth_property)
+    del _set_property
+
+
+# circular dependencies
+from werkzeug.http import dump_options_header, dump_header, generate_etag, \
+     quote_header_value, parse_set_header, unquote_etag
+
+
+# create all the special key errors now that the classes are defined.
+from werkzeug.exceptions import BadRequest
+for _cls in MultiDict, CombinedMultiDict, Headers, EnvironHeaders:
+    _cls.KeyError = BadRequest.wrap(KeyError, _cls.__name__ + '.KeyError')
+del _cls
--- a/MoinMoin/support/werkzeug/debug/__init__.py	Fri Feb 27 23:30:37 2009 +0100
+++ b/MoinMoin/support/werkzeug/debug/__init__.py	Sat Feb 28 00:08:31 2009 +0100
@@ -5,7 +5,7 @@
 
     WSGI application traceback debugger.
 
-    :copyright: 2008 by Georg Brandl, Armin Ronacher.
+    :copyright: (c) 2009 by the Werkzeug Team, see AUTHORS for more details.
     :license: BSD, see LICENSE for more details.
 """
 from os.path import join, dirname, basename, isfile
@@ -37,12 +37,26 @@
     The `evalex` keyword argument allows evaluating expressions in a
     traceback's frame context.
 
-    THIS IS A GAPING SECURITY HOLE IF PUBLICLY ACCESSIBLE!
+    :param app: the WSGI application to run debugged.
+    :param evalex: enable exception evaluation feature (interactive
+                   debugging).  This requires a non-forking server.
+    :param request_key: The key that points to the request object in this
+                        environment.  This parameter is ignored in current
+                        versions.
+    :param console_path: the URL for a general purpose console.
+    :param console_init_func: the function that is executed before starting
+                              the general purpose console.  The return value
+                              is used as initial namespace.
+    :param show_hidden_frames: by default hidden traceback frames are skipped.
+                               You can show them by setting this parameter
+                               to `True`.
     """
 
     def __init__(self, app, evalex=False, request_key='werkzeug.request',
-                 console_path='/console', console_init_func=dict,
+                 console_path='/console', console_init_func=None,
                  show_hidden_frames=False):
+        if not console_init_func:
+            console_init_func = dict
         self.app = app
         self.evalex = evalex
         self.frames = {}
@@ -65,7 +79,8 @@
             if hasattr(app_iter, 'close'):
                 app_iter.close()
             traceback = get_current_traceback(skip=1, show_hidden_frames=
-                                              self.show_hidden_frames)
+                                              self.show_hidden_frames,
+                                              ignore_system_exceptions=True)
             for frame in traceback.frames:
                 self.frames[frame.id] = frame
             self.tracebacks[traceback.id] = traceback
@@ -80,14 +95,13 @@
                 # more, better log something into the error log and fall
                 # back gracefully.
                 environ['wsgi.errors'].write(
-                    '\nDebugging middlware catched exception in streamed '
-                    'reponse a point where response headers were already '
+                    'Debugging middleware caught exception in streamed '
+                    'response at a point where response headers were already '
                     'sent.\n')
-                traceback.log(environ['wsgi.errors'])
-                return
+            else:
+                yield traceback.render_full(evalex=self.evalex) \
+                               .encode('utf-8', 'replace')
 
-            yield traceback.render_full(evalex=self.evalex) \
-                           .encode('utf-8', 'replace')
             traceback.log(environ['wsgi.errors'])
 
     def execute_command(self, request, command, frame):
@@ -103,7 +117,7 @@
     def paste_traceback(self, request, traceback):
         """Paste the traceback and return a JSON response."""
         paste_id = traceback.paste()
-        return Response('{"url": "http://paste.pocoo.org/show/%d/", "id": %d}'
+        return Response('{"url": "http://paste.pocoo.org/show/%s/", "id": %s}'
                         % (paste_id, paste_id), mimetype='application/json')
 
     def get_source(self, request, frame):
--- a/MoinMoin/support/werkzeug/debug/console.py	Fri Feb 27 23:30:37 2009 +0100
+++ b/MoinMoin/support/werkzeug/debug/console.py	Sat Feb 28 00:08:31 2009 +0100
@@ -5,13 +5,13 @@
 
     Interactive console support.
 
-    :copyright: Copyright 2008 by Armin Ronacher.
+    :copyright: (c) 2009 by the Werkzeug Team, see AUTHORS for more details.
     :license: BSD.
 """
 import sys
 import code
 from types import CodeType
-from cgi import escape
+from werkzeug.utils import escape
 from werkzeug.local import Local
 from werkzeug.debug.repr import debug_repr, dump, helper
 from werkzeug.debug.utils import render_template
--- a/MoinMoin/support/werkzeug/debug/render.py	Fri Feb 27 23:30:37 2009 +0100
+++ b/MoinMoin/support/werkzeug/debug/render.py	Sat Feb 28 00:08:31 2009 +0100
@@ -5,14 +5,13 @@
 
     Render the traceback debugging page.
 
-    :copyright: 2007 by Georg Brandl, Armin Ronacher.
+    :copyright: (c) 2009 by the Werkzeug Team, see AUTHORS for more details.
     :license: BSD, see LICENSE for more details.
 """
 import pprint
 from os.path import dirname, join
 
 from werkzeug.templates import Template
-from werkzeug.debug.util import Namespace
 
 
 def get_template(name):
@@ -37,6 +36,7 @@
 
 
 def code_table(frame):
+    from werkzeug.debug.util import Namespace
     lines = []
     lineno = frame['context_lineno']
     if lineno is not None:
--- a/MoinMoin/support/werkzeug/debug/repr.py	Fri Feb 27 23:30:37 2009 +0100
+++ b/MoinMoin/support/werkzeug/debug/repr.py	Sat Feb 28 00:08:31 2009 +0100
@@ -10,22 +10,17 @@
     Together with the CSS and JavaScript files of the debugger this gives
     a colorful and more compact output.
 
-    :copyright: Copyright 2008 by Armin Ronacher.
+    :copyright: (c) 2009 by the Werkzeug Team, see AUTHORS for more details.
     :license: BSD.
 """
 import sys
 import re
-from types import InstanceType
 from traceback import format_exception_only
 try:
     from collections import deque
 except ImportError:
     deque = None
-from cgi import escape
-try:
-    set
-except NameError:
-    from sets import Set as set, ImmutableSet as frozenset
+from werkzeug.utils import escape
 from werkzeug.debug.utils import render_template
 
 
@@ -53,7 +48,7 @@
 
 class _Helper(object):
     """Displays an HTML version of the normal help, for the interactive
-    debugger only because it requirse a patched sys.stdout.
+    debugger only because it requires a patched sys.stdout.
     """
 
     def __call__(self, topic=None):
--- a/MoinMoin/support/werkzeug/debug/shared/debugger.js	Fri Feb 27 23:30:37 2009 +0100
+++ b/MoinMoin/support/werkzeug/debug/shared/debugger.js	Sat Feb 28 00:08:31 2009 +0100
@@ -85,18 +85,24 @@
    */
   $('div.plain form')
     .submit(function() {
-      $('input[@type="submit"]', this).val('submitting...');
+      var label = $('input[@type="submit"]', this);
+      var old_val = label.val();
+      label.val('submitting...');
       $.ajax({
         dataType:     'json',
         url:          './__debugger__',
         data:         {tb: TRACEBACK, cmd: 'paste'},
         success:      function(data) {
-          console.log(data);
           $('div.plain span.pastemessage')
             .removeClass('pastemessage')
             .text('Paste created: ')
             .append($('<a>#' + data.id + '</a>').attr('href', data.url));
-      }});
+        },
+        error:        function() {
+          alert('Error: Could not submit paste.  No network connection?');
+          label.val(old_val);
+        }
+      });
       return false;
     });
 
--- a/MoinMoin/support/werkzeug/debug/tbtools.py	Fri Feb 27 23:30:37 2009 +0100
+++ b/MoinMoin/support/werkzeug/debug/tbtools.py	Sat Feb 28 00:08:31 2009 +0100
@@ -5,7 +5,7 @@
 
     This module provides various traceback related utility functions.
 
-    :copyright: Copyright 2008 by Armin Ronacher.
+    :copyright: (c) 2009 by the Werkzeug Team, see AUTHORS for more details.
     :license: BSD.
 """
 import re
@@ -142,8 +142,6 @@
         from xmlrpclib import ServerProxy
         srv = ServerProxy('http://paste.pocoo.org/xmlrpc/')
         return srv.pastes.newPaste('pytb', self.plaintext)
-        return '{"url": "http://paste.pocoo.org/show/%d/", "id": %d}' % \
-               (paste_id, paste_id)
 
     def render_summary(self, include_title=True):
         """Render the traceback for the interactive console."""
@@ -233,15 +231,22 @@
             return eval(code, self.globals, self.locals)
         exec code in self.globals, self.locals
 
+    @cached_property
     def sourcelines(self):
         """The sourcecode of the file as list of unicode strings."""
         # get sourcecode from loader or file
         source = None
         if self.loader is not None:
-            if hasattr(self.loader, 'get_source'):
-                source = self.loader.get_source(self.module)
-            elif hasattr(self.loader, 'get_source_by_code'):
-                source = self.loader.get_source_by_code(self.code)
+            try:
+                if hasattr(self.loader, 'get_source'):
+                    source = self.loader.get_source(self.module)
+                elif hasattr(self.loader, 'get_source_by_code'):
+                    source = self.loader.get_source_by_code(self.code)
+            except:
+                # we munch the exception so that we don't cause troubles
+                # if the loader is broken.
+                pass
+
         if source is None:
             try:
                 f = file(self.filename)
@@ -277,17 +282,16 @@
             charset = 'utf-8'
 
         return source.decode(charset, 'replace').splitlines()
-    sourcelines = cached_property(sourcelines)
 
+    @property
     def current_line(self):
         try:
             return self.sourcelines[self.lineno - 1]
         except IndexError:
             return u''
-    current_line = property(current_line)
 
+    @cached_property
     def console(self):
         return Console(self.globals, self.locals)
-    console = cached_property(console)
 
     id = property(lambda x: id(x))
--- a/MoinMoin/support/werkzeug/debug/templates/help_command.html	Fri Feb 27 23:30:37 2009 +0100
+++ b/MoinMoin/support/werkzeug/debug/templates/help_command.html	Sat Feb 28 00:08:31 2009 +0100
@@ -5,6 +5,6 @@
     <pre class="help">$text</pre>
   <% else %>
     <h3>Help</h3>
-    <p>Type help(object) for help abuot object.</p>
+    <p>Type help(object) for help about object.</p>
   <% endif %>
 </div>
--- a/MoinMoin/support/werkzeug/debug/templates/traceback_full.html	Fri Feb 27 23:30:37 2009 +0100
+++ b/MoinMoin/support/werkzeug/debug/templates/traceback_full.html	Sat Feb 28 00:08:31 2009 +0100
@@ -32,8 +32,8 @@
         </form>
       </div>
       <div class="explanation">
-        The debugger cought an exception in your WSGI application.  You can now
-        look at the traceback which lead to the error.  <span class="nojavascript">
+        The debugger caught an exception in your WSGI application.  You can now
+        look at the traceback which led to the error.  <span class="nojavascript">
         If you enable JavaScript you can also use additional features such as code
         execution (if the evalex feature is enabled), automatic pasting of the
         exceptions and much more.</span>
--- a/MoinMoin/support/werkzeug/debug/utils.py	Fri Feb 27 23:30:37 2009 +0100
+++ b/MoinMoin/support/werkzeug/debug/utils.py	Sat Feb 28 00:08:31 2009 +0100
@@ -5,7 +5,7 @@
 
     Various other utilities.
 
-    :copyright: Copyright 2008 by Armin Ronacher.
+    :copyright: (c) 2009 by the Werkzeug Team, see AUTHORS for more details.
     :license: BSD.
 """
 from os.path import join, dirname
--- a/MoinMoin/support/werkzeug/exceptions.py	Fri Feb 27 23:30:37 2009 +0100
+++ b/MoinMoin/support/werkzeug/exceptions.py	Sat Feb 28 00:08:31 2009 +0100
@@ -4,7 +4,7 @@
     ~~~~~~~~~~~~~~~~~~~
 
     This module implements a number of Python exceptions you can raise from
-    within your views to trigger a standard non 200 response.
+    within your views to trigger a standard non-200 response.
 
 
     Usage Example
@@ -28,9 +28,8 @@
 
 
     As you can see from this example those exceptions are callable WSGI
-    applications.  Because of Python 2.3 / 2.4 compatibility those do not
-    extend from the response objects but only from the python exception
-    class.
+    applications.  Because of Python 2.4 compatibility those do not extend
+    from the response objects but only from the python exception class.
 
     As a matter of fact they are not Werkzeug response objects.  However you
     can get a response object by calling ``get_response()`` on a HTTP
@@ -40,7 +39,7 @@
     because some errors fetch additional information from the WSGI
     environment.
 
-    If you want to hook in a different exception page to say, an 404 status
+    If you want to hook in a different exception page for, say, a 404 status
     code, you can add a second except for a specific subclass of an error::
 
         @responder
@@ -54,7 +53,7 @@
                 return e
 
 
-    :copyright: 2007-2008 by Armin Ronacher.
+    :copyright: (c) 2009 by the Werkzeug Team, see AUTHORS for more details.
     :license: BSD, see LICENSE for more details.
 """
 import sys
@@ -117,7 +116,11 @@
         return [('Content-Type', 'text/html')]
 
     def get_response(self, environ):
-        """Get a response object."""
+        """Get a response object.
+
+        :param environ: the environ for the request.
+        :return: a :class:`BaseResponse` object or a subclass thereof.
+        """
         # lazily imported for various reasons.  For one, you can use the exceptions
         # with custom responses (testing exception instances against types) and
         # so we don't ever have to import the wrappers, but also because there
@@ -127,15 +130,18 @@
         return BaseResponse(self.get_body(environ), self.code, headers)
 
     def __call__(self, environ, start_response):
-        """Call the exception as WSGI application."""
+        """Call the exception as WSGI application.
+
+        :param environ: the WSGI environment.
+        :param start_response: the response callable provided by the WSGI
+                               server.
+        """
         response = self.get_response(environ)
         return response(environ, start_response)
 
 
 class _ProxyException(HTTPException):
-    """
-    An HTTP exception that expands renders a WSGI application on error.
-    """
+    """An HTTP exception that expands renders a WSGI application on error."""
 
     def __init__(self, response):
         Exception.__init__(self, 'proxy exception for %r' % response)
@@ -146,10 +152,9 @@
 
 
 class BadRequest(HTTPException):
-    """
-    *400* `Bad Request`
+    """*400* `Bad Request`
 
-    Raise if the browser send something to the application the application
+    Raise if the browser sends something to the application that the application
     or server cannot handle.
     """
     code = 400
@@ -160,8 +165,7 @@
 
 
 class Unauthorized(HTTPException):
-    """
-    *401* `Unauthorized`
+    """*401* `Unauthorized`
 
     Raise if the user is not authorized.  Also used if you want to use HTTP
     basic auth.
@@ -169,8 +173,8 @@
     code = 401
     description = (
         '<p>The server could not verify that you are authorized to access '
-        'the URL requested.  You either supplied the wrong credentials (e.g.'
-        ', bad password), or your browser doesn\'t understand how to supply '
+        'the URL requested.  You either supplied the wrong credentials (e.g. '
+        'a bad password), or your browser doesn\'t understand how to supply '
         'the credentials required.</p><p>In case you are allowed to request '
         'the document, please check your user-id and password and try '
         'again.</p>'
@@ -178,8 +182,7 @@
 
 
 class Forbidden(HTTPException):
-    """
-    *403* `Forbidden`
+    """*403* `Forbidden`
 
     Raise if the user doesn't have the permission for the requested resource
     but was authenticated.
@@ -192,8 +195,7 @@
 
 
 class NotFound(HTTPException):
-    """
-    *404* `Not Found`
+    """*404* `Not Found`
 
     Raise if a resource does not exist and never existed.
     """
@@ -206,8 +208,7 @@
 
 
 class MethodNotAllowed(HTTPException):
-    """
-    *405* `Method Not Allowed`
+    """*405* `Method Not Allowed`
 
     Raise if the server used a method the resource does not handle.  For
     example `POST` if the resource is view only.  Especially useful for REST.
@@ -236,10 +237,9 @@
 
 
 class NotAcceptable(HTTPException):
-    """
-    *406* `Not Acceptable`
+    """*406* `Not Acceptable`
 
-    Raise if the server cant return any content conforming to the
+    Raise if the server can't return any content conforming to the
     `Accept` headers of the client.
     """
     code = 406
@@ -253,8 +253,7 @@
 
 
 class RequestTimeout(HTTPException):
-    """
-    *408* `Request Timeout`
+    """*408* `Request Timeout`
 
     Raise to signal a timeout.
     """
@@ -266,8 +265,7 @@
 
 
 class Gone(HTTPException):
-    """
-    *410* `Gone`
+    """*410* `Gone`
 
     Raise if a resource existed previously and went away without new location.
     """
@@ -280,8 +278,7 @@
 
 
 class LengthRequired(HTTPException):
-    """
-    *411* `Length Required`
+    """*411* `Length Required`
 
     Raise if the browser submitted data but no ``Content-Length`` header which
     is required for the kind of processing the server does.
@@ -289,13 +286,12 @@
     code = 411
     description = (
         '<p>A request with this method requires a valid <code>Content-'
-        'Lenght</code> header.</p>'
+        'Length</code> header.</p>'
     )
 
 
 class PreconditionFailed(HTTPException):
-    """
-    *412* `Precondition Failed`
+    """*412* `Precondition Failed`
 
     Status code used in combination with ``If-Match``, ``If-None-Match``, or
     ``If-Unmodified-Since``.
@@ -308,21 +304,19 @@
 
 
 class RequestEntityTooLarge(HTTPException):
-    """
-    *413* `Request Entity Too Large`
+    """*413* `Request Entity Too Large`
 
     The status code one should return if the data submitted exceeded a given
     limit.
     """
     code = 413
     description = (
-        '<p>The data value transmitted exceed the capacity limit.</p>'
+        '<p>The data value transmitted exceeds the capacity limit.</p>'
     )
 
 
 class RequestURITooLarge(HTTPException):
-    """
-    *414* `Request URI Too Large`
+    """*414* `Request URI Too Large`
 
     Like *413* but for too long URLs.
     """
@@ -334,8 +328,7 @@
 
 
 class UnsupportedMediaType(HTTPException):
-    """
-    *415* `Unsupported Media Type`
+    """*415* `Unsupported Media Type`
 
     The status code returned if the server is unable to handle the media type
     the client transmitted.
@@ -348,11 +341,10 @@
 
 
 class InternalServerError(HTTPException):
-    """
-    *500* `Internal Server Error`
+    """*500* `Internal Server Error`
 
-    Raise if an internal server error occoured.  This is a good fallback if an
-    unknown error occoured in the dispatcher.
+    Raise if an internal server error occurred.  This is a good fallback if an
+    unknown error occurred in the dispatcher.
     """
     code = 500
     description = (
@@ -363,8 +355,7 @@
 
 
 class NotImplemented(HTTPException):
-    """
-    *501* `Not Implemented`
+    """*501* `Not Implemented`
 
     Raise if the application does not support the action requested by the
     browser.
@@ -377,10 +368,9 @@
 
 
 class BadGateway(HTTPException):
-    """
-    *502* `Bad Gateway`
+    """*502* `Bad Gateway`
 
-    If you do proxing in your application you should return this status code
+    If you do proxying in your application you should return this status code
     if you received an invalid response from the upstream server it accessed
     in attempting to fulfill the request.
     """
@@ -392,8 +382,7 @@
 
 
 class ServiceUnavailable(HTTPException):
-    """
-    *503* `Service Unavailable`
+    """*503* `Service Unavailable`
 
     Status code you should return if a service is temporarily unavailable.
     """
@@ -421,7 +410,7 @@
 
 
 #: raised by the request functions if they were unable to decode the
-#: incomding data properly.
+#: incoming data properly.
 HTTPUnicodeError = BadRequest.wrap(UnicodeError, 'HTTPUnicodeError')
 
 
--- a/MoinMoin/support/werkzeug/http.py	Fri Feb 27 23:30:37 2009 +0100
+++ b/MoinMoin/support/werkzeug/http.py	Sat Feb 28 00:08:31 2009 +0100
@@ -5,7 +5,7 @@
 
     Werkzeug comes with a bunch of utilities that help Werkzeug to deal with
     HTTP data.  Most of the classes and functions provided by this module are
-    used by the wrappers, but they are useful on their own too, especially if
+    used by the wrappers, but they are useful on their own, too, especially if
     the response and request objects are not used.
 
     This covers some of the more HTTP centric features of WSGI, some other
@@ -13,580 +13,55 @@
     module.
 
 
-    :copyright: 2007-2008 by Armin Ronacher.
+    :copyright: (c) 2009 by the Werkzeug Team, see AUTHORS for more details.
     :license: BSD, see LICENSE for more details.
 """
 import re
-import rfc822
+import inspect
+from email.utils import parsedate_tz, mktime_tz
+from cStringIO import StringIO
+from tempfile import TemporaryFile
 from urllib2 import parse_http_list as _parse_list_header
 from datetime import datetime
 try:
     from hashlib import md5
 except ImportError:
     from md5 import new as md5
-try:
-    set = set
-    frozenset = frozenset
-except NameError:
-    from sets import Set as set, ImmutableSet as frozenset
-from werkzeug._internal import _patch_wrapper, _UpdateDict, HTTP_STATUS_CODES
+from werkzeug._internal import _decode_unicode, HTTP_STATUS_CODES
 
 
 _accept_re = re.compile(r'([^\s;,]+)(?:[^,]*?;\s*q=(\d*(?:\.\d+)?))?')
 _token_chars = frozenset("!#$%&'*+-.0123456789ABCDEFGHIJKLMNOPQRSTUVWXYZ"
                          '^_`abcdefghijklmnopqrstuvwxyz|~')
 _etag_re = re.compile(r'([Ww]/)?(?:"(.*?)"|(.*?))(?:\s*,\s*|$)')
-
-
-class Accept(list):
-    """An `Accept` object is just a list subclass for lists of
-    ``(value, quality)`` tuples.  It is automatically sorted by quality.
-    """
-
-    def __init__(self, values=()):
-        if values is None:
-            list.__init__(self)
-            self.provided = False
-        else:
-            self.provided = True
-            values = [(a, b) for b, a in values]
-            values.sort()
-            values.reverse()
-            list.__init__(self, [(a, b) for b, a in values])
-
-    def __getitem__(self, key):
-        """Beside index lookup (getting item n) you can also pass it a string
-        to get the quality for the item.  If the item is not in the list, the
-        returned quality is ``0``.
-        """
-        if isinstance(key, basestring):
-            for value in self:
-                if value[0] == key:
-                    return value[1]
-            return 0
-        return list.__getitem__(self, key)
-
-    def __contains__(self, key):
-        return self.find(key) > -1
-
-    def __repr__(self):
-        return '%s(%s)' % (
-            self.__class__.__name__,
-            list.__repr__(self)
-        )
-
-    def index(self, key):
-        """Get the position of en entry or raise `IndexError`."""
-        rv = self.find(key)
-        if rv < 0:
-            raise IndexError(key)
-        return key
-
-    def find(self, key):
-        """Get the position of an entry or return -1"""
-        if isinstance(key, basestring):
-            for idx, value in enumerate(self):
-                if value[0] == key:
-                    return idx
-            return -1
-        return list.find(self, key)
-
-    def values(self):
-        """Return a list of the values, not the qualities."""
-        return [x[0] for x in self]
-
-    def itervalues(self):
-        """Iterate over all values."""
-        for item in self:
-            yield item[0]
-
-    def best(self):
-        """The best match as value."""
-        return self and self[0][0] or None
-    best = property(best)
-
-
-class HeaderSet(object):
-    """Similar to the `ETags` class this implements a set like structure.
-    Unlike `ETags` this is case insensitive and used for vary, allow, and
-    content-language headers.
-
-    If not constructed using the `parse_set_header` function the instanciation
-    works like this:
-
-    >>> hs = HeaderSet(['foo', 'bar', 'baz'])
-    >>> hs
-    HeaderSet(['foo', 'bar', 'baz'])
-    """
-
-    def __init__(self, headers=None, on_update=None):
-        self._headers = list(headers or ())
-        self._set = set([x.lower() for x in self._headers])
-        self.on_update = on_update
-
-    def add(self, header):
-        """Add a new header to the set."""
-        self.update((header,))
-
-    def remove(self, header):
-        """Remove a layer from the set.  This raises an `IndexError` if the
-        header is not in the set."""
-        key = header.lower()
-        if key not in self._set:
-            raise IndexError(header)
-        self._set.remove(key)
-        for idx, key in enumerate(self._headers):
-            if key.lower() == header:
-                del self._headers[idx]
-                break
-        if self.on_update is not None:
-            self.on_update(self)
-
-    def update(self, iterable):
-        """Add all the headers from the iterable to the set."""
-        inserted_any = False
-        for header in iterable:
-            key = header.lower()
-            if key not in self._set:
-                self._headers.append(header)
-                self._set.add(key)
-                inserted_any = True
-        if inserted_any and self.on_update is not None:
-            self.on_update(self)
-
-    def discard(self, header):
-        """Like remove but ignores errors."""
-        try:
-            return self.remove(header)
-        except IndexError:
-            pass
-
-    def find(self, header):
-        """Return the index of the header in the set or return -1 if not found."""
-        header = header.lower()
-        for idx, item in enumerate(self._headers):
-            if item.lower() == header:
-                return idx
-        return -1
-
-    def index(self, header):
-        """Return the index of the headerin the set or raise an `IndexError`."""
-        rv = self.find(header)
-        if rv < 0:
-            raise IndexError(header)
-        return rv
-
-    def clear(self):
-        """Clear the set."""
-        self._set.clear()
-        del self._headers[:]
-        if self.on_update is not None:
-            self.on_update(self)
-
-    def as_set(self, preserve_casing=False):
-        """Return the set as real python set structure.  When calling this
-        all the items are converted to lowercase and the ordering is lost.
-
-        If `preserve_casing` is `True` the items in the set returned will
-        have the original case like in the `HeaderSet`, otherwise they will
-        be lowercase.
-        """
-        if preserve_casing:
-            return set(self._headers)
-        return set(self._set)
-
-    def to_header(self):
-        """Convert the header set into an HTTP header string."""
-        return ', '.join(map(quote_header_value, self._headers))
-
-    def __getitem__(self, idx):
-        return self._headers[idx]
-
-    def __delitem__(self, idx):
-        rv = self._headers.pop(idx)
-        self._set.remove(rv.lower())
-        if self.on_update is not None:
-            self.on_update(self)
-
-    def __setitem__(self, idx, value):
-        old = self._headers[idx]
-        self._set.remove(old.lower())
-        self._headers[idx] = value
-        self._set.add(value.lower())
-        if self.on_update is not None:
-            self.on_update(self)
-
-    def __contains__(self, header):
-        return header.lower() in self._set
-
-    def __len__(self):
-        return len(self._set)
-
-    def __iter__(self):
-        return iter(self._headers)
-
-    def __nonzero__(self):
-        return bool(self._set)
-
-    def __str__(self):
-        return self.to_header()
-
-    def __repr__(self):
-        return '%s(%r)' % (
-            self.__class__.__name__,
-            self._headers
-        )
-
-
-class CacheControl(_UpdateDict):
-    """Subclass of a dict that stores values for a Cache-Control header.  It
-    has accesors for all the cache-control directives specified in RFC 2616.
-    The class does not differentiate between request and response directives.
-
-    Because the cache-control directives in the HTTP header use dashes the
-    python descriptors use underscores for that.
-
-    To get a header of the `CacheControl` object again you can convert the
-    object into a string or call the `to_header()` function.  If you plan
-    to subclass it and add your own items have a look at the sourcecode for
-    that class.
-
-    The following attributes are exposed:
-
-    `no_cache`, `no_store`, `max_age`, `max_stale`, `min_fresh`,
-    `no_transform`, `only_if_cached`, `public`, `private`, `must_revalidate`,
-    `proxy_revalidate`, and `s_maxage`"""
-
-    def cache_property(key, default, type):
-        """Return a new property object for a cache header.  Useful if you
-        want to add support for a cache extension in a subclass."""
-        return property(lambda x: x._get_cache_value(key, default, type),
-                        lambda x, v: x._set_cache_value(key, v, type),
-                        'accessor for %r' % key)
-
-    no_cache = cache_property('no-cache', '*', bool)
-    no_store = cache_property('no-store', None, bool)
-    max_age = cache_property('max-age', -1, int)
-    max_stale = cache_property('max-stale', '*', int)
-    min_fresh = cache_property('min-fresh', '*', int)
-    no_transform = cache_property('no-transform', None, None)
-    only_if_cached = cache_property('only-if-cached', None, bool)
-    public = cache_property('public', None, bool)
-    private = cache_property('private', '*', None)
-    must_revalidate = cache_property('must-revalidate', None, bool)
-    proxy_revalidate = cache_property('proxy-revalidate', None, bool)
-    s_maxage = cache_property('s-maxage', None, None)
-
-    def __init__(self, values=(), on_update=None):
-        _UpdateDict.__init__(self, values or (), on_update)
-        self.provided = values is not None
-
-    def _get_cache_value(self, key, default, type):
-        """Used internally be the accessor properties."""
-        if type is bool:
-            return key in self
-        if key in self:
-            value = self[key]
-            if value is None:
-                return default
-            elif type is not None:
-                try:
-                    value = type(value)
-                except ValueError:
-                    pass
-            return value
-
-    def _set_cache_value(self, key, value, type):
-        """Used internally be the accessor properties."""
-        if type is bool:
-            if value:
-                self[key] = None
-            else:
-                self.pop(key, None)
-        else:
-            if value is not None:
-                self[key] = value
-            else:
-                self.pop(key, None)
+_multipart_boundary_re = re.compile('^[ -~]{0,200}[!-~]$')
 
-    def to_header(self):
-        """Convert the stored values into a cache control header."""
-        return dump_header(self)
-
-    def __str__(self):
-        return self.to_header()
-
-    def __repr__(self):
-        return '<%s %r>' % (
-            self.__class__.__name__,
-            self.to_header()
-        )
-
-    # make cache_property a staticmethod so that subclasses of
-    # `CacheControl` can use it for new properties.
-    cache_property = staticmethod(cache_property)
-
-
-class ETags(object):
-    """A set that can be used to check if one etag is present in a collection
-    of etags.
-    """
-
-    def __init__(self, strong_etags=None, weak_etags=None, star_tag=False):
-        self._strong = frozenset(not star_tag and strong_etags or ())
-        self._weak = frozenset(weak_etags or ())
-        self.star_tag = star_tag
-
-    def as_set(self, include_weak=False):
-        """Convert the `ETags` object into a python set.  Per default all the
-        weak etags are not part of this set."""
-        rv = set(self._strong)
-        if include_weak:
-            rv.update(self._weak)
-        return rv
-
-    def is_weak(self, etag):
-        """Check if an etag is weak."""
-        return etag in self._weak
-
-    def contains_weak(self, etag):
-        """Check if an etag is part of the set including weak and strong tags."""
-        return self.is_weak(etag) or self.contains(etag)
-
-    def contains(self, etag):
-        """Check if an etag is part of the set ignoring weak tags."""
-        if self.star_tag:
-            return True
-        return etag in self._strong
-
-    def contains_raw(self, etag):
-        """When passed a quoted tag it will check if this tag is part of the
-        set.  If the tag is weak it is checked against weak and strong tags,
-        otherwise weak only."""
-        etag, weak = unquote_etag(etag)
-        if weak:
-            return self.contains_weak(etag)
-        return self.contains(etag)
-
-    def to_header(self):
-        """Convert the etags set into a HTTP header string."""
-        if self.star_tag:
-            return '*'
-        return ', '.join(
-            ['"%s"' % x for x in self._strong] +
-            ['w/"%s"' % x for x in self._weak]
-        )
-
-    def __call__(self, etag=None, data=None, include_weak=False):
-        if [etag, data].count(None) != 1:
-            raise TypeError('either tag or data required, but at least one')
-        if etag is None:
-            etag = generate_etag(data)
-        if include_weak:
-            if etag in self._weak:
-                return True
-        return etag in self._strong
-
-    def __nonzero__(self):
-        return bool(self.star_tag or self._strong)
-
-    def __str__(self):
-        return self.to_header()
-
-    def __iter__(self):
-        return iter(self._strong)
-
-    def __contains__(self, etag):
-        return self.contains(etag)
-
-    def __repr__(self):
-        return '<%s %r>' % (self.__class__.__name__, str(self))
-
-
-class Authorization(dict):
-    """Represents an `Authorization` header sent by the client.  You should
-    not create this kind of object yourself but use it when it's returned by
-    the `parse_authorization_header` function.
-
-    This object is a dict subclass and can be altered by setting dict items
-    but it should be considered immutable as it's returned by the client and
-    not meant for modifications.
-    """
-
-    def __init__(self, auth_type, data=None):
-        dict.__init__(self, data or {})
-        self.type = auth_type
+_entity_headers = frozenset([
+    'allow', 'content-encoding', 'content-language', 'content-length',
+    'content-location', 'content-md5', 'content-range', 'content-type',
+    'expires', 'last-modified'
+])
+_hop_by_pop_headers = frozenset([
+    'connection', 'keep-alive', 'proxy-authenticate',
+    'proxy-authorization', 'te', 'trailers', 'transfer-encoding',
+    'upgrade'
+])
 
-    username = property(lambda x: x.get('username'), doc='''
-        The username transmitted.  This is set for both basic and digest
-        auth all the time.''')
-    password = property(lambda x: x.get('password'), doc='''
-        When the authentication type is basic this is the password
-        transmitted by the client, else `None`.''')
-    realm = property(lambda x: x.get('realm'), doc='''
-        This is the server realm send back for digest auth.  For HTTP
-        digest auth.''')
-    nonce = property(lambda x: x.get('nonce'), doc='''
-        The nonce the server send for digest auth, send back by the client.
-        A nonce should be unique for every 401 response for HTTP digest
-        auth.''')
-    uri = property(lambda x: x.get('uri'), doc='''
-        The URI from Request-URI of the Request-Line; duplicated because
-        proxies are allowed to change the Request-Line in transit.  HTTP
-        digest auth only.''')
-    nc = property(lambda x: x.get('nc'), doc='''
-        The nonce count value transmitted by clients if a qop-header is
-        also transmitted.  HTTP digest auth only.''')
-    cnonce = property(lambda x: x.get('cnonce'), doc='''
-        If the server sent a qop-header in the ``WWW-Authenticate``
-        header, the client has to provide this value for HTTP digest auth.
-        See the RFC for more details.''')
-    response = property(lambda x: x.get('response'), doc='''
-        A string of 32 hex digits computed as defined in RFC 2617, which
-        proves that the user knows a password.  Digest auth only.''')
-    opaque = property(lambda x: x.get('opaque'), doc='''
-        The opaque header from the server returned unchanged by the client.
-        It is recommended that this string be base64 or hexadecimal data.
-        Digest auth only.''')
-
-    def qop(self):
-        """Indicates what "quality of protection" the client has applied to
-        the message for HTTP digest auth."""
-        def on_update(header_set):
-            if not header_set and name in self:
-                del self['qop']
-            elif header_set:
-                self['qop'] = header_set.to_header()
-        return parse_set_header(self.get('qop'), on_update)
-    qop = property(qop, doc=qop.__doc__)
-
-
-class WWWAuthenticate(_UpdateDict):
-    """Provides simple access to `WWW-Authenticate` headers."""
-
-    #: list of keys that require quoting in the generated header
-    _require_quoting = frozenset(['domain', 'nonce', 'opaque', 'realm'])
-
-    def __init__(self, auth_type=None, values=None, on_update=None):
-        _UpdateDict.__init__(self, values or (), on_update)
-        if auth_type:
-            self['__auth_type__'] = auth_type
-
-    def set_basic(self, realm='authentication required'):
-        """Clear the auth info and enable basic auth."""
-        dict.clear(self)
-        dict.update(self, {'__auth_type__': 'basic', 'realm': realm})
-        if self.on_update:
-            self.on_update(self)
-
-    def set_digest(self, realm, nonce, qop=('auth',), opaque=None,
-                   algorithm=None, stale=False):
-        """Clear the auth info and enable digest auth."""
-        d = {
-            '__auth_type__':    'digest',
-            'realm':            realm,
-            'nonce':            nonce,
-            'qop':              dump_header(qop)
-        }
-        if stale:
-            d['stale'] = 'TRUE'
-        if opaque is not None:
-            d['opaque'] = opaque
-        if algorithm is not None:
-            d['algorithm'] = algorithm
-        dict.clear(self)
-        dict.update(self, d)
-        if self.on_update:
-            self.on_update(self)
-
-    def to_header(self):
-        """Convert the stored values into a WWW-Authenticate header."""
-        d = dict(self)
-        auth_type = d.pop('__auth_type__', None) or 'basic'
-        return '%s %s' % (auth_type.title(), ', '.join([
-            '%s=%s' % (key, quote_header_value(value,
-                       allow_token=key not in self._require_quoting))
-            for key, value in d.iteritems()
-        ]))
-
-    def __str__(self):
-        return self.to_header()
-
-    def __repr__(self):
-        return '<%s %r>' % (
-            self.__class__.__name__,
-            self.to_header()
-        )
-
-    def auth_property(name, doc=None):
-        def _set_value(self, value):
-            if value is None:
-                self.pop(name, None)
-            else:
-                self[name] = str(value)
-        return property(lambda x: x.get(name), _set_value, doc=doc)
-
-    def _set_property(name, doc=None):
-        def fget(self):
-            def on_update(header_set):
-                if not header_set and name in self:
-                    del self[name]
-                elif header_set:
-                    self[name] = header_set.to_header()
-            return parse_set_header(self.get(name), on_update)
-        return property(fget, doc=doc)
-
-    type = auth_property('__auth_type__', doc='''
-        The type of the auth machanism.  HTTP currently specifies
-        `Basic` and `Digest`.''')
-    realm = auth_property('realm', doc='''
-        A string to be displayed to users so they know which username and
-        password to use.  This string should contain at least the name of
-        the host performing the authentication and might additionally
-        indicate the collection of users who might have access.''')
-    domain = _set_property('domain', doc='''
-        A list of URIs that define the protection space.  If a URI is an
-        absolte path, it is relative to the canonical root URL of the
-        server being accessed.''')
-    nonce = auth_property('nonce', doc='''
-        A server-specified data string which should be uniquely generated
-        each time a 401 response is made.  It is recommended that this
-        string be base64 or hexadecimal data.''')
-    opaque = auth_property('opaque', doc='''
-        A string of data, specified by the server, which should be returned
-        by the client unchanged in the Authorization header of subsequent
-        requests with URIs in the same protection space.  It is recommended
-        that this string be base64 or hexadecimal data.''')
-    algorithm = auth_property('algorithm', doc='''
-        A string indicating a pair of algorithms used to produce the digest
-        and a checksum.  If this is not present it is assumed to be "MD5".
-        If the algorithm is not understood, the challenge should be ignored
-        (and a different one used, if there is more than one).''')
-    qop = _set_property('qop', doc='''
-        A set of quality-of-privacy modifies such as auth and auth-int.''')
-
-    def _get_stale(self):
-        val = self.get('stale')
-        if val is not None:
-            return val.lower() == 'true'
-    def _set_stale(self, value):
-        if value is None:
-            self.pop('stale', None)
-        else:
-            self['stale'] = value and 'TRUE' or 'FALSE'
-    stale = property(_get_stale, _set_stale, doc='''
-        A flag, indicating that the previous request from the client was
-        rejected because the nonce value was stale.''')
-    del _get_stale, _set_stale
-
-    # make auth_property a staticmethod so that subclasses of
-    # `WWWAuthenticate` can use it for new properties.
-    auth_property = staticmethod(auth_property)
-    del _set_property
+#: HTTP transfer encodings that we support for multipart messages and that
+#: are also available in Python.
+_supported_multipart_encodings = frozenset(['base64', 'quoted-printable'])
 
 
 def quote_header_value(value, extra_chars='', allow_token=True):
-    """Quote a header value if necessary."""
+    """Quote a header value if necessary.
+
+    .. versionadded:: 0.5
+
+    :param value: the value to quote.
+    :param extra_chars: a list of extra characters to skip quoting.
+    :param allow_token: if this is enabled token values are returned
+                        unchanged.
+    """
     value = str(value)
     if allow_token:
         token_chars = _token_chars | set(extra_chars)
@@ -595,14 +70,50 @@
     return '"%s"' % value.replace('\\', '\\\\').replace('"', '\\"')
 
 
+def unquote_header_value(value):
+    r"""Unquotes a header value.  (Reversal of :func:`quote_header_value`).
+    This does not use the real unquoting rules but instead reverses what
+    browsers actually produce when quoting.
+
+    .. versionadded:: 0.5
+
+    :param value: the header value to unquote.
+    """
+    if value and value[0] == value[-1] == '"':
+        # this is not the real unquoting, but fixing this so that the
+        # RFC is met will result in bugs with internet explorer and
+        # probably some other browsers as well.  IE for example is
+        # uploading files with "C:\foo\bar.txt" as filename
+        value = value[1:-1].replace('\\\\', '\\').replace('\\"', '"')
+    return value
+
+
+def dump_options_header(header, options):
+    """The reverse function to :func:`parse_options_header`.
+
+    :param header: the header to dump
+    :param options: a dict of options to append.
+    """
+    segments = []
+    if header is not None:
+        segments.append(header)
+    for key, value in options.iteritems():
+        if value is None:
+            segments.append(key)
+        else:
+            segments.append('%s=%s' % (key, quote_header_value(value)))
+    return '; '.join(segments)
+
+
 def dump_header(iterable, allow_token=True):
     """Dump an HTTP header again.  This is the reversal of
-    `parse_list_header`, `parse_set_header` and `parse_dict_header`.  This
-    also quotes strings that include an equals sign unless you pass it as dict
-    of key, value pairs.
+    :func:`parse_list_header`, :func:`parse_set_header` and
+    :func:`parse_dict_header`.  This also quotes strings that include an
+    equals sign unless you pass it as a dict of key, value pairs.
 
-    The `allow_token` parameter can be set to `False` to disallow tokens as
-    values.  If this is enabled all values are quoted.
+    :param iterable: the iterable or dict of values to quote.
+    :param allow_token: if set to `False` tokens as values are disallowed.
+                        See :func:`quote_header_value` for more details.
     """
     if isinstance(iterable, dict):
         items = []
@@ -627,19 +138,25 @@
     the list may include quoted-strings.  A quoted-string could
     contain a comma.  A non-quoted string could have quotes in the
     middle.  Quotes are removed automatically after parsing.
+
+    :param value: a string with a list header.
+    :return: list
     """
     result = []
     for item in _parse_list_header(value):
         if item[:1] == item[-1:] == '"':
-            item = item[1:-1]
+            item = unquote_header_value(item[1:-1])
         result.append(item)
     return result
 
 
 def parse_dict_header(value):
-    """Parse lists of key, value paits as described by RFC 2068 Section 2 and
+    """Parse lists of key, value pairs as described by RFC 2068 Section 2 and
     convert them into a python dict.  If there is no value for a key it will
     be `None`.
+
+    :param value: a string with a dict header.
+    :return: dict
     """
     result = {}
     for item in _parse_list_header(value):
@@ -648,21 +165,73 @@
             continue
         name, value = item.split('=', 1)
         if value[:1] == value[-1:] == '"':
-            value = value[1:-1]
+            value = unquote_header_value(value[1:-1])
         result[name] = value
     return result
 
 
-def parse_accept_header(value):
+def parse_options_header(value):
+    """Parse a ``Content-Type`` like header into a tuple with the content
+    type and the options:
+
+    >>> parse_options_header('Content-Type: text/html; mimetype=text/html')
+    ('Content-Type: text/html', {'mimetype': 'text/html'})
+
+    This should not be used to parse ``Cache-Control`` like headers that use
+    a slightly different format.  For these headers use the
+    :func:`parse_dict_header` function.
+
+    .. versionadded:: 0.5
+
+    :param value: the header to parse.
+    :return: (str, options)
+    """
+    def _tokenize(string):
+        while string[:1] == ';':
+            string = string[1:]
+            end = string.find(';')
+            while end > 0 and string.count('"', 0, end) % 2:
+                end = string.find(';', end + 1)
+            if end < 0:
+                end = len(string)
+            value = string[:end]
+            yield value.strip()
+            string = string[end:]
+
+    parts = _tokenize(';' + value)
+    name = parts.next()
+    extra = {}
+    for part in parts:
+        if '=' in part:
+            key, value = part.split('=', 1)
+            extra[key.strip().lower()] = unquote_header_value(value.strip())
+        else:
+            extra[part.strip()] = None
+    return name, extra
+
+
+def parse_accept_header(value, accept_class=None):
     """Parses an HTTP Accept-* header.  This does not implement a completely
     valid algorithm but one that supports at least value and quality
     extraction.
 
-    Returns a new `Accept` object (basicly a list of ``(value, quality)``
+    Returns a new :class:`Accept` object (basically a list of ``(value, quality)``
     tuples sorted by the quality with some additional accessor methods).
+
+    The second parameter can be a subclass of :class:`Accept` that is created
+    with the parsed values and returned.
+
+    :param value: the accept header string to be parsed.
+    :param accept_class: the wrapper class for the return value (can be
+                         :class:`Accept` or a subclass thereof)
+    :return: an instance of `accept_class`.
     """
+    if accept_class is None:
+        accept_class = Accept
+
     if not value:
-        return Accept(None)
+        return accept_class(None)
+
     result = []
     for match in _accept_re.finditer(value):
         quality = match.group(2)
@@ -671,23 +240,41 @@
         else:
             quality = max(min(float(quality), 1), 0)
         result.append((match.group(1), quality))
-    return Accept(result)
+    return accept_class(result)
 
 
-def parse_cache_control_header(value, on_update=None):
+def parse_cache_control_header(value, on_update=None, cache_control_class=None):
     """Parse a cache control header.  The RFC differentiates between response
     and request cache control; this method does not.  It's your responsibility
     to not use the wrong control statements.
+
+    .. versionadded:: 0.5
+       The `cache_control_class` was added.  If not specified an immutable
+       :class:`RequestCacheControl` is returned.
+
+    :param value: a cache control header to be parsed.
+    :param on_update: an optional callable that is called every time a
+                      value on the :class:`CacheControl` object is changed.
+    :param cache_control_class: the class for the returned object.  By default
+                                :class:`RequestCacheControl` is used.
+    :return: a `cache_control_class` object.
     """
+    if cache_control_class is None:
+        cache_control_class = RequestCacheControl
     if not value:
-        return CacheControl(None, on_update)
-    return CacheControl(parse_dict_header(value), on_update)
+        return cache_control_class(None, on_update)
+    return cache_control_class(parse_dict_header(value), on_update)
 
 
 def parse_set_header(value, on_update=None):
-    """Parse a set like header and return a `HeaderSet` object.  The return
-    value is an object that treats the items case insensitive and keeps the
-    order of the items.
+    """Parse a set-like header and return a :class:`HeaderSet` object.  The
+    return value is an object that treats the items case-insensitively and
+    keeps the order of the items.
+
+    :param value: a set header to be parsed.
+    :param on_update: an optional callable that is called every time a
+                      value on the :class:`HeaderSet` object is changed.
+    :return: a :class:`HeaderSet`
     """
     if not value:
         return HeaderSet(None, on_update)
@@ -697,7 +284,10 @@
 def parse_authorization_header(value):
     """Parse an HTTP basic/digest authorization header transmitted by the web
     browser.  The return value is either `None` if the header was invalid or
-    not given, otherwise an `Authorization` object.
+    not given, otherwise an :class:`Authorization` object.
+
+    :param value: the authorization header to parse.
+    :return: an :class:`Authorization` object or `None`.
     """
     if not value:
         return
@@ -723,8 +313,14 @@
 
 
 def parse_www_authenticate_header(value, on_update=None):
-    """Parse an HTTP WWW-Authenticate header into a `WWWAuthenticate`
-    object."""
+    """Parse an HTTP WWW-Authenticate header into a :class:`WWWAuthenticate`
+    object.
+
+    :param value: a WWW-Authenticate header to parse.
+    :param on_update: an optional callable that is called every time a
+                      value on the :class:`WWWAuthenticate` object is changed.
+    :return: a :class:`WWWAuthenticate` object.
+    """
     if not value:
         return WWWAuthenticate(on_update=on_update)
     try:
@@ -737,7 +333,11 @@
 
 
 def quote_etag(etag, weak=False):
-    """Quote an etag."""
+    """Quote an etag.
+
+    :param etag: the etag to quote.
+    :param weak: set to `True` to tag it "weak".
+    """
     if '"' in etag:
         raise ValueError('invalid etag')
     etag = '"%s"' % etag
@@ -747,7 +347,16 @@
 
 
 def unquote_etag(etag):
-    """Unquote a single etag.  Return a ``(etag, weak)`` tuple."""
+    """Unquote a single etag:
+
+    >>> unquote_etag('w/"bar"')
+    ('bar', True)
+    >>> unquote_etag('"bar"')
+    ('bar', False)
+
+    :param etag: the etag identifier to unquote.
+    :return: a ``(etag, weak)`` tuple.
+    """
     if not etag:
         return None, None
     etag = etag.strip()
@@ -761,7 +370,11 @@
 
 
 def parse_etags(value):
-    """Parse and etag header.  Returns an `ETags` object."""
+    """Parse an etag header.
+
+    :param value: the tag header to parse
+    :return: an :class:`ETags` object.
+    """
     if not value:
         return ETags()
     strong = []
@@ -800,18 +413,211 @@
         Sun Nov  6 08:49:37 1994       ; ANSI C's asctime() format
 
     If parsing fails the return value is `None`.
+
+    :param value: a string with a supported date format.
+    :return: a :class:`datetime.datetime` object.
     """
     if value:
-        t = rfc822.parsedate_tz(value.strip())
+        t = parsedate_tz(value.strip())
         if t is not None:
             # if no timezone is part of the string we assume UTC
             if t[-1] is None:
                 t = t[:-1] + (0,)
-            return datetime.utcfromtimestamp(rfc822.mktime_tz(t))
+            return datetime.utcfromtimestamp(mktime_tz(t))
+
+
+def default_stream_factory(total_content_length, filename, content_type,
+                           content_length=None):
+    """The stream factory that is used by default."""
+    if total_content_length > 1024 * 500:
+        return TemporaryFile('wb+')
+    return StringIO()
+
+
+def _make_stream_factory(factory):
+    """This exists for backwards compatibility and will go away in 0.6."""
+    args, _, _, defaults = inspect.getargspec(factory)
+    required_args = len(args) - len(defaults or ())
+    if inspect.ismethod(factory):
+        required_args -= 1
+    if required_args != 0:
+        return factory
+    from warnings import warn
+    warn(DeprecationWarning('stream factory passed to `parse_form_data` '
+                            'uses deprecated invocation API.'), stacklevel=4)
+    return lambda *a: factory()
+
+
+def _fix_ie_filename(filename):
+    """Internet Explorer 6 transmits the full file path as the filename if
+    a file is uploaded.  This function strips the path if the filename
+    looks like an absolute Windows path.
+    """
+    if filename[1:3] == ':\\' or filename[:2] == '\\\\':
+        return filename.split('\\')[-1]
+    return filename
+
+
+def _line_parse(line):
+    """Removes line ending characters and returns a tuple (`stripped_line`,
+    `is_terminated`).
+    """
+    if line[-2:] == '\r\n':
+        return line[:-2], True
+    elif line[-1:] in '\r\n':
+        return line[:-1], True
+    return line, False
+
+
+def parse_multipart(file, boundary, content_length, stream_factory=None,
+                    charset='utf-8', errors='ignore', buffer_size=10 * 1024,
+                    max_form_memory_size=None):
+    """Parse a multipart/form-data stream.  This is invoked by
+    :func:`utils.parse_form_data` if the content type matches.  Currently it
+    exists for internal usage only, but could be exposed as a separate
+    function if it turns out to be useful and if we consider the API stable.
+    """
+    # XXX: this function does not support multipart/mixed.  I don't know of
+    #      any browser that supports this, but it should be implemented
+    #      nonetheless.
+
+    # make sure the buffer size is divisible by four so that we can base64
+    # decode chunk by chunk
+    assert buffer_size % 4 == 0, 'buffer size has to be divisible by 4'
+    # also the buffer size has to be at least 1024 bytes long or long headers
+    # will freak out the system
+    assert buffer_size >= 1024, 'buffer size has to be at least 1KB'
+
+    if stream_factory is None:
+        stream_factory = default_stream_factory
+    else:
+        stream_factory = _make_stream_factory(stream_factory)
+
+    if not boundary:
+        raise ValueError('Missing boundary')
+    if not is_valid_multipart_boundary(boundary):
+        raise ValueError('Invalid boundary: %s' % boundary)
+    if len(boundary) > buffer_size:
+        raise ValueError('Boundary longer than buffer size')
+
+    total_content_length = content_length
+    next_part = '--' + boundary
+    last_part = next_part + '--'
+
+    form = []
+    files = []
+    in_memory = 0
+
+    # convert the file into a limited stream with iteration capabilities
+    iterator = _ChunkIter(file, content_length, buffer_size)
+
+    try:
+        terminator = iterator.next().strip()
+        if terminator != next_part:
+            raise ValueError('Expected boundary at start of multipart data')
+
+        while terminator != last_part:
+            headers = parse_multipart_headers(iterator)
+            disposition = headers.get('content-disposition')
+            if disposition is None:
+                raise ValueError('Missing Content-Disposition header')
+            disposition, extra = parse_options_header(disposition)
+            filename = extra.get('filename')
+            name = extra.get('name')
+            transfer_encoding = headers.get('content-transfer-encoding')
+
+            content_type = headers.get('content-type')
+            if content_type is None:
+                is_file = False
+            else:
+                content_type = parse_options_header(content_type)[0]
+                is_file = True
+
+            if is_file:
+                if filename is not None:
+                    filename = _fix_ie_filename(_decode_unicode(filename,
+                                                                charset,
+                                                                errors))
+                try:
+                    content_length = int(headers['content-length'])
+                except (KeyError, ValueError):
+                    content_length = 0
+                stream = stream_factory(total_content_length, content_type,
+                                        filename, content_length)
+            else:
+                stream = StringIO()
+
+            newline_length = 0
+            for line in iterator:
+                if line[:2] == '--':
+                    terminator = line.rstrip()
+                    if terminator in (next_part, last_part):
+                        break
+                if transfer_encoding in _supported_multipart_encodings:
+                    try:
+                        line = line.decode(transfer_encoding)
+                    except Exception:
+                        raise ValueError('could not decode transfer-encoded chunk')
+                newline_length = line[-2:] == '\r\n' and 2 or 1
+                stream.write(line)
+                if not is_file and max_form_memory_size is not None:
+                    in_memory += len(line)
+                    if in_memory > max_form_memory_size:
+                        from werkzeug.exceptions import RequestEntityTooLarge
+                        raise RequestEntityTooLarge()
+            else:
+                raise ValueError('unexpected end of part')
+
+            # chop off the trailing line terminator and rewind
+            stream.seek(-newline_length, 1)
+            stream.truncate()
+            stream.seek(0)
+
+            if is_file:
+                files.append((name, FileStorage(stream, filename, name,
+                                                content_type,
+                                                content_length)))
+            else:
+                form.append((name, _decode_unicode(stream.read(),
+                                                   charset, errors)))
+    finally:
+        # make sure the whole input stream is read
+        iterator.exhaust()
+
+    return form, files
+
+
+def parse_multipart_headers(iterable):
+    """Parses multipart headers from an iterable that yields lines (including
+    the trailing newline symbol.
+    """
+    result = []
+    for line in iterable:
+        line, line_terminated = _line_parse(line)
+        if not line_terminated:
+            raise ValueError('unexpected end of line in multipart header')
+        if not line:
+            break
+        elif line[0] in ' \t' and result:
+            key, value = result[-1]
+            result[-1] = (key, value + '\n ' + line[1:])
+        else:
+            parts = line.split(':', 1)
+            if len(parts) == 2:
+                result.append((parts[0].strip(), parts[1].strip()))
+    return Headers(result)
 
 
 def is_resource_modified(environ, etag=None, data=None, last_modified=None):
-    """Convenience method for conditional requests."""
+    """Convenience method for conditional requests.
+
+    :param environ: the WSGI environment of the request to be checked.
+    :param etag: the etag for the response for comparison.
+    :param data: or alternatively the data of the response to automatically
+                 generate an etag using :func:`generate_etag`.
+    :param last_modified: an optional date of the last modification.
+    :return: `True` if the resource was modified, otherwise `False`.
+    """
     if etag is None and data is not None:
         etag = generate_etag(data)
     elif data is not None:
@@ -832,3 +638,85 @@
             unmodified = if_none_match.contains_raw(etag)
 
     return not unmodified
+
+
+def remove_entity_headers(headers):
+    """Remove all entity headers from a list or :class:`Headers` object.  This
+    operation works in-place.
+
+    :param headers: a list or :class:`Headers` object.
+    """
+    headers[:] = [(key, value) for key, value in headers if
+                  not is_entity_header(key)]
+
+
+def remove_hop_by_hop_headers(headers):
+    """Remove all HTTP/1.1 "Hop-by-Hop" headers from a list or
+    :class:`Headers` object.  This operation works in-place.
+
+    .. versionadded:: 0.5
+
+    :param headers: a list or :class:`Headers` object.
+    """
+    headers[:] = [(key, value) for key, value in headers if
+                  not is_hop_by_hop_header(key)]
+
+
+def is_entity_header(header):
+    """Check if a header is an entity header.
+
+    .. versionadded:: 0.5
+
+    :param header: the header to test.
+    :return: `True` if it's an entity header, `False` otherwise.
+    """
+    return header.lower() in _entity_headers
+
+
+def is_hop_by_hop_header(header):
+    """Check if a header is an HTTP/1.1 "Hop-by-Hop" header.
+
+    .. versionadded:: 0.5
+
+    :param header: the header to test.
+    :return: `True` if it's a hop-by-hop header, `False` otherwise.
+    """
+    return header.lower() in _hop_by_pop_headers
+
+
+def is_valid_multipart_boundary(boundary):
+    """Checks if the string given is a valid multipart boundary."""
+    return _multipart_boundary_re.match(boundary) is not None
+
+
+# circular dependency fun
+from werkzeug.utils import LimitedStream, FileStorage
+from werkzeug.datastructures import Headers, Accept, RequestCacheControl, \
+     ResponseCacheControl, HeaderSet, ETags, Authorization, \
+     WWWAuthenticate
+
+
+class _ChunkIter(LimitedStream):
+    """An iterator that yields chunks from the file.  This iterator
+    does not end!  It will happily continue yielding empty strings
+    if the limit is reached.  This is intentional.
+    """
+
+    def __init__(self, stream, limit, buffer_size):
+        LimitedStream.__init__(self, stream, limit, True)
+        self._buffer = []
+        self._buffer_size = buffer_size
+
+    def next(self):
+        if len(self._buffer) > 1:
+            return self._buffer.pop(0)
+        chunks = self.read(self._buffer_size).splitlines(True)
+        first_chunk = self._buffer and self._buffer[0] or ''
+        if chunks:
+            first_chunk += chunks.pop(0)
+        self._buffer = chunks
+        return first_chunk
+
+
+# backwards compatible imports
+from werkzeug.datastructures import MIMEAccept, CharsetAccept, LanguageAccept
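
A minimal usage sketch of a few of the helpers introduced in ``werkzeug.http``
above; the header values below are made up for illustration::

    from werkzeug.http import parse_options_header, dump_options_header, \
         quote_etag, unquote_etag, is_entity_header, remove_hop_by_hop_headers

    # split a Content-Type style value into the bare value and its options
    value, options = parse_options_header('text/html; charset=utf-8')
    assert value == 'text/html' and options == {'charset': 'utf-8'}

    # ... and put it back together again
    assert dump_options_header('text/html', {'charset': 'utf-8'}) == \
           'text/html; charset=utf-8'

    # the etag helpers work on (etag, weak) pairs
    assert unquote_etag(quote_etag('abc', weak=True)) == ('abc', True)

    # classify headers and drop hop-by-hop ones in place
    assert is_entity_header('Content-Length')
    headers = [('Connection', 'close'), ('Content-Type', 'text/plain')]
    remove_hop_by_hop_headers(headers)
    assert headers == [('Content-Type', 'text/plain')]
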
--- a/MoinMoin/support/werkzeug/local.py	Fri Feb 27 23:30:37 2009 +0100
+++ b/MoinMoin/support/werkzeug/local.py	Sat Feb 28 00:08:31 2009 +0100
@@ -10,12 +10,12 @@
     application is not thread safe any longer.
 
     The python standard library comes with a utility called "thread locals".
-    A thread local is a global object where you can put stuff on and get back
+    A thread local is a global object where you can put stuff in and get it back
     later in a thread safe way.  That means whenever you set or get an object
     to / from a thread local object the thread local object checks in which
     thread you are and delivers the correct value.
 
-    This however has a few disadvantages.  For example beside threads there
+    This, however, has a few disadvantages.  For example, besides threads there
     are other ways to handle concurrency in Python.  A very popular approach
     is greenlets.  Also, whether every request gets its own thread is not
     guaranteed in WSGI.  It could be that a request is reusing a thread from
@@ -64,14 +64,15 @@
     context.
 
 
-    :copyright: 2007-2008 by Armin Ronacher.
+    :copyright: (c) 2009 by the Werkzeug Team, see AUTHORS for more details.
     :license: BSD, see LICENSE for more details.
 """
 try:
     from py.magic import greenlet
     get_current_greenlet = greenlet.getcurrent
     del greenlet
-except (RuntimeError, ImportError):
+except:
+    # catch all, py.* fails with so many different errors.
     get_current_greenlet = int
 try:
     from thread import get_ident as get_current_thread, allocate_lock
@@ -83,11 +84,11 @@
 
 # get the best ident function.  if greenlets are not installed we can
 # safely just use the builtin thread function and save a python method call
-# and the cost of caculating a hash.
+# and the cost of calculating a hash.
 if get_current_greenlet is int:
     get_ident = get_current_thread
 else:
-    get_ident = lambda: hash((get_current_thread(), get_current_greenlet()))
+    get_ident = lambda: (get_current_thread(), get_current_greenlet())
 
 
 class Local(object):
@@ -147,11 +148,10 @@
     def __init__(self, locals=None):
         if locals is None:
             self.locals = []
+        elif isinstance(locals, Local):
+            self.locals = [locals]
         else:
-            try:
-                self.locals = list(locals)
-            except TypeError:
-                self.locals = [locals]
+            self.locals = list(locals)
 
     def get_ident(self):
         """Return the context identifier the local objects use internally for
@@ -254,7 +254,7 @@
 
     def __unicode__(self):
         try:
-            return unicode(self.__current_oject)
+            return unicode(self.__current_object)
         except RuntimeError:
             return repr(self)
 
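With the changes above the context identifier is a plain ``(thread, greenlet)``
tuple and ``LocalManager`` accepts a single ``Local`` directly.  A minimal
sketch of the usual pattern; the stored attribute name is illustrative and
``cleanup()`` comes from the unchanged part of the module::

    from werkzeug.local import Local, LocalManager

    local = Local()
    local_manager = LocalManager(local)    # a single Local is accepted now

    def application(environ, start_response):
        # each thread (or greenlet) sees its own value for this attribute
        local.request_path = environ.get('PATH_INFO', '/')
        try:
            start_response('200 OK', [('Content-Type', 'text/plain')])
            return [local.request_path]
        finally:
            # release the data stored for the current context
            local_manager.cleanup()
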
--- a/MoinMoin/support/werkzeug/routing.py	Fri Feb 27 23:30:37 2009 +0100
+++ b/MoinMoin/support/werkzeug/routing.py	Sat Feb 28 00:08:31 2009 +0100
@@ -5,30 +5,29 @@
 
     When it comes to combining multiple controller or view functions (however
     you want to call them) you need a dispatcher.  A simple way would be
-    applying regular expression tests on the ``PATH_INFO`` and call registered
-    callback functions that return the value then.
+    applying regular expression tests on the ``PATH_INFO`` and calling
+    registered callback functions that then return the value.
 
     This module implements a much more powerful system than simple regular
     expression matching because it can also convert values in the URLs and
     build URLs.
 
     Here is a simple example that creates a URL map for an application with
-    two subdomains (www and kb) and some URL rules::
-
-        m = Map([
-            # Static URLs
-            Rule('/', endpoint='static/index'),
-            Rule('/about', endpoint='static/about'),
-            Rule('/help', endpoint='static/help'),
+    two subdomains (www and kb) and some URL rules:
 
-            # Knowledge Base
-            Subdomain('kb', [
-                Rule('/', endpoint='kb/index'),
-                Rule('/browse/', endpoint='kb/browse'),
-                Rule('/browse/<int:id>/', endpoint='kb/browse'),
-                Rule('/browse/<int:id>/<int:page>', endpoint='kb/browse')
-            ])
-        ], default_subdomain='www')
+    >>> m = Map([
+    ...     # Static URLs
+    ...     Rule('/', endpoint='static/index'),
+    ...     Rule('/about', endpoint='static/about'),
+    ...     Rule('/help', endpoint='static/help'),
+    ...     # Knowledge Base
+    ...     Subdomain('kb', [
+    ...         Rule('/', endpoint='kb/index'),
+    ...         Rule('/browse/', endpoint='kb/browse'),
+    ...         Rule('/browse/<int:id>/', endpoint='kb/browse'),
+    ...         Rule('/browse/<int:id>/<int:page>', endpoint='kb/browse')
+    ...     ])
+    ... ], default_subdomain='www')
 
     If the application doesn't use subdomains it's perfectly fine to not set
     the default subdomain and not use the `Subdomain` rule factory.  The endpoint
@@ -48,12 +47,14 @@
     >>> c.build("kb/browse", dict(id=42, page=3))
     'http://kb.example.com/browse/42/3'
     >>> c.build("static/about")
-    u'/about'
-    >>> c.build("static/about", subdomain="kb")
-    'http://www.example.com/about'
+    '/about'
     >>> c.build("static/index", force_external=True)
     'http://www.example.com/'
 
+    >>> c = m.bind('example.com', subdomain='kb')
+    >>> c.build("static/about")
+    'http://www.example.com/about'
+
     The first argument to bind is the server name *without* the subdomain.
     By default it will assume that the script is mounted on the root, but
     often that's not the case so you can provide the real mount point as
@@ -91,21 +92,16 @@
     method is raised.
 
 
-    :copyright: 2007-2008 by Armin Ronacher, Leif K-Brooks,
+    :copyright: (c) 2009 by the Werkzeug Team, see AUTHORS for more details.
                              Thomas Johansson.
     :license: BSD, see LICENSE for more details.
 """
-import sys
 import re
 from urlparse import urljoin
 from itertools import izip
 
 from werkzeug.utils import url_encode, url_quote, redirect, format_string
 from werkzeug.exceptions import HTTPException, NotFound, MethodNotAllowed
-try:
-    set
-except NameError:
-    from sets import Set as set
 
 
 _rule_re = re.compile(r'''
@@ -183,7 +179,7 @@
     """Raise if the map requests a redirect. This is for example the case if
     `strict_slashes` is activated and a url requires a trailing slash.
 
-    The attribute `new_url` contains the absolute desitination url.
+    The attribute `new_url` contains the absolute destination url.
     """
     code = 301
 
@@ -243,7 +239,7 @@
             ])
         ])
 
-    All the rules except of the ``'#select_language'`` endpoint will now
+    All the rules except for the ``'#select_language'`` endpoint will now
     listen on a two-letter subdomain that holds the language code
     for the current request.
     """
@@ -374,7 +370,7 @@
 class Rule(RuleFactory):
     """A Rule represents one URL pattern.  There are some options for `Rule`
     that change the way it behaves and are passed to the `Rule` constructor.
-    Note that beside the rule-string all arguments *must* be keyword arguments
+    Note that besides the rule-string all arguments *must* be keyword arguments
     in order to not break the application on Werkzeug upgrades.
 
     `string`
@@ -435,7 +431,7 @@
         not specified the `Map` setting is used.
 
     `build_only`
-        Set this to true and the rule will never match but will create a URL
+        Set this to True and the rule will never match but will create a URL
         that can be built. This is useful if you have resources on a subdomain
         or folder that are not handled by the WSGI application (like static data).
 
@@ -609,7 +605,9 @@
         for key in set(values) - processed:
             query_vars[key] = unicode(values[key])
         if query_vars:
-            url += '?' + url_encode(query_vars, self.map.charset)
+            url += '?' + url_encode(query_vars, self.map.charset,
+                                    sort=self.map.sort_parameters,
+                                    key=self.map.sort_key)
 
         return subdomain, url
 
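Values passed to ``build`` that are not consumed by the rule are appended as a
query string through ``url_encode``, which (as the call above shows) now
accepts ``sort`` and ``key``.  A tiny sketch of that helper, assuming the
default ``&``-separated form encoding::

    from werkzeug.utils import url_encode

    # with sort=True the query parameters come out in a stable, sorted order
    url_encode({'page': '2', 'lang': 'en'}, sort=True)
    # -> 'lang=en&page=2'
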
@@ -757,12 +755,18 @@
     """This converter is the default converter and accepts any string but
     only one path segment.  Thus the string cannot include a slash.
 
-    Supported arguments:
+    This is the default converter.
 
-    - `minlength` - the minimum length of the string. must be greater
-      than 1.
-    - `maxlength` - the maximum length of the string.
-    - `length` - the exact length of that string.
+    Example::
+
+        Rule('/pages/<page>'),
+        Rule('/<string(length=2):lang_code>')
+
+    :param map: the :class:`Map`.
+    :param minlength: the minimum length of the string.  Must be greater
+                      than or equal to 1.
+    :param maxlength: the maximum length of the string.
+    :param length: the exact length of the string.
     """
 
     def __init__(self, map, minlength=1, maxlength=None, length=None):
@@ -786,6 +790,10 @@
     identifiers or unicode strings::
 
         Rule('/<any(about, help, imprint, u"class"):page_name>')
+
+    :param map: the :class:`Map`.
+    :param items: this function accepts the possible items as positional
+                  arguments.
     """
 
     def __init__(self, map, *items):
@@ -794,7 +802,18 @@
 
 
 class PathConverter(BaseConverter):
-    """Like the default string converter, but it also matches slashes."""
+    """Like the default :class:`UnicodeConverter`, but it also matches
+    slashes.  This is useful for wikis and similar applications::
+
+        Rule('/<path:wikipage>')
+        Rule('/<path:wikipage>/edit')
+
+    :param map: the :class:`Map`.
+    :param minlength: the minimum length of the string.  Must be greater
+                      than or equal to 1.
+    :param maxlength: the maximum length of the string.
+    :param length: the exact length of the string.
+    """
     regex = '[^/].*?'
     is_greedy = True
     weight = 50
@@ -833,14 +852,15 @@
 
         Rule('/page/<int:page>')
 
-    Supported arguments:
+    This converter does not support negative values.
 
-    - `fixed_digits` - the number of fixed digits in the URL. If you
-      set this to ``4`` for example, the application will only match
-      if the url looks like ``/0001/``.  The default is
-      variable length.
-    - `min` - the minimal value.
-    - `max` - the maximal value.
+    :param map: the :class:`Map`.
+    :param fixed_digits: the number of fixed digits in the URL.  If you set
+                         this to ``4`` for example, the application will
+                         only match if the url looks like ``/0001/``.  The
+                         default is variable length.
+    :param min: the minimal value.
+    :param max: the maximal value.
     """
     regex = r'\d+'
     num_convert = int
@@ -851,10 +871,11 @@
 
         Rule('/probability/<float:probability>')
 
-    Supported arguments:
+    This converter does not support negative values.
 
-    - `min` - the minimal value.
-    - `max` - the maximal value.
+    :param map: the :class:`Map`.
+    :param min: the minimal value.
+    :param max: the maximal value.
     """
     regex = r'\d+\.\d+'
     num_convert = float
@@ -868,26 +889,30 @@
     parameters.  Some of the configuration values are only stored on the
     `Map` instance since those affect all rules; others are just defaults
     and can be overridden for each rule.  Note that you have to specify all
-    arguments beside the `rules` as keywords arguments!
+    arguments besides the `rules` as keyword arguments!
+
+    :param rules: sequence of url rules for this map.
+    :param default_subdomain: The default subdomain for rules without a
+                              subdomain defined.
+    :param charset: charset of the url.  Defaults to ``"utf-8"``
+    :param strict_slashes: Take care of trailing slashes.
+    :param redirect_defaults: This will redirect to the default rule if it
+                              wasn't visited that way.  This helps create
+                              unique URLs.
+    :param converters: A dict of converters that adds additional converters
+                       to the list of converters. If you redefine one
+                       converter this will override the original one.
+    :param sort_parameters: If set to `True` the url parameters are sorted.
+                            See `url_encode` for more details.
+    :param sort_key: The sort key function for `url_encode`.
+
+    .. versionadded:: 0.5
+        `sort_parameters` and `sort_key` were added.
     """
 
     def __init__(self, rules=None, default_subdomain='', charset='utf-8',
                  strict_slashes=True, redirect_defaults=True,
-                 converters=None):
-        """Initializes the new URL map.
-
-        :param rules: sequence of url rules for this map.
-        :param default_subdomain: The default subdomain for rules without a
-                                  subdomain defined.
-        :param charset: charset of the url. defaults to ``"utf-8"``
-        :param strict_slashes: Take care of trailing slashes.
-        :param redirect_defaults: This will redirect to the default rule if it
-                                  wasn't visited that way. This helps creating
-                                  unique URLs.
-        :param converters: A dict of converters that adds additional converters
-                           to the list of converters. If you redefine one
-                           converter this will override the original one.
-        """
+                 converters=None, sort_parameters=False, sort_key=None):
         self._rules = []
         self._rules_by_endpoint = {}
         self._remap = True
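
As a rough sketch of the keyword-only map options documented above (not part
of the patch itself): `ListConverter` below is a hypothetical converter
written only for this example::

    from werkzeug.routing import Map, Rule, BaseConverter

    class ListConverter(BaseConverter):
        """Hypothetical converter matching comma separated values."""
        regex = r'[^/]+(?:,[^/]+)*'

        def to_python(self, value):
            return value.split(',')

        def to_url(self, value):
            return ','.join(value)

    url_map = Map([Rule('/tags/<list:tags>', endpoint='tags')],
                  converters={'list': ListConverter},
                  sort_parameters=True,
                  sort_key=lambda item: item[0])
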
@@ -901,6 +926,9 @@
         if converters:
             self.converters.update(converters)
 
+        self.sort_parameters = sort_parameters
+        self.sort_key = sort_key
+
         for rulefactory in rules or ():
             self.add(rulefactory)
 
@@ -911,6 +939,11 @@
         you want to wrap the builder a bit so that the current language
         code is automatically added if not provided but endpoints expect
         it.
+
+        :param endpoint: the endpoint to check.
+        :param arguments: this function accepts one or more arguments
+                          as positional arguments.  Each one of them is
+                          checked.
         """
         self.update()
         arguments = set(arguments)
@@ -920,7 +953,12 @@
         return False
 
     def iter_rules(self, endpoint=None):
-        """Iterate over all rules or the rules of an endpoint."""
+        """Iterate over all rules or the rules of an endpoint.
+
+        :param endpoint: if provided only the rules for that endpoint
+                         are returned.
+        :return: an iterator
+        """
         if endpoint is not None:
             return iter(self._rules_by_endpoint[endpoint])
         return iter(self._rules)
@@ -928,6 +966,8 @@
     def add(self, rulefactory):
         """Add a new rule or factory to the map and bind it.  Requires that the
         rule is not bound to another map.
+
+        :param rulefactory: a :class:`Rule` or :class:`RuleFactory`
         """
         for rule in rulefactory.get_rules(self):
             rule.bind(self)
@@ -935,21 +975,16 @@
             self._rules_by_endpoint.setdefault(rule.endpoint, []).append(rule)
         self._remap = True
 
-    def add_rule(self, rule):
-        from warnings import warn
-        warn(DeprecationWarning('use map.add instead of map.add_rule now'))
-        return self.add(rule)
-
     def bind(self, server_name, script_name=None, subdomain=None,
              url_scheme='http', default_method='GET', path_info=None):
-        """Return a new `MapAdapter` with the details specified to the call.
-        Note that `script_name` will default to ``'/'`` if not further
+        """Return a new :class:`MapAdapter` with the details specified to the
+        call.  Note that `script_name` will default to ``'/'`` if not further
         specified or `None`.  The `server_name` at least is a requirement
         because the HTTP RFC requires absolute URLs for redirects and so all
         redirect exceptions raised by Werkzeug will contain the full canonical
         URL.
 
-        If no path_info is passed to match() it will use the default path
+        If no path_info is passed to :meth:`match` it will use the default path
         info passed to bind.  While this doesn't really make sense for
         manual bind calls, it's useful if you bind a map to a WSGI
         environment which already contains the path info.
@@ -965,10 +1000,9 @@
         return MapAdapter(self, server_name, script_name, subdomain,
                           url_scheme, path_info, default_method)
 
-    def bind_to_environ(self, environ, server_name=None, subdomain=None,
-                        calculate_subdomain=False):
-        """Like `bind` but you can pass it an WSGI environment and it will
-        fetch the information from that directory.  Note that because of
+    def bind_to_environ(self, environ, server_name=None, subdomain=None):
+        """Like :meth:`bind` but you can pass it an WSGI environment and it
+        will fetch the information from that directory.  Note that because of
         limitations in the protocol there is no way to get the current
         subdomain and real `server_name` from the environment.  If you don't
         provide it, Werkzeug will use `SERVER_NAME` and `SERVER_PORT` (or
@@ -981,11 +1015,20 @@
         in the wsgi `environ` is ``'staging.dev.example.com'`` the calculated
         subdomain will be ``'staging.dev'``.
 
-        If the object passed as environ as an environ attribute, the value of
+        If the object passed as environ has an environ attribute, the value of
         this attribute is used instead.  This allows you to pass request
         objects.  Additionally `PATH_INFO` is added as a default to the
-        `MapAdapter` so that you don't have to pass the path info to the
-        match method.
+        :class:`MapAdapter` so that you don't have to pass the path info to
+        the match method.
+
+        .. versionchanged:: 0.5
+            previously this method accepted a bogus `calculate_subdomain`
+            parameter that did not have any effect.  It was removed because
+            of that.
+
+        :param environ: a WSGI environment.
+        :param server_name: an optional server name hint (see above).
+        :param subdomain: optionally the current subdomain (see above).
         """
         if hasattr(environ, 'environ'):
             environ = environ.environ
@@ -998,7 +1041,8 @@
                    in (('https', '443'), ('http', '80')):
                     server_name += ':' + environ['SERVER_PORT']
         elif subdomain is None:
-            cur_server_name = environ['SERVER_NAME'].split('.')
+            cur_server_name = environ.get('HTTP_HOST',
+                environ['SERVER_NAME']).split(':', 1)[0].split('.')
             real_server_name = server_name.split(':', 1)[0].split('.')
             offset = -len(real_server_name)
             if cur_server_name[offset:] != real_server_name:
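
A short usage sketch for the subdomain calculation shown above; the host
names are invented for illustration::

    from werkzeug import create_environ
    from werkzeug.routing import Map, Rule

    url_map = Map([Rule('/', endpoint='index')])

    # only 'example.com' is passed as the server name hint, so the
    # remaining host part 'staging.dev' becomes the current subdomain
    environ = create_environ('/', 'http://staging.dev.example.com/')
    urls = url_map.bind_to_environ(environ, server_name='example.com')
    assert urls.subdomain == 'staging.dev'
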
@@ -1023,8 +1067,8 @@
 
 
 class MapAdapter(object):
-    """Retured by `Map.bind` or `Map.bind_to_environ` and does the
-    URL matching and building based on runtime information.
+    """Returned by :meth:`Map.bind` or :meth:`Map.bind_to_environ` and does
+    the URL matching and building based on runtime information.
     """
 
     def __init__(self, map, server_name, script_name, subdomain,
@@ -1068,8 +1112,19 @@
                 return urls.dispatch(lambda e, v: views[e](request, **v),
                                      catch_http_exceptions=True)
 
-        Keep in mind that this method might return exception objects too, so
-        use `Response.force_type` to get a response object.
+        Keep in mind that this method might return exception objects, too, so
+        use :class:`Response.force_type` to get a response object.
+
+        :param view_func: a function that is called with the endpoint as
+                          first argument and the value dict as second.  Has
+                          to dispatch to the actual view function with this
+                          information.  (see above)
+        :param path_info: the path info to use for matching.  Overrides the
+                          path info specified on binding.
+        :param method: the HTTP method used for matching.  Overrides the
+                       method specified on binding.
+        :param catch_http_exceptions: set to `True` to catch any of the
+                                      werkzeug :class:`HTTPException`\s.
         """
         try:
             try:
@@ -1116,7 +1171,6 @@
 
         Here is a small example for matching:
 
-        >>> from werkzeug.routing import Map, Rule
         >>> m = Map([
         ...     Rule('/', endpoint='index'),
         ...     Rule('/downloads/', endpoint='downloads/index'), 
@@ -1133,11 +1187,16 @@
         >>> urls.match("/downloads")
         Traceback (most recent call last):
           ...
-        werkzeug.routing.RequestRedirect: http://example.com/downloads/
+        RequestRedirect: http://example.com/downloads/
         >>> urls.match("/missing")
         Traceback (most recent call last):
           ...
-        werkzeug.routing.NotFound: /missing
+        NotFound: 404 Not Found
+
+        :param path_info: the path info to use for matching.  Overrides the
+                          path info specified on binding.
+        :param method: the HTTP method used for matching.  Overrides the
+                       method specified on binding.
         """
         self.map.update()
         if path_info is None:
@@ -1199,6 +1258,11 @@
     def test(self, path_info=None, method=None):
         """Test if a rule would match.  Works like `match` but returns `True`
         if the URL matches, or `False` if it does not exist.
+
+        :param path_info: the path info to use for matching.  Overrides the
+                          path info specified on binding.
+        :param method: the HTTP method used for matching.  Overrides the
+                       method specified on binding.
         """
         try:
             self.match(path_info, method)
@@ -1216,12 +1280,14 @@
         The `build` function also accepts an argument called `force_external`
         which, if you set it to `True`, will force external URLs.  By default
         external URLs (including the server name) will only be used if the
-        target URL is on a
-        different subdomain.
+        target URL is on a different subdomain.
 
-        With the same map as in the example above this code generates some
-        target URLs:
-
+        >>> m = Map([
+        ...     Rule('/', endpoint='index'),
+        ...     Rule('/downloads/', endpoint='downloads/index'), 
+        ...     Rule('/downloads/<int:id>', endpoint='downloads/show')
+        ... ])
+        >>> urls = m.bind("example.com", "/")
         >>> urls.build("index", {})
         '/'
         >>> urls.build("downloads/show", {'id': 42})
@@ -1243,8 +1309,15 @@
         raised.
 
         The build method accepts an argument called `method` which allows you
-        to specify the method you want to have an URL builded for if you have
+        to specify the method you want to have a URL built for if you have
         different methods for the same endpoint specified.
+
+        :param endpoint: the endpoint of the URL to build.
+        :param values: the values for the URL to build.  Unhandled values are
+                       appended to the URL as query parameters.
+        :param method: the HTTP method for the rule if there are different
+                       URLs for different methods on the same endpoint.
+        :param force_external: enforce full canonical external URLs.
         """
         self.map.update()
         method = method or self.default_method
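
To round off the `build` documentation above, a small sketch with invented
endpoints and values::

    from werkzeug.routing import Map, Rule

    url_map = Map([
        Rule('/downloads/', endpoint='downloads/index'),
        Rule('/downloads/<int:id>', endpoint='downloads/show'),
    ])
    urls = url_map.bind('example.com', '/')

    # unhandled values are appended as query parameters
    urls.build('downloads/show', {'id': 42, 'page': 2})
    # -> '/downloads/42?page=2'

    # force_external always yields the full canonical URL
    urls.build('downloads/index', {}, force_external=True)
    # -> 'http://example.com/downloads/'
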
--- a/MoinMoin/support/werkzeug/script.py	Fri Feb 27 23:30:37 2009 +0100
+++ b/MoinMoin/support/werkzeug/script.py	Sat Feb 28 00:08:31 2009 +0100
@@ -61,17 +61,13 @@
     or as named parameters, pretty much like Python function calls.
 
 
-    :copyright: 2007-2008 by Armin Ronacher, Thomas Johansson.
+    :copyright: (c) 2009 by the Werkzeug Team, see AUTHORS for more details.
     :license: BSD, see LICENSE for more details.
 '''
 import sys
 import inspect
 import getopt
 from os.path import basename
-try:
-    set = set
-except NameError:
-    from sets import Set as set
 
 
 argument_types = {
@@ -95,7 +91,15 @@
     namespace if no namespace is given, otherwise in the dict provided.
     Only items that start with action_prefix are processed as actions.  If
     you want to use all items in the namespace provided as actions set
-    action_prefix to an empty string."""
+    action_prefix to an empty string.
+
+    :param namespace: An optional dict where the functions are looked up in.
+                      By default the local namespace of the caller is used.
+    :param action_prefix: The prefix for the functions.  Everything else
+                          is ignored.
+    :param args: the arguments for the function.  If not specified
+                 :data:`sys.argv` without the first argument is used.
+    """
     if namespace is None:
         namespace = sys._getframe(1).f_locals
     actions = find_actions(namespace, action_prefix)
@@ -232,11 +236,21 @@
     return func, description, arguments
 
 
-def make_shell(init_func=lambda: {}, banner=None, use_ipython=True):
+def make_shell(init_func=None, banner=None, use_ipython=True):
     """Returns an action callback that spawns a new interactive
-    python shell."""
+    python shell.
+
+    :param init_func: an optional initialization function that is
+                      called before the shell is started.  The return
+                      value of this function is the initial namespace.
+    :param banner: the banner that is displayed before the shell.  If
+                   not specified a generic banner is used instead.
+    :param use_ipython: if set to `True` ipython is used if available.
+    """
     if banner is None:
         banner = 'Interactive Werkzeug Shell'
+    if init_func is None:
+        init_func = dict
     def action(ipython=use_ipython):
         """Start a new interactive python session."""
         namespace = init_func()
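
A usage sketch for `make_shell`; the application module and factory are
hypothetical::

    from werkzeug.script import make_shell

    def make_app():
        from myapplication import Application   # hypothetical import
        return Application()

    # `app` becomes available in the interactive namespace; IPython is
    # picked up automatically if it is installed
    action_shell = make_shell(lambda: {'app': make_app()},
                              banner='My Application Shell')
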
@@ -256,8 +270,21 @@
 
 def make_runserver(app_factory, hostname='localhost', port=5000,
                    use_reloader=False, use_debugger=False, use_evalex=True,
-                   threaded=False, processes=1):
-    """Returns an action callback that spawns a new wsgiref server."""
+                   threaded=False, processes=1, static_files=None):
+    """Returns an action callback that spawns a new development server.
+
+    .. versionadded:: 0.5
+       `static_files` was added.
+
+    :param app_factory: a function that returns a new WSGI application.
+    :param hostname: the default hostname the server should listen on.
+    :param port: the default port of the server.
+    :param use_reloader: the default setting for the reloader.
+    :param use_debugger: the default setting for the debugger.
+    :param use_evalex: the default setting for the evalex flag of the debugger.
+    :param threaded: the default threading setting.
+    :param processes: the default number of processes to start.
+    :param static_files: optionally a dict of static files.
+    """
     def action(hostname=('h', hostname), port=('p', port),
                reloader=use_reloader, debugger=use_debugger,
                evalex=use_evalex, threaded=threaded, processes=processes):
@@ -265,5 +292,5 @@
         from werkzeug.serving import run_simple
         app = app_factory()
         run_simple(hostname, port, app, reloader, debugger, evalex,
-                   None, 1, threaded, processes)
+                   None, 1, threaded, processes, static_files)
     return action
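
Putting the helpers together, a hypothetical manage script built on the API
documented above could look roughly like this (module names are
placeholders)::

    #!/usr/bin/env python
    from werkzeug import script

    def make_app():
        from myapplication import Application   # hypothetical import
        return Application()

    action_runserver = script.make_runserver(
        make_app, use_reloader=True,
        static_files={'/static': './static'})
    action_shell = script.make_shell(lambda: {'app': make_app()})

    if __name__ == '__main__':
        script.run()
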
--- a/MoinMoin/support/werkzeug/serving.py	Fri Feb 27 23:30:37 2009 +0100
+++ b/MoinMoin/support/werkzeug/serving.py	Sat Feb 28 00:08:31 2009 +0100
@@ -5,7 +5,7 @@
 
     There are many ways to serve a WSGI application.  While you're developing
     it you usually don't want a full blown webserver like Apache but a simple
-    standalone one.  With Python 2.5 onwards there is the `wsgiref`_ server in
+    standalone one.  From Python 2.5 onwards there is the `wsgiref`_ server in
     the standard library.  If you're using older versions of Python you can
     download the package from the cheeseshop.
 
@@ -38,7 +38,7 @@
     .. _wsgiref: http://cheeseshop.python.org/pypi/wsgiref
 
 
-    :copyright: 2007-2008 by Armin Ronacher.
+    :copyright: (c) 2009 by the Werkzeug Team, see AUTHORS for more details.
     :license: BSD, see LICENSE for more details.
 """
 import os
@@ -46,88 +46,153 @@
 import sys
 import time
 import thread
+from urlparse import urlparse
 from itertools import chain
-try:
-    from wsgiref.simple_server import ServerHandler, WSGIRequestHandler, \
-         WSGIServer
-    have_wsgiref = True
-except ImportError:
-    have_wsgiref = False
 from SocketServer import ThreadingMixIn, ForkingMixIn
+from BaseHTTPServer import HTTPServer, BaseHTTPRequestHandler
 from werkzeug._internal import _log
+from werkzeug.utils import responder
+from werkzeug.exceptions import InternalServerError
 
 
-if have_wsgiref:
-    class BaseRequestHandler(WSGIRequestHandler):
-        """
-        Subclass of the normal request handler that thinks it is
-        threaded or something like that. The default wsgiref handler
-        has wrong information so we need this class.
-        """
-        multithreaded = False
-        multiprocess = False
-        _handler_class = None
-
-        def get_handler(self):
-            handler = self._handler_class
-            if handler is None:
-                class handler(ServerHandler):
-                    wsgi_multithread = self.multithreaded
-                    wsgi_multiprocess = self.multiprocess
-                self._handler_class = handler
+class BaseRequestHandler(BaseHTTPRequestHandler, object):
 
-            rv = handler(self.rfile, self.wfile, self.get_stderr(),
-                         self.get_environ())
-            rv.request_handler = self
-            return rv
-
-        def handle(self):
-            self.raw_requestline = self.rfile.readline()
-            if self.parse_request():
-                self.get_handler().run(self.server.get_app())
+    def run_wsgi(self):
+        path_info, _, query = urlparse(self.path)[2:5]
+        app = self.server.app
+        environ = {
+            'wsgi.version':         (1, 0),
+            'wsgi.url_scheme':      'http',
+            'wsgi.input':           self.rfile,
+            'wsgi.errors':          sys.stderr,
+            'wsgi.multithread':     self.server.multithread,
+            'wsgi.multiprocess':    self.server.multiprocess,
+            'wsgi.run_once':        0,
+            'REQUEST_METHOD':       self.command,
+            'SCRIPT_NAME':          '',
+            'QUERY_STRING':         query,
+            'CONTENT_TYPE':         self.headers.get('Content-Type', ''),
+            'CONTENT_LENGTH':       self.headers.get('Content-Length', ''),
+            'REMOTE_ADDR':          self.client_address[0],
+            'REMOTE_PORT':          self.client_address[1],
+            'SERVER_NAME':          self.server.server_address[0],
+            'SERVER_PORT':          str(self.server.server_address[1]),
+            'SERVER_PROTOCOL':      self.request_version
+        }
+        if path_info:
+            from urllib import unquote
+            environ['PATH_INFO'] = unquote(path_info)
+        for key, value in self.headers.items():
+            environ['HTTP_' + key.upper().replace('-', '_')] = value
 
-        def log_request(self, code='-', size='-'):
-            _log('info', '%s -- [%s] %s %s',
-                self.address_string(),
-                self.requestline,
-                code,
-                size
-            )
+        headers_set = []
+        headers_sent = []
 
-        def log_error(self, format, *args):
-            _log('error', 'Error: %s', format % args)
+        def write(data):
+            assert headers_set, 'write() before start_response'
+            if not headers_sent:
+                status, response_headers = headers_sent[:] = headers_set
+                code, msg = status.split(None, 1)
+                self.send_response(int(code), msg)
+                for line in response_headers:
+                    self.send_header(*line)
+                self.end_headers()
 
-        def log_message(self, format, *args):
-            _log('info', format, args)
+            assert type(data) is str, 'applications must write bytes'
+            self.wfile.write(data)
+            self.wfile.flush()
+
+        def start_response(status, response_headers, exc_info=None):
+            if exc_info:
+                try:
+                    if headers_sent:
+                        raise exc_info[0], exc_info[1], exc_info[2]
+                finally:
+                    exc_info = None
+            elif headers_set:
+                raise AssertionError('Headers already set')
+            headers_set[:] = [status, response_headers]
+            return write
+
+        def execute(app):
+            application_iter = app(environ, start_response)
+            try:
+                for data in application_iter:
+                    write(data)
+            finally:
+                if hasattr(application_iter, 'close'):
+                    application_iter.close()
+
+        try:
+            execute(app)
+        except (socket.error, socket.timeout):
+            return
+        except:
+            from werkzeug.debug.tbtools import get_current_traceback
+            traceback = get_current_traceback(ignore_system_exceptions=True)
+            try:
+                # if we haven't yet sent the headers but they are set
+                # we roll back to be able to set them again.
+                if not headers_sent:
+                    del headers_set[:]
+                execute(InternalServerError())
+            except:
+                pass
+            self.server.log('error', 'Error on request:\n%s',
+                            traceback.plaintext)
+
+    def __getattr__(self, name):
+        if name.startswith('do_'):
+            return self.run_wsgi
+        raise AttributeError(name)
+
+
+class BaseWSGIServer(HTTPServer):
+    multithread = False
+    multiprocess = False
+
+    def __init__(self, host, port, app, handler=None):
+        if handler is None:
+            handler = BaseRequestHandler
+        HTTPServer.__init__(self, (host, int(port)), handler)
+        self.app = app
+
+    def log(self, type, message, *args):
+        _log(type, message, *args)
+
+    def serve_forever(self):
+        try:
+            HTTPServer.serve_forever(self)
+        except KeyboardInterrupt:
+            pass
+
+
+class ThreadedWSGIServer(ThreadingMixIn, BaseWSGIServer):
+    multithread = True
+
+
+class ForkingWSGIServer(ForkingMixIn, BaseWSGIServer):
+    multiprocess = True
+
+    def __init__(self, host, port, app, processes=40, handler=None):
+        BaseWSGIServer.__init__(self, host, port, app, handler)
+        self.max_children = processes
 
 
 def make_server(host, port, app=None, threaded=False, processes=1,
                 request_handler=None):
-    """Create a new wsgiref server that is either threaded, or forks
+    """Create a new server instance that is either threaded, or forks
     or just processes one request after another.
     """
-    if not have_wsgiref:
-        raise RuntimeError('All the Werkzeug serving features require '
-                           'an installed wsgiref library.')
-    request_handler = request_handler or BaseRequestHandler
     if threaded and processes > 1:
         raise ValueError("cannot have a multithreaded and "
                          "multi process server.")
     elif threaded:
-        class request_handler(request_handler):
-            multithreaded = True
-        class server(ThreadingMixIn, WSGIServer):
-            pass
+        return ThreadedWSGIServer(host, port, app, request_handler)
     elif processes > 1:
-        class request_handler(request_handler):
-            multiprocess = True
-        class server(ForkingMixIn, WSGIServer):
-            max_children = processes - 1
+        return ForkingWSGIServer(host, port, app, processes, request_handler)
     else:
-        server = WSGIServer
-    srv = server((host, port), request_handler)
-    srv.set_app(app)
-    return srv
+        return BaseWSGIServer(host, port, app, request_handler)
 
 
 def reloader_loop(extra_files=None, interval=1):
@@ -203,11 +268,14 @@
 def run_simple(hostname, port, application, use_reloader=False,
                use_debugger=False, use_evalex=True,
                extra_files=None, reloader_interval=1, threaded=False,
-               processes=1, request_handler=None):
+               processes=1, request_handler=None, static_files=None):
     """Start an application using wsgiref and with an optional reloader.  This
     wraps `wsgiref` to fix the wrong default reporting of the multithreaded
     WSGI variable and adds optional multithreading and fork support.
 
+    .. versionadded:: 0.5
+       `static_files` was added to simplify serving of static files.
+
     :param hostname: The host for the application.  eg: ``'localhost'``
     :param port: The port for the server.  eg: ``8080``
     :param application: the WSGI application to execute
@@ -215,7 +283,7 @@
                          process if modules were changed?
     :param use_debugger: should the werkzeug debugging system be used?
     :param use_evalex: should the exception evaluation feature be enabled?
-    :param extra_files: a list of files the reloader should listen for
+    :param extra_files: a list of files the reloader should watch
                         in addition to the modules.  For example configuration
                         files.
     :param reloader_interval: the interval for the reloader in seconds.
@@ -223,21 +291,25 @@
                      thread?
     :param processes: number of processes to spawn.
     :param request_handler: optional parameter that can be used to replace
-                            the default wsgiref request handler.  Have a look
-                            at the `werkzeug.serving` sourcecode for more
-                            details.
+                            the default one.  You can use this to replace it
+                            with a different
+                            :class:`~BaseHTTPServer.BaseHTTPRequestHandler`
+                            subclass.
+    :param static_files: a dict of paths for static files.  This works exactly
+                         like :class:`SharedDataMiddleware`, it's actually
+                         like :class:`SharedDataMiddleware`; it's actually
+                         serving.
     """
     if use_debugger:
         from werkzeug.debug import DebuggedApplication
         application = DebuggedApplication(application, use_evalex)
+    if static_files:
+        from werkzeug.utils import SharedDataMiddleware
+        application = SharedDataMiddleware(application, static_files)
 
     def inner():
-        srv = make_server(hostname, port, application, threaded,
-                          processes, request_handler)
-        try:
-            srv.serve_forever()
-        except KeyboardInterrupt:
-            pass
+        make_server(hostname, port, application, threaded,
+                    processes, request_handler).serve_forever()
 
     if os.environ.get('WERKZEUG_RUN_MAIN') != 'true':
         display_hostname = hostname or '127.0.0.1'
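
As a sketch of the new `static_files` option of `run_simple`; the application
factory and the paths are made up::

    from werkzeug.serving import run_simple
    from myapplication import make_app      # hypothetical factory

    # requests below /static are answered by SharedDataMiddleware,
    # everything else is passed on to the application itself
    run_simple('localhost', 5000, make_app(),
               use_reloader=True, use_debugger=True,
               static_files={'/static': './static'})
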
--- a/MoinMoin/support/werkzeug/templates.py	Fri Feb 27 23:30:37 2009 +0100
+++ b/MoinMoin/support/werkzeug/templates.py	Sat Feb 28 00:08:31 2009 +0100
@@ -3,127 +3,9 @@
     werkzeug.templates
     ~~~~~~~~~~~~~~~~~~
 
-    This template engine recognizes ASP/PHP like blocks and executes the code
-    in them::
-
-        t = Template('<% for u in users %>${u["username"]}\n<% endfor %>')
-        t.render(users=[{'username': 'John'},
-                        {'username': 'Jane'}])
-
-    would result in::
-
-        John
-        Jane
-
-    You can also create templates from files::
-
-        t = Template.from_file('test.html')
-
-    The syntax elements are a mixture of django, genshi text and mod_python
-    templates and used internally in werkzeug components.
-
-    We do not recommend using this template engine in a real environment
-    because is quite slow and does not provide any advanced features.  For
-    simple applications (cgi script like) this can however be sufficient.
-
-
-    Syntax Elements
-    ---------------
-
-    Printing Variables:
-
-    .. sourcecode:: text
-
-        $variable
-        $variable.attribute[item](some, function)(calls)
-        ${expression} or <%py print expression %>
-
-    Keep in mind that the print statement adds a newline after the call or
-    a whitespace if it ends with a comma.
-
-    For Loops:
-
-    .. sourcecode:: text
-
-        <% for item in seq %>
-            ...
-        <% endfor %>
-
-    While Loops:
-
-    .. sourcecode:: text
-
-        <% while expression %>
-            <%py break / continue %>
-        <% endwhile %>
-
-    If Conditions:
-
-    .. sourcecode:: text
+    A minimal template engine.
 
-        <% if expression %>
-            ...
-        <% elif expression %>
-            ...
-        <% else %>
-            ...
-        <% endif %>
-
-    Python Expressions:
-
-    .. sourcecode:: text
-
-        <%py
-            ...
-        %>
-
-        <%python
-            ...
-        %>
-
-    Note on python expressions:  You cannot start a loop in a python block
-    and continue it in another one.  This example does *not* work:
-
-    .. sourcecode:: text
-
-        <%python
-            for item in seq:
-        %>
-            ...
-
-    Comments:
-
-    .. sourcecode:: text
-
-        <%#
-            This is a comment
-        %>
-
-
-    Missing Variables
-    -----------------
-
-    If you try to access a missing variable you will get back an `Undefined`
-    object.  You can iterate over such an object or print it and it won't
-    fail.  However every other operation will raise an error.  To test if a
-    variable is undefined you can use this expression:
-
-    .. sourcecode:: text
-
-        <% if variable is Undefined %>
-            ...
-        <% endif %>
-
-
-    Python 2.3 Compatibility
-    ------------------------
-
-    Because of limitations in Python 2.3 it's impossible to achieve the
-    semi-silent variable lookup fallback.  If a template relies on undefined
-    variables it won't execute under Python 2.3.
-
-
-    :copyright: 2006-2008 by Armin Ronacher, Ka-Ping Yee.
+    :copyright: (c) 2009 by the Werkzeug Team, see AUTHORS for more details.
     :license: BSD License.
 """
 import sys
@@ -136,22 +18,6 @@
 from werkzeug import utils
 from werkzeug._internal import _decode_unicode
 
-# Anything older than Python 2.4 
-if sys.version_info < (2, 4):
-    class AstMangler(object):
-
-        def __getattr__(self, key):
-            class_ = getattr(_ast, key)
-            def wrapper(*args, **kw):
-                lineno = kw.pop('lineno', None)
-                obj = class_(*args, **kw)
-                obj.lineno = lineno
-                return obj
-            return wrapper
-
-    _ast = ast
-    ast = AstMangler()
-
 
 # Copyright notice: The `parse_data` method uses the string interpolation
 # algorithm by Ka-Ping Yee which originally was part of `ltpl20.py`_
@@ -394,8 +260,8 @@
 
 class Context(object):
 
-    def __init__(self, namespace, encoding, errors):
-        self.encoding = encoding
+    def __init__(self, namespace, charset, errors):
+        self.charset = charset
         self.errors = errors
         self._namespace = namespace
         self._buffer = []
@@ -414,13 +280,13 @@
 
     def to_unicode(self, value):
         if isinstance(value, str):
-            return _decode_unicode(value, self.encoding, self.errors)
+            return _decode_unicode(value, self.charset, self.errors)
         return unicode(value)
 
     def get_value(self, as_unicode=True):
         rv = u''.join(self._buffer)
         if not as_unicode:
-            return rv.encode(self.encoding, self.errors)
+            return rv.encode(self.charset, self.errors)
         return rv
 
     def __getitem__(self, key, default=undefined):
@@ -461,48 +327,69 @@
         'url_encode':       utils.url_encode
     }
 
-    def __init__(self, source, filename='<template>', encoding='utf-8',
+    def __init__(self, source, filename='<template>', charset='utf-8',
                  errors='strict', unicode_mode=True):
         if isinstance(source, str):
-            source = _decode_unicode(source, encoding, errors)
+            source = _decode_unicode(source, charset, errors)
         if isinstance(filename, unicode):
             filename = filename.encode('utf-8')
         node = Parser(tokenize(u'\n'.join(source.splitlines()),
                                filename), filename).parse()
         self.code = TemplateCodeGenerator(node, filename).getCode()
         self.filename = filename
-        self.encoding = encoding
+        self.charset = charset
         self.errors = errors
         self.unicode_mode = unicode_mode
 
-    def from_file(cls, file, encoding='utf-8', errors='strict',
-                  unicode_mode=True):
-        """Load a template from a file."""
+    @classmethod
+    def from_file(cls, file, charset='utf-8', errors='strict',
+                  unicode_mode=True, encoding=None):
+        """Load a template from a file.
+
+        .. versionchanged:: 0.5
+            The encoding parameter was renamed to charset.
+
+        :param file: a filename or file object to load the template from.
+        :param charset: the charset of the template to load.
+        :param errors: the error behavior of the charset decoding.
+        :param unicode_mode: set to `False` to disable unicode mode.
+        :return: a template
+        """
+        if encoding is not None:
+            from warnings import warn
+            warn(DeprecationWarning('the encoding parameter is deprecated. '
+                                    'use charset instead.'), stacklevel=2)
+            charset = encoding
         close = False
         if isinstance(file, basestring):
             f = open(file, 'r')
             close = True
         try:
-            data = _decode_unicode(f.read(), encoding, errors)
+            data = _decode_unicode(f.read(), charset, errors)
         finally:
             if close:
                 f.close()
-        return cls(data, getattr(f, 'name', '<template>'), encoding,
+        return cls(data, getattr(f, 'name', '<template>'), charset,
                    errors, unicode_mode)
-    from_file = classmethod(from_file)
 
     def render(self, *args, **kwargs):
         """This function accepts either a dict or some keyword arguments which
         will then be the context the template is evaluated in.  The return
         value will be the rendered template.
+
+        :param context: the function accepts the same arguments as the
+                        :class:`dict` constructor.
+        :return: the rendered template as string
         """
         ns = self.default_context.copy()
-        ns.update(dict(*args, **kwargs))
-        context = Context(ns, self.encoding, self.errors)
-        if sys.version_info < (2, 4):
-            exec self.code in context.runtime, ns
+        if len(args) == 1 and isinstance(args[0], utils.MultiDict):
+            ns.update(args[0].to_dict(flat=True))
         else:
-            exec self.code in context.runtime, context
+            ns.update(dict(*args))
+        if kwargs:
+            ns.update(kwargs)
+        context = Context(ns, self.charset, self.errors)
+        exec self.code in context.runtime, context
         return context.get_value(self.unicode_mode)
 
     def substitute(self, *args, **kwargs):
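
A minimal sketch of the template API touched above; the template source and
the file name are invented for illustration::

    from werkzeug.templates import Template

    t = Template(u'<% for user in users %>Hello ${user}!\n<% endfor %>')
    print t.render(users=['John', 'Jane'])
    # Hello John!
    # Hello Jane!

    # as of 0.5 the `encoding` keyword of from_file is called `charset`
    # t = Template.from_file('hello.html', charset='utf-8')
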
--- a/MoinMoin/support/werkzeug/test.py	Fri Feb 27 23:30:37 2009 +0100
+++ b/MoinMoin/support/werkzeug/test.py	Sat Feb 28 00:08:31 2009 +0100
@@ -3,277 +3,647 @@
     werkzeug.test
     ~~~~~~~~~~~~~
 
-    Quite often you want to unittest your application or just check the output
-    from an interactive python session.  In theory that is pretty simple because
-    you can fake a WSGI environment and call the application with a dummy
-    start_response and iterate over the application iterator but there are
-    argumentably better ways to interact with an application.
-
-    Werkzeug provides an object called `Client` which you can pass a WSGI
-    application (and optionally a response wrapper) which you can use to send
-    virtual requests to the application.
-
-    A response wrapper is a callable that takes three arguments: the application
-    iterator, the status and finally a list of headers.  The default response
-    wrapper returns a tuple.  Because response objects have the same signature
-    you can use them as response wrapper, ideally by subclassing them and hooking
-    in test functionality.
+    This module implements a client to WSGI applications for testing.
 
-    >>> from werkzeug import Client, BaseResponse, test_app
-    >>> c = Client(test_app, BaseResponse)
-    >>> resp = c.get('/')
-    >>> resp.status_code
-    200
-    >>> resp.headers
-    Headers([('Content-Type', 'text/html; charset=utf-8')])
-    >>> resp.response_body.splitlines()[:2]
-    ['<!DOCTYPE HTML PUBLIC "-//W3C//DTD HTML 4.01 Transitional//EN"',
-     '  "http://www.w3.org/TR/html4/loose.dtd">']
-
-    Or here without wrapper defined:
-
-    >>> from werkzeug import Client, test_app
-    >>> c = Client(test_app)
-    >>> app_iter, status, headers = c.get('/')
-    >>> status
-    '200 OK'
-    >>> headers
-    [('Content-Type', 'text/html; charset=utf-8')]
-    >>> ''.join(app_iter).splitlines()[:2]
-    ['<!DOCTYPE HTML PUBLIC "-//W3C//DTD HTML 4.01 Transitional//EN"',
-     '  "http://www.w3.org/TR/html4/loose.dtd">']
-
-    :copyright: 2007 by Armin Ronacher.
+    :copyright: (c) 2009 by the Werkzeug Team, see AUTHORS for more details.
     :license: BSD, see LICENSE for more details.
 """
+import sys
+import urlparse
 from time import time
 from random import random
-from urllib import urlencode
+from itertools import chain
+from tempfile import TemporaryFile
 from cStringIO import StringIO
+from cookielib import CookieJar
 from mimetypes import guess_type
-from werkzeug.wrappers import BaseResponse
-from werkzeug.utils import create_environ, run_wsgi_app
+from urllib2 import Request as U2Request
+
+from werkzeug._internal import _empty_stream
+from werkzeug.wrappers import BaseRequest
+from werkzeug.utils import create_environ, run_wsgi_app, get_current_url, \
+     url_encode, url_decode, FileStorage
+from werkzeug.datastructures import FileMultiDict, MultiDict, \
+     CombinedMultiDict, Headers
 
 
-def encode_multipart(values):
-    """Encode a dict of values (can either be strings or file descriptors)
-    into a multipart encoded string.  The filename is taken from the `.name`
-    attribute of the file descriptor.  Because StringIOs do not provide
-    this attribute it will generate a random filename in that case.
-
-    The return value is a tuple in the form (``boundary``, ``data``).
-
-    This method does not accept unicode strings!
+def stream_encode_multipart(values, use_tempfile=True, threshold=1024 * 500,
+                            boundary=None, charset='utf-8'):
+    """Encode a dict of values (either strings or file descriptors or
+    :class:`FileStorage` objects) into a multipart encoded string stored
+    in a file descriptor.
     """
-    boundary = '-----------=_Part_%s%s' (time(), random())
-    lines = []
-    for key, value in values.iteritems():
-        if isinstance(value, File):
-            lines.extend((
-                '--' + boundary,
-                'Content-Disposition: form-data; name="%s"; filename="%s"' %
-                    (key, value.filename),
-                'Content-Type: ' + value.mimetype,
-                '',
-                value.read()
-            ))
-        else:
-            lines.extend((
-                '--' + boundary,
-                'Content-Disposition: form-data; name="%s"' % key,
-                '',
-                value
-            ))
-    lines.extend(('--' + boundary + '--', ''))
-    return boundary, '\r\n'.join(lines)
+    if boundary is None:
+        boundary = '---------------WerkzeugFormPart_%s%s' % (time(), random())
+    _closure = [StringIO(), 0, False]
+
+    if use_tempfile:
+        def write(string):
+            stream, total_length, on_disk = _closure
+            if on_disk:
+                stream.write(string)
+            else:
+                length = len(string)
+                if length + _closure[1] <= threshold:
+                    stream.write(string)
+                else:
+                    new_stream = TemporaryFile('wb+')
+                    new_stream.write(stream.getvalue())
+                    new_stream.write(string)
+                    _closure[0] = new_stream
+                    _closure[2] = True
+                _closure[1] = total_length + length
+    else:
+        write = _closure[0].write
+
+    if not isinstance(values, MultiDict):
+        values = MultiDict(values)
+
+    for key, values in values.iterlists():
+        for value in values:
+            write('--%s\r\nContent-Disposition: form-data; name="%s"' %
+                  (boundary, key))
+            reader = getattr(value, 'read', None)
+            if reader is not None:
+                filename = getattr(value, 'filename',
+                                   getattr(value, 'name', None))
+                content_type = getattr(value, 'content_type', None)
+                if content_type is None:
+                    content_type = filename and guess_type(filename)[0] or \
+                                   'application/octet-stream'
+                if filename is not None:
+                    write('; filename="%s"\r\n' % filename)
+                else:
+                    write('\r\n')
+                write('Content-Type: %s\r\n\r\n' % content_type)
+                while 1:
+                    chunk = reader(16384)
+                    if not chunk:
+                        break
+                    write(chunk)
+            else:
+                if isinstance(value, unicode):
+                    value = value.encode(charset)
+                write('\r\n\r\n' + value)
+            write('\r\n')
+    write('--%s--\r\n' % boundary)
+
+    length = int(_closure[0].tell())
+    _closure[0].seek(0)
+    _closure[0].seek(0)
+    return _closure[0], length, boundary
 
 
-class File(object):
-    """Wraps a file descriptor or any other stream so that `encode_multipart`
-    can get the mimetype and filename from it.
+def encode_multipart(values, boundary=None, charset='utf-8'):
+    """Like `stream_encode_multipart` but returns a tuple in the form
+    (``boundary``, ``data``) where data is a bytestring.
+    """
+    stream, length, boundary = stream_encode_multipart(
+        values, use_tempfile=False, boundary=boundary, charset=charset)
+    return boundary, stream.read()
+
+
+def File(fd, filename=None, mimetype=None):
+    """Backwards compat."""
+    from warnings import warn
+    warn(DeprecationWarning('werkzeug.test.File is deprecated, use the '
+                            'EnvironBuilder or FileStorage instead'))
+    return FileStorage(fd, filename=filename, content_type=mimetype)
+
+
+class _TestCookieHeaders(object):
+    """A headers adapter for cookielib
     """
 
-    def __init__(self, fd, filename=None, mimetype=None):
-        if isinstance(fd, basestring):
-            if filename is None:
-                filename = fd
-            fd = file(fd, 'rb')
-            try:
-                self.stream = StringIO(fd.read())
-            finally:
-                fd.close()
-        else:
-            self.stream = fd
-            if filename is None:
-                if not hasattr(fd, 'name'):
-                    raise ValueError('no filename for provided')
-                filename = fd.name
-        if mimetype is None:
-            mimetype = guess_type(filename)
-        self.filename = fileanme
-        self.mimetype = mimetype or 'application/octet-stream'
+    def __init__(self, headers):
+        self.headers = headers
 
-    def getattr(self, name):
-        return getattr(self.stream, name)
+    def getheaders(self, name):
+        headers = []
+        for k, v in self.headers:
+            if k == name:
+                headers.append(v)
+        return headers
 
-    def __repr__(self):
-        return '<%s %r>' % (
-            self.__class__.__name__,
-            self.filename
+
+class _TestCookieResponse(object):
+    """Something that looks like a httplib.HTTPResponse, but is actually just an
+    adapter for our test responses to make them available for cookielib.
+    """
+
+    def __init__(self, headers):
+        self.headers = _TestCookieHeaders(headers)
+
+    def info(self):
+        return self.headers
+
+
+class _TestCookieJar(CookieJar):
+    """A cookielib.CookieJar modified to inject and read cookie headers from
+    and to wsgi environments, and wsgi application responses.
+    """
+
+    def inject_wsgi(self, environ):
+        """Inject the cookies as client headers into the server's wsgi
+        environment.
+        """
+        cvals = []
+        for cookie in self:
+            cvals.append('%s=%s' % (cookie.name, cookie.value))
+        if cvals:
+            environ['HTTP_COOKIE'] = ','.join(cvals)
+
+    def extract_wsgi(self, environ, headers):
+        """Extract the server's set-cookie headers as cookies into the
+        cookie jar.
+        """
+        self.extract_cookies(
+            _TestCookieResponse(headers),
+            U2Request(get_current_url(environ)),
         )
 
 
-class Client(object):
-    """This class allows to send requests to a wrapped application."""
+def _iter_data(data):
+    """Iterates over a dict or multidict yielding all keys and values.
+    This is used to iterate over the data passed to the
+    :class:`EnvironBuilder`.
+    """
+    if isinstance(data, MultiDict):
+        for key, values in data.iterlists():
+            for value in values:
+                yield key, value
+    else:
+        for item in data.iteritems():
+            yield item
 
-    def __init__(self, application, response_wrapper=None):
-        """The response wrapper can be a class or factory function that takes
-        three arguments: app_iter, status and headers.  The default response
-        wrapper just returns a tuple.
 
-        Example::
+class EnvironBuilder(object):
+    """This class can be used to conveniently create a WSGI environment
+    for testing purposes.  It can be used to quickly create WSGI environments
+    or request objects from arbitrary data.
 
-            class ClientResponse(BaseResponse):
-                ...
+    The signature of this class is also used in some other places as of
+    Werkzeug 0.5 (:func:`create_environ`, :meth:`BaseResponse.from_values`,
+    :meth:`Client.open`).  Because of this most of the functionality is
+    available through the constructor alone.
 
-            client = Client(MyApplication(), response_wrapper=ClientResponse)
+    Files and regular form data can be manipulated independently of each
+    other with the :attr:`form` and :attr:`files` attributes, but are
+    passed with the same argument to the constructor: `data`.
+
+    `data` can be any of these values:
+
+    -   a `str`: If it's a string it is converted into a :attr:`input_stream`,
+        the :attr:`content_length` is set and you have to provide a
+        :attr:`content_type`.
+    -   a `dict`: If it's a dict the keys have to be strings and the values
+        can be any of the following objects:
+
+        -   a :class:`file`-like object.  These are converted into
+            :class:`FileStorage` objects automatically.
+        -   a tuple.  The :meth:`~FileMultiDict.add_file` method is called
+            with the tuple items as positional arguments.
+
+    :param path: the path of the request.  In the WSGI environment this will
+                 end up as `PATH_INFO`.  If the `query_string` is not defined
+                 and there is a question mark in the `path` everything after
+                 it is used as query string.
+    :param base_url: the base URL is a URL that is used to extract the WSGI
+                     URL scheme, host (server name + server port) and the
+                     script root (`SCRIPT_NAME`).
+    :param query_string: an optional string or dict with URL parameters.
+    :param method: the HTTP method to use, defaults to `GET`.
+    :param input_stream: an optional input stream.  Do not specify this and
+                         `data`.  As soon as an input stream is set you can't
+                         modify :attr:`args` and :attr:`files` unless you
+                         set the :attr:`input_stream` to `None` again.
+    :param content_type: The content type for the request.  As of 0.5 you
+                         don't have to provide this when specifying files
+                         and form data via `data`.
+    :param content_length: The content length for the request.  You don't
+                           have to specify this when providing data via
+                           `data`.
+    :param errors_stream: an optional error stream that is used for
+                          `wsgi.errors`.  Defaults to :data:`stderr`.
+    :param multithread: controls `wsgi.multithread`.  Defaults to `False`.
+    :param multiprocess: controls `wsgi.multiprocess`.  Defaults to `False`.
+    :param run_once: controls `wsgi.run_once`.  Defaults to `False`.
+    :param headers: an optional list or :class:`Headers` object of headers.
+    :param data: a string or dict of form data.  See explanation above.
+    :param environ_base: an optional dict of environment defaults.
+    :param environ_overrides: an optional dict of environment overrides.
+    :param charset: the charset used to encode unicode data.
+    """
+
+    #: the server protocol to use.  defaults to HTTP/1.1
+    server_protocol = 'HTTP/1.1'
+
+    #: the wsgi version to use.  defaults to (1, 0)
+    wsgi_version = (1, 0)
+
+    #: the default request class for :meth:`get_request`
+    request_class = BaseRequest
+
+    def __init__(self, path='/', base_url=None, query_string=None,
+                 method='GET', input_stream=None, content_type=None,
+                 content_length=None, errors_stream=None, multithread=False,
+                 multiprocess=False, run_once=False, headers=None, data=None,
+                 environ_base=None, environ_overrides=None, charset='utf-8'):
+        if query_string is None and '?' in path:
+            path, query_string = path.split('?', 1)
+        self.charset = charset
+        self.path = path
+        self.base_url = base_url
+        if isinstance(query_string, basestring):
+            self.query_string = query_string
+        else:
+            if query_string is None:
+                query_string = MultiDict()
+            elif not isinstance(query_string, MultiDict):
+                query_string = MultiDict(query_string)
+            self.args = query_string
+        self.method = method
+        if headers is None:
+            headers = Headers()
+        elif not isinstance(headers, Headers):
+            headers = Headers(headers)
+        self.headers = headers
+        self.content_type = content_type
+        if errors_stream is None:
+            errors_stream = sys.stderr
+        self.errors_stream = errors_stream
+        self.multithread = multithread
+        self.multiprocess = multiprocess
+        self.run_once = run_once
+        self.environ_base = environ_base
+        self.environ_overrides = environ_overrides
+        self.input_stream = input_stream
+        self.closed = False
+
+        if data:
+            if input_stream is not None:
+                raise TypeError('can\'t provide input stream and data')
+            if isinstance(data, basestring):
+                self.input_stream = StringIO(data)
+                if self.content_length is None:
+                    self.content_length = len(data)
+            else:
+                for key, value in _iter_data(data):
+                    if isinstance(value, (tuple, dict)) or \
+                       hasattr(value, 'read'):
+                        self._add_file_from_data(key, value)
+                    else:
+                        self.form[key] = value
+
+    def _add_file_from_data(self, key, value):
+        """Called in the EnvironBuilder to add files from the data dict."""
+        if isinstance(value, tuple):
+            self.files.add_file(key, *value)
+        elif isinstance(value, dict):
+            from warnings import warn
+            warn(DeprecationWarning('it\'s no longer possible to pass dicts '
+                                    'as `data`.  Use tuples or FileStorage '
+                                    'objects instead'), stacklevel=2)
+            value = dict(value)
+            mimetype = value.pop('mimetype', None)
+            if mimetype is not None:
+                value['content_type'] = mimetype
+            self.files.add_file(key, **value)
+        else:
+            self.files.add_file(key, value)
+
+    def _get_base_url(self):
+        return urlparse.urlunsplit((self.url_scheme, self.host,
+                                    self.script_root, '', '')).rstrip('/') + '/'
+
+    def _set_base_url(self, value):
+        if value is None:
+            scheme = 'http'
+            netloc = 'localhost'
+            scheme = 'http'
+            script_root = ''
+        else:
+            scheme, netloc, script_root, qs, anchor = urlparse.urlsplit(value)
+            if qs or anchor:
+                raise ValueError('base url must not contain a query string '
+                                 'or fragment')
+        self.script_root = script_root.rstrip('/')
+        self.host = netloc
+        self.url_scheme = scheme
+
+    base_url = property(_get_base_url, _set_base_url, doc='''
+        The base URL is a URL that is used to extract the WSGI
+        URL scheme, host (server name + server port) and the
+        script root (`SCRIPT_NAME`).''')
+    del _get_base_url, _set_base_url
+
+    def _get_content_type(self):
+        ct = self.headers.get('Content-Type')
+        if ct is None and not self._input_stream:
+            if self.method in ('POST', 'PUT'):
+                if self._files:
+                    return 'multipart/form-data'
+                return 'application/x-www-form-urlencoded'
+            return None
+        return ct
+
+    def _set_content_type(self, value):
+        if value is None:
+            self.headers.pop('Content-Type', None)
+        else:
+            self.headers['Content-Type'] = value
+
+    content_type = property(_get_content_type, _set_content_type, doc='''
+        The content type for the request.  Reflected from and to the
+        :attr:`headers`.  Do not set if you set :attr:`files` or
+        :attr:`form` for auto detection.''')
+    del _get_content_type, _set_content_type
+
+    def _get_content_length(self):
+        return self.headers.get('Content-Length', type=int)
+
+    def _set_content_length(self, value):
+        self.headers['Content-Length'] = str(value)
+
+    content_length = property(_get_content_length, _set_content_length, doc='''
+        The content length as integer.  Reflected from and to the
+        :attr:`headers`.  Do not set if you set :attr:`files` or
+        :attr:`form` for auto detection.''')
+    del _get_content_length, _set_content_length
+
+    def form_property(name, storage, doc):
+        key = '_' + name
+        def getter(self):
+            if self._input_stream is not None:
+                raise AttributeError('an input stream is defined')
+            rv = getattr(self, key)
+            if rv is None:
+                rv = storage()
+                setattr(self, key, rv)
+            return rv
+        def setter(self, value):
+            self._input_stream = None
+            setattr(self, key, value)
+        return property(getter, setter, doc)
+
+    form = form_property('form', MultiDict, doc='''
+        A :class:`MultiDict` of form values.''')
+    files = form_property('files', FileMultiDict, doc='''
+        A :class:`FileMultiDict` of uploaded files.  You can use the
+        :meth:`~FileMultiDict.add_file` method to add new files to the
+        dict.''')
+    del form_property
+
+    def _get_input_stream(self):
+        return self._input_stream
+
+    def _set_input_stream(self, value):
+        self._input_stream = value
+        self._form = self._files = None
+
+    input_stream = property(_get_input_stream, _set_input_stream, doc='''
+        An optional input stream.  If you set this it will clear
+        :attr:`form` and :attr:`files`.''')
+    del _get_input_stream, _set_input_stream
+
+    def _get_query_string(self):
+        if self._query_string is None:
+            if self._args is not None:
+                return url_encode(self._args, charset=self.charset)
+            return ''
+        return self._query_string
+
+    def _set_query_string(self, value):
+        self._query_string = value
+        self._args = None
+
+    query_string = property(_get_query_string, _set_query_string, doc='''
+        The query string.  If you set this to a string :attr:`args` will
+        no longer be available.''')
+    del _get_query_string, _set_query_string
+
+    def _get_args(self):
+        if self._query_string is not None:
+            raise AttributeError('a query string is defined')
+        if self._args is None:
+            self._args = MultiDict()
+        return self._args
+
+    def _set_args(self, value):
+        self._query_string = None
+        self._args = value
+
+    args = property(_get_args, _set_args, doc='''
+        The URL arguments as :class:`MultiDict`.''')
+    del _get_args, _set_args
+
+    @property
+    def server_name(self):
+        """The server name (read-only, use :attr:`host` to set)"""
+        return self.host.split(':', 1)[0]
+
+    @property
+    def server_port(self):
+        """The server port as integer (read-only, use :attr:`host` to set)"""
+        pieces = self.host.split(':', 1)
+        if len(pieces) == 2 and pieces[1].isdigit():
+            return int(pieces[1])
+        elif self.url_scheme == 'https':
+            return 443
+        return 80
+
+    def __del__(self):
+        self.close()
+
+    def close(self):
+        """Closes all files.  If you put real :class:`file` objects into the
+        :attr:`files` dict you can call this method to automatically close
+        them all in one go.
         """
+        if self.closed:
+            return
+        try:
+            files = self.files.itervalues()
+        except AttributeError:
+            files = ()
+        for f in files:
+            try:
+                f.close()
+            except Exception, e:
+                pass
+        self.closed = True
+
+    def get_environ(self):
+        """Return the built environ."""
+        input_stream = self.input_stream
+        content_length = self.content_length
+        content_type = self.content_type
+
+        if input_stream is not None:
+            start_pos = input_stream.tell()
+            input_stream.seek(0, 2)
+            end_pos = input_stream.tell()
+            input_stream.seek(start_pos)
+            content_length = end_pos - start_pos
+        elif content_type == 'multipart/form-data':
+            values = CombinedMultiDict([self.form, self.files])
+            input_stream, content_length, boundary = \
+                stream_encode_multipart(values, charset=self.charset)
+            content_type += '; boundary="%s"' % boundary
+        elif content_type == 'application/x-www-form-urlencoded':
+            values = url_encode(self.form, charset=self.charset)
+            content_length = len(values)
+            input_stream = StringIO(values)
+        else:
+            input_stream = _empty_stream
+
+        result = {}
+        if self.environ_base:
+            result.update(self.environ_base)
+
+        def _encode(x):
+            if isinstance(x, unicode):
+                return x.encode(self.charset)
+            return x
+
+        result.update({
+            'REQUEST_METHOD':       self.method,
+            'SCRIPT_NAME':          _encode(self.script_root),
+            'PATH_INFO':            _encode(self.path),
+            'QUERY_STRING':         self.query_string,
+            'SERVER_NAME':          self.server_name,
+            'SERVER_PORT':          str(self.server_port),
+            'HTTP_HOST':            self.host,
+            'SERVER_PROTOCOL':      self.server_protocol,
+            'CONTENT_TYPE':         content_type or '',
+            'CONTENT_LENGTH':       str(content_length or '0'),
+            'wsgi.version':         self.wsgi_version,
+            'wsgi.url_scheme':      self.url_scheme,
+            'wsgi.input':           input_stream,
+            'wsgi.errors':          self.errors_stream,
+            'wsgi.multithread':     self.multithread,
+            'wsgi.multiprocess':    self.multiprocess,
+            'wsgi.run_once':        self.run_once
+        })
+        for key, value in self.headers.to_list(self.charset):
+            result['HTTP_%s' % key.upper().replace('-', '_')] = value
+        if self.environ_overrides:
+            result.update(self.environ_overrides)
+        return result
+
+    def get_request(self, cls=None):
+        """Returns a request with the data.  If the request class is not
+        specified :attr:`request_class` is used.
+
+        :param cls: The request wrapper to use.
+        """
+        if cls is None:
+            cls = self.request_class
+        return cls(self.get_environ())
+
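
As a rough sketch of how the new builder is meant to be driven (the path, base URL and form value below are invented for illustration)::

    from werkzeug.test import EnvironBuilder

    builder = EnvironBuilder(path='/submit', base_url='http://localhost/app/',
                             method='POST')
    builder.form['name'] = 'example'     # stored in the form MultiDict
    environ = builder.get_environ()      # a plain WSGI environ dict
    request = builder.get_request()      # wrapped in the default request_class
    builder.close()                      # closes any file objects in `files`
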
+
+class Client(object):
+    """This class allows you to send requests to a wrapped application.
+
+    The response wrapper can be a class or factory function that takes
+    three arguments: app_iter, status and headers.  The default response
+    wrapper just returns a tuple.
+
+    Example::
+
+        class ClientResponse(BaseResponse):
+            ...
+
+        client = Client(MyApplication(), response_wrapper=ClientResponse)
+
+    The use_cookies parameter indicates whether cookies should be stored and
+    sent for subsequent requests. This is True by default, but passing False
+    will disable this behaviour.
+
+    .. versionadded:: 0.5
+       `use_cookies` is new in this version.  Older versions did not provide
+       builtin cookie support.
+    """
+
+    def __init__(self, application, response_wrapper=None, use_cookies=True):
         self.application = application
         if response_wrapper is None:
             response_wrapper = lambda a, s, h: (a, s, h)
         self.response_wrapper = response_wrapper
-
-    def open(self, path='/', base_url=None, query_string=None, method='GET',
-             data=None, input_stream=None, content_type=None,
-             content_length=0, errors_stream=None, multithread=False,
-             multiprocess=False, run_once=False, environ_overrides=None,
-             as_tuple=False):
-        """Takes the same arguments as the `create_environ` function from the
-        utility module with some additions.
-
-        The first parameter should be the path of the request which defaults to
-        '/'.  The second one can either be a absolute path (in that case the url
-        host is localhost:80) or a full path to the request with scheme,
-        netloc port and the path to the script.
-
-        If the `path` contains a query string it will be used, even if the
-        `query_string` parameter was given.  If it does not contain one
-        the `query_string` parameter is used as querystring.  In that case
-        it can either be a dict, MultiDict or string.
-
-        The following options exist:
-
-        `method`
-            The request method.  Defaults to `GET`
-
-        `input_stream`
-            The input stream.  Defaults to an empty read only stream.
-
-        `data`
-            The data you want to transmit.  You can set this to a string and
-            define a content type instead of specifying an input stream.
-            Additionally you can pass a dict with the form data.  The values
-            could then be strings (no unicode objects!) which are then url
-            encoded or file objects.
-
-            A file object for this method is either a file descriptor with
-            an additional `name` attribute (like a file descriptor returned
-            by the `open` / `file` function), a tuple in the form
-            ``(fd, filename, mimetype)`` (all arguments except fd optional)
-            or as dict with those keys and values.
-
-            Additionally you can instanciate the `werkzeug.test.File` object
-            (or a subclass of it) and pass it as value.
-
-        `content_type`
-            The content type for this request.  Default is an empty content
-            type.
-
-        `content_length`
-            The value for the content length header.  Defaults to 0.
+        if use_cookies:
+            self.cookie_jar = _TestCookieJar()
+        else:
+            self.cookie_jar = None
 
-        `errors_stream`
-            The wsgi.errors stream.  Defaults to `sys.stderr`.
+    def open(self, *args, **kwargs):
+        """Takes the same arguments as the :class:`EnvironBuilder` class with
+        some additions:  instead of the :class:`EnvironBuilder` arguments you
+        can also provide an :class:`EnvironBuilder` instance or a ready WSGI
+        environment as the only argument.  Two optional keyword arguments
+        (`as_tuple`, `buffered`) change the type of the return value or the
+        way the application is executed.
 
-        `multithread`
-            The multithreaded flag for the WSGI Environment.  Defaults to
-            `False`.
+        .. versionchanged:: 0.5
+           If a file is provided as a dict in the `data` parameter, the key
+           for its content type is now called `content_type` instead of
+           `mimetype`.  This change was made for consistency with
+           :class:`werkzeug.FileWrapper`.
 
-        `multiprocess`
-            The multiprocess flag for the WSGI Environment.  Defaults to
-            `False`.
+        Additional parameters:
 
-        `run_once`
-            The run_once flag for the WSGI Environment.  Defaults to `False`.
+        :param as_tuple: Returns a tuple in the form ``(environ, result)``
+        :param buffered: set this to true to buffer the application run.
+                         This will automatically close the application for
+                         you as well.
         """
-        if input_stream is None and data and method in ('PUT', 'POST'):
-            need_multipart = False
-            if isinstance(data, basestring):
-                assert content_type is not None, 'content type required'
-            else:
-                for key, value in data.iteritems():
-                    if isinstance(value, basestring):
-                        if isinstance(value, unicode):
-                            data[key] = str(value)
-                        continue
-                    need_multipart = True
-                    if isinstance(value, tuple):
-                        data[key] = File(*value)
-                    elif isinstance(value, dict):
-                        data[key] = File(**value)
-                    elif not isinstance(value, File):
-                        data[key] = File(value)
-                if need_multipart:
-                    boundary, data = encode_multipart(data)
-                    if content_type is None:
-                        content_type = 'multipart/form-data; boundary=' + \
-                            boundary
-                else:
-                    data = urlencode(data)
-                    if content_type is None:
-                        content_type = 'application/x-www-form-urlencoded'
-            content_length = len(data)
-            input_stream = StringIO(data)
+        as_tuple = kwargs.pop('as_tuple', False)
+        buffered = kwargs.pop('buffered', False)
+        environ = None
+        if not kwargs and len(args) == 1:
+            if isinstance(args[0], EnvironBuilder):
+                environ = args[0].get_environ()
+            elif isinstance(args[0], dict):
+                environ = args[0]
+        if environ is None:
+            builder = EnvironBuilder(*args, **kwargs)
+            try:
+                environ = builder.get_environ()
+            finally:
+                builder.close()
 
-        if hasattr(path, 'environ'):
-            environ = path.environ
-        elif isinstance(path, dict):
-            environ = path
-        else:
-            environ = create_environ(path, base_url, query_string, method,
-                                     input_stream, content_type, content_length,
-                                     errors_stream, multithread,
-                                     multiprocess, run_once)
-        if environ_overrides:
-            environ.update(environ_overrides)
-        rv = run_wsgi_app(self.application, environ)
+        if self.cookie_jar is not None:
+            self.cookie_jar.inject_wsgi(environ)
+        rv = run_wsgi_app(self.application, environ, buffered=buffered)
+        if self.cookie_jar is not None:
+            self.cookie_jar.extract_wsgi(environ, rv[2])
         response = self.response_wrapper(*rv)
         if as_tuple:
             return environ, response
         return response
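
A minimal sketch of the two calling styles described above; the echo application and the form value are invented for illustration::

    from werkzeug.test import Client, EnvironBuilder

    def echo_method(environ, start_response):
        start_response('200 OK', [('Content-Type', 'text/plain')])
        return [environ['REQUEST_METHOD']]

    client = Client(echo_method)
    # builder-style arguments; the form data is encoded for us
    app_iter, status, headers = client.open('/login', method='POST',
                                            data={'user': 'john'})
    # or pass a prepared EnvironBuilder (or a ready WSGI environ) directly
    environ, (app_iter, status, headers) = client.open(
        EnvironBuilder('/status'), as_tuple=True)
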
 
     def get(self, *args, **kw):
-        """Like open but method is enforced to GET"""
+        """Like open but method is enforced to GET."""
         kw['method'] = 'GET'
         return self.open(*args, **kw)
 
     def post(self, *args, **kw):
-        """Like open but method is enforced to POST"""
+        """Like open but method is enforced to POST."""
         kw['method'] = 'POST'
         return self.open(*args, **kw)
 
     def head(self, *args, **kw):
-        """Like open but method is enforced to HEAD"""
+        """Like open but method is enforced to HEAD."""
         kw['method'] = 'HEAD'
         return self.open(*args, **kw)
 
     def put(self, *args, **kw):
-        """Like open but method is enforced to PUT"""
+        """Like open but method is enforced to PUT."""
         kw['method'] = 'PUT'
         return self.open(*args, **kw)
 
     def delete(self, *args, **kw):
-        """Like open but method is enforced to DELETE"""
+        """Like open but method is enforced to DELETE."""
         kw['method'] = 'DELETE'
         return self.open(*args, **kw)
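
A sketch of the cookie handling together with a response wrapper; the cookie-setting application below is invented and the exact replayed value depends on the cookie jar::

    from werkzeug import BaseResponse
    from werkzeug.test import Client

    def cookie_app(environ, start_response):
        start_response('200 OK', [('Content-Type', 'text/plain'),
                                  ('Set-Cookie', 'sid=abc123; Path=/')])
        return [environ.get('HTTP_COOKIE', '')]

    client = Client(cookie_app, response_wrapper=BaseResponse)
    client.get('/')              # the first response sets the cookie
    resp = client.get('/again')  # the stored cookie should be replayed here
    # resp.status_code == 200 and resp.data should now contain 'sid=abc123'
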
 
@@ -282,3 +652,79 @@
             self.__class__.__name__,
             self.application
         )
+
+
+def create_environ(*args, **kwargs):
+    """Create a new WSGI environ dict based on the values passed.  The first
+    parameter should be the path of the request which defaults to '/'.  The
+    second one can either be an absolute path (in that case the host is
+    localhost:80) or a full path to the request with scheme, netloc, port and
+    the path to the script.
+
+    This accepts the same arguments as the :class:`EnvironBuilder`
+    constructor.
+
+    .. versionchanged:: 0.5
+       This function is now a thin wrapper over :class:`EnvironBuilder` which
+       was added in 0.5.  The `headers`, `environ_base`, `environ_overrides`
+       and `charset` parameters were added.
+    """
+    builder = EnvironBuilder(*args, **kwargs)
+    try:
+        return builder.get_environ()
+    finally:
+        builder.close()
+
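
For orientation, a small sketch of the wrapper in action; the URL is illustrative and the expected keys follow from the builder code above::

    from werkzeug.test import create_environ

    environ = create_environ('/foo', 'http://localhost/script')
    # environ['PATH_INFO']   == '/foo'
    # environ['SCRIPT_NAME'] == '/script'
    # environ['HTTP_HOST']   == 'localhost'
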
+
+def run_wsgi_app(app, environ, buffered=False):
+    """Return a tuple in the form (app_iter, status, headers) of the
+    application output.  This works best if you pass it an application that
+    returns an iterator all the time.
+
+    Sometimes applications may use the `write()` callable returned
+    by the `start_response` function.  This tries to resolve such edge
+    cases automatically.  But if you don't get the expected output you
+    should set `buffered` to `True` which enforces buffering.
+
+    If passed an invalid WSGI application the behavior of this function is
+    undefined.  Never pass non-conforming WSGI applications to this function.
+
+    :param app: the application to execute.
+    :param buffered: set to `True` to enforce buffering.
+    :return: tuple in the form ``(app_iter, status, headers)``
+    """
+    response = []
+    buffer = []
+
+    def start_response(status, headers, exc_info=None):
+        if exc_info is not None:
+            raise exc_info[0], exc_info[1], exc_info[2]
+        response[:] = [status, headers]
+        return buffer.append
+
+    app_iter = app(environ, start_response)
+
+    # when buffering we emit the close call early and convert the
+    # application iterator into a regular list
+    if buffered:
+        close_func = getattr(app_iter, 'close', None)
+        try:
+            app_iter = list(app_iter)
+        finally:
+            if close_func is not None:
+                close_func()
+
+    # otherwise we iterate the application iterator until we have a
+    # response, chain the data collected so far with the rest of the
+    # iterator and wrap it in a new `ClosingIterator` if we have a
+    # close callable.
+    else:
+        while not response:
+            buffer.append(app_iter.next())
+        if buffer:
+            app_iter = chain(buffer, app_iter)
+            close_func = getattr(app_iter, 'close', None)
+            if close_func is not None:
+                app_iter = ClosingIterator(app_iter, close_func)
+
+    return app_iter, response[0], response[1]
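
A self-contained sketch of the helper with a trivial application::

    from werkzeug.test import create_environ, run_wsgi_app

    def hello_app(environ, start_response):
        start_response('200 OK', [('Content-Type', 'text/plain')])
        return ['Hello World!']

    app_iter, status, headers = run_wsgi_app(hello_app, create_environ('/'))
    assert status == '200 OK'
    assert ''.join(app_iter) == 'Hello World!'
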
--- a/MoinMoin/support/werkzeug/testapp.py	Fri Feb 27 23:30:37 2009 +0100
+++ b/MoinMoin/support/werkzeug/testapp.py	Sat Feb 28 00:08:31 2009 +0100
@@ -6,7 +6,7 @@
     Provide a small test application that can be used to test a WSGI server
     and check it for WSGI compliance.
 
-    :copyright: Copyright 2007-2008 by Armin Ronacher.
+    :copyright: (c) 2009 by the Werkzeug Team, see AUTHORS for more details.
     :license: BSD, see LICENSE for more details.
 """
 from werkzeug.templates import Template
--- a/MoinMoin/support/werkzeug/useragents.py	Fri Feb 27 23:30:37 2009 +0100
+++ b/MoinMoin/support/werkzeug/useragents.py	Sat Feb 28 00:08:31 2009 +0100
@@ -4,11 +4,11 @@
     ~~~~~~~~~~~~~~~~~~~
 
     This module provides a helper to inspect user agent strings.  This module
-    is far from complete but should work for most of the current browsers that
-    are available.
+    is far from complete but should work for most of the currently available
+    browsers.
 
 
-    :copyright: 2007-2008 by Armin Ronacher.
+    :copyright: (c) 2009 by the Werkzeug Team, see AUTHORS for more details.
     :license: BSD, see LICENSE for more details.
 """
 import re
@@ -18,11 +18,12 @@
     """A simple user agent parser.  Used by the `UserAgent`."""
 
     platforms = (
+        ('iphone', 'iphone'),
         (r'darwin|mac|os\s*x', 'macos'),
         ('win', 'windows'),
+        (r'android', 'android'),
         (r'x11|lin(\b|ux)?', 'linux'),
         ('(sun|i86)os', 'solaris'),
-        ('iphone', 'iphone'),
         (r'nintendo\s+wii', 'wii'),
         ('irix', 'irix'),
         ('hp-?ux', 'hpux'),
@@ -38,6 +39,7 @@
         ('ask jeeves', 'ask'),
         (r'aol|america\s+online\s+browser', 'aol'),
         ('opera', 'opera'),
+        ('chrome', 'chrome'),
         ('firefox|firebird|phoenix|iceweasel', 'firefox'),
         ('galeon', 'galeon'),
         ('safari', 'safari'),
@@ -59,17 +61,15 @@
     )
 
     def __init__(self):
-        self.platforms = re.compile(r'|'.join(['(?P<%s>%s)' % (b, a) for a, b
-                                    in self.platforms]), re.I)
+        self.platforms = [(b, re.compile(a, re.I)) for a, b in self.platforms]
         self.browsers = [(b, re.compile(self._browser_version_re % a))
                          for a, b in self.browsers]
 
     def __call__(self, user_agent):
-        match = self.platforms.search(user_agent)
-        if match is not None:
-            for platform, value in match.groupdict().iteritems():
-                if value:
-                    break
+        for platform, regex in self.platforms:
+            match = regex.search(user_agent)
+            if match is not None:
+                break
         else:
             platform = None
         for browser, regex in self.browsers:
@@ -88,15 +88,29 @@
 
 
 class UserAgent(object):
-    """Represents a user agent.  Pass it a WSGI environment or an user agent
+    """Represents a user agent.  Pass it a WSGI environment or a user agent
     string and you can inspect some of the details from the user agent
-    string via the attributes.  The following attribute exist:
+    string via the attributes.  The following attributes exist:
 
-    -   `string`, the raw user agent string
-    -   `platform`, the browser platform
-    -   `browser`, the name of the browser
-    -   `version`, the version of the browser
-    -   `language`, the language of the browser
+    .. attribute:: string
+
+       the raw user agent string
+
+    .. attribute:: platform
+
+       the browser platform
+
+    .. attribute:: browser
+
+        the name of the browser
+
+    .. attribute:: version
+
+        the version of the browser
+
+    .. attribute:: language
+
+        the language of the browser
     """
     _parser = UserAgentParser()
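
A short sketch of the parser in use; the user agent string is made up and the expected values follow from the tables above::

    from werkzeug.useragents import UserAgent

    ua = UserAgent('Mozilla/5.0 (Windows; U) Gecko/20090101 Firefox/3.0')
    # ua.platform should come out as 'windows' and ua.browser as 'firefox';
    # ua.string always holds the raw header value
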
 
--- a/MoinMoin/support/werkzeug/utils.py	Fri Feb 27 23:30:37 2009 +0100
+++ b/MoinMoin/support/werkzeug/utils.py	Sat Feb 28 00:08:31 2009 +0100
@@ -7,396 +7,30 @@
     them are used by the request and response wrappers but especially for
     middleware development it makes sense to use them without the wrappers.
 
-    :copyright: 2007-2008 by Armin Ronacher, Georg Brandl.
+    :copyright: (c) 2009 by the Werkzeug Team, see AUTHORS for more details.
     :license: BSD, see LICENSE for more details.
 """
 import re
 import os
 import sys
-import cgi
 import urllib
 import urlparse
 import posixpath
-from itertools import chain
 from time import asctime, gmtime, time
 from datetime import timedelta
-try:
-    set = set
-except NameError:
-    from sets import Set as set
-    def reversed(item):
-        return item[::-1]
+
 from werkzeug._internal import _patch_wrapper, _decode_unicode, \
      _empty_stream, _iter_modules, _ExtendedCookie, _ExtendedMorsel, \
-     _StorageHelper, _DictAccessorProperty, _dump_date, \
-     _parse_signature
-from werkzeug.http import generate_etag, parse_etags
+     _DictAccessorProperty, _dump_date, _parse_signature, _missing
 
 
-_format_re = re.compile(r'\$(%s|\{%s\})' % (('[a-zA-Z_][a-zA-Z0-9_]*',) * 2))
+_format_re = re.compile(r'\$(?:(%s)|\{(%s)\})' % (('[a-zA-Z_][a-zA-Z0-9_]*',) * 2))
 _entity_re = re.compile(r'&([^;]+);')
 
 
-class MultiDict(dict):
-    """A `MultiDict` is a dictionary subclass customized to deal with multiple
-    values for the same key which is for example used by the parsing functions
-    in the wrappers.  This is necessary because some HTML form elements pass
-    multiple values for the same key.
-
-    `MultiDict` implements the all standard dictionary methods.  Internally,
-    it saves all values for a key as a list, but the standard dict access
-    methods will only return the first value for a key. If you want to gain
-    access to the other values too you have to use the `list` methods as
-    explained below.
-
-    Basic Usage:
-
-    >>> d = MultiDict([('a', 'b'), ('a', 'c')])
-    >>> d
-    MultiDict([('a', 'b'), ('a', 'c')])
-    >>> d['a']
-    'b'
-    >>> d.getlist('a')
-    ['b', 'c']
-    >>> 'a' in d
-    True
-
-    It behaves like a normal dict thus all dict functions will only return the
-    first value when multiple values for one key are found.
-
-    From Werkzeug 0.3 onwards, the `KeyError` raised by this class is also a
-    subclass of the `BadRequest` HTTP exception and will render a page for a
-    ``400 BAD REQUEST`` if catched in a catch-all for HTTP exceptions.
-    """
-
-    #: the key error this class raises.  Because of circular dependencies
-    #: with the http exception module this class is created at the end of
-    #: this module.
-    KeyError = None
-
-    def __init__(self, mapping=()):
-        """A `MultiDict` can be constructed from an iterable of
-        ``(key, value)`` tuples, a dict, a `MultiDict` or with Werkzeug 0.2
-        onwards some keyword parameters.
-        """
-        if isinstance(mapping, MultiDict):
-            dict.__init__(self, [(k, v[:]) for k, v in mapping.lists()])
-        elif isinstance(mapping, dict):
-            tmp = {}
-            for key, value in mapping.iteritems():
-                if isinstance(value, (tuple, list)):
-                    value = list(value)
-                else:
-                    value = [value]
-                tmp[key] = value
-            dict.__init__(self, tmp)
-        else:
-            tmp = {}
-            for key, value in mapping:
-                tmp.setdefault(key, []).append(value)
-            dict.__init__(self, tmp)
-
-    def __getitem__(self, key):
-        """Return the first data value for this key;
-        raises KeyError if not found.
-
-        :raise KeyError: if the key does not exist
-        """
-        if key in self:
-            return dict.__getitem__(self, key)[0]
-        raise self.KeyError(key)
-
-    def __setitem__(self, key, value):
-        """Set an item as list."""
-        dict.__setitem__(self, key, [value])
-
-    def get(self, key, default=None, type=None):
-        """Return the default value if the requested data doesn't exist.
-        If `type` is provided and is a callable it should convert the value,
-        return it or raise a `ValueError` if that is not possible.  In this
-        case the function will return the default as if the value was not
-        found.
-
-        Example:
-
-        >>> d = MultiDict(foo='42', bar='blub')
-        >>> d.get('foo', type=int)
-        42
-        >>> d.get('bar', -1, type=int)
-        -1
-        """
-        try:
-            rv = self[key]
-            if type is not None:
-                rv = type(rv)
-        except (KeyError, ValueError):
-            rv = default
-        return rv
-
-    def getlist(self, key, type=None):
-        """Return the list of items for a given key. If that key is not in the
-        `MultiDict`, the return value will be an empty list.  Just as `get`
-        `getlist` accepts a `type` parameter.  All items will be converted
-        with the callable defined there.
-
-        :return: list
-        """
-        try:
-            rv = dict.__getitem__(self, key)
-        except KeyError:
-            return []
-        if type is None:
-            return rv
-        result = []
-        for item in rv:
-            try:
-                result.append(type(item))
-            except ValueError:
-                pass
-        return result
-
-    def setlist(self, key, new_list):
-        """Remove the old values for a key and add new ones.  Note that the list
-        you pass the values in will be shallow-copied before it is inserted in
-        the dictionary.
-
-        >>> multidict.setlist('foo', ['1', '2'])
-        >>> multidict['foo']
-        '1'
-        >>> multidict.getlist('foo')
-        ['1', '2']
-        """
-        dict.__setitem__(self, key, list(new_list))
-
-    def setdefault(self, key, default=None):
-        if key not in self:
-            self[key] = default
-        else:
-            default = self[key]
-        return default
-
-    def setlistdefault(self, key, default_list=()):
-        """Like `setdefault` but sets multiple values."""
-        if key not in self:
-            default_list = list(default_list)
-            dict.__setitem__(self, key, default_list)
-        else:
-            default_list = self.getlist(key)
-        return default_list
-
-    def items(self):
-        """Return a list of (key, value) pairs, where value is the last item
-        in the list associated with the key.
-        """
-        return [(key, self[key]) for key in self.iterkeys()]
-
-    lists = dict.items
-
-    def values(self):
-        """Returns a list of the last value on every key list."""
-        return [self[key] for key in self.iterkeys()]
-
-    listvalues = dict.values
-
-    def iteritems(self):
-        for key, values in dict.iteritems(self):
-            yield key, values[0]
-
-    iterlists = dict.iteritems
-
-    def itervalues(self):
-        for values in dict.itervalues(self):
-            yield values[0]
-
-    iterlistvalues = dict.itervalues
-
-    def copy(self):
-        """Return a shallow copy of this object."""
-        return self.__class__(self)
-
-    def to_dict(self, flat=True):
-        """Return the contents as regular dict.  If `flat` is `True` the
-        returned dict will only have the first item present, if `flat` is
-        `False` all values will be returned as lists.
-
-        :return: dict
-        """
-        if flat:
-            return dict(self.iteritems())
-        return dict(self)
-
-    def update(self, other_dict):
-        """update() extends rather than replaces existing key lists."""
-        if isinstance(other_dict, MultiDict):
-            for key, value_list in other_dict.iterlists():
-                self.setlistdefault(key, []).extend(value_list)
-        elif isinstance(other_dict, dict):
-            for key, value in other_dict.items():
-                self.setlistdefault(key, []).append(value)
-        else:
-            for key, value in other_dict:
-                self.setlistdefault(key, []).append(value)
-
-    def pop(self, *args):
-        """Pop the first item for a list on the dict."""
-        return dict.pop(self, *args)[0]
-
-    def popitem(self):
-        """Pop an item from the dict."""
-        item = dict.popitem(self)
-        return (item[0], item[1][0])
-
-    poplist = dict.pop
-    popitemlist = dict.popitem
-
-    def __repr__(self):
-        tmp = []
-        for key, values in self.iterlists():
-            for value in values:
-                tmp.append((key, value))
-        return '%s(%r)' % (self.__class__.__name__, tmp)
-
-
-class CombinedMultiDict(MultiDict):
-    """A read only `MultiDict` decorator that you can pass multiple `MultiDict`
-    instances as sequence and it will combine the return values of all wrapped
-    dicts:
-
-    >>> from werkzeug import MultiDict, CombinedMultiDict
-    >>> post = MultiDict([('foo', 'bar')])
-    >>> get = MultiDict([('blub', 'blah')])
-    >>> combined = CombinedMultiDict([get, post])
-    >>> combined['foo']
-    'bar'
-    >>> combined['blub']
-    'blah'
-
-    This works for all read operations and will raise a `TypeError` for
-    methods that usually change data which isn't possible.
-
-    From Werkzeug 0.3 onwards, the `KeyError` raised by this class is also a
-    subclass of the `BadRequest` HTTP exception and will render a page for a
-    ``400 BAD REQUEST`` if catched in a catch-all for HTTP exceptions.
-    """
-
-    def __init__(self, dicts=None):
-        self.dicts = dicts or []
-
-    def fromkeys(cls):
-        raise TypeError('cannot create %r instances by fromkeys' %
-                        cls.__name__)
-    fromkeys = classmethod(fromkeys)
-
-    def __getitem__(self, key):
-        for d in self.dicts:
-            if key in d:
-                return d[key]
-        raise self.KeyError(key)
-
-    def get(self, key, default=None, type=None):
-        for d in self.dicts:
-            if key in d:
-                if type is not None:
-                    try:
-                        return type(d[key])
-                    except ValueError:
-                        continue
-                return d[key]
-        return default
-
-    def getlist(self, key, type=None):
-        rv = []
-        for d in self.dicts:
-            rv.extend(d.getlist(key, type))
-        return rv
-
-    def keys(self):
-        rv = set()
-        for d in self.dicts:
-            rv.update(d.keys())
-        return list(rv)
-
-    def iteritems(self):
-        found = set()
-        for d in self.dicts:
-            for key, value in d.iteritems():
-                if not key in found:
-                    found.add(key)
-                    yield key, value
-
-    def itervalues(self):
-        for key, value in self.iteritems():
-            yield value
-
-    def values(self):
-        return list(self.itervalues())
-
-    def items(self):
-        return list(self.iteritems())
-
-    def lists(self):
-        rv = {}
-        for d in self.dicts:
-            rv.update(d)
-        return rv.items()
-
-    def listvalues(self):
-        rv = {}
-        for d in reversed(self.dicts):
-            rv.update(d)
-        return rv.values()
-
-    def iterkeys(self):
-        return iter(self.keys())
-
-    __iter__ = iterkeys
-
-    def iterlists(self):
-        return iter(self.lists())
-
-    def iterlistvalues(self):
-        return iter(self.listvalues())
-
-    def copy(self):
-        """Return a shallow copy of this object."""
-        return self.__class__(self.dicts[:])
-
-    def to_dict(self, flat=True):
-        """Returns the contents as simple dict.  If `flat` is `True` the
-        resulting dict will only have the first item present, if `flat`
-        is `False` all values will be lists.
-        """
-        rv = {}
-        for d in reversed(self.dicts):
-            rv.update(d.to_dict(flat))
-        return rv
-
-    def _immutable(self, *args):
-        raise TypeError('%r instances are immutable' %
-                        self.__class__.__name__)
-
-    setlist = setdefault = setlistdefault = update = pop = popitem = \
-    poplist = popitemlist = __setitem__ = __delitem__ = _immutable
-    del _immutable
-
-    def __len__(self):
-        return len(self.keys())
-
-    def __contains__(self, key):
-        for d in self.dicts:
-            if key in d:
-                return True
-        return False
-
-    has_key = __contains__
-
-    def __repr__(self):
-        return '%s(%r)' % (self.__class__.__name__, self.dicts)
-
-
 class FileStorage(object):
-    """The `FileStorage` object is a thin wrapper over incoming files.  It is
-    used by the request object to represent uploaded files.  All the
+    """The :class:`FileStorage` class is a thin wrapper over incoming files.
+    It is used by the request object to represent uploaded files.  All the
     attributes of the wrapper stream are proxied by the file storage so
     it's possible to do ``storage.read()`` instead of the long form
     ``storage.stream.read()``.
@@ -404,15 +38,6 @@
 
     def __init__(self, stream=None, filename=None, name=None,
                  content_type='application/octet-stream', content_length=-1):
-        """Creates a new `FileStorage` object.
-
-        :param stream: the input stream for uploaded file.  Usually this
-                       points to a temporary file.
-        :param filename: The filename of the file on the client.
-        :param name: the name of the form field
-        :param content_type: the content type of the file
-        :param content_length: the content length of the file.
-        """
         self.name = name
         self.stream = stream or _empty_stream
         self.filename = filename or getattr(stream, 'name', None)
@@ -422,8 +47,14 @@
     def save(self, dst, buffer_size=16384):
         """Save the file to a destination path or file object.  If the
         destination is a file object you have to close it yourself after the
-        call.  The buffer size is the number of bytes held in the memory
-        during the copy process.  It defaults to 16KB.
+        call.  The buffer size is the number of bytes held in memory during
+        the copy process.  It defaults to 16KB.
+
+        :param dst: a filename or open file object the uploaded file
+                    is saved to.
+        :param buffer_size: the size of the buffer.  This works the same as
+                            the `length` parameter of
+                            :func:`shutil.copyfileobj`.
         """
         from shutil import copyfileobj
         close_dst = False
@@ -436,15 +67,16 @@
             if close_dst:
                 dst.close()
 
+    def close(self):
+        """Close the underlying file if possible."""
+        try:
+            self.stream.close()
+        except:
+            pass
+
     def __getattr__(self, name):
         return getattr(self.stream, name)
 
-    def __nonzero__(self):
-        return bool(self.filename and self.content_length)
-
-    def __len__(self):
-        return max(self.content_length, 0)
-
     def __iter__(self):
         return iter(self.readline, '')
 
@@ -456,268 +88,6 @@
         )
 
 
-class Headers(object):
-    """An object that stores some headers.  It has a dict like interface
-    but is ordered and can store keys multiple times.
-
-    This data structure is useful if you want a nicer way to handle WSGI
-    headers which are stored as tuples in a list.
-
-    From Werkzeug 0.3 onwards, the `KeyError` raised by this class is also a
-    subclass of the `BadRequest` HTTP exception and will render a page for a
-    ``400 BAD REQUEST`` if catched in a catch-all for HTTP exceptions.
-    """
-
-    #: the key error this class raises.  Because of circular dependencies
-    #: with the http exception module this class is created at the end of
-    #: this module.
-    KeyError = None
-
-    def __init__(self, defaults=None, _list=None):
-        """Create a new `Headers` object based on a list or dict of headers
-        which are used as default values.  This does not reuse the list passed
-        to the constructor for internal usage.  To create a `Headers` object
-        that uses as internal storage the list or list-like object provided
-        it's possible to use the `linked` classmethod.
-        """
-        if _list is None:
-            _list = []
-        self._list = _list
-        if isinstance(defaults, dict):
-            for key, value in defaults.iteritems():
-                if isinstance(value, (tuple, list)):
-                    for v in value:
-                        self._list.append((key, v))
-                else:
-                    self._list.append((key, value))
-        elif defaults is not None:
-            self._list[:] = defaults
-
-    def linked(cls, headerlist):
-        """Create a new `Headers` object that uses the list of headers passed
-        as internal storage:
-
-        >>> headerlist = [('Content-Length', '40')]
-        >>> headers = Headers.linked(headerlist)
-        >>> headers.add('Content-Type', 'text/html')
-        >>> headerlist
-        [('Content-Length', '40'), ('Content-Type', 'text/html')]
-
-        :return: new linked `Headers` object.
-        """
-        return cls(_list=headerlist)
-    linked = classmethod(linked)
-
-    def __getitem__(self, key):
-        ikey = key.lower()
-        for k, v in self._list:
-            if k.lower() == ikey:
-                return v
-        raise self.KeyError(key)
-
-    def __eq__(self, other):
-        return other.__class__ is self.__class__ and \
-               set(other._list) == set(self._list)
-
-    def __ne__(self, other):
-        return not self.__eq__(other)
-
-    def get(self, key, default=None, type=None):
-        """Return the default value if the requested data doesn't exist.
-        If `type` is provided and is a callable it should convert the value,
-        return it or raise a `ValueError` if that is not possible.  In this
-        case the function will return the default as if the value was not
-        found.
-
-        Example:
-
-        >>> d = Headers([('Content-Length', '42')])
-        >>> d.get('Content-Length', type=int)
-        42
-
-        If a headers object is bound you must notadd unicode strings
-        because no encoding takes place.
-        """
-        try:
-            rv = self[key]
-        except KeyError:
-            return default
-        if type is None:
-            return rv
-        try:
-            return type(rv)
-        except ValueError:
-            return default
-
-    def getlist(self, key, type=None):
-        """Return the list of items for a given key. If that key is not in the
-        `MultiDict`, the return value will be an empty list.  Just as `get`
-        `getlist` accepts a `type` parameter.  All items will be converted
-        with the callable defined there.
-
-        :return: list
-        """
-        ikey = key.lower()
-        result = []
-        for k, v in self:
-            if k.lower() == ikey:
-                if type is not None:
-                    try:
-                        v = type(v)
-                    except ValueError:
-                        continue
-                result.append(v)
-        return result
-
-    def iteritems(self, lower=False):
-        for key, value in self:
-            if lower:
-                key = key.lower()
-            yield key, value
-
-    def iterkeys(self, lower=False):
-        for key, _ in self.iteritems(lower):
-            yield key
-
-    def itervalues(self):
-        for _, value in self.iteritems():
-            yield value
-
-    def keys(self, lower=False):
-        return list(self.iterkeys(lower))
-
-    def values(self):
-        return list(self.itervalues())
-
-    def items(self, lower=False):
-        return list(self.iteritems(lower))
-
-    def extend(self, iterable):
-        """Extend the headers with a dict or an iterable yielding keys and
-        values.
-        """
-        if isinstance(iterable, dict):
-            iterable = iterable.iteritems()
-        for key, value in iterable:
-            self.add(key, value)
-
-    def __delitem__(self, key):
-        key = key.lower()
-        new = []
-        for k, v in self._list:
-            if k.lower() != key:
-                new.append((k, v))
-        self._list[:] = new
-
-    remove = __delitem__
-
-    def __contains__(self, key):
-        """Check if a key is present."""
-        try:
-            self[key]
-        except KeyError:
-            return False
-        return True
-
-    has_key = __contains__
-
-    def __iter__(self):
-        """Yield ``(key, value)`` tuples."""
-        return iter(self._list)
-
-    def add(self, key, value):
-        """add a new header tuple to the list"""
-        self._list.append((key, value))
-
-    def clear(self):
-        """clears all headers"""
-        del self._list[:]
-
-    def set(self, key, value):
-        """remove all header tuples for key and add
-        a new one
-        """
-        lc_key = key.lower()
-        for idx, (old_key, old_value) in enumerate(self._list):
-            if old_key.lower() == lc_key:
-                self._list[idx] = (key, value)
-                return
-        self.add(key, value)
-
-    __setitem__ = set
-
-    def to_list(self, charset='utf-8'):
-        """Convert the headers into a list and converts the unicode header
-        items to the specified charset.
-
-        :return: list
-        """
-        result = []
-        for k, v in self:
-            if isinstance(v, unicode):
-                v = v.encode(charset)
-            else:
-                v = str(v)
-            result.append((k, v))
-        return result
-
-    def copy(self):
-        return self.__class__(self._list)
-
-    def __copy__(self):
-        return self.copy()
-
-    def __repr__(self):
-        return '%s(%r)' % (
-            self.__class__.__name__,
-            list(self)
-        )
-
-
-class EnvironHeaders(Headers):
-    """Read only version of the headers from a WSGI environment.  This
-    provides the same interface as `Headers` and is constructed from
-    a WSGI environment.
-
-    From Werkzeug 0.3 onwards, the `KeyError` raised by this class is also a
-    subclass of the `BadRequest` HTTP exception and will render a page for a
-    ``400 BAD REQUEST`` if catched in a catch-all for HTTP exceptions.
-    """
-
-    def __init__(self, environ):
-        self.environ = environ
-
-    def linked(cls, environ):
-        raise TypeError('%r object is always linked to environment, '
-                        'no separate initializer' % self.__class__.__name__)
-    linked = classmethod(linked)
-
-    def __eq__(self, other):
-        return self is other
-
-    def __getitem__(self, key):
-        key = key.upper().replace('-', '_')
-        if key in ('CONTENT_TYPE', 'CONTENT_LENGTH'):
-            return self.environ[key]
-        return self.environ['HTTP_' + key]
-
-    def __iter__(self):
-        for key, value in self.environ.iteritems():
-            if key.startswith('HTTP_'):
-                yield key[5:].replace('_', '-').title(), value
-            elif key in ('CONTENT_TYPE', 'CONTENT_LENGTH'):
-                yield key.replace('_', '-').title(), value
-
-    def copy(self):
-        raise TypeError('cannot create %r copies' % self.__class__.__name__)
-
-    def _immutable(self, *a, **kw):
-        raise TypeError('%r is immutable' % self.__class__.__name__)
-    remove = __delitem__ = add = clear = extend = set = __setitem__ = \
-        _immutable
-    del _immutable
-
-
 class SharedDataMiddleware(object):
     """A WSGI middleware that provides static content for development
     environments or simple server setups. Usage is quite simple::
@@ -744,9 +114,16 @@
         })
 
     This will then serve the ``shared_files`` folder in the `myapplication`
-    python package.
+    Python package.
+
+    The optional `disallow` parameter can be a list of `fnmatch` rules for
+    files that are not accessible from the web.  If `cache` is set to `False`
+    no caching headers are sent.
     """
 
+    # TODO: use wsgi.file_wrapper or something, just don't yield everything
+    # at once.  Also consider switching to BaseResponse
+
     def __init__(self, app, exports, disallow=None, cache=True):
         self.app = app
         self.exports = {}
@@ -767,6 +144,10 @@
             self.is_allowed = lambda x: not fnmatch(x, disallow)
 
     def is_allowed(self, filename):
+        """Subclasses can override this method to disallow access to
+        certain files.  However, if `disallow` is passed to the
+        constructor, this method is overwritten.
+        """
         return True
 
     def get_file_loader(self, filename):
@@ -822,14 +203,20 @@
             data = stream.read()
         finally:
             stream.close()
-        headers = [('Content-Type', mime_type), ('Cache-Control', 'public')]
+
+        headers = [('Cache-Control', 'public')]
         if self.cache:
             etag = generate_etag(data)
             headers += [('Expires', expiry), ('ETag', etag)]
             if parse_etags(environ.get('HTTP_IF_NONE_MATCH')).contains(etag):
+                remove_entity_headers(headers)
                 start_response('304 Not Modified', headers)
                 return []
 
+        headers.extend((
+            ('Content-Type', mime_type),
+            ('Content-Length', str(len(data)))
+        ))
         start_response('200 OK', headers)
         return [data]
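
A sketch of wiring the middleware up with the new `disallow` and `cache` options; the fallback application and the filesystem path are placeholders::

    from werkzeug.utils import SharedDataMiddleware

    def app(environ, start_response):
        start_response('404 NOT FOUND', [('Content-Type', 'text/plain')])
        return ['Not Found']

    app = SharedDataMiddleware(app, {
        '/shared': '/path/to/shared/files'
    }, disallow='*.py', cache=False)
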
 
@@ -875,10 +262,10 @@
         return ClosingIterator(app(environ, start_response), [cleanup_session,
                                                               cleanup_locals])
 
-    If there is just one close function it can be bassed instead of the list.
+    If there is just one close function it can be passed instead of the list.
 
-    A closing iterator is non needed if the application uses response objects
-    and finishes the processing if the resonse is started::
+    A closing iterator is not needed if the application uses response objects
+    and finishes the processing if the response is started::
 
         try:
             return response(environ, start_response)
@@ -912,6 +299,159 @@
             callback()
 
 
+class FileWrapper(object):
+    """This class can be used to convert a :class:`file`-like object into
+    an iterable.  It yields blocks of `buffer_size` bytes until the file
+    is fully read.
+
+    You should not use this class directly but rather use the
+    :func:`wrap_file` function that uses the WSGI server's file wrapper
+    support if it's available.
+
+    .. versionadded:: 0.5
+
+    :param file: a :class:`file`-like object with a :meth:`~file.read` method.
+    :param buffer_size: number of bytes for one iteration.
+    """
+
+    def __init__(self, file, buffer_size=8192):
+        self.file = file
+        self.buffer_size = buffer_size
+
+    def close(self):
+        if hasattr(self.file, 'close'):
+            self.file.close()
+
+    def __iter__(self):
+        return self
+
+    def next(self):
+        data = self.file.read(self.buffer_size)
+        if data:
+            return data
+        raise StopIteration()
+
+
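
As an illustration (the file path is a placeholder), the wrapper can be returned as the `app_iter` so the file is streamed in `buffer_size` chunks instead of being read at once::

    from werkzeug.utils import FileWrapper

    def send_big_file(environ, start_response):
        start_response('200 OK', [('Content-Type', 'application/octet-stream')])
        # the returned object is iterable and has a close() method, so it
        # works as a WSGI response iterable
        return FileWrapper(open('/path/to/big.bin', 'rb'), buffer_size=4096)
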
+class LimitedStream(object):
+    """Wraps a stream so that it doesn't read more than n bytes.  If the
+    stream is exhausted and the caller tries to get more bytes from it
+    :func:`on_exhausted` is called which by default raises a
+    :exc:`~werkzeug.exceptions.BadRequest`.  The return value of that
+    function is forwarded to the reader function.  So if it returns an
+    empty string :meth:`read` will return an empty string as well.
+
+    The limit however must never be higher than what the stream can
+    output.  Otherwise :meth:`readlines` will try to read past the
+    limit.
+
+    The `silent` parameter has no effect if :meth:`on_exhausted` is
+    overridden by a subclass.
+
+    :param stream: the stream to wrap.
+    :param limit: the limit for the stream, must not be longer than
+                  what the stream can provide if the stream does not
+                  end with `EOF` (like `wsgi.input`)
+    :param silent: If set to `True` the stream will allow reading
+                   past the limit and will return an empty string.
+    """
+
+    def __init__(self, stream, limit, silent=False):
+        self._stream = stream
+        self._pos = 0
+        self.limit = limit
+        self.silent = silent
+
+    def __iter__(self):
+        return self
+
+    @property
+    def is_exhausted(self):
+        """If the stream is exhausted this attribute is `True`."""
+        return self._pos >= self.limit
+
+    def on_exhausted(self):
+        """This is called when the stream tries to read past the limit.
+        The return value of this function is returned from the reading
+        function.
+
+        Per default this raises a :exc:`~werkzeug.exceptions.BadRequest`.
+        """
+        if self.silent:
+            return ''
+        raise BadRequest('input stream exhausted')
+
+    def exhaust(self, chunk_size=1024 * 16):
+        """Exhaust the stream.  This consumes all the data left until the
+        limit is reached.
+
+        :param chunk_size: the size for a chunk.  The stream is read in
+                           chunks of this size until it is exhausted and
+                           the results are thrown away.
+        """
+        to_read = self.limit - self._pos
+        chunk = chunk_size
+        while to_read > 0:
+            chunk = min(to_read, chunk)
+            self.read(chunk)
+            to_read -= chunk
+
+    def read(self, size=None):
+        """Read up to `size` bytes; if `size` is not provided everything
+        up to the limit is read.
+
+        :param size: the number of bytes to read.
+        """
+        if self._pos >= self.limit:
+            return self.on_exhausted()
+        if size is None:
+            size = self.limit
+        read = self._stream.read(min(self.limit - self._pos, size))
+        self._pos += len(read)
+        return read
+
+    def readline(self, size=None):
+        """Read a line from the stream.  Arguments are forwarded to the
+        `readline` function of the underlying stream if it supports
+        them.
+        """
+        if self._pos >= self.limit:
+            return self.on_exhausted()
+        if size is None:
+            size = self.limit - self._pos
+        else:
+            size = min(size, self.limit - self._pos)
+        line = self._stream.readline(size)
+        self._pos += len(line)
+        return line
+
+    def readlines(self, size=None):
+        """Reads a file into a list of strings.  It calls :meth:`readline`
+        until the file is read to the end.  It supports the optional
+        `size` argument if the underlying stream supports it for
+        `readline`.
+        """
+        last_pos = self._pos
+        result = []
+        if size is not None:
+            end = min(self.limit, last_pos + size)
+        else:
+            end = self.limit
+        while 1:
+            if size is not None:
+                size -= last_pos - self._pos
+            if self._pos >= end:
+                break
+            result.append(self.readline(size))
+            if size is not None:
+                last_pos = self._pos
+        return result
+
+    def next(self):
+        line = self.readline()
+        if line is None:
+            raise StopIteration()
+        return line
+
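
A quick sketch of the limiting behaviour with `silent` enabled; the sample data is arbitrary::

    from StringIO import StringIO
    from werkzeug.utils import LimitedStream

    stream = LimitedStream(StringIO('foo=bar&baz=qux'), 7, silent=True)
    stream.read()   # -> 'foo=bar'  (never more than `limit` bytes)
    stream.read()   # -> ''         (exhausted; silent=True suppresses BadRequest)
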
+
 class Href(object):
     """Implements a callable that constructs URLs with the given base. The
     function can be called with any number of positional and keyword
@@ -929,12 +469,21 @@
 
     If any of the arguments (positional or keyword) evaluates to `None` it
     will be skipped.  If no keyword arguments are given the last argument
-    can be a `dict` or `MultiDict` (or any other dict subclass), otherwise
-    the keyword arguments are used for the query parameters, cutting off
-    the first trailing underscore of the parameter name:
+    can be a :class:`dict` or :class:`MultiDict` (or any other dict subclass),
+    otherwise the keyword arguments are used for the query parameters, cutting
+    off the first trailing underscore of the parameter name:
 
     >>> href(is_=42)
     '/foo?is=42'
+    >>> href({'foo': 'bar'})
+    '/foo?foo=bar'
+
+    Combining of both methods is not allowed:
+
+    >>> href({'foo': 'bar'}, bar=42)
+    Traceback (most recent call last):
+      ...
+    TypeError: keyword arguments and query-dicts can't be combined
 
     Accessing attributes on the href object creates a new href object with
     the attribute name as prefix:
@@ -942,13 +491,25 @@
     >>> bar_href = href.bar
     >>> bar_href("blub")
     '/foo/bar/blub'
+
+    If `sort` is set to `True` the items are sorted by `key` or the default
+    sorting algorithm:
+
+    >>> href = Href("/", sort=True)
+    >>> href(a=1, b=2, c=3)
+    '/?a=1&b=2&c=3'
+
+    .. versionadded:: 0.5
+        `sort` and `key` were added.
     """
 
-    def __init__(self, base='./', charset='utf-8'):
+    def __init__(self, base='./', charset='utf-8', sort=False, key=None):
         if not base:
             base = './'
         self.base = base
         self.charset = charset
+        self.sort = sort
+        self.key = key
 
     def __getattr__(self, name):
         if name[:2] == '__':
@@ -956,15 +517,18 @@
         base = self.base
         if base[-1:] != '/':
             base += '/'
-        return Href(urlparse.urljoin(base, name), self.charset)
+        return Href(urlparse.urljoin(base, name), self.charset, self.sort,
+                    self.key)
 
     def __call__(self, *path, **query):
-        if query:
-            if path and isinstance(path[-1], dict):
-                query, path = path[-1], path[:-1]
-            else:
-                query = dict([(k.endswith('_') and k[:-1] or k, v)
-                              for k, v in query.items()])
+        if path and isinstance(path[-1], dict):
+            if query:
+                raise TypeError('keyword arguments and query-dicts '
+                                'can\'t be combined')
+            query, path = path[-1], path[:-1]
+        elif query:
+            query = dict([(k.endswith('_') and k[:-1] or k, v)
+                          for k, v in query.items()])
         path = '/'.join([url_quote(x, self.charset) for x in path
                          if x is not None]).lstrip('/')
         rv = self.base
@@ -973,12 +537,13 @@
                 rv += '/'
             rv = urlparse.urljoin(rv, path)
         if query:
-            rv += '?' + url_encode(query, self.charset)
+            rv += '?' + url_encode(query, self.charset, sort=self.sort,
+                                   key=self.key)
         return str(rv)
 
 
 class cached_property(object):
-    """A decorator that converts a function into a lazy property. The
+    """A decorator that converts a function into a lazy property.  The
     function wrapped is called the first time to retrieve the result
     and than that calculated result is used the next time you access
     the value::
@@ -989,37 +554,48 @@
             def foo(self):
                 # calculate something important here
                 return 42
+
+    .. versionchanged:: 0.5
+       cached properties are now optionally writeable.
     """
 
-    def __init__(self, func, name=None, doc=None):
+    def __init__(self, func, name=None, doc=None, writeable=False):
         self.func = func
+        self.writeable = writeable
         self.__name__ = name or func.__name__
         self.__doc__ = doc or func.__doc__
 
     def __get__(self, obj, type=None):
         if obj is None:
             return self
-        value = self.func(obj)
-        setattr(obj, self.__name__, value)
+        value = obj.__dict__.get(self.__name__, _missing)
+        if value is _missing:
+            value = self.func(obj)
+            obj.__dict__[self.__name__] = value
         return value
 
+    def __set__(self, obj, value):
+        if not self.writeable:
+            raise TypeError('read only attribute')
+        obj.__dict__[self.__name__] = value
+
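
    A short sketch of the new `writeable` flag (the `Page` class is invented for
    this example; the explicit ``cached_property(...)`` call is used so the flag
    can be passed):

        from werkzeug import cached_property

        class Page(object):
            def _render(self):
                # pretend this is expensive; it only runs on the first access
                return '<h1>rendered</h1>'
            body = cached_property(_render, name='body', writeable=True)

        page = Page()
        assert page.body == '<h1>rendered</h1>'    # computed once, then cached
        page.body = '<h1>overridden</h1>'          # allowed because writeable=True
        assert page.body == '<h1>overridden</h1>'
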
 
 class environ_property(_DictAccessorProperty):
     """Maps request attributes to environment variables. This works not only
     for the Werkzeug request object, but also for any other class with an
     environ attribute:
 
-    >>> class test_p(object):
-    ...     environ = { 'test': 'test' }
-    ...     test = environ_property('test')
-    >>> var = test_p()
+    >>> class Test(object):
+    ...     environ = {'key': 'value'}
+    ...     test = environ_property('key')
+    >>> var = Test()
     >>> var.test
-    test
+    'value'
 
     If you pass it a second value it's used as default if the key does not
     exist, the third one can be a converter that takes a value and converts
-    it.  If it raises `ValueError` or `TypeError` the default value is used.
-    If no default value is provided `None` is used.
+    it.  If it raises :exc:`ValueError` or :exc:`TypeError` the default value
+    is used. If no default value is provided `None` is used.
 
     Per default the property is read only.  You have to explicitly enable it
     by passing ``read_only=False`` to the constructor.
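
    A hedged sketch of the default/converter behaviour described above (the
    `Limits` class and the ``myapp.page_size`` key are invented):

        from werkzeug import environ_property

        class Limits(object):
            def __init__(self, environ):
                self.environ = environ
            # second argument is the default, third is the converter
            page_size = environ_property('myapp.page_size', 10, int)

        assert Limits({'myapp.page_size': '25'}).page_size == 25
        assert Limits({'myapp.page_size': 'junk'}).page_size == 10   # ValueError -> default
        assert Limits({}).page_size == 10                            # missing key -> default
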
@@ -1052,7 +628,7 @@
 
     >>> html.p(class_='foo', *[html.a('foo', href='foo.html'), ' ',
     ...                        html.a('bar', href='bar.html')])
-    '<p class="foo"><a href="foo.html">foo</a> <a href="bar.html">bar</a></p>'
+    u'<p class="foo"><a href="foo.html">foo</a> <a href="bar.html">bar</a></p>'
 
     This class works around some browser limitations and can not be used for
     arbitrary SGML/XML generation.  For that purpose lxml and similar
@@ -1061,7 +637,7 @@
     Calling the builder escapes the string passed:
 
     >>> html.p(html("<foo>"))
-    '<p>&lt;foo&gt;</p>'
+    u'<p>&lt;foo&gt;</p>'
     """
 
     from htmlentitydefs import name2codepoint
@@ -1098,6 +674,8 @@
                 if key.endswith('_'):
                     key = key[:-1]
                 if key in self._boolean_attributes:
+                    if not value:
+                        continue
                     value = self._dialect == 'xhtml' and '="%s"' % key or ''
                 else:
                     value = '="%s"' % escape(value, True)
@@ -1106,7 +684,8 @@
                 write(self._dialect == 'xhtml' and ' />' or '>')
                 return ''.join(buffer)
             write('>')
-            children_as_string = ''.join(children)
+            children_as_string = ''.join(unicode(x) for x in children
+                                         if x is not None)
             if children_as_string:
                 if tag in self._plaintext_elements:
                     children_as_string = escape(children_as_string)
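
    The effect of the boolean-attribute change above can be sketched as follows
    (expected output shown as comments; attribute order may vary because keyword
    arguments are unordered):

        from werkzeug import html

        html.input(type='checkbox', checked=True)
        # -> u'<input type="checkbox" checked>'
        html.input(type='checkbox', checked=False)
        # -> u'<input type="checkbox">'   (the falsy boolean attribute is now dropped)
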
@@ -1129,39 +708,84 @@
 
 
 def parse_form_data(environ, stream_factory=None, charset='utf-8',
-                    errors='ignore'):
+                    errors='ignore', max_form_memory_size=None,
+                    max_content_length=None, dict_class=None):
     """Parse the form data in the environ and return it as tuple in the form
     ``(stream, form, files)``.  You should only call this method if the
     transport method is `POST` or `PUT`.
 
     If the mimetype of the data transmitted is `multipart/form-data` the
     files multidict will be filled with `FileStorage` objects.  If the
-    mimetype is unknow the input stream is wrapped and returned as first
+    mimetype is unknown the input stream is wrapped and returned as first
     argument, else the stream is empty.
+
+    This function does not raise exceptions, even if the input data is
+    malformed.
+
+    Have a look at :ref:`dealing-with-request-data` for more details.
+
+    .. versionadded:: 0.5
+       The `max_form_memory_size`, `max_content_length` and
+       `dict_class` parameters were added.
+
+    :param environ: the WSGI environment to be used for parsing.
+    :param stream_factory: An optional callable that returns a new read and
+                           writeable file descriptor.  This callable works
+                           the same as :meth:`~BaseResponse._get_file_stream`.
+    :param charset: The character set for URL and url encoded form data.
+    :param errors: The encoding error behavior.
+    :param max_form_memory_size: the maximum number of bytes to be accepted for
+                           in-memory stored form data.  If the data
+                           exceeds the value specified an
+                           :exc:`~exceptions.RequestEntityTooLarge`
+                           exception is raised.
+    :param max_content_length: If this is provided and the transmitted data
+                               is longer than this value an
+                               :exc:`~exceptions.RequestEntityTooLarge`
+                               exception is raised.
+    :param dict_class: an optional dict class to use.  If this is not specified
+                       or `None` the default :class:`MultiDict` is used.
+    :return: A tuple in the form ``(stream, form, files)``.
     """
+    content_type, extra = parse_options_header(environ.get('CONTENT_TYPE', ''))
+    try:
+        content_length = int(environ['CONTENT_LENGTH'])
+    except (KeyError, ValueError):
+        content_length = 0
+
+    if dict_class is None:
+        dict_class = MultiDict
+
+    if max_content_length is not None and content_length > max_content_length:
+        raise RequestEntityTooLarge()
+
     stream = _empty_stream
-    form = []
-    files = []
-    storage = _StorageHelper(environ, stream_factory)
-    if storage.file:
-        stream = storage.file
-    if storage.list is not None:
-        for key in storage.keys():
-            values = storage[key]
-            if not isinstance(values, list):
-                values = [values]
-            for item in values:
-                if getattr(item, 'filename', None) is not None:
-                    fn = _decode_unicode(item.filename, charset, errors)
-                    # fix stupid IE bug (IE6 sends the whole path)
-                    if fn[1:3] == ':\\' or fn[:2] == '\\\\':
-                        fn = fn.split('\\')[-1]
-                    files.append((key, FileStorage(item.file, fn, key,
-                                  item.type, item.length)))
-                else:
-                    form.append((key, _decode_unicode(item.value,
-                                 charset, errors)))
-    return stream, MultiDict(form), MultiDict(files)
+    files = ()
+
+    if content_type == 'multipart/form-data':
+        try:
+            form, files = parse_multipart(environ['wsgi.input'],
+                                          extra.get('boundary'),
+                                          content_length, stream_factory,
+                                          charset, errors,
+                                          max_form_memory_size=max_form_memory_size)
+        except ValueError, e:
+            form = dict_class()
+        else:
+            form = dict_class(form)
+    elif content_type == 'application/x-www-form-urlencoded' or \
+         content_type == 'application/x-url-encoded':
+        if max_form_memory_size is not None and \
+           content_length > max_form_memory_size:
+            raise RequestEntityTooLarge()
+        form = url_decode(environ['wsgi.input'].read(content_length),
+                          charset, errors=errors, dict_class=dict_class)
+    else:
+        form = dict_class()
+        stream = LimitedStream(environ['wsgi.input'], content_length,
+                               silent=True)
+
+    return stream, form, dict_class(files)
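
    A small sketch of the url-encoded branch above, using a hand-built WSGI
    environ (the values are illustrative only):

        from StringIO import StringIO
        from werkzeug import parse_form_data

        body = 'name=werkzeug&version=0.5'
        environ = {
            'REQUEST_METHOD':   'POST',
            'CONTENT_TYPE':     'application/x-www-form-urlencoded',
            'CONTENT_LENGTH':   str(len(body)),
            'wsgi.input':       StringIO(body),
        }
        stream, form, files = parse_form_data(environ)
        assert form['name'] == u'werkzeug'
        assert not files            # no multipart data, so no file uploads
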
 
 
 def get_content_type(mimetype, charset):
@@ -1169,6 +793,10 @@
 
     If the mimetype represents text the charset will be appended as charset
     parameter, otherwise the mimetype is returned unchanged.
+
+    :param mimetype: the mimetype to be used as content type.
+    :param charset: the charset to be appended in case it was a text mimetype.
+    :return: the content type.
     """
     if mimetype.startswith('text/') or \
        mimetype == 'application/xml' or \
@@ -1179,16 +807,19 @@
 
 
 def format_string(string, context):
-    """String-template format a string::
+    """String-template format a string:
 
-        >>> format_string('$foo and ${foo}s', dict(foo=42))
-        '42 and 42s'
+    >>> format_string('$foo and ${foo}s', dict(foo=42))
+    '42 and 42s'
 
     This does not do any attribute lookup etc.  For more advanced string
     formattings have a look at the `werkzeug.template` module.
+
+    :param string: the format string.
+    :param context: a dict with the variables to insert.
     """
     def lookup_arg(match):
-        x = context[match.group(1)]
+        x = context[match.group(1) or match.group(2)]
         if not isinstance(x, basestring):
             x = type(string)(x)
         return x
@@ -1196,43 +827,87 @@
 
 
 def url_decode(s, charset='utf-8', decode_keys=False, include_empty=True,
-               errors='ignore'):
-    """Parse a querystring and return it as `MultiDict`.  Per default only
-    values are decoded into unicode strings.  If `decode_keys` is set to
-    ``True`` the same will happen for keys.
+               errors='ignore', separator='&', dict_class=None):
+    """Parse a querystring and return it as :class:`MultiDict`.  Per default
+    only values are decoded into unicode strings.  If `decode_keys` is set to
+    `True` the same will happen for keys.
 
     Per default a missing value for a key will default to an empty string.  If
     you don't want that behavior you can set `include_empty` to `False`.
 
-    Per default encoding errors are ignore.  If you want a different behavior
+    Per default encoding errors are ignored.  If you want a different behavior
     you can set `errors` to ``'replace'`` or ``'strict'``.  In strict mode a
     `HTTPUnicodeError` is raised.
+
+    .. versionchanged:: 0.5
+       In previous versions ";" and "&" could be used for url decoding.
+       This changed in 0.5 where only "&" is supported.  If you want to
+       use ";" instead a different `separator` can be provided.
+
+       The `dict_class` parameter was added.
+
+    :param s: a string with the query string to decode.
+    :param charset: the charset of the query string.
+    :param decode_keys: set to `True` if you want the keys to be decoded
+                        as well.
+    :param include_empty: Set to `False` if you don't want empty values to
+                          appear in the dict.
+    :param errors: the decoding error behavior.
+    :param separator: the pair separator to be used, defaults to ``&``
+    :param dict_class: an optional dict class to use.  If this is not specified
+                       or `None` the default :class:`MultiDict` is used.
     """
-    tmp = []
-    for key, values in cgi.parse_qs(str(s), include_empty).iteritems():
-        for value in values:
-            if decode_keys:
-                key = _decode_unicode(key, charset, errors)
-            tmp.append((key, _decode_unicode(value, charset, errors)))
-    return MultiDict(tmp)
+    if dict_class is None:
+        dict_class = MultiDict
+    result = []
+    for pair in str(s).split(separator):
+        if not pair:
+            continue
+        if '=' in pair:
+            key, value = pair.split('=', 1)
+        else:
+            key = pair
+            value = ''
+        key = urllib.unquote_plus(key)
+        if decode_keys:
+            key = _decode_unicode(key, charset, errors)
+        result.append((key, url_unquote_plus(value, charset, errors)))
+    return dict_class(result)
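
    A quick sketch of the new `separator` parameter; since this changeset ``;``
    has to be requested explicitly:

        from werkzeug import url_decode

        d = url_decode('a=1;b=2;b=3', separator=';')
        assert d['a'] == u'1'
        assert d.getlist('b') == [u'2', u'3']   # repeated keys are kept in the MultiDict
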
 
 
-def url_encode(obj, charset='utf-8', encode_keys=False):
+def url_encode(obj, charset='utf-8', encode_keys=False, sort=False, key=None,
+               separator='&'):
     """URL encode a dict/`MultiDict`.  If a value is `None` it will not appear
     in the result string.  Per default only values are encoded into the target
     charset strings.  If `encode_keys` is set to ``True`` unicode keys are
     supported too.
+
+    If `sort` is set to `True` the items are sorted by `key` or the default
+    sorting algorithm.
+
+    .. versionadded:: 0.5
+        `sort`, `key`, and `separator` were added.
+
+    :param obj: the object to encode into a query string.
+    :param charset: the charset of the query string.
+    :param encode_keys: set to `True` if you have unicode keys.
+    :param sort: set to `True` if you want parameters to be sorted by `key`.
+    :param separator: the separator to be used for the pairs.
+    :param key: an optional function to be used for sorting.  For more details
+                check out the :func:`sorted` documentation.
     """
     if isinstance(obj, MultiDict):
         items = obj.lists()
     elif isinstance(obj, dict):
         items = []
-        for key, value in obj.iteritems():
-            if not isinstance(value, (tuple, list)):
-                value = [value]
-            items.append((key, value))
+        for k, v in obj.iteritems():
+            if not isinstance(v, (tuple, list)):
+                v = [v]
+            items.append((k, v))
     else:
         items = obj or ()
+    if sort:
+        items.sort(key=key)
     tmp = []
     for key, values in items:
         if encode_keys and isinstance(key, unicode):
@@ -1248,11 +923,16 @@
                 value = str(value)
             tmp.append('%s=%s' % (urllib.quote(key),
                                   urllib.quote_plus(value)))
-    return '&'.join(tmp)
+    return separator.join(tmp)
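
    A sketch of the new sorting support (plain dict input, so sorting makes the
    output deterministic):

        from werkzeug import url_encode

        params = {'b': 2, 'a': 1, 'c': 3}
        assert url_encode(params, sort=True) == 'a=1&b=2&c=3'
        # a custom sort key and a different separator can be combined
        assert url_encode(params, sort=True, key=lambda item: item[0],
                          separator=';') == 'a=1;b=2;c=3'
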
 
 
 def url_quote(s, charset='utf-8', safe='/:'):
-    """URL encode a single string with a given encoding."""
+    """URL encode a single string with a given encoding.
+
+    :param s: the string to quote.
+    :param charset: the charset to be used.
+    :param safe: an optional sequence of safe characters.
+    """
     if isinstance(s, unicode):
         s = s.encode(charset)
     elif not isinstance(s, str):
@@ -1263,6 +943,10 @@
 def url_quote_plus(s, charset='utf-8', safe=''):
     """URL encode a single string with the given encoding and convert
     whitespace to "+".
+
+    :param s: the string to quote.
+    :param charset: the charset to be used.
+    :param safe: an optional sequence of safe characters.
     """
     if isinstance(s, unicode):
         s = s.encode(charset)
@@ -1274,9 +958,13 @@
 def url_unquote(s, charset='utf-8', errors='ignore'):
     """URL decode a single string with a given decoding.
 
-    Per default encoding errors are ignore.  If you want a different behavior
+    Per default encoding errors are ignored.  If you want a different behavior
     you can set `errors` to ``'replace'`` or ``'strict'``.  In strict mode a
     `HTTPUnicodeError` is raised.
+
+    :param s: the string to unquote.
+    :param charset: the charset to be used.
+    :param errors: the error handling for the charset decoding.
     """
     return _decode_unicode(urllib.unquote(s), charset, errors)
 
@@ -1285,22 +973,27 @@
     """URL decode a single string with the given decoding and decode
     a "+" to whitespace.
 
-    Per default encoding errors are ignore.  If you want a different behavior
+    Per default encoding errors are ignored.  If you want a different behavior
     you can set `errors` to ``'replace'`` or ``'strict'``.  In strict mode a
     `HTTPUnicodeError` is raised.
+
+    :param s: the string to unquote.
+    :param charset: the charset to be used.
+    :param errors: the error handling for the charset decoding.
     """
     return _decode_unicode(urllib.unquote_plus(s), charset, errors)
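
    The quoting helpers round-trip non-ASCII input through the given charset;
    a short sketch:

        from werkzeug import url_quote_plus, url_unquote_plus

        quoted = url_quote_plus(u'f\xfcr Elise')        # 'f%C3%BCr+Elise'
        assert url_unquote_plus(quoted) == u'f\xfcr Elise'
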
 
 
 def url_fix(s, charset='utf-8'):
-    """Sometimes you get an URL by a user that just isn't a real URL because
+    r"""Sometimes you get an URL by a user that just isn't a real URL because
     it contains unsafe characters like ' ' and so on.  This function can fix
     some of the problems in a similar way browsers handle data entered by the
     user:
 
-    >>> url_fix(u'http://de.wikipedia.org/wiki/Elf (Begriffsklärung)')
+    >>> url_fix(u'http://de.wikipedia.org/wiki/Elf (Begriffskl\xe4rung)')
     'http://de.wikipedia.org/wiki/Elf%20%28Begriffskl%C3%A4rung%29'
 
+    :param s: the string with the URL to fix.
     :param charset: The target charset for the URL if the url was given as
                     unicode string.
     """
@@ -1318,6 +1011,9 @@
     also translated.
 
     There is a special handling for `None` which escapes to an empty string.
+
+    :param s: the string to escape.
+    :param quote: set to true to also escape double quotes.
     """
     if s is None:
         return ''
@@ -1334,6 +1030,8 @@
 def unescape(s):
     """The reverse function of `escape`.  This unescapes all the HTML
     entities, not only the XML entities inserted by `escape`.
+
+    :param s: the string to unescape.
     """
     def handle_match(m):
         name = m.group(1)
@@ -1351,8 +1049,10 @@
 
 
 def get_host(environ):
-    """Return the real host for the given WSGI enviornment.  This takes care
+    """Return the real host for the given WSGI environment.  This takes care
     of the `X-Forwarded-Host` header.
+
+    :param environ: the WSGI environment to get the host of.
     """
     if 'HTTP_X_FORWARDED_HOST' in environ:
         return environ['HTTP_X_FORWARDED_HOST']
@@ -1379,6 +1079,11 @@
     'http://localhost/'
     >>> get_current_url(env, strip_querystring=True)
     'http://localhost/script/'
+
+    :param environ: the WSGI environment to get the current URL from.
+    :param root_only: set `True` if you only want the root URL.
+    :param strip_querystring: set to `True` if you don't want the querystring.
+    :param host_only: set to `True` if the host URL should be returned.
     """
     tmp = [environ['wsgi.url_scheme'], '://', get_host(environ)]
     cat = tmp.append
@@ -1396,28 +1101,109 @@
     return ''.join(tmp)
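
    A sketch with a hand-built environ (the host and paths are invented; the
    commented results assume the reconstruction rules shown in the doctest above):

        from werkzeug import get_host, get_current_url

        environ = {
            'wsgi.url_scheme': 'http',
            'HTTP_HOST':       'example.org',
            'SCRIPT_NAME':     '/app',
            'PATH_INFO':       '/page',
            'QUERY_STRING':    'x=1',
        }
        assert get_host(environ) == 'example.org'
        get_current_url(environ)                    # -> 'http://example.org/app/page?x=1'
        get_current_url(environ, root_only=True)    # -> 'http://example.org/app/'
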
 
 
+def pop_path_info(environ):
+    """Removes and returns the next segment of `PATH_INFO`, pushing it onto
+    `SCRIPT_NAME`.  Returns `None` if there is nothing left on `PATH_INFO`.
+
+    If there are empty segments (``'/foo//bar'``) these are ignored but
+    properly pushed to the `SCRIPT_NAME`:
+
+    >>> env = {'SCRIPT_NAME': '/foo', 'PATH_INFO': '/a/b'}
+    >>> pop_path_info(env)
+    'a'
+    >>> env['SCRIPT_NAME']
+    '/foo/a'
+    >>> pop_path_info(env)
+    'b'
+    >>> env['SCRIPT_NAME']
+    '/foo/a/b'
+
+    .. versionadded:: 0.5
+
+    :param environ: the WSGI environment that is modified.
+    """
+    path = environ.get('PATH_INFO')
+    if not path:
+        return None
+
+    script_name = environ.get('SCRIPT_NAME', '')
+
+    # shift multiple leading slashes over
+    old_path = path
+    path = path.lstrip('/')
+    if path != old_path:
+        script_name += '/' * (len(old_path) - len(path))
+
+    if '/' not in path:
+        environ['PATH_INFO'] = ''
+        environ['SCRIPT_NAME'] = script_name + path
+        return path
+
+    segment, path = path.split('/', 1)
+    environ['PATH_INFO'] = '/' + path
+    environ['SCRIPT_NAME'] = script_name + segment
+    return segment
+
+
+def peek_path_info(environ):
+    """Returns the next segment on the `PATH_INFO` or `None` if there
+    is none.  Works like :func:`pop_path_info` without modifying the
+    environment:
+
+    >>> env = {'SCRIPT_NAME': '/foo', 'PATH_INFO': '/a/b'}
+    >>> peek_path_info(env)
+    'a'
+    >>> peek_path_info(env)
+    'a'
+
+    .. versionadded:: 0.5
+
+    :param environ: the WSGI environment that is checked.
+    """
+    segments = environ.get('PATH_INFO', '').lstrip('/').split('/', 1)
+    if segments:
+        return segments[0]
+
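
    Together the two helpers support simple path-segment dispatching; a minimal
    sketch (the handler names are invented):

        from werkzeug import peek_path_info, pop_path_info

        def dispatch(environ):
            # look at the segment first, consume it only for the static branch
            if peek_path_info(environ) == 'static':
                pop_path_info(environ)
                return 'static handler'
            return 'default handler'

        env = {'SCRIPT_NAME': '', 'PATH_INFO': '/static/style.css'}
        assert dispatch(env) == 'static handler'
        assert env['SCRIPT_NAME'] == '/static'
        assert env['PATH_INFO'] == '/style.css'
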
+
 def cookie_date(expires=None):
     """Formats the time to ensure compatibility with Netscape's cookie
     standard.
 
     Accepts a floating point number expressed in seconds since the epoch, a
-    datetime object or a timetuple.  All times in UTC.  The `parse_date`
-    function in `werkzeug.http` can be used to parse such a date.
+    datetime object or a timetuple.  All times in UTC.  The :func:`parse_date`
+    function can be used to parse such a date.
 
     Outputs a string in the format ``Wdy, DD-Mon-YYYY HH:MM:SS GMT``.
+
+    :param expires: If provided that date is used, otherwise the current.
     """
     return _dump_date(expires, '-')
 
 
-def parse_cookie(header, charset='utf-8', errors='ignore'):
+def parse_cookie(header, charset='utf-8', errors='ignore',
+                 dict_class=None):
     """Parse a cookie.  Either from a string or WSGI environ.
 
-    Per default encoding errors are ignore.  If you want a different behavior
+    Per default encoding errors are ignored.  If you want a different behavior
     you can set `errors` to ``'replace'`` or ``'strict'``.  In strict mode a
-    `HTTPUnicodeError` is raised.
+    :exc:`HTTPUnicodeError` is raised.
+
+    .. versionchanged:: 0.5
+       This function now returns a :class:`TypeConversionDict` instead of a
+       regular dict.  The `dict_class` parameter was added.
+
+    :param header: the header to be used to parse the cookie.  Alternatively
+                   this can be a WSGI environment.
+    :param charset: the charset for the cookie values.
+    :param errors: the error behavior for the charset decoding.
+    :param dict_class: an optional dict class to use.  If this is not specified
+                       or `None` the default :class:`TypeConversionDict` is
+                       used.
     """
     if isinstance(header, dict):
         header = header.get('HTTP_COOKIE', '')
+    if dict_class is None:
+        dict_class = TypeConversionDict
     cookie = _ExtendedCookie()
     cookie.load(header)
     result = {}
@@ -1429,7 +1215,7 @@
         if value.value is not None:
             result[key] = _decode_unicode(value.value, charset, errors)
 
-    return result
+    return dict_class(result)
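
    A round-trip sketch of the two cookie helpers (the cookie name and value are
    illustrative; `dump_cookie` is defined a few lines further down):

        from werkzeug import dump_cookie, parse_cookie

        header = dump_cookie('session', 'abc123', max_age=3600)
        # feed the produced header value back in, as a browser would
        cookies = parse_cookie(header)
        assert cookies['session'] == u'abc123'
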
 
 
 def dump_cookie(key, value='', max_age=None, expires=None, path='/',
@@ -1437,12 +1223,12 @@
                 sync_expires=True):
     """Creates a new Set-Cookie header without the ``Set-Cookie`` prefix
     The parameters are the same as in the cookie Morsel object in the
-    Python standard library but it accepts unicode data too.
+    Python standard library but it accepts unicode data, too.
 
     :param max_age: should be a number of seconds, or `None` (default) if
                     the cookie should last only as long as the client's
                     browser session.  Additionally `timedelta` objects
-                    are accepted too.
+                    are accepted, too.
     :param expires: should be a `datetime` object or unix timestamp.
     :param path: limits the cookie to a given path, per default it will
                  span the whole domain.
@@ -1485,10 +1271,12 @@
     """Formats the time to match the RFC1123 date format.
 
     Accepts a floating point number expressed in seconds since the epoch, a
-    datetime object or a timetuple.  All times in UTC.  The `parse_date`
-    function in `werkzeug.http` can be used to parse such a date.
+    datetime object or a timetuple.  All times in UTC.  The :func:`parse_date`
+    function can be used to parse such a date.
 
     Outputs a string in the format ``Wdy, DD Mon YYYY HH:MM:SS GMT``.
+
+    :param timestamp: If provided that date is used, otherwise the current.
     """
     return _dump_date(timestamp, ' ')
 
@@ -1499,8 +1287,11 @@
     302, 303, 305, and 307.  300 is not supported because it's not a real
     redirect and 304 because it's the answer for a request with defined
     If-Modified-Since headers.
+
+    :param location: the location the response should redirect to.
+    :param code: the redirect status code.
     """
-    assert code in (301, 302, 303, 305, 307)
+    assert code in (301, 302, 303, 305, 307), 'invalid code'
     from werkzeug.wrappers import BaseResponse
     response = BaseResponse(
         '<!DOCTYPE HTML PUBLIC "-//W3C//DTD HTML 3.2 Final//EN">\n'
@@ -1516,14 +1307,16 @@
 def append_slash_redirect(environ, code=301):
     """Redirect to the same URL but with a slash appended.  The behavior
     of this function is undefined if the path ends with a slash already.
+
+    :param environ: the WSGI environment for the request that triggers
+                    the redirect.
+    :param code: the status code for the redirect.
     """
     new_path = environ['PATH_INFO'].strip('/') + '/'
     query_string = environ['QUERY_STRING']
     if query_string:
         new_path += '?' + query_string
-    if not new_path.startswith('/'):
-        new_path = '/' + new_path
-    return redirect(new_path)
+    return redirect(new_path, code)
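
    Both helpers return an ordinary response object that can be used as a WSGI
    application; a sketch (URLs and environ values are illustrative):

        from werkzeug import append_slash_redirect, redirect

        response = redirect('http://example.org/other', code=302)
        assert response.status_code == 302
        assert response.headers['Location'] == 'http://example.org/other'

        environ = {'PATH_INFO': '/folder', 'QUERY_STRING': 'a=1'}
        response = append_slash_redirect(environ)
        # since this changeset the target stays relative and the code is honoured
        assert response.status_code == 301
        assert response.headers['Location'] == 'folder/?a=1'
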
 
 
 def responder(f):
@@ -1539,15 +1332,36 @@
     return _patch_wrapper(f, lambda *a: f(*a)(*a[-2:]))
 
 
+def wrap_file(environ, file, buffer_size=8192):
+    """Wraps a file.  This uses the WSGI server's file wrapper if available
+    or otherwise the generic :class:`FileWrapper`.
+
+    .. versionadded:: 0.5
+
+    If the file wrapper from the WSGI server is used it's important to not
+    iterate over it from inside the application but to pass it through
+    unchanged.  If you want to pass out a file wrapper inside a response
+    object you have to set :attr:`~BaseResponse.direct_passthrough` to `True`.
+
+    More information about file wrappers is available in :pep:`333`.
+
+    :param file: a :class:`file`-like object with a :meth:`~file.read` method.
+    :param buffer_size: number of bytes for one iteration.
+    """
+    return environ.get('wsgi.file_wrapper', FileWrapper)(file, buffer_size)
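
    A sketch of serving a file through the wrapper from inside a WSGI application
    (the file path is hypothetical; setting `direct_passthrough` keeps the
    response object from iterating the wrapper itself, as noted above):

        from werkzeug import wrap_file, BaseResponse

        def application(environ, start_response):
            f = open('/tmp/report.pdf', 'rb')       # hypothetical file
            response = BaseResponse(wrap_file(environ, f),
                                    mimetype='application/pdf')
            response.direct_passthrough = True
            return response(environ, start_response)
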
+
+
 def import_string(import_name, silent=False):
-    """Imports an object based on a string.  This use useful if you want to
+    """Imports an object based on a string.  This is useful if you want to
     use import paths as endpoints or something similar.  An import path can
     be specified either in dotted notation (``xml.sax.saxutils.escape``)
     or with a colon as object delimiter (``xml.sax.saxutils:escape``).
 
-    If the `silent` is True the return value will be `None` if the import
-    fails.
+    If `silent` is True the return value will be `None` if the import fails.
 
+    :param import_name: the dotted name for the object to import.
+    :param silent: if set to `True` import errors are ignored and
+                   `None` is returned instead.
     :return: imported object
     """
     try:
@@ -1575,6 +1389,9 @@
     also recursively list modules but in that case it will import all the
     packages to get the correct load path of that module.
 
+    :param import_name: the dotted name for the package to find child modules.
+    :param include_packages: set to `True` if packages should be returned, too.
+    :param recursive: set to `True` if recursion should happen.
     :return: generator
     """
     module = import_string(import_path)
@@ -1594,144 +1411,6 @@
             yield modname
 
 
-def create_environ(path='/', base_url=None, query_string=None, method='GET',
-                   input_stream=None, content_type=None, content_length=0,
-                   errors_stream=None, multithread=False,
-                   multiprocess=False, run_once=False):
-    """Create a new WSGI environ dict based on the values passed.  The first
-    parameter should be the path of the request which defaults to '/'.  The
-    second one can either be a absolute path (in that case the host is
-    localhost:80) or a full path to the request with scheme, netloc port and
-    the path to the script.
-
-    If the `path` contains a query string it will be used, even if the
-    `query_string` parameter was given.  If it does not contain one
-    the `query_string` parameter is used as querystring.  In that case
-    it can either be a dict, MultiDict or string.
-
-    The following options exist:
-
-    `method`
-        The request method.  Defaults to `GET`
-
-    `input_stream`
-        The input stream.  Defaults to an empty read only stream.
-
-    `content_type`
-        The content type for this request.  Default is an empty content
-        type.
-
-    `content_length`
-        The value for the content length header.  Defaults to 0.
-
-    `errors_stream`
-        The wsgi.errors stream.  Defaults to `sys.stderr`.
-
-    `multithread`
-        The multithreaded flag for the WSGI Environment.  Defaults to `False`.
-
-    `multiprocess`
-        The multiprocess flag for the WSGI Environment.  Defaults to `False`.
-
-    `run_once`
-        The run_once flag for the WSGI Environment.  Defaults to `False`.
-    """
-    if base_url is not None:
-        scheme, netloc, script_name, qs, fragment = urlparse.urlsplit(base_url)
-        if ':' in netloc:
-            server_name, server_port = netloc.split(':')
-        else:
-            if scheme == 'http':
-                server_port = '80'
-            elif scheme == 'https':
-                server_port = '443'
-            server_name = netloc
-        if qs or fragment:
-            raise ValueError('base url cannot contain a query string '
-                             'or fragment')
-        script_name = urllib.unquote(script_name) or ''
-    else:
-        scheme = 'http'
-        server_name = netloc = 'localhost'
-        server_port = '80'
-        script_name = ''
-    if path and '?' in path:
-        path, query_string = path.split('?', 1)
-    elif not isinstance(query_string, basestring):
-        query_string = url_encode(query_string)
-    path = urllib.unquote(path) or '/'
-
-    return {
-        'REQUEST_METHOD':       method,
-        'SCRIPT_NAME':          script_name,
-        'PATH_INFO':            path,
-        'QUERY_STRING':         query_string,
-        'SERVER_NAME':          server_name,
-        'SERVER_PORT':          server_port,
-        'HTTP_HOST':            netloc,
-        'SERVER_PROTOCOL':      'HTTP/1.0',
-        'CONTENT_TYPE':         content_type or '',
-        'CONTENT_LENGTH':       str(content_length),
-        'wsgi.version':         (1, 0),
-        'wsgi.url_scheme':      scheme,
-        'wsgi.input':           input_stream or _empty_stream,
-        'wsgi.errors':          errors_stream or sys.stderr,
-        'wsgi.multithread':     multithread,
-        'wsgi.multiprocess':    multiprocess,
-        'wsgi.run_once':        run_once
-    }
-
-
-def run_wsgi_app(app, environ, buffered=False):
-    """Return a tuple in the form (app_iter, status, headers) of the
-    application output.  This works best if you pass it an application that
-    returns a iterator all the time.
-
-    Sometimes applications may use the `write()` callable returned
-    by the `start_response` function.  This tries to resolve such edge
-    cases automatically.  But if you don't get the expected output you
-    should set `buffered` to `True` which enforces buffering.
-
-    If passed an invalid WSGI application the behavior of this function is
-    undefined.  Never pass non-conforming WSGI applications to this function.
-    """
-    response = []
-    buffer = []
-
-    def start_response(status, headers, exc_info=None):
-        if exc_info is not None:
-            raise exc_info[0], exc_info[1], exc_info[2]
-        response[:] = [status, headers]
-        return buffer.append
-
-    app_iter = app(environ, start_response)
-
-    # when buffering we emit the close call early and conver the
-    # application iterator into a regular list
-    if buffered:
-        close_func = getattr(app_iter, 'close', None)
-        try:
-            app_iter = list(app_iter)
-        finally:
-            if close_func is not None:
-                close_func()
-
-    # otherwise we iterate the application iter until we have
-    # a response, chain the already received data with the already
-    # collected data and wrap it in a new `ClosingIterator` if
-    # we have a close callable.
-    else:
-        while not response:
-            buffer.append(app_iter.next())
-        if buffer:
-            app_iter = chain(buffer, app_iter)
-            close_func = getattr(app_iter, 'close', None)
-            if close_func is not None:
-                app_iter = ClosingIterator(app_iter, close_func)
-
-    return app_iter, response[0], response[1]
-
-
 def validate_arguments(func, args, kwargs, drop_extra=True):
     """Check if the function accepts the arguments and keyword arguments.
     Returns a new ``(args, kwargs)`` tuple that can safely be passed to
@@ -1768,6 +1447,13 @@
                                      'the data expected.')
                 return f(*args, **kwargs)
             return proxy
+
+    :param func: the function the validation is performed against.
+    :param args: a tuple of positional arguments.
+    :param kwargs: a dict of keyword arguments.
+    :param drop_extra: set to `False` if you don't want extra arguments
+                       to be silently dropped.
+    :return: tuple in the form ``(args, kwargs)``.
     """
     parser = _parse_signature(func)
     args, kwargs, missing, extra, extra_positional = parser(args, kwargs)[:5]
@@ -1784,6 +1470,11 @@
     returns a dict of names as the function would see it.  This can be useful
     to implement a cache decorator that uses the function arguments to build
     the cache key based on the values of the arguments.
+
+    :param func: the function the arguments should be bound for.
+    :param args: tuple of positional arguments.
+    :param kwargs: a dict of keyword arguments.
+    :return: a :class:`dict` of bound keyword arguments.
     """
     args, kwargs, missing, extra, extra_positional, \
         arg_spec, vararg_var, kwarg_var = _parse_signature(func)(args, kwargs)
@@ -1807,7 +1498,7 @@
 
 
 class ArgumentValidationError(ValueError):
-    """Raised if `validate_arguments` fails to validate"""
+    """Raised if :func:`validate_arguments` fails to validate"""
 
     def __init__(self, missing=None, extra=None, extra_positional=None):
         self.missing = set(missing or ())
@@ -1820,8 +1511,26 @@
         ))
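
    A short sketch tying :func:`validate_arguments` and the exception together
    (the `add` function is invented for the example):

        from werkzeug import validate_arguments, ArgumentValidationError

        def add(a, b, c=0):
            return a + b + c

        # unknown keyword arguments are dropped silently by default
        args, kwargs = validate_arguments(add, (1, 2), {'c': 3, 'junk': 42})
        assert add(*args, **kwargs) == 6

        try:
            validate_arguments(add, (1,), {})       # 'b' is missing
        except ArgumentValidationError, e:
            assert 'b' in e.missing
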
 
 
-# create all the special key errors now that the classes are defined.
-from werkzeug.exceptions import BadRequest
-for _cls in MultiDict, CombinedMultiDict, Headers, EnvironHeaders:
-    _cls.KeyError = BadRequest.wrap(KeyError, _cls.__name__ + '.KeyError')
-del BadRequest, _cls
+# circular dependency fun
+from werkzeug.http import generate_etag, parse_etags, \
+     remove_entity_headers, parse_multipart, parse_options_header, \
+     dump_options_header
+from werkzeug.exceptions import BadRequest, RequestEntityTooLarge
+from werkzeug.datastructures import MultiDict, TypeConversionDict
+
+
+# DEPRECATED
+# these objects were previously in this module as well.  we import
+# them here for backwards compatibility.  Will go away in 0.6
+from werkzeug.datastructures import MultiDict, CombinedMultiDict, \
+     Headers, EnvironHeaders
+
+def create_environ(*args, **kwargs):
+    """backward compatibility."""
+    from werkzeug.test import create_environ
+    return create_environ(*args, **kwargs)
+
+def run_wsgi_app(*args, **kwargs):
+    """backwards compatibility."""
+    from werkzeug.test import run_wsgi_app
+    return run_wsgi_app(*args, **kwargs)
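
    The moved helpers still work through these wrappers; a combined sketch
    (the demo application is invented):

        from werkzeug import create_environ, run_wsgi_app

        def hello_app(environ, start_response):
            start_response('200 OK', [('Content-Type', 'text/plain')])
            return ['Hello from %s' % environ['PATH_INFO']]

        environ = create_environ('/greet?name=World', 'http://localhost/')
        app_iter, status, headers = run_wsgi_app(hello_app, environ)
        assert status == '200 OK'
        assert ''.join(app_iter) == 'Hello from /greet'
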
--- a/MoinMoin/support/werkzeug/wrappers.py	Fri Feb 27 23:30:37 2009 +0100
+++ b/MoinMoin/support/werkzeug/wrappers.py	Sat Feb 28 00:08:31 2009 +0100
@@ -17,22 +17,26 @@
     decoded into a unicode object if possible and if it makes sense.
 
 
-    :copyright: 2007-2008 by Armin Ronacher, Georg Brandl.
+    :copyright: (c) 2009 by the Werkzeug Team, see AUTHORS for more details.
     :license: BSD, see LICENSE for more details.
 """
 import tempfile
 import urlparse
 from datetime import datetime, timedelta
-from werkzeug.http import HTTP_STATUS_CODES, Accept, CacheControl, \
+from werkzeug.http import HTTP_STATUS_CODES, \
      parse_accept_header, parse_cache_control_header, parse_etags, \
      parse_date, generate_etag, is_resource_modified, unquote_etag, \
      quote_etag, parse_set_header, parse_authorization_header, \
-     parse_www_authenticate_header
-from werkzeug.utils import MultiDict, CombinedMultiDict, FileStorage, \
-     Headers, EnvironHeaders, cached_property, environ_property, \
-     get_current_url, create_environ, url_encode, run_wsgi_app, get_host, \
+     parse_www_authenticate_header, remove_entity_headers, \
+     default_stream_factory
+from werkzeug.utils import cached_property, environ_property, \
+     get_current_url, url_encode, run_wsgi_app, get_host, \
      cookie_date, parse_cookie, dump_cookie, http_date, escape, \
      header_property, parse_form_data, get_content_type, url_decode
+from werkzeug.datastructures import MultiDict, CombinedMultiDict, Headers, \
+     EnvironHeaders, ImmutableMultiDict, ImmutableTypeConversionDict, \
+     ImmutableList, MIMEAccept, CharsetAccept, LanguageAccept, \
+     ResponseCacheControl, RequestCacheControl
 from werkzeug._internal import _empty_stream, _decode_unicode, \
      _patch_wrapper
 
@@ -48,96 +52,160 @@
     to the request object, there is also a class called `Request` which
     subclasses `BaseRequest` and all the important mixins.
 
-    It's a good idea to creat