changeset 4627:c404a1295318

updated werkzeug
author Thomas Waldmann <tw AT waldmann-edv DOT de>
date Thu, 05 Mar 2009 23:01:03 +0100
parents 25532a36f2b5
children 3c6980b5e938
files MoinMoin/support/werkzeug/__init__.py MoinMoin/support/werkzeug/contrib/fixers.py MoinMoin/support/werkzeug/contrib/limiter.py MoinMoin/support/werkzeug/contrib/lint.py MoinMoin/support/werkzeug/datastructures.py MoinMoin/support/werkzeug/debug/__init__.py MoinMoin/support/werkzeug/http.py MoinMoin/support/werkzeug/serving.py MoinMoin/support/werkzeug/test.py MoinMoin/support/werkzeug/utils.py MoinMoin/support/werkzeug/wrappers.py
diffstat 11 files changed, 273 insertions(+), 99 deletions(-) [+]
line wrap: on
line diff
--- a/MoinMoin/support/werkzeug/__init__.py	Mon Mar 02 02:09:10 2009 +0100
+++ b/MoinMoin/support/werkzeug/__init__.py	Thu Mar 05 23:01:03 2009 +0100
@@ -43,7 +43,8 @@
                              'validate_arguments', 'ArgumentValidationError',
                              'bind_arguments', 'FileWrapper', 'wrap_file',
                              'pop_path_info', 'peek_path_info',
-                             'LimitedStream'],
+                             'LimitedStream', 'make_line_iter',
+                             'secure_filename'],
     'werkzeug.datastructures': ['MultiDict', 'CombinedMultiDict', 'Headers',
                              'EnvironHeaders', 'ImmutableList',
                              'ImmutableDict', 'ImmutableMultiDict',
--- a/MoinMoin/support/werkzeug/contrib/fixers.py	Mon Mar 02 02:09:10 2009 +0100
+++ b/MoinMoin/support/werkzeug/contrib/fixers.py	Thu Mar 05 23:01:03 2009 +0100
@@ -17,6 +17,10 @@
     :license: BSD, see LICENSE for more details.
 """
 from urllib import unquote
+from werkzeug.http import parse_options_header, parse_cache_control_header, \
+     parse_set_header, dump_header
+from werkzeug.useragents import UserAgent
+from werkzeug.datastructures import Headers, ResponseCacheControl
 
 
 class LighttpdCGIRootFix(object):
@@ -132,3 +136,70 @@
             new_headers += self.add_headers
             return start_response(status, new_headers, exc_info)
         return self.app(environ, rewriting_start_response)
+
+
+class InternetExplorerFix(object):
+    """This middleware fixes a couple of bugs with Microsoft Internet
+    Explorer.  Currently the following fixes are applied:
+
+    -   removing of `Vary` headers for unsupported mimetypes which
+        causes troubles with caching.  Can be disabled by passing
+        ``fix_vary=False`` to the constructor.
+        see: http://support.microsoft.com/kb/824847/en-us
+
+    -   removes offending headers to work around caching bugs in
+        Internet Explorer if `Content-Disposition` is set.  Can be
+        disabled by passing ``fix_attach=False`` to the constructor.
+
+    If it does not detect affected Internet Explorer versions it won't touch
+    the request / response.
+    """
+
+    # This code was inspired by Django fixers for the same bugs.  The
+    # fix_vary and fix_attach fixers were originally implemented in Django
+    # by Michael Axiak and is available as part of the Django project:
+    #     http://code.djangoproject.com/ticket/4148
+
+    def __init__(self, app, fix_vary=True, fix_attach=True):
+        self.app = app
+        self.fix_vary = fix_vary
+        self.fix_attach = fix_attach
+
+    def fix_headers(self, environ, headers, status=None):
+        if self.fix_vary:
+            header = headers.get('content-type', '')
+            mimetype, options = parse_options_header(header)
+            if mimetype not in ('text/html', 'text/plain', 'text/sgml'):
+                headers.pop('vary', None)
+
+        if self.fix_attach and 'content-disposition' in headers:
+            pragma = parse_set_header(headers.get('pragma', ''))
+            pragma.discard('no-cache')
+            header = pragma.to_header()
+            if not header:
+                headers.pop('pragma', '')
+            else:
+                headers['Pragma'] = header
+            header = headers.get('cache-control', '')
+            if header:
+                cc = parse_cache_control_header(header,
+                                                cls=ResponseCacheControl)
+                cc.no_cache = None
+                cc.no_store = False
+                header = cc.to_header()
+                if not header:
+                    headers.pop('cache-control', '')
+                else:
+                    headers['Cache-Control'] = header
+
+    def run_fixed(self, environ, start_response):
+        def fixing_start_response(status, headers, exc_info=None):
+            self.fix_headers(environ, Headers.linked(headers), status)
+            return start_response(status, headers, exc_info)
+        return self.app(environ, fixing_start_response)
+
+    def __call__(self, environ, start_response):
+        ua = UserAgent(environ)
+        if ua.browser != 'msie':
+            return self.app(environ, start_response)
+        return self.run_fixed(environ, start_response)
--- a/MoinMoin/support/werkzeug/contrib/limiter.py	Mon Mar 02 02:09:10 2009 +0100
+++ b/MoinMoin/support/werkzeug/contrib/limiter.py	Thu Mar 05 23:01:03 2009 +0100
@@ -30,7 +30,7 @@
 
     def __init__(self, environ, limit):
         _SilentLimitedStream.__init__(self, environ, limit)
-        warn(DeprecationWarning('comtrin limited stream is deprecated, use '
+        warn(DeprecationWarning('contrib limited stream is deprecated, use '
                                 'werkzeug.LimitedStream instead.'),
              stacklevel=2)
 
--- a/MoinMoin/support/werkzeug/contrib/lint.py	Mon Mar 02 02:09:10 2009 +0100
+++ b/MoinMoin/support/werkzeug/contrib/lint.py	Thu Mar 05 23:01:03 2009 +0100
@@ -55,7 +55,7 @@
         elif len(args) != 1:
             warn(WSGIWarning('too many parameters passed to wsgi.input.read()'),
                  stacklevel=2)
-        return self._stream.read(*args, **kwargs)
+        return self._stream.read(*args)
 
     def readline(self, *args):
         if len(args) == 0:
@@ -69,7 +69,7 @@
                  stacklevel=2)
         else:
             raise TypeError('too many arguments passed to wsgi.input.readline()')
-        return self._stream.readline(*args, **kwargs)
+        return self._stream.readline(*args)
 
     def __iter__(self):
         try:
--- a/MoinMoin/support/werkzeug/datastructures.py	Mon Mar 02 02:09:10 2009 +0100
+++ b/MoinMoin/support/werkzeug/datastructures.py	Thu Mar 05 23:01:03 2009 +0100
@@ -10,6 +10,7 @@
 """
 import re
 import codecs
+import mimetypes
 from werkzeug._internal import _proxy_repr, _missing
 
 
@@ -1070,8 +1071,7 @@
                 filename = file
             file = open(file, 'rb')
         if filename and content_type is None:
-            from mimetypes import guess_type
-            content_type = guess_type(filename)[0] or \
+            content_type = mimetypes.guess_type(filename)[0] or \
                            'application/octet-stream'
         self[name] = FileStorage(file, filename, name, content_type)
 
--- a/MoinMoin/support/werkzeug/debug/__init__.py	Mon Mar 02 02:09:10 2009 +0100
+++ b/MoinMoin/support/werkzeug/debug/__init__.py	Thu Mar 05 23:01:03 2009 +0100
@@ -8,8 +8,8 @@
     :copyright: (c) 2009 by the Werkzeug Team, see AUTHORS for more details.
     :license: BSD, see LICENSE for more details.
 """
+import mimetypes
 from os.path import join, dirname, basename, isfile
-from mimetypes import guess_type
 from werkzeug.wrappers import BaseRequest as Request, BaseResponse as Response
 from werkzeug.debug.repr import debug_repr
 from werkzeug.debug.tbtools import get_current_traceback
@@ -128,7 +128,8 @@
         """Return a static resource from the shared folder."""
         filename = join(dirname(__file__), 'shared', basename(filename))
         if isfile(filename):
-            mimetype = guess_type(filename)[0] or 'application/octet-stream'
+            mimetype = mimetypes.guess_type(filename)[0] \
+                or 'application/octet-stream'
             f = file(filename, 'rb')
             try:
                 return Response(f.read(), mimetype=mimetype)
--- a/MoinMoin/support/werkzeug/http.py	Mon Mar 02 02:09:10 2009 +0100
+++ b/MoinMoin/support/werkzeug/http.py	Thu Mar 05 23:01:03 2009 +0100
@@ -23,6 +23,7 @@
 from tempfile import TemporaryFile
 from urllib2 import parse_http_list as _parse_list_header
 from datetime import datetime
+from itertools import chain, repeat
 try:
     from hashlib import md5
 except ImportError:
@@ -210,7 +211,7 @@
     return name, extra
 
 
-def parse_accept_header(value, accept_class=None):
+def parse_accept_header(value, cls=None):
     """Parses an HTTP Accept-* header.  This does not implement a complete
     valid algorithm but one that supports at least value and quality
     extraction.
@@ -222,15 +223,15 @@
     with the parsed values and returned.
 
     :param value: the accept header string to be parsed.
-    :param accept_class: the wrapper class for the return value (can be
+    :param cls: the wrapper class for the return value (can be
                          :class:`Accept` or a subclass thereof)
     :return: an instance of `cls`.
     """
-    if accept_class is None:
-        accept_class = Accept
+    if cls is None:
+        cls = Accept
 
     if not value:
-        return accept_class(None)
+        return cls(None)
 
     result = []
     for match in _accept_re.finditer(value):
@@ -240,30 +241,30 @@
         else:
             quality = max(min(float(quality), 1), 0)
         result.append((match.group(1), quality))
-    return accept_class(result)
+    return cls(result)
 
 
-def parse_cache_control_header(value, on_update=None, cache_control_class=None):
+def parse_cache_control_header(value, on_update=None, cls=None):
     """Parse a cache control header.  The RFC differs between response and
     request cache control, this method does not.  It's your responsibility
     to not use the wrong control statements.
 
     .. versionadded:: 0.5
-       The `cache_control_class` was added.  If not specified an immutable
+       The `cls` was added.  If not specified an immutable
        :class:`RequestCacheControl` is returned.
 
     :param value: a cache control header to be parsed.
     :param on_update: an optional callable that is called every time a
                       value on the :class:`CacheControl` object is changed.
-    :param cache_control_class: the class for the returned object.  By default
+    :param cls: the class for the returned object.  By default
                                 :class:`RequestCacheControl` is used.
-    :return: a `cache_control_class` object.
+    :return: a `cls` object.
     """
-    if cache_control_class is None:
-        cache_control_class = RequestCacheControl
+    if cls is None:
+        cls = RequestCacheControl
     if not value:
-        return cache_control_class(None, on_update)
-    return cache_control_class(parse_dict_header(value), on_update)
+        return cls(None, on_update)
+    return cls(parse_dict_header(value), on_update)
 
 
 def parse_set_header(value, on_update=None):
@@ -509,7 +510,9 @@
     in_memory = 0
 
     # convert the file into a limited stream with iteration capabilities
-    iterator = _ChunkIter(file, content_length, buffer_size)
+    file = LimitedStream(file, content_length)
+    iterator = chain(make_line_iter(file, buffer_size=buffer_size),
+                     repeat(''))
 
     try:
         terminator = iterator.next().strip()
@@ -582,7 +585,7 @@
                                                    charset, errors)))
     finally:
         # make sure the whole input stream is read
-        iterator.exhaust()
+        file.exhaust()
 
     return form, files
 
@@ -698,33 +701,11 @@
 
 
 # circular dependency fun
-from werkzeug.utils import LimitedStream, FileStorage
+from werkzeug.utils import make_line_iter, FileStorage, LimitedStream
 from werkzeug.datastructures import Headers, Accept, RequestCacheControl, \
      ResponseCacheControl, HeaderSet, ETags, Authorization, \
      WWWAuthenticate
 
 
-class _ChunkIter(LimitedStream):
-    """An iterator that yields chunks from the file.  This iterator
-    does not end!  It will happily continue yielding empty strings
-    if the limit is reached.  This is intentional.
-    """
-
-    def __init__(self, stream, limit, buffer_size):
-        LimitedStream.__init__(self, stream, limit, True)
-        self._buffer = []
-        self._buffer_size = buffer_size
-
-    def next(self):
-        if len(self._buffer) > 1:
-            return self._buffer.pop(0)
-        chunks = self.read(self._buffer_size).splitlines(True)
-        first_chunk = self._buffer and self._buffer[0] or ''
-        if chunks:
-            first_chunk += chunks.pop(0)
-        self._buffer = chunks
-        return first_chunk
-
-
 # backwards compatibible imports
 from werkzeug.datastructures import MIMEAccept, CharsetAccept, LanguageAccept
--- a/MoinMoin/support/werkzeug/serving.py	Mon Mar 02 02:09:10 2009 +0100
+++ b/MoinMoin/support/werkzeug/serving.py	Thu Mar 05 23:01:03 2009 +0100
@@ -46,6 +46,7 @@
 import sys
 import time
 import thread
+from urllib import unquote
 from urlparse import urlparse
 from itertools import chain
 from SocketServer import ThreadingMixIn, ForkingMixIn
@@ -60,9 +61,8 @@
 class BaseRequestHandler(BaseHTTPRequestHandler, object):
     server_version = 'Werkzeug/' + version
 
-    def run_wsgi(self):
-        path_info, _, query = urlparse(self.path)[2:5]
-        app = self.server.app
+    def make_environ(self):
+        path_info, query = urlparse(self.path)[2::2]
         environ = {
             'wsgi.version':         (1, 0),
             'wsgi.url_scheme':      'http',
@@ -70,9 +70,10 @@
             'wsgi.errors':          sys.stderr,
             'wsgi.multithread':     self.server.multithread,
             'wsgi.multiprocess':    self.server.multiprocess,
-            'wsgi.run_once':        0,
+            'wsgi.run_once':        False,
             'REQUEST_METHOD':       self.command,
             'SCRIPT_NAME':          '',
+            'PATH_INFO':            unquote(path_info),
             'QUERY_STRING':         query,
             'CONTENT_TYPE':         self.headers.get('Content-Type', ''),
             'CONTENT_LENGTH':       self.headers.get('Content-Length', ''),
@@ -82,15 +83,17 @@
             'SERVER_PORT':          str(self.server.server_address[1]),
             'SERVER_PROTOCOL':      self.request_version
         }
-        if path_info:
-            from urllib import unquote
-            environ['PATH_INFO'] = unquote(path_info)
 
         for key, value in self.headers.items():
             key = 'HTTP_' + key.upper().replace('-', '_')
             if key not in ('HTTP_CONTENT_TYPE', 'HTTP_CONTENT_LENGTH'):
                 environ[key] = value
 
+        return environ
+
+    def run_wsgi(self):
+        app = self.server.app
+        environ = self.make_environ()
         headers_set = []
         headers_sent = []
 
@@ -141,11 +144,12 @@
             finally:
                 if hasattr(application_iter, 'close'):
                     application_iter.close()
+                application_iter = None
 
         try:
             execute(app)
-        except (socket.error, socket.timeout):
-            return
+        except (socket.error, socket.timeout), e:
+            self.connection_dropped(e, environ)
         except:
             if self.server.passthrough_errors:
                 raise
@@ -162,6 +166,11 @@
             self.server.log('error', 'Error on request:\n%s',
                             traceback.plaintext)
 
+    def connection_dropped(self, error, environ):
+        """Called if the connection was closed by the client.  By default
+        nothing happens.
+        """
+
     def handle_one_request(self):
         """Handle a single HTTP request."""
         self.raw_requestline = self.rfile.readline()
--- a/MoinMoin/support/werkzeug/test.py	Mon Mar 02 02:09:10 2009 +0100
+++ b/MoinMoin/support/werkzeug/test.py	Thu Mar 05 23:01:03 2009 +0100
@@ -10,13 +10,13 @@
 """
 import sys
 import urlparse
+import mimetypes
 from time import time
 from random import random
 from itertools import chain
 from tempfile import TemporaryFile
 from cStringIO import StringIO
 from cookielib import CookieJar
-from mimetypes import guess_type
 from urllib2 import Request as U2Request
 
 from werkzeug._internal import _empty_stream
@@ -69,8 +69,9 @@
                                    getattr(value, 'name', None))
                 content_type = getattr(value, 'content_type', None)
                 if content_type is None:
-                    content_type = filename and guess_type(filename)[0] or \
-                                   'application/octet-stream'
+                    content_type = filename and \
+                        mimetypes.guess_type(filename)[0] or \
+                        'application/octet-stream'
                 if filename is not None:
                     write('; filename="%s"\r\n' % filename)
                 else:
--- a/MoinMoin/support/werkzeug/utils.py	Mon Mar 02 02:09:10 2009 +0100
+++ b/MoinMoin/support/werkzeug/utils.py	Thu Mar 05 23:01:03 2009 +0100
@@ -16,7 +16,9 @@
 import urllib
 import urlparse
 import posixpath
-from time import time
+import mimetypes
+from zlib import adler32
+from time import time, mktime
 from datetime import datetime, timedelta
 
 from werkzeug._internal import _patch_wrapper, _decode_unicode, \
@@ -26,6 +28,9 @@
 
 _format_re = re.compile(r'\$(?:(%s)|\{(%s)\})' % (('[a-zA-Z_][a-zA-Z0-9_]*',) * 2))
 _entity_re = re.compile(r'&([^;]+);')
+_filename_ascii_strip_re = re.compile(r'[^A-Za-z0-9_.-]')
+_windows_device_files = ('CON', 'AUX', 'COM1', 'COM2', 'COM3', 'COM4', 'LPT1',
+                         'LPT2', 'LPT3', 'PRN', 'NUL')
 
 
 class FileStorage(object):
@@ -50,6 +55,8 @@
         call.  The buffer size is the number of bytes held in memory during
         the copy process.  It defaults to 16KB.
 
+        For secure file saving also have a look at :func:`secure_filename`.
+
         :param dst: a filename or open file object the uploaded file
                     is saved to.
         :param buffer_size: the size of the buffer.  This works the same as
@@ -120,6 +127,11 @@
     rules for files that are not accessible from the web.  If `cache` is set to
     `False` no caching headers are sent.
 
+    Currently the middleware does not support non ASCII filenames.  If the
+    encoding on the file system happens to be the encoding of the URI it may
+    work but this could also be by accident.  We strongly suggest using ASCII
+    only file names for static files.
+
     .. versionchanged:: 0.5
        The cache timeout is configurable now.
 
@@ -201,6 +213,13 @@
             return None, None
         return loader
 
+    def generate_etag(self, mtime, file_size, real_filename):
+        return 'wzsdm-%d-%s-%s' % (
+            mktime(mtime.timetuple()),
+            file_size,
+            adler32(real_filename) & 0xffffffff
+        )
+
     def __call__(self, environ, start_response):
         # sanitize the path for non unix systems
         cleaned_path = environ.get('PATH_INFO', '').strip('/')
@@ -223,15 +242,15 @@
                     break
         if file_loader is None or not self.is_allowed(real_filename):
             return self.app(environ, start_response)
-        from mimetypes import guess_type
-        guessed_type = guess_type(real_filename)
+
+        guessed_type = mimetypes.guess_type(real_filename)
         mime_type = guessed_type[0] or 'text/plain'
         f, mtime, file_size = file_loader()
 
         headers = [('Date', http_date())]
         if self.cache:
             timeout = self.cache_timeout
-            etag = 'wzsdm-%s-%s-%s' % (mtime, file_size, hash(real_filename))
+            etag = self.generate_etag(mtime, file_size, real_filename)
             headers += [
                 ('Etag', '"%s"' % etag),
                 ('Cache-Control', 'max-age=%d, public' % timeout)
@@ -364,13 +383,53 @@
         raise StopIteration()
 
 
+
+def make_line_iter(stream, limit=None, buffer_size=10 * 1024):
+    """Safely iterates line-based over an input stream.  If the input stream
+    is not a :class:`LimitedStream` the `limit` parameter is mandatory.
+
+    This uses the stream's :meth:`~file.read` method internally as opposite
+    to the :meth:`~file.readline` method that is unsafe and can only be used
+    in violation of the WSGI specification.  The same problem applies to the
+    `__iter__` function of the input stream which calls :meth:`~file.readline`
+    without arguments.
+
+    If you need line-by-line processing it's strongly recommended to iterate
+    over the input stream using this helper function.
+
+    :param stream: the stream to iterate over.
+    :param limit: the limit in bytes for the stream.  (Usually
+                  content length.  Not necessary if the `stream`
+                  is a :class:`LimitedStream`.
+    :param buffer_size: The optional buffer size.
+    """
+    if not isinstance(stream, LimitedStream):
+        if limit is None:
+            raise TypeError('stream not limited and no limit provided.')
+        stream = LimitedStream(stream, limit)
+    buffer = []
+    while 1:
+        if len(buffer) > 1:
+            yield buffer.pop(0)
+            continue
+        chunks = stream.read(buffer_size).splitlines(True)
+        first_chunk = buffer and buffer[0] or ''
+        if chunks:
+            first_chunk += chunks.pop(0)
+        buffer = chunks
+        if not first_chunk:
+            return
+        yield first_chunk
+
+
 class LimitedStream(object):
     """Wraps a stream so that it doesn't read more than n bytes.  If the
     stream is exhausted and the caller tries to get more bytes from it
-    :func:`on_exhausted` is called which by default raises a
-    :exc:`~werkzeug.exceptions.BadRequest`.  The return value of that
-    function is forwarded to the reader function.  So if it returns an
-    empty string :meth:`read` will return an empty string as well.
+    :func:`on_exhausted` is called which by default returns an empty
+    string or raises :exc:`~werkzeug.exceptions.BadRequest` if silent
+    is set to `False`.  The return value of that function is forwarded
+    to the reader function.  So if it returns an empty string
+    :meth:`read` will return an empty string as well.
 
     The limit however must never be higher than what the stream can
     output.  Otherwise :meth:`readlines` will try to read past the
@@ -379,6 +438,22 @@
     The `silent` parameter has no effect if :meth:`is_exhausted` is
     overriden by a subclass.
 
+    .. admonition:: Note on WSGI compliance
+
+       calls to :meth:`readline` and :meth:`readlines` are not
+       WSGI compliant because it passes a size argument to the
+       readline methods.  Unfortunately the WSGI PEP is not safely
+       implementable without a size argument to :meth:`readline`
+       because there is no EOF marker in the stream.  As a result
+       of that the use of :meth:`readline` is discouraged.
+
+       For the same reason iterating over the :class:`LimitedStream`
+       is not portable.  It internally calls :meth:`readline`.
+
+       We strongly suggest using :meth:`read` only or using the
+       :func:`make_line_iter` which safely iterates line-based
+       over a WSGI input stream.
+
     :param stream: the stream to wrap.
     :param limit: the limit for the stream, must not be longer than
                   what the string can provide if the stream does not
@@ -387,7 +462,7 @@
                    past the limit and will return an empty string.
     """
 
-    def __init__(self, stream, limit, silent=False):
+    def __init__(self, stream, limit, silent=True):
         self._stream = stream
         self._pos = 0
         self.limit = limit
@@ -441,10 +516,7 @@
         return read
 
     def readline(self, size=None):
-        """Read a line from the stream.  Arguments are forwarded to the
-        `readline` function of the underlaying stream if it supports
-        them.
-        """
+        """Reads one line from the stream."""
         if self._pos >= self.limit:
             return self.on_exhausted()
         if size is None:
@@ -741,7 +813,7 @@
 
 def parse_form_data(environ, stream_factory=None, charset='utf-8',
                     errors='ignore', max_form_memory_size=None,
-                    max_content_length=None, dict_class=None):
+                    max_content_length=None, cls=None):
     """Parse the form data in the environ and return it as tuple in the form
     ``(stream, form, files)``.  You should only call this method if the
     transport method is `POST` or `PUT`.
@@ -758,7 +830,7 @@
 
     .. versionadded:: 0.5
        The `max_form_memory_size`, `max_content_length` and
-       `dict_class` parameters were added.
+       `cls` parameters were added.
 
     :param environ: the WSGI environment to be used for parsing.
     :param stream_factory: An optional callable that returns a new read and
@@ -775,7 +847,7 @@
                                is longer than this value an
                                :exc:`~exceptions.RequestEntityTooLarge`
                                exception is raised.
-    :param dict_class: an optional dict class to use.  If this is not specified
+    :param cls: an optional dict class to use.  If this is not specified
                        or `None` the default :class:`MultiDict` is used.
     :return: A tuple in the form ``(stream, form, files)``.
     """
@@ -785,8 +857,8 @@
     except (KeyError, ValueError):
         content_length = 0
 
-    if dict_class is None:
-        dict_class = MultiDict
+    if cls is None:
+        cls = MultiDict
 
     if max_content_length is not None and content_length > max_content_length:
         raise RequestEntityTooLarge()
@@ -802,22 +874,21 @@
                                           charset, errors,
                                           max_form_memory_size=max_form_memory_size)
         except ValueError, e:
-            form = dict_class()
+            form = cls()
         else:
-            form = dict_class(form)
+            form = cls(form)
     elif content_type == 'application/x-www-form-urlencoded' or \
          content_type == 'application/x-url-encoded':
         if max_form_memory_size is not None and \
            content_length > max_form_memory_size:
             raise RequestEntityTooLarge()
         form = url_decode(environ['wsgi.input'].read(content_length),
-                          charset, errors=errors, dict_class=dict_class)
+                          charset, errors=errors, cls=cls)
     else:
-        form = dict_class()
-        stream = LimitedStream(environ['wsgi.input'], content_length,
-                               silent=True)
+        form = cls()
+        stream = LimitedStream(environ['wsgi.input'], content_length)
 
-    return stream, form, dict_class(files)
+    return stream, form, cls(files)
 
 
 def get_content_type(mimetype, charset):
@@ -859,7 +930,7 @@
 
 
 def url_decode(s, charset='utf-8', decode_keys=False, include_empty=True,
-               errors='ignore', separator='&', dict_class=None):
+               errors='ignore', separator='&', cls=None):
     """Parse a querystring and return it as :class:`MultiDict`.  Per default
     only values are decoded into unicode strings.  If `decode_keys` is set to
     `True` the same will happen for keys.
@@ -876,7 +947,7 @@
        This changed in 0.5 where only "&" is supported.  If you want to
        use ";" instead a different `separator` can be provided.
 
-       The `dict_class` parameter was added.
+       The `cls` parameter was added.
 
     :param s: a string with the query string to decode.
     :param charset: the charset of the query string.
@@ -886,11 +957,11 @@
                           appear in the dict.
     :param errors: the decoding error behavior.
     :param separator: the pair separator to be used, defaults to ``&``
-    :param dict_class: an optional dict class to use.  If this is not specified
+    :param cls: an optional dict class to use.  If this is not specified
                        or `None` the default :class:`MultiDict` is used.
     """
-    if dict_class is None:
-        dict_class = MultiDict
+    if cls is None:
+        cls = MultiDict
     result = []
     for pair in str(s).split(separator):
         if not pair:
@@ -904,7 +975,7 @@
         if decode_keys:
             key = _decode_unicode(key, charset, errors)
         result.append((key, url_unquote_plus(value, charset, errors)))
-    return dict_class(result)
+    return cls(result)
 
 
 def url_encode(obj, charset='utf-8', encode_keys=False, sort=False, key=None,
@@ -1037,6 +1108,45 @@
     return urlparse.urlunsplit((scheme, netloc, path, qs, anchor))
 
 
+def secure_filename(filename):
+    """Pass it a filename and it will return a secure version of it.  This
+    filename can then safely be stored on a regular file system and passed
+    to :func:`os.path.join`.  The filename returned is an ASCII only string
+    for maximum portability.
+
+    On Windows systems the function also makes sure that the file is not
+    named after one of the special device files.
+
+    >>> secure_filename("My cool movie.mov")
+    'My_cool_movie.mov'
+    >>> secure_filename("../../../etc/passwd")
+    'etc_passwd'
+    >>> secure_filename(u'i contain cool \xfcml\xe4uts.txt')
+    'i_contain_cool_umlauts.txt'
+
+    .. versionadded:: 0.5
+
+    :param filename: the filename to secure
+    """
+    if isinstance(filename, unicode):
+        from unicodedata import normalize
+        filename = normalize('NFKD', filename).encode('ascii', 'ignore')
+    for sep in os.path.sep, os.path.altsep:
+        if sep:
+            filename = filename.replace(sep, ' ')
+    filename = str(_filename_ascii_strip_re.sub('', '_'.join(
+                   filename.split()))).strip('._')
+
+    # on nt a couple of special files are present in each folder.  We
+    # have to ensure that the target file is not such a filename.  In
+    # this case we prepend an underline
+    if os.name == 'nt':
+        if filename.split('.')[0].upper() in _windows_device_files:
+            filename = '_' + filename
+
+    return filename
+
+
 def escape(s, quote=False):
     """Replace special characters "&", "<" and ">" to HTML-safe sequences.  If
     the optional flag `quote` is `True`, the quotation mark character (") is
@@ -1213,7 +1323,7 @@
 
 
 def parse_cookie(header, charset='utf-8', errors='ignore',
-                 dict_class=None):
+                 cls=None):
     """Parse a cookie.  Either from a string or WSGI environ.
 
     Per default encoding errors are ignored.  If you want a different behavior
@@ -1222,20 +1332,20 @@
 
     .. versionchanged:: 0.5
        This function now returns a :class:`TypeConversionDict` instead of a
-       regular dict.  The `dict_class` parameter was added.
+       regular dict.  The `cls` parameter was added.
 
     :param header: the header to be used to parse the cookie.  Alternatively
                    this can be a WSGI environment.
     :param charset: the charset for the cookie values.
     :param errors: the error behavior for the charset decoding.
-    :param dict_class: an optional dict class to use.  If this is not specified
+    :param cls: an optional dict class to use.  If this is not specified
                        or `None` the default :class:`TypeConversionDict` is
                        used.
     """
     if isinstance(header, dict):
         header = header.get('HTTP_COOKIE', '')
-    if dict_class is None:
-        dict_class = TypeConversionDict
+    if cls is None:
+        cls = TypeConversionDict
     cookie = _ExtendedCookie()
     cookie.load(header)
     result = {}
@@ -1247,7 +1357,7 @@
         if value.value is not None:
             result[key] = _decode_unicode(value.value, charset, errors)
 
-    return dict_class(result)
+    return cls(result)
 
 
 def dump_cookie(key, value='', max_age=None, expires=None, path='/',
--- a/MoinMoin/support/werkzeug/wrappers.py	Mon Mar 02 02:09:10 2009 +0100
+++ b/MoinMoin/support/werkzeug/wrappers.py	Thu Mar 05 23:01:03 2009 +0100
@@ -226,7 +226,7 @@
                                        self.charset, self.encoding_errors,
                                        self.max_form_memory_size,
                                        self.max_content_length,
-                                       dict_class=ImmutableMultiDict)
+                                       cls=ImmutableMultiDict)
             else:
                 data = (_empty_stream, ImmutableMultiDict(),
                         ImmutableMultiDict())
@@ -252,7 +252,7 @@
         """The parsed URL parameters as :class:`MultiDict`."""
         return url_decode(self.environ.get('QUERY_STRING', ''), self.charset,
                           errors=self.encoding_errors,
-                          dict_class=ImmutableMultiDict)
+                          cls=ImmutableMultiDict)
 
     @cached_property
     def data(self):
@@ -299,7 +299,7 @@
     def cookies(self):
         """The retrieved cookie values as regular dictionary."""
         return parse_cookie(self.environ, self.charset,
-                            dict_class=ImmutableTypeConversionDict)
+                            cls=ImmutableTypeConversionDict)
 
     @cached_property
     def headers(self):