changeset 1920:b06ef2a53efa

'make pylint', fixed lots of minor stuff found by pylint (and there is still a lot left to do)
author Thomas Waldmann <tw AT waldmann-edv DOT de>
date Fri, 30 Mar 2007 21:50:50 +0200
parents 5abc7d1528d5
children 4122148ceadb
files Makefile MoinMoin/Page.py MoinMoin/PageEditor.py MoinMoin/PageGraphicalEditor.py MoinMoin/_tests/test_request.py MoinMoin/action/AttachFile.py MoinMoin/action/LikePages.py MoinMoin/action/PackagePages.py MoinMoin/action/SpellCheck.py MoinMoin/action/SyncPages.py MoinMoin/action/fckdialog.py MoinMoin/action/info.py MoinMoin/action/login.py MoinMoin/action/test.py MoinMoin/caching.py MoinMoin/config/multiconfig.py MoinMoin/failure.py MoinMoin/filter/EXIF.py MoinMoin/filter/image_jpeg.py MoinMoin/formatter/__init__.py MoinMoin/formatter/text_html.py MoinMoin/formatter/text_xml.py MoinMoin/logfile/__init__.py MoinMoin/logfile/editlog.py MoinMoin/macro/EditTemplates.py MoinMoin/macro/EditedSystemPages.py MoinMoin/macro/EmbedObject.py MoinMoin/macro/ImageLink.py MoinMoin/macro/Include.py MoinMoin/macro/MonthCalendar.py MoinMoin/macro/Navigation.py MoinMoin/macro/SystemInfo.py MoinMoin/macro/TableOfContents.py MoinMoin/macro/TeudView.py MoinMoin/macro/WantedPages.py MoinMoin/macro/__init__.py MoinMoin/packages.py MoinMoin/parser/_ParserBase.py MoinMoin/parser/text_cplusplus.py MoinMoin/parser/text_irssi.py MoinMoin/parser/text_java.py MoinMoin/parser/text_pascal.py MoinMoin/parser/text_python.py MoinMoin/parser/text_rst.py MoinMoin/parser/text_xslt.py MoinMoin/search/builtin.py MoinMoin/search/results.py MoinMoin/theme/__init__.py MoinMoin/user.py MoinMoin/userform.py MoinMoin/version.py MoinMoin/wikisync.py MoinMoin/wikiutil.py
diffstat 53 files changed, 366 insertions(+), 332 deletions(-)
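Most of the diff below repeats the same few pylint-driven cleanups across many modules: local names that shadow Python builtins (filter, file, str, map, list, type) are renamed, one-line "if cond: stmt" suites are split onto separate lines, a bare string raise is replaced by a real Exception instance, and dead code is commented out. A minimal illustrative sketch of the pattern (hypothetical helper names, not code from the repository):

    import re

    # before: 'filter' shadows the builtin, and the one-line "if" hides a branch
    def find_matching_before(names, pattern):
        filter = re.compile(pattern).match
        hits = [n for n in names if filter(n)]
        if not hits: return []
        return sorted(hits)

    # after: renamed to avoid shadowing the builtin, one statement per line
    def find_matching_after(names, pattern):
        filterfn = re.compile(pattern).match
        hits = [n for n in names if filterfn(n)]
        if not hits:
            return []
        return sorted(hits)
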
--- a/Makefile	Tue Mar 27 21:50:05 2007 +0200
+++ b/Makefile	Fri Mar 30 21:50:50 2007 +0200
@@ -85,6 +85,8 @@
 	@python tests/maketestwiki.py
 	@python -u -m trace --count --coverdir=cover --missing tests/runtests.py
 
+pylint:
+	@pylint --disable-msg=W0511,W0302,W0131,C0101,W0612,W0613,W0702,W0703,W0704,W0706,W0142,W0201 --disable-msg-cat=R --parseable=y MoinMoin
 
 clean: clean-testwiki clean-pyc
 	rm -rf build
--- a/MoinMoin/Page.py	Tue Mar 27 21:50:05 2007 +0200
+++ b/MoinMoin/Page.py	Fri Mar 30 21:50:50 2007 +0200
@@ -662,7 +662,8 @@
             return os.path.getsize(self._text_filename(rev=rev))
         except EnvironmentError, e:
             import errno
-            if e.errno == errno.ENOENT: return 0
+            if e.errno == errno.ENOENT:
+                return 0
             raise
 
     def mtime_usecs(self):
@@ -838,16 +839,20 @@
         userlist = user.getUserList(request)
         subscriber_list = {}
         for uid in userlist:
-            if uid == request.user.id and not include_self: continue # no self notification
+            if uid == request.user.id and not include_self:
+                continue # no self notification
             subscriber = user.User(request, uid)
 
             # This is a bit wrong if return_users=1 (which implies that the caller will process
             # user attributes and may, for example choose to send an SMS)
             # So it _should_ be "not (subscriber.email and return_users)" but that breaks at the moment.
-            if not subscriber.email: continue # skip empty email addresses
-            if trivial and not subscriber.want_trivial: continue # skip uninterested subscribers
+            if not subscriber.email:
+                continue # skip empty email addresses
+            if trivial and not subscriber.want_trivial:
+                continue # skip uninterested subscribers
 
-            if not UserPerms(subscriber).read(self.page_name): continue
+            if not UserPerms(subscriber).read(self.page_name):
+                continue
 
             if subscriber.isSubscribedTo(pageList):
                 lang = subscriber.language or request.cfg.language_default
--- a/MoinMoin/PageEditor.py	Tue Mar 27 21:50:05 2007 +0200
+++ b/MoinMoin/PageEditor.py	Fri Mar 30 21:50:50 2007 +0200
@@ -180,9 +180,10 @@
                     else:
                         msg = edit_lock_message
             except OSError, err:
-                if err.errno != errno.ENAMETOOLONG:
-                    raise err
-                msg = _("Page name is too long, try shorter name.")
+                if err.errno == errno.ENAMETOOLONG:
+                    msg = _("Page name is too long, try shorter name.")
+                else:
+                    raise
 
         # Did one of the prechecks fail?
         if msg:
@@ -411,8 +412,8 @@
         request.write("</p>")
 
         # Category selection
-        filter = self.cfg.cache.page_category_regex.search
-        cat_pages = request.rootpage.getPageList(filter=filter)
+        filterfn = self.cfg.cache.page_category_regex.search
+        cat_pages = request.rootpage.getPageList(filter=filterfn)
         cat_pages.sort()
         cat_pages = [wikiutil.pagelinkmarkup(p) for p in cat_pages]
         cat_pages.insert(0, ('', _('<No addition>', formatted=False)))
@@ -773,11 +774,11 @@
         now = time.time()
         # default: UTC
         zone = "Z"
-        user = self.request.user
+        u = self.request.user
 
         # setup the timezone
-        if user.valid and user.tz_offset:
-            tz = user.tz_offset
+        if u.valid and u.tz_offset:
+            tz = u.tz_offset
             # round to minutes
             tz -= tz % 60
             minutes = tz / 60
@@ -802,24 +803,24 @@
         # TODO: Allow addition of variables via wikiconfig or a global wiki dict.
         request = self.request
         now = self._get_local_timestamp()
-        user = request.user
-        signature = user.signature()
+        u = request.user
+        signature = u.signature()
         variables = {
             'PAGE': self.page_name,
             'TIME': "[[DateTime(%s)]]" % now,
             'DATE': "[[Date(%s)]]" % now,
-            'ME': user.name,
+            'ME': u.name,
             'USERNAME': signature,
             'USER': "-- %s" % signature,
             'SIG': "-- %s [[DateTime(%s)]]" % (signature, now),
         }
 
-        if user.valid and user.name:
-            if user.email:
-                variables['MAILTO'] = "[[MailTo(%s)]]" % user.email
+        if u.valid and u.name:
+            if u.email:
+                variables['MAILTO'] = "[[MailTo(%s)]]" % u.email
             # Users can define their own variables via
             # UserHomepage/MyDict, which override the default variables.
-            userDictPage = user.name + "/MyDict"
+            userDictPage = u.name + "/MyDict"
             if request.dicts.has_dict(userDictPage):
                 variables.update(request.dicts.dict(userDictPage))
 
--- a/MoinMoin/PageGraphicalEditor.py	Tue Mar 27 21:50:05 2007 +0200
+++ b/MoinMoin/PageGraphicalEditor.py	Fri Mar 30 21:50:50 2007 +0200
@@ -342,8 +342,8 @@
         request.write("</p>")
 
         # Category selection
-        filter = self.cfg.cache.page_category_regex.search
-        cat_pages = request.rootpage.getPageList(filter=filter)
+        filterfn = self.cfg.cache.page_category_regex.search
+        cat_pages = request.rootpage.getPageList(filter=filterfn)
         cat_pages.sort()
         cat_pages = [wikiutil.pagelinkmarkup(p) for p in cat_pages]
         cat_pages.insert(0, ('', _('<No addition>', formatted=False)))
--- a/MoinMoin/_tests/test_request.py	Tue Mar 27 21:50:05 2007 +0200
+++ b/MoinMoin/_tests/test_request.py	Fri Mar 30 21:50:50 2007 +0200
@@ -79,33 +79,32 @@
 
 class GroupPagesTestCase(unittest.TestCase):
 
-   def setUp(self):
-       self.config = TestConfig(self.request,
-                                page_group_regex=r'.+Group')
-
-   def tearDown(self):
-       del self.config
+    def setUp(self):
+        self.config = TestConfig(self.request, page_group_regex=r'.+Group')
 
-   def testNormalizeGroupName(self):
-       """ request: normalize pagename: restrict groups to alpha numeric Unicode
-       
-       Spaces should normalize after invalid chars removed!
-       """
-       import re
-       group = re.compile(r'.+Group', re.UNICODE)
-       cases = (
-           # current acl chars
-           (u'Name,:Group', u'NameGroup'),
-           # remove than normalize spaces
-           (u'Name ! @ # $ % ^ & * ( ) + Group', u'Name Group'),
-           )
-       for test, expected in cases:
-           # validate we are testing valid group names
-           assert group.search(test)
-           result = self.request.normalizePagename(test)
-           self.assertEqual(result, expected,
-                            ('Expected "%(expected)s" but got "%(result)s"') %
-                            locals())
+    def tearDown(self):
+        del self.config
+
+    def testNormalizeGroupName(self):
+        """ request: normalize pagename: restrict groups to alpha numeric Unicode
+        
+        Spaces should normalize after invalid chars removed!
+        """
+        import re
+        group = re.compile(r'.+Group', re.UNICODE)
+        cases = (
+            # current acl chars
+            (u'Name,:Group', u'NameGroup'),
+            # remove than normalize spaces
+            (u'Name ! @ # $ % ^ & * ( ) + Group', u'Name Group'),
+            )
+        for test, expected in cases:
+            # validate we are testing valid group names
+            assert group.search(test)
+            result = self.request.normalizePagename(test)
+            self.assertEqual(result, expected,
+                             ('Expected "%(expected)s" but got "%(result)s"') %
+                             locals())
 
 
 class HTTPDateTests(unittest.TestCase):
--- a/MoinMoin/action/AttachFile.py	Tue Mar 27 21:50:05 2007 +0200
+++ b/MoinMoin/action/AttachFile.py	Fri Mar 30 21:50:50 2007 +0200
@@ -122,10 +122,12 @@
     """
     _ = request.getText
     attach_dir = getAttachDir(request, pagename)
-    if not os.path.exists(attach_dir): return ''
+    if not os.path.exists(attach_dir):
+        return ''
 
     files = os.listdir(attach_dir)
-    if not files: return ''
+    if not files:
+        return ''
 
     attach_count = _('[%d attachments]') % len(files)
     attach_icon = request.theme.make_icon('attach', vars={'attach_count': attach_count})
@@ -283,16 +285,16 @@
     if mime_type != '*':
         files = [fname for fname in files if mime_type == mimetypes.guess_type(fname)[0]]
 
-    str = ""
+    html = ""
     if files:
         if showheader:
-            str = str + _(
+            html += _(
                 "To refer to attachments on a page, use '''{{{attachment:filename}}}''', \n"
                 "as shown below in the list of files. \n"
                 "Do '''NOT''' use the URL of the {{{[get]}}} link, \n"
                 "since this is subject to change and can break easily."
             )
-        str = str + "<ul>"
+        html += "<ul>"
 
         label_del = _("del")
         label_move = _("move")
@@ -352,15 +354,15 @@
             parmdict['viewlink'] = viewlink
             parmdict['del_link'] = del_link
             parmdict['move_link'] = move_link
-            str = str + ('<li>[%(del_link)s%(move_link)s'
+            html += ('<li>[%(del_link)s%(move_link)s'
                 '<a href="%(get_url)s">%(label_get)s</a>&nbsp;| %(viewlink)s]'
                 ' (%(fmtime)s, %(fsize)s KB) attachment:<strong>%(file)s</strong></li>') % parmdict
-        str = str + "</ul>"
+        html += "</ul>"
     else:
         if showheader:
-            str = '%s<p>%s</p>' % (str, _("No attachments stored for %(pagename)s") % {'pagename': wikiutil.escape(pagename)})
+            html += '<p>%s</p>' % (_("No attachments stored for %(pagename)s") % {'pagename': wikiutil.escape(pagename)})
 
-    return str
+    return html
 
 
 def _get_files(request, pagename):
@@ -587,14 +589,14 @@
         else:
             msg = _('You are not allowed to get attachments from this page.')
     elif request.form['do'][0] == 'unzip':
-         if request.user.may.delete(pagename) and request.user.may.read(pagename) and request.user.may.write(pagename):
+        if request.user.may.delete(pagename) and request.user.may.read(pagename) and request.user.may.write(pagename):
             unzip_file(pagename, request)
-         else:
+        else:
             msg = _('You are not allowed to unzip attachments of this page.')
     elif request.form['do'][0] == 'install':
-         if request.user.isSuperUser():
+        if request.user.isSuperUser():
             install_package(pagename, request)
-         else:
+        else:
             msg = _('You are not allowed to install files.')
     elif request.form['do'][0] == 'view':
         if request.user.may.read(pagename):
@@ -711,7 +713,8 @@
     _ = request.getText
 
     filename, fpath = _access_file(pagename, request)
-    if not filename: return # error msg already sent in _access_file
+    if not filename:
+        return # error msg already sent in _access_file
 
     # delete file
     os.remove(fpath)
@@ -751,8 +754,8 @@
         else:
             upload_form(pagename, request, msg=_("Nothing changed"))
     else:
-         upload_form(pagename, request, msg=_("Page %(newpagename)s does not exists or you don't have enough rights.") % {
-             'newpagename': new_pagename})
+        upload_form(pagename, request, msg=_("Page %(newpagename)s does not exists or you don't have enough rights.") % {
+            'newpagename': new_pagename})
 
 def attachment_move(pagename, request):
     _ = request.getText
@@ -776,7 +779,8 @@
     _ = request.getText
 
     filename, fpath = _access_file(pagename, request)
-    if not filename: return # error msg already sent in _access_file
+    if not filename:
+        return # error msg already sent in _access_file
 
     # move file
     d = {'action': 'AttachFile',
@@ -894,8 +898,8 @@
     if files:
         fsize = 0.0
         fcount = 0
-        for file in files:
-            fsize += float(size(request, pagename, file))
+        for f in files:
+            fsize += float(size(request, pagename, f))
             fcount += 1
 
         available_attachments_file_space = attachments_file_space - fsize
@@ -963,7 +967,8 @@
     _ = request.getText
 
     filename, fpath = _access_file(pagename, request)
-    if not filename: return
+    if not filename:
+        return
 
     request.write('<h2>' + _("Attachment '%(filename)s'") % {'filename': filename} + '</h2>')
 
@@ -1007,7 +1012,8 @@
     _ = request.getText
 
     filename, fpath = _access_file(pagename, request)
-    if not filename: return
+    if not filename:
+        return
 
     # send header & title
     request.emit_http_headers()
--- a/MoinMoin/action/LikePages.py	Tue Mar 27 21:50:05 2007 +0200
+++ b/MoinMoin/action/LikePages.py	Fri Mar 30 21:50:50 2007 +0200
@@ -144,7 +144,7 @@
     if match:
         start = match.group(1)
     else:
-       start = words[0]
+        start = words[0]
 
     match = end_re.search(pagename)
     if match:
--- a/MoinMoin/action/PackagePages.py	Tue Mar 27 21:50:05 2007 +0200
+++ b/MoinMoin/action/PackagePages.py	Fri Mar 30 21:50:50 2007 +0200
@@ -21,7 +21,8 @@
 from MoinMoin.action import AttachFile
 from MoinMoin.action.AttachFile import _get_files
 
-class ActionError(Exception): pass
+class ActionError(Exception):
+    pass
 
 class PackagePages:
     def __init__(self, pagename, request):
--- a/MoinMoin/action/SpellCheck.py	Tue Mar 27 21:50:05 2007 +0200
+++ b/MoinMoin/action/SpellCheck.py	Fri Mar 30 21:50:50 2007 +0200
@@ -38,9 +38,9 @@
 
     # validate candidate list (leave out directories!)
     wordsfiles = []
-    for file in candidates:
-        if os.path.isfile(file) and os.access(file, os.F_OK | os.R_OK):
-            wordsfiles.append(file)
+    for f in candidates:
+        if os.path.isfile(f) and os.access(f, os.F_OK | os.R_OK):
+            wordsfiles.append(f)
 
     # return validated file list
     return wordsfiles
@@ -55,13 +55,13 @@
     request.clock.start('spellread')
     try:
         try:
-            file = codecs.open(filename, 'rt', config.charset)
-            lines = file.readlines()
+            f = codecs.open(filename, 'rt', config.charset)
+            lines = f.readlines()
         except UnicodeError:
-            file = codecs.open(filename, 'rt', 'iso-8859-1')
-            lines = file.readlines()
+            f = codecs.open(filename, 'rt', 'iso-8859-1')
+            lines = f.readlines()
     finally:
-        file.close()
+        f.close()
     _loadWords(lines, dict)
     request.clock.stop('spellread')
 
@@ -159,7 +159,8 @@
 
     # do the checking
     for line in text.split('\n'):
-        if line == '' or line[0] == '#': continue
+        if line == '' or line[0] == '#':
+            continue
         word_re.sub(checkword, line)
 
     if badwords:
--- a/MoinMoin/action/SyncPages.py	Tue Mar 27 21:50:05 2007 +0200
+++ b/MoinMoin/action/SyncPages.py	Fri Mar 30 21:50:50 2007 +0200
@@ -37,7 +37,8 @@
 directions_map = {"up": UP, "down": DOWN, "both": BOTH}
 
 
-class ActionStatus(Exception): pass
+class ActionStatus(Exception):
+    pass
 
 
 class ActionClass(object):
--- a/MoinMoin/action/fckdialog.py	Tue Mar 27 21:50:05 2007 +0200
+++ b/MoinMoin/action/fckdialog.py	Fri Mar 30 21:50:50 2007 +0200
@@ -211,7 +211,7 @@
         pages = [p.page_name for p in searchresult.hits]
         pages.sort()
         pages[0:0] = [name]
-        page_list ='''
+        page_list = '''
          <tr>
           <td colspan=2>
            <select id="sctPagename" size="1" onchange="OnChangePagename(this.value);">
--- a/MoinMoin/action/info.py	Tue Mar 27 21:50:05 2007 +0200
+++ b/MoinMoin/action/info.py	Fri Mar 30 21:50:50 2007 +0200
@@ -93,8 +93,7 @@
 
         def render_action(text, query, **kw):
             kw.update(dict(rel='nofollow'))
-            html = page.link_to(request, text, querystr=query, **kw)
-            return html
+            return page.link_to(request, text, querystr=query, **kw)
 
         # read in the complete log of this page
         log = editlog.EditLog(request, rootpagename=pagename)
--- a/MoinMoin/action/login.py	Tue Mar 27 21:50:05 2007 +0200
+++ b/MoinMoin/action/login.py	Fri Mar 30 21:50:50 2007 +0200
@@ -38,7 +38,7 @@
             # Require valid user name
             name = form.get('name', [''])[0]
             if not user.isValidName(request, name):
-                 error = _("""Invalid user name {{{'%s'}}}.
+                error = _("""Invalid user name {{{'%s'}}}.
 Name may contain any Unicode alpha numeric character, with optional one
 space between words. Group page name is not allowed.""") % name
 
--- a/MoinMoin/action/test.py	Tue Mar 27 21:50:05 2007 +0200
+++ b/MoinMoin/action/test.py	Fri Mar 30 21:50:50 2007 +0200
@@ -34,8 +34,8 @@
         request.write('PyXML is missing\n')
 
     request.write('Python Path:\n')
-    for dir in sys.path:
-        request.write('   %s\n' % dir)
+    for path in sys.path:
+        request.write('   %s\n' % path)
 
     # check if the request is a local one
     import socket
@@ -58,12 +58,14 @@
     # check eventlog access
     log = eventlog.EventLog(request)
     msg = log.sanityCheck()
-    if msg: request.write("*** %s\n" % msg)
+    if msg:
+        request.write("*** %s\n" % msg)
 
     # check editlog access
     log = editlog.EditLog(request)
     msg = log.sanityCheck()
-    if msg: request.write("*** %s\n" % msg)
+    if msg:
+        request.write("*** %s\n" % msg)
 
     # keep some values to ourselves
     request.write("\nServer Environment:\n")
--- a/MoinMoin/caching.py	Tue Mar 27 21:50:05 2007 +0200
+++ b/MoinMoin/caching.py	Fri Mar 30 21:50:50 2007 +0200
@@ -89,21 +89,21 @@
 
         return needsupdate
 
-    def copyto(self, filename):
-        # currently unused function
-        import shutil
-        tmpfname = self._tmpfilename()
-        fname = self._filename()
-        if not self.locking or self.locking and self.wlock.acquire(1.0):
-            try:
-                shutil.copyfile(filename, tmpfname)
-                # this is either atomic or happening with real locks set:
-                filesys.rename(tmpfname, fname)
-            finally:
-                if self.locking:
-                    self.wlock.release()
-        else:
-            self.request.log("Can't acquire write lock in %s" % self.lock_dir)
+#    def copyto(self, filename):
+#        # currently unused function
+#        import shutil
+#        tmpfname = self._tmpfilename()
+#        fname = self._filename()
+#        if not self.locking or self.locking and self.wlock.acquire(1.0):
+#            try:
+#                shutil.copyfile(filename, tmpfname)
+#                # this is either atomic or happening with real locks set:
+#                filesys.rename(tmpfname, fname)
+#            finally:
+#                if self.locking:
+#                    self.wlock.release()
+#        else:
+#            self.request.log("Can't acquire write lock in %s" % self.lock_dir)
 
     def update(self, content):
         try:
--- a/MoinMoin/config/multiconfig.py	Tue Mar 27 21:50:05 2007 +0200
+++ b/MoinMoin/config/multiconfig.py	Fri Mar 30 21:50:50 2007 +0200
@@ -172,7 +172,8 @@
 
 # This is a way to mark some text for the gettext tools so that they don't
 # get orphaned. See http://www.python.org/doc/current/lib/node278.html.
-def _(text): return text
+def _(text):
+    return text
 
 
 class CacheClass:
--- a/MoinMoin/failure.py	Tue Mar 27 21:50:05 2007 +0200
+++ b/MoinMoin/failure.py	Fri Mar 30 21:50:50 2007 +0200
@@ -103,10 +103,10 @@
     def formatAllTracebacks(self, formatFuction):
         """ Format multiple tracebacks using formatFunction """
         tracebacks = []
-        for type, value, tb in self.exceptions():
-            if type is None:
+        for ttype, tvalue, tb in self.exceptions():
+            if ttype is None:
                 break
-            tracebacks.append(formatFuction((type, value, tb)))
+            tracebacks.append(formatFuction((ttype, tvalue, tb)))
             del tb
         return ''.join(tracebacks)
 
--- a/MoinMoin/filter/EXIF.py	Tue Mar 27 21:50:05 2007 +0200
+++ b/MoinMoin/filter/EXIF.py	Fri Mar 30 21:50:50 2007 +0200
@@ -719,10 +719,10 @@
 # ratio object that eventually will be able to reduce itself to lowest
 # common denominator for printing
 def gcd(a, b):
-   if b == 0:
-      return a
-   else:
-      return gcd(b, a % b)
+    if b == 0:
+        return a
+    else:
+        return gcd(b, a % b)
 
 class Ratio:
     def __init__(self, num, den):
@@ -1073,14 +1073,14 @@
 # process an image file (expects an open file object)
 # this is the function that has to deal with all the arbitrary nasty bits
 # of the EXIF standard
-def process_file(file, name='UNDEF', debug=0):
+def process_file(f, name='UNDEF', debug=0):
     # determine whether it's a JPEG or TIFF
-    data = file.read(12)
+    data = f.read(12)
     if data[0:4] in ['II*\x00', 'MM\x00*']:
         # it's a TIFF file
-        file.seek(0)
-        endian = file.read(1)
-        file.read(1)
+        f.seek(0)
+        endian = f.read(1)
+        f.read(1)
         offset = 0
     elif data[0:2] == '\xFF\xD8':
         # it's a JPEG file
@@ -1088,14 +1088,14 @@
         fake_exif = 0
         while data[2] == '\xFF' and data[6:10] in ('JFIF', 'JFXX', 'OLYM'):
             length = ord(data[4])*256+ord(data[5])
-            file.read(length-8)
+            f.read(length-8)
             # fake an EXIF beginning of file
-            data = '\xFF\x00'+file.read(10)
+            data = '\xFF\x00'+f.read(10)
             fake_exif = 1
         if data[2] == '\xFF' and data[6:10] == 'Exif':
             # detected EXIF header
-            offset = file.tell()
-            endian = file.read(1)
+            offset = f.tell()
+            endian = f.read(1)
         else:
             # no EXIF information
             return {}
@@ -1106,7 +1106,7 @@
     # deal with the EXIF info we found
     if debug:
         print {'I': 'Intel', 'M': 'Motorola'}[endian], 'format'
-    hdr = EXIF_header(file, endian, offset, fake_exif, debug)
+    hdr = EXIF_header(f, endian, offset, fake_exif, debug)
     ifd_list = hdr.list_IFDs()
     ctr = 0
     for i in ifd_list:
@@ -1150,9 +1150,9 @@
     # JPEG thumbnail (thankfully the JPEG data is stored as a unit)
     thumb_off = hdr.tags.get('Thumbnail JPEGInterchangeFormat')
     if thumb_off:
-        file.seek(offset+thumb_off.values[0])
+        f.seek(offset+thumb_off.values[0])
         size = hdr.tags['Thumbnail JPEGInterchangeFormatLength'].values[0]
-        hdr.tags['JPEGThumbnail'] = file.read(size)
+        hdr.tags['JPEGThumbnail'] = f.read(size)
 
     # deal with MakerNote contained in EXIF IFD
     if 'EXIF MakerNote' in hdr.tags:
@@ -1163,8 +1163,8 @@
     if 'JPEGThumbnail' not in hdr.tags:
         thumb_off = hdr.tags.get('MakerNote JPEGThumbnail')
         if thumb_off:
-            file.seek(offset+thumb_off.values[0])
-            hdr.tags['JPEGThumbnail'] = file.read(thumb_off.field_length)
+            f.seek(offset+thumb_off.values[0])
+            hdr.tags['JPEGThumbnail'] = f.read(thumb_off.field_length)
 
     return hdr.tags
 
@@ -1178,13 +1178,13 @@
 
     for filename in sys.argv[1:]:
         try:
-            file = open(filename, 'rb')
+            f = open(filename, 'rb')
         except:
             print filename, 'unreadable'
             print
             continue
-        print filename+':'
-        data = process_file(file, 1) # with debug info
+        print filename + ':'
+        data = process_file(f, 1) # with debug info
         if not data:
             print 'No EXIF information found'
             continue
--- a/MoinMoin/filter/image_jpeg.py	Tue Mar 27 21:50:05 2007 +0200
+++ b/MoinMoin/filter/image_jpeg.py	Fri Mar 30 21:50:50 2007 +0200
@@ -5,7 +5,7 @@
     @copyright: 2006 MoinMoin:ThomasWaldmann
     @license: GNU GPL, see COPYING for details.
 """
-import EXIF
+from MoinMoin.filter import EXIF
 
 def execute(indexobj, filename):
     """ Extract some EXIF data """
--- a/MoinMoin/formatter/__init__.py	Tue Mar 27 21:50:05 2007 +0200
+++ b/MoinMoin/formatter/__init__.py	Fri Mar 30 21:50:50 2007 +0200
@@ -356,6 +356,6 @@
         """
         return ""
 
-    def comment(self, text):
+    def comment(self, text, **kw):
         return ""
 
--- a/MoinMoin/formatter/text_html.py	Tue Mar 27 21:50:05 2007 +0200
+++ b/MoinMoin/formatter/text_html.py	Fri Mar 30 21:50:50 2007 +0200
@@ -712,7 +712,11 @@
                 # add alt and title tags to areas
                 map = re.sub('href\s*=\s*"((?!%TWIKIDRAW%).+?)"', r'href="\1" alt="\1" title="\1"', map)
                 # add in edit links plus alt and title attributes
-                map = map.replace('%TWIKIDRAW%"', edit_link + '" alt="' + _('Edit drawing %(filename)s') % {'filename': self.text(fname)} + '" title="' + _('Edit drawing %(filename)s') % {'filename': self.text(fname)} + '"')
+                map = map.replace('%TWIKIDRAW%"',
+                                  edit_link + 
+                                  '" alt="' + _('Edit drawing %(filename)s') % {'filename': self.text(fname)} + 
+                                  '" title="' + _('Edit drawing %(filename)s') % {'filename': self.text(fname)} +
+                                  '"')
                 # unxml, because 4.01 concrete will not validate />
                 map = map.replace('/>', '>')
                 return (map + self.image(
--- a/MoinMoin/formatter/text_xml.py	Tue Mar 27 21:50:05 2007 +0200
+++ b/MoinMoin/formatter/text_xml.py	Fri Mar 30 21:50:50 2007 +0200
@@ -83,10 +83,10 @@
 
     def rule(self, size=0, **kw):
         return "\n<br/>%s<br/>\n" % ("-" * 78,) # <hr/> not supported in stylebook
-        if size:
-            return '<hr size="%d"/>\n' % (size,)
-        else:
-            return '<hr/>\n'
+#        if size:
+#            return '<hr size="%d"/>\n' % (size,)
+#        else:
+#            return '<hr/>\n'
 
     def icon(self, type):
         return '<icon type="%s" />' % type            
@@ -158,7 +158,7 @@
 
         id_text = ''
         if id:
-          id_text = ' id="%s"' % id
+            id_text = ' id="%s"' % id
 
         return result + '<s%d%s title="' % (depth, id_text)
 
--- a/MoinMoin/logfile/__init__.py	Tue Mar 27 21:50:05 2007 +0200
+++ b/MoinMoin/logfile/__init__.py	Fri Mar 30 21:50:50 2007 +0200
@@ -264,7 +264,7 @@
                 self.__buffer = self.__buffer2
 
         if self.__lineno is not None:
-             self.__lineno += lines
+            self.__lineno += lines
         return False
 
     def __next(self):
--- a/MoinMoin/logfile/editlog.py	Tue Mar 27 21:50:05 2007 +0200
+++ b/MoinMoin/logfile/editlog.py	Fri Mar 30 21:50:50 2007 +0200
@@ -124,7 +124,7 @@
             title = wikiutil.escape('???' + title)
             text = wikiutil.escape(info[:idx])
         else:
-            raise "unknown EditorData type"
+            raise Exception("unknown EditorData type")
         return '<span title="%s">%s</span>' % (title, text)
 
 
@@ -146,43 +146,43 @@
         self.uid_override = kw.get('uid_override', None)
 
     def add(self, request, mtime, rev, action, pagename, host=None, extra=u'', comment=u''):
-            """ Generate a line for the editlog.
-    
-            If `host` is None, it's read from request vars.
-            """
-            if host is None:
-                host = request.remote_addr
-
-            if request.cfg.log_reverse_dns_lookups:
-                import socket
-                try:
-                    hostname = socket.gethostbyaddr(host)[0]
-                    hostname = unicode(hostname, config.charset)
-                except (socket.error, UnicodeError):
-                    hostname = host
-            else:
-                hostname = host
+        """ Generate a line for the editlog.
 
-            remap_chars = {u'\t': u' ', u'\r': u' ', u'\n': u' ', }
-            comment = comment.translate(remap_chars)
-            user_id = request.user.valid and request.user.id or ''
-
-            if self.uid_override is not None:
-                user_id = ''
-                hostname = self.uid_override
-                host = ''
+        If `host` is None, it's read from request vars.
+        """
+        if host is None:
+            host = request.remote_addr
 
-            line = u"\t".join((str(long(mtime)), # has to be long for py 2.2.x
-                               "%08d" % rev,
-                               action,
-                               wikiutil.quoteWikinameFS(pagename),
-                               host,
-                               hostname,
-                               user_id,
-                               extra,
-                               comment,
-                               )) + "\n"
-            self._add(line)
+        if request.cfg.log_reverse_dns_lookups:
+            import socket
+            try:
+                hostname = socket.gethostbyaddr(host)[0]
+                hostname = unicode(hostname, config.charset)
+            except (socket.error, UnicodeError):
+                hostname = host
+        else:
+            hostname = host
+
+        remap_chars = {u'\t': u' ', u'\r': u' ', u'\n': u' ', }
+        comment = comment.translate(remap_chars)
+        user_id = request.user.valid and request.user.id or ''
+
+        if self.uid_override is not None:
+            user_id = ''
+            hostname = self.uid_override
+            host = ''
+
+        line = u"\t".join((str(long(mtime)), # has to be long for py 2.2.x
+                           "%08d" % rev,
+                           action,
+                           wikiutil.quoteWikinameFS(pagename),
+                           host,
+                           hostname,
+                           user_id,
+                           extra,
+                           comment,
+                           )) + "\n"
+        self._add(line)
 
     def parser(self, line):
         """ Parser edit log line into fields """
--- a/MoinMoin/macro/EditTemplates.py	Tue Mar 27 21:50:05 2007 +0200
+++ b/MoinMoin/macro/EditTemplates.py	Fri Mar 30 21:50:50 2007 +0200
@@ -13,8 +13,8 @@
     # we don't want to spend much CPU for spiders requesting nonexisting pages
     if not self.request.isSpiderAgent:
         # Get list of template pages readable by current user
-        filter = self.request.cfg.cache.page_template_regex.search
-        templates = self.request.rootpage.getPageList(filter=filter)
+        filterfn = self.request.cfg.cache.page_template_regex.search
+        templates = self.request.rootpage.getPageList(filter=filterfn)
         result = []
         if templates:
             templates.sort()
--- a/MoinMoin/macro/EditedSystemPages.py	Tue Mar 27 21:50:05 2007 +0200
+++ b/MoinMoin/macro/EditedSystemPages.py	Fri Mar 30 21:50:50 2007 +0200
@@ -23,14 +23,14 @@
 
         # Get page list for current user (use this as admin), filter
         # pages that are both underlay and standard pages.
-        def filter(name):
+        def filterfn(name):
             page = Page(self.request, name)
             return (page.isStandardPage(includeDeleted=0) and
                     page.isUnderlayPage(includeDeleted=0))
 
         # Get page filtered page list. We don't need to filter by
         # exists, because our filter check this already.
-        pages = self.request.rootpage.getPageList(filter=filter, exists=0)
+        pages = self.request.rootpage.getPageList(filter=filterfn, exists=0)
 
         # Format as numberd list, sorted by page name         
         pages.sort()
--- a/MoinMoin/macro/EmbedObject.py	Tue Mar 27 21:50:05 2007 +0200
+++ b/MoinMoin/macro/EmbedObject.py	Fri Mar 30 21:50:50 2007 +0200
@@ -170,7 +170,7 @@
         mtype = mime_type.split('/')
 
         if self.alt == "":
-            self.alt = "%(text)s %(mime_type)s" % {'text': _("Embedded"), 'mime_type': mime_type,}
+            self.alt = "%(text)s %(mime_type)s" % {'text': _("Embedded"), 'mime_type': mime_type, }
 
         if mtype[0] == 'video':
             return '''
--- a/MoinMoin/macro/ImageLink.py	Tue Mar 27 21:50:05 2007 +0200
+++ b/MoinMoin/macro/ImageLink.py	Fri Mar 30 21:50:50 2007 +0200
@@ -120,7 +120,8 @@
         if '=' in arg:
             key, value = arg.split('=', 1)
             # avoid that urls with "=" are interpreted as keyword
-            if key.lower() not in kwAllowed: continue
+            if key.lower() not in kwAllowed:
+                continue
             kw_count += 1
             kw[str(key.lower())] = wikiutil.escape(value, quote=1)
 
--- a/MoinMoin/macro/Include.py	Tue Mar 27 21:50:05 2007 +0200
+++ b/MoinMoin/macro/Include.py	Fri Mar 30 21:50:50 2007 +0200
@@ -198,7 +198,7 @@
                 request._page_headings.setdefault(pntt, 0)
                 request._page_headings[pntt] += 1
                 if request._page_headings[pntt] > 1:
-                    hid += '-%d'%(request._page_headings[pntt],)
+                    hid += '-%d' % (request._page_headings[pntt], )
                 result.append(
                     macro.formatter.heading(1, level, id=hid) +
                     inc_page.link_to(request, heading, css_class="include-heading-link") +
--- a/MoinMoin/macro/MonthCalendar.py	Tue Mar 27 21:50:05 2007 +0200
+++ b/MoinMoin/macro/MonthCalendar.py	Fri Mar 30 21:50:50 2007 +0200
@@ -10,7 +10,7 @@
     @license: GNU GPL, see COPYING for details.
 
     Revisions:
-    * first revision without a number (=1.0):
+    * first revision without a number (=1.0):
         * was only online for a few hours and then replaced by 1.1
     * 1.1:
         * changed name to MonthCalendar to avoid conflict with "calendar" under case-insensitive OSes like Win32
@@ -219,7 +219,7 @@
 
     stranniversary = args.group('anniversary')
     if stranniversary:
-            parmanniversary = int(stranniversary)
+        parmanniversary = int(stranniversary)
     else:
         parmanniversary = defanniversary
 
--- a/MoinMoin/macro/Navigation.py	Tue Mar 27 21:50:05 2007 +0200
+++ b/MoinMoin/macro/Navigation.py	Fri Mar 30 21:50:50 2007 +0200
@@ -25,10 +25,10 @@
 def _getPages(request, filter_regex=None):
     """ Return a (filtered) list of pages names.
     """
-    filter = None
+    filterfn = None
     if filter_regex:
-        filter = re.compile(filter_regex).match
-    pages = request.rootpage.getPageList(filter=filter)
+        filterfn = re.compile(filter_regex).match
+    pages = request.rootpage.getPageList(filter=filterfn)
     pages.sort()
     return pages
 
--- a/MoinMoin/macro/SystemInfo.py	Tue Mar 27 21:50:05 2007 +0200
+++ b/MoinMoin/macro/SystemInfo.py	Fri Mar 30 21:50:50 2007 +0200
@@ -164,4 +164,4 @@
         return buf.getvalue()
 
 def execute(macro, args):
-        return SystemInfo(macro, args).render()
+    return SystemInfo(macro, args).render()
--- a/MoinMoin/macro/TableOfContents.py	Tue Mar 27 21:50:05 2007 +0200
+++ b/MoinMoin/macro/TableOfContents.py	Fri Mar 30 21:50:50 2007 +0200
@@ -123,7 +123,8 @@
     def parse_line(self, line, pagename):
         # FIXME this also finds "headlines" in {{{ code sections }}}:
         match = self.head_re.match(line)
-        if not match: return
+        if not match:
+            return
         title_text = match.group('htext').strip()
         pntt = pagename + title_text
         self.titles.setdefault(pntt, 0)
@@ -131,8 +132,8 @@
 
         # Get new indent level
         newindent = len(match.group('hmarker'))
-        if newindent > self.maxdepth: return
-        if newindent < self.mindepth: return
+        if newindent > self.maxdepth or newindent < self.mindepth:
+            return
         if not self.indent:
             self.baseindent = newindent - 1
             self.indent = self.baseindent
--- a/MoinMoin/macro/TeudView.py	Tue Mar 27 21:50:05 2007 +0200
+++ b/MoinMoin/macro/TeudView.py	Fri Mar 30 21:50:50 2007 +0200
@@ -36,11 +36,11 @@
     if 'module' in macro.form:
         modname = macro.form["module"][0]
         try:
-            object = pydoc.locate(modname)
+            obj = pydoc.locate(modname)
         except pydoc.ErrorDuringImport, value:
             return "Error while loading module %s: %s" % (modname, value)
         else:
-            xmlstr = xmldoc.xml.document(object, encoding=config.charset)
+            xmlstr = xmldoc.xml.document(obj, encoding=config.charset)
 
         navigation = '<a href="%s">Index</a>' % pagename
         pathlen = modname.count('.')
--- a/MoinMoin/macro/WantedPages.py	Tue Mar 27 21:50:05 2007 +0200
+++ b/MoinMoin/macro/WantedPages.py	Fri Mar 30 21:50:50 2007 +0200
@@ -38,7 +38,7 @@
         # Skip system pages, because missing translations are not wanted pages,
         # unless you are a translator and clicked "Include system pages"
         if not allpages and wikiutil.isSystemPage(request, name):
-                continue
+            continue
 
         # Add links to pages which does not exists in pages dict
         links = page.getPageLinks(request)
@@ -59,7 +59,8 @@
     result = []
     result.append(macro.formatter.number_list(1))
     for name in wantednames:
-        if not name: continue
+        if not name:
+            continue
         result.append(macro.formatter.listitem(1))
         # Add link to the wanted page
         result.append(macro.formatter.pagelink(1, name, generated=1))
--- a/MoinMoin/macro/__init__.py	Tue Mar 27 21:50:05 2007 +0200
+++ b/MoinMoin/macro/__init__.py	Fri Mar 30 21:50:50 2007 +0200
@@ -140,7 +140,7 @@
             return self.defaultDependency
 
     def _macro_TitleSearch(self, args):
-        from FullSearch import search_box
+        from MoinMoin.search.FullSearch import search_box
         return search_box("titlesearch", self)
 
     def _macro_GoTo(self, args):
@@ -182,22 +182,22 @@
         if allpages:
             pages = request.rootpage.getPageList()
         else:
-            def filter(name):
+            def nosyspage(name):
                 return not wikiutil.isSystemPage(request, name)
-            pages = request.rootpage.getPageList(filter=filter)
+            pages = request.rootpage.getPageList(filter=nosyspage)
 
         word_re = re.compile(word_re, re.UNICODE)
-        map = {}
+        wordmap = {}
         for name in pages:
             for word in word_re.findall(name):
                 try:
-                    if not map[word].count(name):
-                        map[word].append(name)
+                    if not wordmap[word].count(name):
+                        wordmap[word].append(name)
                 except KeyError:
-                    map[word] = [name]
+                    wordmap[word] = [name]
 
         # Sort ignoring case
-        tmp = [(word.upper(), word) for word in map]
+        tmp = [(word.upper(), word) for word in wordmap]
         tmp.sort()
         all_words = [item[1] for item in tmp]
 
@@ -280,9 +280,9 @@
         interwiki_list = wikiutil.load_wikimap(self.request)
         buf = StringIO()
         buf.write('<dl>')
-        list = interwiki_list.items() # this is where we cached it
-        list.sort()
-        for tag, url in list:
+        iwlist = interwiki_list.items() # this is where we cached it
+        iwlist.sort()
+        for tag, url in iwlist:
             buf.write('<dt><tt><a href="%s">%s</a></tt></dt>' % (
                 wikiutil.join_wiki(url, 'RecentChanges'), tag))
             if '$PAGE' not in url:
@@ -403,7 +403,7 @@
 
     def _macro_MailTo(self, args):
         from MoinMoin.mail.sendmail import decodeSpamSafeEmail
-        result=''
+        result = ''
         args = args or ''
         if ',' not in args:
             email = args
--- a/MoinMoin/packages.py	Tue Mar 27 21:50:05 2007 +0200
+++ b/MoinMoin/packages.py	Fri Mar 30 21:50:50 2007 +0200
@@ -60,9 +60,9 @@
     event_logfile(self, pagename, pagefile)
 
 # Parsing and (un)quoting for script files
-def packLine(list, separator="|"):
-    """ Packs a list into a string that is separated by `separator`. """
-    return '|'.join([x.replace('\\', '\\\\').replace(separator, '\\' + separator) for x in list])
+def packLine(items, separator="|"):
+    """ Packs a list of items into a string that is separated by `separator`. """
+    return '|'.join([item.replace('\\', '\\\\').replace(separator, '\\' + separator) for item in items])
 
 def unpackLine(string, separator="|"):
     """ Unpacks a string that was packed by packLine. """
--- a/MoinMoin/parser/_ParserBase.py	Tue Mar 27 21:50:05 2007 +0200
+++ b/MoinMoin/parser/_ParserBase.py	Fri Mar 30 21:50:50 2007 +0200
@@ -96,7 +96,7 @@
             sword = word.lower()
         else:
             sword = word
-        return self.fmt.get(sword,self.def_fmt).formatString(formatter, word)
+        return self.fmt.get(sword, self.def_fmt).formatString(formatter, word)
 
 class FormattingRuleSingle:
     
@@ -156,7 +156,7 @@
     def setupRules(self):
         self.def_format = FormatText('Default')
         self.ID_format = FormatTextID('ID', self._ignore_case)
-        self.addRuleFormat("ID",self.ID_format)
+        self.addRuleFormat("ID", self.ID_format)
         self.addRuleFormat("Operator")
         self.addRuleFormat("Char")
         self.addRuleFormat("Comment")
@@ -176,18 +176,18 @@
         self._formatting_rule_index += 1
         n = "%s_%s" % (name, self._formatting_rule_index)
         f = FormattingRuleSingle(name, str_re, self._ignore_case)
-        self._formatting_rules.append((n,f))
+        self._formatting_rules.append((n, f))
         self._formatting_rules_n2r[n] = f
 
     def addRulePair(self, name, start_re, end_re):
         self._formatting_rule_index += 1
-        n = "%s_%s" % (name,self._formatting_rule_index)
+        n = "%s_%s" % (name, self._formatting_rule_index)
         f = FormattingRulePair(name, start_re, end_re, self._ignore_case)
-        self._formatting_rules.append((n,f))
+        self._formatting_rules.append((n, f))
         self._formatting_rules_n2r[n] = f
 
     def addWords(self, words, fmt):
-        if not isinstance(fmt,FormatTextBase):
+        if not isinstance(fmt, FormatTextBase):
             fmt = FormatText(fmt)
         for w in words:
             self.ID_format.addFormat(w, fmt)
@@ -210,13 +210,13 @@
         self.setupRules()
 
         l = []
-        for n,f in self._formatting_rules:
-            l.append("(?P<%s>%s)" % (n,f.getStartRe()))
+        for n, f in self._formatting_rules:
+            l.append("(?P<%s>%s)" % (n, f.getStartRe()))
         
         if self._ignore_case:
-            scan_re = re.compile("|".join(l),re.M|re.I)
+            scan_re = re.compile("|".join(l), re.M|re.I)
         else:
-            scan_re = re.compile("|".join(l),re.M)
+            scan_re = re.compile("|".join(l), re.M)
 
         self.lastpos = 0
         self.line = self.raw
@@ -256,11 +256,13 @@
 
     def write_match(self, formatter, match):
         for n, hit in match.groupdict().items():
-            if not hit: continue
+            if not hit:
+                continue
             r = self._formatting_rules_n2r[n]
             s = r.getText(self, hit)
-            c = self.rule_fmt.get(r.name,None)
-            if not c: c = self.def_format
+            c = self.rule_fmt.get(r.name, None)
+            if not c:
+                c = self.def_format
             first = 1
             for line in s.expandtabs(4).split('\n'):
                 if not first:
--- a/MoinMoin/parser/text_cplusplus.py	Tue Mar 27 21:50:05 2007 +0200
+++ b/MoinMoin/parser/text_cplusplus.py	Fri Mar 30 21:50:50 2007 +0200
@@ -36,31 +36,31 @@
     def setupRules(self):
         ParserBase.setupRules(self)
 
-        self.addRulePair("Comment","/[*]","[*]/")
-        self.addRule("Comment","//.*$")
-        self.addRulePair("String",'L?"',r'$|[^\\](\\\\)*"')
-        self.addRule("Char",r"'\\.'|'[^\\]'")
-        self.addRule("Number",r"[0-9](\.[0-9]*)?(eE[+-][0-9])?[flFLdD]?|0[xX][0-9a-fA-F]+[Ll]?")
-        self.addRule("Preprc",r"^\s*#(.*\\\n)*(.*(?!\\))$")
-        self.addRule("ID","[a-zA-Z_][0-9a-zA-Z_]*")
-        self.addRule("SPChar",r"[~!%^&*()+=|\[\]:;,.<>/?{}-]")
+        self.addRulePair("Comment", "/[*]","[*]/")
+        self.addRule("Comment", "//.*$")
+        self.addRulePair("String", 'L?"', r'$|[^\\](\\\\)*"')
+        self.addRule("Char", r"'\\.'|'[^\\]'")
+        self.addRule("Number", r"[0-9](\.[0-9]*)?(eE[+-][0-9])?[flFLdD]?|0[xX][0-9a-fA-F]+[Ll]?")
+        self.addRule("Preprc", r"^\s*#(.*\\\n)*(.*(?!\\))$")
+        self.addRule("ID", "[a-zA-Z_][0-9a-zA-Z_]*")
+        self.addRule("SPChar", r"[~!%^&*()+=|\[\]:;,.<>/?{}-]")
 
-        reserved_words = ['struct','class','union','enum',
-        'int','float','double','signed','unsigned','char','short','void','bool',
-        'long','register','auto','operator',
-        'static','const','private','public','protected','virtual','explicit',
-        'new','delete','this',
-        'if','else','while','for','do','switch','case','default','sizeof',
-        'dynamic_cast','static_cast','const_cast','reinterpret_cast','typeid',
-        'try','catch','throw','throws','return','continue','break','goto']
+        reserved_words = ['struct', 'class', 'union', 'enum',
+        'int', 'float', 'double', 'signed', 'unsigned', 'char', 'short', 'void', 'bool',
+        'long', 'register', 'auto', 'operator',
+        'static', 'const', 'private', 'public', 'protected', 'virtual', 'explicit',
+        'new', 'delete', 'this',
+        'if', 'else', 'while', 'for', 'do', 'switch', 'case', 'default', 'sizeof',
+        'dynamic_cast', 'static_cast', 'const_cast', 'reinterpret_cast', 'typeid',
+        'try', 'catch', 'throw', 'throws', 'return', 'continue', 'break', 'goto']
 
         reserved_words2 = ['extern', 'volatile', 'typedef', 'friend',
-                           '__declspec', 'inline','__asm','thread','naked',
-                           'dllimport','dllexport','namespace','using',
-                           'template','typename','goto']
+                           '__declspec', 'inline', '__asm', 'thread', 'naked',
+                           'dllimport', 'dllexport', 'namespace', 'using',
+                           'template', 'typename', 'goto']
 
-        special_words = ['std','string','vector','map','set','cout','cin','cerr', 'endl']
-        constant_words = ['true','false','NULL']
+        special_words = ['std', 'string', 'vector', 'map', 'set', 'cout', 'cin', 'cerr', 'endl']
+        constant_words = ['true', 'false', 'NULL']
 
         self.addReserved(reserved_words)
         self.addConstant(constant_words)
--- a/MoinMoin/parser/text_irssi.py	Tue Mar 27 21:50:05 2007 +0200
+++ b/MoinMoin/parser/text_irssi.py	Fri Mar 30 21:50:50 2007 +0200
@@ -55,9 +55,11 @@
 
         def write_tbl_cell(text, code=1, add_style=''):
             write(fmt.table_cell(1, style=tbl_style+add_style))
-            if code: write(fmt.code(1))
+            if code:
+                write(fmt.code(1))
             write(text)
-            if code: write(fmt.code(0))
+            if code:
+                write(fmt.code(0))
             write(fmt.table_cell(0))
           
         write(fmt.table(1))
--- a/MoinMoin/parser/text_java.py	Tue Mar 27 21:50:05 2007 +0200
+++ b/MoinMoin/parser/text_java.py	Fri Mar 30 21:50:50 2007 +0200
@@ -20,23 +20,23 @@
     def setupRules(self):
         ParserBase.setupRules(self)
 
-        self.addRulePair("Comment","/[*]","[*]/")
-        self.addRule("Comment","//.*$")
-        self.addRulePair("String",'"',r'$|[^\\](\\\\)*"')
-        self.addRule("Char",r"'\\.'|'[^\\]'")
-        self.addRule("Number",r"[0-9](\.[0-9]*)?(eE[+-][0-9])?[flFLdD]?|0[xX][0-9a-fA-F]+[Ll]?")
-        self.addRule("ID","[a-zA-Z_][0-9a-zA-Z_]*")
-        self.addRule("SPChar",r"[~!%^&*()+=|\[\]:;,.<>/?{}-]")
+        self.addRulePair("Comment", "/[*]", "[*]/")
+        self.addRule("Comment", "//.*$")
+        self.addRulePair("String", '"', r'$|[^\\](\\\\)*"')
+        self.addRule("Char", r"'\\.'|'[^\\]'")
+        self.addRule("Number", r"[0-9](\.[0-9]*)?(eE[+-][0-9])?[flFLdD]?|0[xX][0-9a-fA-F]+[Ll]?")
+        self.addRule("ID", "[a-zA-Z_][0-9a-zA-Z_]*")
+        self.addRule("SPChar", r"[~!%^&*()+=|\[\]:;,.<>/?{}-]")
 
-        reserved_words = ['class','interface','enum','import','package',
-        'byte','int','long','float','double','char','short','void','boolean',
-        'static','final','const','private','public','protected',
-        'new','this','super','abstract','native','synchronized','transient','volatile','strictfp',
-        'extends','implements','if','else','while','for','do','switch','case','default','instanceof',
-        'try','catch','finally','throw','throws','return','continue','break']
+        reserved_words = ['class', 'interface', 'enum', 'import', 'package',
+        'byte', 'int', 'long', 'float', 'double', 'char', 'short', 'void', 'boolean',
+        'static', 'final', 'const', 'private', 'public', 'protected',
+        'new', 'this', 'super', 'abstract', 'native', 'synchronized', 'transient', 'volatile', 'strictfp',
+        'extends', 'implements', 'if', 'else', 'while', 'for', 'do', 'switch', 'case', 'default', 'instanceof',
+        'try', 'catch', 'finally', 'throw', 'throws', 'return', 'continue', 'break']
 
         self.addReserved(reserved_words)
 
-        constant_words = ['true','false','null']
+        constant_words = ['true', 'false', 'null']
 
         self.addConstant(constant_words)
--- a/MoinMoin/parser/text_pascal.py	Tue Mar 27 21:50:05 2007 +0200
+++ b/MoinMoin/parser/text_pascal.py	Fri Mar 30 21:50:50 2007 +0200
@@ -17,34 +17,34 @@
     Dependencies = []
 
     def __init__(self, raw, request, **kw):
-        ParserBase.__init__(self,raw,request,**kw)
+        ParserBase.__init__(self, raw, request, **kw)
         self._ignore_case = 1
 
     def setupRules(self):
         ParserBase.setupRules(self)
         
-        self.addRulePair("Comment","\(\*","\*\)")
-        self.addRulePair("Comment","\{","\}")
-        self.addRule("Comment","//.*$")
-        self.addRulePair("String",'\'','\'')
-        self.addRule("Char",r"'\\.'|#[a-f0-9][a-f0-9]")
-        self.addRule("Number",r"[0-9](\.[0-9]*)?(eE[+-][0-9])?|\$[0-9a-fA-F]+")
-        self.addRule("ID","[a-zA-Z_][0-9a-zA-Z_]*")
-        self.addRule("SPChar",r"[~!%^&*()+=|\[\]:;,.<>/?{}-]")
+        self.addRulePair("Comment", "\(\*", "\*\)")
+        self.addRulePair("Comment", "\{", "\}")
+        self.addRule("Comment", "//.*$")
+        self.addRulePair("String", '\'', '\'')
+        self.addRule("Char", r"'\\.'|#[a-f0-9][a-f0-9]")
+        self.addRule("Number", r"[0-9](\.[0-9]*)?(eE[+-][0-9])?|\$[0-9a-fA-F]+")
+        self.addRule("ID", "[a-zA-Z_][0-9a-zA-Z_]*")
+        self.addRule("SPChar", r"[~!%^&*()+=|\[\]:;,.<>/?{}-]")
         
-        reserved_words = ['class','interface','set','uses','unit',
-                          'byte','integer','longint','float','double',
-                          'extended','char','shortint','boolean',
-                          'var','const','private','public','protected',
-                          'new','this','super','abstract','native',
-                          'synchronized','transient','volatile','strictfp',
-                          'if','else','while','for','do','case','default',
-                          'try','except','finally','raise','continue','break',
-                          'begin','end','type','class','implementation',
-                          'procedure','function','constructor','destructor', 'program']
+        reserved_words = ['class', 'interface', 'set', 'uses', 'unit',
+                          'byte', 'integer', 'longint', 'float', 'double',
+                          'extended', 'char', 'shortint', 'boolean',
+                          'var', 'const', 'private', 'public', 'protected',
+                          'new', 'this', 'super', 'abstract', 'native',
+                          'synchronized', 'transient', 'volatile', 'strictfp',
+                          'if', 'else', 'while', 'for', 'do', 'case', 'default',
+                          'try', 'except', 'finally', 'raise', 'continue', 'break',
+                          'begin', 'end', 'type', 'class', 'implementation',
+                          'procedure', 'function', 'constructor', 'destructor', 'program']
         
         self.addReserved(reserved_words)
         
-        constant_words = ['true','false','nil']
+        constant_words = ['true', 'false', 'nil']
         
         self.addConstant(constant_words)
--- a/MoinMoin/parser/text_python.py	Tue Mar 27 21:50:05 2007 +0200
+++ b/MoinMoin/parser/text_python.py	Fri Mar 30 21:50:50 2007 +0200
@@ -80,7 +80,7 @@
         self.request.write(self.formatter.code_line(0))
         self.request.write(formatter.code_area(0, self._code_id))
 
-    def __call__(self, toktype, toktext, (srow,scol), (erow,ecol), line):
+    def __call__(self, toktype, toktext, (srow, scol), (erow, ecol), line):
         """ Token handler.
         """
         # calculate new positions
--- a/MoinMoin/parser/text_rst.py	Tue Mar 27 21:50:05 2007 +0200
+++ b/MoinMoin/parser/text_rst.py	Fri Mar 30 21:50:50 2007 +0200
@@ -148,7 +148,7 @@
         self.nodes = []
         # Make sure it's a supported docutils version.
         required_version = (0, 3, 10)
-        current_version = tuple([int(i) for i in (docutils.__version__.split('.')+['0','0'])[:3]])
+        current_version = tuple([int(i) for i in (docutils.__version__.split('.') + ['0', '0'])[:3]])
         if current_version < required_version:
             err = 'ERROR: The installed docutils version is %s;' % ('.'.join([str(i) for i in current_version]))
             err += ' version %s or later is required.' % ('.'.join([str(i) for i in required_version]))
--- a/MoinMoin/parser/text_xslt.py	Tue Mar 27 21:50:05 2007 +0200
+++ b/MoinMoin/parser/text_xslt.py	Fri Mar 30 21:50:50 2007 +0200
@@ -109,7 +109,7 @@
                 text = text.expandtabs()
                 text = text.replace('\n', '<br>\n')
                 text = text.replace(' ', '&nbsp;')
-                before = _('%(errortype)s processing error') % {'errortype': etype,}
+                before = _('%(errortype)s processing error') % {'errortype': etype, }
                 title = u"<strong>%s: %s</strong><p>" % (before, msg)
                 self.request.write(title)
                 self.request.write(text.decode(config.charset))
--- a/MoinMoin/search/builtin.py	Tue Mar 27 21:50:05 2007 +0200
+++ b/MoinMoin/search/builtin.py	Fri Mar 30 21:50:50 2007 +0200
@@ -228,7 +228,7 @@
         """
         self.update_queue.append(pagename)
         if now:
-           self._do_queued_updates_InNewThread()
+            self._do_queued_updates_InNewThread()
 
     def remove_item(self, pagename, attachment=None, now=1):
         """ Removes a page and all its revisions or a single attachment
--- a/MoinMoin/search/results.py	Tue Mar 27 21:50:05 2007 +0200
+++ b/MoinMoin/search/results.py	Fri Mar 30 21:50:50 2007 +0200
@@ -510,7 +510,7 @@
 
             # Add text after last match and finish the line
             if match.end < end:
-               output.append(f.text(body[match.end:end]))
+                output.append(f.text(body[match.end:end]))
             output.append(f.text(u'...'))
             output.append(f.linebreak(preformatted=0))
 
--- a/MoinMoin/theme/__init__.py	Tue Mar 27 21:50:05 2007 +0200
+++ b/MoinMoin/theme/__init__.py	Fri Mar 30 21:50:50 2007 +0200
@@ -1066,28 +1066,28 @@
         _ = self.request.getText
         editbar_actions = []
         for editbar_item in self.request.cfg.edit_bar:
-             if editbar_item == 'Discussion':
-                 if not self.request.cfg.supplementation_page and self.request.getPragma('supplementation-page', 1) in ('on', '1'):
-                     editbar_actions.append(self.supplementation_page_nameLink(page))
-                 elif self.request.cfg.supplementation_page and not self.request.getPragma('supplementation-page', 1) in ('off', '0'):
-                     editbar_actions.append(self.supplementation_page_nameLink(page))
-             elif editbar_item == 'Comments':
-                 # we just use <a> to get same style as other links, but we add some dummy
-                 # link target to get correct mouseover pointer appearance. return false
-                 # keeps the browser away from jumping to the link target:: 
-                 editbar_actions.append('<a href="#" class="toggleCommentsButton" onClick="toggleComments();return false;">%s</a>' % _('Comments'))
-             elif editbar_item == 'Edit':
-                 editbar_actions.append(self.editorLink(page))
-             elif editbar_item == 'Info':
-                 editbar_actions.append(self.infoLink(page))
-             elif editbar_item == 'Subscribe':
-                 editbar_actions.append(self.subscribeLink(page))
-             elif editbar_item == 'Quicklink':
-                 editbar_actions.append(self.quicklinkLink(page))
-             elif editbar_item == 'Attachments':
-                 editbar_actions.append(self.attachmentsLink(page))
-             elif editbar_item == 'ActionsMenu':
-                 editbar_actions.append(self.actionsMenu(page))
+            if editbar_item == 'Discussion':
+                if not self.request.cfg.supplementation_page and self.request.getPragma('supplementation-page', 1) in ('on', '1'):
+                    editbar_actions.append(self.supplementation_page_nameLink(page))
+                elif self.request.cfg.supplementation_page and not self.request.getPragma('supplementation-page', 1) in ('off', '0'):
+                    editbar_actions.append(self.supplementation_page_nameLink(page))
+            elif editbar_item == 'Comments':
+                # we just use <a> to get same style as other links, but we add some dummy
+                # link target to get correct mouseover pointer appearance. return false
+                # keeps the browser away from jumping to the link target:: 
+                editbar_actions.append('<a href="#" class="toggleCommentsButton" onClick="toggleComments();return false;">%s</a>' % _('Comments'))
+            elif editbar_item == 'Edit':
+                editbar_actions.append(self.editorLink(page))
+            elif editbar_item == 'Info':
+                editbar_actions.append(self.infoLink(page))
+            elif editbar_item == 'Subscribe':
+                editbar_actions.append(self.subscribeLink(page))
+            elif editbar_item == 'Quicklink':
+                editbar_actions.append(self.quicklinkLink(page))
+            elif editbar_item == 'Attachments':
+                editbar_actions.append(self.attachmentsLink(page))
+            elif editbar_item == 'ActionsMenu':
+                editbar_actions.append(self.actionsMenu(page))
         return editbar_actions
 
     def supplementation_page_nameLink(self, page):
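The loop above builds the edit bar from request.cfg.edit_bar, so the names it recognizes ('Edit', 'Comments', 'Discussion', 'Info', 'Subscribe', 'Quicklink', 'Attachments', 'ActionsMenu') are also the vocabulary a wiki admin can use in the configuration. A hedged wikiconfig sketch, assuming the usual pattern of subclassing multiconfig.DefaultConfig; the ordering and the sitename are examples, not values taken from this changeset:

    from MoinMoin.config import multiconfig

    class Config(multiconfig.DefaultConfig):
        sitename = u'Example Wiki'   # placeholder value
        edit_bar = ['Edit', 'Comments', 'Discussion', 'Info',
                    'Subscribe', 'Quicklink', 'Attachments', 'ActionsMenu']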
--- a/MoinMoin/user.py	Tue Mar 27 21:50:05 2007 +0200
+++ b/MoinMoin/user.py	Fri Mar 30 21:50:50 2007 +0200
@@ -65,8 +65,8 @@
         except caching.CacheError:
             _name2id = {}
         cfg.cache.name2id = _name2id
-    id = _name2id.get(searchName, None)
-    if id is None:
+    uid = _name2id.get(searchName, None)
+    if uid is None:
         for userid in getUserList(request):
             name = User(request, id=userid).name
             _name2id[name] = userid
@@ -78,8 +78,8 @@
             cache.update(_name2id)
         except caching.CacheError:
             pass
-        id = _name2id.get(searchName, None)
-    return id
+        uid = _name2id.get(searchName, None)
+    return uid
 
 
 def getUserIdentification(request, username=None):
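The id → uid rename above (like the file → f rename in the trail-saving hunk further down in this file) removes shadowing of Python built-ins, one of the things pylint warns about. A tiny standalone illustration of why the shadowing is worth fixing; the function and paths are made up:

    def read_two(path, other_path):
        file = open(path)         # the local name now hides the built-in 'file' type
        data = file.read()
        other = file(other_path)  # raises TypeError: the open file object is not callable
        return data, other.read()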
@@ -855,11 +855,11 @@
         data = '\n'.join(self._trail) + '\n'
         path = self.__filename() + ".trail"
         try:
-            file = codecs.open(path, "w", config.charset)
+            f = codecs.open(path, "w", config.charset)
             try:
-                file.write(data)
+                f.write(data)
             finally:
-                file.close()
+                f.close()
         except (IOError, OSError), err:
             self._request.log("Can't save trail file: %s" % str(err))
 
--- a/MoinMoin/userform.py	Tue Mar 27 21:50:05 2007 +0200
+++ b/MoinMoin/userform.py	Fri Mar 30 21:50:50 2007 +0200
@@ -473,10 +473,10 @@
             buttons = [("select_user", _('Select User'))]
             button_cell = []
             for name, label in buttons:
-                 button_cell.extend([
-                     html.INPUT(type="submit", name=name, value=label),
-                     ' ',
-                 ])
+                button_cell.extend([
+                    html.INPUT(type="submit", name=name, value=label),
+                    ' ',
+                ])
             self.make_row('', button_cell)
 
         if self.request.user.valid and not create_only:
--- a/MoinMoin/version.py	Tue Mar 27 21:50:05 2007 +0200
+++ b/MoinMoin/version.py	Fri Mar 30 21:50:50 2007 +0200
@@ -10,7 +10,7 @@
 import sys
 
 try:
-    from patchlevel import patchlevel
+    from MoinMoin.patchlevel import patchlevel
 except:
     patchlevel = 'release'
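The import fix above matters because the bare "from patchlevel import patchlevel" relies on Python 2's implicit relative imports, which only resolve against the MoinMoin package directory in some invocation contexts; naming the package makes the lookup unambiguous. A hedged sketch of the defensive pattern; note the narrower except ImportError is our variation, the file itself keeps a bare except:

    try:
        # package-qualified import works the same whether the module is run
        # inside the package or imported from elsewhere
        from MoinMoin.patchlevel import patchlevel
    except ImportError:
        patchlevel = 'release'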
 
--- a/MoinMoin/wikisync.py	Tue Mar 27 21:50:05 2007 +0200
+++ b/MoinMoin/wikisync.py	Fri Mar 30 21:50:50 2007 +0200
@@ -36,10 +36,12 @@
         return page_name
 
 
-class UnsupportedWikiException(Exception): pass
+class UnsupportedWikiException(Exception):
+    pass
 
 
-class NotAllowedException(Exception): pass
+class NotAllowedException(Exception):
+    pass
 
 
 class SyncPage(object):
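The exception classes above are expanded because pylint flags compound statements written on one line (a header and its body sharing a line), the same rule behind the one-line "if cond: stmt" expansions elsewhere in this changeset. A minimal sketch with a hypothetical class name:

    # flagged: body on the same line as the class header
    class SyncError(Exception): pass

    # preferred: body on its own indented line
    class SyncError(Exception):
        pass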
@@ -424,7 +426,7 @@
         """ Removes all tags. """
         return NotImplemented
 
-    def fetch(self, iwid_full=None, iw_name=None):
+    def fetch(self, iwid_full=None, direction=None):
         """ Fetches tags by a special IWID or interwiki name. """
         return NotImplemented
 
--- a/MoinMoin/wikiutil.py	Tue Mar 27 21:50:05 2007 +0200
+++ b/MoinMoin/wikiutil.py	Fri Mar 30 21:50:50 2007 +0200
@@ -568,7 +568,8 @@
         lines += Page(request, INTERWIKI_PAGE).get_raw_body().splitlines()
 
         for line in lines:
-            if not line or line[0] == '#': continue
+            if not line or line[0] == '#':
+                continue
             try:
                 line = "%s %s/InterWiki" % (line, request.getScriptname())
                 wikitag, urlprefix, dummy = line.split(None, 2)
@@ -875,8 +876,8 @@
 }
 
 MIMETYPES_spoil_mapping = {} # inverse mapping of above
-for key, value in MIMETYPES_sanitize_mapping.items():
-    MIMETYPES_spoil_mapping[value] = key
+for _key, _value in MIMETYPES_sanitize_mapping.items():
+    MIMETYPES_spoil_mapping[_value] = _key
 
 
 class MimeType(object):
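The module-level loop above builds the inverse of MIMETYPES_sanitize_mapping; renaming the loop variables to _key/_value keeps those throwaway names from looking like public module attributes and from tripping pylint's naming checks for module-level variables. A standalone sketch of the idiom with a placeholder entry, since the real mapping is not shown in this hunk:

    SANITIZE_MAPPING = {('x', 'dvi'): ('application', 'x-dvi')}  # placeholder data only
    SPOIL_MAPPING = {}                                           # inverse of the above
    for _key, _value in SANITIZE_MAPPING.items():
        SPOIL_MAPPING[_value] = _key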
@@ -1209,8 +1210,10 @@
         except ValueError, err:
             msg = str(err)
             break
-        if not key: break
-        if endtoken and key == endtoken: break
+        if not key:
+            break
+        if endtoken and key == endtoken:
+            break
 
         # call extension function with the current token, the parser, and the dict
         if extension:
@@ -1336,7 +1339,7 @@
         return "%s, %s, optional:%s" % (self.param_list, self.param_dict,
                                         self.optional)
 
-    def parse_parameters(self, input):
+    def parse_parameters(self, params):
         """
         (4, 2)
         """
@@ -1348,8 +1351,8 @@
         i = 0
         start = 0
         named = False
-        while start < len(input):
-            match = re.match(self.param_re, input[start:])
+        while start < len(params):
+            match = re.match(self.param_re, params[start:])
             if not match:
                 raise ValueError, "Misformatted value"
             start += match.end()
@@ -1392,10 +1395,10 @@
                 nr = i
                 parameter_list[nr] = value
 
-            #Let's populate and map our dictionary to what's been found
+            # Let's populate and map our dictionary to what's been found
             for name in self.param_dict:
                 tmp = self.param_dict[name]
-                parameter_dict[name]=parameter_list[tmp]
+                parameter_dict[name] = parameter_list[tmp]
 
             i += 1
 
@@ -1644,7 +1647,7 @@
     out = StringIO.StringIO()
     request.redirect(out)
     wikiizer = Parser(text, request)
-    wikiizer.format(request.formatter,inhibit_p=True)
+    wikiizer.format(request.formatter, inhibit_p=True)
     result = out.getvalue()
     request.redirect()
     del out