changeset 787:9d74a2f53323

Merge with main.
author Alexander Schremmer <alex AT alexanderweb DOT de>
date Sat, 10 Jun 2006 16:45:05 +0200
parents 503f0b91ba41 (current diff) c02c3bcad99b (diff)
children 2438f386293f faebfa285206 cd3019c751e6
files MoinMoin/Page.py MoinMoin/PageEditor.py MoinMoin/PageGraphicalEditor.py MoinMoin/_tests/test_util_mail.py MoinMoin/_tests/test_wikiacl.py MoinMoin/auth.py MoinMoin/logfile/logfile.py MoinMoin/mailimport.py MoinMoin/security.py MoinMoin/util/ParserBase.py MoinMoin/util/antispam.py MoinMoin/util/autoadmin.py MoinMoin/util/mail.py MoinMoin/util/sessionParser.py MoinMoin/wikiacl.py MoinMoin/xmlrpc/__init__.py
diffstat 62 files changed, 3295 insertions(+), 3007 deletions(-)
--- a/MANIFEST.in	Wed Jun 07 14:50:19 2006 +0200
+++ b/MANIFEST.in	Sat Jun 10 16:45:05 2006 +0200
@@ -10,7 +10,7 @@
 recursive-include docs *
 
 # include stuff for translators
-recursive-include   MoinMoin/i18n README Makefile *.txt *.py POTFILES.in *.po* *.mo
+recursive-include   MoinMoin/i18n *
 
 # contrib stuff
 recursive-include   contrib *
--- a/MoinMoin/Page.py	Wed Jun 07 14:50:19 2006 +0200
+++ b/MoinMoin/Page.py	Sat Jun 10 16:45:05 2006 +0200
@@ -2,7 +2,7 @@
 """
     MoinMoin - Page class
 
-    @copyright: 2000-2004 by Jürgen Hermann <jh@web.de>
+    @copyright: 2000-2004 by Jürgen Hermann <jh@web.de>
     @license: GNU GPL, see COPYING for details.
 """
 
@@ -1603,10 +1603,9 @@
         Return cached ACL or invoke parseACL and update the cache.
 
         @param request: the request object
-        @rtype: MoinMoin.wikiacl.AccessControlList
+        @rtype: MoinMoin.security.AccessControlList
         @return: ACL of this page
         """
-        import wikiacl
         request.clock.start('getACL')
         # Try the cache or parse acl and update the cache
         currentRevision = self.current_rev()
@@ -1628,16 +1627,16 @@
         The effective ACL is always from the last revision, even if
         you access an older revision.
         """
-        import wikiacl
+        from MoinMoin import security
         if self.exists() and self.rev == 0:
-            return wikiacl.parseACL(self.request, self.get_raw_body())
+            return security.parseACL(self.request, self.get_raw_body())
         try:
             lastRevision = self.getRevList()[0]
         except IndexError:
-            return wikiacl.AccessControlList(self.request)
+            return security.AccessControlList(self.request)
         body = Page(self.request, self.page_name,
                     rev=lastRevision).get_raw_body()
-        return wikiacl.parseACL(self.request, body)
+        return security.parseACL(self.request, body)
 
     def clean_acl_cache(self):
         """
--- a/MoinMoin/PageEditor.py	Wed Jun 07 14:50:19 2006 +0200
+++ b/MoinMoin/PageEditor.py	Sat Jun 10 16:45:05 2006 +0200
@@ -2,7 +2,7 @@
 """
     MoinMoin - PageEditor class
 
-    @copyright: 2000-2004 by Jürgen Hermann <jh@web.de>
+    @copyright: 2000-2004 by Jürgen Hermann <jh@web.de>
     @license: GNU GPL, see COPYING for details.
 """
 
@@ -15,7 +15,7 @@
 from MoinMoin.logfile import editlog, eventlog
 from MoinMoin.util import filesys, timefuncs
 import MoinMoin.util.web
-import MoinMoin.util.mail
+from MoinMoin.mail import sendmail
 
 
 #############################################################################
@@ -587,7 +587,7 @@
             else:
                 mailBody = mailBody + _("No differences found!\n", formatted=False)
         
-        return util.mail.sendmail(self.request, emails,
+        return sendmail.sendmail(self.request, emails,
             _('[%(sitename)s] %(trivial)sUpdate of "%(pagename)s" by %(username)s', formatted=False) % {
                 'trivial' : (trivial and _("Trivial ", formatted=False)) or "",
                 'sitename': self.cfg.sitename or "Wiki",
@@ -935,7 +935,7 @@
             msg = _('You did not change the page content, not saved!')
             raise self.Unchanged, msg
         else:
-            from wikiacl import parseACL
+            from MoinMoin.security import parseACL
             # Get current ACL and compare to new ACL from newtext. If
             # they are not the sames, the user must have admin
             # rights. This is a good place to update acl cache - instead
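
The PageEditor hunks above swap MoinMoin.util.mail for MoinMoin.mail.sendmail when sending update notifications. A hedged sketch of the new call path; only (request, recipients, subject) are visible in the hunk, so the body argument and the helper name notify_subscribers are assumptions:

    from MoinMoin.mail import sendmail

    def notify_subscribers(request, emails, pagename, mail_body):
        # Subject format adapted from the hunk above; passing the body as
        # the next positional argument is an assumption.
        subject = '[%s] Update of "%s"' % (request.cfg.sitename or "Wiki", pagename)
        return sendmail.sendmail(request, emails, subject, mail_body)
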
--- a/MoinMoin/PageGraphicalEditor.py	Wed Jun 07 14:50:19 2006 +0200
+++ b/MoinMoin/PageGraphicalEditor.py	Sat Jun 10 16:45:05 2006 +0200
@@ -16,7 +16,6 @@
 from MoinMoin.logfile import editlog, eventlog
 from MoinMoin.util import filesys
 import MoinMoin.util.web
-import MoinMoin.util.mail
 from MoinMoin.parser.text_moin_wiki import Parser
 
 from StringIO import StringIO
--- /dev/null	Thu Jan 01 00:00:00 1970 +0000
+++ b/MoinMoin/_tests/test_mail_sendmail.py	Sat Jun 10 16:45:05 2006 +0200
@@ -0,0 +1,114 @@
+# -*- coding: utf-8 -*-
+"""
+    MoinMoin - MoinMoin.mail.sendmail Tests
+
+    @copyright: 2003-2004 by Jürgen Hermann <jh@web.de>
+    @license: GNU GPL, see COPYING for details.
+"""
+
+import unittest
+from email.Charset import Charset, QP
+from email.Header import Header
+from MoinMoin.mail import sendmail
+from MoinMoin import config
+
+
+class decodeSpamSafeEmailTestCase(unittest.TestCase):
+    """mail.sendmail: testing mail"""
+    
+    _tests = (
+        ('', ''),
+        ('AT', '@'),
+        ('DOT', '.'),
+        ('DASH', '-'),
+        ('CAPS', ''),
+        ('Mixed', 'Mixed'),
+        ('lower', 'lower'),
+        ('Firstname DOT Lastname AT example DOT net',
+         'Firstname.Lastname@example.net'),
+        ('Firstname . Lastname AT exa mp le DOT n e t',
+         'Firstname.Lastname@example.net'),
+        ('Firstname I DONT WANT SPAM . Lastname@example DOT net',
+         'Firstname.Lastname@example.net'),
+        ('First name I Lastname DONT AT WANT SPAM example DOT n e t',
+         'FirstnameLastname@example.net'),
+        ('first.last@example.com', 'first.last@example.com'),
+        ('first . last @ example . com', 'first.last@example.com'),
+        )
+
+    def testDecodeSpamSafeMail(self):
+        """mail.sendmail: decoding spam safe mail"""
+        for coded, expected in self._tests:
+            result = sendmail.decodeSpamSafeEmail(coded)
+            self.assertEqual(result, expected,
+                             'Expected "%(expected)s" but got "%(result)s"' %
+                             locals())
+
+
+class EncodeAddressTests(unittest.TestCase):
+    """ Address encoding tests
+    
+    See http://www.faqs.org/rfcs/rfc2822.html section 3.4. 
+    Address Specification.
+            
+    mailbox     =   name-addr / addr-spec
+    name-addr   =   [display-name] angle-addr
+    angle-addr  =   [CFWS] "<" addr-spec ">" [CFWS] / obs-angle-addr
+    """    
+    charset = Charset(config.charset)
+    charset.header_encoding = QP
+    charset.body_encoding = QP
+
+    def testSimpleAddress(self):
+        """ mail.sendmail: encode simple address: local@domain """
+        address = u'local@domain'
+        expected = address.encode(config.charset)
+        self.failUnlessEqual(sendmail.encodeAddress(address, self.charset),
+                             expected)
+
+    def testComposite(self):
+        """ mail.sendmail: encode address: 'Phrase <local@domain>' """
+        address = u'Phrase <local@domain>'
+        phrase = str(Header(u'Phrase '.encode('utf-8'), self.charset))
+        expected = phrase + '<local@domain>'
+        self.failUnlessEqual(sendmail.encodeAddress(address, self.charset),
+                             expected)
+                             
+    def testCompositeUnicode(self):
+        """ mail.sendmail: encode Uncode address: 'ויקי <local@domain>' """
+        address = u'ויקי <local@domain>'
+        phrase = str(Header(u'ויקי '.encode('utf-8'), self.charset))
+        expected = phrase + '<local@domain>'
+        self.failUnlessEqual(sendmail.encodeAddress(address, self.charset),
+                             expected)
+                             
+    def testEmptyPhrase(self):
+        """ mail.sendmail: encode address with empty phrase: '<local@domain>' """
+        address = u'<local@domain>'
+        expected = address.encode(config.charset)
+        self.failUnlessEqual(sendmail.encodeAddress(address, self.charset),
+                             expected)
+                             
+    def testEmptyAddress(self):
+        """ mail.sendmail: encode address with empty address: 'Phrase <>' 
+        
+        Let the smtp server handle this. We may raise error in such
+        case, but we don't do error checking for mail addresses.
+        """
+        address = u'Phrase <>'
+        phrase = str(Header(u'Phrase '.encode('utf-8'), self.charset))
+        expected = phrase + '<>'
+        self.failUnlessEqual(sendmail.encodeAddress(address, self.charset),
+                             expected)
+
+    def testInvalidAddress(self):
+        """ mail.sendmail: encode invalid address 'Phrase <blah' 
+        
+        Assume that this is a simple address. This address will
+        probably cause an error when trying to send mail. Junk in, junk
+        out.
+        """
+        address = u'Phrase <blah'
+        expected = address.encode(config.charset)
+        self.failUnlessEqual(sendmail.encodeAddress(address, self.charset),
+                             expected)
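
The _tests table in the new module doubles as a usage reference for decodeSpamSafeEmail; for example (expected results copied from the table above):

    from MoinMoin.mail import sendmail

    # 'AT' and 'DOT' are decoded, spam-decoy words are dropped
    sendmail.decodeSpamSafeEmail('Firstname DOT Lastname AT example DOT net')
    # -> 'Firstname.Lastname@example.net'
    sendmail.decodeSpamSafeEmail('first . last @ example . com')
    # -> 'first.last@example.com'
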
--- /dev/null	Thu Jan 01 00:00:00 1970 +0000
+++ b/MoinMoin/_tests/test_security.py	Sat Jun 10 16:45:05 2006 +0200
@@ -0,0 +1,243 @@
+# -*- coding: iso-8859-1 -*-
+"""
+    MoinMoin - MoinMoin.security Tests
+
+    @copyright: 2003-2004 by Jürgen Hermann <jh@web.de>
+    @license: GNU GPL, see COPYING for details.
+"""
+
+import unittest
+from MoinMoin._tests import TestConfig
+from MoinMoin import config, security, _tests
+
+acliter = security.ACLStringIterator
+
+class ACLStringIteratorTestCase(unittest.TestCase):
+    
+    def setUp(self):
+        self.config = TestConfig(self.request,
+                                 defaults=['acl_rights_valid', 'acl_rights_before'])
+                
+    def tearDown(self):
+        del self.config
+        
+    def testEmpty(self):
+        """ security: empty acl string raise StopIteration """
+        iter = acliter(self.request.cfg.acl_rights_valid, '')
+        self.failUnlessRaises(StopIteration, iter.next)
+
+    def testWhiteSpace(self):
+        """ security: white space acl string raise StopIteration """
+        iter = acliter(self.request.cfg.acl_rights_valid, '       ')
+        self.failUnlessRaises(StopIteration, iter.next)
+            
+    def testDefault(self):
+        """ security: default meta acl """
+        iter = acliter(self.request.cfg.acl_rights_valid, 'Default Default')
+        for mod, entries, rights in iter:
+            self.assertEqual(entries, ['Default'])
+            self.assertEqual(rights, [])
+                
+    def testEmptyRights(self):
+        """ security: empty rights """    
+        iter = acliter(self.request.cfg.acl_rights_valid, 'WikiName:')
+        mod, entries, rights = iter.next()
+        self.assertEqual(entries, ['WikiName'])
+        self.assertEqual(rights, [])
+
+    def testSingleWikiNameSingleWrite(self):
+        """ security: single wiki name, single right """
+        iter = acliter(self.request.cfg.acl_rights_valid, 'WikiName:read')
+        mod, entries, rights = iter.next()
+        self.assertEqual(entries, ['WikiName'])
+        self.assertEqual(rights, ['read'])
+
+    def testMultipleWikiNameAndRights(self):
+        """ security: multiple wiki names and rights """
+        iter = acliter(self.request.cfg.acl_rights_valid, 'UserOne,UserTwo:read,write')
+        mod, entries, rights = iter.next()
+        self.assertEqual(entries, ['UserOne', 'UserTwo'])
+        self.assertEqual(rights, ['read', 'write'])      
+        
+    def testMultipleEntries(self):
+        """ security: multiple entries """
+        iter = acliter(self.request.cfg.acl_rights_valid, 'UserOne:read,write UserTwo:read All:')
+        mod, entries, rights = iter.next()
+        self.assertEqual(entries, ['UserOne'])
+        self.assertEqual(rights, ['read', 'write'])      
+        mod, entries, rights = iter.next()
+        self.assertEqual(entries, ['UserTwo'])
+        self.assertEqual(rights, ['read'])      
+        mod, entries, rights = iter.next()
+        self.assertEqual(entries, ['All'])
+        self.assertEqual(rights, [])      
+       
+    def testNameWithSpaces(self):
+        """ security: single name with spaces """
+        iter = acliter(self.request.cfg.acl_rights_valid, 'user one:read')
+        mod, entries, rights = iter.next()
+        self.assertEqual(entries, ['user one'])
+        self.assertEqual(rights, ['read'])
+
+    def testMultipleWikiNameAndRights(self):
+        """ security: multiple names with spaces """
+        iter = acliter(self.request.cfg.acl_rights_valid, 'user one,user two:read')
+        mod, entries, rights = iter.next()
+        self.assertEqual(entries, ['user one', 'user two'])
+        self.assertEqual(rights, ['read'])      
+        
+    def testMultipleEntriesWithSpaces(self):
+        """ security: multiple entries with spaces """
+        iter = acliter(self.request.cfg.acl_rights_valid, 'user one:read,write user two:read')
+        mod, entries, rights = iter.next()
+        self.assertEqual(entries, ['user one'])
+        self.assertEqual(rights, ['read', 'write'])      
+        mod, entries, rights = iter.next()
+        self.assertEqual(entries, ['user two'])
+        self.assertEqual(rights, ['read'])      
+         
+    def testMixedNames(self):
+        """ security: mixed wiki names and names with spaces """
+        iter = acliter(self.request.cfg.acl_rights_valid, 'UserOne,user two:read,write user three,UserFour:read')
+        mod, entries, rights = iter.next()
+        self.assertEqual(entries, ['UserOne', 'user two'])
+        self.assertEqual(rights, ['read', 'write'])      
+        mod, entries, rights = iter.next()
+        self.assertEqual(entries, ['user three', 'UserFour'])
+        self.assertEqual(rights, ['read'])      
+
+    def testModifier(self):
+        """ security: acl modifiers """
+        iter = acliter(self.request.cfg.acl_rights_valid, '+UserOne:read -UserTwo:')
+        mod, entries, rights = iter.next()
+        self.assertEqual(mod, '+')
+        self.assertEqual(entries, ['UserOne'])
+        self.assertEqual(rights, ['read'])
+        mod, entries, rights = iter.next()
+        self.assertEqual(mod, '-')
+        self.assertEqual(entries, ['UserTwo'])
+        self.assertEqual(rights, [])
+        
+    def testIgnoreInvalidACL(self):
+        """ security: ignore invalid acl
+
+        The last part of this acl can not be parsed. If it ends with :
+        then it will be parsed as one name with spaces.
+        """
+        iter = acliter(self.request.cfg.acl_rights_valid, 'UserOne:read user two is ignored')
+        mod, entries, rights = iter.next()
+        self.assertEqual(entries, ['UserOne'])
+        self.assertEqual(rights, ['read'])
+        self.failUnlessRaises(StopIteration, iter.next)
+        
+    def testEmptyNamesWithRight(self):
+        """ security: empty names with rights
+
+        The documents does not talk about this case, may() should ignore
+        the rights because there is no entry.
+        """
+        iter = acliter(self.request.cfg.acl_rights_valid, 'UserOne:read :read All:')
+        mod, entries, rights = iter.next()
+        self.assertEqual(entries, ['UserOne'])
+        self.assertEqual(rights, ['read'])
+        mod, entries, rights = iter.next()
+        self.assertEqual(entries, [])
+        self.assertEqual(rights, ['read'])        
+        mod, entries, rights = iter.next()
+        self.assertEqual(entries, ['All'])
+        self.assertEqual(rights, [])
+
+    def testIgnodeInvalidRights(self):
+        """ security: ignore rights not in acl_rights_valid """
+        iter = acliter(self.request.cfg.acl_rights_valid, 'UserOne:read,sing,write,drink,sleep')
+        mod, entries, rights = iter.next()
+        self.assertEqual(rights, ['read', 'write'])        
+
+    def testBadGuy(self):
+        """ security: bad guy may not allowed anything
+
+        This test was failing on the apply acl rights test.
+        """
+        iter = acliter(self.request.cfg.acl_rights_valid, 'UserOne:read,write BadGuy: All:read')
+        mod, entries, rights = iter.next()
+        mod, entries, rights = iter.next()
+        self.assertEqual(entries, ['BadGuy'])
+        self.assertEqual(rights, [])
+
+    def testAllowExtraWhitespace(self):
+        """ security: allow extra white space between entries """
+        iter = acliter(self.request.cfg.acl_rights_valid, 'UserOne,user two:read,write   user three,UserFour:read  All:')
+        mod, entries, rights = iter.next()
+        self.assertEqual(entries, ['UserOne', 'user two'])
+        self.assertEqual(rights, ['read', 'write'])      
+        mod, entries, rights = iter.next()
+        self.assertEqual(entries, ['user three', 'UserFour'])
+        self.assertEqual(rights, ['read'])      
+        mod, entries, rights = iter.next()
+        self.assertEqual(entries, ['All'])
+        self.assertEqual(rights, [])            
+       
+
+class AclTestCase(unittest.TestCase):
+    """ security: testing access control list
+
+    TO DO: test unknown user?
+    """
+    def setUp(self):
+        # Backup user
+        self.config = TestConfig(self.request, defaults=['acl_rights_valid', 'acl_rights_before'])
+        self.savedUser = self.request.user.name
+        
+    def tearDown(self):
+        # Restore user
+        self.request.user.name = self.savedUser
+        del self.config
+        
+    def testApplyACLByUser(self):
+        """ security: applying acl by user name"""
+        # This acl string...
+        acl_rights = [
+            "Admin1,Admin2:read,write,delete,revert,admin  "
+            "Admin3:read,write,admin  "
+            "JoeDoe:read,write  "
+            "name with spaces,another one:read,write  "
+            "CamelCase,extended name:read,write  "
+            "BadGuy:  "
+            "All:read  "
+            ]
+        acl = security.AccessControlList(self.request, acl_rights)
+
+        # Should apply these rights:
+        users = (
+            # user,                 rights
+            # CamelCase names
+            ('Admin1',              ('read', 'write', 'admin', 'revert', 'delete')),
+            ('Admin2',              ('read', 'write', 'admin', 'revert', 'delete')),
+            ('Admin3',              ('read', 'write', 'admin')),
+            ('JoeDoe',              ('read', 'write')),
+            ('SomeGuy',             ('read',)),
+            # Extended names or mix of extended and CamelCase
+            ('name with spaces',    ('read','write',)),
+            ('another one',         ('read','write',)),
+            ('CamelCase',           ('read','write',)),
+            ('extended name',       ('read','write',)),
+            # Blocking bad guys
+            ('BadGuy',              ()),
+            # All other users - every one not mentioned in the acl lines
+            ('All',                 ('read',)),
+            ('Anonymous',           ('read',)),
+            )       
+
+        # Check rights
+        for user, may in users:
+            mayNot = [right for right in self.request.cfg.acl_rights_valid
+                      if right not in may]
+            # User should have these rights...
+            for right in may:
+                self.assert_(acl.may(self.request, user, right),
+                    '"%(user)s" should be allowed to "%(right)s"' % locals())
+            # But NOT these:
+            for right in mayNot:
+                self.failIf(acl.may(self.request, user, right),
+                    '"%(user)s" should NOT be allowed to "%(right)s"' % locals())
+            
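
The new test module exercises security.ACLStringIterator and security.AccessControlList directly. A small sketch combining both, based only on the call patterns in the tests above; request and cfg are the usual MoinMoin objects and the helper name is hypothetical:

    from MoinMoin import security

    def users_who_may_write(request, acl_string):
        # AccessControlList takes a list of ACL strings and may() takes
        # (request, username, right), as used in AclTestCase above.
        acl = security.AccessControlList(request, [acl_string])
        allowed = []
        for mod, entries, rights in security.ACLStringIterator(
                request.cfg.acl_rights_valid, acl_string):
            for name in entries:
                if acl.may(request, name, 'write'):
                    allowed.append(name)
        return allowed
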
--- a/MoinMoin/_tests/test_util_mail.py	Wed Jun 07 14:50:19 2006 +0200
+++ /dev/null	Thu Jan 01 00:00:00 1970 +0000
@@ -1,114 +0,0 @@
-# -*- coding: utf-8 -*-
-"""
-    MoinMoin - MoinMoin.util.mail Tests
-
-    @copyright: 2003-2004 by Jürgen Hermann <jh@web.de>
-    @license: GNU GPL, see COPYING for details.
-"""
-
-import unittest
-from email.Charset import Charset, QP
-from email.Header import Header
-from MoinMoin.util import mail
-from MoinMoin import config
-
-
-class decodeSpamSafeEmailTestCase(unittest.TestCase):
-    """util.mail: testing mail"""
-    
-    _tests = (
-        ('', ''),
-        ('AT', '@'),
-        ('DOT', '.'),
-        ('DASH', '-'),
-        ('CAPS', ''),
-        ('Mixed', 'Mixed'),
-        ('lower', 'lower'),
-        ('Firstname DOT Lastname AT example DOT net',
-         'Firstname.Lastname@example.net'),
-        ('Firstname . Lastname AT exa mp le DOT n e t',
-         'Firstname.Lastname@example.net'),
-        ('Firstname I DONT WANT SPAM . Lastname@example DOT net',
-         'Firstname.Lastname@example.net'),
-        ('First name I Lastname DONT AT WANT SPAM example DOT n e t',
-         'FirstnameLastname@example.net'),
-        ('first.last@example.com', 'first.last@example.com'),
-        ('first . last @ example . com', 'first.last@example.com'),
-        )
-
-    def testDecodeSpamSafeMail(self):
-        """util.mail: decoding spam safe mail"""
-        for coded, expected in self._tests:
-            result = mail.decodeSpamSafeEmail(coded)
-            self.assertEqual(result, expected,
-                             'Expected "%(expected)s" but got "%(result)s"' %
-                             locals())
-
-
-class EncodeAddressTests(unittest.TestCase):
-    """ Address encoding tests
-    
-    See http://www.faqs.org/rfcs/rfc2822.html section 3.4. 
-    Address Specification.
-            
-    mailbox     =   name-addr / addr-spec
-    name-addr   =   [display-name] angle-addr
-    angle-addr  =   [CFWS] "<" addr-spec ">" [CFWS] / obs-angle-addr
-    """    
-    charset = Charset(config.charset)
-    charset.header_encoding = QP
-    charset.body_encoding = QP
-
-    def testSimpleAddress(self):
-        """ util.mail: encode simple address: local@domain """
-        address = u'local@domain'
-        expected = address.encode(config.charset)
-        self.failUnlessEqual(mail.encodeAddress(address, self.charset),
-                             expected)
-
-    def testComposite(self):
-        """ util.mail: encode address: 'Phrase <local@domain>' """
-        address = u'Phrase <local@domain>'
-        phrase = str(Header(u'Phrase '.encode('utf-8'), self.charset))
-        expected = phrase + '<local@domain>'
-        self.failUnlessEqual(mail.encodeAddress(address, self.charset),
-                             expected)
-                             
-    def testCompositeUnicode(self):
-        """ util.mail: encode Uncode address: 'ויקי <local@domain>' """
-        address = u'ויקי <local@domain>'
-        phrase = str(Header(u'ויקי '.encode('utf-8'), self.charset))
-        expected = phrase + '<local@domain>'
-        self.failUnlessEqual(mail.encodeAddress(address, self.charset),
-                             expected)
-                             
-    def testEmptyPhrase(self):
-        """ util.mail: encode address with empty phrase: '<local@domain>' """
-        address = u'<local@domain>'
-        expected = address.encode(config.charset)
-        self.failUnlessEqual(mail.encodeAddress(address, self.charset),
-                             expected)
-                             
-    def testEmptyAddress(self):
-        """ util.mail: encode address with empty address: 'Phrase <>' 
-        
-        Let the smtp server handle this. We may raise error in such
-        case, but we don't do error checking for mail addresses.
-        """
-        address = u'Phrase <>'
-        phrase = str(Header(u'Phrase '.encode('utf-8'), self.charset))
-        expected = phrase + '<>'
-        self.failUnlessEqual(mail.encodeAddress(address, self.charset),
-                             expected)
-
-    def testInvalidAddress(self):
-        """ util.mail: encode invalid address 'Phrase <blah' 
-        
-        Assume that this is a simple address. This address will
-        probably cause an error when trying to send mail. Junk in, junk
-        out.
-        """
-        address = u'Phrase <blah'
-        expected = address.encode(config.charset)
-        self.failUnlessEqual(mail.encodeAddress(address, self.charset),
-                             expected)
--- a/MoinMoin/_tests/test_wikiacl.py	Wed Jun 07 14:50:19 2006 +0200
+++ /dev/null	Thu Jan 01 00:00:00 1970 +0000
@@ -1,259 +0,0 @@
-# -*- coding: iso-8859-1 -*-
-"""
-    MoinMoin - MoinMoin.wikiacl Tests
-
-    @copyright: 2003-2004 by Jürgen Hermann <jh@web.de>
-    @license: GNU GPL, see COPYING for details.
-"""
-
-import unittest
-from MoinMoin._tests import TestConfig
-from MoinMoin import config, wikiacl, _tests
-
-acliter = wikiacl.ACLStringIterator
-
-class ACLStringIteratorTestCase(unittest.TestCase):
-    
-    def setUp(self):
-        self.config = TestConfig(self.request,
-                                 defaults=['acl_rights_valid', 'acl_rights_before'])
-                
-    def tearDown(self):
-        del self.config
-        
-    def testEmpty(self):
-        """ wikiacl: empty acl string raise StopIteration """
-        iter = acliter(self.request.cfg.acl_rights_valid, '')
-        self.failUnlessRaises(StopIteration, iter.next)
-
-    def testWhiteSpace(self):
-        """ wikiacl: white space acl string raise StopIteration """
-        iter = acliter(self.request.cfg.acl_rights_valid, '       ')
-        self.failUnlessRaises(StopIteration, iter.next)
-            
-    def testDefault(self):
-        """ wikiacl: default meta acl """
-        iter = acliter(self.request.cfg.acl_rights_valid,
-                       'Default Default')
-        for mod, entries, rights in iter:
-            self.assertEqual(entries, ['Default'])
-            self.assertEqual(rights, [])
-                
-    def testEmptyRights(self):
-        """ wikiacl: empty rights """    
-        iter = acliter(self.request.cfg.acl_rights_valid,
-                       'WikiName:')
-        mod, entries, rights = iter.next()
-        self.assertEqual(entries, ['WikiName'])
-        self.assertEqual(rights, [])
-
-    def testSingleWikiNameSingleWrite(self):
-        """ wikiacl: single wiki name, single right """
-        iter = acliter(self.request.cfg.acl_rights_valid,
-                       'WikiName:read')
-        mod, entries, rights = iter.next()
-        self.assertEqual(entries, ['WikiName'])
-        self.assertEqual(rights, ['read'])
-
-    def testMultipleWikiNameAndRights(self):
-        """ wikiacl: multiple wiki names and rights """
-        iter = acliter(self.request.cfg.acl_rights_valid,
-                       'UserOne,UserTwo:read,write')
-        mod, entries, rights = iter.next()
-        self.assertEqual(entries, ['UserOne', 'UserTwo'])
-        self.assertEqual(rights, ['read', 'write'])      
-        
-    def testMultipleEntries(self):
-        """ wikiacl: multiple entries """
-        iter = acliter(self.request.cfg.acl_rights_valid,
-                       'UserOne:read,write UserTwo:read All:')
-        mod, entries, rights = iter.next()
-        self.assertEqual(entries, ['UserOne'])
-        self.assertEqual(rights, ['read', 'write'])      
-        mod, entries, rights = iter.next()
-        self.assertEqual(entries, ['UserTwo'])
-        self.assertEqual(rights, ['read'])      
-        mod, entries, rights = iter.next()
-        self.assertEqual(entries, ['All'])
-        self.assertEqual(rights, [])      
-       
-    def testNameWithSpaces(self):
-        """ wikiacl: single name with spaces """
-        iter = acliter(self.request.cfg.acl_rights_valid,
-                       'user one:read')
-        mod, entries, rights = iter.next()
-        self.assertEqual(entries, ['user one'])
-        self.assertEqual(rights, ['read'])
-
-    def testMultipleWikiNameAndRights(self):
-        """ wikiacl: multiple names with spaces """
-        iter = acliter(self.request.cfg.acl_rights_valid,
-                       'user one,user two:read')
-        mod, entries, rights = iter.next()
-        self.assertEqual(entries, ['user one', 'user two'])
-        self.assertEqual(rights, ['read'])      
-        
-    def testMultipleEntriesWithSpaces(self):
-        """ wikiacl: multiple entries with spaces """
-        iter = acliter(self.request.cfg.acl_rights_valid,
-                       'user one:read,write user two:read')
-        mod, entries, rights = iter.next()
-        self.assertEqual(entries, ['user one'])
-        self.assertEqual(rights, ['read', 'write'])      
-        mod, entries, rights = iter.next()
-        self.assertEqual(entries, ['user two'])
-        self.assertEqual(rights, ['read'])      
-         
-    def testMixedNames(self):
-        """ wikiacl: mixed wiki names and names with spaces """
-        iter = acliter(self.request.cfg.acl_rights_valid,
-                       'UserOne,user two:read,write user three,UserFour:read')
-        mod, entries, rights = iter.next()
-        self.assertEqual(entries, ['UserOne', 'user two'])
-        self.assertEqual(rights, ['read', 'write'])      
-        mod, entries, rights = iter.next()
-        self.assertEqual(entries, ['user three', 'UserFour'])
-        self.assertEqual(rights, ['read'])      
-
-    def testModifier(self):
-        """ wikiacl: acl modifiers """
-        iter = acliter(self.request.cfg.acl_rights_valid,
-                       '+UserOne:read -UserTwo:')
-        mod, entries, rights = iter.next()
-        self.assertEqual(mod, '+')
-        self.assertEqual(entries, ['UserOne'])
-        self.assertEqual(rights, ['read'])
-        mod, entries, rights = iter.next()
-        self.assertEqual(mod, '-')
-        self.assertEqual(entries, ['UserTwo'])
-        self.assertEqual(rights, [])
-        
-    def testIgnoreInvalidACL(self):
-        """ wikiacl: ignore invalid acl
-
-        The last part of this acl can not be parsed. If it ends with :
-        then it will be parsed as one name with spaces.
-        """
-        iter = acliter(self.request.cfg.acl_rights_valid,
-                       'UserOne:read user two is ignored')
-        mod, entries, rights = iter.next()
-        self.assertEqual(entries, ['UserOne'])
-        self.assertEqual(rights, ['read'])
-        self.failUnlessRaises(StopIteration, iter.next)
-        
-    def testEmptyNamesWithRight(self):
-        """ wikiacl: empty names with rights
-
-        The documents does not talk about this case, may() should ignore
-        the rights because there is no entry.
-        """
-        iter = acliter(self.request.cfg.acl_rights_valid,
-                       'UserOne:read :read All:')
-        mod, entries, rights = iter.next()
-        self.assertEqual(entries, ['UserOne'])
-        self.assertEqual(rights, ['read'])
-        mod, entries, rights = iter.next()
-        self.assertEqual(entries, [])
-        self.assertEqual(rights, ['read'])        
-        mod, entries, rights = iter.next()
-        self.assertEqual(entries, ['All'])
-        self.assertEqual(rights, [])
-
-    def testIgnodeInvalidRights(self):
-        """ wikiacl: ignore rights not in acl_rights_valid """
-        iter = acliter(self.request.cfg.acl_rights_valid,
-                       'UserOne:read,sing,write,drink,sleep')
-        mod, entries, rights = iter.next()
-        self.assertEqual(rights, ['read', 'write'])        
-
-    def testBadGuy(self):
-        """ wikiacl: bad guy may not allowed anything
-
-        This test was failing on the apply acl rights test.
-        """
-        iter = acliter(self.request.cfg.acl_rights_valid,
-                       'UserOne:read,write BadGuy: All:read')
-        mod, entries, rights = iter.next()
-        mod, entries, rights = iter.next()
-        self.assertEqual(entries, ['BadGuy'])
-        self.assertEqual(rights, [])
-
-    def testAllowExtraWhitespace(self):
-        """ wikiacl: allow extra white space between entries """
-        iter = acliter(self.request.cfg.acl_rights_valid,
-                       'UserOne,user two:read,write   user three,UserFour:read  All:')
-        mod, entries, rights = iter.next()
-        self.assertEqual(entries, ['UserOne', 'user two'])
-        self.assertEqual(rights, ['read', 'write'])      
-        mod, entries, rights = iter.next()
-        self.assertEqual(entries, ['user three', 'UserFour'])
-        self.assertEqual(rights, ['read'])      
-        mod, entries, rights = iter.next()
-        self.assertEqual(entries, ['All'])
-        self.assertEqual(rights, [])            
-       
-
-class AclTestCase(unittest.TestCase):
-    """wikiacl: testing access control list
-
-    TO DO: test unknown user?
-    """
-    def setUp(self):
-        # Backup user
-        self.config = TestConfig(self.request,
-                                 defaults=['acl_rights_valid', 'acl_rights_before'])
-        self.savedUser = self.request.user.name
-        
-    def tearDown(self):
-        # Restore user
-        self.request.user.name = self.savedUser
-        del self.config
-        
-    def testApplyACLByUser(self):
-        """wikiacl: applying acl by user name"""
-        # This acl string...
-        acl_rights = [
-            "Admin1,Admin2:read,write,delete,revert,admin  "
-            "Admin3:read,write,admin  "
-            "JoeDoe:read,write  "
-            "name with spaces,another one:read,write  "
-            "CamelCase,extended name:read,write  "
-            "BadGuy:  "
-            "All:read  "
-            ]
-        acl = wikiacl.AccessControlList(self.request, acl_rights)
-
-        # Should apply these rights:
-        users = (
-            # user,                 rights
-            # CamelCase names
-            ('Admin1',              ('read', 'write', 'admin', 'revert', 'delete')),
-            ('Admin2',              ('read', 'write', 'admin', 'revert', 'delete')),
-            ('Admin3',              ('read', 'write', 'admin')),
-            ('JoeDoe',              ('read', 'write')),
-            ('SomeGuy',             ('read',)),
-            # Extended names or mix of extended and CamelCase
-            ('name with spaces',    ('read','write',)),
-            ('another one',         ('read','write',)),
-            ('CamelCase',           ('read','write',)),
-            ('extended name',       ('read','write',)),
-            # Blocking bad guys
-            ('BadGuy',              ()),
-            # All other users - every one not mentioned in the acl lines
-            ('All',                 ('read',)),
-            ('Anonymous',           ('read',)),
-            )       
-
-        # Check rights
-        for user, may in users:
-            mayNot = [right for right in self.request.cfg.acl_rights_valid
-                      if right not in may]
-            # User should have these rights...
-            for right in may:
-                self.assert_(acl.may(self.request, user, right),
-                    '"%(user)s" should be allowed to "%(right)s"' % locals())
-            # But NOT these:
-            for right in mayNot:
-                self.failIf(acl.may(self.request, user, right),
-                    '"%(user)s" should NOT be allowed to "%(right)s"' % locals())
-            
--- a/MoinMoin/auth.py	Wed Jun 07 14:50:19 2006 +0200
+++ /dev/null	Thu Jan 01 00:00:00 1970 +0000
@@ -1,523 +0,0 @@
-# -*- coding: iso-8859-1 -*-
-"""
-    MoinMoin - modular authentication code
-
-    Here are some methods moin can use in cfg.auth authentication method list.
-    The methods from that list get called (from request.py) in that sequence.
-    They get request as first argument and also some more kw arguments:
-       name: the value we did get from a POST of the UserPreferences page
-             in the "name" form field (or None)
-       password: the value of the password form field (or None)
-       login: True if user has clicked on Login button
-       logout: True if user has clicked on Logout button
-       user_obj: the user_obj we have until now (user_obj returned from
-                 previous auth method or None for first auth method)
-       (we maybe add some more here)
-
-    Use code like this to get them:
-        name = kw.get('name') or ''
-        password = kw.get('password') or ''
-        login = kw.get('login')
-        logout = kw.get('logout')
-        request.log("got name=%s len(password)=%d login=%r logout=%r" % (name, len(password), login, logout))
-    
-    The called auth method then must return a tuple (user_obj, continue_flag).
-    user_obj can be one of:
-    * a (newly created) User object
-    * None if we want to inhibit log in from previous auth methods
-    * what we got as kw argument user_obj (meaning: no change).
-    continue_flag is a boolean indication whether the auth loop shall continue
-    trying other auth methods (or not).
-
-    The methods give a kw arg "auth_attribs" to User.__init__ that tells
-    which user attribute names are DETERMINED and set by this auth method and
-    must not get changed by the user using the UserPreferences form.
-    It also gives a kw arg "auth_method" that tells the name of the auth
-    method that authentified the user.
-    
-    @copyright: 2005-2006 Bastian Blank, Florian Festi, Thomas Waldmann
-    @copyright: 2005-2006 MoinMoin:AlexanderSchremmer
-    @license: GNU GPL, see COPYING for details.
-"""
-
-import time, Cookie
-from MoinMoin import user
-
-def log(request, **kw):
-    """ just log the call, do nothing else """
-    username = kw.get('name')
-    password = kw.get('password')
-    login = kw.get('login')
-    logout = kw.get('logout')
-    user_obj = kw.get('user_obj')
-    request.log("auth.log: name=%s login=%r logout=%r user_obj=%r" % (username, login, logout, user_obj))
-    return user_obj, True
-
-# some cookie functions used by moin_cookie auth
-def makeCookie(request, moin_id, maxage, expires):
-    """ calculate a MOIN_ID cookie """
-    c = Cookie.SimpleCookie()
-    cfg = request.cfg
-    c['MOIN_ID'] = moin_id
-    c['MOIN_ID']['max-age'] = maxage
-    if cfg.cookie_domain:
-        c['MOIN_ID']['domain'] = cfg.cookie_domain
-    if cfg.cookie_path:
-        c['MOIN_ID']['path'] = cfg.cookie_path
-    else:
-        path = request.getScriptname()
-        if not path:
-            path = '/'
-        c['MOIN_ID']['path'] = path
-    # Set expires for older clients
-    c['MOIN_ID']['expires'] = request.httpDate(when=expires, rfc='850')        
-    return c.output()
-
-def setCookie(request, u):
-    """ Set cookie for the user obj u
-    
-    cfg.cookie_lifetime and the user 'remember_me' setting set the
-    lifetime of the cookie. lifetime in int hours, see table:
-    
-    value   cookie lifetime
-    ----------------------------------------------------------------
-     = 0    forever, ignoring user 'remember_me' setting
-     > 0    n hours, or forever if user checked 'remember_me'
-     < 0    -n hours, ignoring user 'remember_me' setting
-    """
-    # Calculate cookie maxage and expires
-    lifetime = int(request.cfg.cookie_lifetime) * 3600 
-    forever = 10*365*24*3600 # 10 years
-    now = time.time()
-    if not lifetime:
-        maxage = forever
-    elif lifetime > 0:
-        if u.remember_me:
-            maxage = forever
-        else:
-            maxage = lifetime
-    elif lifetime < 0:
-        maxage = (-lifetime)
-    expires = now + maxage
-    
-    cookie = makeCookie(request, u.id, maxage, expires)
-    # Set cookie
-    request.setHttpHeader(cookie)
-    # IMPORTANT: Prevent caching of current page and cookie
-    request.disableHttpCaching()
-
-def deleteCookie(request):
-    """ Delete the user cookie by sending expired cookie with null value
-
-    According to http://www.cse.ohio-state.edu/cgi-bin/rfc/rfc2109.html#sec-4.2.2
-    Deleted cookie should have Max-Age=0. We also have expires
-    attribute, which is probably needed for older browsers.
-
-    Finally, delete the saved cookie and create a new user based on the new settings.
-    """
-    moin_id = ''
-    maxage = 0
-    # Set expires to one year ago for older clients
-    expires = time.time() - (3600 * 24 * 365) # 1 year ago
-    cookie = makeCookie(request, moin_id, maxage, expires) 
-    # Set cookie
-    request.setHttpHeader(cookie)
-    # IMPORTANT: Prevent caching of current page and cookie        
-    request.disableHttpCaching()
-
-def moin_cookie(request, **kw):
-    """ authenticate via the MOIN_ID cookie """
-    username = kw.get('name')
-    password = kw.get('password')
-    login = kw.get('login')
-    logout = kw.get('logout')
-    user_obj = kw.get('user_obj')
-    #request.log("auth.moin_cookie: name=%s login=%r logout=%r user_obj=%r" % (username, login, logout, user_obj))
-    if login:
-        u = user.User(request, name=username, password=password,
-                      auth_method='login_userpassword')
-        if u.valid:
-            setCookie(request, u)
-            return u, True # we make continuing possible, e.g. for smbmount
-        return user_obj, True
-
-    try:
-        cookie = Cookie.SimpleCookie(request.saved_cookie)
-    except Cookie.CookieError:
-        # ignore invalid cookies, else user can't relogin
-        cookie = None
-    if cookie and cookie.has_key('MOIN_ID'):
-        u = user.User(request, id=cookie['MOIN_ID'].value,
-                      auth_method='moin_cookie', auth_attribs=())
-
-        if logout:
-            u.valid = 0 # just make user invalid, but remember him
-
-        if u.valid:
-            setCookie(request, u) # refreshes cookie lifetime
-            return u, True # use True to get other methods called, too
-        else: # logout or invalid user
-            deleteCookie(request)
-            return u, True # we return a invalidated user object, so that
-                           # following auth methods can get the name of
-                           # the user who logged out
-    return user_obj, True
-
-
-def http(request, **kw):
-    """ authenticate via http basic/digest/ntlm auth """
-    from MoinMoin.request import TWISTED, CLI
-    user_obj = kw.get('user_obj')
-    u = None
-    # check if we are running Twisted
-    if isinstance(request, TWISTED.Request):
-        username = request.twistd.getUser()
-        password = request.twistd.getPassword()
-        # when using Twisted http auth, we use username and password from
-        # the moin user profile, so both can be changed by user.
-        u = user.User(request, auth_username=username, password=password,
-                      auth_method='http', auth_attribs=())
-
-    elif not isinstance(request, CLI.Request):
-        env = request.env
-        auth_type = env.get('AUTH_TYPE','')
-        if auth_type in ['Basic', 'Digest', 'NTLM', 'Negotiate',]:
-            username = env.get('REMOTE_USER','')
-            if auth_type in ('NTLM', 'Negotiate',):
-                # converting to standard case so the user can even enter wrong case
-                # (added since windows does not distinguish between e.g.
-                #  "Mike" and "mike")
-                username = username.split('\\')[-1] # split off domain e.g.
-                                                    # from DOMAIN\user
-                # this "normalizes" the login name from {meier, Meier, MEIER} to Meier
-                # put a comment sign in front of next line if you don't want that:
-                username = username.title()
-            # when using http auth, we have external user name and password,
-            # we don't use the moin user profile for those attributes.
-            u = user.User(request, auth_username=username,
-                          auth_method='http', auth_attribs=('name', 'password'))
-
-    if u:
-        u.create_or_update()
-    if u and u.valid:
-        return u, True # True to get other methods called, too
-    else:
-        return user_obj, True
-
-def sslclientcert(request, **kw):
-    """ authenticate via SSL client certificate """
-    from MoinMoin.request import TWISTED
-    user_obj = kw.get('user_obj')
-    u = None
-    changed = False
-    # check if we are running Twisted
-    if isinstance(request, TWISTED.Request):
-        return user_obj, True # not supported if we run twisted
-        # Addendum: this seems to need quite some twisted insight and coding.
-        # A pointer i got on #twisted: divmod's vertex.sslverify
-        # If you really need this, feel free to implement and test it and
-        # submit a patch if it works.
-    else:
-        env = request.env
-        if env.get('SSL_CLIENT_VERIFY', 'FAILURE') == 'SUCCESS':
-            # if we only want to accept some specific CA, do a check like:
-            # if env.get('SSL_CLIENT_I_DN_OU') == "http://www.cacert.org"
-            email = env.get('SSL_CLIENT_S_DN_Email', '')
-            email_lower = email.lower()
-            commonname = env.get('SSL_CLIENT_S_DN_CN', '')
-            commonname_lower = commonname.lower()
-            if email_lower or commonname_lower:
-                for uid in user.getUserList(request):
-                    u = user.User(request, uid,
-                                  auth_method='sslclientcert', auth_attribs=())
-                    if email_lower and u.email.lower() == email_lower:
-                        u.auth_attribs = ('email', 'password')
-                        #this is only useful if same name should be used, as
-                        #commonname is likely no CamelCase WikiName
-                        #if commonname_lower != u.name.lower():
-                        #    u.name = commonname
-                        #    changed = True
-                        #u.auth_attribs = ('email', 'name', 'password')
-                        break
-                    if commonname_lower and u.name.lower() == commonname_lower:
-                        u.auth_attribs = ('name', 'password')
-                        #this is only useful if same email should be used as
-                        #specified in certificate.
-                        #if email_lower != u.email.lower():
-                        #    u.email = email
-                        #    changed = True
-                        #u.auth_attribs = ('name', 'email', 'password')
-                        break
-                else:
-                    u = None
-                if u is None:
-                    # user wasn't found, so let's create a new user object
-                    u = user.User(request, name=commonname_lower, auth_username=commonname_lower)
-
-    if u:
-        u.create_or_update(changed)
-    if u and u.valid:
-        return u, True
-    else:
-        return user_obj, True
-
-
-def smb_mount(request, **kw):
-    """ (u)mount a SMB server's share for username (using username/password for
-        authentication at the SMB server). This can be used if you need access
-        to files on some share via the wiki, but needs more code to be useful.
-        If you don't need it, don't use it.
-    """
-    username = kw.get('name')
-    password = kw.get('password')
-    login = kw.get('login')
-    logout = kw.get('logout')
-    user_obj = kw.get('user_obj')
-    cfg = request.cfg
-    verbose = cfg.smb_verbose
-    if verbose: request.log("got name=%s login=%r logout=%r" % (username, login, logout))
-    
-    # we just intercept login to mount and logout to umount the smb share
-    if login or logout:
-        import os, pwd, subprocess
-        web_username = cfg.smb_dir_user
-        web_uid = pwd.getpwnam(web_username)[2] # XXX better just use current uid?
-        if logout and user_obj: # logout -> we don't have username in form
-            username = user_obj.name # so we take it from previous auth method (moin_cookie e.g.)
-        mountpoint = cfg.smb_mountpoint % {
-            'username': username,
-        }
-        if login:
-            cmd = u"sudo mount -t cifs -o user=%(user)s,domain=%(domain)s,uid=%(uid)d,dir_mode=%(dir_mode)s,file_mode=%(file_mode)s,iocharset=%(iocharset)s //%(server)s/%(share)s %(mountpoint)s >>%(log)s 2>&1"
-        elif logout:
-            cmd = u"sudo umount %(mountpoint)s >>%(log)s 2>&1"
-            
-        cmd = cmd % {
-            'user': username,
-            'uid': web_uid,
-            'domain': cfg.smb_domain,
-            'server': cfg.smb_server,
-            'share': cfg.smb_share,
-            'mountpoint': mountpoint,
-            'dir_mode': cfg.smb_dir_mode,
-            'file_mode': cfg.smb_file_mode,
-            'iocharset': cfg.smb_iocharset,
-            'log': cfg.smb_log,
-        }
-        env = os.environ.copy()
-        if login:
-            try:
-                os.makedirs(mountpoint) # the dir containing the mountpoint must be writeable for us!
-            except OSError, err:
-                pass
-            env['PASSWD'] = password.encode(cfg.smb_coding)
-        subprocess.call(cmd.encode(cfg.smb_coding), env=env, shell=True)
-    return user_obj, True
-
-
-def ldap_login(request, **kw):
-    """ get authentication data from form, authenticate against LDAP (or Active Directory),
-        fetch some user infos from LDAP and create a user profile for that user that must
-        be used by subsequent auth plugins (like moin_cookie) as we never return a user
-        object from ldap_login.
-    """
-    username = kw.get('name')
-    password = kw.get('password')
-    login = kw.get('login')
-    logout = kw.get('logout')
-    user_obj = kw.get('user_obj')
-
-    cfg = request.cfg
-    verbose = cfg.ldap_verbose
-    
-    if verbose: request.log("got name=%s login=%r logout=%r" % (username, login, logout))
-    
-    # we just intercept login and logout for ldap, other requests have to be
-    # handled by another auth handler
-    if not login and not logout:
-        return user_obj, True
-    
-    import sys, re
-    import ldap
-    import traceback
-
-    u = None
-    coding = cfg.ldap_coding
-    try:
-        if verbose: request.log("LDAP: Trying to initialize %s." % cfg.ldap_uri)
-        l = ldap.initialize(cfg.ldap_uri)
-        if verbose: request.log("LDAP: Connected to LDAP server %s." % cfg.ldap_uri)
-        # you can use %(username)s and %(password)s here to get the stuff entered in the form:
-        ldap_binddn = cfg.ldap_binddn % locals()
-        ldap_bindpw = cfg.ldap_bindpw % locals()
-        l.simple_bind_s(ldap_binddn.encode(coding), ldap_bindpw.encode(coding))
-        if verbose: request.log("LDAP: Bound with binddn %s" % ldap_binddn)
-
-        filterstr = "(%s=%s)" % (cfg.ldap_name_attribute, username)
-        if verbose: request.log("LDAP: Searching %s" % filterstr)
-        lusers = l.search_st(cfg.ldap_base, cfg.ldap_scope,
-                             filterstr.encode(coding), timeout=cfg.ldap_timeout)
-        result_length = len(lusers)
-        if result_length != 1:
-            if result_length > 1:
-                request.log("LDAP: Search found more than one (%d) matches for %s." % (len(lusers), filterstr))
-            if result_length == 0:
-                if verbose: request.log("LDAP: Search found no matches for %s." % (filterstr, ))
-            return user_obj, True
-
-        dn, ldap_dict = lusers[0]
-        if verbose:
-            request.log("LDAP: debug lusers = %r" % lusers)
-            for key,val in ldap_dict.items():
-                request.log("LDAP: %s: %s" % (key, val))
-
-        try:
-            if verbose: request.log("LDAP: DN found is %s, trying to bind with pw" % dn)
-            l.simple_bind_s(dn, password.encode(coding))
-            if verbose: request.log("LDAP: Bound with dn %s (username: %s)" % (dn, username))
-            
-            email = ldap_dict.get(cfg.ldap_email_attribute, [''])[0]
-            email = email.decode(coding)
-            sn, gn = ldap_dict.get('sn', [''])[0], ldap_dict.get('givenName', [''])[0]
-            aliasname = ''
-            if sn and gn:
-                aliasname = "%s, %s" % (sn, gn)
-            elif sn:
-                aliasname = sn
-            aliasname = aliasname.decode(coding)
-            
-            u = user.User(request, auth_username=username, password=password, auth_method='ldap', auth_attribs=('name', 'password', 'email', 'mailto_author',))
-            u.name = username
-            u.aliasname = aliasname
-            u.email = email
-            u.remember_me = 0 # 0 enforces cookie_lifetime config param
-            if verbose: request.log("LDAP: creating userprefs with name %s email %s alias %s" % (username, email, aliasname))
-            
-        except ldap.INVALID_CREDENTIALS, err:
-            request.log("LDAP: invalid credentials (wrong password?) for dn %s (username: %s)" % (dn, username))
-
-    except:
-        info = sys.exc_info()
-        request.log("LDAP: caught an exception, traceback follows...")
-        request.log(''.join(traceback.format_exception(*info)))
-
-    if u:
-        u.create_or_update(True)
-    return user_obj, True # moin_cookie has to set the cookie and return the user obj
-
-
-def interwiki(request, **kw):
-    # TODO use auth_method and auth_attribs for User object
-    username = kw.get('name')
-    password = kw.get('password')
-    login = kw.get('login')
-    logout = kw.get('logout')
-    user_obj = kw.get('user_obj')
-
-    if login:
-        wikitag, wikiurl, wikitail, err = wikiutil.resolve_wiki(username)
-
-        if err or wikitag not in request.cfg.trusted_wikis:
-            return user_obj, True
-        
-        if password:
-            import xmlrpclib
-            homewiki = xmlrpclib.Server(wikiurl + "?action=xmlrpc2")
-            account_data = homewiki.getUser(wikitail, password)
-            if isinstance(account_data, str):
-                # show error message
-                return user_obj, True
-            
-            u = user.User(request, name=username)
-            for key, value in account_data.iteritems():
-                if key not in ["may", "id", "valid", "trusted"
-                               "auth_username",
-                               "name", "aliasname",
-                               "enc_passwd"]:
-                    setattr(u, key, value)
-            u.save()
-            setCookie(request, u)
-            return u, True
-        else:
-            pass
-            # XXX redirect to homewiki
-    
-    return user_obj, True
-
-
-class php_session:
-    """ Authentication module for PHP based frameworks
-        Authenticates via PHP session cookie. Currently supported systems:
-
-        * eGroupware 1.2 ("egw")
-         * You need to configure eGroupware in the "header setup" to use
-           "php sessions plus restore"
-
-        @copyright: 2005 by MoinMoin:AlexanderSchremmer
-            - Thanks to Spreadshirt
-    """
-
-    def __init__(self, apps=['egw'], s_path="/tmp", s_prefix="sess_"):
-        """ @param apps: A list of the enabled applications. See above for
-            possible keys.
-            @param s_path: The path where the PHP sessions are stored.
-            @param s_prefix: The prefix of the session files.
-        """
-        
-        self.s_path = s_path
-        self.s_prefix = s_prefix
-        self.apps = apps
-
-    def __call__(self, request, **kw):
-        def handle_egroupware(session):
-            """ Extracts name, fullname and email from the session. """
-            username = session['egw_session']['session_lid'].split("@", 1)[0]
-            known_accounts = session['egw_info_cache']['accounts']['cache']['account_data']
-            
-            # if the next line breaks, then the cache was not filled with the current
-            # user information
-            user_info = [value for key, value in known_accounts.items()
-                         if value['account_lid'] == username][0]
-            name = user_info.get('fullname', '')
-            email = user_info.get('email', '')
-            
-            dec = lambda x: x and x.decode("iso-8859-1")
-            
-            return dec(username), dec(email), dec(name)
-        
-        import Cookie, urllib
-        from MoinMoin.user import User
-        from MoinMoin.util import sessionParser
-    
-        user_obj = kw.get('user_obj')
-        try:
-            cookie = Cookie.SimpleCookie(request.saved_cookie)
-        except Cookie.CookieError: # ignore invalid cookies
-            cookie = None
-        if cookie:
-            for cookiename in cookie.keys():
-                cookievalue = urllib.unquote(cookie[cookiename].value).decode('iso-8859-1')
-                session = sessionParser.loadSession(cookievalue, path=self.s_path, prefix=self.s_prefix)
-                if session:
-                    if "egw" in self.apps and session.get('egw_session', None):
-                        username, email, name = handle_egroupware(session)
-                        break
-            else:
-                return user_obj, True
-            
-            user = User(request, name=username, auth_username=username)
-            
-            changed = False
-            if name != user.aliasname:
-                user.aliasname = name
-                changed = True
-            if email != user.email:
-                user.email = email
-                changed = True
-            
-            if user:
-                user.create_or_update(changed)
-            if user and user.valid:
-                return user, True # True to get other methods called, too
-        return user_obj, True # continue with next method in auth list
-
--- /dev/null	Thu Jan 01 00:00:00 1970 +0000
+++ b/MoinMoin/auth/_PHPsessionParser.py	Sat Jun 10 16:45:05 2006 +0200
@@ -0,0 +1,143 @@
+"""
+    MoinMoin - Parsing of PHP session files
+
+    @copyright: 2005 by MoinMoin:AlexanderSchremmer
+        - Thanks to Spreadshirt
+    @license: GNU GPL, see COPYING for details.
+"""
+
+#Known minor bugs/questions/ideas:
+#How does object demarshalling work?
+#The order of the python dictionaries is not stable compared to the PHP arrays
+#The loader does not check the owner of the files, so be aware of faked session
+#files.
+
+import os
+from MoinMoin import wikiutil
+
+s_prefix = "sess_"
+s_path = "/tmp"
+
+class UnknownObject(object):
+    """ Used in the return value if the input data could not be parsed. """
+    def __init__(self, pos):
+        self.pos = pos
+
+    def __repr__(self):
+        return "<Unknown object at pos %i>" % self.pos
+
+def transformList(items):
+    """ Transforms a list [1, 2, 3, 4, ...] into a
+        [(1, 2), (3, 4), ...] generator. """
+    for i in xrange(0, len(items), 2):
+        yield (items[i], items[i+1])
+    raise StopIteration
+
+def parseValue(string, start=0):
+    """ Parses the inner structure. """
+    #print "Parsing %r" % (string[start:], )
+
+    val_type = string[start]
+    header_end = string.find(':', 3+start)
+    if header_end != -1:
+        first_data = string[start+2:header_end]
+    else:
+        first_data = None
+    
+    #print "Saw type %r, first_data is %r." % (val_type, first_data)
+    if val_type == 'a': # array (in Python rather a mixture of a list and a dict)
+        i = 0
+        items = []
+        
+        current_pos = header_end+2
+        data = string
+        while i != (int(first_data) * 2):
+            item, current_pos = parseValue(data, current_pos)
+            items.append(item)
+            i += 1
+            current_pos += 1
+        
+        t_list = list(transformList(items))
+        try:
+            result = dict(t_list) # note that dict does not retain the order
+        except TypeError:
+            result = list(t_list)
+            #print "Warning, could not convert to dict: %r" %  (result, )
+        return result, current_pos
+    
+    if val_type == 's': # string
+        current_pos = header_end+2
+        end = current_pos + int(first_data)
+        data = string[current_pos:end]
+        current_pos = end+1
+        if data.startswith("a:"): #Sometimes, arrays are marshalled as strings.
+            try:
+                data = parseValue(data, 0)[0]
+            except ValueError: #Hmm, wrongly guessed. Just an ordinary string
+                pass
+        return data, current_pos
+
+    if val_type in ('i', 'b'): # integer or boolean
+        current_pos = start+2
+        str_buffer = ""
+        while current_pos != len(string):
+            cur_char = string[current_pos]
+            if cur_char.isdigit() or cur_char == "-":
+                str_buffer += cur_char
+            else:
+                cast = (val_type == 'i') and int or (lambda x: bool(int(x)))
+                return cast(str_buffer), current_pos
+            current_pos += 1
+
+    if val_type == "N": # Null, called None in Python
+        return None, start+1
+        
+    return UnknownObject(start), start+1
+
+def parseSession(boxed):
+    """ Parses the outer structure that is similar to a dict. """
+    current_pos = 0
+    session_dict = {}
+    while current_pos < len(boxed):
+        name_end = boxed.find("|", current_pos)
+        name = boxed[current_pos:name_end]
+        current_pos = name_end+1
+        data, current_pos = parseValue(boxed, current_pos)
+        current_pos += 1
+        session_dict[name] = data
+
+    return session_dict
+
+def loadSession(key, path=s_path, prefix=s_prefix):
+    """ Loads a particular session from the directory. The key needs to be the
+        session id. """
+    key = key.lower()
+    filename = os.path.join(path, prefix + wikiutil.taintfilename(key))
+
+    try:
+        f = open(filename, "rb")
+    except IOError, e:
+        if e.errno == 2:
+            return None # session does not exist
+        else:
+            raise
+
+    blob = f.read()
+    f.close()
+    return parseSession(blob)
+
+def listSessions(path=s_path, prefix=s_prefix):
+    """ Lists all sessions in a particular directory. """
+    return [os.path.basename(x).replace(prefix, '') for x in os.listdir(path)
+            if x.startswith(prefix)]
+
+if __name__ == '__main__':
+    # testing code
+    import time
+    a=time.clock()
+    
+    #print s
+    p_s = loadSession("...")
+    import pprint; pprint.pprint(p_s)
+    print time.clock() - a
+    print listSessions()
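
As a quick illustration of the blob format the new parser handles, the sketch below feeds a hand-made session string (a sequence of "name|<serialized value>" pairs, as written by PHP's session serializer) to parseSession; the sample data is invented for the example:

    from MoinMoin.auth._PHPsessionParser import parseSession

    # 'a:N:{...}' is an array, 's:LEN:"..."' a string, 'i:N;' an integer
    blob = 'user|a:2:{s:4:"name";s:3:"Bob";s:2:"id";i:7;}visits|i:3;'
    print parseSession(blob)
    # -> {'user': {'id': 7, 'name': 'Bob'}, 'visits': 3}  (dict key order may vary)
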
--- /dev/null	Thu Jan 01 00:00:00 1970 +0000
+++ b/MoinMoin/auth/__init__.py	Sat Jun 10 16:45:05 2006 +0200
@@ -0,0 +1,264 @@
+# -*- coding: iso-8859-1 -*-
+"""
+    MoinMoin - modular authentication code
+
+    Here are some methods moin can use in cfg.auth authentication method list.
+    The methods from that list get called (from request.py) in that sequence.
+    They get request as first argument and also some more kw arguments:
+       name: the value we got from a POST of the UserPreferences page
+             in the "name" form field (or None)
+       password: the value of the password form field (or None)
+       login: True if user has clicked on Login button
+       logout: True if user has clicked on Logout button
+       user_obj: the user_obj we have until now (user_obj returned from
+                 previous auth method or None for first auth method)
+       (we may add some more here later)
+
+    Use code like this to get them:
+        name = kw.get('name') or ''
+        password = kw.get('password') or ''
+        login = kw.get('login')
+        logout = kw.get('logout')
+        request.log("got name=%s len(password)=%d login=%r logout=%r" % (name, len(password), login, logout))
+    
+    The called auth method then must return a tuple (user_obj, continue_flag).
+    user_obj can be one of:
+    * a (newly created) User object
+    * None if we want to inhibit log in from previous auth methods
+    * what we got as kw argument user_obj (meaning: no change).
+    continue_flag is a boolean indicating whether the auth loop shall continue
+    trying other auth methods (or not).
+
+    The methods give a kw arg "auth_attribs" to User.__init__ that tells
+    which user attribute names are DETERMINED and set by this auth method and
+    must not get changed by the user using the UserPreferences form.
+    They also give a kw arg "auth_method" that tells the name of the auth
+    method that authenticated the user.
+
+    TODO: check against other cookie work (see wiki)  
+          reduce amount of XXX
+          
+    @copyright: 2005-2006 Bastian Blank, Florian Festi, MoinMoin:ThomasWaldmann,
+                          MoinMoin:AlexanderSchremmer, Nick Phillips,
+                          MoinMoin:FrankieChow, MoinMoin:NirSoffer
+    @license: GNU GPL, see COPYING for details.
+"""
+
+import time, Cookie
+from MoinMoin import user
+
+# cookie names
+MOIN_SESSION = 'MOIN_SESSION'
+
+import hmac, random
+
+def generate_security_string(length):
+    """ generate a random length (length/2 .. length) string with random content """
+    random_length = random.randint(length/2, length)
+    safe = 'ABCDEFGHIJKLMNOPQRSTUVWXYZabcdefghijklmnopqrstuvwxyz0123456789_-'
+    return ''.join([random.choice(safe) for i in range(random_length)])
+
+def make_security_hash(request, data, securitystring=''):
+    """ generate a hash string based on site configuration's cfg.cookie_secret,
+        securitystring and the data.
+    """
+    return hmac.new(request.cfg.cookie_secret + securitystring, data).hexdigest()
+
+def makeCookie(request, cookie_name, cookie_string, maxage, expires):
+    """ create an appropriate cookie """
+    c = Cookie.SimpleCookie()
+    cfg = request.cfg
+    c[cookie_name] = cookie_string
+    c[cookie_name]['max-age'] = maxage
+    if cfg.cookie_domain:
+        c[cookie_name]['domain'] = cfg.cookie_domain
+    if cfg.cookie_path:
+        c[cookie_name]['path'] = cfg.cookie_path
+    else:
+        path = request.getScriptname()
+        if not path:
+            path = '/'
+        c[cookie_name]['path'] = path
+    # Set expires for older clients
+    c[cookie_name]['expires'] = request.httpDate(when=expires, rfc='850')        
+    return c.output()
+
+def setCookie(request, u, cookie_name, cookie_string):
+    """ Set cookie for the user obj u
+    
+    cfg.cookie_lifetime and the user 'remember_me' setting set the
+    lifetime of the cookie. lifetime in int hours, see table:
+    
+    value   cookie lifetime
+    ----------------------------------------------------------------
+     = 0    forever, ignoring user 'remember_me' setting
+     > 0    n hours, or forever if user checked 'remember_me'
+     < 0    -n hours, ignoring user 'remember_me' setting
+    """
+    # Calculate cookie maxage and expires
+    lifetime = int(request.cfg.cookie_lifetime) * 3600 
+    forever = 10 * 365 * 24 * 3600 # 10 years
+    now = time.time()
+    if not lifetime:
+        maxage = forever
+    elif lifetime > 0:
+        if u.remember_me:
+            maxage = forever
+        else:
+            maxage = lifetime
+    elif lifetime < 0:
+        maxage = (-lifetime)
+    expires = now + maxage
+    
+    cookie = makeCookie(request, cookie_name, cookie_string, maxage, expires)
+    # Set cookie
+    request.setHttpHeader(cookie)
+    # IMPORTANT: Prevent caching of current page and cookie
+    request.disableHttpCaching()
+
+def setSessionCookie(request, u):
+    """ Set moin_session cookie for user obj u """
+    import base64
+    cfg = request.cfg
+    enc_username = base64.encodestring(u.auth_username)
+    enc_id = base64.encodestring(u.id)
+    # XXX - should include expiry!
+    cookie_body = "username=%s:id=%s" % (enc_username, enc_id)
+    cookie_hash = make_security_hash(request, cookie_body)
+    cookie_string = ':'.join([cookie_hash, cookie_body])
+    setCookie(request, u, MOIN_SESSION, cookie_string)
+
+def deleteCookie(request, cookie_name):
+    """ Delete the user cookie by sending expired cookie with null value
+
+    According to http://www.cse.ohio-state.edu/cgi-bin/rfc/rfc2109.html#sec-4.2.2
+    a deleted cookie should have Max-Age=0. We also set an expires attribute,
+    which is probably needed for older browsers.
+
+    Finally, delete the saved cookie and create a new user based on the new settings.
+    """
+    cookie_string = ''
+    maxage = 0
+    # Set expires to one year ago for older clients
+    expires = time.time() - (3600 * 24 * 365) # 1 year ago
+    cookie = makeCookie(request, cookie_name, cookie_string, maxage, expires) 
+    # Set cookie
+    request.setHttpHeader(cookie)
+    # IMPORTANT: Prevent caching of current page and cookie        
+    request.disableHttpCaching()
+
+def moin_login(request, **kw):
+    """ handle login from moin login form, session has to be established later by moin_session """
+    username = kw.get('name')
+    password = kw.get('password')
+    login = kw.get('login')
+    #logout = kw.get('logout')
+    user_obj = kw.get('user_obj')
+
+    cfg = request.cfg
+    verbose = False
+    if hasattr(cfg, 'moin_login_verbose'):
+        verbose = cfg.moin_login_verbose
+    
+    #request.log("auth.moin_login: name=%s login=%r logout=%r user_obj=%r" % (username, login, logout, user_obj))
+
+    if login:
+        if verbose: request.log("moin_login performing login action")
+        u = user.User(request, name=username, password=password, auth_method='moin_login')
+        if u.valid:
+            if verbose: request.log("moin_login got valid user...")
+            user_obj = u
+        else:
+            if verbose: request.log("moin_login not valid, previous valid=%d." % user_obj.valid)
+
+    return user_obj, True
+
+def moin_session(request, **kw):
+    """ Authenticate via cookie.
+    
+    We don't handle initial logins (except to set the appropriate cookie), just
+    ongoing sessions, and logout. Use another method for initial login.
+    """
+    import base64
+    
+    username = kw.get('name')
+    login = kw.get('login')
+    logout = kw.get('logout')
+    user_obj = kw.get('user_obj')
+
+    cfg = request.cfg
+    verbose = False
+    if hasattr(cfg, 'moin_session_verbose'):
+        verbose = cfg.moin_session_verbose
+
+    cookie_name = MOIN_SESSION
+    
+    if verbose: request.log("auth.moin_session: name=%s login=%r logout=%r user_obj=%r" % (username, login, logout, user_obj))
+
+    if login:
+        if verbose: request.log("moin_session performing login action")
+
+        # Has any other method successfully authenticated?
+        if user_obj is not None and user_obj.valid:
+            # Yes - set up session cookie
+            if verbose: request.log("moin_session got valid user from previous auth method, setting cookie...")
+            if verbose: request.log("moin_session got auth_username %s." % user_obj.auth_username)
+            setSessionCookie(request, user_obj)
+            return user_obj, True # we make continuing possible, e.g. for smbmount
+        else:
+            # No other method succeeded, so allow continuation...
+            # XXX Cookie clear here???
+            if verbose: request.log("moin_session did not get valid user from previous auth method, doing nothing")
+            return user_obj, True
+
+    try:
+        if verbose: request.log("trying to get cookie...")
+        cookie = Cookie.SimpleCookie(request.saved_cookie)
+    except Cookie.CookieError:
+        # ignore invalid cookies, else user can't relogin
+        if verbose: request.log("caught Cookie.CookieError")
+        cookie = None
+
+    if not (cookie is not None and cookie.has_key(cookie_name)):
+        # No valid cookie
+        if verbose: request.log("either no cookie or no %s key" % cookie_name)
+        return user_obj, True
+    
+    try:
+        cookie_hash, cookie_body = cookie[cookie_name].value.split(':', 1)
+    except ValueError:
+        # Invalid cookie
+        if verbose: request.log("invalid cookie format: (%s)" % cookie[cookie_name].value)
+        return user_obj, True
+    
+    if cookie_hash != make_security_hash(request, cookie_body):
+        # Invalid cookie
+        # XXX Cookie clear here???
+        if verbose: request.log("cookie recovered had invalid hash")
+        return user_obj, True
+
+    # We can trust the cookie
+    if verbose: request.log("Cookie OK, authenticated.")
+    params = { 'username': '', 'id': '' }
+    cookie_pairs = cookie_body.split(":")
+    for key, value in [pair.split("=", 1) for pair in cookie_pairs]:
+        params[key] = base64.decodestring(value) # assuming all values are base64 encoded
+    # XXX Should check expiry from cookie
+    # XXX Should name be in auth_attribs?
+    u = user.User(request,
+                  id=params['id'],
+                  auth_username=params['username'],
+                  auth_method='moin_session',
+                  auth_attribs=(),
+                  )
+        
+    if logout:
+        if verbose: request.log("Logout requested, setting u invalid and 'deleting' cookie")
+        u.valid = 0 # just make user invalid, but remember him
+        deleteCookie(request, cookie_name)
+        return u, True # we return an invalidated user object, so that
+                       # following auth methods can get the name of
+                       # the user who logged out
+    setSessionCookie(request, u) # refreshes cookie lifetime
+    return u, True # use True to get other methods called, too
+
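
To make the method interface documented above concrete, a minimal custom auth method could look like the following sketch (the deny_guest name and its 'guest' rule are invented, not part of this changeset):

    def deny_guest(request, **kw):
        """ sketch: refuse login for one particular name, else pass through """
        username = kw.get('name') or ''
        login = kw.get('login')
        user_obj = kw.get('user_obj')
        if login and username == 'guest':
            return None, False  # inhibit login and stop the auth chain
        return user_obj, True   # no change, let the next method in cfg.auth decide

Returning (None, False) ends the loop without a valid user; returning (user_obj, True) leaves any earlier decision untouched and continues with the next configured method.
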
--- /dev/null	Thu Jan 01 00:00:00 1970 +0000
+++ b/MoinMoin/auth/http.py	Sat Jun 10 16:45:05 2006 +0200
@@ -0,0 +1,53 @@
+# -*- coding: iso-8859-1 -*-
+"""
+    MoinMoin - http authentication
+
+    You need either your webserver configured for doing HTTP auth (like Apache
+    reading some .htpasswd file) or Twisted (will accept HTTP auth against
+    password stored in moin user profile, but currently will NOT ask for auth).
+
+    @copyright: 2006 by MoinMoin:ThomasWaldmann
+    @license: GNU GPL, see COPYING for details.
+"""
+from MoinMoin import user
+from MoinMoin.request import TWISTED, CLI
+
+def http(request, **kw):
+    """ authenticate via http basic/digest/ntlm auth """
+    user_obj = kw.get('user_obj')
+    u = None
+    # check if we are running Twisted
+    if isinstance(request, TWISTED.Request):
+        username = request.twistd.getUser()
+        password = request.twistd.getPassword()
+        # when using Twisted http auth, we use username and password from
+        # the moin user profile, so both can be changed by user.
+        u = user.User(request, auth_username=username, password=password,
+                      auth_method='http', auth_attribs=())
+
+    elif not isinstance(request, CLI.Request):
+        env = request.env
+        auth_type = env.get('AUTH_TYPE','')
+        if auth_type in ['Basic', 'Digest', 'NTLM', 'Negotiate',]:
+            username = env.get('REMOTE_USER','')
+            if auth_type in ('NTLM', 'Negotiate',):
+                # converting to standard case so the user can even enter wrong case
+                # (added since windows does not distinguish between e.g.
+                #  "Mike" and "mike")
+                username = username.split('\\')[-1] # split off domain e.g.
+                                                    # from DOMAIN\user
+                # this "normalizes" the login name from {meier, Meier, MEIER} to Meier
+                # put a comment sign in front of next line if you don't want that:
+                username = username.title()
+            # when using http auth, we have external user name and password,
+            # we don't use the moin user profile for those attributes.
+            u = user.User(request, auth_username=username,
+                          auth_method='http', auth_attribs=('name', 'password'))
+
+    if u:
+        u.create_or_update()
+    if u and u.valid:
+        return u, True # True to get other methods called, too
+    else:
+        return user_obj, True
+
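
Presumably this plugin is activated via the cfg.auth list described in MoinMoin/auth/__init__.py above; a minimal wikiconfig sketch (the DefaultConfig base class and all values are assumptions, not part of this changeset) might look like:

    # wikiconfig.py (sketch)
    from MoinMoin.multiconfig import DefaultConfig
    from MoinMoin.auth.http import http

    class Config(DefaultConfig):
        sitename = u'My Wiki'   # example value
        auth = [http]           # let the web server (or Twisted) do the authentication
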
--- /dev/null	Thu Jan 01 00:00:00 1970 +0000
+++ b/MoinMoin/auth/interwiki.py	Sat Jun 10 16:45:05 2006 +0200
@@ -0,0 +1,50 @@
+# -*- coding: iso-8859-1 -*-
+"""
+    MoinMoin - authentication using a remote wiki
+
+    This is completely untested and should be seen as an idea rather
+    than as a working implementation.
+
+    @copyright: 2005 by ???
+    @license: GNU GPL, see COPYING for details.
+"""
+import xmlrpclib
+from MoinMoin import auth, wikiutil, user
+
+def interwiki(request, **kw):
+    # TODO use auth_method and auth_attribs for User object
+    username = kw.get('name')
+    password = kw.get('password')
+    login = kw.get('login')
+    logout = kw.get('logout')
+    user_obj = kw.get('user_obj')
+
+    if login:
+        wikitag, wikiurl, wikitail, err = wikiutil.resolve_wiki(username)
+
+        if err or wikitag not in request.cfg.trusted_wikis:
+            return user_obj, True
+        
+        if password:
+            homewiki = xmlrpclib.Server(wikiurl + "?action=xmlrpc2")
+            account_data = homewiki.getUser(wikitail, password)
+            if isinstance(account_data, str):
+                # show error message
+                return user_obj, True
+            
+            u = user.User(request, name=username)
+            for key, value in account_data.iteritems():
+                if key not in ["may", "id", "valid", "trusted"
+                               "auth_username",
+                               "name", "aliasname",
+                               "enc_passwd"]:
+                    setattr(u, key, value)
+            u.save()
+            auth.setSessionCookie(request, u)
+            return u, True
+        else:
+            pass
+            # XXX redirect to homewiki
+    
+    return user_obj, True
+
--- /dev/null	Thu Jan 01 00:00:00 1970 +0000
+++ b/MoinMoin/auth/ldap_login.py	Sat Jun 10 16:45:05 2006 +0200
@@ -0,0 +1,101 @@
+# -*- coding: iso-8859-1 -*-
+"""
+    MoinMoin - LDAP / Active Directory authentication
+
+    This code only creates a user object, the session has to be established by
+    the auth.moin_session auth plugin.
+    
+    @copyright: 2006 by MoinMoin:ThomasWaldmann, Nick Phillips
+    @license: GNU GPL, see COPYING for details.
+"""
+import sys, re
+import traceback
+import ldap
+from MoinMoin import user
+
+def ldap_login(request, **kw):
+    """ get authentication data from form, authenticate against LDAP (or Active Directory),
+        fetch some user infos from LDAP and create a user profile for that user that must
+        be used by subsequent auth plugins (like moin_cookie) as we never return a user
+        object from ldap_login.
+    """
+    username = kw.get('name')
+    password = kw.get('password')
+    login = kw.get('login')
+    logout = kw.get('logout')
+    user_obj = kw.get('user_obj')
+
+    cfg = request.cfg
+    verbose = cfg.ldap_verbose
+    
+    if verbose: request.log("got name=%s login=%r logout=%r" % (username, login, logout))
+    
+    # we just intercept login and logout for ldap, other requests have to be
+    # handled by another auth handler
+    if not login and not logout:
+        return user_obj, True
+    
+    u = None
+    coding = cfg.ldap_coding
+    try:
+        if verbose: request.log("LDAP: Trying to initialize %s." % cfg.ldap_uri)
+        l = ldap.initialize(cfg.ldap_uri)
+        if verbose: request.log("LDAP: Connected to LDAP server %s." % cfg.ldap_uri)
+        # you can use %(username)s and %(password)s here to get the stuff entered in the form:
+        ldap_binddn = cfg.ldap_binddn % locals()
+        ldap_bindpw = cfg.ldap_bindpw % locals()
+        l.simple_bind_s(ldap_binddn.encode(coding), ldap_bindpw.encode(coding))
+        if verbose: request.log("LDAP: Bound with binddn %s" % ldap_binddn)
+
+        filterstr = "(%s=%s)" % (cfg.ldap_name_attribute, username)
+        if verbose: request.log("LDAP: Searching %s" % filterstr)
+        lusers = l.search_st(cfg.ldap_base, cfg.ldap_scope,
+                             filterstr.encode(coding), timeout=cfg.ldap_timeout)
+        result_length = len(lusers)
+        if result_length != 1:
+            if result_length > 1:
+                request.log("LDAP: Search found more than one (%d) matches for %s." % (len(lusers), filterstr))
+            if result_length == 0:
+                if verbose: request.log("LDAP: Search found no matches for %s." % (filterstr, ))
+            return user_obj, True
+
+        dn, ldap_dict = lusers[0]
+        if verbose:
+            request.log("LDAP: debug lusers = %r" % lusers)
+            for key,val in ldap_dict.items():
+                request.log("LDAP: %s: %s" % (key, val))
+
+        try:
+            if verbose: request.log("LDAP: DN found is %s, trying to bind with pw" % dn)
+            l.simple_bind_s(dn, password.encode(coding))
+            if verbose: request.log("LDAP: Bound with dn %s (username: %s)" % (dn, username))
+            
+            email = ldap_dict.get(cfg.ldap_email_attribute, [''])[0]
+            email = email.decode(coding)
+            sn, gn = ldap_dict.get('sn', [''])[0], ldap_dict.get('givenName', [''])[0]
+            aliasname = ''
+            if sn and gn:
+                aliasname = "%s, %s" % (sn, gn)
+            elif sn:
+                aliasname = sn
+            aliasname = aliasname.decode(coding)
+            
+            u = user.User(request, auth_username=username, password="{SHA}NotStored", auth_method='ldap', auth_attribs=('name', 'password', 'email', 'mailto_author',))
+            u.name = username
+            u.aliasname = aliasname
+            u.email = email
+            u.remember_me = 0 # 0 enforces cookie_lifetime config param
+            if verbose: request.log("LDAP: creating userprefs with name %s email %s alias %s" % (username, email, aliasname))
+            
+        except ldap.INVALID_CREDENTIALS, err:
+            request.log("LDAP: invalid credentials (wrong password?) for dn %s (username: %s)" % (dn, username))
+
+    except:
+        info = sys.exc_info()
+        request.log("LDAP: caught an exception, traceback follows...")
+        request.log(''.join(traceback.format_exception(*info)))
+
+    if u:
+        u.create_or_update(True)
+    return u, True # moin_session has to set the cookie
+
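
Since ldap_login only builds the user object and leaves the session cookie to moin_session, a configuration would chain the two; in the sketch below only the cfg.ldap_* attribute names are taken from the code above, every value is an example:

    # wikiconfig.py (sketch)
    import ldap
    from MoinMoin.multiconfig import DefaultConfig
    from MoinMoin.auth import moin_session
    from MoinMoin.auth.ldap_login import ldap_login

    class Config(DefaultConfig):
        auth = [ldap_login, moin_session]
        cookie_secret = 'some long random string'  # used by moin_session's hash
        ldap_verbose = True
        ldap_uri = 'ldap://ldap.example.org'
        ldap_binddn = ''                           # '' means anonymous bind
        ldap_bindpw = ''
        ldap_base = 'ou=People,dc=example,dc=org'
        ldap_scope = ldap.SCOPE_SUBTREE
        ldap_name_attribute = 'uid'
        ldap_email_attribute = 'mail'
        ldap_coding = 'utf-8'
        ldap_timeout = 10
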
--- /dev/null	Thu Jan 01 00:00:00 1970 +0000
+++ b/MoinMoin/auth/log.py	Sat Jun 10 16:45:05 2006 +0200
@@ -0,0 +1,21 @@
+# -*- coding: iso-8859-1 -*-
+"""
+    MoinMoin - logging auth plugin
+
+    This does nothing except logging the auth parameters (the password is NOT
+    logged, of course).
+
+    @copyright: 2006 by MoinMoin:ThomasWaldmann
+    @license: GNU GPL, see COPYING for details.
+"""
+
+def log(request, **kw):
+    """ just log the call, do nothing else """
+    username = kw.get('name')
+    password = kw.get('password')
+    login = kw.get('login')
+    logout = kw.get('logout')
+    user_obj = kw.get('user_obj')
+    request.log("auth.log: name=%s login=%r logout=%r user_obj=%r" % (username, login, logout, user_obj))
+    return user_obj, True
+
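
One plausible use of this plugin is to place it between the real methods in cfg.auth so that every stage of the chain gets logged; a sketch, assuming the moin_login/moin_session methods from MoinMoin/auth/__init__.py above:

    # wikiconfig.py (sketch)
    from MoinMoin.multiconfig import DefaultConfig
    from MoinMoin.auth import moin_login, moin_session
    from MoinMoin.auth.log import log

    class Config(DefaultConfig):
        auth = [log, moin_login, log, moin_session, log]
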
--- /dev/null	Thu Jan 01 00:00:00 1970 +0000
+++ b/MoinMoin/auth/mysql_group.py	Sat Jun 10 16:45:05 2006 +0200
@@ -0,0 +1,71 @@
+# -*- coding: iso-8859-1 -*-
+"""
+    MoinMoin - auth plugin doing a check against MySQL group db
+
+    ...
+
+    @copyright: 2006 by Nick Phillips
+    @license: GNU GPL, see COPYING for details.
+"""
+
+import MySQLdb
+
+def mysql_group(request, **kw):
+    """ Authorize via MySQL group DB.
+    
+    We require an already-authenticated user_obj.
+    We don't worry about the type of request (login, logout, neither).
+    We just check user is part of authorized group.
+    """
+    
+    username = kw.get('name')
+#    login = kw.get('login')
+#    logout = kw.get('logout')
+    user_obj = kw.get('user_obj')
+
+    cfg = request.cfg
+    verbose = False
+
+    if hasattr(cfg, 'mysql_group_verbose'):
+        verbose = cfg.mysql_group_verbose
+    
+    if verbose: request.log("auth.mysql_group: name=%s user_obj=%r" % (username, user_obj))
+
+    # Has any other method successfully authenticated?
+    if user_obj is not None and user_obj.valid:
+        # Yes - we can do stuff!
+        if verbose: request.log("mysql_group got valid user from previous auth method, trying authz...")
+        if verbose: request.log("mysql_group got auth_username %s." % user_obj.auth_username)
+
+        # XXX Check auth_username for dodgy chars (should be none as it is authenticated, but...)
+
+        # OK, now check mysql!
+        try:
+            m = MySQLdb.connect(host=cfg.mysql_group_dbhost,
+                                user=cfg.mysql_group_dbuser,
+                                passwd=cfg.mysql_group_dbpass,
+                                db=cfg.mysql_group_dbname,
+                                )
+        except:
+            import sys
+            import traceback
+            info = sys.exc_info()
+            request.log("mysql_group: authorization failed due to exception connecting to DB, traceback follows...")
+            request.log(''.join(traceback.format_exception(*info)))
+            return None, False
+        
+        c = m.cursor()
+        c.execute(cfg.mysql_group_query, user_obj.auth_username)
+        results = c.fetchall()
+        if results:
+            # Checked out OK
+            if verbose: request.log("mysql_group got %d results -- authorized!" % len(results))
+            return user_obj, True # we make continuing possible, e.g. for smbmount
+        else:
+            if verbose: request.log("mysql_group did not get match from DB -- not authorized")
+            return None, False
+    else:
+        # No other method succeeded, so we cannot authorize -- must fail
+        if verbose: request.log("mysql_group did not get valid user from previous auth method, cannot authorize")
+        return None, False
+
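
Because mysql_group only authorizes a user that an earlier method has already authenticated, it belongs at the end of the chain. The cfg.mysql_group_* names below come from the code above; host, credentials, table and query are purely illustrative:

    # wikiconfig.py (sketch)
    from MoinMoin.multiconfig import DefaultConfig
    from MoinMoin.auth import moin_login, moin_session
    from MoinMoin.auth.mysql_group import mysql_group

    class Config(DefaultConfig):
        auth = [moin_login, moin_session, mysql_group]
        mysql_group_verbose = True
        mysql_group_dbhost = 'localhost'
        mysql_group_dbuser = 'moin'
        mysql_group_dbpass = 'secret'
        mysql_group_dbname = 'intranet'
        # one %s placeholder, filled with user_obj.auth_username by the plugin
        mysql_group_query = 'SELECT username FROM wiki_users WHERE username = %s'
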
--- /dev/null	Thu Jan 01 00:00:00 1970 +0000
+++ b/MoinMoin/auth/php_session.py	Sat Jun 10 16:45:05 2006 +0200
@@ -0,0 +1,80 @@
+# -*- coding: iso-8859-1 -*-
+"""
+    MoinMoin - PHP session cookie authentication
+    
+    Currently supported systems:
+
+        * eGroupware 1.2 ("egw")
+         * You need to configure eGroupware in the "header setup" to use
+           "php sessions plus restore"
+
+    @copyright: 2005 by MoinMoin:AlexanderSchremmer (Thanks to Spreadshirt)
+    @license: GNU GPL, see COPYING for details.
+"""
+
+import Cookie, urllib
+from MoinMoin import user
+from MoinMoin.auth import _PHPsessionParser
+
+class php_session:
+    """ PHP session cookie authentication """
+    def __init__(self, apps=['egw'], s_path="/tmp", s_prefix="sess_"):
+        """ @param apps: A list of the enabled applications. See above for
+            possible keys.
+            @param s_path: The path where the PHP sessions are stored.
+            @param s_prefix: The prefix of the session files.
+        """
+        
+        self.s_path = s_path
+        self.s_prefix = s_prefix
+        self.apps = apps
+
+    def __call__(self, request, **kw):
+        def handle_egroupware(session):
+            """ Extracts name, fullname and email from the session. """
+            username = session['egw_session']['session_lid'].split("@", 1)[0]
+            known_accounts = session['egw_info_cache']['accounts']['cache']['account_data']
+            
+            # if the next line breaks, then the cache was not filled with the current
+            # user information
+            user_info = [value for key, value in known_accounts.items()
+                         if value['account_lid'] == username][0]
+            name = user_info.get('fullname', '')
+            email = user_info.get('email', '')
+            
+            dec = lambda x: x and x.decode("iso-8859-1")
+            
+            return dec(username), dec(email), dec(name)
+        
+        user_obj = kw.get('user_obj')
+        try:
+            cookie = Cookie.SimpleCookie(request.saved_cookie)
+        except Cookie.CookieError: # ignore invalid cookies
+            cookie = None
+        if cookie:
+            for cookiename in cookie.keys():
+                cookievalue = urllib.unquote(cookie[cookiename].value).decode('iso-8859-1')
+                session = _PHPsessionParser.loadSession(cookievalue, path=self.s_path, prefix=self.s_prefix)
+                if session:
+                    if "egw" in self.apps and session.get('egw_session', None):
+                        username, email, name = handle_egroupware(session)
+                        break
+            else:
+                return user_obj, True
+            
+            u = user.User(request, name=username, auth_username=username) # 'u', not 'user': don't shadow the user module
+            
+            changed = False
+            if name != u.aliasname:
+                u.aliasname = name
+                changed = True
+            if email != u.email:
+                u.email = email
+                changed = True
+            
+            if u:
+                u.create_or_update(changed)
+            if u and u.valid:
+                return u, True # True to get other methods called, too
+        return user_obj, True # continue with next method in auth list
+
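
Unlike the plain function plugins, php_session is a class, so an instance (carrying the session path) goes into cfg.auth; the path below is only an example:

    # wikiconfig.py (sketch)
    from MoinMoin.multiconfig import DefaultConfig
    from MoinMoin.auth.php_session import php_session

    class Config(DefaultConfig):
        auth = [php_session(apps=['egw'], s_path='/var/lib/php/sessions')]
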
--- /dev/null	Thu Jan 01 00:00:00 1970 +0000
+++ b/MoinMoin/auth/smb_mount.py	Sat Jun 10 16:45:05 2006 +0200
@@ -0,0 +1,61 @@
+# -*- coding: iso-8859-1 -*-
+"""
+    MoinMoin - auth plugin for (un)mounting a smb share
+
+    (u)mount a SMB server's share for username (using username/password for
+    authentication at the SMB server). This can be used if you need access
+    to files on some share via the wiki, but needs more code to be useful.
+
+    @copyright: 2006 by MoinMoin:ThomasWaldmann
+    @license: GNU GPL, see COPYING for details.
+"""
+
+
+def smb_mount(request, **kw):
+    """ auth plugin for (un)mounting an smb share """
+    username = kw.get('name')
+    password = kw.get('password')
+    login = kw.get('login')
+    logout = kw.get('logout')
+    user_obj = kw.get('user_obj')
+    cfg = request.cfg
+    verbose = cfg.smb_verbose
+    if verbose: request.log("got name=%s login=%r logout=%r" % (username, login, logout))
+    
+    # we just intercept login to mount and logout to umount the smb share
+    if login or logout:
+        import os, pwd, subprocess
+        web_username = cfg.smb_dir_user
+        web_uid = pwd.getpwnam(web_username)[2] # XXX better just use current uid?
+        if logout and user_obj: # logout -> we don't have username in form
+            username = user_obj.name # so we take it from previous auth method (moin_cookie e.g.)
+        mountpoint = cfg.smb_mountpoint % {
+            'username': username,
+        }
+        if login:
+            cmd = u"sudo mount -t cifs -o user=%(user)s,domain=%(domain)s,uid=%(uid)d,dir_mode=%(dir_mode)s,file_mode=%(file_mode)s,iocharset=%(iocharset)s //%(server)s/%(share)s %(mountpoint)s >>%(log)s 2>&1"
+        elif logout:
+            cmd = u"sudo umount %(mountpoint)s >>%(log)s 2>&1"
+            
+        cmd = cmd % {
+            'user': username,
+            'uid': web_uid,
+            'domain': cfg.smb_domain,
+            'server': cfg.smb_server,
+            'share': cfg.smb_share,
+            'mountpoint': mountpoint,
+            'dir_mode': cfg.smb_dir_mode,
+            'file_mode': cfg.smb_file_mode,
+            'iocharset': cfg.smb_iocharset,
+            'log': cfg.smb_log,
+        }
+        env = os.environ.copy()
+        if login:
+            try:
+                os.makedirs(mountpoint) # the dir containing the mountpoint must be writeable for us!
+            except OSError, err:
+                pass
+            env['PASSWD'] = password.encode(cfg.smb_coding)
+        subprocess.call(cmd.encode(cfg.smb_coding), env=env, shell=True)
+    return user_obj, True
+
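
All of the cfg.smb_* attributes referenced by the code above have to come from the wiki configuration; a sketch with made-up values, with moin_login/moin_session doing the actual authentication first:

    # wikiconfig.py (sketch)
    from MoinMoin.multiconfig import DefaultConfig
    from MoinMoin.auth import moin_login, moin_session
    from MoinMoin.auth.smb_mount import smb_mount

    class Config(DefaultConfig):
        auth = [moin_login, moin_session, smb_mount]
        smb_verbose = True
        smb_server = 'fileserver.example.org'
        smb_domain = 'EXAMPLE'
        smb_share = 'homes'
        smb_dir_user = 'www-data'                   # system user whose uid is used
        smb_mountpoint = u'/mnt/wiki/%(username)s'  # expanded per wiki user
        smb_dir_mode = '0700'
        smb_file_mode = '0600'
        smb_iocharset = 'iso8859-1'
        smb_coding = 'iso8859-1'
        smb_log = '/tmp/moin_smb_mount.log'
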
--- /dev/null	Thu Jan 01 00:00:00 1970 +0000
+++ b/MoinMoin/auth/sslclientcert.py	Sat Jun 10 16:45:05 2006 +0200
@@ -0,0 +1,71 @@
+# -*- coding: iso-8859-1 -*-
+"""
+    MoinMoin - SSL client certificate authentication
+
+    Currently not supported for the Twisted web server, only for web servers
+    that set the SSL_CLIENT_* environment variables (e.g. Apache).
+    
+    @copyright: 2006 by MoinMoin:ThomasWaldmann,
+                2003 by Martin v. Löwis
+    @license: GNU GPL, see COPYING for details.
+"""
+
+from MoinMoin import user
+from MoinMoin.request import TWISTED
+
+def sslclientcert(request, **kw):
+    """ authenticate via SSL client certificate """
+    user_obj = kw.get('user_obj')
+    u = None
+    changed = False
+    # check if we are running Twisted
+    if isinstance(request, TWISTED.Request):
+        return user_obj, True # not supported if we run twisted
+        # Addendum: this seems to need quite some twisted insight and coding.
+        # A pointer i got on #twisted: divmod's vertex.sslverify
+        # If you really need this, feel free to implement and test it and
+        # submit a patch if it works.
+    else:
+        env = request.env
+        if env.get('SSL_CLIENT_VERIFY', 'FAILURE') == 'SUCCESS':
+            # if we only want to accept some specific CA, do a check like:
+            # if env.get('SSL_CLIENT_I_DN_OU') == "http://www.cacert.org"
+            email = env.get('SSL_CLIENT_S_DN_Email', '')
+            email_lower = email.lower()
+            commonname = env.get('SSL_CLIENT_S_DN_CN', '')
+            commonname_lower = commonname.lower()
+            if email_lower or commonname_lower:
+                for uid in user.getUserList(request):
+                    u = user.User(request, uid,
+                                  auth_method='sslclientcert', auth_attribs=())
+                    if email_lower and u.email.lower() == email_lower:
+                        u.auth_attribs = ('email', 'password')
+                        #this is only useful if same name should be used, as
+                        #commonname is likely no CamelCase WikiName
+                        #if commonname_lower != u.name.lower():
+                        #    u.name = commonname
+                        #    changed = True
+                        #u.auth_attribs = ('email', 'name', 'password')
+                        break
+                    if commonname_lower and u.name.lower() == commonname_lower:
+                        u.auth_attribs = ('name', 'password')
+                        #this is only useful if same email should be used as
+                        #specified in certificate.
+                        #if email_lower != u.email.lower():
+                        #    u.email = email
+                        #    changed = True
+                        #u.auth_attribs = ('name', 'email', 'password')
+                        break
+                else:
+                    u = None
+                if u is None:
+                    # user wasn't found, so let's create a new user object
+                    u = user.User(request, name=commonname_lower, auth_username=commonname_lower)
+
+    if u:
+        u.create_or_update(changed)
+    if u and u.valid:
+        return u, True
+    else:
+        return user_obj, True
+
--- a/MoinMoin/i18n/POTFILES.in	Wed Jun 07 14:50:19 2006 +0200
+++ b/MoinMoin/i18n/POTFILES.in	Sat Jun 10 16:45:05 2006 +0200
@@ -6,11 +6,9 @@
 config.py
 error.py
 search.py
-security.py
 user.py
 userform.py
 version.py
-wikiacl.py
 wikiutil.py
 wikidicts.py
 failure.py
@@ -107,6 +105,11 @@
 parser/text_rst.py
 parser/text_moin_wiki.py
 parser/text_xslt.py
+parser/ParserBase.py
+
+security/__init__.py
+security/antispam.py
+security/autoadmin.py
 
 server/__init__.py
 server/daemon.py
@@ -131,10 +134,7 @@
 theme/modern.py
 theme/rightsidebar.py
 
-util/ParserBase.py
 util/__init__.py
-util/antispam.py
-util/autoadmin.py
 util/chartypes.py
 util/dataset.py
 util/diff.py
--- a/MoinMoin/logfile/__init__.py	Wed Jun 07 14:50:19 2006 +0200
+++ b/MoinMoin/logfile/__init__.py	Sat Jun 10 16:45:05 2006 +0200
@@ -10,3 +10,409 @@
 
 logfiles = pysupport.getPackageModules(__file__)
 
+import os, codecs, errno
+from MoinMoin import config, wikiutil
+
+class LogError(Exception):
+    """ Base class for log errors """
+
+class LogMissing(LogError):
+    """ Raised when the log is missing """
+
+
+class LineBuffer:
+    """
+    Reads lines from a file
+      self.lines    list of lines (Strings) 
+      self.offsets  list of offset for each line
+    """
+    def __init__(self, file, offset, size, forward=True):
+        """
+        @param file: open file object
+        @param offset: position in file to start from
+        @param size: approximate number of bytes to read
+        @param forward: if True, read from offset onward, else from offset-size to offset
+        @type forward: boolean
+        """
+        if forward:
+            file.seek(offset)
+            self.lines = file.readlines(size)
+            self.__calculate_offsets(offset)
+        else:
+            if offset < 2 * size:
+                begin = 0
+            else:
+                begin = offset - size
+            file.seek(begin)
+            self.lines = file.read(offset-begin).splitlines(True)
+            if begin != 0:
+                begin += len(self.lines[0])
+                self.lines = self.lines[1:]
+                # XXX check for min one line read
+            self.__calculate_offsets(begin)
+
+        # Decode lines after offset in file is calculated
+        self.lines = [unicode(line, config.charset) for line in self.lines]
+        self.len = len(self.lines)
+
+    def __calculate_offsets(self, offset):
+        """
+        @param offset: offset of the first line
+        """
+        self.offsets = map(lambda x:len(x), self.lines)
+        self.offsets.append(0)
+        i = 1
+        length = len(self.offsets)
+        tmp = offset
+        while i < length:
+            result = self.offsets[i-1] + tmp
+            tmp = self.offsets[i]
+            self.offsets[i] =  result
+            i = i + 1
+        self.offsets[0] = offset
+
+
+class LogFile:
+    """
+    .filter: function that gets the values from .parser.
+       must return True to keep it or False to remove it
+    Overwrite .parser() and .add() to customize this class to
+    special log files
+    """
+    
+    def __init__(self, filename, buffer_size=65536):
+        """
+        @param filename: name of the log file
+        @param buffer_size: approx. size of one buffer in bytes
+        """
+        self.buffer_size = buffer_size
+        self.__filename = filename
+        self.filter = None
+        self.__lineno = 0
+        self.__buffer = None
+        self.__buffer1 = None
+        self.__buffer2 = None
+
+    def __iter__(self):
+        return self
+
+    def reverse(self):
+        """ @rtype: iterator
+        """
+        self.to_end()
+        while 1:
+            try:
+                result = self.previous()
+            except StopIteration:
+                return
+            yield result
+            
+    def sanityCheck(self):
+        """ Check for log file write access.
+        
+        TODO: os.access should not be used here.
+        
+        @rtype: string (error message) or None
+        """
+        if not os.access(self.__filename, os.W_OK):
+            return "The log '%s' is not writable!" % (self.__filename,)
+        return None
+
+    def __getattr__(self, name):
+        """
+        generate some attributes when needed
+        """
+        if name=="_LogFile__rel_index":
+            # starting iteration from begin
+            self.__buffer1 = LineBuffer(self._input, 0, self.buffer_size)
+            self.__buffer2 = LineBuffer(self._input,
+                                        self.__buffer1.offsets[-1],
+                                        self.buffer_size)
+            self.__buffer = self.__buffer1
+            self.__rel_index = 0
+            return 0
+        elif name == "_input":
+            try:
+                # Open the file without codecs.open, it break our offset
+                # calculation. We decode it later.
+                # Use binary mode in order to retain \r. Otherwise the offset
+                # calculation would fail
+                self._input = file(self.__filename, "rb",)
+            except IOError:
+                raise StopIteration
+            return self._input
+        elif name == "_output":
+            self._output = codecs.open(self.__filename, 'a', config.charset)
+            try:
+                os.chmod(self.__filename, 0666 & config.umask)
+            except OSError:
+                # TODO: should not ignore errors like this!
+                pass
+            return self._output
+        else:
+            raise AttributeError(name)
+
+    def size(self):
+        """ Return log size in bytes
+        
+        Return 0 if the file does not exist. Raises any other OSError.
+        
+        @return: size of log file in bytes
+        @rtype: Int
+        """
+        try:
+            return os.path.getsize(self.__filename)
+        except OSError, err:
+            if err.errno == errno.ENOENT:
+                return 0            
+            raise
+
+    def lines(self):
+        """ Return number of lines in the log file
+        
+        Return 0 if the file does not exist. Raises any other OSError.
+
+        Expensive for big log files - O(n)
+        
+        @return: size of log file in lines
+        @rtype: Int
+        """
+        try:
+            f = codecs.open(self.__filename, 'r')
+            try:
+                count = 0
+                for line in f:
+                    count += 1
+                return count
+            finally:
+                f.close()
+        except (OSError, IOError), err:
+            if err.errno == errno.ENOENT:
+                return 0
+            raise
+
+    def date(self):
+        """ Return timestamp of log file in usecs """
+        try:
+            mtime = os.path.getmtime(self.__filename)            
+        except OSError, err:
+            if err.errno == errno.ENOENT:
+                # This can happen on fresh wiki when building the index
+                # Usually the first request will create an event log
+                raise LogMissing(str(err))
+            raise
+        return wikiutil.timestamp2version(mtime)
+
+    def peek(self, lines):
+        """ What does this method do?
+
+        @param lines: number of lines, may be negative to move backward 
+            moves file position by lines.
+        @return: True if moving more than (WHAT?) to the beginning and moving
+            to the end or beyond
+        @rtype: boolean
+        peek adjusts .__lineno if set
+        This function is not aware of filters!
+        """
+        self.__rel_index = self.__rel_index + lines
+        while self.__rel_index < 0:
+            if self.__buffer == self.__buffer2:
+                # change to buffer 1
+                self.__buffer = self.__buffer1
+                self.__rel_index = self.__rel_index + self.__buffer.len
+            else:
+                if self.__buffer.offsets[0] == 0:
+                    # already at the beginning of the file
+                    # XXX
+                    self.__rel_index = 0
+                    self.__lineno = 0
+                    return True
+                else:
+                    # load previous lines
+                    self.__buffer2 = self.__buffer1
+                    self.__buffer1 = LineBuffer(self._input,
+                                                self.__buffer2.offsets[0],
+                                                self.buffer_size,
+                                                forward=False)
+                    self.__rel_index = (self.__rel_index +
+                                        self.__buffer1.len)
+                    self.__buffer = self.__buffer1
+                
+        while self.__rel_index >= self.__buffer.len:
+            if self.__buffer == self.__buffer1:
+                # change to buffer 2
+                self.__rel_index = self.__rel_index - self.__buffer.len
+                self.__buffer = self.__buffer2
+            else:
+                # try to load next buffer
+                tmpbuff = LineBuffer(self._input,
+                                     self.__buffer1.offsets[-1],
+                                     self.buffer_size)
+                if tmpbuff.len==0:
+                    # end of file
+                    if self.__lineno:
+                        self.__lineno = (self.__lineno + lines -
+                                         (self.__rel_index -
+                                          len(self.__buffer.offsets)))
+                    self.__rel_index = len(self.__buffer.offsets)
+                    return True
+                # shift buffers
+                self.__buffer1 = self.__buffer2
+                self.__buffer2 = tmpbuff                
+                self.__rel_index = self.__rel_index - self.__buffer1.len
+        if self.__lineno: self.__lineno += lines
+        return False
+
+    def __next(self):
+        """get next line already parsed"""
+        if self.peek(0):
+            raise StopIteration
+        result = self.parser(self.__buffer.lines[self.__rel_index])
+        self.peek(1)
+        return result
+
+    def next(self):
+        """
+        @return: next entry
+        raises StopIteration at file end
+        XXX It does not raise anything!
+        """
+        result = None
+        while result == None:
+            while result == None:
+                result = self.__next()
+            if self.filter and not self.filter(result):
+                result = None
+        return result
+    
+    def __previous(self):
+        if self.peek(-1): raise StopIteration
+        return self.parser(self.__buffer.lines[self.__rel_index])
+
+    def previous(self):
+        """
+        @return: previous entry and moves file position one line back
+        raises StopIteration at file begin
+        """
+        result = None
+        while result == None:
+            while result == None:
+                result = self.__previous()
+            if self.filter and not self.filter(result):
+                result = None
+        return result
+
+    def to_begin(self):
+        """moves file position to the begin"""
+        if self.__buffer1.offsets[0] != 0:
+            self.__buffer1 = LineBuffer(self._input,
+                                        0,
+                                        self.buffer_size)
+            self.__buffer2 = LineBuffer(self._input,
+                                        self.__buffer1.offsets[-1],
+                                        self.buffer_size)
+        self.__buffer = self.__buffer1
+        self.__rel_index = 0
+        self.__lineno = 0
+
+    def to_end(self):
+        """moves file position to the end"""
+        self._input.seek(0, 2) # to end of file
+        size = self._input.tell()
+        if (not self.__buffer2) or (size>self.__buffer2.offsets[-1]):
+            self.__buffer2 = LineBuffer(self._input,
+                                        size,
+                                        self.buffer_size,
+                                        forward = False)
+            
+            self.__buffer1 = LineBuffer(self._input,
+                                        self.__buffer2.offsets[0],
+                                        self.buffer_size,
+                                        forward = False)
+        self.__buffer = self.__buffer2
+        self.__rel_index = self.__buffer2.len
+        self.__lineno = None
+
+    def position(self):
+        """ Return the current file position
+        
+        This can be converted into a String using back-ticks and then
+        be rebuilt.
+        For this plain file implementation position is an Integer.
+        """
+        return self.__buffer.offsets[self.__rel_index]
+        
+    def seek(self, position, line_no=None):
+        """ moves file position to an value formerly gotten from .position().
+        To enable line counting line_no must be provided.
+        .seek is much more efficient for moving long distances than .peek.
+        raises ValueError if position is invalid
+        """
+        if self.__buffer1.offsets[0] <= position < self.__buffer1.offsets[-1]:
+            # position is in .__buffer1 
+            self.__rel_index = self.__buffer1.offsets.index(position)
+            self.__buffer = self.__buffer1
+        elif (self.__buffer2.offsets[0] <= position <
+              self.__buffer2.offsets[-1]):
+            # position is in .__buffer2
+            self.__rel_index = self.__buffer2.offsets.index(position)
+            self.__buffer = self.__buffer2
+        else:
+            # load buffers around position
+            self.__buffer1 = LineBuffer(self._input,
+                                        position,
+                                        self.buffer_size,
+                                        forward = False)
+            self.__buffer2 = LineBuffer(self._input,
+                                        position,
+                                        self.buffer_size)
+            self.__buffer = self.__buffer2
+            self.__rel_index = 0
+            # XXX test for valid position
+        self.__lineno = line_no
+
+    def line_no(self):
+        """@return: the current line number or None if line number is unknown"""
+        return self.__lineno
+    
+    def calculate_line_no(self):
+        """ Calculate the current line number from buffer offsets
+        
+        If line number is unknown it is calculated by parsing the whole file.
+        This may be expensive.
+        """
+        self._input.seek(0, 0)
+        lines = self._input.read(self.__buffer.offsets[self.__rel_index])
+        self.__lineno = len(lines.splitlines())
+        return self.__lineno
+
+    def parser(self, line):
+        """
+        @param line: line as read from file
+        @return: parsed line or None on error
+        Converts the line from file to program representation
+        This implementation uses TAB separated strings.
+        This method should be overwritten by the sub classes.
+        """
+        return line.split("\t")
+
+    def add(self, *data):
+        """
+        add line to log file
+        This implementation saves the values as TAB separated strings.
+        This method should be overwritten by the sub classes.
+        """
+        line = "\t".join(data)
+        self._add(line)
+        
+    def _add(self, line):
+        """
+        @param line: flat line
+        @type line: String
+        write one entry to the log file
+        """
+        if line != None:
+            if line[-1] != '\n':
+                line += '\n'
+            self._output.write(line)
+
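
The class is meant to be subclassed (the editlog/eventlog modules touched below import it for exactly that), but the default TAB-separated behaviour can already be exercised directly; a small sketch with an invented log path:

    # sketch: iterate over a TAB-separated log, newest entries first
    from MoinMoin.logfile import LogFile

    log = LogFile('/path/to/wiki/data/event-log')
    log.filter = lambda fields: len(fields) >= 3  # optional: keep only complete lines
    for fields in log.reverse():                  # generator, see reverse() above
        print fields                              # parser() split each line on TABs
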
--- a/MoinMoin/logfile/editlog.py	Wed Jun 07 14:50:19 2006 +0200
+++ b/MoinMoin/logfile/editlog.py	Sat Jun 10 16:45:05 2006 +0200
@@ -5,7 +5,7 @@
 """
 
 import os.path
-from logfile import LogFile
+from MoinMoin.logfile import LogFile
 from MoinMoin import wikiutil, user, config
 from MoinMoin.Page import Page
 
--- a/MoinMoin/logfile/eventlog.py	Wed Jun 07 14:50:19 2006 +0200
+++ b/MoinMoin/logfile/eventlog.py	Sat Jun 10 16:45:05 2006 +0200
@@ -5,7 +5,7 @@
 """
 
 import os.path, time
-from logfile import LogFile
+from MoinMoin.logfile import LogFile
 from MoinMoin import util, config, wikiutil
 from MoinMoin.util import web
 
--- a/MoinMoin/logfile/logfile.py	Wed Jun 07 14:50:19 2006 +0200
+++ /dev/null	Thu Jan 01 00:00:00 1970 +0000
@@ -1,412 +0,0 @@
-"""
-    MoinMoin basic log stuff
-
-    @license: GNU GPL, see COPYING for details.
-"""
-
-import os, codecs, errno
-from MoinMoin import config, wikiutil
-
-class LogError(Exception):
-    """ Base class for log errors """
-
-class LogMissing(LogError):
-    """ Raised when the log is missing """
-
-
-class LineBuffer:
-    """
-    Reads lines from a file
-      self.lines    list of lines (Strings) 
-      self.offsets  list of offset for each line
-    """
-    def __init__(self, file, offset, size, forward=True):
-        """
-        @param file: open file object
-        @param offset: position in file to start from
-        @param size: aproximate number of bytes to read
-        @param forward : read from offset on or from offset-size to offset
-        @type forward: boolean
-        """
-        if forward:
-            file.seek(offset)
-            self.lines = file.readlines(size)
-            self.__calculate_offsets(offset)
-        else:
-            if offset < 2 * size:
-                begin = 0
-            else:
-                begin = offset - size
-            file.seek(begin)
-            self.lines = file.read(offset-begin).splitlines(True)
-            if begin != 0:
-                begin += len(self.lines[0])
-                self.lines = self.lines[1:]
-                # XXX check for min one line read
-            self.__calculate_offsets(begin)
-
-        # Decode lines after offset in file is calculated
-        self.lines = [unicode(line, config.charset) for line in self.lines]
-        self.len = len(self.lines)
-
-    def __calculate_offsets(self, offset):
-        """
-        @param offset: offset of the first line
-        """
-        self.offsets = map(lambda x:len(x), self.lines)
-        self.offsets.append(0)
-        i = 1
-        length = len(self.offsets)
-        tmp = offset
-        while i < length:
-            result = self.offsets[i-1] + tmp
-            tmp = self.offsets[i]
-            self.offsets[i] =  result
-            i = i + 1
-        self.offsets[0] = offset
-
-
-class LogFile:
-    """
-    .filter: function that gets the values from .parser.
-       must return True to keep it or False to remove it
-    Overwrite .parser() and .add() to customize this class to
-    special log files
-    """
-    
-    def __init__(self, filename, buffer_size=65536):
-        """
-        @param filename: name of the log file
-        @param buffer_size: approx. size of one buffer in bytes
-        """
-        self.buffer_size = buffer_size
-        self.__filename = filename
-        self.filter = None
-        self.__lineno = 0
-        self.__buffer = None
-        self.__buffer1 = None
-        self.__buffer2 = None
-
-    def __iter__(self):
-        return self
-
-    def reverse(self):
-        """ @rtype: iterator
-        """
-        self.to_end()
-        while 1:
-            try:
-                result = self.previous()
-            except StopIteration:
-                return
-            yield result
-            
-    def sanityCheck(self):
-        """ Check for log file write access.
-        
-        TODO: os.access should not be used here.
-        
-        @rtype: string (error message) or None
-        """
-        if not os.access(self.__filename, os.W_OK):
-            return "The log '%s' is not writable!" % (self.__filename,)
-        return None
-
-    def __getattr__(self, name):
-        """
-        generate some attributes when needed
-        """
-        if name=="_LogFile__rel_index":
-            # starting iteration from begin
-            self.__buffer1 = LineBuffer(self._input, 0, self.buffer_size)
-            self.__buffer2 = LineBuffer(self._input,
-                                        self.__buffer1.offsets[-1],
-                                        self.buffer_size)
-            self.__buffer = self.__buffer1
-            self.__rel_index = 0
-            return 0
-        elif name == "_input":
-            try:
-                # Open the file without codecs.open, it break our offset
-                # calculation. We decode it later.
-                # Use binary mode in order to retain \r. Otherwise the offset
-                # calculation would fail
-                self._input = file(self.__filename, "rb",)
-            except IOError:
-                raise StopIteration
-            return self._input
-        elif name == "_output":
-            self._output = codecs.open(self.__filename, 'a', config.charset)
-            try:
-                os.chmod(self.__filename, 0666 & config.umask)
-            except OSError:
-                # TODO: should not ignore errors like this!
-                pass
-            return self._output
-        else:
-            raise AttributeError(name)
-
-    def size(self):
-        """ Return log size in bytes
-        
-        Return 0 if the file does not exists. Raises other OSError.
-        
-        @return: size of log file in bytes
-        @rtype: Int
-        """
-        try:
-            return os.path.getsize(self.__filename)
-        except OSError, err:
-            if err.errno == errno.ENOENT:
-                return 0            
-            raise
-
-    def lines(self):
-        """ Return number of lines in the log file
-        
-        Return 0 if the file does not exists. Raises other OSError.
-
-        Expensive for big log files - O(n)
-        
-        @return: size of log file in lines
-        @rtype: Int
-        """
-        try:
-            f = codecs.open(self.__filename, 'r')
-            try:
-                count = 0
-                for line in f:
-                    count += 1
-                return count
-            finally:
-                f.close()
-        except (OSError, IOError), err:
-            if err.errno == errno.ENOENT:
-                return 0
-            raise
-
-    def date(self):
-        """ Return timestamp of log file in usecs """
-        try:
-            mtime = os.path.getmtime(self.__filename)            
-        except OSError, err:
-            if err.errno == errno.ENOENT:
-                # This can happen on fresh wiki when building the index
-                # Usually the first request will create an event log
-                raise LogMissing(str(err))
-            raise
-        return wikiutil.timestamp2version(mtime)
-
-    def peek(self, lines):
-        """ What does this method do?
-
-        @param lines: number of lines, may be negative to move backward 
-            moves file position by lines.
-        @return: True if moving more than (WHAT?) to the beginning and moving
-            to the end or beyond
-        @rtype: boolean
-        peek adjusts .__lineno if set
-        This function is not aware of filters!
-        """
-        self.__rel_index = self.__rel_index + lines
-        while self.__rel_index < 0:
-            if self.__buffer == self.__buffer2:
-                # change to buffer 1
-                self.__buffer = self.__buffer1
-                self.__rel_index = self.__rel_index + self.__buffer.len
-            else:
-                if self.__buffer.offsets[0] == 0:
-                    # already at the beginning of the file
-                    # XXX
-                    self.__rel_index = 0
-                    self.__lineno = 0
-                    return True
-                else:
-                    # load previous lines
-                    self.__buffer2 = self.__buffer1
-                    self.__buffer1 = LineBuffer(self._input,
-                                                self.__buffer2.offsets[0],
-                                                self.buffer_size,
-                                                forward=False)
-                    self.__rel_index = (self.__rel_index +
-                                        self.__buffer1.len)
-                    self.__buffer = self.__buffer1
-                
-        while self.__rel_index >= self.__buffer.len:
-            if self.__buffer == self.__buffer1:
-                # change to buffer 2
-                self.__rel_index = self.__rel_index - self.__buffer.len
-                self.__buffer = self.__buffer2
-            else:
-                # try to load next buffer
-                tmpbuff = LineBuffer(self._input,
-                                     self.__buffer1.offsets[-1],
-                                     self.buffer_size)
-                if tmpbuff.len==0:
-                    # end of file
-                    if self.__lineno:
-                        self.__lineno = (self.__lineno + lines -
-                                         (self.__rel_index -
-                                          len(self.__buffer.offsets)))
-                    self.__rel_index = len(self.__buffer.offsets)
-                    return True
-                # shift buffers
-                self.__buffer1 = self.__buffer2
-                self.__buffer2 = tmpbuff                
-                self.__rel_index = self.__rel_index - self.__buffer1.len
-        if self.__lineno: self.__lineno += lines
-        return False
-
-    def __next(self):
-        """get next line already parsed"""
-        if self.peek(0):
-            raise StopIteration
-        result = self.parser(self.__buffer.lines[self.__rel_index])
-        self.peek(1)
-        return result
-
-    def next(self):
-        """
-        @return: next entry
-        raises StopIteration at file end
-        XXX It does not raise anything!
-        """
-        result = None
-        while result == None:
-            while result == None:
-                result = self.__next()
-            if self.filter and not self.filter(result):
-                result = None
-        return result
-    
-    def __previous(self):
-        if self.peek(-1): raise StopIteration
-        return self.parser(self.__buffer.lines[self.__rel_index])
-
-    def previous(self):
-        """
-        @return: previous entry and moves file position one line back
-        raises StopIteration at file begin
-        """
-        result = None
-        while result == None:
-            while result == None:
-                result = self.__previous()
-            if self.filter and not self.filter(result):
-                result = None
-        return result
-
-    def to_begin(self):
-        """moves file position to the begin"""
-        if self.__buffer1.offsets[0] != 0:
-            self.__buffer1 = LineBuffer(self._input,
-                                        0,
-                                        self.buffer_size)
-            self.__buffer2 = LineBuffer(self._input,
-                                        self.__buffer1.offsets[-1],
-                                        self.buffer_size)
-        self.__buffer = self.__buffer1
-        self.__rel_index = 0
-        self.__lineno = 0
-
-    def to_end(self):
-        """moves file position to the end"""
-        self._input.seek(0, 2) # to end of file
-        size = self._input.tell()
-        if (not self.__buffer2) or (size>self.__buffer2.offsets[-1]):
-            self.__buffer2 = LineBuffer(self._input,
-                                        size,
-                                        self.buffer_size,
-                                        forward = False)
-            
-            self.__buffer1 = LineBuffer(self._input,
-                                        self.__buffer2.offsets[0],
-                                        self.buffer_size,
-                                        forward = False)
-        self.__buffer = self.__buffer2
-        self.__rel_index = self.__buffer2.len
-        self.__lineno = None
-
-    def position(self):
-        """ Return the current file position
-        
-        This can be converted into a String using back-ticks and then
-        be rebuild.
-        For this plain file implementation position is an Integer.
-        """
-        return self.__buffer.offsets[self.__rel_index]
-        
-    def seek(self, position, line_no=None):
-        """ moves file position to an value formerly gotten from .position().
-        To enable line counting line_no must be provided.
-        .seek is much more efficient for moving long distances than .peek.
-        raises ValueError if position is invalid
-        """
-        if self.__buffer1.offsets[0] <= position < self.__buffer1.offsets[-1]:
-            # position is in .__buffer1 
-            self.__rel_index = self.__buffer1.offsets.index(position)
-            self.__buffer = self.__buffer1
-        elif (self.__buffer2.offsets[0] <= position <
-              self.__buffer2.offsets[-1]):
-            # position is in .__buffer2
-            self.__rel_index = self.__buffer2.offsets.index(position)
-            self.__buffer = self.__buffer2
-        else:
-            # load buffers around position
-            self.__buffer1 = LineBuffer(self._input,
-                                        position,
-                                        self.buffer_size,
-                                        forward = False)
-            self.__buffer2 = LineBuffer(self._input,
-                                        position,
-                                        self.buffer_size)
-            self.__buffer = self.__buffer2
-            self.__rel_index = 0
-            # XXX test for valid position
-        self.__lineno = line_no
-
-    def line_no(self):
-        """@return: the current line number or None if line number is unknown"""
-        return self.__lineno
-    
-    def calculate_line_no(self):
-        """ Calculate the current line number from buffer offsets
-        
-        If line number is unknown it is calculated by parsing the whole file.
-        This may be expensive.
-        """
-        self._input.seek(0, 0)
-        lines = self._input.read(self.__buffer.offsets[self.__rel_index])
-        self.__lineno = len(lines.splitlines())
-        return self.__lineno
-
-    def parser(self, line):
-        """
-        @param line: line as read from file
-        @return: parsed line or None on error
-        Converts the line from file to program representation
-        This implementation uses TAB separated strings.
-        This method should be overwritten by the sub classes.
-        """
-        return line.split("\t")
-
-    def add(self, *data):
-        """
-        add line to log file
-        This implementation save the values as TAB separated strings.
-        This method should be overwritten by the sub classes.
-        """
-        line = "\t".join(data)
-        self._add(line)
-        
-    def _add(self, line):
-        """
-        @param line: flat line
-        @type line: String
-        write on entry in the log file
-        """
-        if line != None:
-            if line[-1] != '\n':
-                line += '\n'
-            self._output.write(line)
-
--- a/MoinMoin/macro/PageHits.py	Wed Jun 07 14:50:19 2006 +0200
+++ b/MoinMoin/macro/PageHits.py	Sat Jun 10 16:45:05 2006 +0200
@@ -15,9 +15,9 @@
 # Set pickle protocol, see http://docs.python.org/lib/node64.html
 PICKLE_PROTOCOL = pickle.HIGHEST_PROTOCOL
 
-from MoinMoin import caching, config
+from MoinMoin import caching, config, logfile
 from MoinMoin.Page import Page
-from MoinMoin.logfile import eventlog, logfile
+from MoinMoin.logfile import eventlog
 
 
 class PageHits:
--- a/MoinMoin/macro/__init__.py	Wed Jun 07 14:50:19 2006 +0200
+++ b/MoinMoin/macro/__init__.py	Sat Jun 10 16:45:05 2006 +0200
@@ -464,7 +464,7 @@
         return self.formatter.anchordef(args or "anchor")
 
     def _macro_MailTo(self, args):
-        from MoinMoin.util.mail import decodeSpamSafeEmail
+        from MoinMoin.mail.sendmail import decodeSpamSafeEmail
 
         args = args or ''
         if args.find(',') == -1:
--- /dev/null	Thu Jan 01 00:00:00 1970 +0000
+++ b/MoinMoin/mail/__init__.py	Sat Jun 10 16:45:05 2006 +0200
@@ -0,0 +1,10 @@
+# -*- coding: iso-8859-1 -*-
+"""
+    MoinMoin - Package Initialization
+
+    Subpackage containing e-mail support code.
+
+    @copyright: 2006 by MoinMoin:ThomasWaldmann
+    @license: GNU GPL, see COPYING for details.
+"""
+
--- /dev/null	Thu Jan 01 00:00:00 1970 +0000
+++ b/MoinMoin/mail/mailimport.py	Sat Jun 10 16:45:05 2006 +0200
@@ -0,0 +1,270 @@
+"""
+    MoinMoin - E-Mail Import into wiki
+    
+    Just call this script with the URL of the wiki as a single argument
+    and feed the mail into stdin.
+
+    @copyright: 2006 by MoinMoin:AlexanderSchremmer
+    @license: GNU GPL, see COPYING for details.
+"""
+
+import os, sys, re, time
+import email
+from email.Utils import parseaddr, parsedate_tz, mktime_tz
+
+from MoinMoin import user, wikiutil, config
+from MoinMoin.action.AttachFile import add_attachment, AttachmentAlreadyExists
+from MoinMoin.Page import Page
+from MoinMoin.PageEditor import PageEditor
+from MoinMoin.request.CLI import Request as RequestCLI
+# python, at least up to 2.4, ships a broken parser for headers
+from MoinMoin.support.HeaderFixed import decode_header
+
+input = sys.stdin
+
+debug = False
+
+re_subject = re.compile(r"\[([^\]]*)\]")
+re_sigstrip = re.compile("\r?\n-- \r?\n.*$", re.S)
+
+class attachment(object):
+    """ Represents an attachment of a mail. """
+    def __init__(self, filename, mimetype, data):
+        self.filename = filename
+        self.mimetype = mimetype
+        self.data = data
+    
+    def __repr__(self):
+        return "<attachment filename=%r mimetype=%r size=%i bytes>" % (
+            self.filename, self.mimetype, len(self.data))
+
+class ProcessingError(Exception):
+    pass
+
+def log(text):
+    if debug:
+        print >>sys.stderr, text
+
+def decode_2044(header):
+    """ Decodes header field. See RFC 2044. """
+    chunks = decode_header(header)
+    chunks_decoded = []
+    for i in chunks:
+        chunks_decoded.append(i[0].decode(i[1] or 'ascii'))
+    return u''.join(chunks_decoded).strip()
+
+def process_message(message):
+    """ Processes the read message and decodes attachments. """
+    attachments = []
+    html_data = []
+    text_data = []
+   
+    to_addr = parseaddr(decode_2044(message['To']))
+    from_addr = parseaddr(decode_2044(message['From']))
+    cc_addr = parseaddr(decode_2044(message['Cc']))
+    bcc_addr = parseaddr(decode_2044(message['Bcc']))
+    
+    subject = decode_2044(message['Subject'])
+    date = time.strftime("%Y-%m-%dT%H:%M:%S", time.gmtime(mktime_tz(parsedate_tz(message['Date']))))
+    
+    log("Processing mail:\n To: %r\n From: %r\n Subject: %r" % (to_addr, from_addr, subject))
+    
+    for part in message.walk():
+        log(" Part " + repr((part.get_charsets(), part.get_content_charset(), part.get_content_type(), part.is_multipart(), )))
+        ct = part.get_content_type()
+        cs = part.get_content_charset() or "latin1"
+        payload = part.get_payload(None, True)
+    
+        fn = part.get_filename()
+        if fn is not None and fn.startswith("=?"): # heuristics ...
+            fn = decode_2044(fn)
+            
+        if fn is None and part["Content-Disposition"] is not None and "attachment" in part["Content-Disposition"]:
+            # this doesn't catch the case where there is no content-disposition but there is still a file to offer to the user;
+            # hopefully that only occurs in mails older than 10 years,
+            # so it is not handled here
+            fn = part["Content-Description"] or "NoName"
+        if fn:
+            a = attachment(fn, ct, payload)
+            attachments.append(a)
+        else:
+            if ct == 'text/plain':
+                text_data.append(payload.decode(cs))
+                log(repr(payload.decode(cs)))
+            elif ct == 'text/html':
+                html_data.append(payload.decode(cs))
+            elif not part.is_multipart():
+                log("Unknown mail part " + repr((part.get_charsets(), part.get_content_charset(), part.get_content_type(), part.is_multipart(), )))
+
+    return {'text': u"".join(text_data), 'html': u"".join(html_data),
+            'attachments': attachments,
+            'to_addr': to_addr, 'from_addr': from_addr, 'cc_addr': cc_addr, 'bcc_addr': bcc_addr,
+            'subject': subject, 'date': date}
+
+def get_pagename_content(msg, email_subpage_template, wiki_address):
+    """ Generates pagename and content according to the specification
+        that can be found on MoinMoin:FeatureRequests/WikiEmailintegration """
+
+    generate_summary = False
+    choose_html = True
+    
+    pagename_tpl = ""
+    for addr in ('to_addr', 'cc_addr', 'bcc_addr'):
+        if msg[addr][1].strip().lower() == wiki_address:
+            pagename_tpl = msg[addr][0]
+
+    if not pagename_tpl:
+        m = re_subject.match(msg['subject'])
+        if m:
+            pagename_tpl = m.group(1)
+    else:
+        # special fix for outlook users :-)
+        if pagename_tpl[-1] == pagename_tpl[0] == "'":
+            pagename_tpl = pagename_tpl[1:-1]
+    
+    if pagename_tpl.endswith("/"):
+        pagename_tpl += email_subpage_template
+
+    # last resort
+    if not pagename_tpl:
+        pagename_tpl = email_subpage_template
+
+    # rewrite using string.formatter when python 2.4 is mandatory
+    pagename = (pagename_tpl.replace("$from", msg['from_addr'][0]).
+                replace("$date", msg['date']).
+                replace("$subject", msg['subject']))
+
+    if pagename.startswith("+ ") and "/" in pagename:
+        generate_summary = True
+        pagename = pagename[1:].lstrip()
+
+    if choose_html and msg['html']:
+        content = "{{{#!html\n%s\n}}}" % msg['html'].replace("}}}", "} } }")
+    else:
+        # strip signatures ...
+        content = re_sigstrip.sub("", msg['text'])
+
+    return {'pagename': pagename, 'content': content, 'generate_summary': generate_summary}
+
+def import_mail_from_string(request, string):
+    """ Reads an RFC 822 compliant message from a string and imports it
+        to the wiki. """
+    return import_mail_from_message(request, email.message_from_string(string))
+
+def import_mail_from_file(request, input):
+    """ Reads an RFC 822 compliant message from the file `input` and imports it to
+        the wiki. """
+
+    return import_mail_from_message(request, email.message_from_file(input))
+
+def import_mail_from_message(request, message):
+    """ Reads a message generated by the email package and imports it
+        to the wiki. """
+    msg = process_message(message)
+
+    email_subpage_template = request.cfg.mail_import_subpage_template
+    wiki_address = request.cfg.mail_import_wiki_address or request.cfg.mail_from
+
+    request.user = user.get_by_email_address(request, msg['from_addr'][1])
+    
+    if not request.user:
+        raise ProcessingError("No suitable user found for mail address %r" % (msg['from_addr'][1], ))
+
+    d = get_pagename_content(msg, email_subpage_template, wiki_address)
+    pagename = d['pagename']
+    generate_summary = d['generate_summary']
+
+    comment = u"Mail: '%s'" % (msg['subject'], )
+    
+    page = PageEditor(request, pagename, do_editor_backup=0)
+    
+    if not request.user.may.save(page, "", 0):
+        raise ProcessingError("Access denied for page %r" % pagename)
+
+    attachments = []
+    
+    for att in msg['attachments']:
+        i = 0
+        while 1:
+            if i == 0:
+                fname = att.filename
+            else:
+                components = att.filename.split(".")
+                new_suffix = "-" + str(i)
+                # add the counter before the file extension
+                if len(components) > 1:
+                    fname = u"%s%s.%s" % (u".".join(components[:-1]), new_suffix, components[-1])
+                else:
+                    fname = att.filename + new_suffix
+            try:
+                # get the fname again, it might have changed
+                fname = add_attachment(request, pagename, fname, att.data)
+                attachments.append(fname)
+            except AttachmentAlreadyExists:
+                i += 1
+            else:
+                break
+
+    # build an attachment link table for the page with the e-mail
+    escape_link = lambda x: x.replace(" ", "%20")
+    attachment_links = [""] + [u"[attachment:%s attachment:%s]" % tuple([escape_link(u"%s/%s" % (pagename, x))] * 2) for x in attachments]
+
+    # assemble old page content and new mail body together
+    old_content = Page(request, pagename).get_raw_body()
+    if old_content:
+        new_content = u"%s\n-----\n%s" % (old_content, d['content'], )
+    else:
+        new_content = d['content']
+    new_content += "\n" + u"\n * ".join(attachment_links)
+
+    try:
+        page.saveText(new_content, 0, comment=comment)
+    except page.AccessDenied:
+        raise ProcessingError("Access denied for page %r" % pagename)
+    
+    if generate_summary and "/" in pagename:
+        parent_page = u"/".join(pagename.split("/")[:-1])
+        old_content = Page(request, parent_page).get_raw_body().splitlines()
+        
+        found_table = None
+        table_ends = None
+        for lineno, line in enumerate(old_content):
+            if line.startswith("## mail_overview") and old_content[lineno+1].startswith("||"):
+                found_table = lineno
+            elif found_table is not None and line.startswith("||"):
+                table_ends = lineno + 1
+            elif table_ends is not None and not line.startswith("||"):
+                break
+        
+        table_header = (u"\n\n## mail_overview (don't delete this line)\n" +
+                        u"|| '''[[GetText(From)]] ''' || '''[[GetText(To)]] ''' || '''[[GetText(Subject)]] ''' || '''[[GetText(Date)]] ''' || '''[[GetText(Link)]] ''' || '''[[GetText(Attachments)]] ''' ||\n"
+                       )
+        new_line = u'|| %s || %s || %s || [[DateTime(%s)]] || ["%s"] || %s ||' % (
+            msg['from_addr'][0] or msg['from_addr'][1],
+            msg['to_addr'][0] or msg['to_addr'][1],
+            msg['subject'],
+            msg['date'],
+            pagename,
+            " ".join(attachment_links),
+            )
+        if found_table is not None:
+            content = "\n".join(old_content[:table_ends] + [new_line] + old_content[table_ends:])
+        else:
+            content = "\n".join(old_content) + table_header + new_line
+
+        page = PageEditor(request, parent_page, do_editor_backup=0)
+        page.saveText(content, 0, comment=comment)
+
+if __name__ == "__main__":
+    if len(sys.argv) > 1:
+        url = sys.argv[1]
+    else:
+        url = 'localhost/'
+
+    request = RequestCLI(url=url)
+
+    try:
+        import_mail_from_file(request, input)
+    except ProcessingError, e:
+        print >>sys.stderr, "An error occurred while processing the message:", e.args
+
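
A rough sketch of how this importer is meant to be driven; the wiki URL and file name are placeholders. The script reads one RFC 822 message from stdin, or the functions can be called directly:

    # as a mail filter (hypothetical alias entry):
    #   mywiki: "|/path/to/MoinMoin/mail/mailimport.py http://localhost/mywiki/"
    #
    # or from Python:
    from MoinMoin.request.CLI import Request as RequestCLI
    from MoinMoin.mail import mailimport

    request = RequestCLI(url='localhost/mywiki/')
    raw_message = open('mail.eml').read()   # an RFC 822 message as a string
    mailimport.import_mail_from_string(request, raw_message)
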
--- /dev/null	Thu Jan 01 00:00:00 1970 +0000
+++ b/MoinMoin/mail/sendmail.py	Sat Jun 10 16:45:05 2006 +0200
@@ -0,0 +1,173 @@
+# -*- coding: iso-8859-1 -*-
+"""
+    MoinMoin - email helper functions
+
+    @copyright: 2003 by Jürgen Hermann <jh@web.de>
+    @license: GNU GPL, see COPYING for details.
+"""
+
+import os, re
+from email.Header import Header
+from MoinMoin import config
+
+_transdict = {"AT": "@", "DOT": ".", "DASH": "-"}
+
+
+def encodeAddress(address, charset):
+    """ Encode email address to enable non-ASCII names
+    
+    e.g. '"Jürgen Hermann" <jh@web.de>'. According to the RFC, the name
+    part should be encoded, the address should not.
+    
+    @param address: email address, possibly using '"name" <address>' format
+    @type address: unicode
+    @param charset: specifying both the charset and the encoding, e.g.
+        quoted printable or base64.
+    @type charset: email.Charset.Charset instance
+    @rtype: string
+    @return: encoded address
+    """   
+    composite = re.compile(r'(?P<phrase>.+)(?P<angle_addr>\<.*\>)', 
+                           re.UNICODE)
+    match = composite.match(address)
+    if match:
+        phrase = match.group('phrase').encode(config.charset)
+        phrase = str(Header(phrase, charset))
+        angle_addr = match.group('angle_addr').encode(config.charset)       
+        return phrase + angle_addr
+    else:
+        return address.encode(config.charset)
+
+
+def sendmail(request, to, subject, text, **kw):
+    """ Create and send a text/plain message
+        
+    Return a tuple of success or error indicator and message.
+    
+    @param request: the request object
+    @param to: recipients (list)
+    @param subject: subject of email (unicode)
+    @param text: email body text (unicode)
+    @keyword mail_from: override default mail_from
+    @type mail_from: unicode
+    @rtype: tuple
+    @return: (is_ok, Description of error or OK message)
+    """
+    import smtplib, socket
+    from email.Message import Message
+    from email.Charset import Charset, QP
+    from email.Utils import formatdate, make_msgid
+
+    _ = request.getText
+    cfg = request.cfg    
+    mail_from = kw.get('mail_from', '') or cfg.mail_from
+    subject = subject.encode(config.charset)    
+
+    # Create a text/plain body using CRLF (see RFC2822)
+    text = text.replace(u'\n', u'\r\n')
+    text = text.encode(config.charset)
+
+    # Create a message using config.charset and quoted printable
+    # encoding, which should be supported better by mail clients.
+    # TODO: check if it really works better for major mail clients
+    msg = Message()
+    charset = Charset(config.charset)
+    charset.header_encoding = QP
+    charset.body_encoding = QP
+    msg.set_charset(charset)    
+    msg.set_payload(charset.body_encode(text))
+    
+    # Create message headers
+    # Don't expose the email addresses of the other subscribers; instead we
+    # use the same mail_from, e.g. u"Jürgen Wiki <noreply@mywiki.org>"
+    address = encodeAddress(mail_from, charset) 
+    msg['From'] = address
+    msg['To'] = address
+    msg['Date'] = formatdate()
+    msg['Message-ID'] = make_msgid()
+    msg['Subject'] = Header(subject, charset)
+    
+    if cfg.mail_sendmail:
+        # Set the BCC.  This will be stripped later by sendmail.
+        msg['BCC'] = ','.join(to)
+        # Set Return-Path so that it isn't set (generally incorrectly) for us.
+        msg['Return-Path'] = address
+
+    # Send the message
+    if not cfg.mail_sendmail:
+        try:
+            host, port = (cfg.mail_smarthost + ':25').split(':')[:2]
+            server = smtplib.SMTP(host, int(port))
+            try:
+                #server.set_debuglevel(1)
+                if cfg.mail_login:
+                    user, pwd = cfg.mail_login.split()
+                    try: # try to do tls
+                        server.ehlo()
+                        if server.has_extn('starttls'):
+                            server.starttls()
+                            server.ehlo()
+                    except:
+                        pass
+                    server.login(user, pwd)
+                server.sendmail(mail_from, to, msg.as_string())
+            finally:
+                try:
+                    server.quit()
+                except AttributeError:
+                    # in case the connection failed, SMTP has no "sock" attribute
+                    pass
+        except smtplib.SMTPException, e:
+            return (0, str(e))
+        except (os.error, socket.error), e:
+            return (0, _("Connection to mailserver '%(server)s' failed: %(reason)s") % {
+                'server': cfg.mail_smarthost, 
+                'reason': str(e)
+            })
+    else:
+        try:
+            sendmailp = os.popen(cfg.mail_sendmail, "w") 
+            # msg contains everything we need, so this is a simple write
+            sendmailp.write(msg.as_string())
+            sendmail_status = sendmailp.close()
+            if sendmail_status:
+                return (0, str(sendmail_status))
+        except:
+            return (0, _("Mail not sent"))
+
+    return (1, _("Mail sent OK"))
+
+
+def decodeSpamSafeEmail(address):
+    """ Decode obfuscated email address to standard email address
+
+    Decode a spam-safe email address in `address` by applying the
+    following rules:
+    
+    Known all-uppercase words and their translation:
+        "DOT"   -> "."
+        "AT"    -> "@"
+        "DASH"  -> "-"
+
+    Any unknown all-uppercase words simply get stripped.
+    Use that to make it even harder for spam bots!
+
+    Blanks (spaces) simply get stripped.
+    
+    @param address: obfuscated email address string
+    @rtype: string
+    @return: decoded email address
+    """
+    email = []
+
+    # words are separated by blanks
+    for word in address.split():
+        # is it all-uppercase?
+        if word.isalpha() and word == word.upper():
+            # strip unknown CAPS words
+            word = _transdict.get(word, '')
+        email.append(word)
+
+    # return concatenated parts
+    return ''.join(email)
+
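
For reference, a minimal usage sketch of the two relocated helpers; the request object is assumed to exist already and the addresses are made up:

    from MoinMoin.mail.sendmail import sendmail, decodeSpamSafeEmail

    # returns (1, "Mail sent OK") on success or (0, error description)
    ok, msg = sendmail(request, [u'jh@web.de'],
                       u'Test subject', u'Test body\n')

    # obfuscated address as used by the MailTo macro;
    # unknown all-uppercase words such as NOSPAM are stripped
    decodeSpamSafeEmail(u'jh AT web DOT de NOSPAM')   # -> u'jh@web.de'
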
--- a/MoinMoin/mailimport.py	Wed Jun 07 14:50:19 2006 +0200
+++ /dev/null	Thu Jan 01 00:00:00 1970 +0000
@@ -1,270 +0,0 @@
-"""
-    MoinMoin - E-Mail Import
-    
-    Just call this script with the URL of the wiki as a single argument
-    and feed the mail into stdin.
-
-    @copyright: 2006 by MoinMoin:AlexanderSchremmer
-    @license: GNU GPL, see COPYING for details.
-"""
-
-import os, sys, re, time
-import email
-from email.Utils import parseaddr, parsedate_tz, mktime_tz
-
-from MoinMoin import user, wikiutil, config
-from MoinMoin.action.AttachFile import add_attachment, AttachmentAlreadyExists
-from MoinMoin.Page import Page
-from MoinMoin.PageEditor import PageEditor
-from MoinMoin.request.CLI import Request as RequestCLI
-# python, at least up to 2.4, ships a broken parser for headers
-from MoinMoin.support.HeaderFixed import decode_header
-
-input = sys.stdin
-
-debug = False
-
-re_subject = re.compile(r"\[([^\]]*)\]")
-re_sigstrip = re.compile("\r?\n-- \r?\n.*$", re.S)
-
-class attachment(object):
-    """ Represents an attachment of a mail. """
-    def __init__(self, filename, mimetype, data):
-        self.filename = filename
-        self.mimetype = mimetype
-        self.data = data
-    
-    def __repr__(self):
-        return "<attachment filename=%r mimetype=%r size=%i bytes>" % (
-            self.filename, self.mimetype, len(self.data))
-
-class ProcessingError(Exception):
-    pass
-
-def log(text):
-    if debug:
-        print >>sys.stderr, text
-
-def decode_2044(header):
-    """ Decodes header field. See RFC 2044. """
-    chunks = decode_header(header)
-    chunks_decoded = []
-    for i in chunks:
-        chunks_decoded.append(i[0].decode(i[1] or 'ascii'))
-    return u''.join(chunks_decoded).strip()
-
-def process_message(message):
-    """ Processes the read message and decodes attachments. """
-    attachments = []
-    html_data = []
-    text_data = []
-   
-    to_addr = parseaddr(decode_2044(message['To']))
-    from_addr = parseaddr(decode_2044(message['From']))
-    cc_addr = parseaddr(decode_2044(message['Cc']))
-    bcc_addr = parseaddr(decode_2044(message['Bcc']))
-    
-    subject = decode_2044(message['Subject'])
-    date = time.strftime("%Y-%m-%dT%H:%M:%S", time.gmtime(mktime_tz(parsedate_tz(message['Date']))))
-    
-    log("Processing mail:\n To: %r\n From: %r\n Subject: %r" % (to_addr, from_addr, subject))
-    
-    for part in message.walk():
-        log(" Part " + repr((part.get_charsets(), part.get_content_charset(), part.get_content_type(), part.is_multipart(), )))
-        ct = part.get_content_type()
-        cs = part.get_content_charset() or "latin1"
-        payload = part.get_payload(None, True)
-    
-        fn = part.get_filename()
-        if fn is not None and fn.startswith("=?"): # heuristics ...
-            fn = decode_2044(fn)
-            
-        if fn is None and part["Content-Disposition"] is not None and "attachment" in part["Content-Disposition"]:
-            # this doesn't catch the case where there is no content-disposition but there is a file to offer to the user
-            # i hope that this can be only found in mails that are older than 10 years,
-            # so I won't care about it here
-            fn = part["Content-Description"] or "NoName"
-        if fn:
-            a = attachment(fn, ct, payload)
-            attachments.append(a)
-        else:
-            if ct == 'text/plain':
-                text_data.append(payload.decode(cs))
-                log(repr(payload.decode(cs)))
-            elif ct == 'text/html':
-                html_data.append(payload.decode(cs))
-            elif not part.is_multipart():
-                log("Unknown mail part " + repr((part.get_charsets(), part.get_content_charset(), part.get_content_type(), part.is_multipart(), )))
-
-    return {'text': u"".join(text_data), 'html': u"".join(html_data),
-            'attachments': attachments,
-            'to_addr': to_addr, 'from_addr': from_addr, 'cc_addr': cc_addr, 'bcc_addr': bcc_addr,
-            'subject': subject, 'date': date}
-
-def get_pagename_content(msg, email_subpage_template, wiki_address):
-    """ Generates pagename and content according to the specification
-        that can be found on MoinMoin:FeatureRequests/WikiEmailintegration """
-
-    generate_summary = False
-    choose_html = True
-    
-    pagename_tpl = ""
-    for addr in ('to_addr', 'cc_addr', 'bcc_addr'):
-        if msg[addr][1].strip().lower() == wiki_address:
-            pagename_tpl = msg[addr][0]
-
-    if not pagename_tpl:
-        m = re_subject.match(msg['subject'])
-        if m:
-            pagename_tpl = m.group(1)
-    else:
-        # special fix for outlook users :-)
-        if pagename_tpl[-1] == pagename_tpl[0] == "'":
-            pagename_tpl = pagename_tpl[1:-1]
-    
-    if pagename_tpl.endswith("/"):
-        pagename_tpl += email_subpage_template
-
-    # last resort
-    if not pagename_tpl:
-        pagename_tpl = email_subpage_template
-
-    # rewrite using string.formatter when python 2.4 is mandatory
-    pagename = (pagename_tpl.replace("$from", msg['from_addr'][0]).
-                replace("$date", msg['date']).
-                replace("$subject", msg['subject']))
-
-    if pagename.startswith("+ ") and "/" in pagename:
-        generate_summary = True
-        pagename = pagename[1:].lstrip()
-
-    if choose_html and msg['html']:
-        content = "{{{#!html\n%s\n}}}" % msg['html'].replace("}}}", "} } }")
-    else:
-        # strip signatures ...
-        content = re_sigstrip.sub("", msg['text'])
-
-    return {'pagename': pagename, 'content': content, 'generate_summary': generate_summary}
-
-def import_mail_from_string(request, string):
-    """ Reads an RFC 822 compliant message from a string and imports it
-        to the wiki. """
-    return import_mail_from_message(request, email.message_from_string(string))
-
-def import_mail_from_file(request, input):
-    """ Reads an RFC 822 compliant message from the file `input` and imports it to
-        the wiki. """
-
-    return import_mail_from_message(request, email.message_from_file(input))
-
-def import_mail_from_message(request, message):
-    """ Reads a message generated by the email package and imports it
-        to the wiki. """
-    msg = process_message(message)
-
-    email_subpage_template = request.cfg.mail_import_subpage_template
-    wiki_address = request.cfg.mail_import_wiki_address or request.cfg.mail_from
-
-    request.user = user.get_by_email_address(request, msg['from_addr'][1])
-    
-    if not request.user:
-        raise ProcessingError("No suitable user found for mail address %r" % (msg['from_addr'][1], ))
-
-    d = get_pagename_content(msg, email_subpage_template, wiki_address)
-    pagename = d['pagename']
-    generate_summary = d['generate_summary']
-
-    comment = u"Mail: '%s'" % (msg['subject'], )
-    
-    page = PageEditor(request, pagename, do_editor_backup=0)
-    
-    if not request.user.may.save(page, "", 0):
-        raise ProcessingError("Access denied for page %r" % pagename)
-
-    attachments = []
-    
-    for att in msg['attachments']:
-        i = 0
-        while 1:
-            if i == 0:
-                fname = att.filename
-            else:
-                components = att.filename.split(".")
-                new_suffix = "-" + str(i)
-                # add the counter before the file extension
-                if len(components) > 1:
-                    fname = u"%s%s.%s" % (u".".join(components[:-1]), new_suffix, components[-1])
-                else:
-                    fname = att.filename + new_suffix
-            try:
-                # get the fname again, it might have changed
-                fname = add_attachment(request, pagename, fname, att.data)
-                attachments.append(fname)
-            except AttachmentAlreadyExists:
-                i += 1
-            else:
-                break
-
-    # build an attachment link table for the page with the e-mail
-    escape_link = lambda x: x.replace(" ", "%20")
-    attachment_links = [""] + [u"[attachment:%s attachment:%s]" % tuple([escape_link(u"%s/%s" % (pagename, x))] * 2) for x in attachments]
-
-    # assemble old page content and new mail body together
-    old_content = Page(request, pagename).get_raw_body()
-    if old_content:
-        new_content = u"%s\n-----\n%s" % (old_content, d['content'], )
-    else:
-        new_content = d['content']
-    new_content += "\n" + u"\n * ".join(attachment_links)
-
-    try:
-        page.saveText(new_content, 0, comment=comment)
-    except page.AccessDenied:
-        raise ProcessingError("Access denied for page %r" % pagename)
-    
-    if generate_summary and "/" in pagename:
-        parent_page = u"/".join(pagename.split("/")[:-1])
-        old_content = Page(request, parent_page).get_raw_body().splitlines()
-        
-        found_table = None
-        table_ends = None
-        for lineno, line in enumerate(old_content):
-            if line.startswith("## mail_overview") and old_content[lineno+1].startswith("||"):
-                found_table = lineno
-            elif found_table is not None and line.startswith("||"):
-                table_ends = lineno + 1
-            elif table_ends is not None and not line.startswith("||"):
-                break
-        
-        table_header = (u"\n\n## mail_overview (don't delete this line)\n" +
-                        u"|| '''[[GetText(From)]] ''' || '''[[GetText(To)]] ''' || '''[[GetText(Subject)]] ''' || '''[[GetText(Date)]] ''' || '''[[GetText(Link)]] ''' || '''[[GetText(Attachments)]] ''' ||\n"
-                       )
-        new_line = u'|| %s || %s || %s || [[DateTime(%s)]] || ["%s"] || %s ||' % (
-            msg['from_addr'][0] or msg['from_addr'][1],
-            msg['to_addr'][0] or msg['to_addr'][1],
-            msg['subject'],
-            msg['date'],
-            pagename,
-            " ".join(attachment_links),
-            )
-        if found_table is not None:
-            content = "\n".join(old_content[:table_ends] + [new_line] + old_content[table_ends:])
-        else:
-            content = "\n".join(old_content) + table_header + new_line
-
-        page = PageEditor(request, parent_page, do_editor_backup=0)
-        page.saveText(content, 0, comment=comment)
-
-if __name__ == "__main__":
-    if len(sys.argv) > 1:
-        url = sys.argv[1]
-    else:
-        url = 'localhost/'
-
-    request = RequestCLI(url=url)
-
-    try:
-        import_mail_from_file(request, input)
-    except ProcessingError, e:
-        print >>sys.stderr, "An error occured while processing the message:", e.args
-
--- a/MoinMoin/multiconfig.py	Wed Jun 07 14:50:19 2006 +0200
+++ b/MoinMoin/multiconfig.py	Sat Jun 10 16:45:05 2006 +0200
@@ -2,7 +2,7 @@
 """
     MoinMoin - Multiple configuration handler and Configuration defaults class
 
-    @copyright: 2000-2004 by Jrgen Hermann <jh@web.de>
+    @copyright: 2000-2004 by Jürgen Hermann <jh@web.de>
     @license: GNU GPL, see COPYING for details.
 """
 
@@ -173,7 +173,7 @@
     actions_excluded = [] # ['DeletePage', 'AttachFile', 'RenamePage', 'test', ]
     allow_xslt = 0
     attachments = None # {'dir': path, 'url': url-prefix}
-    auth = [authmodule.moin_cookie]
+    auth = [authmodule.moin_login, authmodule.moin_session,]
     
     backup_compression = 'gz'
     backup_users = []
@@ -198,9 +198,11 @@
     chart_options = None
     
     config_check_enabled = 0
+
     cookie_domain = None # use '.domain.tld' for a farm with hosts in that domain
     cookie_path = None   # use '/wikifarm' for a farm with paths below that path
     cookie_lifetime = 12 # 12 hours from now
+    cookie_secret = '1234' # secret value for encrypting the session cookie - you should change this :)
     
     data_dir = './data/'
     data_underlay_dir = './underlay/'
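
The new defaults replace the single moin_cookie auth method with the login/session pair and introduce cookie_secret. A wiki would typically override these in its wikiconfig; a hedged sketch assuming the usual DefaultConfig subclassing, with placeholder values:

    from MoinMoin.multiconfig import DefaultConfig
    from MoinMoin import auth as authmodule

    class Config(DefaultConfig):
        sitename = u'My Wiki'                       # placeholder
        data_dir = './data/'
        data_underlay_dir = './underlay/'

        # new-style auth chain (previously [authmodule.moin_cookie])
        auth = [authmodule.moin_login, authmodule.moin_session]
        cookie_lifetime = 12                        # hours
        cookie_secret = 'change-this-to-something-random-and-private'
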
--- /dev/null	Thu Jan 01 00:00:00 1970 +0000
+++ b/MoinMoin/parser/ParserBase.py	Sat Jun 10 16:45:05 2006 +0200
@@ -0,0 +1,270 @@
+# -*- coding: iso-8859-1 -*-
+"""
+    MoinMoin - Base Source Parser
+
+    @copyright: 2002 by Taesu Pyo <bigflood@hitel.net>
+    @license: GNU GPL, see COPYING for details.
+
+    Docstrings and some refactoring by Oliver Graf <ograf@bitart.de>
+
+basic css:
+
+pre.codearea     { font-style: sans-serif; color: #000000; }
+
+pre.codearea span.ID       { color: #000000; }
+pre.codearea span.Char     { color: #004080; }
+pre.codearea span.Comment  { color: #808080; }
+pre.codearea span.Number   { color: #008080; font-weight: bold; }
+pre.codearea span.String   { color: #004080; }
+pre.codearea span.SPChar   { color: #0000C0; }
+pre.codearea span.ResWord  { color: #4040ff; font-weight: bold; }
+pre.codearea span.ConsWord { color: #008080; font-weight: bold; }
+
+"""
+
+import re, sys, sha
+from MoinMoin import config, wikiutil
+
+def parse_start_step(request, args):
+    """
+    Parses common Colorizer parameters start, step, numbers.
+    Uses L{wikiutil.parseAttributes} and sanitizes the results.
+
+    Start and step must be non-negative numbers and default to 1;
+    numbers may be on, off, or none and defaults to on. On or off
+    means that numbers are switchable via JavaScript (html formatter),
+    none means that numbers are disabled completely.
+
+    attrdict is returned as last element in the tuple, to enable the
+    calling parser to extract further arguments.
+
+    @param request: a request instance
+    @param args: the argument string
+
+    @returns: numbers, start, step, attrdict
+    """
+    nums, start, step = 1, 1, 1
+    attrs, msg = wikiutil.parseAttributes(request, args)
+    if not msg:
+        try:
+            start = int(attrs.get('start','"1"')[1:-1])
+        except ValueError:
+            pass
+        try:
+            step = int(attrs.get('step','"1"')[1:-1])
+        except ValueError:
+            pass
+        if attrs.get('numbers','"on"')[1:-1].lower() in ('off', 'false', 'no'):
+            nums = 0
+        elif attrs.get('numbers','"on"')[1:-1].lower() in ('none', 'disable'):
+            nums = -1
+    return nums, start, step, attrs
+
+class FormatTextBase:
+    pass
+
+class FormatText(FormatTextBase):
+    
+    def __init__(self, fmt):
+        self.fmt = fmt
+
+    def formatString(self, formatter, word):
+        return (formatter.code_token(1, self.fmt) +
+                formatter.text(word) +
+                formatter.code_token(0, self.fmt))
+
+class FormatTextID(FormatTextBase):
+    
+    def __init__(self, fmt, icase=0):
+        if not isinstance(fmt, FormatText):
+            self.def_fmt = FormatText(fmt)
+        else:
+            self.def_fmt = fmt
+        self._ignore_case = icase
+        self.fmt = {}
+
+    def addFormat(self, word, fmt):
+        if self._ignore_case:
+            word = word.lower()
+        self.fmt[word] = fmt
+        
+    def setDefaultFormat(self, fmt):
+        self.def_fmt = fmt
+        
+    def formatString(self, formatter, word):
+        if self._ignore_case:
+            sword = word.lower()
+        else:
+            sword = word
+        return self.fmt.get(sword,self.def_fmt).formatString(formatter, word)
+
+class FormattingRuleSingle:
+    
+    def __init__(self, name, str_re, icase=0):
+        self.name = name
+        self.str_re = str_re
+        
+    def getStartRe(self):
+        return self.str_re
+    
+    def getText(self, parser, hit):
+        return hit
+
+class FormattingRulePair:
+    
+    def __init__(self, name, str_begin, str_end, icase=0):
+        self.name = name
+        self.str_begin = str_begin
+        self.str_end = str_end
+        if icase:
+            self.end_re = re.compile(str_end, re.M|re.I)
+        else:
+            self.end_re = re.compile(str_end, re.M)
+        
+    def getStartRe(self):
+        return self.str_begin
+    
+    def getText(self, parser, hit):
+        match = self.end_re.search(parser.line, parser.lastpos)
+        if not match:
+            next_lastpos = len(parser.line)
+        else:
+            next_lastpos = match.end() + (match.end() == parser.lastpos)
+        r = parser.line[parser.lastpos:next_lastpos]
+        parser.lastpos = next_lastpos
+        return hit + r
+
+
+# ------------------------------------------------------------------------
+
+class ParserBase:
+
+    parsername = 'ParserBase'
+    
+    def __init__(self, raw, request, **kw):
+        self.raw = raw
+        self.request = request
+        self.show_nums, self.num_start, self.num_step, attrs = parse_start_step(request, kw.get('format_args',''))
+
+        self._ignore_case = 0
+        self._formatting_rules = []
+        self._formatting_rules_n2r = {}
+        self._formatting_rule_index = 0
+        self.rule_fmt = {}
+        self.line_count = len(raw.split('\n'))+1
+
+    def setupRules(self):
+        self.def_format = FormatText('Default')
+        self.ID_format = FormatTextID('ID', self._ignore_case)
+        self.addRuleFormat("ID",self.ID_format)
+        self.addRuleFormat("Operator")
+        self.addRuleFormat("Char")
+        self.addRuleFormat("Comment")
+        self.addRuleFormat("Number")
+        self.addRuleFormat("String")
+        self.addRuleFormat("SPChar")
+        self.addRuleFormat("ResWord")
+        self.addRuleFormat("ResWord2")
+        self.addRuleFormat("ConsWord")
+        self.addRuleFormat("Special")
+        self.addRuleFormat("Preprc")
+        self.addRuleFormat("Error")
+        self.reserved_word_format = FormatText('ResWord')
+        self.constant_word_format = FormatText('ConsWord')
+
+    def addRule(self, name, str_re):
+        self._formatting_rule_index += 1
+        n = "%s_%s" % (name, self._formatting_rule_index)
+        f = FormattingRuleSingle(name, str_re, self._ignore_case)
+        self._formatting_rules.append((n,f))
+        self._formatting_rules_n2r[n] = f
+
+    def addRulePair(self, name, start_re, end_re):
+        self._formatting_rule_index += 1
+        n = "%s_%s" % (name,self._formatting_rule_index)
+        f = FormattingRulePair(name, start_re, end_re, self._ignore_case)
+        self._formatting_rules.append((n,f))
+        self._formatting_rules_n2r[n] = f
+
+    def addWords(self, words, fmt):
+        if not isinstance(fmt,FormatTextBase):
+            fmt = FormatText(fmt)
+        for w in words:
+            self.ID_format.addFormat(w, fmt)
+
+    def addReserved(self, words):
+        self.addWords(words, self.reserved_word_format)
+
+    def addConstant(self, words):
+        self.addWords(words, self.constant_word_format)
+        
+    def addRuleFormat(self, name, fmt=None):
+        if fmt is None:
+            fmt = FormatText(name)
+        self.rule_fmt[name] = fmt
+
+    def format(self, formatter, form = None):
+        """ Send the text.
+        """
+
+        self.setupRules()
+
+        l = []
+        for n,f in self._formatting_rules:
+            l.append("(?P<%s>%s)" % (n,f.getStartRe()))
+        
+        if self._ignore_case:
+            scan_re = re.compile("|".join(l),re.M|re.I)
+        else:
+            scan_re = re.compile("|".join(l),re.M)
+
+        self.lastpos = 0
+        self.line = self.raw
+
+        self._code_id = sha.new(self.raw.encode(config.charset)).hexdigest()
+        self.request.write(formatter.code_area(1, self._code_id, self.parsername, self.show_nums, self.num_start, self.num_step))
+
+        self.request.write(formatter.code_line(1))
+            #formatter, len('%d' % (self.line_count,)))
+        
+        match = scan_re.search(self.line)
+
+        while match and self.lastpos < len(self.line):
+            # add the match we found
+            self.write_normal_text(formatter,
+                                   self.line[self.lastpos:match.start()])
+            self.lastpos = match.end() + (match.end() == self.lastpos)
+
+            self.write_match(formatter, match)
+
+            # search for the next one
+            match = scan_re.search(self.line, self.lastpos)
+
+        self.write_normal_text(formatter, self.line[self.lastpos:])
+
+        self.request.write(formatter.code_area(0, self._code_id))
+
+
+    def write_normal_text(self, formatter, text):
+        first = 1
+        for line in text.expandtabs(4).split('\n'):
+            if not first:
+                self.request.write(formatter.code_line(1))
+            else:
+                first = 0
+            self.request.write(formatter.text(line))
+
+    def write_match(self, formatter, match):
+        for n, hit in match.groupdict().items():
+            if not hit: continue
+            r = self._formatting_rules_n2r[n]
+            s = r.getText(self, hit)
+            c = self.rule_fmt.get(r.name,None)
+            if not c: c = self.def_format
+            first = 1
+            for line in s.expandtabs(4).split('\n'):
+                if not first:
+                    self.request.write(formatter.code_line(1))
+                else:
+                    first = 0
+                self.request.write(c.formatString(formatter, line))
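
The parser modules patched below only switch their import to the new location. For context, a colorizer is built by subclassing ParserBase and registering rules in setupRules(); this is a simplified sketch for a made-up language, not one of the shipped parsers:

    from MoinMoin.parser.ParserBase import ParserBase

    Dependencies = []

    class Parser(ParserBase):
        parsername = 'MiniLangColorizer'   # hypothetical

        def setupRules(self):
            ParserBase.setupRules(self)
            # order matters: earlier rules win when several could match at a position
            self.addRulePair("Comment", r"/\*", r"\*/")
            self.addRule("Comment", r"//.*$")
            self.addRulePair("String", r'"', r'"')
            self.addRule("Number", r"[0-9]+(\.[0-9]*)?")
            self.addRule("ID", r"[a-zA-Z_][0-9a-zA-Z_]*")
            self.addRule("SPChar", r"[~!%^&*()+=|\[\]:;,.<>/?{}-]")
            self.addReserved(["if", "else", "while", "return"])
            self.addConstant(["true", "false", "null"])

    # rendering then goes through the normal parser interface:
    #   Parser(raw_text, request).format(formatter)
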
--- a/MoinMoin/parser/text_cplusplus.py	Wed Jun 07 14:50:19 2006 +0200
+++ b/MoinMoin/parser/text_cplusplus.py	Sat Jun 10 16:45:05 2006 +0200
@@ -23,7 +23,7 @@
 
 """
 
-from MoinMoin.util.ParserBase import ParserBase
+from MoinMoin.parser.ParserBase import ParserBase
 
 Dependencies = []
 
--- a/MoinMoin/parser/text_java.py	Wed Jun 07 14:50:19 2006 +0200
+++ b/MoinMoin/parser/text_java.py	Sat Jun 10 16:45:05 2006 +0200
@@ -7,7 +7,7 @@
 
 """
 
-from MoinMoin.util.ParserBase import ParserBase
+from MoinMoin.parser.ParserBase import ParserBase
 
 Dependencies = []
 
--- a/MoinMoin/parser/text_pascal.py	Wed Jun 07 14:50:19 2006 +0200
+++ b/MoinMoin/parser/text_pascal.py	Sat Jun 10 16:45:05 2006 +0200
@@ -6,7 +6,7 @@
     @license: GNU GPL, see COPYING for details.
 """
 
-from MoinMoin.util.ParserBase import ParserBase
+from MoinMoin.parser.ParserBase import ParserBase
 
 Dependencies = []
 
--- a/MoinMoin/parser/text_python.py	Wed Jun 07 14:50:19 2006 +0200
+++ b/MoinMoin/parser/text_python.py	Sat Jun 10 16:45:05 2006 +0200
@@ -9,7 +9,7 @@
 import StringIO
 import keyword, token, tokenize, sha
 from MoinMoin import config, wikiutil
-from MoinMoin.util.ParserBase import parse_start_step
+from MoinMoin.parser.ParserBase import parse_start_step
 
 _KEYWORD = token.NT_OFFSET + 1
 _TEXT    = token.NT_OFFSET + 2
--- a/MoinMoin/parser/text_rst.py	Wed Jun 07 14:50:19 2006 +0200
+++ b/MoinMoin/parser/text_rst.py	Sat Jun 10 16:45:05 2006 +0200
@@ -306,13 +306,16 @@
         return html4css1.HTMLTranslator.astext(self)
 
     def fixup_wiki_formatting(self, text):
-        replacement = {'<p>': '', '</p>': '', '\n': '', '> ': '>'}
+        replacement = {'\n': '', '> ': '>'}
         for src, dst in replacement.items():
             text = text.replace(src, dst)
-        # Everything seems to have a space ending the text block. We want to
-        # get rid of this
-        if text and text[-1] == ' ':
-            text = text[:-1]
+        # Fixup extraneous markup
+        # Removes any empty span tags
+        text = re.sub(r'\s*<\s*span.*?>\s*<\s*/\s*span\s*>', '', text)
+        # Removes the first paragraph tag
+        text = re.sub(r'^\s*<\s*p[^>]*?>', '', text)
+        # Removes the ending paragraph close tag and any remaining whitespace
+        text = re.sub(r'<\s*/\s*p\s*>\s*$', '', text)
         return text
 
     def visit_reference(self, node):
@@ -339,8 +342,7 @@
                     (prefix == 'drawing') or
                     (prefix == 'inline')):
                 self.process_wiki_text(refuri)
-                # Don't call fixup_wiki_formatting because who knows what
-                # MoinMoin is inserting. (exits through add_wiki_markup)
+                self.wiki_text = self.fixup_wiki_formatting(self.wiki_text)
                 self.add_wiki_markup()
 
             # From here down, all links are handled by docutils (except 
@@ -352,6 +354,7 @@
                     # Attachment doesn't exist, give to MoinMoin to insert
                     # upload text.
                     self.process_wiki_text(refuri)
+                    self.wiki_text = self.fixup_wiki_formatting(self.wiki_text)
                     self.add_wiki_markup()
                 # Attachment exists, just get a link to it.
                 node['refuri'] = AttachFile.getAttachUrl(self.request.page.page_name, 
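
The three substitutions above replace the old blanket <p>/</p> stripping: empty inline spans are dropped, only the outermost paragraph tags are removed, and trailing whitespace goes with the closing tag. A rough illustration with a made-up fragment (the '\n' and '> ' dictionary replacements still run first in the real method):

    import re

    text = '<p class="x"> <span class="anchor"></span>SomePage </p> '
    text = re.sub(r'\s*<\s*span.*?>\s*<\s*/\s*span\s*>', '', text)  # empty spans
    text = re.sub(r'^\s*<\s*p[^>]*?>', '', text)                    # leading <p ...>
    text = re.sub(r'<\s*/\s*p\s*>\s*$', '', text)                   # trailing </p>
    # text is now 'SomePage '
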
--- a/MoinMoin/security.py	Wed Jun 07 14:50:19 2006 +0200
+++ /dev/null	Thu Jan 01 00:00:00 1970 +0000
@@ -1,59 +0,0 @@
-# -*- coding: iso-8859-1 -*-
-"""
-    MoinMoin - Wiki Security Interface
-
-    This implements the basic interface for user permissions and
-    system policy. If you want to define your own policy, inherit
-    from the base class 'Permissions', so that when new permissions
-    are defined, you get the defaults.
-
-    Then assign your new class to "SecurityPolicy" in wikiconfig;
-    and I mean the class, not an instance of it!
-
-    @copyright: 2000-2004 by Jürgen Hermann <jh@web.de>
-    @license: GNU GPL, see COPYING for details.
-"""
-
-#############################################################################
-### Basic Permissions Interface -- most features enabled by default
-#############################################################################
-
-
-class Permissions:
-    """ Basic interface for user permissions and system policy.
-
-        Note that you still need to allow some of the related actions, this
-        just controls their behaviour, not their activation.
-    """
-
-    def __init__(self, user):
-        """ Calculate the permissons `user` has.
-        """
-        from MoinMoin.Page import Page
-        self.Page = Page
-        self.name = user.name
-        self.request = user._request
-
-    def save(self, editor, newtext, rev, **kw):
-        """ Check whether user may save a page.
-
-            `editor` is the PageEditor instance, the other arguments are
-            those of the `PageEditor.saveText` method.
-        """
-        return self.write(editor.page_name)
-
-    def __getattr__(self, attr):
-        """ if attr is one of the rights in acl_rights_valid, then return a
-            checking function for it. Else raise an error.
-        """
-        request = self.request
-        Page = self.Page
-        if attr in request.cfg.acl_rights_valid:
-            return lambda pagename, Page=Page, request=request, attr=attr: Page(request, pagename).getACL(request).may(request, self.name, attr)
-        else:
-            raise AttributeError, attr
-        
-
-# make an alias for the default policy
-Default = Permissions
-
--- /dev/null	Thu Jan 01 00:00:00 1970 +0000
+++ b/MoinMoin/security/__init__.py	Sat Jun 10 16:45:05 2006 +0200
@@ -0,0 +1,407 @@
+# -*- coding: iso-8859-1 -*-
+"""
+    MoinMoin - Wiki Security Interface
+
+    This implements the basic interface for user permissions and
+    system policy. If you want to define your own policy, inherit
+    from the base class 'Permissions', so that when new permissions
+    are defined, you get the defaults.
+
+    Then assign your new class to "SecurityPolicy" in wikiconfig;
+    and I mean the class, not an instance of it!
+
+    @copyright: 2000-2004 by Jürgen Hermann <jh@web.de>
+    @license: GNU GPL, see COPYING for details.
+"""
+
+import re
+from MoinMoin import user
+
+#############################################################################
+### Basic Permissions Interface -- most features enabled by default
+#############################################################################
+
+
+class Permissions:
+    """ Basic interface for user permissions and system policy.
+
+        Note that you still need to allow some of the related actions, this
+        just controls their behaviour, not their activation.
+    """
+
+    def __init__(self, user):
+        """ Calculate the permissons `user` has.
+        """
+        from MoinMoin.Page import Page
+        self.Page = Page
+        self.name = user.name
+        self.request = user._request
+
+    def save(self, editor, newtext, rev, **kw):
+        """ Check whether user may save a page.
+
+            `editor` is the PageEditor instance, the other arguments are
+            those of the `PageEditor.saveText` method.
+        """
+        return self.write(editor.page_name)
+
+    def __getattr__(self, attr):
+        """ if attr is one of the rights in acl_rights_valid, then return a
+            checking function for it. Else raise an error.
+        """
+        request = self.request
+        Page = self.Page
+        if attr in request.cfg.acl_rights_valid:
+            return lambda pagename, Page=Page, request=request, attr=attr: Page(request, pagename).getACL(request).may(request, self.name, attr)
+        else:
+            raise AttributeError, attr
+        
+
+# make an alias for the default policy
+Default = Permissions
+
+
+# moved from MoinMoin.wikiacl ------------------------------------------------
+"""
+    MoinMoin Access Control Lists
+
+    @copyright: 2003 by Thomas Waldmann, http://linuxwiki.de/ThomasWaldmann
+    @copyright: 2003 by Gustavo Niemeyer, http://moin.conectiva.com.br/GustavoNiemeyer
+    @license: GNU GPL, see COPYING for details.
+"""
+
+class AccessControlList:
+    ''' Access Control List
+
+    Control who may do what on or with a wiki page.
+
+    Syntax of an ACL string:
+    
+        [+|-]User[,User,...]:[right[,right,...]] [[+|-]SomeGroup:...] ...
+        ... [[+|-]Known:...] [[+|-]All:...]
+
+        "User" is a user name and triggers only if the user matches. Up
+        to version 1.2 only WikiNames were supported, as of version 1.3,
+        any name can be used in acl lines, including name with spaces
+        using esoteric languages.
+
+        "SomeGroup" is a page name matching cfg.page_group_regex with
+        some lines in the form " * Member", defining the group members.
+
+        "Known" is a group containing all valid / known users.
+
+        "All" is a group containing all users (Known and Anonymous users).
+
+        "right" may be an arbitrary word like read, write, delete, admin.
+        Only words in cfg.acl_rights_valid are accepted, others are
+        ignored. It is allowed to specify no rights, which means that no
+        rights are given.
+
+    How ACL is processed
+
+        When some user is trying to access some ACL-protected resource,
+        the ACLs will be processed in the order they are found. The first
+        matching ACL will tell if the user has access to that resource
+        or not.
+
+        For example, the following ACL says that SomeUser may read and
+        write the resources protected by it, that any member of SomeGroup
+        (except SomeUser, even if he is part of that group) may also
+        admin them, and that every other user may read them.
+
+            SomeUser:read,write SomeGroup:read,write,admin All:read
+
+        In this example, SomeUser can read and write but cannot admin,
+        revert or delete pages. Rights that are NOT specified in the
+        rights list are automatically set to NO.
+
+    Using Prefixes
+        
+        To make the system more flexible, there are also two modifiers:
+        the prefixes "+" and "-". 
+
+            +SomeUser:read -OtherUser:write
+
+        The acl line above will grant SomeUser the read right and deny
+        OtherUser the write right, but will NOT automatically block all
+        other rights for these users. For example, if SomeUser asks to
+        write, the above acl line does not define whether he can write
+        or not. He will be able to write if acl_rights_before or
+        acl_rights_after allow this (see configuration options).
+        
+        Using prefixes, this acl line:
+        
+            SomeUser:read,write SomeGroup:read,write,admin All:read
+
+        Can be written as:
+        
+            -SomeUser:admin SomeGroup:read,write,admin All:read
+
+        Or even:
+
+            +All:read -SomeUser:admin SomeGroup:read,write,admin
+
+        Notice that you will probably not want to use the second and
+        third examples in the ACL entries of a page. They are very
+        useful in the moin configuration entries, though.
+
+   Configuration options
+   
+       cfg.acl_rights_default
+           It is ONLY used when no other ACLs are given.
+           Default: "Known:read,write,delete All:read,write",
+
+       cfg.acl_rights_before
+           When the page has ACL entries, this will be inserted BEFORE
+           any page entries.
+           Default: ""
+
+       cfg.acl_rights_after
+           When the page has ACL entries, this will be inserted AFTER
+           any page entries.
+           Default: ""
+       
+       cfg.acl_rights_valid
+           These are the acceptable (known) rights (and the place to
+           extend, if necessary).
+           Default: ["read", "write", "delete", "admin"]
+    '''
+
+    special_users = ["All", "Known", "Trusted"] # order is important
+
+    def __init__(self, request, lines=[]):
+        """Initialize an ACL, starting from <nothing>.
+        """
+        self.setLines(request.cfg, lines)
+
+    def setLines(self, cfg, lines=[]):
+        self.clean()
+        self.addBefore(cfg)
+        if not lines:
+            self.addDefault(cfg)
+        else:
+            for line in lines:
+                self.addLine(cfg, line)
+        self.addAfter(cfg)
+
+    def clean(self):
+        self.acl = [] # [ ('User', {"read": 0, ...}), ... ]
+        self.acl_lines = []
+        self._is_group = {}
+
+    def addBefore(self, cfg):
+        self.addLine(cfg, cfg.acl_rights_before, remember=0)
+    def addDefault(self, cfg):
+        self.addLine(cfg, cfg.acl_rights_default, remember=0)
+    def addAfter(self, cfg):
+        self.addLine(cfg, cfg.acl_rights_after, remember=0)
+
+    def addLine(self, cfg, aclstring, remember=1):
+        """ Add another ACL line
+
+        This can be used in multiple subsequent calls to process longer
+        lists.
+
+        @param cfg: current config
+        @param aclstring: acl string from page or cfg
+        @param remember: should add the line to self.acl_lines
+        """
+        group_re = re.compile(cfg.page_group_regex)
+
+        # Remember lines
+        if remember:
+            self.acl_lines.append(aclstring)
+
+        # Iterate over entries and rights, parsed by acl string iterator
+        acliter = ACLStringIterator(cfg.acl_rights_valid, aclstring)
+        for modifier, entries, rights in acliter:
+            if entries == ['Default']:
+                self.addDefault(cfg)
+                continue
+            
+            for entry in entries:
+                if group_re.search(entry):
+                    self._is_group[entry] = 1
+                rightsdict = {}
+                if modifier:
+                    # Only the listed rights are added to the rights dict:
+                    # + adds rights with value of 1,
+                    # - adds rights with value of 0.
+                    for right in rights:
+                        rightsdict[right] = (modifier == '+')    
+                else:
+                    # All rights from acl_rights_valid are added to the
+                    # dict, the listed rights with value of 1 and the
+                    # others with value of 0.
+                    for right in cfg.acl_rights_valid:
+                        rightsdict[right] = (right in rights)
+                self.acl.append((entry, rightsdict))
+
+    def may(self, request, name, dowhat):
+        """May <name> <dowhat>?
+           Returns boolean answer.
+        """
+        is_group_member = request.dicts.has_member
+
+        allowed = None
+        for entry, rightsdict in self.acl:
+            if entry in self.special_users:
+                handler = getattr(self, "_special_"+entry, None)
+                allowed = handler(request, name, dowhat, rightsdict)
+            elif self._is_group.get(entry):
+                if is_group_member(entry, name):
+                    allowed = rightsdict.get(dowhat)
+                else:
+                    for special in self.special_users:
+                        if is_group_member(entry, special):
+                            handler = getattr(self, "_special_"+ special, None)
+                            allowed = handler(request, name,
+                                              dowhat, rightsdict)
+                            break # order of self.special_users is important
+            elif entry == name:
+                allowed = rightsdict.get(dowhat)
+            if allowed is not None:
+                return allowed
+        return 0
+
+    def getString(self, b='#acl ', e='\n'):
+        """print the acl strings we were fed with"""
+        return ''.join(["%s%s%s" % (b,l,e) for l in self.acl_lines])
+
+    def _special_All(self, request, name, dowhat, rightsdict):
+        return rightsdict.get(dowhat)
+
+    def _special_Known(self, request, name, dowhat, rightsdict):
+        """ check if user <name> is known to us,
+            that means that there is a valid user account present.
+            works for subscription emails.
+        """
+        if user.getUserId(request, name): # is a user with this name known?
+            return rightsdict.get(dowhat)
+        return None
+
+    def _special_Trusted(self, request, name, dowhat, rightsdict):
+        """ check if user <name> is known AND even has logged in using a password.
+            Does not work for subscription emails that should be sent to <user>,
+            as in that case he is not logged in.
+        """
+        if request.user.trusted and name == request.user.name:
+            return rightsdict.get(dowhat)
+        return None
+
+    def __eq__(self, other):
+        return self.acl_lines == other.acl_lines
+    def __ne__(self, other):
+        return self.acl_lines != other.acl_lines
+
+
+class ACLStringIterator:
+    """ Iterator for acl string
+
+    Parse acl string and return the next entry on each call to
+    next. Implement the Iterator protocol.
+
+    Usage:
+        iter = ACLStringIterator(cfg.acl_rights_valid, 'user name:right')
+        for modifier, entries, rights in iter:
+            # process data
+    """
+    
+    def __init__(self, rights, aclstring):
+        """ Initialize acl iterator
+
+        @param rights: the acl rights to consider when parsing
+        @param aclstring: string to parse
+        """
+        self.rights = rights
+        self.rest = aclstring.strip()
+        self.finished = 0
+
+    def __iter__(self):
+        """ Required by the Iterator protocol """
+        return self
+    
+    def next(self):
+        """ Return the next values from the acl string
+
+        When the iterator is finished and you try to call next, it
+        raises a StopIteration. The iterator finishes as soon as the
+        string is fully parsed or cannot be parsed any more.
+
+        @rtype: 3 tuple - (modifier, [entry, ...], [right, ...])
+        @return: values for one item in an acl string
+        """
+        # Handle finished state, required by iterator protocol
+        if self.rest == '':
+            self.finished = 1
+        if self.finished:
+            raise StopIteration
+        
+        # Get optional modifier [+|-]entries:rights
+        modifier = ''
+        if self.rest[0] in ('+', '-'):
+            modifier, self.rest = self.rest[0], self.rest[1:]
+
+        # Handle the Default meta acl
+        if self.rest.startswith('Default ') or self.rest == 'Default':
+            self.rest = self.rest[8:]           
+            entries, rights = ['Default'], []
+            
+        # Handle entries:rights pairs
+        else:
+            # Get entries
+            try:
+                entries, self.rest = self.rest.split(':', 1)
+            except ValueError:
+                self.finished = 1
+                raise StopIteration("Can't parse rest of string")
+            if entries == '':
+                entries = []
+            else:
+                # TODO strip each entry from blanks?
+                entries = entries.split(',')            
+
+            # Get rights
+            try:         
+                rights, self.rest = self.rest.split(' ', 1)
+                # Remove extra white space after rights fragment,
+                # allowing using multiple spaces between items.
+                self.rest = self.rest.lstrip()
+            except ValueError:
+                rights, self.rest = self.rest, ''
+            rights = [r for r in rights.split(',') if r in self.rights]
+
+        return modifier, entries, rights
+
+
+def parseACL(request, body):
+    """ Parse acl lines on page and return ACL object
+
+    Use ACL object may(request, dowhat) to get acl rights.
+    """
+    acl_lines = []
+    while body and body[0] == '#':
+        # extract first line
+        try:
+            line, body = body.split('\n', 1)
+        except ValueError:
+            line = body
+            body = ''
+
+        # end parsing on empty (invalid) PI
+        if line == "#":
+            break
+
+        # skip comments (lines with two hash marks)
+        if line[1] == '#':
+            continue
+
+        tokens = line.split(None, 1)
+        if tokens[0].lower() == "#acl":
+            if len(tokens) == 2:
+                args = tokens[1].rstrip()
+            else:
+                args = ""
+            acl_lines.append(args)
+    return AccessControlList(request, acl_lines)
+
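
ACLStringIterator needs only the list of valid rights and an acl string, so
its behaviour can be checked in isolation. A minimal sketch; the acl string is
the third example from the docstring above, and the commented output is what
the parser code above yields for it:

    from MoinMoin.security import ACLStringIterator

    valid = ["read", "write", "delete", "admin"]
    acl = "+All:read -SomeUser:admin SomeGroup:read,write,admin"
    for modifier, entries, rights in ACLStringIterator(valid, acl):
        print modifier, entries, rights
    # + ['All'] ['read']
    # - ['SomeUser'] ['admin']
    #   ['SomeGroup'] ['read', 'write', 'admin']

AccessControlList.may() then walks the parsed entries in order and returns
the first definite answer, which is why the order of acl entries matters.
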
--- /dev/null	Thu Jan 01 00:00:00 1970 +0000
+++ b/MoinMoin/security/antispam.py	Sat Jun 10 16:45:05 2006 +0200
@@ -0,0 +1,245 @@
+#!/usr/bin/env python
+# -*- coding: iso-8859-1 -*-
+"""
+    This implements a global (and a local) blacklist against wiki spammers.
+
+    If started from commandline, it prints a merged list (moinmaster + MT) on
+    stdout, and what it got additionally from MT on stderr.
+    
+    @copyright: 2005 by Thomas Waldmann
+    @license: GNU GPL, see COPYING for details
+"""
+
+# give some log entries to stderr
+debug = 1
+
+import re, sys, time, datetime
+import sets
+
+if __name__ == '__main__':
+    sys.path.insert(0, "../..")
+
+from MoinMoin.security import Permissions
+from MoinMoin import caching, wikiutil
+
+# Errors ---------------------------------------------------------------
+
+class Error(Exception):
+    """Base class for antispam errors."""
+
+    def __str__(self):
+        return repr(self)
+
+class WikirpcError(Error):
+    """ Raised when we get xmlrpclib.Fault """
+
+    def __init__(self, msg, fault):
+        """ Init with msg and xmlrpclib.Fault dict """
+        self.msg = msg
+        self.fault = fault
+
+    def __str__(self):
+        """ Format the using description and data from the fault """
+        return self.msg + ": [%(faultCode)s]  %(faultString)s" % self.fault
+
+
+# Functions ------------------------------------------------------------
+
+def dprint(s):
+    if debug:
+        if isinstance(s, unicode):
+            s = s.encode('utf-8')
+        sys.stderr.write('%s\n' % s)
+
+
+def makelist(text):
+    """ Split text into lines, strip them, skip # comments """
+    lines = text.splitlines()
+    list = []
+    for line in lines:
+        line = line.split(' # ', 1)[0] # rest of line comment
+        line = line.strip()
+        if line and not line.startswith('#'):
+            list.append(line)
+    return list
+
+
+def getblacklist(request, pagename, do_update):
+    """ Get blacklist, possibly downloading new copy
+
+    @param request: current request (request instance)
+    @param pagename: bad content page name (unicode)
+    @rtype: list
+    @return: list of blacklisted regular expressions
+    """
+    from MoinMoin.PageEditor import PageEditor
+    p = PageEditor(request, pagename, uid_override="Antispam subsystem")
+    invalidate_cache = False
+    if do_update:
+        tooold = time.time() - 3600
+        mymtime = wikiutil.version2timestamp(p.mtime_usecs())
+        failure = caching.CacheEntry(request, "antispam", "failure", scope='wiki')
+        fail_time = failure.mtime() # only update if no failure in last hour
+        if (mymtime < tooold) and (fail_time < tooold):
+            dprint("%d *BadContent too old, have to check for an update..." % tooold)
+            import xmlrpclib
+            import socket
+
+            timeout = 15 # time out for reaching the master server via xmlrpc
+            old_timeout = socket.getdefaulttimeout()
+            socket.setdefaulttimeout(timeout)
+            
+            # For production code
+            uri = "http://moinmaster.wikiwikiweb.de:8000/?action=xmlrpc2"
+            # For testing (use your test wiki as BadContent source)
+            ##uri = "http://localhost/main/?action=xmlrpc2")
+            master = xmlrpclib.ServerProxy(uri)
+
+            try:
+                # Get BadContent info
+                master.putClientInfo('ANTISPAM-CHECK',
+                                     request.http_host+request.script_name)
+                response = master.getPageInfo(pagename)
+
+                # It seems that response is always a dict
+                if isinstance(response, dict) and 'faultCode' in response:
+                    raise WikirpcError("failed to get BadContent information",
+                                       response)
+                
+                # Compare date against local BadContent copy
+                masterdate = response['lastModified']
+
+                if isinstance(masterdate, datetime.datetime): 
+                    # for python 2.5a
+                    mydate = datetime.datetime(*tuple(time.gmtime(mymtime))[0:6])
+                else:
+                    # for python <= 2.4.x
+                    mydate = xmlrpclib.DateTime(tuple(time.gmtime(mymtime)))
+                                                    
+                dprint("master: %s mine: %s" % (masterdate, mydate))
+                if mydate < masterdate:
+                    # Get new copy and save
+                    dprint("Fetching page from master...")
+                    master.putClientInfo('ANTISPAM-FETCH',
+                                         request.http_host + request.script_name)
+                    response = master.getPage(pagename)
+                    if isinstance(response, dict) and 'faultCode' in response:
+                        raise WikirpcError("failed to get BadContent data",
+                                           response)
+                    p._write_file(response)
+
+                invalidate_cache = True
+
+            except (socket.error, xmlrpclib.ProtocolError), err:
+                # Log the error
+                # TODO: check if this does not fill the logs!
+                dprint('Timeout / socket / protocol error when accessing'
+                       ' moinmaster: %s' % str(err))
+                # update cache to wait before the next try
+                failure.update("")
+
+            except Error, err:
+                # In case of Error, we log the error and use the local
+                # BadContent copy.
+                dprint(str(err))
+
+            # set back socket timeout
+            socket.setdefaulttimeout(old_timeout)
+                
+    blacklist = p.get_raw_body()
+    return invalidate_cache, makelist(blacklist)
+
+
+class SecurityPolicy(Permissions):
+    """ Extend the default security policy with antispam feature """
+    
+    def save(self, editor, newtext, rev, **kw):
+        BLACKLISTPAGES = ["BadContent", "LocalBadContent"]
+        if not editor.page_name in BLACKLISTPAGES:
+            request = editor.request
+
+            # Start timing of antispam operation
+            request.clock.start('antispam')
+            
+            blacklist = []
+            invalidate_cache = not getattr(request.cfg, "_mmblcache", None)
+            for pn in BLACKLISTPAGES:
+                do_update = (pn != "LocalBadContent")
+                invalidate_cache_necessary, blacklist_entries = getblacklist(request, pn, do_update)
+                blacklist += blacklist_entries
+                invalidate_cache |= invalidate_cache_necessary
+
+            if blacklist:
+                if invalidate_cache:
+                    mmblcache = []
+                    for blacklist_re in blacklist:
+                        try:
+                            mmblcache.append(re.compile(blacklist_re, re.I))
+                        except re.error, err:
+                            dprint("Error in regex '%s': %s. Please check the pages %s." % (blacklist_re, str(err), ', '.join(BLACKLISTPAGES)))
+                            continue
+                    request.cfg._mmblcache = mmblcache
+
+                from MoinMoin.Page import Page
+
+                oldtext = ""
+                if rev > 0: # rev is the revision of the old page
+                    page = Page(request, editor.page_name, rev=rev)
+                    oldtext = page.get_raw_body()
+
+                newset = sets.ImmutableSet(newtext.splitlines(1))
+                oldset = sets.ImmutableSet(oldtext.splitlines(1))
+                difference = newset.difference(oldset)
+                addedtext = ''.join(difference) 
+                
+                for blacklist_re in request.cfg._mmblcache:
+                    match = blacklist_re.search(addedtext)
+                    if match:
+                        # Log error and raise SaveError, PageEditor
+                        # should handle this.
+                        _ = editor.request.getText
+                        msg = _('Sorry, can not save page because "%(content)s"'
+                                ' is not allowed in this wiki.') % {
+                            'content': match.group()
+                            }
+                        dprint(msg)
+                        raise editor.SaveError(msg)
+            request.clock.stop('antispam')
+            
+        # No problem to save if my base class agrees
+        return Permissions.save(self, editor, newtext, rev, **kw)
+
+
+def main():
+    """ Fetch spammer patterns from MT blacklist and moinmaster and merge them.
+        A complete new list for moinmaster gets printed to stdout,
+        only the new entries are printed to stderr.
+    """
+    import urllib
+    mtbl = urllib.urlopen("http://www.jayallen.org/comment_spam/blacklist.txt").read()
+    mmbl = urllib.urlopen("http://moinmaster.wikiwikiweb.de/BadContent?action=raw").read()
+    mtbl = makelist(mtbl)
+    mmbl = makelist(mmbl)
+    print "#format plain"
+    print "#acl All:read"
+    newbl = []
+    for i in mtbl:
+        for j in mmbl:
+            match = re.search(j, i, re.I)
+            if match:
+                break
+        if not match and i not in mmbl:
+            print >>sys.stderr, "%s" % i
+            newbl.append(i)
+    bl = mmbl + newbl
+    bl.sort()
+    lasti = None
+    for i in bl:
+        if i != lasti:
+            print i
+            lasti = i
+
+if __name__ == '__main__':
+    main()
+
+
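
The central point of the antispam check is that only the lines added by the
current edit are matched against the blacklist regexes, not the whole page.
A standalone sketch of that step; the blacklist patterns and page texts here
are made up:

    import re, sets

    blacklist = [r'viagra', r'casino-\w+\.example']     # made-up patterns
    patterns = [re.compile(p, re.I) for p in blacklist]

    oldtext = "A harmless wiki page.\n"
    newtext = "A harmless wiki page.\nBuy Viagra now!\n"

    # same set difference as in SecurityPolicy.save() above
    newset = sets.ImmutableSet(newtext.splitlines(1))
    oldset = sets.ImmutableSet(oldtext.splitlines(1))
    addedtext = ''.join(newset.difference(oldset))

    for p in patterns:
        if p.search(addedtext):
            print "save would be refused, matched:", p.pattern
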
--- /dev/null	Thu Jan 01 00:00:00 1970 +0000
+++ b/MoinMoin/security/autoadmin.py	Sat Jun 10 16:45:05 2006 +0200
@@ -0,0 +1,113 @@
+# -*- coding: iso-8859-1 -*-
+"""
+    MoinMoin - SecurityPolicy implementing auto admin rights for some users and some groups.
+    
+    AutoAdminGroup page contains users which automatically get admin rights
+    on their homepage and subpages of it. E.g. if ThomasWaldmann is in
+    AutoAdminGroup (or in a group contained in AutoAdminGroup), he gets
+    admin rights on pages ThomasWaldmann and ThomasWaldmann/*.
+
+    The AutoAdminGroup page also contains groups whose members automatically
+    get admin rights on the page named by the group's basename and its subpages.
+    E.g. if SomeProject/AdminGroup is in AutoAdminGroup and ThomasWaldmann is
+    in SomeProject/AdminGroup, then ThomasWaldmann gets admin rights on pages
+    SomeProject and SomeProject/*.
+    
+    Further, it can autocreate the UserName/XxxxGroup pages (see the grouppages
+    variable) when a user saves his homepage. Alternatively, this can also be
+    done manually by the user using *Template pages.
+
+    Usage (for wiki admin):
+     * Create an AutoAdminGroup page. If you don't know better, create an empty
+       page for starting.
+     * Enabling a home page for AutoAdmin: just add the user name to the
+       AutoAdminGroup page. After that, this user can create or change ACLs on
+       his homepage or subpages of it.
+     * Enabling another (project) page for AutoAdmin: add <PageName>/AdminGroup
+       to AutoAdminGroup. Also create that <PageName>/AdminGroup page and add
+       at least one user or one group to that page, enabling him or them to
+       create or change ACLs on <PageName> or subpages of it.
+     The pages edited by the wiki admin should be ACL protected, with write
+     access limited to allowed people. They are used as a source of ACL
+     information and thus should be treated like the ACLs they get fed into.
+
+    Usage (for homepage owners):
+     * see if there is a HomepageTemplate with a prepared ACL line and some
+       other magic already on it. It is a good idea to have your homepage
+       read- and writeable for everybody as a means of open communication.
+       
+     * For creating personal (or private) subpages of your homepage, use the
+       ReadWritePageTemplate, ReadPageTemplate or PrivatePageTemplate.
+       They usually have some prepared ACL line on them, e.g.:
+       #acl @ME@/ReadWriteGroup:read,write @ME@/ReadGroup:read
+       That @ME@ from the template will be expanded to your name when saving,
+       thus using those 2 subpages (YourName/ReadWriteGroup and
+       YourName/ReadGroup) for allowing read/write or read-only access to
+       the users listed on them. Now you only have to maintain 2 subpages
+       (maybe they even have been auto-created for you).
+     
+    Usage (for project people):
+     * see if there is some <ProjectName>Template with a prepared ACL line for
+       your project pages and use it for creating new subpages.
+       Use <ProjectName>/ReadWriteGroup and /ReadGroup etc. as you would do for
+       a homepage (see above).
+
+    @copyright: (c) Bastian Blank, Florian Festi, Thomas Waldmann
+    @license: GNU GPL, see COPYING for details.
+"""
+
+grouppage_autocreate = False # autocreate the group pages - alternatively use templates
+grouppages = ['AdminGroup', 'ReadGroup', 'ReadWriteGroup', ] # names of the subpages defining ACL groups
+
+from MoinMoin.security import Permissions
+from MoinMoin.Page import Page
+from MoinMoin.PageEditor import PageEditor
+
+class SecurityPolicy(Permissions):
+    """ Extend the default security policy with autoadmin feature """
+    
+    def admin(self, pagename):
+        try:
+            request = self.request
+            has_member = request.dicts.has_member
+            username = request.user.name
+            pagename = request.page.page_name
+            mainpage = pagename.split('/')[0]
+            if username == mainpage and has_member('AutoAdminGroup', username):
+                return True
+            groupname = "%s/AdminGroup" % mainpage
+            if has_member(groupname, username) and has_member('AutoAdminGroup', groupname):
+                return True
+        except AttributeError:
+            pass # when we get called from xmlrpc, there is no request.page
+        return Permissions.__getattr__(self, 'admin')(pagename)
+
+    def save(self, editor, newtext, rev, **kw):
+        request = self.request
+        has_member = request.dicts.has_member
+        username = request.user.name
+        pagename = editor.page_name
+
+        if grouppage_autocreate and username == pagename:
+            # create group pages when a user saves his own homepage
+            for page in grouppages:
+                grouppagename = "%s/%s" % (username, page)
+                grouppage = Page(request, grouppagename)
+                if not grouppage.exists():
+                    text = """\
+#acl %(username)s:read,write,delete,revert
+ * %(username)s
+""" % locals()
+                    editor = PageEditor(request, grouppagename)
+                    editor._write_file(text)
+
+        parts = pagename.split('/')
+        if len(parts) == 2:
+            mainpage, subpage = parts
+            if subpage in grouppages and not self.admin(pagename):
+                return False
+
+        # No problem to save if my base class agrees
+        return Permissions.save(self, editor, newtext, rev, **kw)
+
+
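
As the docstring in security/__init__.py says, a policy is activated by
assigning the class itself (not an instance) to SecurityPolicy in the wiki
configuration. A hedged sketch of a wikiconfig using the autoadmin policy;
the DefaultConfig import path is the usual one for this MoinMoin version,
and the sitename is a made-up value:

    # wikiconfig.py (sketch)
    from MoinMoin.multiconfig import DefaultConfig
    from MoinMoin.security.autoadmin import SecurityPolicy as AutoAdminPolicy

    class Config(DefaultConfig):
        sitename = u'Example Wiki'        # made-up value
        # assign the class, not an instance of it
        SecurityPolicy = AutoAdminPolicy
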
--- a/MoinMoin/server/standalone.py	Wed Jun 07 14:50:19 2006 +0200
+++ b/MoinMoin/server/standalone.py	Sat Jun 10 16:45:05 2006 +0200
@@ -403,7 +403,7 @@
                         self.request.getsockname()[1])
                 path = '/'
                 
-            self.requestline = 'ERROR: Redirecting to https://%s/%s' % (host, path)
+            self.requestline = 'ERROR: Redirecting to https://%s%s' % (host, path)
             self.request_version = 'HTTP/1.1'
             self.command = 'GET'
             self.send_response(301, 'Document Moved')
@@ -417,12 +417,12 @@
         def __init__(self, config):
             ThreadPoolServer.__init__(self, config)
             
-            cert = open(config.certificatePath).read()
+            cert = open(config.ssl_certificate).read()
             x509 = X509()
             x509.parse(cert)
             self.certChain = X509CertChain([x509])
             
-            priv = open(config.privateKeyPath).read()
+            priv = open(config.ssl_privkey).read()
             self.privateKey = parsePEMKey(priv, private=True)
             
             self.sessionCache = SessionCache()
@@ -430,24 +430,25 @@
         def finish_request(self, sock, client_address):
             # Peek into the packet, if it starts with GET or POS(T) then
             # redirect, otherwise let TLSLite handle the connection.
-            peek = sock.recv(3, socket.MSG_PEEK)
-            if peek.lower() == 'get' or peek.lower() == 'pos':
-                response = SecureRequestRedirect(sock, client_address, self)
-            tlsConnection = TLSConnection(sock)
-            if self.handshake(tlsConnection) == True:
-                self.RequestHandlerClass(tlsConnection, client_address, self)
+            peek = sock.recv(3, socket.MSG_PEEK).lower()
+            if peek == 'get' or peek == 'pos':
+                SecureRequestRedirect(sock, client_address, self)
+                return
+            tls_connection = TLSConnection(sock)
+            if self.handshake(tls_connection) == True:
+                self.RequestHandlerClass(tls_connection, client_address, self)
             else:
                 # This will probably fail because the TLSConnection has 
                 # already written SSL stuff to the socket. But not sure what
                 # else we should do.
-                response = SecureRequestRedirect(sock, client_address, self)
+                SecureRequestRedirect(sock, client_address, self)
                 
-        def handshake(self, tlsConnection):
+        def handshake(self, tls_connection):
             try:
-                tlsConnection.handshakeServer(certChain = self.certChain,
-                        privateKey = self.privateKey,
-                        sessionCache = self.sessionCache)
-                tlsConnection.ignoreAbruptClose = True
+                tls_connection.handshakeServer(certChain = self.certChain,
+                                               privateKey = self.privateKey,
+                                               sessionCache = self.sessionCache)
+                tls_connection.ignoreAbruptClose = True
                 return True
             except:
                 return False
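
The rewritten finish_request() uses MSG_PEEK so that deciding between a
redirect and a TLS handshake does not consume any data: if the connection
really is TLS, TLSLite still sees the complete client hello. A minimal
sketch of that peek-and-decide step (socket setup omitted):

    import socket

    def looks_like_plain_http(sock):
        # MSG_PEEK reads without removing the bytes from the buffer,
        # so a later TLS handshake still gets the full record
        peek = sock.recv(3, socket.MSG_PEEK).lower()
        return peek in ('get', 'pos')   # "GET"/"POS(T)" => plain HTTP
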
--- a/MoinMoin/stats/hitcounts.py	Wed Jun 07 14:50:19 2006 +0200
+++ b/MoinMoin/stats/hitcounts.py	Sat Jun 10 16:45:05 2006 +0200
@@ -14,9 +14,9 @@
 
 _debug = 0
 
-from MoinMoin import caching, config, wikiutil
+from MoinMoin import caching, config, wikiutil, logfile
 from MoinMoin.Page import Page
-from MoinMoin.logfile import eventlog, logfile
+from MoinMoin.logfile import eventlog
 
 
 def linkto(pagename, request, params=''):
--- a/MoinMoin/stats/useragents.py	Wed Jun 07 14:50:19 2006 +0200
+++ b/MoinMoin/stats/useragents.py	Sat Jun 10 16:45:05 2006 +0200
@@ -13,9 +13,9 @@
 
 _debug = 0
 
-from MoinMoin import wikiutil, caching 
+from MoinMoin import wikiutil, caching, logfile 
 from MoinMoin.Page import Page
-from MoinMoin.logfile import eventlog, logfile
+from MoinMoin.logfile import eventlog
 
 
 def linkto(pagename, request, params=''):
--- a/MoinMoin/user.py	Wed Jun 07 14:50:19 2006 +0200
+++ b/MoinMoin/user.py	Sat Jun 10 16:45:05 2006 +0200
@@ -940,7 +940,7 @@
         return markup
 
     def mailAccountData(self, cleartext_passwd=None):
-        from MoinMoin.util import mail
+        from MoinMoin.mail import sendmail
         from MoinMoin.wikiutil import getSysPage
         _ = self._request.getText
 
@@ -980,7 +980,7 @@
 
         subject = _('[%(sitename)s] Your wiki account data',
                     formatted=False) % {'sitename': self._cfg.sitename or "Wiki"}
-        mailok, msg = mail.sendmail(self._request, [self.email], subject,
+        mailok, msg = sendmail.sendmail(self._request, [self.email], subject,
                                     text, mail_from=self._cfg.mail_from)
         return msg
 
--- a/MoinMoin/userform.py	Wed Jun 07 14:50:19 2006 +0200
+++ b/MoinMoin/userform.py	Sat Jun 10 16:45:05 2006 +0200
@@ -8,7 +8,7 @@
 
 import string, time, re
 from MoinMoin import user, util, wikiutil
-from MoinMoin.util import web, mail, timefuncs
+from MoinMoin.util import web, timefuncs
 from MoinMoin.widget import html
 
 _debug = 0
--- a/MoinMoin/util/ParserBase.py	Wed Jun 07 14:50:19 2006 +0200
+++ /dev/null	Thu Jan 01 00:00:00 1970 +0000
@@ -1,270 +0,0 @@
-# -*- coding: iso-8859-1 -*-
-"""
-	MoinMoin - Base Source Parser
-
-    @copyright: 2002 by Taesu Pyo <bigflood@hitel.net>
-    @license: GNU GPL, see COPYING for details.
-
-    Docstrings and some refactoring by Oliver Graf <ograf@bitart.de>
-
-basic css:
-
-pre.codearea     { font-style: sans-serif; color: #000000; }
-
-pre.codearea span.ID       { color: #000000; }
-pre.codearea span.Char     { color: #004080; }
-pre.codearea span.Comment  { color: #808080; }
-pre.codearea span.Number   { color: #008080; font-weight: bold; }
-pre.codearea span.String   { color: #004080; }
-pre.codearea span.SPChar   { color: #0000C0; }
-pre.codearea span.ResWord  { color: #4040ff; font-weight: bold; }
-pre.codearea span.ConsWord { color: #008080; font-weight: bold; }
-
-"""
-
-import re, sys, sha
-from MoinMoin import config, wikiutil
-
-def parse_start_step(request, args):
-    """
-    Parses common Colorizer parameters start, step, numbers.
-    Uses L{wikiutil.parseAttributes} and sanitizes the results.
-
-    Start and step must be a non negative number and default to 1,
-    numbers might be on, off, or none and defaults to on. On or off
-    means that numbers are switchable via JavaScript (html formatter),
-    disabled means that numbers are disabled completely.
-
-    attrdict is returned as last element in the tuple, to enable the
-    calling parser to extract further arguments.
-
-    @param request: a request instance
-    @param args: the argument string
-
-    @returns: numbers, start, step, attrdict
-    """
-    nums, start, step = 1, 1, 1
-    attrs, msg = wikiutil.parseAttributes(request, args)
-    if not msg:
-        try:
-            start = int(attrs.get('start','"1"')[1:-1])
-        except ValueError:
-            pass
-        try:
-            step = int(attrs.get('step','"1"')[1:-1])
-        except ValueError:
-            pass
-        if attrs.get('numbers','"on"')[1:-1].lower() in ('off', 'false', 'no'):
-            nums = 0
-        elif attrs.get('numbers','"on"')[1:-1].lower() in ('none', 'disable'):
-            nums = -1
-    return nums, start, step, attrs
-
-class FormatTextBase:
-    pass
-
-class FormatText(FormatTextBase):
-    
-    def __init__(self, fmt):
-        self.fmt = fmt
-
-    def formatString(self, formatter, word):
-        return (formatter.code_token(1, self.fmt) +
-                formatter.text(word) +
-                formatter.code_token(0, self.fmt))
-
-class FormatTextID(FormatTextBase):
-    
-    def __init__(self, fmt, icase=0):
-        if not isinstance(fmt, FormatText):
-            self.def_fmt = FormatText(fmt)
-        else:
-            self.def_fmt = fmt
-        self._ignore_case = icase
-        self.fmt = {}
-
-    def addFormat(self, word, fmt):
-        if self._ignore_case:
-            word = word.lower()
-        self.fmt[word] = fmt
-        
-    def setDefaultFormat(self, fmt):
-        self.def_fmt = fmt
-        
-    def formatString(self, formatter, word):
-        if self._ignore_case:
-            sword = word.lower()
-        else:
-            sword = word
-        return self.fmt.get(sword,self.def_fmt).formatString(formatter, word)
-
-class FormattingRuleSingle:
-    
-    def __init__(self, name, str_re, icase=0):
-        self.name = name
-        self.str_re = str_re
-        
-    def getStartRe(self):
-        return self.str_re
-    
-    def getText(self, parser, hit):
-        return hit
-
-class FormattingRulePair:
-    
-    def __init__(self, name, str_begin, str_end, icase=0):
-        self.name = name
-        self.str_begin = str_begin
-        self.str_end = str_end
-        if icase:
-            self.end_re = re.compile(str_end, re.M|re.I)
-        else:
-            self.end_re = re.compile(str_end, re.M)
-        
-    def getStartRe(self):
-        return self.str_begin
-    
-    def getText(self, parser, hit):
-        match = self.end_re.search(parser.line, parser.lastpos)
-        if not match:
-            next_lastpos = len(parser.line)
-        else:
-            next_lastpos = match.end() + (match.end() == parser.lastpos)
-        r = parser.line[parser.lastpos:next_lastpos]
-        parser.lastpos = next_lastpos
-        return hit + r
-
-
-# ------------------------------------------------------------------------
-
-class ParserBase:
-
-    parsername = 'ParserBase'
-    
-    def __init__(self, raw, request, **kw):
-        self.raw = raw
-        self.request = request
-        self.show_nums, self.num_start, self.num_step, attrs = parse_start_step(request, kw.get('format_args',''))
-
-        self._ignore_case = 0
-        self._formatting_rules = []
-        self._formatting_rules_n2r = {}
-        self._formatting_rule_index = 0
-        self.rule_fmt = {}
-        self.line_count = len(raw.split('\n'))+1
-
-    def setupRules(self):
-        self.def_format = FormatText('Default')
-        self.ID_format = FormatTextID('ID', self._ignore_case)
-        self.addRuleFormat("ID",self.ID_format)
-        self.addRuleFormat("Operator")
-        self.addRuleFormat("Char")
-        self.addRuleFormat("Comment")
-        self.addRuleFormat("Number")
-        self.addRuleFormat("String")
-        self.addRuleFormat("SPChar")
-        self.addRuleFormat("ResWord")
-        self.addRuleFormat("ResWord2")
-        self.addRuleFormat("ConsWord")
-        self.addRuleFormat("Special")
-        self.addRuleFormat("Preprc")
-        self.addRuleFormat("Error")
-        self.reserved_word_format = FormatText('ResWord')
-        self.constant_word_format = FormatText('ConsWord')
-
-    def addRule(self, name, str_re):
-        self._formatting_rule_index += 1
-        n = "%s_%s" % (name, self._formatting_rule_index)
-        f = FormattingRuleSingle(name, str_re, self._ignore_case)
-        self._formatting_rules.append((n,f))
-        self._formatting_rules_n2r[n] = f
-
-    def addRulePair(self, name, start_re, end_re):
-        self._formatting_rule_index += 1
-        n = "%s_%s" % (name,self._formatting_rule_index)
-        f = FormattingRulePair(name, start_re, end_re, self._ignore_case)
-        self._formatting_rules.append((n,f))
-        self._formatting_rules_n2r[n] = f
-
-    def addWords(self, words, fmt):
-        if not isinstance(fmt,FormatTextBase):
-            fmt = FormatText(fmt)
-        for w in words:
-            self.ID_format.addFormat(w, fmt)
-
-    def addReserved(self, words):
-        self.addWords(words, self.reserved_word_format)
-
-    def addConstant(self, words):
-        self.addWords(words, self.constant_word_format)
-        
-    def addRuleFormat(self, name, fmt=None):
-        if fmt is None:
-            fmt = FormatText(name)
-        self.rule_fmt[name] = fmt
-
-    def format(self, formatter, form = None):
-        """ Send the text.
-        """
-
-        self.setupRules()
-
-        l = []
-        for n,f in self._formatting_rules:
-            l.append("(?P<%s>%s)" % (n,f.getStartRe()))
-        
-        if self._ignore_case:
-            scan_re = re.compile("|".join(l),re.M|re.I)
-        else:
-            scan_re = re.compile("|".join(l),re.M)
-
-        self.lastpos = 0
-        self.line = self.raw
-
-        self._code_id = sha.new(self.raw.encode(config.charset)).hexdigest()
-        self.request.write(formatter.code_area(1, self._code_id, self.parsername, self.show_nums, self.num_start, self.num_step))
-
-        self.request.write(formatter.code_line(1))
-            #formatter, len('%d' % (self.line_count,)))
-        
-        match = scan_re.search(self.line)
-
-        while match and self.lastpos < len(self.line):
-            # add the match we found
-            self.write_normal_text(formatter,
-                                   self.line[self.lastpos:match.start()])
-            self.lastpos = match.end() + (match.end() == self.lastpos)
-
-            self.write_match(formatter, match)
-
-            # search for the next one
-            match = scan_re.search(self.line, self.lastpos)
-
-        self.write_normal_text(formatter, self.line[self.lastpos:])
-
-        self.request.write(formatter.code_area(0, self._code_id))
-
-
-    def write_normal_text(self, formatter, text):
-        first = 1
-        for line in text.expandtabs(4).split('\n'):
-            if not first:
-                self.request.write(formatter.code_line(1))
-            else:
-                first = 0
-            self.request.write(formatter.text(line))
-
-    def write_match(self, formatter, match):
-        for n, hit in match.groupdict().items():
-            if not hit: continue
-            r = self._formatting_rules_n2r[n]
-            s = r.getText(self, hit)
-            c = self.rule_fmt.get(r.name,None)
-            if not c: c = self.def_format
-            first = 1
-            for line in s.expandtabs(4).split('\n'):
-                if not first:
-                    self.request.write(formatter.code_line(1))
-                else:
-                    first = 0
-                self.request.write(c.formatString(formatter, line))
--- a/MoinMoin/util/antispam.py	Wed Jun 07 14:50:19 2006 +0200
+++ /dev/null	Thu Jan 01 00:00:00 1970 +0000
@@ -1,245 +0,0 @@
-#!/usr/bin/env python
-# -*- coding: iso-8859-1 -*-
-"""
-    This implements a global (and a local) blacklist against wiki spammers.
-
-    If started from commandline, it prints a merged list (moinmaster + MT) on
-    stdout, and what it got additionally from MT on stderr.
-    
-    @copyright: 2005 by Thomas Waldmann
-    @license: GNU GPL, see COPYING for details
-"""
-
-# give some log entries to stderr
-debug = 1
-
-import re, sys, time, datetime
-import sets
-
-if __name__ == '__main__':
-    sys.path.insert(0, "../..")
-
-from MoinMoin.security import Permissions
-from MoinMoin import caching, wikiutil
-
-# Errors ---------------------------------------------------------------
-
-class Error(Exception):
-    """Base class for antispam errors."""
-
-    def __str__(self):
-        return repr(self)
-
-class WikirpcError(Error):
-    """ Raised when we get xmlrpclib.Fault """
-
-    def __init__(self, msg, fault):
-        """ Init with msg and xmlrpclib.Fault dict """
-        self.msg = msg
-        self.fault = fault
-
-    def __str__(self):
-        """ Format the using description and data from the fault """
-        return self.msg + ": [%(faultCode)s]  %(faultString)s" % self.fault
-
-
-# Functions ------------------------------------------------------------
-
-def dprint(s):
-    if debug:
-        if isinstance(s, unicode):
-            s = s.encode('utf-8')
-        sys.stderr.write('%s\n' % s)
-
-
-def makelist(text):
-    """ Split text into lines, strip them, skip # comments """
-    lines = text.splitlines()
-    list = []
-    for line in lines:
-        line = line.split(' # ', 1)[0] # rest of line comment
-        line = line.strip()
-        if line and not line.startswith('#'):
-            list.append(line)
-    return list
-
-
-def getblacklist(request, pagename, do_update):
-    """ Get blacklist, possibly downloading new copy
-
-    @param request: current request (request instance)
-    @param pagename: bad content page name (unicode)
-    @rtype: list
-    @return: list of blacklisted regular expressions
-    """
-    from MoinMoin.PageEditor import PageEditor
-    p = PageEditor(request, pagename, uid_override="Antispam subsystem")
-    invalidate_cache = False
-    if do_update:
-        tooold = time.time() - 3600
-        mymtime = wikiutil.version2timestamp(p.mtime_usecs())
-        failure = caching.CacheEntry(request, "antispam", "failure", scope='wiki')
-        fail_time = failure.mtime() # only update if no failure in last hour
-        if (mymtime < tooold) and (fail_time < tooold):
-            dprint("%d *BadContent too old, have to check for an update..." % tooold)
-            import xmlrpclib
-            import socket
-
-            timeout = 15 # time out for reaching the master server via xmlrpc
-            old_timeout = socket.getdefaulttimeout()
-            socket.setdefaulttimeout(timeout)
-            
-            # For production code
-            uri = "http://moinmaster.wikiwikiweb.de:8000/?action=xmlrpc2"
-            # For testing (use your test wiki as BadContent source)
-            ##uri = "http://localhost/main/?action=xmlrpc2")
-            master = xmlrpclib.ServerProxy(uri)
-
-            try:
-                # Get BadContent info
-                master.putClientInfo('ANTISPAM-CHECK',
-                                     request.http_host+request.script_name)
-                response = master.getPageInfo(pagename)
-
-                # It seems that response is always a dict
-                if isinstance(response, dict) and 'faultCode' in response:
-                    raise WikirpcError("failed to get BadContent information",
-                                       response)
-                
-                # Compare date against local BadContent copy
-                masterdate = response['lastModified']
-
-                if isinstance(masterdate, datetime.datetime): 
-                    # for python 2.5a
-                    mydate = datetime.datetime(*tuple(time.gmtime(mymtime))[0:6])
-                else:
-                    # for python <= 2.4.x
-                    mydate = xmlrpclib.DateTime(tuple(time.gmtime(mymtime)))
-                                                    
-                dprint("master: %s mine: %s" % (masterdate, mydate))
-                if mydate < masterdate:
-                    # Get new copy and save
-                    dprint("Fetching page from master...")
-                    master.putClientInfo('ANTISPAM-FETCH',
-                                         request.http_host + request.script_name)
-                    response = master.getPage(pagename)
-                    if isinstance(response, dict) and 'faultCode' in response:
-                        raise WikirpcError("failed to get BadContent data",
-                                           response)
-                    p._write_file(response)
-
-                invalidate_cache = True
-
-            except (socket.error, xmlrpclib.ProtocolError), err:
-                # Log the error
-                # TODO: check if this does not fill the logs!
-                dprint('Timeout / socket / protocol error when accessing'
-                       ' moinmaster: %s' % str(err))
-                # update cache to wait before the next try
-                failure.update("")
-
-            except Error, err:
-                # In case of Error, we log the error and use the local
-                # BadContent copy.
-                dprint(str(err))
-
-            # set back socket timeout
-            socket.setdefaulttimeout(old_timeout)
-                
-    blacklist = p.get_raw_body()
-    return invalidate_cache, makelist(blacklist)
-
-
-class SecurityPolicy(Permissions):
-    """ Extend the default security policy with antispam feature """
-    
-    def save(self, editor, newtext, rev, **kw):
-        BLACKLISTPAGES = ["BadContent", "LocalBadContent"]
-        if not editor.page_name in BLACKLISTPAGES:
-            request = editor.request
-
-            # Start timing of antispam operation
-            request.clock.start('antispam')
-            
-            blacklist = []
-            invalidate_cache = not getattr(request.cfg, "_mmblcache", None)
-            for pn in BLACKLISTPAGES:
-                do_update = (pn != "LocalBadContent")
-                invalidate_cache_necessary, blacklist_entries = getblacklist(request, pn, do_update)
-                blacklist += blacklist_entries
-                invalidate_cache |= invalidate_cache_necessary
-
-            if blacklist:
-                if invalidate_cache:
-                    mmblcache = []
-                    for blacklist_re in blacklist:
-                        try:
-                            mmblcache.append(re.compile(blacklist_re, re.I))
-                        except re.error, err:
-                            dprint("Error in regex '%s': %s. Please check the pages %s." % (blacklist_re, str(err), ', '.join(BLACKLISTPAGES)))
-                            continue
-                    request.cfg._mmblcache = mmblcache
-
-                from MoinMoin.Page import Page
-
-                oldtext = ""
-                if rev > 0: # rev is the revision of the old page
-                    page = Page(request, editor.page_name, rev=rev)
-                    oldtext = page.get_raw_body()
-
-                newset = sets.ImmutableSet(newtext.splitlines(1))
-                oldset = sets.ImmutableSet(oldtext.splitlines(1))
-                difference = newset.difference(oldset)
-                addedtext = ''.join(difference) 
-                
-                for blacklist_re in request.cfg._mmblcache:
-                    match = blacklist_re.search(addedtext)
-                    if match:
-                        # Log error and raise SaveError, PageEditor
-                        # should handle this.
-                        _ = editor.request.getText
-                        msg = _('Sorry, can not save page because "%(content)s"'
-                                ' is not allowed in this wiki.') % {
-                            'content': match.group()
-                            }
-                        dprint(msg)
-                        raise editor.SaveError(msg)
-            request.clock.stop('antispam')
-            
-        # No problem to save if my base class agree
-        return Permissions.save(self, editor, newtext, rev, **kw)
-
-
-def main():
-    """ Fetch spammer patterns from MT blacklist and moinmaster and merge them.
-        A complete new list for moinmaster gets printed to stdout,
-        only the new entries are printed to stderr.
-    """
-    import urllib
-    mtbl = urllib.urlopen("http://www.jayallen.org/comment_spam/blacklist.txt").read()
-    mmbl = urllib.urlopen("http://moinmaster.wikiwikiweb.de/BadContent?action=raw").read()
-    mtbl = makelist(mtbl)
-    mmbl = makelist(mmbl)
-    print "#format plain"
-    print "#acl All:read"
-    newbl = []
-    for i in mtbl:
-        for j in mmbl:
-            match = re.search(j, i, re.I)
-            if match:
-                break
-        if not match and i not in mmbl:
-            print >>sys.stderr, "%s" % i
-            newbl.append(i)
-    bl = mmbl + newbl
-    bl.sort()
-    lasti = None
-    for i in bl:
-        if i != lasti:
-            print i
-            lasti = i
-
-if __name__ == '__main__':
-    main()
-
-
--- a/MoinMoin/util/autoadmin.py	Wed Jun 07 14:50:19 2006 +0200
+++ /dev/null	Thu Jan 01 00:00:00 1970 +0000
@@ -1,113 +0,0 @@
-# -*- coding: iso-8859-1 -*-
-"""
-    MoinMoin - SecurityPolicy implementing auto admin rights for some users and some groups.
-    
-    AutoAdminGroup page contains users which automatically get admin rights
-    on their homepage and subpages of it. E.g. if ThomasWaldmann is in
-    AutoAdminGroup (or in a group contained in AutoAdminGroup), he gets
-    admin rights on pages ThomasWaldmann and ThomasWaldmann/*.
-
-    AutoAdminGroup page also contains groups which members automatically get
-    admin rights on the group's basename.
-    E.g. if SomeProject/AdminGroup is in AutoAdminGroup and ThomasWaldmann is
-    in SomeProject/AdminGroup, then ThomasWaldmann gets admin rights on pages
-    SomeProject and SomeProject/*.
-    
-    Further, it can autocreate the UserName/XxxxGroup (see grouppages var) when
-    a user save his homepage. Alternatively, this could be also done manually by
-    the user using *Template pages.
-
-    Usage (for wiki admin):
-     * Create an AutoAdminGroup page. If you don't know better, create an empty
-       page for starting.
-     * Enabling a home page for AutoAdmin: just add the user name to the
-       AutoAdminGroup page. After that, this user can create or change ACLs on
-       his homepage or subpages of it.
-     * Enabling another (project) page for AutoAdmin: add <PageName>/AdminGroup
-       to AutoAdminGroup. Also create that <PageName>/AdminGroup page and add
-       at least one user or one group to that page, enabling him or them to
-       create or change ACLs on <PageName> or subpages of it.
-     Those pages edited by wiki admin should be ACL protected with write access
-     limited to allowed people. They are used as source for some ACL
-     information and thus should be treated like the ACLs they get fed into.
-
-    Usage (for homepage owners):
-     * see if there is a HomepageTemplate with a prepared ACL line and some
-       other magic already on it. It is a good idea to have your homepage
-       read- and writeable for everybody as a means of open communication.
-       
-     * For creating personal (or private) subpages of your homepage, use the
-       ReadWritePageTemplate, ReadPageTemplate or PrivatePageTemplate.
-       They usually have some prepared ACL line on them, e.g.:
-       #acl @ME@/ReadWriteGroup:read,write @ME@/ReadGroup:read
-       That @ME@ from the template will be expanded to your name when saving,
-       thus using those 2 subpages (YourName/ReadWriteGroup and
-       YourName/ReadGroup) for allowing read/write or read-only access to
-       the people listed on them. Now you only have to maintain those 2
-       subpages (maybe they even have been auto-created for you).
-     
-    Usage (for project people):
-     * see if there is some <ProjectName>Template with a prepared ACL line for
-       your project pages and use it for creating new subpages.
-       Use <ProjectName>/ReadWriteGroup and /ReadGroup etc. as you would do for
-       a homepage (see above).
-
-    @copyright: (c) Bastian Blank, Florian Festi, Thomas Waldmann
-    @license: GNU GPL, see COPYING for details.
-"""
-
-grouppage_autocreate = False # autocreate the group pages - alternatively use templates
-grouppages = ['AdminGroup', 'ReadGroup', 'ReadWriteGroup', ] # names of the subpages defining ACL groups
-
-from MoinMoin.security import Permissions
-from MoinMoin.Page import Page
-from MoinMoin.PageEditor import PageEditor
-
-class SecurityPolicy(Permissions):
-    """ Extend the default security policy with autoadmin feature """
-    
-    def admin(self, pagename):
-        try:
-            request = self.request
-            has_member = request.dicts.has_member
-            username = request.user.name
-            pagename = request.page.page_name
-            mainpage = pagename.split('/')[0]
-            if username == mainpage and has_member('AutoAdminGroup', username):
-                return True
-            groupname = "%s/AdminGroup" % mainpage
-            if has_member(groupname, username) and has_member('AutoAdminGroup', groupname):
-                return True
-        except AttributeError:
-            pass # when we get called from xmlrpc, there is no request.page
-        return Permissions.__getattr__(self, 'admin')(pagename)
-
-    def save(self, editor, newtext, rev, **kw):
-        request = self.request
-        has_member = request.dicts.has_member
-        username = request.user.name
-        pagename = editor.page_name
-
-        if grouppage_autocreate and username == pagename:
-            # create group pages when a user saves his own homepage
-            for page in grouppages:
-                grouppagename = "%s/%s" % (username, page)
-                grouppage = Page(request, grouppagename)
-                if not grouppage.exists():
-                    text = """\
-#acl %(username)s:read,write,delete,revert
- * %(username)s
-""" % locals()
-                    editor = PageEditor(request, grouppagename)
-                    editor._write_file(text)
-
-        parts = pagename.split('/')
-        if len(parts) == 2:
-            mainpage, subpage = parts
-            if subpage in grouppages and not self.admin(pagename):
-                return False
-
-        # No problem to save if my base class agrees
-        return Permissions.save(self, editor, newtext, rev, **kw)
-
-
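
According to docs/CHANGES below, this module moves to MoinMoin/security/autoadmin.py. A minimal sketch of enabling it in a wiki configuration; the DefaultConfig import is the usual 1.5-style base class and may differ in this tree:

    # wikiconfig.py (sketch)
    from MoinMoin.multiconfig import DefaultConfig

    class Config(DefaultConfig):
        sitename = u'My Wiki'

        # Importing the policy inside the class body binds the name
        # SecurityPolicy in the config - the same pattern the sample
        # configs further down use for antispam/autoadmin.
        from MoinMoin.security.autoadmin import SecurityPolicy
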
--- a/MoinMoin/util/mail.py	Wed Jun 07 14:50:19 2006 +0200
+++ /dev/null	Thu Jan 01 00:00:00 1970 +0000
@@ -1,173 +0,0 @@
-# -*- coding: iso-8859-1 -*-
-"""
-    MoinMoin - email helper functions
-
-    @copyright: 2003 by Jürgen Hermann <jh@web.de>
-    @license: GNU GPL, see COPYING for details.
-"""
-
-import os, re
-from email.Header import Header
-from MoinMoin import config
-
-_transdict = {"AT": "@", "DOT": ".", "DASH": "-"}
-
-
-def encodeAddress(address, charset):
-    """ Encode email address to enable non ascii names 
-    
-    e.g. '"Jürgen Hermann" <jh@web.de>'. According to the RFC, the name
-    part should be encoded, the address should not.
-    
-    @param address: email address, possibly using '"name" <address>' format
-    @type address: unicode
-    @param charset: specifying both the charset and the encoding, e.g.
-        quoted printable or base64.
-    @type charset: email.Charset.Charset instance
-    @rtype: string
-    @return: encoded address
-    """   
-    composite = re.compile(r'(?P<phrase>.+)(?P<angle_addr>\<.*\>)', 
-                           re.UNICODE)
-    match = composite.match(address)
-    if match:
-        phrase = match.group('phrase').encode(config.charset)
-        phrase = str(Header(phrase, charset))
-        angle_addr = match.group('angle_addr').encode(config.charset)       
-        return phrase + angle_addr
-    else:
-        return address.encode(config.charset)
-
-
-def sendmail(request, to, subject, text, **kw):
-    """ Create and send a text/plain message
-        
-    Return a tuple of success or error indicator and message.
-    
-    @param request: the request object
-    @param to: recipients (list)
-    @param subject: subject of email (unicode)
-    @param text: email body text (unicode)
-    @keyword mail_from: override default mail_from
-    @type mail_from: unicode
-    @rtype: tuple
-    @return: (is_ok, Description of error or OK message)
-    """
-    import smtplib, socket
-    from email.Message import Message
-    from email.Charset import Charset, QP
-    from email.Utils import formatdate, make_msgid
-
-    _ = request.getText
-    cfg = request.cfg    
-    mail_from = kw.get('mail_from', '') or cfg.mail_from
-    subject = subject.encode(config.charset)    
-
-    # Create a text/plain body using CRLF (see RFC2822)
-    text = text.replace(u'\n', u'\r\n')
-    text = text.encode(config.charset)
-
-    # Create a message using config.charset and quoted printable
-    # encoding, which should be supported better by mail clients.
-    # TODO: check if it really works better for major mail clients
-    msg = Message()
-    charset = Charset(config.charset)
-    charset.header_encoding = QP
-    charset.body_encoding = QP
-    msg.set_charset(charset)    
-    msg.set_payload(charset.body_encode(text))
-    
-    # Create message headers
-    # Don't expose the email addresses of the other subscribers; instead we
-    # use the same mail_from, e.g. u"Jürgen Wiki <noreply@mywiki.org>"
-    address = encodeAddress(mail_from, charset) 
-    msg['From'] = address
-    msg['To'] = address
-    msg['Date'] = formatdate()
-    msg['Message-ID'] = make_msgid()
-    msg['Subject'] = Header(subject, charset)
-    
-    if cfg.mail_sendmail:
-        # Set the BCC.  This will be stripped later by sendmail.
-        msg['BCC'] = ','.join(to)
-        # Set Return-Path so that it isn't set (generally incorrectly) for us.
-        msg['Return-Path'] = address
-
-    # Send the message
-    if not cfg.mail_sendmail:
-        try:
-            host, port = (cfg.mail_smarthost + ':25').split(':')[:2]
-            server = smtplib.SMTP(host, int(port))
-            try:
-                #server.set_debuglevel(1)
-                if cfg.mail_login:
-                    user, pwd = cfg.mail_login.split()
-                    try: # try to do tls
-                        server.ehlo()
-                        if server.has_extn('starttls'):
-                            server.starttls()
-                            server.ehlo()
-                    except:
-                        pass
-                    server.login(user, pwd)
-                server.sendmail(mail_from, to, msg.as_string())
-            finally:
-                try:
-                    server.quit()
-                except AttributeError:
-                    # in case the connection failed, SMTP has no "sock" attribute
-                    pass
-        except smtplib.SMTPException, e:
-            return (0, str(e))
-        except (os.error, socket.error), e:
-            return (0, _("Connection to mailserver '%(server)s' failed: %(reason)s") % {
-                'server': cfg.mail_smarthost, 
-                'reason': str(e)
-            })
-    else:
-        try:
-            sendmailp = os.popen(cfg.mail_sendmail, "w") 
-            # msg contains everything we need, so this is a simple write
-            sendmailp.write(msg.as_string())
-            sendmail_status = sendmailp.close()
-            if sendmail_status:
-                return (0, str(sendmail_status))
-        except:
-            return (0, _("Mail not sent"))
-
-    return (1, _("Mail sent OK"))
-
-
-def decodeSpamSafeEmail(address):
-    """ Decode obfuscated email address to standard email address
-
-    Decode a spam-safe email address in `address` by applying the
-    following rules:
-    
-    Known all-uppercase words and their translation:
-        "DOT"   -> "."
-        "AT"    -> "@"
-        "DASH"  -> "-"
-
-    Any unknown all-uppercase words simply get stripped.
-    Use that to make it even harder for spam bots!
-
-    Blanks (spaces) simply get stripped.
-    
-    @param address: obfuscated email address string
-    @rtype: string
-    @return: decoded email address
-    """
-    email = []
-
-    # words are separated by blanks
-    for word in address.split():
-        # is it all-uppercase?
-        if word.isalpha() and word == word.upper():
-            # strip unknown CAPS words
-            word = _transdict.get(word, '')
-        email.append(word)
-
-    # return concatenated parts
-    return ''.join(email)
-
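
docs/CHANGES below records that this module moves to MoinMoin/mail/sendmail.py. decodeSpamSafeEmail has no dependencies, so its behaviour is easy to illustrate; a small sketch, assuming the function keeps its name at the new location:

    from MoinMoin.mail.sendmail import decodeSpamSafeEmail

    # known all-uppercase words are translated, unknown ones are dropped,
    # blanks are removed:
    decodeSpamSafeEmail("jh AT web DOT de")           # -> 'jh@web.de'
    decodeSpamSafeEmail("jh NOSPAM AT web DOT de")    # -> 'jh@web.de'

sendmail() itself returns a (success, message) tuple, so callers typically write: ok, msg = sendmail(request, [recipient], subject, text).
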
--- a/MoinMoin/util/sessionParser.py	Wed Jun 07 14:50:19 2006 +0200
+++ /dev/null	Thu Jan 01 00:00:00 1970 +0000
@@ -1,143 +0,0 @@
-"""
-    MoinMoin - Parsing of PHP session files
-
-    @copyright: 2005 by MoinMoin:AlexanderSchremmer
-        - Thanks to Spreadshirt
-    @license: GNU GPL, see COPYING for details.
-"""
-
-#Known minor bugs/questions/ideas:
-#How does object demarshalling work?
-#The order of the python dictionaries is not stable compared to the PHP arrays
-#The loader does not check the owner of the files, so be aware of faked session
-#files.
-
-import os
-from MoinMoin import wikiutil
-
-s_prefix = "sess_"
-s_path = "/tmp"
-
-class UnknownObject(object):
-    """ Used in the return value if the input data could not be parsed. """
-    def __init__(self, pos):
-        self.pos = pos
-
-    def __repr__(self):
-        return "<Unknown object at pos %i>" % self.pos
-
-def transformList(items):
-    """ Transforms a list [1, 2, 3, 4, ...] into a
-        [(1, 2), (3, 4), ...] generator. """
-    for i in xrange(0, len(items), 2):
-        yield (items[i], items[i+1])
-    raise StopIteration
-
-def parseValue(string, start=0):
-    """ Parses the inner structure. """
-    #print "Parsing %r" % (string[start:], )
-
-    val_type = string[start]
-    header_end = string.find(':', 3+start)
-    if header_end != -1:
-        first_data = string[start+2:header_end]
-    else:
-        first_data = None
-    
-    #print "Saw type %r, first_data is %r." % (val_type, first_data)
-    if val_type == 'a': # array (in Python rather a mixture of a list and a dict)
-        i = 0
-        items = []
-        
-        current_pos = header_end+2
-        data = string
-        while i != (int(first_data) * 2):
-            item, current_pos = parseValue(data, current_pos)
-            items.append(item)
-            i += 1
-            current_pos += 1
-        
-        t_list = list(transformList(items))
-        try:
-            result = dict(t_list) # note that dict does not retain the order
-        except TypeError:
-            result = list(t_list)
-            #print "Warning, could not convert to dict: %r" %  (result, )
-        return result, current_pos
-    
-    if val_type == 's': # string
-        current_pos = header_end+2
-        end = current_pos + int(first_data)
-        data = string[current_pos:end]
-        current_pos = end+1
-        if data.startswith("a:"): #Sometimes, arrays are marshalled as strings.
-            try:
-                data = parseValue(data, 0)[0]
-            except ValueError: #Hmm, wrongly guessed. Just an ordinary string
-                pass
-        return data, current_pos
-
-    if val_type in ('i', 'b'): # integer or boolean
-        current_pos = start+2
-        str_buffer = ""
-        while current_pos != len(string):
-            cur_char = string[current_pos]
-            if cur_char.isdigit() or cur_char == "-":
-                str_buffer += cur_char
-            else:
-                cast = (val_type == 'i') and int or (lambda x: bool(int(x)))
-                return cast(str_buffer), current_pos
-            current_pos += 1
-
-    if val_type == "N": # Null, called None in Python
-        return None, start+1
-        
-    return UnknownObject(start), start+1
-
-def parseSession(boxed):
-    """ Parses the outer structure that is similar to a dict. """
-    current_pos = 0
-    session_dict = {}
-    while current_pos < len(boxed):
-        name_end = boxed.find("|", current_pos)
-        name = boxed[current_pos:name_end]
-        current_pos = name_end+1
-        data, current_pos = parseValue(boxed, current_pos)
-        current_pos += 1
-        session_dict[name] = data
-
-    return session_dict
-
-def loadSession(key, path=s_path, prefix=s_prefix):
-    """ Loads a particular session from the directory. The key needs to be the
-        session id. """
-    key = key.lower()
-    filename = os.path.join(path, prefix + wikiutil.taintfilename(key))
-
-    try:
-        f = open(filename, "rb")
-    except IOError, e:
-        if e.errno == 2:
-            return None # session does not exist
-        else:
-            raise
-
-    blob = f.read()
-    f.close()
-    return parseSession(blob)
-
-def listSessions(path=s_path, prefix=s_prefix):
-    """ Lists all sessions in a particular directory. """
-    return [os.path.basename(x).replace(s_prefix, '') for x in os.listdir(s_path)
-            if x.startswith(s_prefix)]
-
-if __name__ == '__main__':
-    # testing code
-    import time
-    a=time.clock()
-    
-    #print s
-    p_s = loadSession("...")
-    import pprint; pprint.pprint(p_s)
-    print time.clock() - a
-    print listSessions()
--- a/MoinMoin/wikiacl.py	Wed Jun 07 14:50:19 2006 +0200
+++ /dev/null	Thu Jan 01 00:00:00 1970 +0000
@@ -1,347 +0,0 @@
-# -*- coding: iso-8859-1 -*-
-"""
-    MoinMoin Access Control Lists
-
-    @copyright: 2003 by Thomas Waldmann, http://linuxwiki.de/ThomasWaldmann
-    @copyright: 2003 by Gustavo Niemeyer, http://moin.conectiva.com.br/GustavoNiemeyer
-    @license: GNU GPL, see COPYING for details.
-"""
-
-import re
-from MoinMoin import user
-
-class AccessControlList:
-    ''' Access Control List
-
-    Control who may do what on or with a wiki page.
-
-    Syntax of an ACL string:
-    
-        [+|-]User[,User,...]:[right[,right,...]] [[+|-]SomeGroup:...] ...
-        ... [[+|-]Known:...] [[+|-]All:...]
-
-        "User" is a user name and triggers only if the user matches. Up
-        to version 1.2 only WikiNames were supported; as of version 1.3,
-        any name can be used in acl lines, including names with spaces
-        and names written in other languages.
-
-        "SomeGroup" is a page name matching cfg.page_group_regex with
-        some lines in the form " * Member", defining the group members.
-
-        "Known" is a group containing all valid / known users.
-
-        "All" is a group containing all users (Known and Anonymous users).
-
-        "right" may be an arbitrary word like read, write, delete, admin.
-        Only words in cfg.acl_validrights are accepted, others are
-        ignored. It is allowed to specify no rights, which means that no
-        rights are given.
-
-    How ACL is processed
-
-        When some user is trying to access some ACL-protected resource,
-        the ACLs will be processed in the order they are found. The first
-        matching ACL will tell if the user has access to that resource
-        or not.
-
-        For example, the following ACL says that SomeUser is able to
-        read and write the resources protected by that ACL, while any
-        member of SomeGroup (besides SomeUser, if part of that group)
-        may also admin them, and every other user is able to read them.
-
-            SomeUser:read,write SomeGroup:read,write,admin All:read
-
-        In this example, SomeUser can read and write but can not admin,
-        revert or delete pages. Rights that are NOT specified in the
-        rights list are automatically set to NO.
-
-    Using Prefixes
-        
-        To make the system more flexible, there are also two modifiers:
-        the prefixes "+" and "-". 
-
-            +SomeUser:read -OtherUser:write
-
-        The acl line above will grant SomeUser the read right and deny
-        OtherUser the write right, but will NOT automatically block all
-        other rights for these users. For example, if SomeUser asks to
-        write, the acl line above does not define whether he can write. He
-        will be able to write if acl_rights_before or acl_rights_after
-        allow this (see configuration options).
-        
-        Using prefixes, this acl line:
-        
-            SomeUser:read,write SomeGroup:read,write,admin All:read
-
-        Can be written as:
-        
-            -SomeUser:admin SomeGroup:read,write,admin All:read
-
-        Or even:
-
-            +All:read -SomeUser:admin SomeGroup:read,write,admin
-
-        Notice that you probably will not want to use the second and
-        third examples in the ACL entries of a page. They are very
-        useful in the moin configuration entries though.
-
-   Configuration options
-   
-       cfg.acl_rights_default
-           It is ONLY used when no other ACLs are given.
-           Default: "Known:read,write,delete All:read,write",
-
-       cfg.acl_rights_before
-           When the page has ACL entries, this will be inserted BEFORE
-           any page entries.
-           Default: ""
-
-       cfg.acl_rights_after
-           When the page has ACL entries, this will be inserted AFTER
-           any page entries.
-           Default: ""
-       
-       cfg.acl_rights_valid
-           These are the acceptable (known) rights (and the place to
-           extend, if necessary).
-           Default: ["read", "write", "delete", "admin"]
-    '''
-
-    special_users = ["All", "Known", "Trusted"] # order is important
-
-    def __init__(self, request, lines=[]):
-        """Initialize an ACL, starting from <nothing>.
-        """
-        self.setLines(request.cfg, lines)
-
-    def setLines(self, cfg, lines=[]):
-        self.clean()
-        self.addBefore(cfg)
-        if not lines:
-            self.addDefault(cfg)
-        else:
-            for line in lines:
-                self.addLine(cfg, line)
-        self.addAfter(cfg)
-
-    def clean(self):
-        self.acl = [] # [ ('User', {"read": 0, ...}), ... ]
-        self.acl_lines = []
-        self._is_group = {}
-
-    def addBefore(self, cfg):
-        self.addLine(cfg, cfg.acl_rights_before, remember=0)
-    def addDefault(self, cfg):
-        self.addLine(cfg, cfg.acl_rights_default, remember=0)
-    def addAfter(self, cfg):
-        self.addLine(cfg, cfg.acl_rights_after, remember=0)
-
-    def addLine(self, cfg, aclstring, remember=1):
-        """ Add another ACL line
-
-        This can be used in multiple subsequent calls to process longer
-        lists.
-
-        @param cfg: current config
-        @param aclstring: acl string from page or cfg
-        @param remember: should add the line to self.acl_lines
-        """
-        group_re = re.compile(cfg.page_group_regex)
-
-        # Remember lines
-        if remember:
-            self.acl_lines.append(aclstring)
-
-        # Iterate over entries and rights, parsed by acl string iterator
-        acliter = ACLStringIterator(cfg.acl_rights_valid, aclstring)
-        for modifier, entries, rights in acliter:
-            if entries == ['Default']:
-                self.addDefault(cfg)
-                continue
-            
-            for entry in entries:
-                if group_re.search(entry):
-                    self._is_group[entry] = 1
-                rightsdict = {}
-                if modifier:
-                    # Only user rights are added to the right dict.
-                    # + add rights with value of 1
-                    # - add right with value of 0
-                    for right in rights:
-                        rightsdict[right] = (modifier == '+')    
-                else:
-                    # All rights from acl_rights_valid are added to the
-                    # dict, user rights with value of 1, and other with
-                    # value of 0
-                    for right in cfg.acl_rights_valid:
-                        rightsdict[right] = (right in rights)
-                self.acl.append((entry, rightsdict))
-
-    def may(self, request, name, dowhat):
-        """May <name> <dowhat>?
-           Returns boolean answer.
-        """
-        is_group_member = request.dicts.has_member
-
-        allowed = None
-        for entry, rightsdict in self.acl:
-            if entry in self.special_users:
-                handler = getattr(self, "_special_"+entry, None)
-                allowed = handler(request, name, dowhat, rightsdict)
-            elif self._is_group.get(entry):
-                if is_group_member(entry, name):
-                    allowed = rightsdict.get(dowhat)
-                else:
-                    for special in self.special_users:
-                        if is_group_member(entry, special):
-                            handler = getattr(self, "_special_"+ special, None)
-                            allowed = handler(request, name,
-                                              dowhat, rightsdict)
-                            break # order of self.special_users is important
-            elif entry == name:
-                allowed = rightsdict.get(dowhat)
-            if allowed is not None:
-                return allowed
-        return 0
-
-    def getString(self, b='#acl ', e='\n'):
-        """print the acl strings we were fed with"""
-        return ''.join(["%s%s%s" % (b,l,e) for l in self.acl_lines])
-
-    def _special_All(self, request, name, dowhat, rightsdict):
-        return rightsdict.get(dowhat)
-
-    def _special_Known(self, request, name, dowhat, rightsdict):
-        """ check if user <name> is known to us,
-            i.e. there is a valid user account present.
-            Works for subscription emails.
-        """
-        if user.getUserId(request, name): # is a user with this name known?
-            return rightsdict.get(dowhat)
-        return None
-
-    def _special_Trusted(self, request, name, dowhat, rightsdict):
-        """ check if user <name> is known AND even has logged in using a password.
-            Does not work for subscription emails that should be sent to <user>,
-            as he is not logged in in that case.
-        """
-        if request.user.trusted and name == request.user.name:
-            return rightsdict.get(dowhat)
-        return None
-
-    def __eq__(self, other):
-        return self.acl_lines == other.acl_lines
-    def __ne__(self, other):
-        return self.acl_lines != other.acl_lines
-
-
-class ACLStringIterator:
-    """ Iterator for acl string
-
-    Parse acl string and return the next entry on each call to
-    next. Implement the Iterator protocol.
-
-    Usage:
-        iter = ACLStringIterator(cfg.acl_rights_valid, 'user name:right')
-        for modifier, entries, rights in iter:
-            # process data
-    """
-    
-    def __init__(self, rights, aclstring):
-        """ Initialize acl iterator
-
-        @param rights: the acl rights to consider when parsing
-        @param aclstring: string to parse
-        """
-        self.rights = rights
-        self.rest = aclstring.strip()
-        self.finished = 0
-
-    def __iter__(self):
-        """ Required by the Iterator protocol """
-        return self
-    
-    def next(self):
-        """ Return the next values from the acl string
-
-        When the iterator is finished and you try to call next, it
-        raises StopIteration. The iterator finishes as soon as the
-        string is fully parsed or can not be parsed any more.
-
-        @rtype: 3 tuple - (modifier, [entry, ...], [right, ...])
-        @return: values for one item in an acl string
-        """
-        # Handle finished state, required by iterator protocol
-        if self.rest == '':
-            self.finished = 1
-        if self.finished:
-            raise StopIteration
-        
-        # Get optional modifier [+|-]entries:rights
-        modifier = ''
-        if self.rest[0] in ('+', '-'):
-            modifier, self.rest = self.rest[0], self.rest[1:]
-
-        # Handle the Default meta acl
-        if self.rest.startswith('Default ') or self.rest == 'Default':
-            self.rest = self.rest[8:]           
-            entries, rights = ['Default'], []
-            
-        # Handle entries:rights pairs
-        else:
-            # Get entries
-            try:
-                entries, self.rest = self.rest.split(':', 1)
-            except ValueError:
-                self.finished = 1
-                raise StopIteration("Can't parse rest of string")
-            if entries == '':
-                entries = []
-            else:
-                # TODO strip each entry from blanks?
-                entries = entries.split(',')            
-
-            # Get rights
-            try:         
-                rights, self.rest = self.rest.split(' ', 1)
-                # Remove extra white space after rights fragment,
-                # allowing using multiple spaces between items.
-                self.rest = self.rest.lstrip()
-            except ValueError:
-                rights, self.rest = self.rest, ''
-            rights = [r for r in rights.split(',') if r in self.rights]
-
-        return modifier, entries, rights
-
-
-def parseACL(request, body):
-    """ Parse acl lines on page and return ACL object
-
-    Use the ACL object's may(request, name, dowhat) to check acl rights.
-    """
-    acl_lines = []
-    while body and body[0] == '#':
-        # extract first line
-        try:
-            line, body = body.split('\n', 1)
-        except ValueError:
-            line = body
-            body = ''
-
-        # end parsing on empty (invalid) PI
-        if line == "#":
-            break
-
-        # skip comments (lines with two hash marks)
-        if line[1] == '#':
-            continue
-
-        tokens = line.split(None, 1)
-        if tokens[0].lower() == "#acl":
-            if len(tokens) == 2:
-                args = tokens[1].rstrip()
-            else:
-                args = ""
-            acl_lines.append(args)
-    return AccessControlList(request, acl_lines)
-
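
The ACL code removed above is merged into MoinMoin/security/__init__.py (see docs/CHANGES below). A short sketch of how ACLStringIterator splits an acl string, assuming the post-move import path and the default rights list:

    from MoinMoin.security import ACLStringIterator

    valid = ["read", "write", "delete", "admin"]
    for modifier, entries, rights in ACLStringIterator(valid, "SomeUser:read,write All:read"):
        # modifier is '' unless the entry starts with '+' or '-'
        print entries, rights
    # prints:
    #   ['SomeUser'] ['read', 'write']
    #   ['All'] ['read']
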
--- a/MoinMoin/wikidicts.py	Wed Jun 07 14:50:19 2006 +0200
+++ b/MoinMoin/wikidicts.py	Sat Jun 10 16:45:05 2006 +0200
@@ -21,9 +21,8 @@
 # Set pickle protocol, see http://docs.python.org/lib/node64.html
 PICKLE_PROTOCOL = pickle.HIGHEST_PROTOCOL
  
-from MoinMoin import config, caching, wikiutil, Page
+from MoinMoin import config, caching, wikiutil, Page, logfile
 from MoinMoin.logfile.editlog import EditLog
-from MoinMoin.logfile import logfile
 
 # Version of the internal data structure which is pickled
 # Please increment if you have changed the structure
--- a/MoinMoin/wikiutil.py	Wed Jun 07 14:50:19 2006 +0200
+++ b/MoinMoin/wikiutil.py	Sat Jun 10 16:45:05 2006 +0200
@@ -788,7 +788,7 @@
     
     def parse_filename(self, filename):
         import mimetypes
-        mtype, encoding = mimetypes.guess_type()
+        mtype, encoding = mimetypes.guess_type(filename)
         if mtype is None:
             mtype = 'application/octet-stream'
         self.parse_mimetype(mtype)
--- a/MoinMoin/xmlrpc/ProcessMail.py	Wed Jun 07 14:50:19 2006 +0200
+++ b/MoinMoin/xmlrpc/ProcessMail.py	Sat Jun 10 16:45:05 2006 +0200
@@ -6,7 +6,7 @@
     @license: GNU GPL, see COPYING for details.
 """
 
-from MoinMoin import mailimport
+from MoinMoin.mail import mailimport
 
 def execute(xmlrpcobj, secret, mail):
     request = xmlrpcobj.request
--- a/MoinMoin/xmlrpc/__init__.py	Wed Jun 07 14:50:19 2006 +0200
+++ b/MoinMoin/xmlrpc/__init__.py	Sat Jun 10 16:45:05 2006 +0200
@@ -496,6 +496,13 @@
         from MoinMoin import version
         return (version.project, version.release, version.revision)
 
+
+    # XXX BEGIN WARNING XXX
+    # All xmlrpc_*Attachment* functions have to be considered as UNSTABLE API -
+    # they are neither standard nor what we will need once we have switched
+    # from attachments (1.5 style) to mimetype items (hopefully in 1.6).
+    # They are likely to be removed again when we remove the AttachFile module.
+    # So use them at your own risk.
     def xmlrpc_listAttachments(self, pagename):
         """ Get all attachments associated with pagename
         
@@ -510,7 +517,7 @@
         
         result = AttachFile._get_files(self.request, pagename)
         return result
-        
+
     def xmlrpc_getAttachment(self, pagename, attachname):
         """ Get attachname associated with pagename
         
@@ -523,13 +530,13 @@
         # User may read page?
         if not self.request.user.may.read(pagename):
             return self.notAllowedFault()
-        
-        filename = wikiutil.taintfilename(filename)
-        filename = AttachFile.getFilename(self.request, pagename, attachname)
+
+        filename = wikiutil.taintfilename(self._instr(attachname))
+        filename = AttachFile.getFilename(self.request, pagename, filename)
         if not os.path.isfile(filename):
             return self.noSuchPageFault()
         return self._outlob(open(filename, 'rb').read())
-        
+
     def xmlrpc_putAttachment(self, pagename, attachname, data):
         """ Set attachname associated with pagename to data
         
@@ -560,6 +567,8 @@
         os.chmod(filename, 0666 & config.umask)
         AttachFile._addLogEntry(self.request, 'ATTNEW', pagename, filename)
         return xmlrpclib.Boolean(1)
+    
+    # XXX END WARNING XXX
 
 
 class XmlRpc1(XmlRpcBase):
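
A client-side sketch of the attachment methods added above. The wiki exposes the xmlrpc_* handlers with the prefix stripped; the endpoint URL and the handling of the binary payload are assumptions, and the whole API is explicitly marked unstable in the warning above:

    import xmlrpclib

    wiki = xmlrpclib.ServerProxy("http://localhost:8080/?action=xmlrpc2")

    names = wiki.listAttachments("SomePage")            # list of attachment names
    data = wiki.getAttachment("SomePage", names[0])     # attachment content
    wiki.putAttachment("SomePage", "copy.txt", data)    # True on success
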
--- a/docs/CHANGES	Wed Jun 07 14:50:19 2006 +0200
+++ b/docs/CHANGES	Sat Jun 10 16:45:05 2006 +0200
@@ -35,6 +35,8 @@
     get reverted.
 
   Developer notes (these should be moved to the end in the release):
+    * killed "processors" (finally), formatter method changed to:
+      formatter.parser(parsername, lines)
     * refactored some actions to use ActionBase base class
     * moved "test" action from wikiaction to MoinMoin/action/
       (and use ActionBase)
@@ -44,8 +46,7 @@
     * moved wikirpc.py stuff to MoinMoin/xmlrpc/__init__.py
     * moved wikitest.py stuff to action/test.py (only used from there)
     * moved formatter/base.py to formatter/__init__.py (FormatterBase)
-    * killed "processors" (finally), formatter method changed to:
-      formatter.parser(parsername, lines)
+    * moved util/ParserBase.py to parser/ParserBase.py
     * moved / split request.py into MoinMoin/request/*
       Most stuff will be broken, please help fixing it (usually some imports
       will be missing and the adaptor script will need a change maybe):
@@ -56,6 +57,17 @@
       WSGI not
       FCGI not
       TWISTED not
+    * moved util/antispam.py to security/antispam.py,
+      moved util/autoadmin.py to security/autoadmin.py,
+      moved security.py to security/__init__.py,
+      moved wikiacl.py to security/__init__.py.
+    * moved logfile/logfile.py to logfile/__init__.py
+    * moved mailimport.py to mail/mailimport.py,
+      moved util/mail.py to mail/sendmail.py
+    * moved auth.py to auth/__init__.py,
+      moved util/sessionParser.py to auth/_PHPsessionParser.py,
+      split the auth code into single modules under auth/* - moin_session handling
+      and the built-in moin_login method are in auth/__init__.py.
     * added wikiutil.MimeType class (works internally with sanitized mime
       types because the official ones suck)
     * renamed parsers to module names representing sane mimetypes, e.g.:
@@ -79,6 +91,9 @@
       You need to set "cache_dir = '/some/farm/cachedir' in your farmconfig.
     * Added XMLRPC methods for attachment handling. Thanks to Matthew Gilbert.
     * Added TLS/SSL support to the standalone server. Thanks to Matthew Gilbert.
+      To use TLS/SSL support you must also install the TLSLite library
+      (http://trevp.net/tlslite/). Version 0.3.8 was used for development and
+      testing.
 
   Bugfixes:
     * on action "info" page, "revert" link will not be displayed for empty page
@@ -90,6 +105,7 @@
     * allow "-" in usernames (fixes "Invalid user name" msg)
     * fixed smiley caching bug (smileys didn't change theme)
     * fixed backtrace when user removed css_url entry from user_form_fields
+    * Fixed the output of macro and "attachment:" usages of the rst parser.
 
   Other changes:
     * we use (again) the same browser compatibility check as FCKeditor uses
@@ -657,7 +673,7 @@
        did not allow attachments, you now have to use:
        actions_excluded = ['AttachFile']
      * special users (All, Known, Trusted) in Groups are now supported
-     * MoinMoin.util.autoadmin SecurityPolicy added
+     * MoinMoin.security.autoadmin SecurityPolicy added
        When using this security policy, a user will get admin rights on his
        homepage (where pagename == username) and its sub pages. This is needed
        for the MyPages action, but can also get used for manual ACL changes.
@@ -1705,7 +1721,7 @@
       Nevertheless it is a very good idea to use a non-broken and more secure
       browser like Mozilla, Firefox or Opera!
 
-    * from MoinMoin.util.antispam import SecurityPolicy in your
+    * from MoinMoin.security.antispam import SecurityPolicy in your
       moin_config.py will protect your wiki from at least the known spammers.
       See MoinMoin:AntiSpamGlobalSolution for details.
 
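
For third-party code and wiki configs, the module moves listed above translate into import path changes roughly like this (a sketch covering only the paths named in this changeset):

    # old (1.5)                        new (1.6 development tree)
    # MoinMoin.util.antispam        -> MoinMoin.security.antispam
    # MoinMoin.util.autoadmin       -> MoinMoin.security.autoadmin
    # MoinMoin.wikiacl              -> MoinMoin.security
    # MoinMoin.util.mail            -> MoinMoin.mail.sendmail
    # MoinMoin.mailimport           -> MoinMoin.mail.mailimport
    # MoinMoin.util.sessionParser   -> MoinMoin.auth._PHPsessionParser
    # MoinMoin.logfile.logfile      -> MoinMoin.logfile
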
--- a/moin.spec	Wed Jun 07 14:50:19 2006 +0200
+++ b/moin.spec	Sat Jun 10 16:45:05 2006 +0200
@@ -44,6 +44,7 @@
 
 %prep
 %setup
+echo $RPM_BUILD_ROOT
 
 %build
 # replace python by python2 if python refers to version 1.5 on your system
@@ -52,15 +53,22 @@
 %install
 # replace python by python2 if python refers to version 1.5 on your system
 python setup.py install --root=$RPM_BUILD_ROOT --record=INSTALLED_FILES
-
 %clean
 rm -rf $RPM_BUILD_ROOT
 
-%files -f INSTALLED_FILES
+#%files -f INSTALLED_FILES   # Wrong: INSTALLED_FILES also contains directories.
+# This makes rpmbuild complain about files listed twice.
+# A good explanation is here: http://www.wideopen.com/archives/rpm-list/2001-February/msg00081.html
+%files
 %defattr(-,root,root)
+/usr
 %doc  README docs/CHANGES docs/INSTALL.html docs/licenses/COPYING
 
 %changelog
+* Thu Jun 08 2006 Johannes Poehlmann
+- Fix RPM build error "Files listed twice":
+  replaced the files list and just package all of /usr.
+
 * Fri Mar 05 2004 Florian Festi
 - Initial RPM release.
 
--- a/setup.py	Wed Jun 07 14:50:19 2006 +0200
+++ b/setup.py	Sat Jun 10 16:45:05 2006 +0200
@@ -137,6 +137,7 @@
                 'python': os.path.normpath(sys.executable),
                 'package': self.package_name,
                 'module': module,
+                'package_location': '/usr/lib/python/site-packages', # FIXME
             }
 
             self.announce("creating %s" % outfile)
@@ -149,9 +150,12 @@
                         'if     "%%_4ver%%" == "" %(python)s -c "from %(package)s.script.%(module)s import run; run()" %%*\n'
                         % script_vars)
                 else:
-                    file.write('#! %(python)s\n'
-                        'from %(package)s.script.%(module)s import run\n'
-                        'run()\n'
+                    file.write("#! %(python)s\n"
+                        "#Fix and uncomment those 2 lines if your moin command doesn't find the MoinMoin package:\n"
+                        "#import sys\n"
+                        "#sys.path.insert(0, '%(package_location)s')\n"
+                        "from %(package)s.script.%(module)s import run\n"
+                        "run()\n"
                         % script_vars)
             finally:
                 file.close()
@@ -218,6 +222,7 @@
         'MoinMoin.script.old',
         'MoinMoin.script.old.migration',
         'MoinMoin.script.old.xmlrpc-tools',
+        'MoinMoin.security',
         'MoinMoin.server',
         'MoinMoin.stats',
         'MoinMoin.support',
@@ -234,6 +239,15 @@
         'MoinMoin._tests',
     ],
 
+    # TODO package_dir and package_data only work for python >= 2.4
+    # in case we don't require python >= 2.4 for 1.6 release, we need to find
+    # a solution for python 2.3.x
+    'package_dir': { 'MoinMoin.i18n': 'MoinMoin/i18n', },
+    'package_data': { 'MoinMoin.i18n': ['README', 'Makefile', 'MoinMoin.pot', 'POTFILES.in',
+                                        '*.po',
+                                        'mo/*',
+                                        'tools/*',], },
+
     # Override certain command classes with our own ones
     'cmdclass': {
         'build_scripts': build_scripts_moin,
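
With the change above, every generated command script carries a commented-out sys.path hint. A sketch of what such a script ends up looking like, assuming a command backed by a hypothetical MoinMoin.script.moin module and the FIXME'd default package_location:

    #! /usr/bin/python
    #Fix and uncomment those 2 lines if your moin command doesn't find the MoinMoin package:
    #import sys
    #sys.path.insert(0, '/usr/lib/python/site-packages')
    from MoinMoin.script.moin import run
    run()
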
--- a/wiki/config/more_samples/ldap_smb_farmconfig.py	Wed Jun 07 14:50:19 2006 +0200
+++ b/wiki/config/more_samples/ldap_smb_farmconfig.py	Sat Jun 10 16:45:05 2006 +0200
@@ -180,7 +180,7 @@
 
     # Link spam protection for public wikis (uncomment to enable).
     # Needs a reliable internet connection.
-    from MoinMoin.util.autoadmin import SecurityPolicy
+    from MoinMoin.security.autoadmin import SecurityPolicy
 
 
     # Mail --------------------------------------------------------------
--- a/wiki/config/wikiconfig.py	Wed Jun 07 14:50:19 2006 +0200
+++ b/wiki/config/wikiconfig.py	Sat Jun 10 16:45:05 2006 +0200
@@ -92,7 +92,7 @@
 
     # Link spam protection for public wikis (Uncomment to enable)
     # Needs a reliable internet connection.
-    #from MoinMoin.util.antispam import SecurityPolicy
+    #from MoinMoin.security.antispam import SecurityPolicy
 
 
     # Mail --------------------------------------------------------------
--- a/wiki/config/wikifarm/farmconfig.py	Wed Jun 07 14:50:19 2006 +0200
+++ b/wiki/config/wikifarm/farmconfig.py	Sat Jun 10 16:45:05 2006 +0200
@@ -109,7 +109,7 @@
 
     # Link spam protection for public wikis (uncomment to enable).
     # Needs a reliable internet connection.
-    #from MoinMoin.util.antispam import SecurityPolicy
+    #from MoinMoin.security.antispam import SecurityPolicy
 
 
     # Mail --------------------------------------------------------------