changeset 978:f56db9746839
Merge with main.
author    Alexander Schremmer <alex AT alexanderweb DOT de>
date      Mon, 17 Jul 2006 13:29:58 +0200
parents   b258156f1288 (current diff), b8c1bb917748 (diff)
children  eaae4bcf60f3, 86f141856d2b
files     MoinMoin/Page.py
diffstat  64 files changed, 465 insertions(+), 407 deletions(-)
--- a/MoinMoin/Page.py Mon Jul 17 13:29:36 2006 +0200
+++ b/MoinMoin/Page.py Mon Jul 17 13:29:58 2006 +0200
@@ -966,13 +966,11 @@
         request = self.request
         request.http_headers(["Content-type: text/plain;charset=%s" % config.charset])
         if self.exists():
-            if not request.cacheable:
-                request.http_headers(request.nocache)
-            else:
-                # use the correct last-modified value from the on-disk file
-                # to ensure cacheability where supported
-                request.http_headers(["Last-Modified: " +
-                    timefuncs.formathttpdate(os.path.getmtime(self._text_filename()))])
+            # use the correct last-modified value from the on-disk file
+            # to ensure cacheability where supported. Because we are sending
+            # RAW (file) content, the file mtime is correct as Last-Modified header.
+            request.http_headers(["Last-Modified: " +
+                timefuncs.formathttpdate(os.path.getmtime(self._text_filename()))])
             text = self.get_raw_body()
             text = self.encodeTextMimeType(text)
@@ -1166,12 +1164,6 @@
         if not content_only:
             # send the document leader
-            # need to inform caches that content changes
-            # based on cookie (even if we aren't sending one now)
-            request.setHttpHeader("Vary: Cookie")
-            # we include User-Agent because a bot might be denied and get no content
-            request.setHttpHeader("Vary: User-Agent")
-
             # use "nocache" headers if we're using a method that
             # is not simply "display", or if a user is logged in
             # (which triggers personalisation features)
@@ -1182,6 +1174,9 @@
             else:
                 # use the correct last-modified value from the on-disk file
                 # to ensure cacheability where supported
+                # TODO: for page likes RecentChanges (generally: ALL pages
+                # with dynamically changing content), we MUST NOT use the
+                # page src mtime as last-modified header. XXX
                 request.http_headers(["Last-Modified: " +
                     timefuncs.formathttpdate(os.path.getmtime(self._text_filename()))])
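A note on the two Last-Modified hunks above: because this code path serves the raw page file, the file's mtime is a valid Last-Modified value. A minimal stdlib-only sketch of that header computation (MoinMoin's timefuncs.formathttpdate presumably produces the same RFC 1123 date; that mapping is an assumption here):

    import os, time

    def last_modified_header(filename):
        # HTTP-dates must be GMT with English day/month names,
        # e.g. "Mon, 17 Jul 2006 11:29:58 GMT" (strftime assumes a C locale here)
        mtime = os.path.getmtime(filename)
        return "Last-Modified: " + time.strftime("%a, %d %b %Y %H:%M:%S GMT",
                                                 time.gmtime(mtime))

The TODO added in the last hunk marks the caveat: for pages whose rendered content changes independently of the page source (RecentChanges and friends), the source mtime is not a truthful Last-Modified value.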
--- a/MoinMoin/action/Despam.py Mon Jul 17 13:29:36 2006 +0200
+++ b/MoinMoin/action/Despam.py Mon Jul 17 13:29:58 2006 +0200
@@ -94,7 +94,7 @@
 
 def revert_page(request, pagename, editor):
     if not request.user.may.revert(pagename):
-        return 
+        return
 
     log = editlog.EditLog(request, rootpagename=pagename)
--- a/MoinMoin/action/LikePages.py Mon Jul 17 13:29:36 2006 +0200
+++ b/MoinMoin/action/LikePages.py Mon Jul 17 13:29:58 2006 +0200
@@ -30,13 +30,13 @@
     # No matches
     if not matches:
         Page(request, pagename).send_page(request,
-                                          msg = _('No pages like "%s"!') % (pagename,))
+                                          msg=_('No pages like "%s"!') % (pagename,))
         return
 
     # One match - display it
     if len(matches) == 1:
         Page(request, matches.keys()[0]).send_page(request,
-                msg = _('Exactly one page like "%s" found, redirecting to page.') % (
+                msg=_('Exactly one page like "%s" found, redirecting to page.') % (
                 pagename,))
         return
@@ -47,7 +47,7 @@
     request.setContentLanguage(request.lang)
     request.theme.send_title(_('Pages like "%s"') % (pagename), pagename=pagename)
-    
+
     # Start content - IMPORTANT - without content div, there is no
     # direction support!
     request.write(request.formatter.startContent("content"))
@@ -99,13 +99,13 @@
         # Stop after 10 matches
         if found == 10:
             break
-    
+
     # Filter deleted pages or pages the user can't read from
     # matches. Order is important!
     for name in matches.keys():
         page = Page(request, name)
         if not (page.exists() and request.user.may.read(name)):
-            del matches[name] 
+            del matches[name]
 
     # Finally, merge both dicts
     matches.update(close_matches)
@@ -131,11 +131,11 @@
     @return: start, end, matches dict
     """
     if start_re is None:
-        start_re=re.compile('([%s][%s]+)' % (config.chars_upper,
-                                             config.chars_lower))
+        start_re = re.compile('([%s][%s]+)' % (config.chars_upper,
+                                               config.chars_lower))
     if end_re is None:
-        end_re=re.compile('([%s][%s]+)$' % (config.chars_upper,
-                                            config.chars_lower))
+        end_re = re.compile('([%s][%s]+)$' % (config.chars_upper,
+                                              config.chars_lower))
 
     # If we don't get results with wiki words matching, fall back to
     # simple first word and last word, using spaces.
@@ -144,14 +144,14 @@
     if match:
         start = match.group(1)
     else:
-        start = words[0]    
-        
+        start = words[0]
+
     match = end_re.search(pagename)
     if match:
         end = match.group(1)
     else:
-        end = words[-1]    
-        
+        end = words[-1]
+
     matches = {}
     subpage = pagename + '/'
@@ -201,7 +201,7 @@
     return matches
 
 
-def showMatches(pagename, request, start, end, matches, show_count = True):
+def showMatches(pagename, request, start, end, matches, show_count=True):
     keys = matches.keys()
     keys.sort()
     _showMatchGroup(request, matches, keys, 8, pagename, show_count)
@@ -211,7 +211,7 @@
     _showMatchGroup(request, matches, keys, 2, "...%s" % (end,), show_count)
 
 
-def _showMatchGroup(request, matches, keys, match, title, show_count = True):
+def _showMatchGroup(request, matches, keys, match, title, show_count=True):
     _ = request.getText
     matchcount = matches.values().count(match)
--- a/MoinMoin/action/LocalSiteMap.py Mon Jul 17 13:29:36 2006 +0200
+++ b/MoinMoin/action/LocalSiteMap.py Mon Jul 17 13:29:58 2006 +0200
@@ -22,7 +22,7 @@
     @copyright: 2001-2004 by Jürgen Hermann <jh@web.de>
     @license: GNU GPL, see COPYING for details.
 """
-        
+
 from MoinMoin import wikiutil
 from MoinMoin.Page import Page
@@ -101,10 +101,10 @@
     def new_kids(self, name):
         # does not recurse
         kids = []
-        for child in Page(self.request, name).getPageLinks(self.request):            
+        for child in Page(self.request, name).getPageLinks(self.request):
             if self.is_ok(child):
                 kids.append(child)
-        return kids        
+        return kids
 
     def new_node(self):
         self.numnodes = self.numnodes + 1
@@ -139,7 +139,7 @@
 
     def append(self, node):
         self.children.append(node)
-        
+
     def depth_first_visit(self, request, visitor, depth=0):
         visitor.visit(request, self.node, depth)
         for c in self.children:
--- a/MoinMoin/action/MyPages.py Mon Jul 17 13:29:36 2006 +0200
+++ b/MoinMoin/action/MyPages.py Mon Jul 17 13:29:58 2006 +0200
@@ -12,7 +12,7 @@
     _ = request.getText
 
     thispage = Page(request, pagename)
-    
+
     if request.user.valid:
         username = request.user.name
     else:
@@ -28,11 +28,11 @@
         wikiurl = wikiutil.mapURL(request, wikiurl)
         homepageurl = wikiutil.join_wiki(wikiurl, wikitail)
         request.http_redirect('%s?action=MyPages' % homepageurl)
-        
+
     homepage = Page(request, username)
     if not homepage.exists():
         return homepage.send_page(request,
-            msg = _('Please first create a homepage before creating additional pages.'))
+            msg=_('Please first create a homepage before creating additional pages.'))
 
     pagecontent = _("""\
 You can add some additional sub pages to your already existing homepage here.
@@ -59,11 +59,11 @@
     from MoinMoin.Page import Page
     from MoinMoin.parser.text_moin_wiki import Parser as WikiParser
     request.http_headers()
-        
+
     # This action generate data using the user language
     request.setContentLanguage(request.lang)
     request.theme.send_title(_('MyPages management', formatted=False), page=homepage)
-    
+
     # Start content - IMPORTANT - without content div, there is no direction support!
     request.write(request.formatter.startContent("content"))
@@ -71,7 +71,7 @@
     p = Page(request, "$$$")
     request.formatter.setPage(p)
     parser.format(request.formatter)
-    
+
     request.write(request.formatter.endContent())
     request.theme.send_footer(homepage.page_name)
     request.theme.send_closing_html()
--- a/MoinMoin/action/PackagePages.py Mon Jul 17 13:29:36 2006 +0200
+++ b/MoinMoin/action/PackagePages.py Mon Jul 17 13:29:58 2006 +0200
@@ -33,7 +33,7 @@
         may = self.request.user.may
         return (not self.__class__.__name__ in self.request.cfg.actions_excluded and
                 may.write(self.pagename))
-    
+
     def render(self):
         """ Render action
@@ -42,7 +42,7 @@
         """
         _ = self.request.getText
         form = self.request.form
-        
+
         if form.has_key('cancel'):
             # User canceled
             return self.page.send_page(self.request)
@@ -52,7 +52,7 @@
                 raise ActionError(_('You are not allowed to edit this page.'))
             elif not self.page.exists():
                 raise ActionError(_('This page is already deleted or was never created!'))
-    
+
             self.package()
         except ActionError, e:
             return self.page.send_page(self.request, msg=e.args[0])
@@ -62,11 +62,11 @@
         _ = self.request.getText
         form = self.request.form
         COMPRESSION_LEVEL = zipfile.ZIP_DEFLATED
-        
+
         # Get new name from form and normalize.
         pagelist = form.get('pagelist', [u''])[0]
         packagename = form.get('packagename', [u''])[0]
-        
+
         if not form.get('submit', [None])[0]:
             raise ActionError(self.makeform())
@@ -82,10 +82,10 @@
         pagelist = ', '.join([getattr(page, "page_name") for page in pages])
 
         target = wikiutil.taintfilename(packagename)
-        
+
         if not target:
             raise ActionError(self.makeform(_('Invalid filename "%s"!') % wikiutil.escape(packagename)))
-        
+
         # get directory, and possibly create it
         attach_dir = Page(self.request, self.page.page_name).getPagePath("attachments", check_create=1)
         fpath = os.path.join(attach_dir, target).encode(config.charset)
@@ -107,16 +107,16 @@
             zi = zipfile.ZipInfo(filename=str(cnt), date_time=datetime.fromtimestamp(timestamp).timetuple()[:6])
             zi.compress_type = COMPRESSION_LEVEL
             zf.writestr(zi, page.get_raw_body().encode("utf-8"))
-        
+
         script += [packLine(['Print', 'Thank you for using PackagePages!'])]
-        
+
         zf.writestr(MOIN_PACKAGE_FILE, u"\n".join(script).encode("utf-8"))
         zf.close()
 
         os.chmod(fpath, 0666 & config.umask)
 
         _addLogEntry(self.request, 'ATTNEW', self.pagename, target)
-        
+
         raise ActionError(_("Created the package %s containing the pages %s.") % (wikiutil.escape(target), wikiutil.escape(pagelist)))
 
     def makeform(self, error=""):
@@ -164,9 +164,10 @@
             </tr>
         </table>
 </form>''' % d
-        
-        return Dialog(self.request, content=form)
-    
+
+        return Dialog(self.request, content=form)
+
 def execute(pagename, request):
     """ Glue code for actions """
     PackagePages(pagename, request).render()
+
--- a/MoinMoin/action/RenamePage.py Mon Jul 17 13:29:36 2006 +0200
+++ b/MoinMoin/action/RenamePage.py Mon Jul 17 13:29:58 2006 +0200
@@ -30,7 +30,7 @@
     def is_allowed(self):
         may = self.request.user.may
         return may.write(self.pagename) and may.delete(self.pagename)
-    
+
     def check_condition(self):
         _ = self._
         if not self.page.exists():
--- a/MoinMoin/action/SpellCheck.py Mon Jul 17 13:29:36 2006 +0200
+++ b/MoinMoin/action/SpellCheck.py Mon Jul 17 13:29:58 2006 +0200
@@ -109,7 +109,7 @@
         # no new words checked
         return
     newwords = u' '.join(newwords)
-    
+
     # get the page contents
     lsw_page = PageEditor(request, request.cfg.page_local_spelling_words)
     words = lsw_page.get_raw_body()
@@ -168,7 +168,7 @@
 
     if badwords:
         badwords = badwords.keys()
-        badwords.sort(lambda x,y: cmp(x.lower(), y.lower()))
+        badwords.sort(lambda x, y: cmp(x.lower(), y.lower()))
 
         # build regex recognizing the bad words
         badwords_re = r'(^|(?<!\w))(%s)(?!\w)'
@@ -192,10 +192,10 @@
         if own_form:
             msg = msg + ('<form method="post" action="">\n'
                          '<input type="hidden" name="action" value="%s">\n') % action_name
-            
+
         checkbox = '<input type="checkbox" name="newwords" value="%(word)s">%(word)s '
         msg = msg + (
-            " ".join(map(lambda w, cb=checkbox: cb % {'word': wikiutil.escape(w),}, badwords)) +
+            " ".join(map(lambda w, cb=checkbox: cb % {'word': wikiutil.escape(w), }, badwords)) +
             '<p><input type="submit" name="button_newwords" value="%s"></p>' %
                 _('Add checked words to dictionary')
         )
--- a/MoinMoin/action/SubscribeUser.py Mon Jul 17 13:29:36 2006 +0200
+++ b/MoinMoin/action/SubscribeUser.py Mon Jul 17 13:29:58 2006 +0200
@@ -38,7 +38,7 @@
 
     from MoinMoin.formatter.text_html import Formatter
     formatter = Formatter(request)
-    
+
     result = subscribe_users(request, request.form['users'][0].split(","), pagename, formatter)
     request.write(result)
@@ -82,7 +82,7 @@
     result.extend([''.join([formatter.smiley('{X}'), formatter.text(" " + _("Not a user:") + " " + username), formatter.linebreak(preformatted=0)]) for username in usernamelist if username not in realusers])
 
     return ''.join(result)
-    
+
 def execute(pagename, request):
     _ = request.getText
     if not request.user.may.admin(pagename):
@@ -91,7 +91,7 @@
     elif not request.form.has_key('users'):
         show_form(pagename, request)
     else:
-        show_result(pagename,request)
+        show_result(pagename, request)
 
 if __name__ == '__main__':
     args = sys.argv
--- a/MoinMoin/action/backup.py Mon Jul 17 13:29:36 2006 +0200
+++ b/MoinMoin/action/backup.py Mon Jul 17 13:29:58 2006 +0200
@@ -25,13 +25,13 @@
         tar.add(path)
 
 def sendBackup(request):
-    """ Send compressed tar file """    
+    """ Send compressed tar file """
     dateStamp = time.strftime("%Y-%m-%d--%H-%M-%S-UTC", time.gmtime())
     filename = "%s-%s.tar.%s" % (request.cfg.siteid, dateStamp, request.cfg.backup_compression)
     request.http_headers([
-        "Content-Type: application/octet-stream",
-        "Content-Disposition: inline; filename=\"%s\"" % filename,])
-    
+        "Content-Type: application/octet-stream",
+        "Content-Disposition: inline; filename=\"%s\"" % filename, ])
+
     tar = tarfile.open(fileobj=request, mode="w|%s" % request.cfg.backup_compression)
     # allow GNU tar's longer file/pathnames
     tar.posix = False
@@ -75,7 +75,7 @@
     title = _('Wiki Backup / Restore')
     request.theme.send_title(title, form=request.form, pagename=pagename)
     request.write(request.formatter.startContent("content"))
-    
+
     request.write(_("""Some hints:
  * To restore a backup:
   * Restoring a backup will overwrite existing data, so be careful.
@@ -108,14 +108,14 @@
     'backup_button': _('Backup'),
     'restore_button': _('Restore'),
 })
-    
+
     request.write(request.formatter.endContent())
     request.theme.send_footer(pagename)
     request.theme.send_closing_html()
 
 def sendMsg(request, pagename, msg):
     from MoinMoin import Page
-    return Page.Page(request, pagename).send_page(request, msg=msg)    
+    return Page.Page(request, pagename).send_page(request, msg=msg)
 
 def backupAllowed(request):
     """ Return True if backup is allowed """
@@ -126,18 +126,18 @@
 
 def execute(pagename, request):
     _ = request.getText
-    if not backupAllowed(request): 
-        return sendMsg(request, pagename, 
+    if not backupAllowed(request):
+        return sendMsg(request, pagename,
                        msg=_('You are not allowed to do remote backup.'))
-    
+
     dowhat = request.form.get('do', [None])[0]
     if dowhat == 'backup':
         sendBackup(request)
     elif dowhat == 'restore':
         restoreBackup(request, pagename)
-    elif dowhat == None:
+    elif dowhat is None:
        sendBackupForm(request, pagename)
     else:
-        return sendMsg(request, pagename, 
+        return sendMsg(request, pagename,
                        msg=_('Unknown backup subaction: %s.' % dowhat))
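The backup action streams the archive straight to the client: tarfile accepts any object with a write() method as fileobj, and the "w|gz"/"w|bz2" pipe modes write strictly sequentially, so no seekable file is needed. A self-contained sketch of the same pattern (the Sink class and paths are illustrative stand-ins for the request object):

    import tarfile

    class Sink:
        """Stand-in for the request object: anything exposing write() works."""
        def __init__(self, fileobj):
            self.fileobj = fileobj
        def write(self, data):
            self.fileobj.write(data)

    out = Sink(open("backup.tar.gz", "wb"))
    tar = tarfile.open(fileobj=out, mode="w|gz")  # "|" selects streaming mode
    tar.posix = False   # as in the diff: allow GNU tar's longer file/pathnames
    tar.add("data")     # adds the directory recursively
    tar.close()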
--- a/MoinMoin/action/fckdialog.py Mon Jul 17 13:29:36 2006 +0200
+++ b/MoinMoin/action/fckdialog.py Mon Jul 17 13:29:58 2006 +0200
@@ -136,7 +136,7 @@
 </body>
 </html>
 ''')
-    
+
 def macro_list(request):
     from MoinMoin import macro
     macros = macro.getNames(request.cfg)
@@ -150,7 +150,7 @@
     macro_re = re.compile(
         r"\|\|(<.*?>)?\{\{\{\[\[" +
         r"(?P<prototype>(?P<macro>\w*).*)" +
-        r"\]\]\}\}\}\s*\|\|" + 
+        r"\]\]\}\}\}\s*\|\|" +
         r"\s*(?P<help>.*?)\s*\|\|\s*(?P<example>.*?)\s*\|\|$", re.U + re.M)
     help = {}
     for match in macro_re.finditer(content):
@@ -163,7 +163,7 @@
 
 def page_list(request):
     from MoinMoin import search
-    name = request.form.get("pagename",[""])[0]
+    name = request.form.get("pagename", [""])[0]
     if name:
         searchresult = search.searchPages(request, 't:"%s"' % name)
         pages = [p.page_name for p in searchresult.hits]
@@ -207,7 +207,7 @@
         from MoinMoin import search
         # XXX error handling!
         searchresult = search.searchPages(request, 't:"%s"' % name)
-        
+
         pages = [p.page_name for p in searchresult.hits]
         pages.sort()
         pages[0:0] = [name]
@@ -223,13 +223,13 @@
                               for page in pages])
     else:
         page_list = ""
-        
+
     # list of interwiki names
     interwiki_list = wikiutil.load_wikimap(request)
     interwiki = interwiki_list.keys()
     interwiki.sort()
     iwpreferred = request.cfg.interwiki_preferred
-    if not iwpreferred or iwpreferred and iwpreferred[-1] != None:
+    if not iwpreferred or iwpreferred and iwpreferred[-1] is not None:
         resultlist = iwpreferred
         for iw in interwiki:
             if not iw in iwpreferred:
@@ -374,11 +374,11 @@
         from MoinMoin import search
         # XXX error handling!
         searchresult = search.searchPages(request, 't:"%s"' % name)
-        
+
         pages = [p.page_name for p in searchresult.hits]
         pages.sort()
         pages[0:0] = [name]
-        page_list ='''
+        page_list = '''
        <tr>
         <td colspan=2>
          <select id="sctPagename" size="1" onchange="OnChangePagename(this.value);">
@@ -390,7 +390,7 @@
                               for page in pages])
     else:
         page_list = ""
-        
+
     # wiki url
     url_prefix = request.cfg.url_prefix
     scriptname = request.getScriptname()
--- a/MoinMoin/action/fullsearch.py Mon Jul 17 13:29:36 2006 +0200
+++ b/MoinMoin/action/fullsearch.py Mon Jul 17 13:29:58 2006 +0200
@@ -33,11 +33,11 @@
     titlesearch = isTitleSearch(request)
 
     # context is relevant only for full search
-    if titlesearch:        
-        context = 0 
+    if titlesearch:
+        context = 0
     else:
-        context = int(request.form.get('context', [0])[0])  
-        
+        context = int(request.form.get('context', [0])[0])
+
     # Get other form parameters
     needle = request.form.get(fieldname, [''])[0]
     case = int(request.form.get('case', [0])[0])
@@ -86,10 +86,10 @@
         results.sortByPagename()
     else:
         title = _('Full Text Search: "%s"')
-        results.sortByWeight() 
+        results.sortByWeight()
 
     request.theme.send_title(title % needle, form=request.form, pagename=pagename)
-    
+
     # Start content (important for RTL support)
     request.write(request.formatter.startContent("content"))
@@ -102,7 +102,7 @@
         output = results.pageListWithContext(request, request.formatter, info=info,
                                              context=context)
     else:
-        output = results.pageList(request, request.formatter, info=info)  
+        output = results.pageList(request, request.formatter, info=info)
 
     request.write(output)
     request.write(request.formatter.endContent())
--- a/MoinMoin/action/login.py Mon Jul 17 13:29:36 2006 +0200
+++ b/MoinMoin/action/login.py Mon Jul 17 13:29:58 2006 +0200
@@ -49,7 +49,7 @@
 
             # Require password
             else:
-                password = form.get('password',[None])[0]
+                password = form.get('password', [None])[0]
                 if not password:
                     error = _("Missing password. Please enter user name and password.")
                 else:
@@ -57,15 +57,15 @@
                         error = _("Sorry, login failed.")
 
             return self.page.send_page(request, msg=error)
-        
+
         else: # show login form
             request.http_headers()
             request.theme.send_title(_("Login"), pagename=self.pagename)
             # Start content (important for RTL support)
             request.write(request.formatter.startContent("content"))
-            
+
             request.write(userform.getLogin(request))
-            
+
             request.write(request.formatter.endContent())
             request.theme.send_footer(self.pagename)
             request.theme.send_closing_html()
--- a/MoinMoin/action/logout.py Mon Jul 17 13:29:36 2006 +0200
+++ b/MoinMoin/action/logout.py Mon Jul 17 13:29:58 2006 +0200
@@ -11,14 +11,14 @@
 from MoinMoin.Page import Page
 
 
-def execute( pagename, request):
+def execute(pagename, request):
     return LogoutHandler(pagename, request).handle()
 
 class LogoutHandler:
     def __init__(self, pagename, request):
         self.request = request
         self._ = request.getText
-        self.page = Page(request,pagename)
+        self.page = Page(request, pagename)
 
     def handle(self):
         _ = self._
--- a/MoinMoin/action/newpage.py Mon Jul 17 13:29:36 2006 +0200
+++ b/MoinMoin/action/newpage.py Mon Jul 17 13:29:58 2006 +0200
@@ -20,7 +20,7 @@
         self.referrer = referrer # The page the user came from
         self.pagename = self.request.form.get('pagename', [None])[0]
         self.nametemplate = self.request.form.get('nametemplate', ['%s'])[0]
-        self.nametemplate = self.nametemplate.replace('\x00','')
+        self.nametemplate = self.nametemplate.replace('\x00', '')
 
     def checkAndCombineArguments(self):
         """ Check arguments in form, return error msg
@@ -33,13 +33,13 @@
         if not self.pagename and need_replace:
             return _("Cannot create a new page without a page name."
                      " Please specify a page name.")
-        if need_replace:  
-            # generate a string that can be safely used as the pagename  
-            # template variable  
+        if need_replace:
+            # generate a string that can be safely used as the pagename
+            # template variable
             repl = 'A@'
             i = 0
             while self.nametemplate.find(repl) != -1:
-                repl += ['#','&','$','x','X',':','@'][i]
+                repl += ['#', '&', '$', 'x', 'X', ':', '@'][i]
                 i += 1
                 i = i % 7
             template = self.nametemplate.replace('%s', repl)
@@ -53,7 +53,7 @@
         else:
             self.pagename = template
         return ''
-        
+
     def checkPermissions(self):
         """ Check write permission in form, return error msg
@@ -69,7 +69,6 @@
 
     def render(self):
         """ Redirect to the new page, using edit action and template """
-        
         error = self.checkAndCombineArguments() or self.checkPermissions()
         if error:
             # Send back to the page you came from, with an error msg
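The repl loop touched above builds a placeholder that is guaranteed not to occur in the name template, so substituting it for '%s' cannot collide with literal template text. The same idea as a standalone helper (hypothetical name, same logic as the diff):

    def safe_placeholder(template):
        # grow the marker until the template no longer contains it
        repl = 'A@'
        i = 0
        while template.find(repl) != -1:
            repl += ['#', '&', '$', 'x', 'X', ':', '@'][i]
            i = (i + 1) % 7
        return repl

    # e.g. safe_placeholder(u'UserA@%s') == 'A@#'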
--- a/MoinMoin/action/sitemap.py Mon Jul 17 13:29:36 2006 +0200
+++ b/MoinMoin/action/sitemap.py Mon Jul 17 13:29:58 2006 +0200
@@ -25,7 +25,7 @@
     <priority>%(priority)s</priority>
 </url>
 """ % vars
-    
+
 def sitemap_url(request, base, page):
     """ return a sitemap <url>..</url> fragment for page object <page> """
     url = page.url(request)
@@ -71,7 +71,7 @@
 
     result = []
     result.append("""<urlset xmlns="http://www.google.com/schemas/sitemap/0.84">\n""")
-    
+
     # we include the / url as an important and often changed URL
     result.append(make_url_xml({
         'base': base,
--- a/MoinMoin/action/test.py Mon Jul 17 13:29:36 2006 +0200
+++ b/MoinMoin/action/test.py Mon Jul 17 13:29:58 2006 +0200
@@ -90,7 +90,7 @@
     # TODO: do we need to hide the error when _tests can't be
     # imported? It might make it hard to debug the tests package
     # itself.
-    try: 
+    try:
         from MoinMoin import _tests
     except ImportError:
         request.write(" *** The unit tests are not available ***")
--- a/MoinMoin/action/thread_monitor.py Mon Jul 17 13:29:36 2006 +0200
+++ b/MoinMoin/action/thread_monitor.py Mon Jul 17 13:29:58 2006 +0200
@@ -9,10 +9,10 @@
 """
 import os, time
 from StringIO import StringIO
-    
+
 from MoinMoin import wikiutil
 from MoinMoin.util import thread_monitor
-    
+
 def execute_fs(pagename, request):
     if thread_monitor.hook_enabled:
         s = StringIO()
@@ -43,10 +43,10 @@
     thread_monitor.trigger_dump(s)
     time.sleep(5) # allow for all threads to dump to request
     request.write(wikiutil.escape(s.getvalue()))
-    
+
     request.write('</pre>')
     request.theme.send_footer(pagename)
     request.theme.send_closing_html()
-    
+
 execute = execute_fs
--- a/MoinMoin/auth/_PHPsessionParser.py Mon Jul 17 13:29:36 2006 +0200
+++ b/MoinMoin/auth/_PHPsessionParser.py Mon Jul 17 13:29:58 2006 +0200
@@ -43,12 +43,12 @@
         first_data = string[start+2:header_end]
     else:
         first_data = None
-    
+
     #print "Saw type %r, first_data is %r." % (val_type, first_data)
     if val_type == 'a': # array (in Python rather a mixture of a list and a dict)
         i = 0
         items = []
-        
+
         current_pos = header_end+2
         data = string
         while i != (int(first_data) * 2):
@@ -56,7 +56,7 @@
             items.append(item)
             i += 1
             current_pos += 1
-        
+
         t_list = list(transformList(items))
         try:
             result = dict(t_list) # note that dict does not retain the order
@@ -64,7 +64,7 @@
             result = list(t_list)
             #print "Warning, could not convert to dict: %r" % (result, )
         return result, current_pos
-    
+
     if val_type == 's': # string
         current_pos = header_end+2
         end = current_pos + int(first_data)
@@ -91,7 +91,7 @@
 
     if val_type == "N": # Null, called None in Python
         return None, start+1
-    
+
     return UnknownObject(start), start+1
 
 def parseSession(boxed):
@@ -134,10 +134,11 @@
 if __name__ == '__main__':
     # testing code
     import time
-    a=time.clock()
-    
+    a = time.clock()
+
     #print s
     p_s = loadSession("...")
     import pprint; pprint.pprint(p_s)
     print time.clock() - a
     print listSessions()
+
--- a/MoinMoin/auth/__init__.py Mon Jul 17 13:29:36 2006 +0200
+++ b/MoinMoin/auth/__init__.py Mon Jul 17 13:29:58 2006 +0200
@@ -80,7 +80,7 @@
         path = '/'
     c[cookie_name]['path'] = path
     # Set expires for older clients
-    c[cookie_name]['expires'] = request.httpDate(when=expires, rfc='850')    
+    c[cookie_name]['expires'] = request.httpDate(when=expires, rfc='850')
     return c.output()
 
 def setCookie(request, u, cookie_name, cookie_string):
@@ -96,7 +96,7 @@
                 < 0    -n hours, ignoring user 'remember_me' setting
     """
     # Calculate cookie maxage and expires
-    lifetime = int(request.cfg.cookie_lifetime) * 3600 
+    lifetime = int(request.cfg.cookie_lifetime) * 3600
     forever = 10 * 365 * 24 * 3600 # 10 years
     now = time.time()
     if not lifetime:
@@ -109,7 +109,7 @@
     elif lifetime < 0:
         maxage = (-lifetime)
         expires = now + maxage
-    
+
     cookie = makeCookie(request, cookie_name, cookie_string, maxage, expires)
     # Set cookie
     request.setHttpHeader(cookie)
@@ -141,7 +141,7 @@
     maxage = 0
     # Set expires to one year ago for older clients
     expires = time.time() - (3600 * 24 * 365) # 1 year ago
-    cookie = makeCookie(request, cookie_name, cookie_string, maxage, expires) 
+    cookie = makeCookie(request, cookie_name, cookie_string, maxage, expires)
     # Set cookie
     request.setHttpHeader(cookie)
     # IMPORTANT: Prevent caching of current page and cookie
@@ -159,7 +159,7 @@
     verbose = False
     if hasattr(cfg, 'moin_login_verbose'):
         verbose = cfg.moin_login_verbose
-    
+
     #request.log("auth.moin_login: name=%s login=%r logout=%r user_obj=%r" % (username, login, logout, user_obj))
 
     if login:
@@ -180,7 +180,7 @@
         ongoing sessions, and logout. Use another method for initial login.
     """
     import base64
-    
+
     username = kw.get('name')
     login = kw.get('login')
     logout = kw.get('logout')
@@ -192,7 +192,7 @@
         verbose = cfg.moin_session_verbose
 
     cookie_name = MOIN_SESSION
-    
+
     if verbose: request.log("auth.moin_session: name=%s login=%r logout=%r user_obj=%r" % (username, login, logout, user_obj))
 
     if login:
@@ -223,14 +223,14 @@
         # No valid cookie
         if verbose: request.log("either no cookie or no %s key" % cookie_name)
         return user_obj, True
-    
+
     try:
         cookie_hash, cookie_body = cookie[cookie_name].value.split(':', 1)
     except ValueError:
         # Invalid cookie
         if verbose: request.log("invalid cookie format: (%s)" % cookie[cookie_name].value)
         return user_obj, True
-    
+
     if cookie_hash != make_security_hash(request, cookie_body):
         # Invalid cookie
         # XXX Cookie clear here???
@@ -239,7 +239,7 @@
     # We can trust the cookie
     if verbose: request.log("Cookie OK, authenticated.")
-    params = { 'username': '', 'id': '' }
+    params = {'username': '', 'id': '', }
     cookie_pairs = cookie_body.split(":")
     for key, value in [pair.split("=", 1) for pair in cookie_pairs]:
         params[key] = base64.decodestring(value) # assuming all values are base64 encoded
@@ -251,7 +251,7 @@
                   auth_method='moin_session',
                   auth_attribs=(),
                   )
-    
+
     if logout:
         if verbose: request.log("Logout requested, setting u invalid and 'deleting' cookie")
         u.valid = 0 # just make user invalid, but remember him
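Several of these hunks touch the moin_session cookie, which has the form "hash:key1=b64value1:key2=b64value2": a security hash over the body, followed by base64-encoded key/value pairs. A minimal standalone version of the verify-and-parse step (make_security_hash is MoinMoin's own helper; the HMAC construction below is an assumed stand-in, not its actual definition):

    import base64
    import hmac
    import hashlib

    SECRET = 'server-side secret'          # stand-in for the wiki's secret key

    def make_security_hash(body):
        # assumption: some keyed hash over the cookie body
        return hmac.new(SECRET, body, hashlib.sha1).hexdigest()

    def parse_session_cookie(value):
        """Verify 'hash:k1=b64:k2=b64' and return decoded params, or None."""
        cookie_hash, cookie_body = value.split(':', 1)
        if cookie_hash != make_security_hash(cookie_body):
            return None                    # tampered or stale cookie
        params = {'username': '', 'id': ''}
        for pair in cookie_body.split(':'):
            key, val = pair.split('=', 1)
            params[key] = base64.decodestring(val)
        return params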
--- a/MoinMoin/auth/http.py Mon Jul 17 13:29:36 2006 +0200
+++ b/MoinMoin/auth/http.py Mon Jul 17 13:29:58 2006 +0200
@@ -27,9 +27,9 @@
     elif not isinstance(request, CLI.Request):
         env = request.env
-        auth_type = env.get('AUTH_TYPE','')
-        if auth_type in ['Basic', 'Digest', 'NTLM', 'Negotiate',]:
-            username = env.get('REMOTE_USER','')
+        auth_type = env.get('AUTH_TYPE', '')
+        if auth_type in ['Basic', 'Digest', 'NTLM', 'Negotiate', ]:
+            username = env.get('REMOTE_USER', '')
             if auth_type in ('NTLM', 'Negotiate',):
                 # converting to standard case so the user can even enter wrong case
                 # (added since windows does not distinguish between e.g.
--- a/MoinMoin/auth/interwiki.py Mon Jul 17 13:29:36 2006 +0200
+++ b/MoinMoin/auth/interwiki.py Mon Jul 17 13:29:58 2006 +0200
@@ -24,14 +24,14 @@
 
     if err or wikitag not in request.cfg.trusted_wikis:
         return user_obj, True
-    
+
     if password:
         homewiki = xmlrpclib.Server(wikiurl + "?action=xmlrpc2")
         account_data = homewiki.getUser(wikitail, password)
         if isinstance(account_data, str):
             # show error message
             return user_obj, True
-        
+
         u = user.User(request, name=username)
         for key, value in account_data.iteritems():
             if key not in ["may", "id", "valid", "trusted"
@@ -45,6 +45,6 @@
     else:
         pass
         # XXX redirect to homewiki
-    
+
     return user_obj, True
--- a/MoinMoin/auth/ldap_login.py Mon Jul 17 13:29:36 2006 +0200
+++ b/MoinMoin/auth/ldap_login.py Mon Jul 17 13:29:58 2006 +0200
@@ -27,14 +27,14 @@
 
     cfg = request.cfg
     verbose = cfg.ldap_verbose
-    
+
     if verbose: request.log("got name=%s login=%r logout=%r" % (username, login, logout))
-    
+
     # we just intercept login and logout for ldap, other requests have to be
     # handled by another auth handler
     if not login and not logout:
         return user_obj, True
-    
+
     u = None
     coding = cfg.ldap_coding
     try:
@@ -68,14 +68,14 @@
         dn, ldap_dict = lusers[0]
         if verbose:
             request.log("LDAP: debug lusers = %r" % lusers)
-            for key,val in ldap_dict.items():
+            for key, val in ldap_dict.items():
                 request.log("LDAP: %s: %s" % (key, val))
         try:
             if verbose: request.log("LDAP: DN found is %s, trying to bind with pw" % dn)
             l.simple_bind_s(dn, password.encode(coding))
             if verbose: request.log("LDAP: Bound with dn %s (username: %s)" % (dn, username))
-            
+
             email = ldap_dict.get(cfg.ldap_email_attribute, [''])[0]
             email = email.decode(coding)
             sn, gn = ldap_dict.get('sn', [''])[0], ldap_dict.get('givenName', [''])[0]
@@ -85,14 +85,14 @@
             elif sn:
                 aliasname = sn
             aliasname = aliasname.decode(coding)
-            
+
             u = user.User(request, auth_username=username, password="{SHA}NotStored", auth_method='ldap', auth_attribs=('name', 'password', 'email', 'mailto_author',))
             u.name = username
             u.aliasname = aliasname
             u.email = email
             u.remember_me = 0 # 0 enforces cookie_lifetime config param
             if verbose: request.log("LDAP: creating userprefs with name %s email %s alias %s" % (username, email, aliasname))
-            
+
         except ldap.INVALID_CREDENTIALS, err:
             request.log("LDAP: invalid credentials (wrong password?) for dn %s (username: %s)" % (dn, username))
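The hunks above sit inside the classic LDAP search-then-bind flow: look the user's DN up first, then attempt a bind with that DN and the supplied password; only the successful bind proves the credentials. A bare-bones python-ldap sketch of the same flow (server URL, base DN, and filter are placeholders, and a real implementation must escape the filter input):

    import ldap

    def ldap_authenticate(username, password):
        l = ldap.initialize("ldap://ldap.example.org")       # placeholder URL
        l.simple_bind_s("", "")                              # anonymous search bind
        lusers = l.search_st("ou=people,dc=example,dc=org",  # placeholder base DN
                             ldap.SCOPE_SUBTREE,
                             "(uid=%s)" % username)
        if len(lusers) != 1:
            return False                   # unknown or ambiguous user
        dn, ldap_dict = lusers[0]
        try:
            l.simple_bind_s(dn, password)  # the actual credential check
        except ldap.INVALID_CREDENTIALS:
            return False
        return True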
--- a/MoinMoin/auth/mysql_group.py Mon Jul 17 13:29:36 2006 +0200
+++ b/MoinMoin/auth/mysql_group.py Mon Jul 17 13:29:58 2006 +0200
@@ -2,8 +2,6 @@
 """
     MoinMoin - auth plugin doing a check against MySQL group db
 
-    ...
-
     @copyright: 2006 by Nick Phillips
     @license: GNU GPL, see COPYING for details.
 """
@@ -17,7 +15,7 @@
     We don't worry about the type of request (login, logout, neither).
     We just check user is part of authorized group.
     """
-    
+
     username = kw.get('name')
 #    login = kw.get('login')
 #    logout = kw.get('logout')
@@ -28,7 +26,7 @@
     if hasattr(cfg, 'mysql_group_verbose'):
         verbose = cfg.mysql_group_verbose
-    
+
     if verbose: request.log("auth.mysql_group: name=%s user_obj=%r" % (username, user_obj))
 
     # Has any other method successfully authenticated?
@@ -53,7 +51,7 @@
         request.log("mysql_group: authorization failed due to exception connecting to DB, traceback follows...")
         request.log(''.join(traceback.format_exception(*info)))
         return None, False
-    
+
     c = m.cursor()
     c.execute(cfg.mysql_group_query, user_obj.auth_username)
     results = c.fetchall()
--- a/MoinMoin/auth/php_session.py Mon Jul 17 13:29:36 2006 +0200
+++ b/MoinMoin/auth/php_session.py Mon Jul 17 13:29:58 2006 +0200
@@ -24,7 +24,7 @@
         @param s_path: The path where the PHP sessions are stored.
         @param s_prefix: The prefix of the session files.
         """
-        
+
         self.s_path = s_path
         self.s_prefix = s_prefix
         self.apps = apps
@@ -34,18 +34,18 @@
         """ Extracts name, fullname and email from the session. """
         username = session['egw_session']['session_lid'].split("@", 1)[0]
         known_accounts = session['egw_info_cache']['accounts']['cache']['account_data']
-        
+
         # if the next line breaks, then the cache was not filled with the current
         # user information
         user_info = [value for key, value in known_accounts.items()
                      if value['account_lid'] == username][0]
         name = user_info.get('fullname', '')
         email = user_info.get('email', '')
-        
+
         dec = lambda x: x and x.decode("iso-8859-1")
-        
+
         return dec(username), dec(email), dec(name)
-    
+
     user_obj = kw.get('user_obj')
     try:
         cookie = Cookie.SimpleCookie(request.saved_cookie)
@@ -61,9 +61,9 @@
                 break
         else:
             return user_obj, True
-        
+
         user = user.User(request, name=username, auth_username=username)
-        
+
         changed = False
         if name != user.aliasname:
             user.aliasname = name
@@ -71,7 +71,7 @@
         if email != user.email:
             user.email = email
             changed = True
-        
+
         if user:
             user.create_or_update(changed)
         if user and user.valid:
--- a/MoinMoin/auth/smb_mount.py Mon Jul 17 13:29:36 2006 +0200
+++ b/MoinMoin/auth/smb_mount.py Mon Jul 17 13:29:58 2006 +0200
@@ -21,7 +21,7 @@
     cfg = request.cfg
     verbose = cfg.smb_verbose
     if verbose: request.log("got name=%s login=%r logout=%r" % (username, login, logout))
-    
+
     # we just intercept login to mount and logout to umount the smb share
     if login or logout:
         import os, pwd, subprocess
@@ -36,7 +36,7 @@
             cmd = u"sudo mount -t cifs -o user=%(user)s,domain=%(domain)s,uid=%(uid)d,dir_mode=%(dir_mode)s,file_mode=%(file_mode)s,iocharset=%(iocharset)s //%(server)s/%(share)s %(mountpoint)s >>%(log)s 2>&1"
         elif logout:
             cmd = u"sudo umount %(mountpoint)s >>%(log)s 2>&1"
-            
+
         cmd = cmd % {
             'user': username,
             'uid': web_uid,
--- a/MoinMoin/i18n/__init__.py Mon Jul 17 13:29:36 2006 +0200
+++ b/MoinMoin/i18n/__init__.py Mon Jul 17 13:29:58 2006 +0200
@@ -284,8 +284,6 @@
     if not request.cfg.language_ignore_browser:
         for lang in browserLanguages(request):
             if lang in available:
-                if request.http_accept_language:
-                    request.setHttpHeader('Vary: Accept-Language')
                 return lang
 
     # Or return the wiki default language...
--- a/MoinMoin/macro/Action.py Mon Jul 17 13:29:36 2006 +0200
+++ b/MoinMoin/macro/Action.py Mon Jul 17 13:29:58 2006 +0200
@@ -32,16 +32,16 @@
         self.request = macro.request
         self.args = self.getArgs(args)
 
-    def getArgs(self, string):
+    def getArgs(self, argstr):
         """ Temporary function until Oliver Graf args parser is finished
 
         @param string: string from the wiki markup [[NewPage(string)]]
         @rtype: dict
         @return: dictionary with macro options
         """
-        if not string:
+        if not argstr:
             return {}
-        args = [s.strip() for s in string.split(',')]
+        args = [s.strip() for s in argstr.split(',')]
         args = dict(zip(self.arguments, args))
         return args
@@ -56,10 +56,10 @@
         # Default to show page instead of an error message (too lazy to
         # do an error message now).
         action = self.args.get('action', 'show')
-        
+
         # Use translated text or action name
         text = self.args.get('text', action)
-        text = _(text, formatted=False)        
+        text = _(text, formatted=False)
 
         # Escape user input
         action = wikiutil.escape(action, 1)
--- a/MoinMoin/macro/AttachInfo.py Mon Jul 17 13:29:36 2006 +0200
+++ b/MoinMoin/macro/AttachInfo.py Mon Jul 17 13:29:58 2006 +0200
@@ -15,6 +15,6 @@
     pagename = macro.formatter.page.page_name
     if args:
         pagename = args
-    result = info(pagename, macro.request);
+    result = info(pagename, macro.request)
     return result
--- a/MoinMoin/macro/AttachList.py Mon Jul 17 13:29:36 2006 +0200
+++ b/MoinMoin/macro/AttachList.py Mon Jul 17 13:29:58 2006 +0200
@@ -15,6 +15,6 @@
     pagename = macro.formatter.page.page_name
     if args:
         pagename = args
-    result = _build_filelist(macro.request, pagename, 0, 1);
+    result = _build_filelist(macro.request, pagename, 0, 1)
     return result
--- a/MoinMoin/macro/EditTemplates.py Mon Jul 17 13:29:36 2006 +0200
+++ b/MoinMoin/macro/EditTemplates.py Mon Jul 17 13:29:58 2006 +0200
@@ -32,5 +32,6 @@
             result = result + self.formatter.bullet_list(0)
         return result
-    
+
     return ''
+
--- a/MoinMoin/macro/EditedSystemPages.py Mon Jul 17 13:29:36 2006 +0200
+++ b/MoinMoin/macro/EditedSystemPages.py Mon Jul 17 13:29:58 2006 +0200
@@ -31,7 +31,7 @@
         # Get page filtered page list. We don't need to filter by
         # exists, because our filter check this already.
         pages = self.request.rootpage.getPageList(filter=filter, exists=0)
-        
+
         # Format as numberd list, sorted by page name
         pages.sort()
         result = []
@@ -44,7 +44,7 @@
             result.append(f.pagelink(0, name))
             result.append(f.listitem(0))
         result.append(f.number_list(0))
-        
+
         return ''.join(result)
--- a/MoinMoin/macro/EmbedObject.py Mon Jul 17 13:29:36 2006 +0200
+++ b/MoinMoin/macro/EmbedObject.py Mon Jul 17 13:29:58 2006 +0200
@@ -89,7 +89,7 @@
     kw["loop"] = "true"
     kw["quality"] = "high"
 
-    for arg in args :
+    for arg in args:
         if '=' in arg:
             kw_count += 1
             key, value = arg.split('=', 1)
@@ -139,7 +139,7 @@
            "quality": kw["quality"],
            "file": url,
         }
-    elif mime_type == "image/svg+xml": 
+    elif mime_type == "image/svg+xml":
         return '''
 <OBJECT CLASSID=""
         WIDTH="%(width)s"
@@ -188,7 +188,7 @@
 <PARAM NAME="Zoom" VALUE="-1">
 <PARAM NAME = "SRC" VALUE = "%(file)s">Your browser cannot display Visio</OBJECT>''' % {
     "width": kw['width'],
-    "height": kw['height'], 
+    "height": kw['height'],
     "file": url,
 }
     else:
--- a/MoinMoin/macro/FootNote.py Mon Jul 17 13:29:36 2006 +0200
+++ b/MoinMoin/macro/FootNote.py Mon Jul 17 13:29:58 2006 +0200
@@ -19,7 +19,7 @@
     # create storage for footnotes
     if not hasattr(macro.request, 'footnotes'):
         macro.request.footnotes = []
-        
+
     if not args:
         return emit_footnotes(macro.request, macro.formatter)
     else:
@@ -29,7 +29,7 @@
         macro.request.footnotes.append((args, fn_id))
         return "%s%s%s%s%s" % (
             macro.formatter.sup(1),
-            macro.formatter.anchorlink(1, 'fndef' + fn_id, id = 'fnref' + fn_id),
+            macro.formatter.anchorlink(1, 'fndef' + fn_id, id='fnref' + fn_id),
             macro.formatter.text(str(idx+1)),
             macro.formatter.anchorlink(0),
             macro.formatter.sup(0),)
@@ -51,15 +51,15 @@
             # Add item
             result.append(formatter.listitem(1))
             result.append(formatter.paragraph(1)) # see [1]
-            
+
             fn_id = request.footnotes[idx][1]
             result.append(formatter.anchorlink(1,
                 'fnref' + fn_id, id='fndef' + fn_id))
             result.append(formatter.text(str(idx + 1)))
             result.append(formatter.anchorlink(0))
             result.append(formatter.text(" "))
-            
-            out=StringIO.StringIO()
+
+            out = StringIO.StringIO()
             request.redirect(out)
             parser = WikiParser(request.footnotes[idx][0], request,
                                 line_anchors=False)
@@ -69,7 +69,7 @@
             del out
             # [1] paragraph is automagically closed by wiki parser!
             result.append(formatter.listitem(0))
-            
+
         result.append(formatter.bullet_list(0))
 
         # Finish div
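The StringIO hunk shows MoinMoin's output-capture idiom: request.redirect(out) reroutes request.write() into a buffer so parser/formatter output can be collected as a string and embedded elsewhere. The same idea in a self-contained form (this Request class is a simplified stand-in, not MoinMoin's actual API):

    import sys
    from StringIO import StringIO

    class Request:
        def __init__(self):
            self.targets = [sys.stdout]
        def write(self, data):
            self.targets[-1].write(data)   # always write to the current target
        def redirect(self, fileobj):
            self.targets.append(fileobj)   # capture subsequent writes
        def clearRedirect(self):
            self.targets.pop()

    req = Request()
    buf = StringIO()
    req.redirect(buf)
    req.write("rendered footnote markup")
    req.clearRedirect()
    captured = buf.getvalue()              # 'rendered footnote markup'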
--- a/MoinMoin/macro/FullSearch.py Mon Jul 17 13:29:36 2006 +0200
+++ b/MoinMoin/macro/FullSearch.py Mon Jul 17 13:29:58 2006 +0200
@@ -48,7 +48,7 @@
     # TODO: search should implement those errors message for clients
     elif needle.isspace():
         err = _('Please use a more selective search term instead of '
-                '{{{"%s"}}}') % needle        
+                '{{{"%s"}}}') % needle
         return '<span class="error">%s</span>' % err
 
     needle = needle.strip()
--- a/MoinMoin/macro/ImageLink.py Mon Jul 17 13:29:36 2006 +0200
+++ b/MoinMoin/macro/ImageLink.py Mon Jul 17 13:29:58 2006 +0200
@@ -113,7 +113,7 @@
     argc = len(args)
     kw_count = 0
     kw = {} # create a dictionary for the formatter.image call
-    for arg in args :
+    for arg in args:
         if '=' in arg:
             kw_count += 1
             key, value = arg.split('=', 1)
@@ -132,7 +132,7 @@
             target = AttachFile.getAttachUrl(pagename, image, request)
     else:
         target = None
-    
+
     if _is_URL(image):
         kw['src'] = image
     else:
@@ -159,11 +159,11 @@
 
     if target is None:
         target = kw['src']
-    
+
     if argc == 1:
         return "%s%s%s" % (formatter.url(1, kw['src']),
                            formatter.image(**kw),
-                           formatter.url(0))     
+                           formatter.url(0))
 
     if _is_URL(target):
         return "%s%s%s" % (formatter.url(1, target),
--- a/MoinMoin/macro/LikePages.py Mon Jul 17 13:29:36 2006 +0200
+++ b/MoinMoin/macro/LikePages.py Mon Jul 17 13:29:58 2006 +0200
@@ -13,7 +13,7 @@
 def execute(macro, args):
     request = macro.request
     pagename = macro.formatter.page.page_name
-    
+
     # Get matches
     start, end, matches = LikePages.findMatches(pagename, request)
 
@@ -22,3 +22,4 @@
         return request.redirectedOutput(LikePages.showMatches, pagename, request, start,
                                         end, matches, False)
     return args
+
--- a/MoinMoin/macro/MonthCalendar.py Mon Jul 17 13:29:36 2006 +0200
+++ b/MoinMoin/macro/MonthCalendar.py Mon Jul 17 13:29:58 2006 +0200
@@ -145,7 +145,7 @@
     [[MonthCalendar(,,,,,,MonthCalendarTemplate)]]
 
 """
 
-Dependencies = ['namespace','time']
+Dependencies = ['namespace', 'time', ]
 
 from MoinMoin import wikiutil
 from MoinMoin.Page import Page
@@ -158,13 +158,13 @@
 # XXX change here ----------------vvvvvv
 calendar.setfirstweekday(calendar.MONDAY)
 
-def cliprgb(r,g,b): # don't use 255!
-    if r < 0: r=0
-    if r > 254: r=254
-    if b < 0: b=0
-    if b > 254: b=254
-    if g < 0: g=0
-    if g > 254: g=254
+def cliprgb(r, g, b): # don't use 255!
+    if r < 0: r = 0
+    if r > 254: r = 254
+    if b < 0: b = 0
+    if b > 254: b = 254
+    if g < 0: g = 0
+    if g > 254: g = 254
     return r, g, b
 
 def yearmonthplusoffset(year, month, offset):
@@ -198,7 +198,7 @@
         parmmonth = int(strmonth)
     else:
         parmmonth = defmonth
-    
+
     stroffset = args.group('offset')
     if stroffset:
         parmoffset = int(stroffset)
@@ -229,7 +229,7 @@
     else:
         parmtemplate = deftemplate
     return parmpagename, parmyear, parmmonth, parmoffset, parmoffset2, parmheight6, parmanniversary, parmtemplate
-    
+
 # FIXME: vvvvvv is there a better way for matching a pagename ?
 _arg_basepage = r'\s*(?P<basepage>[^, ]+)?\s*'
 _arg_year = r',\s*(?P<year>\d+)?\s*'
@@ -237,11 +237,11 @@
 _arg_offset = r',\s*(?P<offset>[+-]?\d+)?\s*'
 _arg_offset2 = r',\s*(?P<offset2>[+-]?\d+)?\s*'
 _arg_height6 = r',\s*(?P<height6>[+-]?\d+)?\s*'
-_arg_anniversary = r',\s*(?P<anniversary>[+-]?\d+)?\s*' 
+_arg_anniversary = r',\s*(?P<anniversary>[+-]?\d+)?\s*'
 _arg_template = r',\s*(?P<template>[^, ]+)?\s*' # XXX see basepage comment
 _args_re_pattern = r'^(%s)?(%s)?(%s)?(%s)?(%s)?(%s)?(%s)?(%s)?$' % \
-    (_arg_basepage,_arg_year,_arg_month, \
-     _arg_offset,_arg_offset2,_arg_height6,_arg_anniversary,_arg_template)
+    (_arg_basepage, _arg_year, _arg_month, \
+     _arg_offset, _arg_offset2, _arg_height6, _arg_anniversary, _arg_template)
 
 
 def execute(macro, text):
@@ -254,7 +254,7 @@
         return ''
 
     args_re = re.compile(_args_re_pattern)
-    
+
     currentyear, currentmonth, currentday, h, m, s, wd, yd, ds = request.user.getTime(time.time())
     thispage = formatter.page.page_name
     # does the url have calendar params (= somebody has clicked on prev/next links in calendar) ?
@@ -285,20 +285,20 @@
     # does url have calendar params and is THIS the right calendar to modify (we can have multiple
     # calendars on the same page)?
     #if has_calparms and (cparmpagename,cparmyear,cparmmonth,cparmoffset) == (parmpagename,parmyear,parmmonth,parmoffset):
-    
+
    # move all calendars when using the navigation:
     if has_calparms and cparmpagename == parmpagename:
-        year,month = yearmonthplusoffset(parmyear, parmmonth, parmoffset + cparmoffset2)
+        year, month = yearmonthplusoffset(parmyear, parmmonth, parmoffset + cparmoffset2)
         parmoffset2 = cparmoffset2
         parmtemplate = cparmtemplate
     else:
-        year,month = yearmonthplusoffset(parmyear, parmmonth, parmoffset)
+        year, month = yearmonthplusoffset(parmyear, parmmonth, parmoffset)
 
     # get the calendar
     monthcal = calendar.monthcalendar(year, month)
 
     # european / US differences
-    months = ('January','February','March','April','May','June','July','August','September','October','November','December')
+    months = ('January', 'February', 'March', 'April', 'May', 'June', 'July', 'August', 'September', 'October', 'November', 'December')
     # Set things up for Monday or Sunday as the first day of the week
     if calendar.firstweekday() == calendar.MONDAY:
         wkend = (5, 6)
@@ -312,15 +312,15 @@
     qpagenames = '*'.join(map(wikiutil.quoteWikinameURL, parmpagename))
     qtemplate = wikiutil.quoteWikinameURL(parmtemplate)
     querystr = "calparms=%%s,%d,%d,%d,%%d,%%s" % (parmyear, parmmonth, parmoffset)
-    prevlink = p.url(request, querystr % (qpagenames, parmoffset2 - 1, qtemplate), 0) 
-    nextlink = p.url(request, querystr % (qpagenames, parmoffset2 + 1, qtemplate), 0) 
+    prevlink = p.url(request, querystr % (qpagenames, parmoffset2 - 1, qtemplate), 0)
+    nextlink = p.url(request, querystr % (qpagenames, parmoffset2 + 1, qtemplate), 0)
     prevylink = p.url(request, querystr % (qpagenames, parmoffset2 - 12, qtemplate), 0)
     nextylink = p.url(request, querystr % (qpagenames, parmoffset2 + 12, qtemplate), 0)
     prevmonth = formatter.url(1, prevlink, 'cal-link') + '<' + formatter.url(0)
     nextmonth = formatter.url(1, nextlink, 'cal-link') + '>' + formatter.url(0)
-    prevyear = formatter.url(1, prevylink, 'cal-link') + '<<' + formatter.url(0) 
-    nextyear = formatter.url(1, nextylink, 'cal-link') + '>>' + formatter.url(0) 
-    
+    prevyear = formatter.url(1, prevylink, 'cal-link') + '<<' + formatter.url(0)
+    nextyear = formatter.url(1, nextylink, 'cal-link') + '>>' + formatter.url(0)
+
     if parmpagename != [thispage]:
         pagelinks = ''
         r, g, b = (255, 0, 0)
@@ -336,14 +336,14 @@
             r, g, b = cliprgb(r, g, b)
             pagelinks = pagelinks + '<a style="%s" href="%s">%s</a>' % \
                         ('background-color:#%02x%02x%02x;color:#000000;text-decoration:none' % \
-                         (r,g,b), Page(request, parmpagename[0]).url(request), ch)
+                         (r, g, b), Page(request, parmpagename[0]).url(request), ch)
             r, g, b = (r, g+colorstep, b)
             st = st + chstep
         r, g, b = (255-colorstep, 255, 255-colorstep)
         for page in parmpagename[1:]:
             pagelinks = pagelinks + '*<a style="%s" href="%s">%s</a>' % \
                         ('background-color:#%02x%02x%02x;color:#000000;text-decoration:none' % \
-                         (r,g,b), Page(request, page).url(request), page)
+                         (r, g, b), Page(request, page).url(request), page)
         showpagename = ' %s<BR>\n' % pagelinks
     else:
         showpagename = ''
@@ -369,10 +369,10 @@
             cssday = "cal-workday"
         restd2.append(' <td class="%s" width="14%%">%s</td>\n' % (cssday, wday))
     restr2 = ' <tr>\n%s </tr>\n' % "".join(restd2)
-    
+
     if parmheight6:
         while len(monthcal) < 6:
-            monthcal = monthcal + [[0,0,0,0,0,0,0]]
+            monthcal = monthcal + [[0, 0, 0, 0, 0, 0, 0]]
 
     maketip_js = []
     restrn = []
@@ -399,14 +399,14 @@
                     for match in header1_re.finditer(daycontent):
                         if match:
                             title = match.group(1)
-                            title = wikiutil.escape(title).replace("'","\\'")
+                            title = wikiutil.escape(title).replace("'", "\\'")
                             titletext.append(title)
                 tipname = link
                 tiptitle = link
                 tiptext = '<br>'.join(titletext)
                 maketip_js.append("maketip('%s','%s','%s');" % (tipname, tiptitle, tiptext))
                 onmouse = {'onMouseOver': "tip('%s')" % tipname,
-                           'onMouseOut': "untip()"}
+                           'onMouseOut': "untip()"}
             else:
                 csslink = "cal-emptyday"
             if parmtemplate:
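Both navigation branches above funnel through yearmonthplusoffset() to normalise a year/month pair after adding a month offset. Its definition lies outside these hunks, so the following is only a sketch of what such a helper has to do:

    def yearmonthplusoffset(year, month, offset):
        # shift to a 0-based month index, add the offset, split back out
        idx = year * 12 + (month - 1) + offset
        return idx // 12, idx % 12 + 1

    # e.g. yearmonthplusoffset(2006, 7, -7) == (2005, 12)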
--- a/MoinMoin/macro/NewPage.py Mon Jul 17 13:29:36 2006 +0200
+++ b/MoinMoin/macro/NewPage.py Mon Jul 17 13:29:58 2006 +0200
@@ -49,7 +49,7 @@
         self.formatter = macro.formatter
         self.args = self.getArgs(args)
 
-    def getArgs(self, string):
+    def getArgs(self, argstr):
         """ Temporary function until Oliver Graf args parser is finished
 
         @param string: string from the wiki markup [[NewPage(string)]]
@@ -58,7 +58,7 @@
         """
         if not string:
             return {}
-        args = [s.strip() for s in string.split(',')]
+        args = [s.strip() for s in argstr.split(',')]
         args = dict(zip(self.arguments, args))
         return args
@@ -75,28 +75,28 @@
         template = self.args.get('template') or ''
         label = self.args.get('buttonLabel')
         nametemplate = self.args.get('nameTemplate') or u'%s'
-        
+
         if parent == '@ME' and self.request.user.valid:
             parent = self.request.user.name
-        
+
         requires_input = nametemplate.find('%s') != -1
-        
+
         if label:
             # Try to get a translation, this will probably not work in
             # most case, but better than nothing.
             label = _(label)
         else:
             label = _("Create New Page")
-        
+
         # TODO: better abstract this using the formatter
         html = [
            u'<form class="macro" method="get" action=""><div>',
            u'<input type="hidden" name="action" value="newpage">',
            u'<input type="hidden" name="parent" value="%s">' % wikiutil.escape(parent, 1),
            u'<input type="hidden" name="template" value="%s">' % wikiutil.escape(template, 1),
-           u'<input type="hidden" name="nametemplate" value="%s">' % wikiutil.escape(nametemplate,1),
+           u'<input type="hidden" name="nametemplate" value="%s">' % wikiutil.escape(nametemplate, 1),
            ]
-        
+
         if requires_input:
             html += [
                u'<input type="text" name="pagename" size="30">',
@@ -106,7 +106,7 @@
            u'</div></form>',
            ]
         return self.formatter.rawHTML('\n'.join(html))
-    
+
 def execute(macro, args):
     """ Temporary glue code to use with moin current macro system """
     return NewPage(macro, args).renderInPage()
--- a/MoinMoin/macro/OrphanedPages.py Mon Jul 17 13:29:36 2006 +0200
+++ b/MoinMoin/macro/OrphanedPages.py Mon Jul 17 13:29:58 2006 +0200
@@ -13,7 +13,7 @@
     if macro.request.mode_getpagelinks: # prevent recursion
         return ''
-    
+
     # delete all linked pages from a dict of all pages
     pages = macro.request.rootpage.getPageDict()
     orphaned = {}
--- a/MoinMoin/macro/PageHits.py Mon Jul 17 13:29:36 2006 +0200
+++ b/MoinMoin/macro/PageHits.py Mon Jul 17 13:29:58 2006 +0200
@@ -21,7 +21,7 @@
 
 class PageHits:
-    
+
     def __init__(self, macro, args):
         self.macro = macro
         self.request = macro.request
@@ -36,10 +36,10 @@
         hits.sort()
         hits.reverse()
         return self.format(hits)
-        
+
     def cachedHits(self):
         """ Return tuple (cache date, cached hits) for all pages """
-        date, hits = 0, {}        
+        date, hits = 0, {}
         if self.cache.exists():
             try:
                 date, hits = pickle.loads(self.cache.content())
@@ -54,7 +54,7 @@
             logDate = event_log.date()
         except logfile.LogMissing:
             return
-        
+
         changed = False
         event_log.set_filter(['VIEWPAGE'])
         for event in event_log.reverse():
@@ -64,13 +64,13 @@
                 if page:
                     hits[page] = hits.get(page, 0) + 1
                     changed = True
-        
+
         if changed:
-            self.updateCache(logDate, hits)        
-    
+            self.updateCache(logDate, hits)
+
     def updateCache(self, date, hits):
         self.cache.update(pickle.dumps((date, hits), PICKLE_PROTOCOL))
-        
+
     def filterReadableHits(self, hits):
         """ Filter out hits the user many not see """
         userMayRead = self.request.user.may.read
@@ -96,9 +96,10 @@
                 formatter.pagelink(0, pagename),
                 formatter.listitem(0),
                 ])
-        result.append(formatter.number_list(0))        
+        result.append(formatter.number_list(0))
         return ''.join(result)
 
 
 def execute(macro, args):
     return PageHits(macro, args).execute()
+
--- a/MoinMoin/macro/RandomPage.py Mon Jul 17 13:29:36 2006 +0200
+++ b/MoinMoin/macro/RandomPage.py Mon Jul 17 13:29:58 2006 +0200
@@ -15,7 +15,7 @@
 
 def execute(macro, args):
     request = macro.request
-    
+
     # get number of wanted links
     try:
         links = max(int(args), 1)
@@ -33,13 +33,13 @@
         # Take one random page from the list
         pagename = random.choice(all_pages)
         all_pages.remove(pagename)
-        
+
         # Filter out deleted pages or pages the user may not read.
         page = Page(request, pagename)
         if page.exists() and request.user.may.read(pagename):
             pages.append(pagename)
             found += 1
-    
+
     if not pages:
         return ''
--- a/MoinMoin/macro/RandomQuote.py Mon Jul 17 13:29:36 2006 +0200
+++ b/MoinMoin/macro/RandomQuote.py Mon Jul 17 13:29:58 2006 +0200
@@ -39,12 +39,12 @@
     quotes = raw.splitlines()
     quotes = [quote.strip() for quote in quotes]
     quotes = [quote[2:] for quote in quotes if quote.startswith('* ')]
-    
+
     if not quotes:
         return (macro.formatter.highlight(1) +
                 _('No quotes on %(pagename)s.') % {'pagename': pagename} +
                 macro.formatter.highlight(0))
-    
+
     quote = random.choice(quotes)
     page.set_raw_body(quote, 1)
     quote = macro.request.redirectedOutput(page.send_page,
--- a/MoinMoin/macro/RecentChanges.py Mon Jul 17 13:29:36 2006 +0200
+++ b/MoinMoin/macro/RecentChanges.py Mon Jul 17 13:29:58 2006 +0200
@@ -87,10 +87,10 @@
 
     # print name of page, with a link to it
     force_split = len(page.page_name) > _MAX_PAGENAME_LENGTH
-    
+
     d['icon_html'] = html_link
     d['pagelink_html'] = page.link_to(request, text=page.split_title(request, force=force_split))
-    
+
     # print time of change
     d['time_html'] = None
     if request.cfg.changed_time_fmt:
@@ -100,7 +100,7 @@
                 'mins': tdiff}
     else:
         d['time_html'] = time.strftime(request.cfg.changed_time_fmt, line.time_tuple)
-    
+
     # print editor name or IP
     d['editors'] = None
     if request.cfg.show_names:
@@ -111,7 +111,7 @@
                 if not name in counters:
                     counters[name] = []
                 counters[name].append(idx+1)
-            poslist = map(None, counters.values(), counters.keys())  
+            poslist = map(None, counters.values(), counters.keys())
             poslist.sort()
             ##request.write(repr(counters.items()))
             d['editors'] = []
@@ -126,7 +126,7 @@
         comment = format_comment(request, lines[idx])
         if comment:
             comments.append((idx+1, wikiutil.escape(comment)))
-    
+
     d['changecount'] = len(lines)
     d['comments'] = comments
 
@@ -135,9 +135,9 @@
                               wikiutil.quoteWikinameURL(line.pagename) + "?action=info",
                               img, formatter=macro.formatter, rel="nofollow")
     d['info_html'] = info_html
-    
+
     return request.theme.recentchanges_entry(d)
-    
+
 def cmp_lines(first, second):
     return cmp(first[0], second[0])
@@ -174,22 +174,22 @@
     if not max_days:
         max_days = _MAX_DAYS
     d['rc_max_days'] = max_days
-    
+
     # give known user the option to extend the normal display
     if request.user.valid:
         d['rc_days'] = _DAYS_SELECTION
     else:
         d['rc_days'] = None
-    
+
     d['rc_update_bookmark'] = None
     request.write(request.theme.recentchanges_header(d))
 
     length = len(last_edits)
-    
+
     index = 0
     last_index = 0
     day_count = 0
-    
+
     if length > 0:
         line = last_edits[index]
         line.time_tuple = request.user.getTime(wikiutil.version2timestamp(line.ed_time_usecs))
@@ -200,20 +200,20 @@
 
             index += 1
 
-            if (index>length):
-                break 
+            if index > length:
+                break
 
             if index < length:
                 line = last_edits[index]
                 line.time_tuple = request.user.getTime(wikiutil.version2timestamp(line.ed_time_usecs))
                 day = line.time_tuple[0:3]
 
-            if (day != this_day) or (index==length):
+            if (day != this_day) or (index == length):
                 d['bookmark_link_html'] = None
                 d['date'] = request.user.getFormattedDate(wikiutil.version2timestamp(last_edits[last_index].ed_time_usecs))
                 request.write(request.theme.recentchanges_daybreak(d))
                 this_day = day
-                
+
                 for page in last_edits[last_index:index]:
                     request.write(format_page_edits(macro, [page], None))
                 last_index = index
@@ -223,7 +223,7 @@
 
     d['rc_msg'] = msg
     request.write(request.theme.recentchanges_footer(d))
-    
+
 def execute(macro, args, **kw):
     # handle abandoned keyword
     if kw.get('abandoned', 0):
@@ -235,7 +235,7 @@
     user = request.user
     page = macro.formatter.page
     pagename = page.page_name
-    
+
     d = {}
     d['q_page_name'] = wikiutil.quoteWikinameURL(pagename)
 
@@ -256,7 +256,7 @@
         currentBookmark = wikiutil.version2timestamp(bookmark_usecs)
         currentBookmark = user.getFormattedDateTime(currentBookmark)
         currentBookmark = _('(currently set to %s)') % currentBookmark
-        
+
         url = wikiutil.quoteWikinameURL(pagename) + "?action=bookmark&time=del"
         deleteBookmark = wikiutil.link_tag(request, url, _("Delete Bookmark"),
                                            formatter=macro.formatter, rel="nofollow")
@@ -267,14 +267,14 @@
               "?action=bookmark&time=%d" % version
         d['rc_update_bookmark'] = wikiutil.link_tag(request, url, _("Set bookmark"),
                                                     formatter=macro.formatter, rel="nofollow")
-    
+
     # set max size in days
     max_days = min(int(request.form.get('max_days', [0])[0]), _DAYS_SELECTION[-1])
     # default to _MAX_DAYS for useres without bookmark
     if not max_days and not bookmark_usecs:
         max_days = _MAX_DAYS
     d['rc_max_days'] = max_days
-    
+
     # give known user the option to extend the normal display
     if request.user.valid:
         d['rc_days'] = _DAYS_SELECTION
@@ -282,7 +282,7 @@
         d['rc_days'] = []
 
     request.write(request.theme.recentchanges_header(d))
-    
+
     pages = {}
     ignore_pages = {}
@@ -298,7 +298,7 @@
         line.time_tuple = request.user.getTime(wikiutil.version2timestamp(line.ed_time_usecs))
         day = line.time_tuple[0:3]
         hilite = line.ed_time_usecs > (bookmark_usecs or line.ed_time_usecs)
-        
+
         if ((this_day != day or (not hilite and not max_days))) and len(pages) > 0:
             # new day or bookmark reached: print out stuff
             this_day = day
@@ -307,7 +307,7 @@
             pages = pages.values()
             pages.sort(cmp_lines)
             pages.reverse()
-    
+
             if request.user.valid:
                 d['bookmark_link_html'] = wikiutil.link_tag(
                     request,
@@ -319,7 +319,7 @@
                 d['bookmark_link_html'] = None
             d['date'] = request.user.getFormattedDate(wikiutil.version2timestamp(pages[0][0].ed_time_usecs))
             request.write(request.theme.recentchanges_daybreak(d))
-            
+
             for page in pages:
                 request.write(format_page_edits(macro, page, bookmark_usecs))
             pages = {}
@@ -333,7 +333,7 @@
 
         if line.pagename in ignore_pages:
             continue
-        
+
         # end listing by default if user has a bookmark and we reached it
        if not max_days and not hilite:
             msg = _('[Bookmark reached]')
@@ -353,7 +353,7 @@
         pages = pages.values()
         pages.sort(cmp_lines)
         pages.reverse()
-    
+
         if request.user.valid:
             d['bookmark_link_html'] = wikiutil.link_tag(
                 request,
@@ -365,10 +365,10 @@
             d['bookmark_link_html'] = None
         d['date'] = request.user.getFormattedDate(wikiutil.version2timestamp(pages[0][0].ed_time_usecs))
         request.write(request.theme.recentchanges_daybreak(d))
-        
+
         for page in pages:
             request.write(format_page_edits(macro, page, bookmark_usecs))
-    
+
     d['rc_msg'] = msg
     request.write(request.theme.recentchanges_footer(d))
--- a/MoinMoin/macro/SystemAdmin.py Mon Jul 17 13:29:36 2006 +0200
+++ b/MoinMoin/macro/SystemAdmin.py Mon Jul 17 13:29:58 2006 +0200
@@ -18,7 +18,7 @@
 def execute(macro, args):
     _ = macro.request.getText
     request = macro.request
-    
+
     # do not show system admin to users not in superuser list
     if not request.user.isSuperUser():
         return ''
--- a/MoinMoin/macro/SystemInfo.py Mon Jul 17 13:29:36 2006 +0200
+++ b/MoinMoin/macro/SystemInfo.py Mon Jul 17 13:29:58 2006 +0200
@@ -45,7 +45,7 @@
 
     _ = Macro._
     request = Macro.request
-    
+
     # check for 4XSLT
     try:
         import Ft
@@ -96,24 +96,24 @@
     eventlogger = eventlog.EventLog(request)
     nonestr = _("NONE")
     row('Event log', _formatInReadableUnits(eventlogger.size()))
-    
+
     row(_('Global extension macros'), ', '.join(macro.extension_macros) or nonestr)
-    row(_('Local extension macros'), 
+    row(_('Local extension macros'),
         ', '.join(wikiutil.wikiPlugins('macro', Macro.cfg)) or nonestr)
-    
+
     ext_actions = [x for x in action.extension_actions
                    if not x in request.cfg.actions_excluded]
     row(_('Global extension actions'), ', '.join(ext_actions) or nonestr)
-    row(_('Local extension actions'), 
+    row(_('Local extension actions'),
        ', '.join(action.getPlugins(request)[1]) or nonestr)
-    
+
     row(_('Global parsers'), ', '.join(parser.modules) or nonestr)
-    row(_('Local extension parsers'), 
+    row(_('Local extension parsers'),
        ', '.join(wikiutil.wikiPlugins('parser', Macro.cfg)) or nonestr)
-    
+
     state = (_('Disabled'), _('Enabled'))
     row(_('Xapian search'), state[request.cfg.xapian_search])
-    
+
     row(_('Active threads'), t_count or 'N/A')
     buf.write(u'</dl>')
--- a/MoinMoin/macro/TableOfContents.py Mon Jul 17 13:29:36 2006 +0200 +++ b/MoinMoin/macro/TableOfContents.py Mon Jul 17 13:29:58 2006 +0200 @@ -39,12 +39,12 @@ def __init__(self, macro, args): self.macro = macro self._ = self.macro.request.getText - + self.inc_re = re.compile(r"^\[\[Include\((.*)\)\]\]") self.arg_re = re.compile(_args_re_pattern) self.head_re = re.compile(_title_re) # single lines only self.pre_re = re.compile(r'\{\{\{.+?\}\}\}', re.S) - + self.result = [] self.baseindent = 0 self.indent = 0 @@ -67,7 +67,7 @@ if self.include_macro is None: self.include_macro = wikiutil.importPlugin(self.macro.request.cfg, 'macro', "Include") - return self.pre_re.sub('',apply(self.include_macro, args, kwargs)).split('\n') + return self.pre_re.sub('', apply(self.include_macro, args, kwargs)).split('\n') def run(self): _ = self._ @@ -76,7 +76,7 @@ self.result.append(self.macro.formatter.escapedText(_('Contents'))) self.result.append(self.macro.formatter.paragraph(0)) - self.process_lines(self.pre_re.sub('',self.macro.parser.raw).split('\n'), + self.process_lines(self.pre_re.sub('', self.macro.parser.raw).split('\n'), self.macro.formatter.page.page_name) # Close pending lists for i in range(self.baseindent, self.indent): @@ -110,12 +110,12 @@ level = int(tmp.group("level")) else: level = 1 - inc_page_lines = ["%s %s %s" %("=" * level, heading, "=" * level)] + inc_page_lines = ["%s %s %s" % ("=" * level, heading, "=" * level)] else: inc_page_lines = [] inc_page_lines = inc_page_lines + self.IncludeMacro(self.macro, match.group(1), called_by_toc=1) - + self.process_lines(inc_page_lines, inc_pagename) else: self.parse_line(line, pagename) @@ -138,12 +138,12 @@ self.indent = self.baseindent # Close lists - for i in range(0,self.indent-newindent): + for i in range(0, self.indent-newindent): self.result.append(self.macro.formatter.listitem(0)) self.result.append(self.macro.formatter.number_list(0)) # Open Lists - for i in range(0,newindent-self.indent): + for i in range(0, newindent-self.indent): self.result.append(self.macro.formatter.number_list(1)) self.result.append(self.macro.formatter.listitem(1)) @@ -155,7 +155,7 @@ # close last listitem if same level if self.indent == newindent: self.result.append(self.macro.formatter.listitem(0)) - + if self.indent >= newindent: self.result.append(self.macro.formatter.listitem(1)) self.result.append(self.macro.formatter.anchorlink(1, @@ -167,5 +167,6 @@ self.indent = newindent def execute(macro, args): - toc=TableOfContents(macro,args) + toc = TableOfContents(macro, args) return toc.run() +
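The TableOfContents changes above are cosmetic, but the open/close loops they touch are the interesting part: rather than keeping a stack, the macro emits exactly `indent - newindent` list closers or `newindent - indent` openers when the heading level changes. A standalone sketch of that balancing, using plain string markers instead of formatter calls:

    def adjust_indent(out, indent, newindent):
        """Emit enough closers/openers so nesting depth equals newindent."""
        for _ in range(indent - newindent):   # dedent: close lists
            out.append('close-list')
        for _ in range(newindent - indent):   # indent: open lists
            out.append('open-list')
        return newindent

    out = []
    depth = 0
    for level in [1, 2, 2, 3, 1]:             # heading levels in document order
        depth = adjust_indent(out, depth, level)
        out.append('item(level=%d)' % level)
    adjust_indent(out, depth, 0)              # close everything at the end
    print('\n'.join(out))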
--- a/MoinMoin/macro/TeudView.py Mon Jul 17 13:29:36 2006 +0200 +++ b/MoinMoin/macro/TeudView.py Mon Jul 17 13:29:58 2006 +0200 @@ -63,7 +63,7 @@ processor.appendStylesheetFile(xslfile) try: result = processor.runString(xmlstr, - topLevelParams = { + topLevelParams={ 'uri-prefix': pagename + "?module=", 'uri-suffix': "", } @@ -73,3 +73,4 @@ raise return navigation + result +
--- a/MoinMoin/macro/WantedPages.py Mon Jul 17 13:29:36 2006 +0200 +++ b/MoinMoin/macro/WantedPages.py Mon Jul 17 13:29:58 2006 +0200 @@ -15,12 +15,12 @@ _ = request.getText # prevent recursion - if request.mode_getpagelinks: - return '' + if request.mode_getpagelinks: + return '' # Get allpages switch from the form allpages = int(request.form.get('allpages', [0])[0]) != 0 - + # Control bar - filter the list of pages # TODO: we should make this a widget and use on all page listing pages controlbar = '''<div class="controlbar"> @@ -35,13 +35,13 @@ pages = request.rootpage.getPageDict() # build a dict of wanted pages - wanted = {} + wanted = {} for name, page in pages.items(): # Skip system pages, because missing translations are not wanted pages, # unless you are a translator and clicked "Include system pages" if not allpages and wikiutil.isSystemPage(request, name): continue - + # Add links to pages which does not exists in pages dict links = page.getPageLinks(request) for link in links: @@ -53,7 +53,7 @@ # Check for the extreme case when there are no wanted pages if not wanted: - return u"%s<p>%s</p>" % (controlbar ,_("No wanted pages in this wiki.")) + return u"%s<p>%s</p>" % (controlbar, _("No wanted pages in this wiki.")) # Return a list of page links wantednames = wanted.keys() @@ -74,7 +74,7 @@ where.sort() if macro.formatter.page.page_name in where: where.remove(macro.formatter.page.page_name) - querystr='highlight=%s' % wikiutil.url_quote_plus(name) + querystr = 'highlight=%s' % wikiutil.url_quote_plus(name) wherelinks = [pages[pagename].link_to(request, querystr=querystr, rel='nofollow') for pagename in where] result.append(": " + ', '.join(wherelinks)) @@ -82,3 +82,4 @@ result.append(macro.formatter.number_list(0)) return u'%s%s' % (controlbar, u''.join(result)) +
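WantedPages itself is unchanged apart from spacing; the algorithm is a straightforward link inversion: for every existing page, record each link target that has no page of its own. A self-contained sketch over a hypothetical `links` mapping:

    def wanted_pages(links):
        """links: dict existing_page -> list of link targets.
        Returns dict missing_page -> dict referrer -> 1 (MoinMoin-style)."""
        wanted = {}
        for name, targets in links.items():
            for target in targets:
                if target not in links:          # target page does not exist
                    wanted.setdefault(target, {})[name] = 1
        return wanted

    links = {'FrontPage': ['HelpContents', 'MissingPage'],
             'HelpContents': ['MissingPage']}
    print(wanted_pages(links))  # {'MissingPage': {'FrontPage': 1, 'HelpContents': 1}}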
--- a/MoinMoin/macro/__init__.py Mon Jul 17 13:29:36 2006 +0200 +++ b/MoinMoin/macro/__init__.py Mon Jul 17 13:29:58 2006 +0200 @@ -87,7 +87,7 @@ # to have correct dir and lang html attributes for lang in i18n.wikiLanguages().keys(): Dependencies[lang] = [] - + def __init__(self, parser): self.parser = parser @@ -96,7 +96,7 @@ self.formatter = self.request.formatter self._ = self.request.getText self.cfg = self.request.cfg - + # Initialized on execute self.name = None @@ -131,10 +131,10 @@ return (self.formatter.lang(1, self.name) + self.formatter.text(text) + self.formatter.lang(0, self.name)) - + self.request.current_lang = self.name return '' - + def get_dependencies(self, macro_name): if macro_name in self.Dependencies: return self.Dependencies[macro_name] @@ -178,7 +178,7 @@ ] boxes = u'\n'.join(boxes) button = _("Search Text") - + # Format type = (type == "titlesearch") html = [ @@ -190,11 +190,11 @@ u'<input type="submit" value="%s">' % button, boxes, u'</div>', - u'</form>', + u'</form>', ] html = u'\n'.join(html) return self.formatter.rawHTML(html) - + def _macro_GoTo(self, args): """ Make a goto box @@ -241,7 +241,7 @@ def filter(name): return not wikiutil.isSystemPage(self.request, name) pages = self.request.rootpage.getPageList(filter=filter) - + word_re = re.compile(word_re, re.UNICODE) map = {} for name in pages: @@ -275,7 +275,7 @@ html.append(self.formatter.strong(1)) html.append(word) html.append(self.formatter.strong(0)) - + html.append(self.formatter.bullet_list(1)) links.sort() last_page = None @@ -286,7 +286,7 @@ html.append(Page(self.request, name).link_to(self.request, attachment_indicator=1)) html.append(self.formatter.listitem(0)) html.append(self.formatter.bullet_list(0)) - + def _make_index_key(index_letters, additional_html=''): index_letters.sort() def letter_link(ch): @@ -301,7 +301,7 @@ """ % (qpagename, not allpages, allpages_txt) ) # ?action=titleindex and ?action=titleindex&mimetype=text/xml removed - return u'%s%s' % (index, u''.join(html)) + return u'%s%s' % (index, u''.join(html)) def _macro_TitleIndex(self, args): @@ -324,16 +324,16 @@ # With whitespace argument, return same error message as FullSearch elif needle.isspace(): - err = _('Please use a more selective search term instead of {{{"%s"}}}') % needle + err = _('Please use a more selective search term instead of {{{"%s"}}}') % needle return '<span class="error">%s</span>' % err - + # Return a title search for needle, sorted by name. # XXX: what's with literal? 
results = search.searchPages(self.request, needle, titlesearch=1, case=case) results.sortByPagename() return results.pageList(self.request, self.formatter) - + def _macro_InterWiki(self, args): from StringIO import StringIO interwiki_list = wikiutil.load_wikimap(self.request) @@ -367,7 +367,7 @@ return (self.formatter.span(1, css_class="error") + 'Wrong argument: %s' % arg + self.formatter.span(0)) - + count = self.request.rootpage.getPageCount(exists=exists) return self.formatter.text("%d" % count) @@ -386,7 +386,7 @@ # Get page list readable by current user, filtered by needle hits = self.request.rootpage.getPageList(filter=needle_re.search) hits.sort() - + result = [] result.append(self.formatter.bullet_list(1)) for pagename in hits: @@ -408,8 +408,8 @@ # we ignore any time zone offsets here, assume UTC, # and accept (and ignore) any trailing stuff try: - year, month, day = int(args[0:4]), int(args[5:7]), int(args[8:10]) - hour, minute, second = int(args[11:13]), int(args[14:16]), int(args[17:19]) + year, month, day = int(args[0:4]), int(args[5:7]), int(args[8:10]) + hour, minute, second = int(args[11:13]), int(args[14:16]), int(args[17:19]) tz = args[19:] # +HHMM, -HHMM or Z or nothing (then we assume Z) tzoffset = 0 # we assume UTC no matter if there is a Z if tz: @@ -451,7 +451,7 @@ create_only = False if isinstance(args, unicode): args = args.strip(" '\"") - create_only = (args.lower()=="createonly") + create_only = (args.lower() == "createonly") return self.formatter.rawHTML(userform.getUserForm( self.request, @@ -492,8 +492,8 @@ return result def _macro_GetVal(self, args): - page,key = args.split(',') + page, key = args.split(',') d = self.request.dicts.dict(page) - result = d.get(key,'') + result = d.get(key, '') return self.formatter.text(result)
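The DateTime hunk above only fixes spacing around `=`, but the slicing it touches is a small hand-rolled ISO 8601 parser (fixed-width date and time fields, optional +HHMM/-HHMM or Z suffix, default UTC). The same approach in isolation, assuming input like `2006-07-17T13:29:58+0200`:

    import calendar

    def parse_iso8601(s):
        """Return a UTC epoch value for 'YYYY-MM-DDTHH:MM:SS[+HHMM|-HHMM|Z]'.
        Trailing garbage after the offset is ignored; no offset means UTC."""
        year, month, day = int(s[0:4]), int(s[5:7]), int(s[8:10])
        hour, minute, second = int(s[11:13]), int(s[14:16]), int(s[17:19])
        tzoffset = 0                        # seconds east of UTC, default Z
        tz = s[19:]
        if tz and tz[0] in '+-':
            sign = {'+': 1, '-': -1}[tz[0]]
            tzoffset = sign * (int(tz[1:3]) * 3600 + int(tz[3:5]) * 60)
        t = calendar.timegm((year, month, day, hour, minute, second, 0, 0, 0))
        return t - tzoffset                 # shift offset-local time to UTC

    print(parse_iso8601('2006-07-17T13:29:58+0200'))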
--- a/MoinMoin/request/CGI.py Mon Jul 17 13:29:36 2006 +0200 +++ b/MoinMoin/request/CGI.py Mon Jul 17 13:29:58 2006 +0200 @@ -28,7 +28,7 @@ except Exception, err: self.fail(err) - + def open_logs(self): # create log file for catching stderr output if not self.opened_logs: @@ -48,7 +48,7 @@ def flush(self): sys.stdout.flush() - + def finish(self): RequestBase.finish(self) # flush the output, ignore errors caused by the user closing the socket @@ -59,12 +59,12 @@ if ex.errno != errno.EPIPE: raise # Headers ---------------------------------------------------------- - + def http_headers(self, more_headers=[]): # Send only once if getattr(self, 'sent_headers', None): return - + self.sent_headers = 1 have_ct = 0
--- a/MoinMoin/request/CLI.py Mon Jul 17 13:29:36 2006 +0200 +++ b/MoinMoin/request/CLI.py Mon Jul 17 13:29:58 2006 +0200 @@ -29,7 +29,7 @@ RequestBase.__init__(self, properties) self.cfg.caching_formats = [] # don't spoil the cache self.initTheme() # usually request.run() does this, but we don't use it - + def read(self, n=None): """ Read from input stream. """ if n is None: @@ -43,7 +43,7 @@ def flush(self): sys.stdout.flush() - + def finish(self): RequestBase.finish(self) # flush the output, ignore errors caused by the user closing the socket
--- a/MoinMoin/request/MODPYTHON.py Mon Jul 17 13:29:36 2006 +0200 +++ b/MoinMoin/request/MODPYTHON.py Mon Jul 17 13:29:58 2006 +0200 @@ -31,15 +31,15 @@ # some mod_python 2.7.X has no get method for table objects, # so we make a real dict out of it first. if not hasattr(req.subprocess_env, 'get'): - env=dict(req.subprocess_env) + env = dict(req.subprocess_env) else: - env=req.subprocess_env + env = req.subprocess_env self._setup_vars_from_std_env(env) RequestBase.__init__(self) except Exception, err: self.fail(err) - + def fixURI(self, env): """ Fix problems with script_name and path_info using PythonOption directive to rewrite URI. @@ -71,7 +71,7 @@ import urlparse scriptAndPath = urlparse.urlparse(self.request_uri)[2] self.script_name = location.rstrip('/') - path = scriptAndPath.replace(self.script_name, '', 1) + path = scriptAndPath.replace(self.script_name, '', 1) self.path_info = wikiutil.url_unquote(path, want_unicode=False) RequestBase.fixURI(self, env) @@ -103,9 +103,9 @@ # objects. if hasattr(item, 'value'): item = item.value - fixedResult.append(item) + fixedResult.append(item) args[key] = fixedResult - + return self.decodeArgs(args) def run(self, req): @@ -131,7 +131,7 @@ def flush(self): """ We can't flush it, so do nothing. """ pass - + def finish(self): """ Just return apache.OK. Status is set in req.status. """ RequestBase.finish(self) @@ -168,7 +168,7 @@ self._have_status = 1 else: # this is a header we sent out - self.mpyreq.headers_out[key]=value + self.mpyreq.headers_out[key] = value def http_headers(self, more_headers=[]): """ Sends out headers and possibly sets default content-type
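fixURI's one-line change only normalizes spacing, but the `replace(..., 1)` it touches is deliberate: the configured script_name prefix is stripped from the path exactly once, so a path_info that happens to contain the prefix again keeps it. A sketch of that split (helper name is illustrative):

    try:
        from urllib.parse import urlparse    # Python 3
    except ImportError:
        from urlparse import urlparse        # Python 2

    def split_uri(request_uri, location):
        """Split a request URI into (script_name, path_info)."""
        path = urlparse(request_uri)[2]      # path component only
        script_name = location.rstrip('/')
        # replace only the first occurrence: '/wiki/wiki/SubPage' keeps
        # its second '/wiki' as part of path_info
        path_info = path.replace(script_name, '', 1)
        return script_name, path_info

    print(split_uri('/wiki/FrontPage?action=raw', '/wiki/'))  # ('/wiki', '/FrontPage')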
--- a/MoinMoin/request/STANDALONE.py Mon Jul 17 13:29:36 2006 +0200 +++ b/MoinMoin/request/STANDALONE.py Mon Jul 17 13:29:58 2006 +0200 @@ -14,7 +14,7 @@ class Request(RequestBase): """ specialized on StandAlone Server (MoinMoin.server.standalone) requests """ script_name = '' - + def __init__(self, sa, properties={}): """ @param sa: stand alone server object @@ -26,14 +26,14 @@ self.rfile = sa.rfile self.headers = sa.headers self.is_ssl = 0 - + # Copy headers - self.http_accept_language = (sa.headers.getheader('accept-language') + self.http_accept_language = (sa.headers.getheader('accept-language') or self.http_accept_language) - self.http_user_agent = sa.headers.getheader('user-agent', '') + self.http_user_agent = sa.headers.getheader('user-agent', '') co = filter(None, sa.headers.getheaders('cookie')) self.saved_cookie = ', '.join(co) or '' - + # Copy rest from standalone request self.server_name = sa.server.server_name self.server_port = str(sa.server.server_port) @@ -48,7 +48,7 @@ self.setURL(sa.headers) ##self.debugEnvironment(sa.headers) - + RequestBase.__init__(self, properties) except Exception, err: @@ -58,7 +58,7 @@ """ Override to create standalone form """ form = cgi.FieldStorage(self.rfile, headers=self.headers, environ={'REQUEST_METHOD': 'POST'}) return RequestBase._setup_args_from_cgi_form(self, form) - + def read(self, n=None): """ Read from input stream @@ -83,7 +83,7 @@ def flush(self): self.wfile.flush() - + def finish(self): RequestBase.finish(self) self.wfile.flush() @@ -93,16 +93,16 @@ def http_headers(self, more_headers=[]): if getattr(self, 'sent_headers', None): return - + self.sent_headers = 1 user_headers = getattr(self, 'user_headers', []) - + # check for status header and send it our_status = 200 for header in more_headers + user_headers: if header.lower().startswith("status:"): try: - our_status = int(header.split(':', 1)[1].strip().split(" ", 1)[0]) + our_status = int(header.split(':', 1)[1].strip().split(" ", 1)[0]) except: pass # there should be only one!
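The status-parsing line reformatted above digs the numeric code out of a CGI-style `Status:` header, falling back to 200 on anything malformed. The same tolerant parse in isolation:

    def status_from_headers(headers, default=200):
        """Find the last 'Status: NNN reason' header and return NNN."""
        status = default
        for header in headers:
            if header.lower().startswith('status:'):
                value = header.split(':', 1)[1].strip()   # 'NNN reason'
                try:
                    status = int(value.split(' ', 1)[0])
                except ValueError:
                    pass                                  # keep previous value
        return status

    print(status_from_headers(['Content-Type: text/html', 'Status: 302 Found']))  # 302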
--- a/MoinMoin/request/TWISTED.py Mon Jul 17 13:29:36 2006 +0200 +++ b/MoinMoin/request/TWISTED.py Mon Jul 17 13:29:58 2006 +0200 @@ -18,12 +18,12 @@ try: self.twistd = twistedRequest self.reactor = reactor - + # Copy headers self.http_accept_language = self.twistd.getHeader('Accept-Language') self.saved_cookie = self.twistd.getHeader('Cookie') self.http_user_agent = self.twistd.getHeader('User-Agent') - + # Copy values from twisted request self.server_protocol = self.twistd.clientproto self.server_name = self.twistd.getRequestHostname().split(':')[0] @@ -42,7 +42,7 @@ self.setURL(self.twistd.getAllHeaders()) ##self.debugEnvironment(twistedRequest.getAllHeaders()) - + RequestBase.__init__(self, properties) except MoinMoinFinish: # might be triggered by http_redirect @@ -63,7 +63,7 @@ self.fail(self.delayedError) return self.finish() RequestBase.run(self) - + def setup_args(self, form=None): """ Return args dict @@ -73,12 +73,12 @@ # TODO: check if for a POST this included query_string args (needed for # TwikiDraw's action=AttachFile&do=savedrawing) return self.decodeArgs(self.twistd.args) - + def read(self, n=None): """ Read from input stream. """ # XXX why is that wrong?: #rd = self.reactor.callFromThread(self.twistd.read) - + # XXX do we need self.reactor.callFromThread with that? # XXX if yes, why doesn't it work? self.twistd.content.seek(0, 0) @@ -88,7 +88,7 @@ rd = self.twistd.content.read(n) #print "request.RequestTwisted.read: data=\n" + str(rd) return rd - + def write(self, *data): """ Write to output stream. """ #print "request.RequestTwisted.write: data=\n" + wd @@ -153,4 +153,4 @@ def setResponseCode(self, code, message=None): self.twistd.setResponseCode(code, message) - +
--- a/MoinMoin/request/WSGI.py Mon Jul 17 13:29:36 2006 +0200 +++ b/MoinMoin/request/WSGI.py Mon Jul 17 13:29:58 2006 +0200 @@ -17,42 +17,42 @@ try: self.env = env self.hasContentType = False - + self.stdin = env['wsgi.input'] self.stdout = StringIO.StringIO() - + self.status = '200 OK' self.headers = [] - + self._setup_vars_from_std_env(env) RequestBase.__init__(self, {}) except Exception, err: self.fail(err) - + def setup_args(self, form=None): # TODO: does this include query_string args for POST requests? # see also how CGI works now if form is None: form = cgi.FieldStorage(fp=self.stdin, environ=self.env, keep_blank_values=1) return self._setup_args_from_cgi_form(form) - + def read(self, n=None): if n is None: return self.stdin.read() else: return self.stdin.read(n) - + def write(self, *data): self.stdout.write(self.encode(data)) - + def reset_output(self): self.stdout = StringIO.StringIO() - + def setHttpHeader(self, header): if type(header) is unicode: header = header.encode('ascii') - + key, value = header.split(':', 1) value = value.lstrip() if key.lower() == 'content-type': @@ -62,27 +62,27 @@ return else: self.hasContentType = True - + elif key.lower() == 'status': # save status for finish self.status = value return - + self.headers.append((key, value)) - + def http_headers(self, more_headers=[]): for header in more_headers: self.setHttpHeader(header) - + if not self.hasContentType: self.headers.insert(0, ('Content-Type', 'text/html;charset=%s' % config.charset)) - + def flush(self): pass - + def finish(self): pass - + def output(self): return self.stdout.getvalue()
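The WSGI hunks are whitespace-only, but the header dispatch they run through is worth spelling out: only the first Content-Type is honored (MoinMoin may send a charset-less duplicate later), and Status is diverted into the WSGI status line instead of the header list. A reduced sketch of that dispatch (class name is illustrative):

    class HeaderCollector:
        """Collect CGI-style 'Name: value' strings for a WSGI response."""
        def __init__(self):
            self.status = '200 OK'
            self.headers = []
            self.has_content_type = False

        def set_header(self, header):
            key, value = header.split(':', 1)
            value = value.lstrip()
            if key.lower() == 'content-type':
                if self.has_content_type:
                    return                  # first Content-Type wins
                self.has_content_type = True
            elif key.lower() == 'status':
                self.status = value         # goes into start_response()
                return
            self.headers.append((key, value))

    h = HeaderCollector()
    h.set_header('Status: 302 Found')
    h.set_header('Content-Type: text/html;charset=utf-8')
    print(h.status, h.headers)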
--- a/MoinMoin/request/__init__.py	Mon Jul 17 13:29:36 2006 +0200
+++ b/MoinMoin/request/__init__.py	Mon Jul 17 13:29:58 2006 +0200
@@ -1047,6 +1047,15 @@
         else:
             pagename = None

+        # need to inform caches that content changes based on:
+        # * cookie (even if we aren't sending one now)
+        # * User-Agent (because a bot might be denied and get no content)
+        # * Accept-Language (except if moin is told to ignore browser language)
+        if self.cfg.language_ignore_browser:
+            self.setHttpHeader("Vary: Cookie,User-Agent")
+        else:
+            self.setHttpHeader("Vary: Cookie,User-Agent,Accept-Language")
+
         # Handle request. We have these options:
         # 1. If user has a bad user name, delete its bad cookie and
         # send him to UserPreferences to make a new account.
@@ -1124,7 +1133,11 @@
         self.http_headers(["Status: 302 Found", "Location: %s" % url])

     def setHttpHeader(self, header):
-        """ Save header for later send. """
+        """ Save header for later send.
+
+        Attention: although we use a list here, some implementations use a
+        dict, so multiple calls with the same header name do NOT work reliably!
+        """
         self.user_headers.append(header)

     def setResponseCode(self, code, message=None):
@@ -1222,9 +1235,7 @@
         # Set Cache control header for http 1.1 caches
         # See http://www.cse.ohio-state.edu/cgi-bin/rfc/rfc2109.html#sec-4.2.3
         # and http://www.cse.ohio-state.edu/cgi-bin/rfc/rfc2068.html#sec-14.9
-        self.setHttpHeader('Cache-Control: no-cache="set-cookie"')
-        self.setHttpHeader('Cache-Control: private')
-        self.setHttpHeader('Cache-Control: max-age=0')
+        self.setHttpHeader('Cache-Control: no-cache="set-cookie", private, max-age=0')

         # Set Expires for http 1.0 caches (does not support Cache-Control)
         yearago = time.time() - (3600 * 24 * 365)
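The Cache-Control consolidation above is the practical consequence of the warning added to setHttpHeader's docstring: if a backend stores headers in a dict keyed by header name, three separate Cache-Control calls collapse into whichever came last. One comma-separated value sidesteps that. A sketch demonstrating the failure mode with a hypothetical dict-backed store:

    class DictHeaderStore:
        """Hypothetical backend that keys headers by name, roughly how a
        table-like headers_out object can behave for repeated names."""
        def __init__(self):
            self.headers = {}
        def set(self, header):
            key, value = header.split(':', 1)
            self.headers[key.strip()] = value.strip()   # same key: overwrite!

    store = DictHeaderStore()
    # three calls: only the last survives
    for h in ('Cache-Control: no-cache="set-cookie"',
              'Cache-Control: private',
              'Cache-Control: max-age=0'):
        store.set(h)
    print(store.headers['Cache-Control'])   # 'max-age=0' -- directives lost

    # one consolidated call: every directive survives
    store.set('Cache-Control: no-cache="set-cookie", private, max-age=0')
    print(store.headers['Cache-Control'])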
--- a/MoinMoin/script/__init__.py	Mon Jul 17 13:29:36 2006 +0200
+++ b/MoinMoin/script/__init__.py	Mon Jul 17 13:29:58 2006 +0200
@@ -31,23 +31,23 @@
        xmlrpc "channel", but scriptrequest.write needs to write to some
        buffer we transmit later as an xmlrpc function return value.
     """
-    def __init__(self, in, out, err):
-        self.in = in
-        self.out = out
-        self.err = err
+    def __init__(self, instream, outstream, errstream):
+        self.instream = instream
+        self.outstream = outstream
+        self.errstream = errstream

     def read(self, n=None):
         if n is None:
-            data = self.in.read()
+            data = self.instream.read()
         else:
-            data = self.in.read(n)
+            data = self.instream.read(n)
         return data

     def write(self, data):
-        self.out.write(data)
+        self.outstream.write(data)

     def write_err(self, data):
-        self.err.write(data)
+        self.errstream.write(data)


 class ScriptRequestCLI(ScriptRequest):
@@ -71,15 +71,15 @@
        string and we also need to catch the output / error output as strings.
     """
     def __init__(self, instr):
-        self.in = StringIO(instr)
-        self.out = StringIO()
-        self.err = StringIO()
+        self.instream = StringIO(instr)
+        self.outstream = StringIO()
+        self.errstream = StringIO()

     def fetch_output(self):
-        outstr = self.out.get_value()
-        errstr = self.err.get_value()
-        self.out.close()
-        self.err.close()
+        outstr = self.outstream.getvalue()
+        errstr = self.errstream.getvalue()
+        self.outstream.close()
+        self.errstream.close()
         return outstr, errstr
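With the streams renamed (`in` is a Python keyword, so the old parameter list could never even compile, and StringIO's accessor is `getvalue()`, not `get_value()`), the strings-backed variant can be driven as below; a minimal usage sketch assuming the class shape shown in the diff:

    try:
        from StringIO import StringIO    # Python 2
    except ImportError:
        from io import StringIO          # Python 3

    class ScriptRequestStrings:
        """Reduced stand-in for the xmlrpc-driven script request."""
        def __init__(self, instr):
            self.instream = StringIO(instr)
            self.outstream = StringIO()
            self.errstream = StringIO()
        def write(self, data):
            self.outstream.write(data)
        def fetch_output(self):
            outstr = self.outstream.getvalue()   # note: getvalue()
            errstr = self.errstream.getvalue()
            self.outstream.close()
            self.errstream.close()
            return outstr, errstr

    req = ScriptRequestStrings('some input')
    req.write('hello from a script\n')
    print(req.fetch_output())   # ('hello from a script\n', '')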
--- a/MoinMoin/script/index/build.py Mon Jul 17 13:29:36 2006 +0200 +++ b/MoinMoin/script/index/build.py Mon Jul 17 13:29:58 2006 +0200 @@ -22,7 +22,7 @@ ) self.parser.add_option( "--mode", metavar="MODE", dest="mode", - help="either add (unconditionally add to index) or update (update an existing index)" + help="either add (unconditionally add to index), update (update an existing index) or rebuild (remove and add)" ) def mainloop(self): @@ -40,5 +40,4 @@ def command(self): from MoinMoin.search.Xapian import Index Index(self.request).indexPages(self.files, self.options.mode) - #Index(self.request).test(self.request)
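A usage sketch for the new rebuild mode, equivalent to passing `--mode=rebuild` on the command line; it assumes a configured MoinMoin request object is already available (setup not shown):

    from MoinMoin.search.Xapian import Index

    def rebuild_index(request, files=None):
        """Drop the existing Xapian database, then index everything anew."""
        Index(request).indexPages(files, 'rebuild')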
--- a/MoinMoin/search/Xapian.py	Mon Jul 17 13:29:36 2006 +0200
+++ b/MoinMoin/search/Xapian.py	Mon Jul 17 13:29:58 2006 +0200
@@ -237,6 +237,13 @@
             Assumes that the write lock is acquired
         """
         fs_rootpage = 'FS' # XXX FS hardcoded
+
+        # rebuilding the DB: delete all index files, then add everything
+        if mode == 'rebuild':
+            for f in os.listdir(self.dir):
+                os.unlink(os.path.join(self.dir, f))  # listdir gives bare names
+            mode = 'add'
+
         try:
             wikiname = request.cfg.interwikiname or 'Self'
             itemid = "%s:%s" % (wikiname, os.path.join(fs_rootpage, filename))
--- a/MoinMoin/search/builtin.py Mon Jul 17 13:29:36 2006 +0200 +++ b/MoinMoin/search/builtin.py Mon Jul 17 13:29:58 2006 +0200 @@ -149,7 +149,7 @@ lock_dir = os.path.join(main_dir, 'index-lock') self.lock = lock.WriteLock(lock_dir, timeout=3600.0, readlocktimeout=60.0) - self.read_lock = lock.ReadLock(lock_dir, timeout=3600.0) + #self.read_lock = lock.ReadLock(lock_dir, timeout=3600.0) self.queue = UpdateQueue(os.path.join(main_dir, 'update-queue'), os.path.join(main_dir, 'update-queue-lock')) @@ -172,12 +172,12 @@ raise NotImplemented def search(self, query): - if not self.read_lock.acquire(1.0): - raise self.LockedException - try: - hits = self._search(query) - finally: - self.read_lock.release() + #if not self.read_lock.acquire(1.0): + # raise self.LockedException + #try: + hits = self._search(query) + #finally: + # self.read_lock.release() return hits def update_page(self, page): @@ -415,17 +415,25 @@ else: return self._moinSearch(pages) + def _xapianMatchDecider(self, term, pos): + if term[0] == 'S': # TitleMatch + return TitleMatch(start=pos, end=pos+len(term)-1) + else: # TextMatch (incl. headers) + return TextMatch(start=pos, end=pos+len(term)) + def _xapianMatch(self, page, uid): - matches = [] + """ Get all relevant Xapian matches per document id """ + positions = {} term = self._xapianEnquire.get_matching_terms_begin(uid) - #print hit['uid'] while term != self._xapianEnquire.get_matching_terms_end(uid): - print term.get_term(), ':', list(self._xapianIndex.termpositions(uid, term.get_term())) - for pos in self._xapianIndex.termpositions(uid, term.get_term()): - matches.append(TextMatch(start=pos, - end=pos+len(term.get_term()))) + term_name = term.get_term() + for pos in self._xapianIndex.termpositions(uid,term.get_term()): + if pos not in positions or \ + len(positions[pos]) < len(term_name): + positions[pos] = term_name term.next() - return matches + return [self._xapianMatchDecider(term, pos) for pos, term + in positions.iteritems()] def _moinSearch(self, pages=None): """ Search pages using moin's built-in full text search @@ -444,9 +452,11 @@ return hits def _moinMatch(self, page, uid): + """ Just kick off regular moinSearch """ return self.query.search(page) def _getHits(self, pages, matchSearchFunction): + """ Get the hit tuples in pages through matchSearchFunction """ hits = [] fs_rootpage = self.fs_rootpage for hit in pages: @@ -455,6 +465,7 @@ uid = hit['uid'] else: valuedict = hit + uid = None wikiname = valuedict['wikiname'] pagename = valuedict['pagename'] @@ -468,9 +479,9 @@ else: hits.append((wikiname, page, attachment, None)) else: - match = matchSearchFunction(page, uid) - if match: - hits.append((wikiname, page, attachment, match)) + matches = matchSearchFunction(page, uid) + if matches: + hits.append((wikiname, page, attachment, matches)) else: # other wiki hits.append((wikiname, pagename, attachment, None)) return hits
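The new `_xapianMatchDecider` keys off Xapian's term prefix ('S' marks title terms in this indexing scheme), and the positions dict keeps only the longest term seen at each position so a stemmed sub-term cannot shadow the full word. The same selection logic in isolation, with plain tuples instead of TitleMatch/TextMatch objects:

    def decide_match(term, pos):
        """Classify a matching term by its prefix (simplified)."""
        if term[0] == 'S':                       # title term
            return ('title', pos, pos + len(term) - 1)
        return ('text', pos, pos + len(term))    # body text (incl. headers)

    def best_terms_by_position(term_positions):
        """term_positions: list of (term, [positions]) as Xapian reports them.
        Keep the longest term per position, then classify each survivor."""
        positions = {}
        for term, poslist in term_positions:
            for pos in poslist:
                if pos not in positions or len(positions[pos]) < len(term):
                    positions[pos] = term
        return [decide_match(term, pos) for pos, term in positions.items()]

    hits = best_terms_by_position([('wiki', [3, 17]), ('wikis', [17]), ('Swiki', [0])])
    print(sorted(hits))   # 'wikis' wins over 'wiki' at position 17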
--- a/MoinMoin/search/results.py Mon Jul 17 13:29:36 2006 +0200 +++ b/MoinMoin/search/results.py Mon Jul 17 13:29:58 2006 +0200 @@ -494,7 +494,8 @@ start = len(header) # Find first match after start for i in xrange(len(matches)): - if matches[i].start >= start: + if matches[i].start >= start and \ + isinstance(matches[i], TextMatch): return i, start return 0, 0
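With TitleMatch objects now mixed into the match list, the first-match scan has to skip them, otherwise the context extractor could seat its snippet window on a title hit that lies outside the body text. The guard in isolation, with (kind, offset) tuples standing in for match objects:

    def first_text_match(matches, start):
        """Return index of the first text match at or after offset start.
        matches: (kind, offset) tuples, sorted by offset (simplified)."""
        for i, (kind, offset) in enumerate(matches):
            if offset >= start and kind == 'text':
                return i, start
        return 0, 0

    print(first_text_match([('title', 0), ('text', 40), ('text', 90)], 25))  # (1, 25)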
--- a/MoinMoin/support/xapwrap/document.py	Mon Jul 17 13:29:36 2006 +0200
+++ b/MoinMoin/support/xapwrap/document.py	Mon Jul 17 13:29:58 2006 +0200
@@ -145,6 +145,9 @@
         # add text fields
         for field in self.textFields:
+            # XXX: terms in textFields won't get numbered consecutively
+            # across fields; consecutive numbering is needed for titles
+            position = 0
             for token in analyzer.tokenize(field.text):
                 if isinstance(token, tuple):
                     token, position = token
@@ -163,19 +166,20 @@
                 # the process, the string length could expand, so we
                 # need to check here as well.
                 d.add_posting(checkKeyLen(token), position)
-            position += INTER_FIELD_POSITION_GAP
+            #position += INTER_FIELD_POSITION_GAP

             if field.prefix:
                 prefix = field.name
                 for token in analyzer.tokenize(field.text):
                     if isinstance(token, tuple):
-                        token = token[0]
+                        token, position = token
+                    else:
+                        position += 1
                     # token is unicode, but gets converted to UTF-8
                     # by makePairForWrite:
                     term = makePairForWrite(prefix, token, prefixMap)
                     d.add_posting(term, position)
-                    position += 1
-                position += INTER_FIELD_POSITION_GAP
+                #position += INTER_FIELD_POSITION_GAP

         # add keyword fields
         for field in self.keywords:
--- a/docs/CHANGES.fpletz	Mon Jul 17 13:29:36 2006 +0200
+++ b/docs/CHANGES.fpletz	Mon Jul 17 13:29:58 2006 +0200
@@ -5,14 +5,18 @@
     * Only term-based regex searching possible, modifier or heuristic
       to enable usage of _moinSearch for full compatibility?
     * HACK: MoinMoin.Xapian.Index._get_languages (wait for proper metadata)
+    * Positions saved in Xapian aren't always correct; needs checking.
+      The code generally needs some more love.

   ToDo:
     * Implement the new search UI
     * Write/update documentation for all the new search stuff
     * Indexing and searching of categories (new term prefix)
-    * Drop _moinSearch when using Xapian and use term positions provided
-      by Xapian itself, needs some reworking of WikiAnalyzer/xapwrap to
-      get the position of stemmed words right
+    * Reevaluate Xapwrap, possibly drop it and rip out the usable parts
+      (e.g. ExceptionTranslator)
+    * Add stemming support for highlighting, either via
+      1. a regexp for the whole word (all lowercase), or
+      2. just the root of the word

   New Features:
     * Faster search thanks to Xapian
@@ -122,3 +126,26 @@
     * Basic (quick and dirty, limitations and bugs included, but
       commit-ready) implementation of getting matches out of the
       Xapian DB
+
+2006-07-08
+    * No work: daytrip to Munich
+
+2006-07-09
+    * Bugfix for _moinSearch (when not using Xapian)
+
+2006-07-11
+    * Make the matches we get from Xapian more reliable
+    * Add TitleMatch support
+    * Xapwrap needed some tuning (aka hacking); think about dropping
+      and/or rewriting much of its code, as it doesn't always fit (and
+      probably won't in the future)
+
+2006-07-12
+2006-07-13
+    * No work
+
+2006-07-14
+    * Minor bugfix for TitleMatch, now works correctly
+    * The first interesting match must be a TextMatch
+    * Comment out read_lock code in BaseIndex (should not be needed)
+    * Support complete rebuild of the search database (delete and add)
+