changeset 1595:3a63c9b31d7e

avoid infinite recursion in parsePageLinks
author Thomas Waldmann <tw AT waldmann-edv DOT de>
date Mon, 02 Oct 2006 00:37:39 +0200
parents 26fbd0276b12
children 706de8a16147
files MoinMoin/Page.py MoinMoin/request/__init__.py docs/CHANGES
diffstat 3 files changed, 25 insertions(+), 9 deletions(-)
line diff
--- a/MoinMoin/Page.py	Tue Sep 26 23:37:05 2006 +0200
+++ b/MoinMoin/Page.py	Mon Oct 02 00:37:39 2006 +0200
@@ -6,7 +6,7 @@
     @license: GNU GPL, see COPYING for details.
 """
 
-import StringIO, os, re, random, codecs
+import StringIO, os, re, random, codecs, logging
 
 from MoinMoin import config, caching, user, util, wikiutil
 from MoinMoin.logfile import eventlog
@@ -1574,26 +1574,40 @@
         More efficient now by using special pagelinks formatter and
         redirecting possible output into null file.
         """
-        request.clock.start('parsePagelinks')
+        pagename = self.page_name
+        if request.parsePageLinks_running.get(pagename, False):
+            #logging.debug("avoid recursion for page %r" % pagename)
+            return [] # avoid recursion
+
+        #logging.debug("running parsePageLinks for page %r" % pagename)
+        # remember we are already running this function for this page:
+        request.parsePageLinks_running[pagename] = True
+
+        request.clock.start('parsePageLinks')
+
         class Null:
-            def write(self, str): pass
+            def write(self, data):
+                pass
+
         request.redirect(Null())
-        request.mode_getpagelinks = 1
+        request.mode_getpagelinks += 1
+        #logging.debug("mode_getpagelinks == %r" % request.mode_getpagelinks)
         try:
             try:
                 from MoinMoin.formatter.pagelinks import Formatter
                 formatter = Formatter(request, store_pagelinks=1)
-                page = Page(request, self.page_name, formatter=formatter)
+                page = Page(request, pagename, formatter=formatter)
                 page.send_page(request, content_only=1)
             except:
                 import traceback
-                traceback.print_exc()
+                traceback.print_exc(200)
         finally:
-            request.mode_getpagelinks = 0
+            request.mode_getpagelinks -= 1
+            #logging.debug("mode_getpagelinks == %r" % request.mode_getpagelinks)
             request.redirect()
             if hasattr(request, '_fmt_hd_counters'):
                 del request._fmt_hd_counters
-            request.clock.stop('parsePagelinks')
+            request.clock.stop('parsePageLinks')
         return formatter.pagelinks
 
     def getCategories(self, request):
--- a/MoinMoin/request/__init__.py	Tue Sep 26 23:37:05 2006 +0200
+++ b/MoinMoin/request/__init__.py	Mon Oct 02 00:37:39 2006 +0200
@@ -210,7 +210,8 @@
                     self.makeUnavailable503()
 
             self.pragma = {}
-            self.mode_getpagelinks = 0
+            self.mode_getpagelinks = 0 # is > 0 as long as we are in a getPageLinks call
+            self.parsePageLinks_running = {} # avoid infinite recursion by remembering what we are already running
 
             self.lang = i18n.requestLanguage(self)
             # Language for content. Page content should use the wiki default lang,
--- a/docs/CHANGES	Tue Sep 26 23:37:05 2006 +0200
+++ b/docs/CHANGES	Mon Oct 02 00:37:39 2006 +0200
@@ -293,6 +293,7 @@
       unsafe to this regard. If you know more dangerous stuff, please just
       add the mimetypes there to protect your users and file a bug report
       telling us what we missed.
+    * Avoid infinite recursion in Page.parsePageLinks.
 
   Other changes:
     * HINT: if you run standard CGI, copy and edit the new moin.cgi from