Mercurial > moin > 1.9
changeset 3119:2a380f99afa8
migration scripts: copied from 1.6 as of rev 2560:e2d1e0285eda
author | Thomas Waldmann <tw AT waldmann-edv DOT de> |
---|---|
date | Tue, 26 Feb 2008 00:19:10 +0100 |
parents | 95026d5a2125 |
children | a7c1680aeaa0 |
files | MoinMoin/script/migration/1059999.py MoinMoin/script/migration/1060000.py MoinMoin/script/migration/1060100.py MoinMoin/script/migration/_conv160.py MoinMoin/script/migration/_conv160_wiki.py MoinMoin/script/migration/data.py MoinMoin/script/migration/text_moin158_wiki.py |
diffstat | 7 files changed, 94 insertions(+), 18 deletions(-) [+] |
line wrap: on
line diff
--- a/MoinMoin/script/migration/1059999.py Tue Feb 26 00:16:12 2008 +0100 +++ b/MoinMoin/script/migration/1059999.py Tue Feb 26 00:19:10 2008 +0100 @@ -32,9 +32,14 @@ src_data_dir = os.path.abspath(os.path.join(data_dir, '..', 'data.pre160')) # keep the orig data_dir here dst_data_dir = data_dir shutil.move(data_dir, src_data_dir) + # the 1.5 parser checks page existence, so we must use the orig, fully populated dir: + saved_data_dir = script.request.cfg.data_dir + script.request.cfg.data_dir = src_data_dir os.mkdir(dst_data_dir) shutil.move(os.path.join(src_data_dir, 'cache'), os.path.join(dst_data_dir, 'cache')) # mig script has locks there dc = DataConverter(script.request, src_data_dir, dst_data_dir) dc.pass2() + # restore correct data dir: + script.request.cfg.data_dir = saved_data_dir return 1060000
--- a/MoinMoin/script/migration/1060000.py Tue Feb 26 00:16:12 2008 +0100 +++ b/MoinMoin/script/migration/1060000.py Tue Feb 26 00:19:10 2008 +0100 @@ -1,13 +1,13 @@ # -*- coding: iso-8859-1 -*- """ - MoinMoin - dummy migration terminator script + MoinMoin - migration from base rev 1060000 - This must be the last migration script. + Nothing to do, we just return the new data dir revision. - @copyright: 2006 by Thomas Waldmann + @copyright: 2008 by Thomas Waldmann @license: GNU GPL, see COPYING for details. """ def execute(script, data_dir, rev): - return None + return 1060100
--- /dev/null Thu Jan 01 00:00:00 1970 +0000 +++ b/MoinMoin/script/migration/1060100.py Tue Feb 26 00:19:10 2008 +0100 @@ -0,0 +1,13 @@ +# -*- coding: iso-8859-1 -*- +""" + MoinMoin - dummy migration terminator script + + This must be the last migration script. + + @copyright: 2006 by Thomas Waldmann + @license: GNU GPL, see COPYING for details. +""" + +def execute(script, data_dir, rev): + return None +
--- a/MoinMoin/script/migration/_conv160.py Tue Feb 26 00:16:12 2008 +0100 +++ b/MoinMoin/script/migration/_conv160.py Tue Feb 26 00:19:10 2008 +0100 @@ -78,16 +78,22 @@ """ read complete event-log from disk """ data = [] try: + lineno = 0 f = file(self.fname, 'r') for line in f: + lineno += 1 line = line.replace('\r', '').replace('\n', '') if not line.strip(): # skip empty lines continue fields = line.split('\t') - timestamp, action, kvpairs = fields[:3] - timestamp = int(timestamp) - kvdict = wikiutil.parseQueryString(kvpairs) - data.append((timestamp, action, kvdict)) + try: + timestamp, action, kvpairs = fields[:3] + timestamp = int(timestamp) + kvdict = wikiutil.parseQueryString(kvpairs) + data.append((timestamp, action, kvdict)) + except ValueError, err: + # corrupt event log line, log error and skip it + print "Error: invalid event log (%s) line %d, err: %s, SKIPPING THIS LINE!" % (self.fname, lineno, str(err)) f.close() except IOError, err: # no event-log @@ -227,7 +233,7 @@ def __init__(self, request, attach_dir, attfile): self.request = request self.path = opj(attach_dir, attfile) - self.name = attfile.decode('utf-8') + self.name = attfile.decode('utf-8', 'replace') def copy(self, attach_dir): """ copy attachment file from orig path to new destination """ @@ -471,10 +477,15 @@ def save_list(self, fname, what): what_sorted = what.keys() - what_sorted.sort(cmp=lambda x, y: cmp(x[1:], y[1:])) + # make sure we have 3-tuples: + what_sorted = [(k + (None, ))[:3] for k in what_sorted] + # we only have python 2.3, thus no cmp keyword for the sort() call, + # thus we need to do it the more complicated way: + what_sorted = [(pn, fn, rtype) for rtype, pn, fn in what_sorted] # shuffle + what_sorted.sort() # sort + what_sorted = [(rtype, pn, fn) for pn, fn, rtype in what_sorted] # shuffle f = codecs.open(fname, 'w', 'utf-8') - for k in what_sorted: - rtype, pn, fn = (k + (None, ))[:3] + for rtype, pn, fn in what_sorted: if rtype == 'PAGE': line = (rtype, pn, pn) elif rtype == 'FILE':
--- a/MoinMoin/script/migration/_conv160_wiki.py Tue Feb 26 00:16:12 2008 +0100 +++ b/MoinMoin/script/migration/_conv160_wiki.py Tue Feb 26 00:19:10 2008 +0100 @@ -50,6 +50,46 @@ return result +STONEAGE_IMAGELINK = False # True for ImageLink(target,image), False for ImageLink(image,target) + +# copied from moin 1.6.0 macro/ImageLink.py (to be safe in case we remove ImageLink some day) +# ... and slightly modified/refactored for our needs here. +# hint: using parse_quoted_separated from wikiutil does NOT work here, because we do not have +# quoted urls when they contain a '=' char in the 1.5 data input. +def explore_args(args): +    """ explore args for positional and keyword parameters """ + if args: + args = args.split(',') + args = [arg.strip() for arg in args] + else: + args = [] + + kw_count = 0 + kw = {} # keyword args + pp = [] # positional parameters + + kwAllowed = ('width', 'height', 'alt') + + for arg in args: + if '=' in arg: + key, value = arg.split('=', 1) + key_lowerstr = str(key.lower()) + # avoid that urls with "=" are interpreted as keyword + if key_lowerstr in kwAllowed: + kw_count += 1 + kw[key_lowerstr] = value + elif not kw_count and '://' in arg: + # assuming that this is the image + pp.append(arg) + else: + pp.append(arg) + + if STONEAGE_IMAGELINK and len(pp) >= 2: + pp[0], pp[1] = pp[1], pp[0] + + return pp, kw + + class Converter(Parser): def __init__(self, request, pagename, raw, renames): self.pagename = pagename @@ -59,7 +99,7 @@ self._ = None self.in_pre = 0 - self.formatting_rules = self.formatting_rules % {'macronames': u'|'.join(macro.getNames(self.request.cfg))} + self.formatting_rules = self.formatting_rules % {'macronames': u'|'.join(['ImageLink', ] + macro.getNames(self.request.cfg))} # no change def return_word(self, word): @@ -161,9 +201,9 @@ macro_name = m.group('macro_name') macro_args = m.group('macro_args') if macro_name == 'ImageLink': - fixed, kw, trailing = wikiutil.parse_quoted_separated(macro_args) + fixed, kw = explore_args(macro_args) #print "macro_args=%r" % macro_args - #print "fixed=%r, kw=%r, trailing=%r" % (fixed, kw, trailing) + #print "fixed=%r, kw=%r" % (fixed, kw) image, target = (fixed + ['', ''])[:2] if image is None: image = '' @@ -176,6 +216,8 @@ target = image elif target.startswith('inline:'): target = 'attachment:' + target[7:] # we don't support inline: + elif target.startswith('wiki:'): + target = target[5:] # drop wiki: image_attrs = [] alt = kw.get('alt') or '' width = kw.get('width') @@ -259,7 +301,12 @@ return '[[%s%s]]' % (pagename, text) wikitag, wikiurl, wikitail, wikitag_bad = wikiutil.resolve_wiki(self.request, url) - wikitail = wikiutil.url_unquote(wikitail) + if wikitag_bad: # likely we got some /InterWiki as wikitail, we don't want that! + pagename = wikiutil.url_unquote(pagename) + pagename = self._replace_target(pagename) + wikitail = pagename + else: # good + wikitail = wikiutil.url_unquote(wikitail) # link to self? if wikiutil.isPicture(wikitail):
--- a/MoinMoin/script/migration/data.py Tue Feb 26 00:16:12 2008 +0100 +++ b/MoinMoin/script/migration/data.py Tue Feb 26 00:19:10 2008 +0100 @@ -12,7 +12,7 @@ Important: you must have run all 12_to_13* and the final 152_to_1050300 mig scripts ONCE and in correct order manually before attempting to use the new style migration stuff. - + @copyright: 2006 MoinMoin:ThomasWaldmann @license: GNU GPL, see COPYING for details. """
--- a/MoinMoin/script/migration/text_moin158_wiki.py Tue Feb 26 00:16:12 2008 +0100 +++ b/MoinMoin/script/migration/text_moin158_wiki.py Tue Feb 26 00:19:10 2008 +0100 @@ -175,7 +175,7 @@ url = url[10:] # remove "wiki:self:" is_self_reference = 1 elif url2.startswith('wiki:'): - url = url[5:] # remove "wiki: + url = url[5:] # remove "wiki:" tag, tail = wikiutil.split_wiki(url) if text is None: