view data/plugin/macro/ @ 352:2b139cd6c4d7

CollectLists: verified sortable.js and extended comment
author Reimar Bauer <rb.proj AT googlemail DOT com>
date Wed, 04 Feb 2009 15:08:20 +0100
parents 402214f6607c
children 51772c48c3eb
line wrap: on
line source
# -*- coding: iso-8859-1 -*-
"""
    MoinMoin - macro to collect data from definition lists on subpages
    into a databrowser widget table

    will create a table column for every subpage of the current page and fills in
    the data from each key value pair
    optionally you can give the template page the definition list page depends on
    or the column_heading. In the latter case the order is used. Also it can optionally use
    another pagename. By setting an optional parser one can use e.g. wikimarkup. By
    filter_selection you can optionally use the filter method of the databrowser widget.
    By using a different filter_pattern than '.*' you get only rows shown where that
    pattern was found.

    @copyright: 2006 by michael cohen <> (PageDicts)
    @copyright: 2008-2009 by MoinMoin:ReimarBauer (completly rewritten)
    @license: GNU GPL, see COPYING for details.
"""
import re
from MoinMoin import wikiutil
from MoinMoin.Page import Page
from MoinMoin.util.dataset import TupleDataset, Column
from MoinMoin.wikidicts import Dict
from MoinMoin.widget.browser import DataBrowserWidget

Dependencies = ["pages"]

def macro_CollectLists(macro, pagename=unicode,
                      align=("left", "center", "right"),
                      column_heading=u'', template=u'',

    currently we don't support transpose together with filter_selection
    removing columns by filter_pattern and transpose is not implemented yet
    request = macro.request
    formatter = macro.formatter

        WikiParser = wikiutil.importPlugin(request.cfg, 'parser', parser, function="Parser")
    except wikiutil.PluginMissingError:
        WikiParser = None

    if not pagename:
        pagename =

    needle = '^%s/(.*)' % pagename
    filterfn = re.compile(needle).search
    pages = request.rootpage.getPageList(exists=1, filter=filterfn)
    if not pages:
        return ""
    # only one level of subpages is used (no Template pages)
    filterfn =
    templates = request.rootpage.getPageList(filter=filterfn)
    subpages = [page for page in pages if page not in templates]
    if subpages:

    # use selection and order
    if column_heading:
        column_heading_keys = [key.strip() for key in column_heading.split(',')]
    # use keys from template page
    elif Page(request, template).exists():
        page = Page(request, template)
        page_dict = Dict(request, template)
        column_heading_keys = page_dict.keys()
        # fallback use the keys of the first subpage
        page = Page(request, subpages[0])
        page_dict = Dict(request, subpages[0])
        column_heading_keys = page_dict.keys()

    data = TupleDataset()
    data.columns = []
    data.columns.extend([Column(pagename.strip('/'), label=pagename.strip('/'), align=align)])
    # may be transpose should be moved into the databrowser widget
    if transpose:
        data.addRow([pagename.strip('/')] + column_heading_keys)

    for name in subpages:
        page = Page(request, name)
        page_dict = Dict(request, name)
        row = []
        tmp_string = []
        for key in column_heading_keys:
            if key in page_dict.keys():
                value = page_dict.get(key, '')
                if WikiParser:
                    # xxx check how our brand new Image class solves this
                    if parser == u'text_moin_wiki':
                        value = value.replace('attachment:', 'attachment:%s/' % name)
                    row.append((wikiutil.renderText(request, WikiParser, value), wikiutil.escape(value, 1)))
                    row.append((wikiutil.escape(value, 1), wikiutil.escape(value, 1)))
        parent, child = name.split('/', 1)
        link = page.link_to(request, text="/%s" % child)
        tmp_string = ''.join(tmp_string)
        search_result =, tmp_string)
        # ToDo removing columns by filter_pattern and transpose is not implemented yet
        if tmp_string and search_result or transpose:
            data.addRow([link] + row)
        if transpose:
            data.columns.extend([Column(link, label=link, align=align)])

    if transpose: = map(None, zip(* =[1:]
        if filter_selection and not transpose:
            filtercols = filter_selection.split(',')
            for key in column_heading_keys:
                key = key.strip()
                if key in filtercols:
                    data.columns.append(Column(key, autofilter=(key in filtercols)))
                    data.columns.extend([Column(key, label=key, align=align)])

    table = DataBrowserWidget(request)
    html = ''.join(table.format(method='GET'))
    # seems to work together with
    # html = html.replace('id="dbw.table"', 'class="sortable" id="dbw.table"')
    return html