view data/plugin/macro/CollectLists.py @ 345:630b3cfb01dc

CollectLists: macro to collect data from definition lists on subpages into a databrowser widget table.
author Reimar Bauer <rb.proj AT googlemail DOT com>
date Tue, 03 Feb 2009 15:54:24 +0100
parents
children c8f59d078d98
line wrap: on
line source
# -*- coding: iso-8859-1 -*-
"""
    MoinMoin - macro to collect data from definition lists on subpages
    into a databrowser widget table

    <<CollectLists>>
    will create a table column for every subpage of the current page and fill in
    the data from each key/value pair.
    Optionally you can give the template page the definition list pages depend on,
    or the column_heading; in the latter case its order is used. It can also
    optionally use another pagename. By setting a parser one can use e.g. wiki markup.
    With filter_selection you can optionally use the filter method of the databrowser widget.

    @copyright: 2006 by michael cohen <scudette@users.sourceforge.net> (PageDicts)
    @copyright: 2008-2009 by MoinMoin:ReimarBauer (completely rewritten)
    @license: GNU GPL, see COPYING for details.
"""
import re
from MoinMoin import wikiutil
from MoinMoin.Page import Page
from MoinMoin.util.dataset import TupleDataset, Column
from MoinMoin.wikidicts import Dict
from MoinMoin.widget.browser import DataBrowserWidget

Dependencies = ["pages"]

def macro_CollectLists(macro, pagename=unicode,
                      column_heading=u'', template=u'',
                      transpose=False,
                      parser=u'text_moin_wiki',
                      filter_selection=u'NeverExistingDefaultFilter'):
    """
    Collect key/value pairs from the definition lists of all subpages of
    pagename into a databrowser widget table.

    Currently transpose is not supported together with filter_selection.

    @param macro: macro instance, supplies request and formatter
    @param pagename: page whose subpages are scanned; the type default
                     means "unicode, no value" in MoinMoin's macro argument
                     handling, so the current page is used when omitted
    @param column_heading: comma separated list of keys selecting and
                           ordering the table columns
    @param template: template page whose dict supplies the column keys
    @param transpose: if True, rows and columns of the result are swapped
    @param parser: parser plugin name used to render the values
    @param filter_selection: comma separated list of keys that get the
                             databrowser autofilter
    @return: rendered HTML of the databrowser table, or "" when the page
             has no subpages
    """
    request = macro.request
    formatter = macro.formatter

    try:
        WikiParser = wikiutil.importPlugin(request.cfg, 'parser', parser, function="Parser")
    except wikiutil.PluginMissingError:
        WikiParser = None

    if not pagename:
        pagename = formatter.page.page_name

    # escape the pagename so regex metacharacters in it (e.g. '(', '+', '.')
    # cannot break or widen the subpage search
    needle = '^%s/(.*)' % re.escape(pagename)
    filterfn = re.compile(needle).search
    pages = request.rootpage.getPageList(exists=1, filter=filterfn)
    if not pages:
        return ""
    # exclude template pages from the collected subpages
    filterfn = request.cfg.cache.page_template_regexact.search
    templates = request.rootpage.getPageList(filter=filterfn)
    subpages = [page for page in pages if page not in templates]
    if subpages:
        subpages.sort()

    # column keys, by precedence: explicit column_heading (selection and
    # order), then the template page's dict, then the first subpage's dict
    if column_heading:
        column_heading_keys = [key.strip() for key in column_heading.split(',')]
    elif Page(request, template).exists():
        page = Page(request, template)
        page_dict = Dict(request, template)
        column_heading_keys = page_dict.keys()
    else:
        # fallback: use the keys of the first subpage
        page = Page(request, subpages[0])
        page_dict = Dict(request, subpages[0])
        column_heading_keys = page_dict.keys()

    data = TupleDataset()
    data.columns = []
    data.columns.extend([Column(pagename.strip('/'), label=pagename.strip('/'), align='center')])

    if transpose:
        # header row; becomes the header column after transposition
        data.addRow([pagename.strip('/')] + column_heading_keys)

    for name in subpages:
        page = Page(request, name)
        page_dict = Dict(request, name)
        row = []
        for key in column_heading_keys:
            if key in page_dict.keys():
                value = page_dict.get(key, '')
                if WikiParser:
                    # xxx check how our brand new Image class solves this
                    # make attachment links point at the subpage they live on
                    if parser == u'text_moin_wiki':
                        value = value.replace('attachment:', 'attachment:%s/' % name)
                    row.append((wikiutil.renderText(request, WikiParser, value), wikiutil.escape(value, 1)))
                else:
                    row.append((wikiutil.escape(value, 1), wikiutil.escape(value, 1)))
            else:
                row.append('&nbsp;')
        # slice off the parent prefix instead of name.split('/'), which
        # raises ValueError for nested subpages (A/B/C) or for a parent
        # pagename that itself contains a '/'
        child = name[len(pagename):].lstrip('/')
        link = page.link_to(request, text="/%s" % child)
        data.addRow([link] + row)
        if transpose:
            data.columns.extend([Column(link, label=link, align='center')])

    if transpose:
        data.data = map(None, zip(*data.data))
        # drop the transposed header column (it is already in data.columns)
        data.data = data.data[1:]
    else:
        if filter_selection:
            # strip whitespace so "a, b" selects the same keys as "a,b"
            filtercols = [col.strip() for col in filter_selection.split(',')]
            for key in column_heading_keys:
                key = key.strip()
                if key in filtercols:
                    data.columns.append(Column(key, autofilter=True))
                else:
                    data.columns.extend([Column(key, label=key, align='center')])

    table = DataBrowserWidget(request)
    table.setData(data)
    html = ''.join(table.format(method='GET'))
    # needs the sortable javascript code added to your common.js
    # needs to be checked if it works together with filter
    # html = html.replace('id="dbw.table"', 'class="sortable" id="dbw.table"')
    return html