author       Aurelien Campeas <aurelien.campeas@logilab.fr>
date         Wed, 07 Jan 2009 14:42:43 +0100
changeset    345:31f88b2e3500
parent       277:a11a3c231050
child        350:f34ef2c64605
permissions  -rw-r--r--

# -*- coding: utf-8 -*-
"""user interface libraries

contains some functions designed to help the implementation of the cubicweb
user interface

:organization: Logilab
:copyright: 2001-2008 LOGILAB S.A. (Paris, FRANCE), all rights reserved.
:contact: http://www.logilab.fr/ -- mailto:contact@logilab.fr
"""
__docformat__ = "restructuredtext en"

import csv
import decimal
import locale
import re
from urllib import quote as urlquote
from cStringIO import StringIO
from xml.parsers.expat import ExpatError
from copy import deepcopy

import simplejson

from mx.DateTime import DateTimeType, DateTimeDeltaType

from logilab.common.textutils import unormalize

def ustrftime(date, fmt='%Y-%m-%d'):
    """like strftime, but returns a unicode string instead of an encoded
    string, which may be problematic with localized dates.

    encoding is guessed by locale.getpreferredencoding()
    """
    # date format may depend on the locale
    encoding = locale.getpreferredencoding(do_setlocale=False) or 'UTF-8'
    return unicode(date.strftime(fmt), encoding)

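# Hedged usage sketch (not part of the original module), assuming an
# mx.DateTime value as used elsewhere in this file; the result is decoded
# with the locale's preferred encoding:
#
#   from mx.DateTime import DateTime
#   ustrftime(DateTime(2009, 1, 7), '%Y/%m/%d')   # -> u'2009/01/07'
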
def rql_for_eid(eid):
    """return the rql query necessary to fetch the entity with the given eid.
    This function should only be used to generate links with rql inside, not
    to be given to cursor.execute (in which case you won't benefit from the
    rql cache).

    :Parameters:
      - `eid`: the eid of the entity we should search
    :rtype: str
    :return: the rql query
    """
    return 'Any X WHERE X eid %s' % eid

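# Usage sketch (not in the original source):
#
#   rql_for_eid(42)   # -> 'Any X WHERE X eid 42'
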
def printable_value(req, attrtype, value, props=None, displaytime=True):
    """return a displayable value (i.e. a unicode string)"""
    if value is None or attrtype == 'Bytes':
        return u''
    if attrtype == 'String':
        # don't translate empty values, that gives strange results
        if props is not None and value and props.get('internationalizable'):
            return req._(value)
        return value
    if attrtype == 'Date':
        return ustrftime(value, req.property_value('ui.date-format'))
    if attrtype == 'Time':
        return ustrftime(value, req.property_value('ui.time-format'))
    if attrtype == 'Datetime':
        if not displaytime:
            return ustrftime(value, req.property_value('ui.date-format'))
        return ustrftime(value, req.property_value('ui.datetime-format'))
    if attrtype == 'Boolean':
        if value:
            return req._('yes')
        return req._('no')
    if attrtype == 'Float':
        value = req.property_value('ui.float-format') % value
    return unicode(value)


# text publishing #############################################################

try:
    from cubicweb.common.rest import rest_publish # pylint: disable-msg=W0611
except ImportError:
    def rest_publish(entity, data):
        """default behaviour if docutils was not found"""
        return data

TAG_PROG = re.compile(r'</?.*?>', re.U)
def remove_html_tags(text):
    """Removes HTML tags from text

    >>> remove_html_tags('<td>hi <a href="http://www.google.fr">world</a></td>')
    'hi world'
    >>>
    """
    return TAG_PROG.sub('', text)

REF_PROG = re.compile(r"<ref\s+rql=([\'\"])([^\1]*?)\1\s*>([^<]*)</ref>", re.U)
def _subst_rql(view, obj):
    delim, rql, descr = obj.groups()
    return u'<a href="%s">%s</a>' % (view.build_url(rql=rql), descr)

def html_publish(view, text):
    """replace <ref rql=''> links by <a href="...">"""
    if not text:
        return u''
    return REF_PROG.sub(lambda obj, view=view: _subst_rql(view, obj), text)

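# Hedged usage sketch (FakeView is hypothetical, not part of cubicweb):
# html_publish rewrites <ref rql="..."> markup into links built by the view.
#
#   class FakeView(object):
#       def build_url(self, rql):
#           return 'view?rql=' + urlquote(rql)
#
#   html_publish(FakeView(), u'<ref rql="Any X WHERE X is Person">people</ref>')
#   # -> u'<a href="view?rql=Any%20X%20WHERE%20X%20is%20Person">people</a>'
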
# fallback implementation, nicer one defined below if lxml is available
def soup2xhtml(data, encoding):
    return data

# fallback implementation, nicer one defined below if lxml > 2.0 is available
def safe_cut(text, length):
    """returns a string of length <length> based on <text>, removing any html
    tags from the given text if a cut is necessary."""
    if text is None:
        return u''
    text_nohtml = remove_html_tags(text)
    # try to keep html tags if text is short enough
    if len(text_nohtml) <= length:
        return text
    # else if un-tagged text is too long, cut it
    return text_nohtml[:length-3] + u'...'

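# Hedged usage sketch (not in the original source): the fallback drops the
# markup entirely when the untagged text exceeds the requested length.
#
#   safe_cut('<p>abcdefghijklmno</p>', 10)   # -> u'abcdefg...'
#   safe_cut('<p>short</p>', 10)             # -> '<p>short</p>' (kept as-is)
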
try:
    from lxml import etree
except (ImportError, AttributeError):
    # gae environment: lxml not available
    pass
else:

    def soup2xhtml(data, encoding):
        """tidy (at least try) html soup and return the result
        Note: the function considers a string with no surrounding tag as valid
        if <div>`data`</div> can be parsed by an XML parser
        """
        xmltree = etree.HTML('<div>%s</div>' % data)
        # NOTE: lxml 1.1 (etch platforms) doesn't recognize
        #       the encoding=unicode parameter (lxml 2.0 does), this is
        #       why we specify an encoding and re-decode to unicode later
        body = etree.tostring(xmltree[0], encoding=encoding)
        # strip the enclosing '<body><div>' and '</div></body>' and decode to unicode
        return body[11:-13].decode(encoding)

    if hasattr(etree.HTML('<div>test</div>'), 'iter'):

        def safe_cut(text, length):
            """return an html document cut to roughly <length> displayable
            characters, based on <text>, when a cut is necessary.
            """
            if text is None:
                return u''
            textParse = etree.HTML(text)
            compteur = 0

            for element in textParse.iter():
                if compteur > length:
                    parent = element.getparent()
                    parent.remove(element)
                else:
                    if element.text is not None:
                        text_resum = text_cut_letters(element.text, length)
                        len_text_resum = len(''.join(text_resum.split()))
                        compteur = compteur + len_text_resum
                        element.text = text_resum

                    if element.tail is not None:
                        if compteur < length:
                            text_resum = text_cut_letters(element.tail, length)
                            len_text_resum = len(''.join(text_resum.split()))
                            compteur = compteur + len_text_resum
                            element.tail = text_resum
                        else:
                            element.tail = ''

            div = etree.HTML('<div></div>')[0][0]
            listNode = textParse[0].getchildren()
            for node in listNode:
                div.append(deepcopy(node))
            return etree.tounicode(div)


# HTML generation helper functions ############################################

from logilab.mtconverter import html_escape

def tooltipize(text, tooltip, url=None):
    """make an HTML tooltip"""
    url = url or '#'
    return u'<a href="%s" title="%s">%s</a>' % (url, tooltip, text)

def toggle_action(nodeid):
    """builds a javascript: url that uses the js toggleVisibility function"""
    return u"javascript: toggleVisibility('%s')" % nodeid

def toggle_link(nodeid, label):
    """builds an HTML link that uses the js toggleVisibility function"""
    return u'<a href="%s">%s</a>' % (toggle_action(nodeid), label)

def ajax_replace_url(nodeid, rql, vid=None, swap=False, **extraparams):
    """builds a replacePageChunk-like url
    >>> ajax_replace_url('foo', 'Person P')
    "javascript: replacePageChunk('foo', 'Person%20P');"
    >>> ajax_replace_url('foo', 'Person P', 'oneline')
    "javascript: replacePageChunk('foo', 'Person%20P', 'oneline');"
    >>> ajax_replace_url('foo', 'Person P', 'oneline', name='bar', age=12)
    "javascript: replacePageChunk('foo', 'Person%20P', 'oneline', {'age':12, 'name':'bar'});"
    >>> ajax_replace_url('foo', 'Person P', name='bar', age=12)
    "javascript: replacePageChunk('foo', 'Person%20P', 'null', {'age':12, 'name':'bar'});"
    """
    params = [repr(nodeid), repr(urlquote(rql))]
    if extraparams and not vid:
        params.append("'null'")
    elif vid:
        params.append(repr(vid))
    if extraparams:
        params.append(simplejson.dumps(extraparams))
    if swap:
        params.append('true')
    return "javascript: replacePageChunk(%s);" % ', '.join(params)

def text_cut(text, nbwords=30):
    """cut `text` after approximately `nbwords` words, extending the cut to
    the next full stop if one is found"""
    if text is None:
        return u''
    minlength = len(' '.join(text.split()[:nbwords]))
    textlength = text.find('.', minlength) + 1
    if textlength == 0: # no point found
        textlength = minlength
    return text[:textlength]

def text_cut_letters(text, nbletters):
    """cut `text` after roughly `nbletters` non-blank characters, trying to
    end the cut at a sentence or word boundary"""
    if text is None:
        return u''
    if len(''.join(text.split())) <= nbletters:
        return text
    else:
        text_nospace = ''.join(text.split())
        textlength = text.find('.') + 1

        if textlength == 0:
            textlength = text.find(' ', nbletters+5)

        return text[:textlength]

def cut(text, length):
    """returns a string of at most <length> characters based on <text>
    post:
      len(__return__) <= length
    """
    if text is None:
        return u''
    if len(text) <= length:
        return text
    # text is too long, cut it and add an ellipsis
    return text[:length-3] + u'...'

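# Usage sketch (not in the original source):
#
#   cut('hello world', 5)    # -> u'he...'
#   cut('hello', 10)         # -> 'hello'
#   cut(None, 10)            # -> u''
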
from StringIO import StringIO

def ureport_as_html(layout):
    from logilab.common.ureports import HTMLWriter
    formater = HTMLWriter(True)
    stream = StringIO() #UStringIO() don't want unicode assertion
    formater.format(layout, stream)
    res = stream.getvalue()
    if isinstance(res, str):
        res = unicode(res, 'UTF8')
    return res

def render_HTML_tree(tree, selected_node=None, render_node=None, caption=None):
    """
    Generate a pure HTML representation of a tree given as an instance
    of a logilab.common.tree.Node

    selected_node is the currently selected node (if any), whose
    surrounding <div> will have id="selected" (which defaults to a bold
    border line with the default CSS).

    render_node is a function that should take a Node content (Node.id)
    as parameter and should return a string (what will be displayed
    in the cell).

    Warning: proper rendering of the generated html code depends on html_tree.css
    """
    tree_depth = tree.depth_down()
    if render_node is None:
        render_node = str

    # helper function that builds a matrix from the tree, like:
    # +------+-----------+-----------+
    # | root | child_1_1 | child_2_1 |
    # | root | child_1_1 | child_2_2 |
    # | root | child_1_2 |           |
    # | root | child_1_3 | child_2_3 |
    # | root | child_1_3 | child_2_4 |
    # +------+-----------+-----------+
    # from:
    # root -+- child_1_1 -+- child_2_1
    #       |             |
    #       |             +- child_2_2
    #       +- child_1_2
    #       |
    #       +- child_1_3 -+- child_2_3
    #                     |
    #                     +- child_2_4
    def build_matrix(path, matrix):
        if path[-1].is_leaf():
            matrix.append(path[:])
        else:
            for child in path[-1].children:
                build_matrix(path[:] + [child], matrix)

    matrix = []
    build_matrix([tree], matrix)

    # make all lines in the matrix have the same number of columns
    for line in matrix:
        line.extend([None]*(tree_depth-len(line)))
    for i in range(len(matrix)-1, 0, -1):
        prev_line, line = matrix[i-1:i+1]
        for j in range(len(line)):
            if line[j] == prev_line[j]:
                line[j] = None

    # We build the matrix of link types (between 2 cells on a line of the matrix)
    # link types are :
    link_types = {(True,  True,  True ): 1, # T
                  (False, False, True ): 2, # |
                  (False, True,  True ): 3, # + (actually, vert. bar with horiz. bar on the right)
                  (False, True,  False): 4, # L
                  (True,  True,  False): 5, # -
                  }
    links = []
    for i, line in enumerate(matrix):
        links.append([])
        for j in range(tree_depth-1):
            cell_11 = line[j] is not None
            cell_12 = line[j+1] is not None
            cell_21 = line[j+1] is not None and line[j+1].next_sibling() is not None
            link_type = link_types.get((cell_11, cell_12, cell_21), 0)
            if link_type == 0 and i > 0 and links[i-1][j] in (1, 2, 3):
                link_type = 2
            links[-1].append(link_type)


    # We can now generate the HTML code for the <table>
    s = u'<table class="tree">\n'
    if caption:
        s += '<caption>%s</caption>\n' % caption

    for i, link_line in enumerate(links):
        line = matrix[i]

        s += '<tr>'
        for j, link_cell in enumerate(link_line):
            cell = line[j]
            if cell:
                if cell.id == selected_node:
                    s += '<td class="tree_cell" rowspan="2"><div id="selected" class="tree_cell">%s</div></td>' % (render_node(cell.id))
                else:
                    s += '<td class="tree_cell" rowspan="2"><div class="tree_cell">%s</div></td>' % (render_node(cell.id))
            else:
                s += '<td rowspan="2"> </td>'
            s += '<td class="tree_cell_%d_1"> </td>' % link_cell
            s += '<td class="tree_cell_%d_2"> </td>' % link_cell

        cell = line[-1]
        if cell:
            if cell.id == selected_node:
                s += '<td class="tree_cell" rowspan="2"><div id="selected" class="tree_cell">%s</div></td>' % (render_node(cell.id))
            else:
                s += '<td class="tree_cell" rowspan="2"><div class="tree_cell">%s</div></td>' % (render_node(cell.id))
        else:
            s += '<td rowspan="2"> </td>'

        s += '</tr>\n'
        if link_line:
            s += '<tr>'
            for j, link_cell in enumerate(link_line):
                s += '<td class="tree_cell_%d_3"> </td>' % link_cell
                s += '<td class="tree_cell_%d_4"> </td>' % link_cell
            s += '</tr>\n'

    s += '</table>'
    return s

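# Hedged usage sketch (not part of the module); it assumes that
# logilab.common.tree.Node takes the node id as its first constructor
# argument and exposes an append() method, as relied upon above.
#
#   from logilab.common.tree import Node
#   root = Node('root')
#   child = Node('child_1')
#   root.append(child)
#   child.append(Node('child_2'))
#   html = render_HTML_tree(root, selected_node='child_1')
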

# traceback formatting ########################################################

import traceback

def rest_traceback(info, exception):
    """return a ReST formatted traceback"""
    res = [u'Traceback\n---------\n::\n']
    for stackentry in traceback.extract_tb(info[2]):
        res.append(u'\tFile %s, line %s, function %s' % tuple(stackentry[:3]))
        if stackentry[3]:
            res.append(u'\t %s' % stackentry[3].decode('utf-8', 'replace'))
    res.append(u'\n')
    try:
        res.append(u'\t Error: %s\n' % exception)
    except:
        pass
    return u'\n'.join(res)

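# Hedged usage sketch (not in the original source): both traceback helpers
# expect the tuple returned by sys.exc_info().
#
#   import sys
#   try:
#       1 / 0
#   except ZeroDivisionError, exc:
#       print rest_traceback(sys.exc_info(), exc)
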
def html_traceback(info, exception, title='',
                   encoding='ISO-8859-1', body=''):
    """return an html formatted traceback from python exception info.
    """
    tcbk = info[2]
    stacktb = traceback.extract_tb(tcbk)
    strings = []
    if body:
        strings.append(u'<div class="error_body">')
        # FIXME
        strings.append(body)
        strings.append(u'</div>')
    if title:
        strings.append(u'<h1 class="error">%s</h1>'% html_escape(title))
    try:
        strings.append(u'<p class="error">%s</p>' % html_escape(str(exception)).replace("\n","<br />"))
    except UnicodeError:
        pass
    strings.append(u'<div class="error_traceback">')
    for index, stackentry in enumerate(stacktb):
        strings.append(u'<b>File</b> <b class="file">%s</b>, <b>line</b> '
                       u'<b class="line">%s</b>, <b>function</b> '
                       u'<b class="function">%s</b>:<br/>'%(
                           html_escape(stackentry[0]), stackentry[1], html_escape(stackentry[2])))
        if stackentry[3]:
            string = html_escape(stackentry[3]).decode('utf-8', 'replace')
            strings.append(u' %s<br/>\n' % (string))
        # add locals info for each entry
        try:
            local_context = tcbk.tb_frame.f_locals
            html_info = []
            chars = 0
            for name, value in local_context.iteritems():
                value = html_escape(repr(value))
                info = u'<span class="name">%s</span>=%s, ' % (name, value)
                line_length = len(name) + len(value)
                chars += line_length
                # 150 is the result of *years* of research ;-) (CSS might be helpful here)
                if chars > 150:
                    info = u'<br/>' + info
                    chars = line_length
                html_info.append(info)
            boxid = 'ctxlevel%d' % index
            strings.append(u'[%s]' % toggle_link(boxid, '+'))
            strings.append(u'<div id="%s" class="pycontext hidden">%s</div>' %
                           (boxid, ''.join(html_info)))
            tcbk = tcbk.tb_next
        except Exception:
            pass # doesn't really matter if we have no context info
    strings.append(u'</div>')
    return '\n'.join(strings)

# csv files / unicode support #################################################

class UnicodeCSVWriter:
    """proxies calls to csv.writer.writerow to be able to deal with unicode"""

    def __init__(self, wfunc, encoding, **kwargs):
        self.writer = csv.writer(self, **kwargs)
        self.wfunc = wfunc
        self.encoding = encoding

    def write(self, data):
        self.wfunc(data)

    def writerow(self, row):
        csvrow = []
        for elt in row:
            if isinstance(elt, unicode):
                csvrow.append(elt.encode(self.encoding))
            else:
                csvrow.append(str(elt))
        self.writer.writerow(csvrow)

    def writerows(self, rows):
        for row in rows:
            self.writerow(row)

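# Hedged usage sketch (not in the original source): the writer is given a
# write callback (here a StringIO's write method) and an output encoding.
#
#   out = StringIO()
#   writer = UnicodeCSVWriter(out.write, 'utf-8')
#   writer.writerow([u'héllo', 42])
#   csv_bytes = out.getvalue()
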

# some decorators #############################################################

class limitsize(object):
    def __init__(self, maxsize):
        self.maxsize = maxsize

    def __call__(self, function):
        def newfunc(*args, **kwargs):
            ret = function(*args, **kwargs)
            if isinstance(ret, basestring):
                return ret[:self.maxsize]
            return ret
        return newfunc

def jsonize(function):
    import simplejson
    def newfunc(*args, **kwargs):
        ret = function(*args, **kwargs)
        if isinstance(ret, decimal.Decimal):
            ret = float(ret)
        elif isinstance(ret, DateTimeType):
            ret = ret.strftime('%Y-%m-%d %H:%M')
        elif isinstance(ret, DateTimeDeltaType):
            ret = ret.seconds
        try:
            return simplejson.dumps(ret)
        except TypeError:
            return simplejson.dumps(repr(ret))
    return newfunc

def htmlescape(function):
    def newfunc(*args, **kwargs):
        ret = function(*args, **kwargs)
        assert isinstance(ret, basestring)
        return html_escape(ret)
    return newfunc
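
# Hedged usage sketch (not in the original source): the decorators wrap
# callback-style functions; the decorated names below are hypothetical.
#
#   @jsonize
#   def get_price():
#       return decimal.Decimal('12.5')
#
#   get_price()          # -> '12.5' (a JSON string)
#
#   @limitsize(10)
#   def get_label():
#       return u'a very long label'
#
#   get_label()          # -> u'a very lon'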