[services] Prevent 'repo_stats' service from aggregating information
author David Douard <david.douard@logilab.fr>
Tue, 10 Feb 2015 12:18:38 +0100
changeset 10309 35bf741203d6
parent 10308 3f94034cc972
child 10310 b49761555ad6
[services] Prevent 'repo_stats' service from aggregating information

It's up to the consumer of the service to decide whether it wants aggregated data or not. Ensure the /siteinfo page rendering is not modified.
sobjects/services.py
web/views/debug.py
web/views/management.py
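To make the change concrete before the diff, here is a minimal before/after sketch of two representative entries returned by call_service('repo_stats'). The numbers, task names and intervals are illustrative only; the keys match the ones built in sobjects/services.py below.

    # Before this changeset: values pre-aggregated into display strings by the service
    stats['rql_cache_size']   # '42 / 3000'
    stats['looping_tasks']    # comma-joined string of str(task) for each looping task

    # After this changeset: structured values, aggregation left to each consumer
    stats['rql_cache_size']   # {'size': 42, 'maxsize': 3000}
    stats['looping_tasks']    # [('update_feeds', 60), ('clean_sessions', 300)]  (hypothetical names)
    stats['threads']          # ['MainThread', ...] instead of a joined, sorted string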
--- a/sobjects/services.py	Wed Apr 22 11:00:50 2015 +0200
+++ b/sobjects/services.py	Tue Feb 10 12:18:38 2015 +0100
@@ -43,7 +43,7 @@
             (len(source._cache), repo.config['rql-cache-size'],
             source.cache_hit, source.cache_miss, 'sql'),
             ):
-            results['%s_cache_size' % title] = '%s / %s' % (size, maxsize)
+            results['%s_cache_size' % title] = {'size': size, 'maxsize': maxsize}
             results['%s_cache_hit' % title] = hits
             results['%s_cache_miss' % title] = misses
             results['%s_cache_hit_percent' % title] = (hits * 100) / (hits + misses)
@@ -53,9 +53,9 @@
         results['nb_open_sessions'] = len(repo._sessions)
         results['nb_active_threads'] = threading.activeCount()
         looping_tasks = repo._tasks_manager._looping_tasks
-        results['looping_tasks'] = ', '.join(str(t) for t in looping_tasks)
+        results['looping_tasks'] = [(t.name, t.interval) for t in looping_tasks]
         results['available_cnxsets'] = repo._cnxsets_pool.qsize()
-        results['threads'] = ', '.join(sorted(str(t) for t in threading.enumerate()))
+        results['threads'] = [t.name for t in threading.enumerate()]
         return results
 
 class GcStatsService(Service):
--- a/web/views/debug.py	Wed Apr 22 11:00:50 2015 +0200
+++ b/web/views/debug.py	Tue Feb 10 12:18:38 2015 +0100
@@ -98,6 +98,13 @@
         w(u'<h3>%s</h3>' % _('resources usage'))
         w(u'<table>')
         stats = self._cw.call_service('repo_stats')
+        stats['looping_tasks'] = ', '.join('%s (%s seconds)' % (n, i) for n, i in stats['looping_tasks'])
+        stats['threads'] = ', '.join(sorted(stats['threads']))
+        for k in stats:
+            if k in ('extid_cache_size', 'type_source_cache_size'):
+                continue
+            if k.endswith('_cache_size'):
+                stats[k] = '%s / %s' % (stats[k]['size'], stats[k]['maxsize'])
         for element in sorted(stats):
             w(u'<tr><th align="left">%s</th><td>%s %s</td></tr>'
                    % (element, xml_escape(unicode(stats[element])),
--- a/web/views/management.py	Wed Apr 22 11:00:50 2015 +0200
+++ b/web/views/management.py	Tue Feb 10 12:18:38 2015 +0100
@@ -182,6 +182,13 @@
 
     def call(self):
         stats = self._cw.call_service('repo_stats')
+        stats['looping_tasks'] = ', '.join('%s (%s seconds)' % (n, i) for n, i in stats['looping_tasks'])
+        stats['threads'] = ', '.join(sorted(stats['threads']))
+        for k in stats:
+            if k in ('extid_cache_size', 'type_source_cache_size'):
+                continue
+            if k.endswith('_cache_size'):
+                stats[k] = '%s / %s' % (stats[k]['size'], stats[k]['maxsize'])
         results = []
         for element in stats:
             results.append(u'%s %s' % (element, stats[element]))
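For consumers other than these two views, a minimal sketch of re-aggregating the structured stats into display strings, mirroring the formatting added to debug.py and management.py above (format_repo_stats is a hypothetical helper, not part of this changeset):

    def format_repo_stats(stats):
        # Hypothetical helper: turn the structured 'repo_stats' result back
        # into display strings, the way the two views above now do.
        formatted = dict(stats)
        formatted['looping_tasks'] = ', '.join(
            '%s (%s seconds)' % (name, interval)
            for name, interval in stats['looping_tasks'])
        formatted['threads'] = ', '.join(sorted(stats['threads']))
        for key, value in stats.items():
            # rql/sql cache-size entries are now dicts; other *_cache_size keys
            # (extid, type_source), presumably plain counters, are left untouched
            if key.endswith('_cache_size') and isinstance(value, dict):
                formatted[key] = '%s / %s' % (value['size'], value['maxsize'])
        return formatted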