
Refactoring of the view tool

avkonst, 11 years ago
commit d999f74672

+ 10 - 9
mainline/ext/std/tools/collect.py

@@ -34,16 +34,7 @@ class Plugin(mpp.api.Plugin, mpp.api.Parent, mpp.api.IConfigurable, mpp.api.IRun
         self.exclude_files = []
         self.parsers       = []
         
-    def register_parser(self, fnmatch_exp_list, parser):
-        self.parsers.append((fnmatch_exp_list, parser))
 
-    def get_parser(self, file_path):
-        for parser in self.parsers:
-            for fnmatch_exp in parser[0]:
-                if fnmatch.fnmatch(file_path, fnmatch_exp):
-                    return parser[1]
-        return None
-    
     def declare_configuration(self, parser):
         parser.add_option("--std.general.proctime", "--sgpt", action="store_true", default=False,
                          help="If the option is set (True), the tool measures processing time per file [default: %default]")
@@ -81,6 +72,16 @@ class Plugin(mpp.api.Plugin, mpp.api.Parent, mpp.api.IConfigurable, mpp.api.IRun
         for directory in args:
             return self.reader.run(self, directory)
         
+    def register_parser(self, fnmatch_exp_list, parser):
+        self.parsers.append((fnmatch_exp_list, parser))
+
+    def get_parser(self, file_path):
+        for parser in self.parsers:
+            for fnmatch_exp in parser[0]:
+                if fnmatch.fnmatch(file_path, fnmatch_exp):
+                    return parser[1]
+        return None
+
     def add_exclude_rule(self, re_compiled_pattern):
         # TODO file name may have special regexp symbols what causes an exception
         # For example try to run a collection with "--db-file=metrix++" option
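
Note: register_parser/get_parser move below run() unchanged; parsers still register fnmatch patterns, and the first registered pattern that matches the file path wins. A minimal standalone sketch of the same lookup logic (the registry class and parser handles here are illustrative, not part of the commit):

import fnmatch

class ParserRegistry(object):
    # Mirrors Plugin.register_parser/get_parser from collect.py above
    def __init__(self):
        self.parsers = []

    def register_parser(self, fnmatch_exp_list, parser):
        self.parsers.append((fnmatch_exp_list, parser))

    def get_parser(self, file_path):
        # First registered pattern that matches wins
        for (patterns, parser) in self.parsers:
            for fnmatch_exp in patterns:
                if fnmatch.fnmatch(file_path, fnmatch_exp):
                    return parser
        return None

registry = ParserRegistry()
registry.register_parser(['*.c', '*.h'], 'cxx-parser')
registry.register_parser(['*.java'], 'java-parser')
assert registry.get_parser('src/main.c') == 'cxx-parser'
assert registry.get_parser('README.md') is None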

+ 27 - 0
mainline/ext/std/tools/view.ini

@@ -0,0 +1,27 @@
+;
+;    Metrix++, Copyright 2009-2013, Metrix++ Project
+;    Link: http://metrixplusplus.sourceforge.net
+;    
+;    This file is a part of Metrix++ Tool.
+;    
+;    Metrix++ is free software: you can redistribute it and/or modify
+;    it under the terms of the GNU General Public License as published by
+;    the Free Software Foundation, version 3 of the License.
+;    
+;    Metrix++ is distributed in the hope that it will be useful,
+;    but WITHOUT ANY WARRANTY; without even the implied warranty of
+;    MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+;    GNU General Public License for more details.
+;    
+;    You should have received a copy of the GNU General Public License
+;    along with Metrix++.  If not, see <http://www.gnu.org/licenses/>.
+;
+
+[Plugin]
+version: 1.0
+package: std.tools
+module:  view
+class:   Plugin
+depends: mpp.dbf, mpp.log
+actions: view
+enabled: True
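
The descriptor above is what lets the view action load as a regular plugin instead of a hand-wired tool. A minimal sketch of reading such a file with the standard-library ConfigParser (the same module mpp/internal/loader.py imports below); the file path and printed fields are illustrative:

import ConfigParser  # 'configparser' on Python 3

config = ConfigParser.ConfigParser()
config.read('mainline/ext/std/tools/view.ini')
# The [Plugin] section tells the loader what to import and when to run it
print(config.get('Plugin', 'package'))  # std.tools
print(config.get('Plugin', 'module'))   # view
print(config.get('Plugin', 'actions'))  # view
print(config.get('Plugin', 'depends'))  # mpp.dbf, mpp.log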

+ 207 - 0
mainline/ext/std/tools/view.py

@@ -0,0 +1,207 @@
+#
+#    Metrix++, Copyright 2009-2013, Metrix++ Project
+#    Link: http://metrixplusplus.sourceforge.net
+#    
+#    This file is a part of Metrix++ Tool.
+#    
+#    Metrix++ is free software: you can redistribute it and/or modify
+#    it under the terms of the GNU General Public License as published by
+#    the Free Software Foundation, version 3 of the License.
+#    
+#    Metrix++ is distributed in the hope that it will be useful,
+#    but WITHOUT ANY WARRANTY; without even the implied warranty of
+#    MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+#    GNU General Public License for more details.
+#    
+#    You should have received a copy of the GNU General Public License
+#    along with Metrix++.  If not, see <http://www.gnu.org/licenses/>.
+#
+
+
+import mpp.api
+import mpp.utils
+
+class Plugin(mpp.api.Plugin, mpp.api.IConfigurable, mpp.api.IRunable):
+    
+    def __init__(self):
+        pass
+
+    def declare_configuration(self, parser):
+        parser.add_option("--format", "--ft", default='xml', choices=['txt', 'xml', 'python'], help="Format of the output data. "
+                          "Possible values are 'xml', 'txt' or 'python' [default: %default]")
+        parser.add_option("--nest-regions", "--nr", action="store_true", default=False,
+                          help="If the option is set (True), data for regions is exported in the form of a tree. "
+                          "Otherwise, all regions are exported in plain list. [default: %default]")
+    
+    def configure(self, options):
+        self.out_format = options.__dict__['format']
+        self.nest_regions = options.__dict__['nest_regions']
+
+    def run(self, args):
+        loader_prev = self.get_plugin_loader().get_plugin('mpp.dbf').get_loader_prev()
+        loader = self.get_plugin_loader().get_plugin('mpp.dbf').get_loader()
+    
+        paths = None
+        if len(args) == 0:
+            paths = [""]
+        else:
+            paths = args
+        
+        (result, exit_code) = export_to_str(self.out_format, paths, loader, loader_prev, self.nest_regions)
+        print result
+        return exit_code
+
+def export_to_str(out_format, paths, loader, loader_prev, nest_regions):
+    exit_code = 0
+    result = ""
+    if out_format == 'txt':
+        result += "=" * 80 + "\n" + "Export" + "\n" + "_" * 80 + "\n\n"
+    elif out_format == 'xml':
+        result += "<export>\n"
+    elif out_format == 'python':
+        result += "{'export': ["
+
+    for (ind, path) in enumerate(paths):
+        path = mpp.utils.preprocess_path(path)
+        
+        aggregated_data = loader.load_aggregated_data(path)
+        aggregated_data_tree = {}
+        subdirs = []
+        subfiles = []
+        if aggregated_data != None:
+            aggregated_data_tree = aggregated_data.get_data_tree()
+            subdirs = aggregated_data.get_subdirs()
+            subfiles = aggregated_data.get_subfiles()
+        else:
+            mpp.utils.report_bad_path(path)
+            exit_code += 1
+        aggregated_data_prev = loader_prev.load_aggregated_data(path)
+        if aggregated_data_prev != None:
+            aggregated_data_tree = append_diff(aggregated_data_tree,
+                                           aggregated_data_prev.get_data_tree())
+        
+        file_data = loader.load_file_data(path)
+        file_data_tree = {}
+        if file_data != None:
+            file_data_tree = file_data.get_data_tree() 
+            file_data_prev = loader_prev.load_file_data(path)
+            append_regions(file_data_tree, file_data, file_data_prev, nest_regions)
+        
+        data = {"info": {"path": path, "id": ind + 1},
+                "aggregated-data": aggregated_data_tree,
+                "file-data": file_data_tree,
+                "subdirs": subdirs,
+                "subfiles": subfiles}
+
+        if out_format == 'txt':
+            result += mpp.utils.serialize_to_txt(data, root_name = "data") + "\n"
+        elif out_format == 'xml':
+            result += mpp.utils.serialize_to_xml(data, root_name = "data") + "\n"
+        elif out_format == 'python':
+            postfix = ""
+            if ind < len(paths) - 1:
+                postfix = ", "
+            result += mpp.utils.serialize_to_python(data, root_name = "data") + postfix
+
+    if out_format == 'txt':
+        result += "\n"
+    elif out_format == 'xml':
+        result += "</export>"
+    elif out_format == 'python':
+        result += "]}"
+        
+    return (result, exit_code)
+
+def append_regions(file_data_tree, file_data, file_data_prev, nest_regions):
+    regions_matcher = None
+    if file_data_prev != None:
+        file_data_tree = append_diff(file_data_tree,
+                                     file_data_prev.get_data_tree())
+        regions_matcher = mpp.utils.FileRegionsMatcher(file_data, file_data_prev)
+    
+    if nest_regions == False:
+        regions = []
+        for region in file_data.iterate_regions():
+            region_data_tree = region.get_data_tree()
+            if regions_matcher != None and regions_matcher.is_matched(region.get_id()):
+                region_data_prev = file_data_prev.get_region(regions_matcher.get_prev_id(region.get_id()))
+                region_data_tree = append_diff(region_data_tree,
+                                               region_data_prev.get_data_tree())
+            regions.append({"info": {"name" : region.name,
+                                     'type' : file_data.get_region_types()().to_str(region.get_type()),
+                                     "cursor" : region.cursor,
+                                     'line_begin': region.line_begin,
+                                     'line_end': region.line_end,
+                                     'offset_begin': region.begin,
+                                     'offset_end': region.end},
+                            "data": region_data_tree})
+        file_data_tree['regions'] = regions
+    else:
+        def append_rec(region_id, file_data_tree, file_data, file_data_prev):
+            region = file_data.get_region(region_id)
+            region_data_tree = region.get_data_tree()
+            if regions_matcher != None and regions_matcher.is_matched(region.get_id()):
+                region_data_prev = file_data_prev.get_region(regions_matcher.get_prev_id(region.get_id()))
+                region_data_tree = append_diff(region_data_tree,
+                                               region_data_prev.get_data_tree())
+            result = {"info": {"name" : region.name,
+                               'type' : file_data.get_region_types()().to_str(region.get_type()),
+                               "cursor" : region.cursor,
+                               'line_begin': region.line_begin,
+                               'line_end': region.line_end,
+                               'offset_begin': region.begin,
+                               'offset_end': region.end},
+                      "data": region_data_tree,
+                      "subregions": []}
+            for sub_id in file_data.get_region(region_id).iterate_subregion_ids():
+                result['subregions'].append(append_rec(sub_id, file_data_tree, file_data, file_data_prev))
+            return result
+        file_data_tree['regions'] = []
+        file_data_tree['regions'].append(append_rec(1, file_data_tree, file_data, file_data_prev))
+
+def append_diff(main_tree, prev_tree):
+    assert(main_tree != None)
+    assert(prev_tree != None)
+    
+    for name in main_tree.keys():
+        if name not in prev_tree.keys():
+            continue
+        for field in main_tree[name].keys():
+            if field not in prev_tree[name].keys():
+                continue
+            if isinstance(main_tree[name][field], dict) and isinstance(prev_tree[name][field], dict):
+                diff = {}
+                for key in main_tree[name][field].keys():
+                    if key not in prev_tree[name][field].keys():
+                        continue
+                    main_val = main_tree[name][field][key]
+                    prev_val = prev_tree[name][field][key]
+                    if main_val == None:
+                        main_val = 0
+                    if prev_val == None:
+                        prev_val = 0
+                    if isinstance(main_val, list) and isinstance(prev_val, list):
+                        main_tree[name][field][key] = append_diff_list(main_val, prev_val)
+                    else:
+                        diff[key] = main_val - prev_val
+                main_tree[name][field]['__diff__'] = diff
+            elif (not isinstance(main_tree[name][field], dict)) and (not isinstance(prev_tree[name][field], dict)):
+                if '__diff__' not in main_tree[name]:
+                    main_tree[name]['__diff__'] = {}
+                main_tree[name]['__diff__'][field] = main_tree[name][field] - prev_tree[name][field]
+    return main_tree
+
+def append_diff_list(main_list, prev_list):
+    merged_list = {}
+    for bar in main_list:
+        merged_list[bar['metric']] = {'count': bar['count'], '__diff__':0}
+    for bar in prev_list:
+        if bar['metric'] in merged_list.keys():
+            merged_list[bar['metric']]['__diff__'] = \
+                merged_list[bar['metric']]['count'] - bar['count']
+        else:
+            merged_list[bar['metric']] = {'count': 0, '__diff__':-bar['count']}
+    result = []
+    for metric in sorted(merged_list.keys()):
+        result.append({'metric':metric, 'count':merged_list[metric]['count'], '__diff__':merged_list[metric]['__diff__']})
+    return result
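
The '__diff__' fields produced above are easiest to see on concrete data. A worked example for append_diff_list, assuming the function defined above is in scope (the metric ids and counts are made up):

# Distribution bars from the current and the previous database
main_list = [{'metric': 0, 'count': 10}, {'metric': 1, 'count': 4}]
prev_list = [{'metric': 0, 'count': 7},  {'metric': 2, 'count': 5}]

merged = append_diff_list(main_list, prev_list)
# metric 0: in both          -> count 10, __diff__ = 10 - 7 = 3
# metric 1: new bar          -> count 4,  __diff__ = 0
# metric 2: disappeared bar  -> count 0,  __diff__ = -5
assert merged == [
    {'metric': 0, 'count': 10, '__diff__': 3},
    {'metric': 1, 'count': 4,  '__diff__': 0},
    {'metric': 2, 'count': 0,  '__diff__': -5},
]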

+ 1 - 0
mainline/mpp/api.py

@@ -1168,6 +1168,7 @@ class IParser(object):
 class ICode(object):
     pass
 
+# TODO refactor and remove
 class ITool(object):
     def run(self, tool_args):
         raise InterfaceNotImplemented(self)

+ 14 - 4
mainline/mpp/dbf.py

@@ -44,7 +44,8 @@ class Plugin(mpp.api.Plugin, mpp.api.IConfigurable):
         
     def initialize(self):
         
-        if self.get_plugin_loader() != None:
+        # TODO refactor and remove self.get_plugin_loader() != None
+        if self.get_plugin_loader() != None and self.get_plugin_loader().get_action() == 'collect':
             if os.path.exists(self.dbfile):
                 logging.warn("Removing existing file: " + self.dbfile)
                 # TODO can reuse existing db file to speed up the processing?
@@ -60,13 +61,22 @@ class Plugin(mpp.api.Plugin, mpp.api.IConfigurable):
                 self.parser.error("Failure in creating file: " + self.dbfile)
             
         else:
+            self.loader = mpp.api.Loader()
+            if self.loader.open_database(self.dbfile) == False:
+                self.parser.error("Can not open file: " + self.dbfile)
             self.loader_prev = mpp.api.Loader()
             if self.dbfile_prev != None:
                 if self.loader_prev.open_database(self.dbfile_prev) == False:
                     self.parser.error("Can not open file: " + self.dbfile_prev)
-            self.loader = mpp.api.Loader()
-            if self.loader.open_database(self.dbfile) == False:
-                self.parser.error("Can not open file: " + self.dbfile)
+                self._warn_on_metadata()
+
+    def _warn_on_metadata(self):
+        for each in self.loader.iterate_properties():
+            prev = self.loader_prev.get_property(each.name)
+            if prev != each.value:
+                logging.warn("Data files have been created by different versions of the tool or with different settings.")
+                logging.warn(" - identification of some change trends may be unreliable")
+                logging.warn(" - use 'info' action to view more details")
 
     def get_dbfile_path(self):
         return self.dbfile
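
_warn_on_metadata takes over the job of the removed mpp.utils.check_db_metadata (see mpp/utils.py below): it compares each property stored in the primary database against the previous one and warns on mismatches. A standalone sketch of the check with the databases replaced by plain dicts (illustrative only; the real code iterates loader properties and warns once per mismatch):

import logging

def warn_on_metadata_mismatch(props, props_prev):
    for (name, value) in props.items():
        if props_prev.get(name) != value:
            logging.warning("Data files have been created by different "
                            "versions of the tool or with different settings.")
            return True
    return False

warn_on_metadata_mismatch({'version': '1.0'}, {'version': '0.9'})  # warns, returns True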

+ 6 - 3
mainline/mpp/internal/loader.py

@@ -25,14 +25,16 @@ import sys
 import ConfigParser
 import re
 
-
-
 class Loader(object):
 
     def __init__(self):
         self.plugins = []
         self.hash    = {}
-        
+        self.action = None
+    
+    def get_action(self):
+        return self.action
+    
     def get_plugin(self, name):
         return self.hash[name]['instance']
     
@@ -137,6 +139,7 @@ class Loader(object):
 
         if command not in inicontainer.actions:
            optparser.error("Unknown action: {action}".format(action=command))
+        self.action = command
 
         for item in self.iterate_plugins():
             if (isinstance(item, mpp.api.IConfigurable)):
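
Recording the parsed command on the loader is what makes the dbf.py change above possible: any plugin can now branch on the active action instead of being told its mode explicitly. A condensed sketch of the interaction (classes trimmed to the relevant part, behavior illustrative):

class Loader(object):
    def __init__(self):
        self.action = None

    def get_action(self):
        return self.action

    def load(self, command):
        self.action = command  # recorded once the action is validated

loader = Loader()
loader.load('view')
if loader.get_action() == 'collect':
    print("create a fresh database file")     # collector path
else:
    print("open the existing database file")  # post-processing path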

+ 0 - 10
mainline/mpp/utils.py

@@ -98,16 +98,6 @@ class FileRegionsMatcher(object):
     def is_modified(self, curr_id):
         return self.ids[curr_id][1]
 
-def check_db_metadata(loader, loader_prev):
-    for each in loader.iterate_properties():
-        prev = loader_prev.get_property(each.name)
-        if prev != each.value:
-            logging.warn("Previous data file has got different metadata:")
-            logging.warn(" - identification of change trends can be not reliable")
-            logging.warn(" - use 'info' tool to view more details")
-            return 1
-    return 0
-
 def preprocess_path(path):
     path = re.sub(r'''[\\]+''', "/", path)
     logging.info("Processing: " + path)

+ 5 - 5
mainline/tests/general/test_basic/test_help_view_default_stdout.gold.txt

@@ -2,11 +2,6 @@ Usage: metrix++.py view [options] -- [path 1] ... [path N]
 
 Options:
   -h, --help            show this help message and exit
-  --log-level=LOG_LEVEL, --ll=LOG_LEVEL
-                        Defines log level. Possible values are
-                        'DEBUG','INFO','WARNING' or 'ERROR'. Default value is
-                        inherited from environment variable
-                        'METRIXPLUSPLUS_LOG_LEVEL' if set. [default: INFO]
   --db-file=DB_FILE, --dbf=DB_FILE
                         Primary database file to write (by the collector) and
                         post-process (by other tools) [default: ./metrixpp.db]
@@ -17,6 +12,11 @@ Options:
                         it may reduce the processing time significantly. Post-
                         processing tools use it in order to recognise/evaluate
                         change trends. [default: none].
+  --log-level=LOG_LEVEL, --ll=LOG_LEVEL
+                        Defines log level. Possible values are
+                        'DEBUG','INFO','WARNING' or 'ERROR'. Default value is
+                        inherited from environment variable
+                        'METRIXPLUSPLUS_LOG_LEVEL' if set. [default: INFO]
   --format=FORMAT, --ft=FORMAT
                         Format of the output data. Possible values are 'xml',
                         'txt' or 'python' [default: xml]

+ 0 - 4
mainline/tools/limit.py

@@ -67,10 +67,6 @@ def main(tool_args):
     for each in loader.iterate_namespace_names():
         warn_plugin.verify_fields(each, loader.get_namespace(each).iterate_field_names())
     
-    # Check for versions consistency
-    if db_plugin.dbfile_prev != None:
-        mpp.utils.check_db_metadata(loader, loader_prev)
-    
     paths = None
     if len(args) == 0:
         paths = [""]

+ 11 - 196
mainline/tools/view.py

@@ -18,10 +18,9 @@
 #
 
 
-import mpp.log
-import mpp.dbf
-import mpp.utils
-import mpp.cmdparser
+import os
+
+import mpp.internal.loader
 
 import mpp.api
 class Tool(mpp.api.ITool):
@@ -29,196 +28,12 @@ class Tool(mpp.api.ITool):
         return main(tool_args)
 
 def main(tool_args):
-    
-    log_plugin = mpp.log.Plugin()
-    db_plugin = mpp.dbf.Plugin()
-
-    parser = mpp.cmdparser.MultiOptionParser(usage="Usage: %prog view [options] -- [path 1] ... [path N]")
-    log_plugin.declare_configuration(parser)
-    db_plugin.declare_configuration(parser)
-    parser.add_option("--format", "--ft", default='xml', choices=['txt', 'xml', 'python'], help="Format of the output data. "
-                      "Possible values are 'xml', 'txt' or 'python' [default: %default]")
-    parser.add_option("--nest-regions", "--nr", action="store_true", default=False,
-                      help="If the option is set (True), data for regions is exported in the form of a tree. "
-                      "Otherwise, all regions are exported in plain list. [default: %default]")
-
-    (options, args) = parser.parse_args(tool_args)
-    log_plugin.configure(options)
-    db_plugin.configure(options)
-    out_format = options.__dict__['format']
-    nest_regions = options.__dict__['nest_regions']
-
-    log_plugin.initialize()
-    db_plugin.initialize()
-
-    loader_prev = db_plugin.get_loader_prev()
-    loader = db_plugin.get_loader()
-
-    # Check for versions consistency
-    if db_plugin.dbfile_prev != None:
-        mpp.utils.check_db_metadata(loader, loader_prev)
-    
-    paths = None
-    if len(args) == 0:
-        paths = [""]
-    else:
-        paths = args
-    
-    (result, exit_code) = export_to_str(out_format, paths, loader, loader_prev, nest_regions)
-    print result
+    loader = mpp.internal.loader.Loader()
+    mpp_paths = []
+    # TODO document this feature
+    if 'METRIXPLUSPLUS_PATH' in os.environ.keys():
+        mpp_paths = os.environ['METRIXPLUSPLUS_PATH'].split(os.pathsep)
+    args = loader.load('view', mpp_paths, tool_args)
+    exit_code = loader.run(args)
+    loader.unload()
     return exit_code
-
-def export_to_str(out_format, paths, loader, loader_prev, nest_regions):
-    exit_code = 0
-    result = ""
-    if out_format == 'txt':
-        result += "=" * 80 + "\n" + "Export" + "\n" + "_" * 80 + "\n\n"
-    elif out_format == 'xml':
-        result += "<export>\n"
-    elif out_format == 'python':
-        result += "{'export': ["
-
-    for (ind, path) in enumerate(paths):
-        path = mpp.utils.preprocess_path(path)
-        
-        aggregated_data = loader.load_aggregated_data(path)
-        aggregated_data_tree = {}
-        subdirs = []
-        subfiles = []
-        if aggregated_data != None:
-            aggregated_data_tree = aggregated_data.get_data_tree()
-            subdirs = aggregated_data.get_subdirs()
-            subfiles = aggregated_data.get_subfiles()
-        else:
-            mpp.utils.report_bad_path(path)
-            exit_code += 1
-        aggregated_data_prev = loader_prev.load_aggregated_data(path)
-        if aggregated_data_prev != None:
-            aggregated_data_tree = append_diff(aggregated_data_tree,
-                                           aggregated_data_prev.get_data_tree())
-        
-        file_data = loader.load_file_data(path)
-        file_data_tree = {}
-        if file_data != None:
-            file_data_tree = file_data.get_data_tree() 
-            file_data_prev = loader_prev.load_file_data(path)
-            append_regions(file_data_tree, file_data, file_data_prev, nest_regions)
-        
-        data = {"info": {"path": path, "id": ind + 1},
-                "aggregated-data": aggregated_data_tree,
-                "file-data": file_data_tree,
-                "subdirs": subdirs,
-                "subfiles": subfiles}
-
-        if out_format == 'txt':
-            result += mpp.utils.serialize_to_txt(data, root_name = "data") + "\n"
-        elif out_format == 'xml':
-            result += mpp.utils.serialize_to_xml(data, root_name = "data") + "\n"
-        elif out_format == 'python':
-            postfix = ""
-            if ind < len(paths) - 1:
-                postfix = ", "
-            result += mpp.utils.serialize_to_python(data, root_name = "data") + postfix
-
-    if out_format == 'txt':
-        result += "\n"
-    elif out_format == 'xml':
-        result += "</export>"
-    elif out_format == 'python':
-        result += "]}"
-        
-    return (result, exit_code)
-
-def append_regions(file_data_tree, file_data, file_data_prev, nest_regions):
-    regions_matcher = None
-    if file_data_prev != None:
-        file_data_tree = append_diff(file_data_tree,
-                                     file_data_prev.get_data_tree())
-        regions_matcher = mpp.utils.FileRegionsMatcher(file_data, file_data_prev)
-    
-    if nest_regions == False:
-        regions = []
-        for region in file_data.iterate_regions():
-            region_data_tree = region.get_data_tree()
-            if regions_matcher != None and regions_matcher.is_matched(region.get_id()):
-                region_data_prev = file_data_prev.get_region(regions_matcher.get_prev_id(region.get_id()))
-                region_data_tree = append_diff(region_data_tree,
-                                               region_data_prev.get_data_tree())
-            regions.append({"info": {"name" : region.name,
-                                     'type' : file_data.get_region_types()().to_str(region.get_type()),
-                                     "cursor" : region.cursor,
-                                     'line_begin': region.line_begin,
-                                     'line_end': region.line_end,
-                                     'offset_begin': region.begin,
-                                     'offset_end': region.end},
-                            "data": region_data_tree})
-        file_data_tree['regions'] = regions
-    else:
-        def append_rec(region_id, file_data_tree, file_data, file_data_prev):
-            region = file_data.get_region(region_id)
-            region_data_tree = region.get_data_tree()
-            if regions_matcher != None and regions_matcher.is_matched(region.get_id()):
-                region_data_prev = file_data_prev.get_region(regions_matcher.get_prev_id(region.get_id()))
-                region_data_tree = append_diff(region_data_tree,
-                                               region_data_prev.get_data_tree())
-            result = {"info": {"name" : region.name,
-                               'type' : file_data.get_region_types()().to_str(region.get_type()),
-                               "cursor" : region.cursor,
-                               'line_begin': region.line_begin,
-                               'line_end': region.line_end,
-                               'offset_begin': region.begin,
-                               'offset_end': region.end},
-                      "data": region_data_tree,
-                      "subregions": []}
-            for sub_id in file_data.get_region(region_id).iterate_subregion_ids():
-                result['subregions'].append(append_rec(sub_id, file_data_tree, file_data, file_data_prev))
-            return result
-        file_data_tree['regions'] = []
-        file_data_tree['regions'].append(append_rec(1, file_data_tree, file_data, file_data_prev))
-
-def append_diff(main_tree, prev_tree):
-    assert(main_tree != None)
-    assert(prev_tree != None)
-    
-    for name in main_tree.keys():
-        if name not in prev_tree.keys():
-            continue
-        for field in main_tree[name].keys():
-            if field not in prev_tree[name].keys():
-                continue
-            if isinstance(main_tree[name][field], dict) and isinstance(prev_tree[name][field], dict):
-                diff = {}
-                for key in main_tree[name][field].keys():
-                    if key not in prev_tree[name][field].keys():
-                        continue
-                    main_val = main_tree[name][field][key]
-                    prev_val = prev_tree[name][field][key]
-                    if main_val == None:
-                        main_val = 0
-                    if prev_val == None:
-                        prev_val = 0
-                    if isinstance(main_val, list) and isinstance(prev_val, list):
-                        main_tree[name][field][key] = append_diff_list(main_val, prev_val)
-                    else:
-                        diff[key] = main_val - prev_val
-                main_tree[name][field]['__diff__'] = diff
-            elif (not isinstance(main_tree[name][field], dict)) and (not isinstance(prev_tree[name][field], dict)):
-                if '__diff__' not in main_tree[name]:
-                    main_tree[name]['__diff__'] = {}
-                main_tree[name]['__diff__'][field] = main_tree[name][field] - prev_tree[name][field]
-    return main_tree
-
-def append_diff_list(main_list, prev_list):
-    merged_list = {}
-    for bar in main_list:
-        merged_list[bar['metric']] = {'count': bar['count'], '__diff__':0}
-    for bar in prev_list:
-        if bar['metric'] in merged_list.keys():
-            merged_list[bar['metric']]['__diff__'] = \
-                merged_list[bar['metric']]['count'] - bar['count']
-        else:
-            merged_list[bar['metric']] = {'count': 0, '__diff__':-bar['count']}
-    result = []
-    for metric in sorted(merged_list.keys()):
-        result.append({'metric':metric, 'count':merged_list[metric]['count'], '__diff__':merged_list[metric]['__diff__']})
-    return result
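
After this rewrite, tools/view.py is only a bootstrap: it builds a plugin loader, optionally extends the plugin search path from METRIXPLUSPLUS_PATH, and delegates the whole 'view' action to the std.tools.view plugin declared in view.ini above. A hedged sketch of driving the new entry point programmatically (the plugin directory is illustrative, and the import assumes 'mainline' is on sys.path):

import os

import tools.view  # the rewritten bootstrap above

# Optional extra plugin directories, per the TODO in main()
os.environ['METRIXPLUSPLUS_PATH'] = '/opt/metrixpp/plugins'

# Equivalent to: python metrix++.py view --format=txt -- src
exit_code = tools.view.Tool().run(['--format=txt', '--', 'src'])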