Browse Source

--scope-mode option for view tool. improvements for limit tool. bug fixes for view tool.

avkonst 11 years ago
parent
commit
135364e94e

+ 1 - 1
mainline/ext/std/tools/limit.ini

@@ -22,6 +22,6 @@ version: 1.0
 package: std.tools
 module:  limit
 class:   Plugin
-depends: mpp.dbf, mpp.warn
+depends: mpp.dbf
 actions: limit
 enabled: True

+ 115 - 5
mainline/ext/std/tools/limit.py

@@ -18,6 +18,7 @@
 #
 
 import logging
+import re
 
 import mpp.api
 import mpp.utils
@@ -25,18 +26,128 @@ import mpp.cout
 
 class Plugin(mpp.api.Plugin, mpp.api.IConfigurable, mpp.api.IRunable):
     
+    MODE_NEW     = 0x01
+    MODE_TREND   = 0x03
+    MODE_TOUCHED = 0x07
+    MODE_ALL     = 0x15
+
     def declare_configuration(self, parser):
+        self.parser = parser
         parser.add_option("--hotspots", "--hs", default=None, help="If not set (none), all exceeded limits are printed."
                           " If set, exceeded limits are sorted (the worst is the first) and only first HOTSPOTS limits are printed."
                           " [default: %default]", type=int)
         parser.add_option("--disable-suppressions", "--ds", action="store_true", default=False,
                           help = "If not set (none), all suppressions are ignored"
                                  " and associated warnings are printed. [default: %default]")
+        parser.add_option("--warn-mode", "--wm", default='all', choices=['new', 'trend', 'touched', 'all'],
+                         help="Defines the warnings mode. "
+                         "'all' - all warnings active, "
+                         "'new' - warnings for new regions/files only, "
+                         "'trend' - warnings for new regions/files and for bad trend of modified regions/files, "
+                         "'touched' - warnings for new and modified regions/files "
+                         "[default: %default]")
+        parser.add_option("--min-limit", "--min", action="multiopt",
+                          help="A threshold per 'namespace:field' metric in order to select regions, "
+                          "which have got metric value less than the specified limit. "
+                          "This option can be specified multiple times, if it is necessary to apply several limits. "
+                          "Should be in the format: <namespace>:<field>:<limit-value>, for example: "
+                          "'std.code.lines:comments:1'.")
+        parser.add_option("--max-limit", "--max", action="multiopt",
+                          help="A threshold per 'namespace:field' metric in order to select regions, "
+                          "which have got metric value more than the specified limit. "
+                          "This option can be specified multiple times, if it is necessary to apply several limits. "
+                          "Should be in the format: <namespace>:<field>:<limit-value>, for example: "
+                          "'std.code.complexity:cyclomatic:7'.")
     
     def configure(self, options):
         self.hotspots = options.__dict__['hotspots']
         self.no_suppress = options.__dict__['disable_suppressions']
 
+        if options.__dict__['warn_mode'] == 'new':
+            self.mode = self.MODE_NEW
+        elif options.__dict__['warn_mode'] == 'trend':
+            self.mode = self.MODE_TREND
+        elif options.__dict__['warn_mode'] == 'touched':
+            self.mode = self.MODE_TOUCHED
+        elif options.__dict__['warn_mode'] == 'all':
+            self.mode = self.MODE_ALL
+            
+        if self.mode != self.MODE_ALL and options.__dict__['db_file_prev'] == None:
+            self.parser.error("option --warn-mode: The mode '" + options.__dict__['warn_mode'] + "' requires '--db-file-prev' option set")
+
+        class Limit(object):
+            def __init__(self, limit_type, limit, namespace, field, db_filter):
+                self.type = limit_type
+                self.limit = limit
+                self.namespace = namespace
+                self.field = field
+                self.filter = db_filter
+                
+            def __repr__(self):
+                return "namespace '" + self.namespace + "', filter '" + str(self.filter) + "'"
+        
+        self.limits = []
+        pattern = re.compile(r'''([^:]+)[:]([^:]+)[:]([-+]?[0-9]+(?:[.][0-9]+)?)''')
+        if options.__dict__['max_limit'] != None:
+            for each in options.__dict__['max_limit']:
+                match = re.match(pattern, each)
+                if match == None:
+                    self.parser.error("option --max-limit: Invalid format: " + each)
+                limit = Limit("max", float(match.group(3)), match.group(1), match.group(2), (match.group(2), '>', float(match.group(3))))
+                self.limits.append(limit)
+        if options.__dict__['min_limit'] != None:
+            for each in options.__dict__['min_limit']:  
+                match = re.match(pattern, each)
+                if match == None:
+                    self.parser.error("option --min-limit: Invalid format: " + each)
+                limit = Limit("min", float(match.group(3)), match.group(1), match.group(2), (match.group(2), '<', float(match.group(3))))
+                self.limits.append(limit)
+
+    def initialize(self):
+        super(Plugin, self).initialize()
+        db_loader = self.get_plugin_loader().get_plugin('mpp.dbf').get_loader()
+        self._verify_namespaces(db_loader.iterate_namespace_names())
+        for each in db_loader.iterate_namespace_names():
+            self._verify_fields(each, db_loader.get_namespace(each).iterate_field_names())
+    
+    def _verify_namespaces(self, valid_namespaces):
+        valid = []
+        for each in valid_namespaces:
+            valid.append(each)
+        for each in self.limits:
+            if each.namespace not in valid:
+                self.parser.error("option --{0}-limit: metric '{1}:{2}' is not available in the database file.".
+                                  format(each.type, each.namespace, each.field))
+
+    def _verify_fields(self, namespace, valid_fields):
+        valid = []
+        for each in valid_fields:
+            valid.append(each)
+        for each in self.limits:
+            if each.namespace == namespace:
+                if each.field not in valid:
+                    self.parser.error("option --{0}-limit: metric '{1}:{2}' is not available in the database file.".
+                                      format(each.type, each.namespace, each.field))
+                    
+    def iterate_limits(self):
+        for each in self.limits:
+            yield each   
+
+    def is_mode_matched(self, limit, value, diff, is_modified):
+        if is_modified == None:
+            # means new region, True in all modes
+            return True
+        if self.mode == self.MODE_ALL:
+            return True 
+        if self.mode == self.MODE_TOUCHED and is_modified == True:
+            return True 
+        if self.mode == self.MODE_TREND and is_modified == True:
+            if limit < value and diff > 0:
+                return True
+            if limit > value and diff < 0:
+                return True
+        return False
+
     def run(self, args):
         return main(self, args)
 
@@ -46,7 +157,6 @@ def main(plugin, args):
 
     loader_prev = plugin.get_plugin_loader().get_plugin('mpp.dbf').get_loader_prev()
     loader = plugin.get_plugin_loader().get_plugin('mpp.dbf').get_loader()
-    warn_plugin = plugin.get_plugin_loader().get_plugin('mpp.warn')
     
     paths = None
     if len(args) == 0:
@@ -56,13 +166,13 @@ def main(plugin, args):
 
     # Try to optimise iterative change scans
     modified_file_ids = None
-    if warn_plugin.mode != warn_plugin.MODE_ALL:
+    if plugin.mode != plugin.MODE_ALL:
         modified_file_ids = get_list_of_modified_files(loader, loader_prev)
         
     for path in paths:
         path = mpp.utils.preprocess_path(path)
         
-        for limit in warn_plugin.iterate_limits():
+        for limit in plugin.iterate_limits():
             logging.info("Applying limit: " + str(limit))
             filters = [limit.filter]
             if modified_file_ids != None:
@@ -74,7 +184,7 @@ def main(plugin, args):
                 sort_by = limit.field
                 if limit.type == "max":
                     sort_by = "-" + sort_by
-                if warn_plugin.mode == warn_plugin.MODE_ALL:
+                if plugin.mode == plugin.MODE_ALL:
                     # if it is not ALL mode, the tool counts number of printed warnings below
                     limit_by = plugin.hotspots
                 limit_warnings = plugin.hotspots
@@ -112,7 +222,7 @@ def main(plugin, args):
                             diff = mpp.api.DiffData(select_data,
                                                            file_data_prev.get_region(prev_id)).get_data(limit.namespace, limit.field)
 
-                if (warn_plugin.is_mode_matched(limit.limit,
+                if (plugin.is_mode_matched(limit.limit,
                                                 select_data.get_data(limit.namespace, limit.field),
                                                 diff,
                                                 is_modified) == False):

+ 172 - 9
mainline/ext/std/tools/view.py

@@ -17,6 +17,7 @@
 #    along with Metrix++.  If not, see <http://www.gnu.org/licenses/>.
 #
 
+import logging
 
 import mpp.api
 import mpp.utils
@@ -24,7 +25,12 @@ import mpp.cout
 
 class Plugin(mpp.api.Plugin, mpp.api.IConfigurable, mpp.api.IRunable):
     
+    MODE_NEW     = 0x01
+    MODE_TOUCHED = 0x03
+    MODE_ALL     = 0x07
+
     def declare_configuration(self, parser):
+        self.parser = parser
         parser.add_option("--format", "--ft", default='txt', choices=['txt', 'xml', 'python'],
                           help="Format of the output data. "
                           "Possible values are 'xml', 'txt' or 'python' [default: %default]")
@@ -34,12 +40,29 @@ class Plugin(mpp.api.Plugin, mpp.api.IConfigurable, mpp.api.IRunable):
         parser.add_option("--max-distribution-rows", "--mdr", type=int, default=20,
                           help="Maximum number of rows in distribution tables. "
                                "If it is set to 0, the tool does not optimize the size of distribution tables [default: %default]")
+        parser.add_option("--scope-mode", "--sm", default='all', choices=['new', 'touched', 'all'],
+                         help="Defines the analysis scope mode. "
+                         "'all' - all available regions and files are taken into account, "
+                         "'new' - only new regions and files are taken into account, "
+                         "'touched' - only new and modified regions and files are taken into account. "
+                         "Modes 'new' and 'touched' may require more time for processing than mode 'all' "
+                         "[default: %default]")
     
     def configure(self, options):
         self.out_format = options.__dict__['format']
         self.nest_regions = options.__dict__['nest_regions']
         self.dist_columns = options.__dict__['max_distribution_rows']
 
+        if options.__dict__['scope_mode'] == 'new':
+            self.mode = self.MODE_NEW
+        elif options.__dict__['scope_mode'] == 'touched':
+            self.mode = self.MODE_TOUCHED
+        elif options.__dict__['scope_mode'] == 'all':
+            self.mode = self.MODE_ALL
+
+        if self.mode != self.MODE_ALL and options.__dict__['db_file_prev'] == None:
+            self.parser.error("option --scope-mode: The mode '" + options.__dict__['scope_mode'] + "' requires '--db-file-prev' option set")
+
     def run(self, args):
         loader_prev = self.get_plugin_loader().get_plugin('mpp.dbf').get_loader_prev()
         loader = self.get_plugin_loader().get_plugin('mpp.dbf').get_loader()
@@ -55,11 +78,12 @@ class Plugin(mpp.api.Plugin, mpp.api.IConfigurable, mpp.api.IRunable):
                                             loader,
                                             loader_prev,
                                             self.nest_regions,
-                                            self.dist_columns)
+                                            self.dist_columns,
+                                            self.mode)
         print result
         return exit_code
 
-def export_to_str(out_format, paths, loader, loader_prev, nest_regions, dist_columns):
+def export_to_str(out_format, paths, loader, loader_prev, nest_regions, dist_columns, mode):
     exit_code = 0
     result = ""
     if out_format == 'xml':
@@ -70,7 +94,7 @@ def export_to_str(out_format, paths, loader, loader_prev, nest_regions, dist_col
     for (ind, path) in enumerate(paths):
         path = mpp.utils.preprocess_path(path)
         
-        aggregated_data = loader.load_aggregated_data(path)
+        aggregated_data, aggregated_data_prev = load_aggregated_data_with_mode(loader, loader_prev, path , mode)
         aggregated_data_tree = {}
         subdirs = []
         subfiles = []
@@ -81,7 +105,6 @@ def export_to_str(out_format, paths, loader, loader_prev, nest_regions, dist_col
         else:
             mpp.utils.report_bad_path(path)
             exit_code += 1
-        aggregated_data_prev = loader_prev.load_aggregated_data(path)
         if aggregated_data_prev != None:
             aggregated_data_tree = append_diff(aggregated_data_tree,
                                            aggregated_data_prev.get_data_tree())
@@ -118,6 +141,140 @@ def export_to_str(out_format, paths, loader, loader_prev, nest_regions, dist_col
         
     return (result, exit_code)
 
+def load_aggregated_data_with_mode(loader, loader_prev, path, mode):
+    if mode == Plugin.MODE_ALL:
+        aggregated_data = loader.load_aggregated_data(path)
+        aggregated_data_prev = loader_prev.load_aggregated_data(path)
+    else:
+        assert(mode == Plugin.MODE_NEW or mode == Plugin.MODE_TOUCHED)
+        
+        class AggregatedFilteredData(mpp.api.AggregatedData):
+            
+            def __init__(self, loader, path):
+                super(AggregatedFilteredData, self).__init__(loader, path)
+                self.in_processing_mode = True
+                for name in loader.iterate_namespace_names():
+                    namespace = loader.get_namespace(name)
+                    for field in namespace.iterate_field_names():
+                        self.set_data(name, field, {
+                            'count': 0,
+                            'nonzero': namespace.get_field_packager(field).is_non_zero(),
+                            'min': None,
+                            'max': None,
+                            'total': 0.0,
+                            'avg': None,
+                            'distribution-bars': {}
+                        })
+                        
+            def get_data_tree(self, namespaces=None):
+                self.in_processing_mode = False
+                # need to convert distribution map to a list and calculate average
+                for name in loader.iterate_namespace_names():
+                    namespace = loader.get_namespace(name)
+                    for field in namespace.iterate_field_names():
+                        data = self.get_data(name, field)
+                        bars_list = []
+                        for metric_value in sorted(data['distribution-bars'].keys()):
+                            bars_list.append({'metric': metric_value,
+                                              'count': data['distribution-bars'][metric_value],
+                                              'ratio': ((float(data['distribution-bars'][metric_value]) /
+                                                          float(data['count'])))})
+                        data['distribution-bars'] = bars_list
+                        if data['count'] != 0:
+                            data['avg'] = float(data['total']) / float(data['count'])
+                        self.set_data(name, field, data)
+                return super(AggregatedFilteredData, self).get_data_tree(namespaces=namespaces)
+            
+            def _append_data(self, orig_data):
+                # flag to protect ourselves from getting incomplete data
+                # the workflow in this tool: append data first and after get it using get_data_tree()
+                assert(self.in_processing_mode == True)
+                data = orig_data.get_data_tree()
+                for namespace in data.keys():
+                    for field in data[namespace].keys():
+                        aggr_data = self.get_data(namespace, field)
+                        metric_value = data[namespace][field]
+                        if aggr_data['min'] == None or aggr_data['min'] > metric_value:
+                            aggr_data['min'] = metric_value
+                        if aggr_data['max'] == None or aggr_data['max'] < metric_value:
+                            aggr_data['max'] = metric_value
+                        aggr_data['count'] += 1
+                        aggr_data['total'] += metric_value
+                        # average is calculated later on get_data_tree
+                        if metric_value not in aggr_data['distribution-bars'].keys():
+                            aggr_data['distribution-bars'][metric_value] = 0
+                        aggr_data['distribution-bars'][metric_value] += 1
+                        self.set_data(namespace, field, aggr_data)
+            
+            def _append_file_data(self, file_data):
+                self._append_data(file_data)
+                for region in file_data.iterate_regions():
+                    self._append_data(region)
+                
+        result = AggregatedFilteredData(loader, path)
+        result_prev = AggregatedFilteredData(loader_prev, path)
+        
+        prev_file_ids = set()
+        file_data_iterator = loader.iterate_file_data(path)
+        if file_data_iterator != None:
+            for file_data in file_data_iterator:
+                file_data_prev = loader_prev.load_file_data(file_data.get_path())
+                if file_data_prev != None:
+                    prev_file_ids.add(file_data_prev.get_id())
+                    
+                if (file_data_prev == None and (mode == Plugin.MODE_NEW or mode == Plugin.MODE_TOUCHED)):
+                    # new file and required mode matched
+                    logging.info("Processing: " + file_data.get_path() + " [new]")
+                    result._append_file_data(file_data)
+                elif (file_data.get_checksum() != file_data_prev.get_checksum()):
+                    # modified file and required mode matched
+                    logging.info("Processing: " + file_data.get_path() + " [modified]")
+                    # append file data without appending regions...
+                    if (mode == Plugin.MODE_TOUCHED):
+                        # if required mode matched
+                        result._append_data(file_data)
+                        result_prev._append_data(file_data_prev)
+                    # process regions separately
+                    matcher = mpp.utils.FileRegionsMatcher(file_data, file_data_prev)
+                    prev_reg_ids = set()
+                    for region in file_data.iterate_regions():
+                        prev_id = matcher.get_prev_id(region.get_id())
+                        if prev_id != None:
+                            prev_reg_ids.add(prev_id)
+                        if (matcher.is_matched(region.get_id()) == False and
+                            (mode == Plugin.MODE_NEW or mode == Plugin.MODE_TOUCHED)):
+                            # new region
+                            logging.debug("Processing region: " + region.get_name() + " [new]")
+                            result._append_data(region)
+                        elif matcher.is_modified(region.get_id()) and mode == Plugin.MODE_TOUCHED:
+                            # modified region
+                            logging.debug("Processing region: " + region.get_name() + " [modified]")
+                            result._append_data(region)
+                            result_prev._append_data(file_data_prev.get_region(prev_id))
+                            
+                    if mode == Plugin.MODE_TOUCHED:
+                        for region_prev in file_data_prev.iterate_regions():
+                            if region_prev.get_id() not in prev_reg_ids:
+                                # deleted region
+                                logging.debug("Processing: " + region_prev.get_name() + " [deleted]")
+                                result_prev._append_data(region_prev)
+                
+        if mode == Plugin.MODE_TOUCHED:
+            file_data_prev_iterator = loader_prev.iterate_file_data(path)
+            if file_data_prev_iterator != None:
+                for file_data_prev in file_data_prev_iterator:
+                    if file_data_prev.get_id() not in prev_file_ids:
+                        # deleted file and required mode matched
+                        logging.info("Processing: " + file_data_prev.get_path() + " [deleted]")
+                        result_prev._append_file_data(file_data_prev)
+
+        return (result, result_prev)
+            
+    return (aggregated_data, aggregated_data_prev)
+
+
+
+
 def append_regions(file_data_tree, file_data, file_data_prev, nest_regions):
     regions_matcher = None
     if file_data_prev != None:
@@ -200,7 +357,7 @@ def append_diff(main_tree, prev_tree):
 def append_diff_list(main_list, prev_list):
     merged_list = {}
     for bar in main_list:
-        merged_list[bar['metric']] = {'count': bar['count'], '__diff__':0, 'ratio': bar['ratio']}
+        merged_list[bar['metric']] = {'count': bar['count'], '__diff__':bar['count'], 'ratio': bar['ratio']}
     for bar in prev_list:
         if bar['metric'] in merged_list.keys():
             merged_list[bar['metric']]['__diff__'] = \
@@ -216,6 +373,7 @@ def append_diff_list(main_list, prev_list):
     return result
 
 def append_suppressions(path, data, loader):
+    # TODO can not append right suppressions for mode != ALL, fix it
     for namespace in data.keys():
         for field in data[namespace].keys():
             selected_data = loader.load_selected_data('std.suppress',
@@ -249,7 +407,7 @@ def compress_dist(data, columns):
             remaining_count = metric_data['count']
             next_consume = None
             next_bar = None
-            max_count = 0
+            max_count = -(0xFFFFFFFF)
             min_count = 0xFFFFFFFF
             sum_ratio = 0
             for (ind, bar) in enumerate(distr):
@@ -300,9 +458,11 @@ def compress_dist(data, columns):
                         break
 
             if (float(max_count - min_count) / metric_data['count'] < 0.05 and
-                metric_data['count'] > 1 and
+                metric_data['count'] > columns and
                 len(new_dist) > 1):
-                # trick here: if all bars are even in the new distribution
+                # trick here:
+                # if all bars are even in the new distribution AND
+                # there are many items in the distribution (> max distribution rows),
                 # it is better to do linear compression instead
                 new_dist = []
                 step = int(round(float(metric_data['max'] - metric_data['min']) / columns))
@@ -436,7 +596,10 @@ def cout_txt(data, loader):
                 sum_ratio += bar['ratio']
                 diff_str = ""
                 if '__diff__' in bar.keys():
-                    diff_str = ' [{0:{1}}]'.format(bar['__diff__'], '+' if bar['__diff__'] >= 0 else '')
+                    if bar['__diff__'] >= 0:
+                        diff_str = ' [+{0:<{1}}]'.format(bar['__diff__'], count_str_len)
+                    else:
+                        diff_str = ' [{0:<{1}}]'.format(bar['__diff__'], count_str_len+1)
                 if isinstance(bar['metric'], float):
                     metric_str = "{0:.4f}".format(bar['metric'])
                 else:

+ 0 - 27
mainline/mpp/warn.ini

@@ -1,27 +0,0 @@
-;
-;    Metrix++, Copyright 2009-2013, Metrix++ Project
-;    Link: http://metrixplusplus.sourceforge.net
-;    
-;    This file is a part of Metrix++ Tool.
-;    
-;    Metrix++ is free software: you can redistribute it and/or modify
-;    it under the terms of the GNU General Public License as published by
-;    the Free Software Foundation, version 3 of the License.
-;    
-;    Metrix++ is distributed in the hope that it will be useful,
-;    but WITHOUT ANY WARRANTY; without even the implied warranty of
-;    MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
-;    GNU General Public License for more details.
-;    
-;    You should have received a copy of the GNU General Public License
-;    along with Metrix++.  If not, see <http://www.gnu.org/licenses/>.
-;
-
-[Plugin]
-version: 1.0
-package: mpp
-module:  warn
-class:   Plugin
-depends: None
-actions: None
-enabled: True

+ 0 - 139
mainline/mpp/warn.py

@@ -1,139 +0,0 @@
-#
-#    Metrix++, Copyright 2009-2013, Metrix++ Project
-#    Link: http://metrixplusplus.sourceforge.net
-#    
-#    This file is a part of Metrix++ Tool.
-#    
-#    Metrix++ is free software: you can redistribute it and/or modify
-#    it under the terms of the GNU General Public License as published by
-#    the Free Software Foundation, version 3 of the License.
-#    
-#    Metrix++ is distributed in the hope that it will be useful,
-#    but WITHOUT ANY WARRANTY; without even the implied warranty of
-#    MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
-#    GNU General Public License for more details.
-#    
-#    You should have received a copy of the GNU General Public License
-#    along with Metrix++.  If not, see <http://www.gnu.org/licenses/>.
-#
-
-import re
-
-import mpp.api
-
-class Plugin(mpp.api.Plugin, mpp.api.IConfigurable):
-    
-    MODE_NEW     = 0x01
-    MODE_TREND   = 0x03
-    MODE_TOUCHED = 0x07
-    MODE_ALL     = 0x15
-    
-    
-    def declare_configuration(self, parser):
-        self.parser = parser
-        parser.add_option("--warn-mode", "--wm", default='all', choices=['new', 'trend', 'touched', 'all'],
-                         help="Defines the warnings mode. "
-                         "'new' - warnings for new regions only, "
-                         "'trend' - warnings for new regions and for bad trend of modified regions, "
-                         "'touched' - warnings for new regions and modified regions, "
-                         "'all' - all warnings active "
-                         "[default: %default]")
-
-        parser.add_option("--min-limit", "--min", action="multiopt",
-                          help="A threshold per 'namespace:field' metric in order to select regions, "
-                          "which have got metric value less than the specified limit. "
-                          "This option can be specified multiple times, if it is necessary to apply several limits. "
-                          "Should be in the format: <namespace>:<field>:<limit-value>, for example: "
-                          "'std.code.lines:comments:1'.")
-        parser.add_option("--max-limit", "--max", action="multiopt",
-                          help="A threshold per 'namespace:field' metric in order to select regions, "
-                          "which have got metric value more than the specified limit. "
-                          "This option can be specified multiple times, if it is necessary to apply several limits. "
-                          "Should be in the format: <namespace>:<field>:<limit-value>, for example: "
-                          "'std.code.complexity:cyclomatic:7'.")
-        
-    def configure(self, options):
-        if options.__dict__['warn_mode'] == 'new':
-            self.mode = self.MODE_NEW
-        elif options.__dict__['warn_mode'] == 'trend':
-            self.mode = self.MODE_TREND
-        elif options.__dict__['warn_mode'] == 'touched':
-            self.mode = self.MODE_TOUCHED
-        elif options.__dict__['warn_mode'] == 'all':
-            self.mode = self.MODE_ALL
-            
-        if self.mode != self.MODE_ALL and options.__dict__['db_file_prev'] == None:
-            self.parser.error("option --warn-mode: The mode '" + options.__dict__['warn_mode'] + "' requires '--db-file-prev' option set")
-
-        class Limit(object):
-            def __init__(self, limit_type, limit, namespace, field, db_filter):
-                self.type = limit_type
-                self.limit = limit
-                self.namespace = namespace
-                self.field = field
-                self.filter = db_filter
-                
-            def __repr__(self):
-                return "namespace '" + self.namespace + "', filter '" + str(self.filter) + "'"
-        
-        self.limits = []
-        pattern = re.compile(r'''([^:]+)[:]([^:]+)[:]([-+]?[0-9]+(?:[.][0-9]+)?)''')
-        if options.__dict__['max_limit'] != None:
-            for each in options.__dict__['max_limit']:
-                match = re.match(pattern, each)
-                if match == None:
-                    self.parser.error("option --max-limit: Invalid format: " + each)
-                limit = Limit("max", float(match.group(3)), match.group(1), match.group(2), (match.group(2), '>', float(match.group(3))))
-                self.limits.append(limit)
-        if options.__dict__['min_limit'] != None:
-            for each in options.__dict__['min_limit']:  
-                match = re.match(pattern, each)
-                if match == None:
-                    self.parser.error("option --min-limit: Invalid format: " + each)
-                limit = Limit("min", float(match.group(3)), match.group(1), match.group(2), (match.group(2), '<', float(match.group(3))))
-                self.limits.append(limit)
-    
-    def initialize(self):
-        super(Plugin, self).initialize()
-        db_loader = self.get_plugin_loader().get_plugin('mpp.dbf').get_loader()
-        self._verify_namespaces(db_loader.iterate_namespace_names())
-        for each in db_loader.iterate_namespace_names():
-            self._verify_fields(each, db_loader.get_namespace(each).iterate_field_names())
-    
-    def _verify_namespaces(self, valid_namespaces):
-        valid = []
-        for each in valid_namespaces:
-            valid.append(each)
-        for each in self.limits:
-            if each.namespace not in valid:
-                self.parser.error("option --{0}-limit: metric '{1}:{2}' is not available in the database file.".
-                                  format(each.type, each.namespace, each.field))
-
-    def _verify_fields(self, namespace, valid_fields):
-        valid = []
-        for each in valid_fields:
-            valid.append(each)
-        for each in self.limits:
-            if each.namespace == namespace:
-                if each.field not in valid:
-                    self.parser.error("option --{0}-limit: metric '{1}:{2}' is not available in the database file.".
-                                      format(each.type, each.namespace, each.field))
-                    
-    def iterate_limits(self):
-        for each in self.limits:
-            yield each   
-
-    def is_mode_matched(self, limit, value, diff, is_modified):
-        if is_modified == None:
-            return True
-        if self.mode == self.MODE_ALL:
-            return True 
-        if self.mode == self.MODE_TOUCHED and is_modified == True:
-            return True 
-        if self.mode == self.MODE_TREND and is_modified == True:
-            if limit < value and diff > 0:
-                return True
-            if limit > value and diff < 0:
-                return True
-        return False
-        

+ 13 - 13
mainline/tests/general/test_basic/test_help_limit_default_stdout.gold.txt

@@ -16,12 +16,20 @@ Options:
                         'DEBUG','INFO','WARNING' or 'ERROR'. Default value is
                         inherited from environment variable
                         'METRIXPLUSPLUS_LOG_LEVEL' if set. [default: INFO]
+  --hotspots=HOTSPOTS, --hs=HOTSPOTS
+                        If not set (none), all exceeded limits are printed. If
+                        set, exceeded limits are sorted (the worst is the
+                        first) and only first HOTSPOTS limits are printed.
+                        [default: none]
+  --disable-suppressions, --ds
+                        If not set (none), all suppressions are ignored and
+                        associated warnings are printed. [default: False]
   --warn-mode=WARN_MODE, --wm=WARN_MODE
-                        Defines the warnings mode. 'new' - warnings for new
-                        regions only, 'trend' - warnings for new regions and
-                        for bad trend of modified regions, 'touched' -
-                        warnings for new regions and modified regions, 'all' -
-                        all warnings active [default: all]
+                        Defines the warnings mode. 'all' - all warnings
+                        active, 'new' - warnings for new regions/files only,
+                        'trend' - warnings for new regions/files and for bad
+                        trend of modified regions/files, 'touched' - warnings
+                        for new and modified regions/files [default: all]
   --min-limit=MIN_LIMIT, --min=MIN_LIMIT
                         A threshold per 'namespace:field' metric in order to
                         select regions, which have got metric value less than
@@ -38,11 +46,3 @@ Options:
                         limits. Should be in the format: <namespace>:<field
                         >:<limit-value>, for example:
                         'std.code.complexity:cyclomatic:7'.
-  --hotspots=HOTSPOTS, --hs=HOTSPOTS
-                        If not set (none), all exceeded limits are printed. If
-                        set, exceeded limits are sorted (the worst is the
-                        first) and only first HOTSPOTS limits are printed.
-                        [default: none]
-  --disable-suppressions, --ds
-                        If not set (none), all suppressions are ignored and
-                        associated warnings are printed. [default: False]

+ 8 - 0
mainline/tests/general/test_basic/test_help_view_default_stdout.gold.txt

@@ -26,3 +26,11 @@ Options:
                         Maximum number of rows in distribution tables. If it
                         is set to 0, the tool does not optimize the size of
                         distribution tables [default: 20]
+  --scope-mode=SCOPE_MODE, --sm=SCOPE_MODE
+                        Defines the analysis scope mode. 'all' - all available
+                        regions and files are taken into account, 'new' - only
+                        new regions and files are taken into account,
+                        'touched' - only new and modified regions and files
+                        are taken into account. Modes 'new' and 'touched' may
+                        require more time for processing than mode 'all'
+                        [default: all]

+ 2 - 2
mainline/tests/general/test_basic/test_view_format_view_nest_per_file_stdout.gold.txt

@@ -94,9 +94,9 @@
             <std.code.complexity>
                 <cyclomatic nonzero="False" count="6" total="8.0" min="0" max="3" avg="1.33333333333" sup="0">
                     <distribution-bars>
-                        <distribution-bar count="1" __diff__="0" metric="0" ratio="0.166666666667" />
+                        <distribution-bar count="1" __diff__="1" metric="0" ratio="0.166666666667" />
                         <distribution-bar count="3" __diff__="0" metric="1" ratio="0.5" />
-                        <distribution-bar count="1" __diff__="0" metric="2" ratio="0.166666666667" />
+                        <distribution-bar count="1" __diff__="1" metric="2" ratio="0.166666666667" />
                         <distribution-bar count="1" __diff__="0" metric="3" ratio="0.166666666667" />
                     </distribution-bars>
                     <__diff__ nonzero="0" count="2" avg="-0.166666666667" min="-1" max="0" total="2.0" />

+ 2 - 2
mainline/tests/general/test_basic/test_view_format_view_nest_stdout.gold.txt

@@ -12,9 +12,9 @@
             <std.code.complexity>
                 <cyclomatic nonzero="False" count="7" total="11.0" min="0" max="3" avg="1.57142857143" sup="0">
                     <distribution-bars>
-                        <distribution-bar count="1" __diff__="0" metric="0" ratio="0.142857142857" />
+                        <distribution-bar count="1" __diff__="1" metric="0" ratio="0.142857142857" />
                         <distribution-bar count="3" __diff__="0" metric="1" ratio="0.428571428571" />
-                        <distribution-bar count="1" __diff__="0" metric="2" ratio="0.142857142857" />
+                        <distribution-bar count="1" __diff__="1" metric="2" ratio="0.142857142857" />
                         <distribution-bar count="2" __diff__="1" metric="3" ratio="0.285714285714" />
                     </distribution-bars>
                     <__diff__ nonzero="0" count="3" avg="0.0714285714286" min="-1" max="0" total="5.0" />

+ 2 - 2
mainline/tests/general/test_basic/test_workflow_view_second_per_file_stdout.gold.txt

@@ -74,9 +74,9 @@
             <std.code.complexity>
                 <cyclomatic nonzero="False" count="6" total="8.0" min="0" max="3" avg="1.33333333333" sup="0">
                     <distribution-bars>
-                        <distribution-bar count="1" __diff__="0" metric="0" ratio="0.166666666667" />
+                        <distribution-bar count="1" __diff__="1" metric="0" ratio="0.166666666667" />
                         <distribution-bar count="3" __diff__="0" metric="1" ratio="0.5" />
-                        <distribution-bar count="1" __diff__="0" metric="2" ratio="0.166666666667" />
+                        <distribution-bar count="1" __diff__="1" metric="2" ratio="0.166666666667" />
                         <distribution-bar count="1" __diff__="0" metric="3" ratio="0.166666666667" />
                     </distribution-bars>
                     <__diff__ nonzero="0" count="2" avg="-0.166666666667" min="-1" max="0" total="2.0" />

+ 2 - 2
mainline/tests/general/test_basic/test_workflow_view_second_stdout.gold.txt

@@ -12,9 +12,9 @@
             <std.code.complexity>
                 <cyclomatic nonzero="False" count="7" total="11.0" min="0" max="3" avg="1.57142857143" sup="0">
                     <distribution-bars>
-                        <distribution-bar count="1" __diff__="0" metric="0" ratio="0.142857142857" />
+                        <distribution-bar count="1" __diff__="1" metric="0" ratio="0.142857142857" />
                         <distribution-bar count="3" __diff__="0" metric="1" ratio="0.428571428571" />
-                        <distribution-bar count="1" __diff__="0" metric="2" ratio="0.142857142857" />
+                        <distribution-bar count="1" __diff__="1" metric="2" ratio="0.142857142857" />
                         <distribution-bar count="2" __diff__="1" metric="3" ratio="0.285714285714" />
                     </distribution-bars>
                     <__diff__ nonzero="0" count="3" avg="0.0714285714286" min="-1" max="0" total="5.0" />