Commit 2d6dde17e4 by avkonst, 11 years ago

+ 1 - 1
mainline/doc/project.html

@@ -463,7 +463,7 @@ along with Metrix++. If not, see <http://www.gnu.org/licenses/>.
 
                 <p>Unfortunately, there is no rich documentation at this stage.
                 Briefly, database API (class Loader implemented in '<a
-                href="http://metrixplusplus.svn.sourceforge.net/viewvc/metrixplusplus/mainline/core/db/loader.py">core.db.loader</a>')
+                href="http://metrixplusplus.svn.sourceforge.net/viewvc/metrixplusplus/mainline/core/db/loader.py">mpp.db.loader</a>')
                 is a starting point for any new post-analysis tool. There are 2
                 standard tools (<a
                 href="http://metrixplusplus.svn.sourceforge.net/viewvc/metrixplusplus/mainline/tools/export.py">export</a>

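The paragraph above points new post-analysis tools at the Loader class. A minimal sketch of that starting point, following how this commit's own mainline/mpp/db/post.py opens a database; the iteration entry point is an assumption and may be named differently in the actual API:

    import mpp.api

    loader = mpp.api.Loader()
    # open_database() returns False on failure, matching its use in mpp/db/post.py
    if loader.open_database('./metrixpp.db') == False:
        raise RuntimeError("Can not open file: ./metrixpp.db")

    # assumed iteration entry point; get_path() matches the data objects
    # passed to parser callbacks elsewhere in this commit
    for file_data in loader.iterate_file_data():
        print file_data.get_path()
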
+ 4 - 4
mainline/ext/std/code/complexity.py

@@ -17,11 +17,11 @@
 #    along with Metrix++.  If not, see <http://www.gnu.org/licenses/>.
 #
 
-import core.api
+import mpp.api
 
 import re
 
-class Plugin(core.api.Plugin, core.api.SimpleMetricMixin, core.api.Child, core.api.IConfigurable):
+class Plugin(mpp.api.Plugin, mpp.api.SimpleMetricMixin, mpp.api.Child, mpp.api.IConfigurable):
     
     def declare_configuration(self, parser):
         parser.add_option("--std.code.complexity.cyclomatic", "--sccc", action="store_true", default=False,
@@ -47,8 +47,8 @@ class Plugin(core.api.Plugin, core.api.SimpleMetricMixin, core.api.Child, core.a
                                 'cs': self.pattern_cs,
                                 'java': self.pattern_java
                             },
-                            marker_type_mask=core.api.Marker.T.CODE,
-                            region_type_mask=core.api.Region.T.FUNCTION)
+                            marker_type_mask=mpp.api.Marker.T.CODE,
+                            region_type_mask=mpp.api.Region.T.FUNCTION)
         
         super(Plugin, self).initialize(fields=self.get_fields())
         

+ 10 - 10
mainline/ext/std/code/cpp.py

@@ -21,10 +21,10 @@
 import re
 import binascii
 
-import core.api
-import core.cout
+import mpp.api
+import mpp.cout
 
-class Plugin(core.api.Plugin, core.api.Parent, core.api.IParser, core.api.IConfigurable, core.api.ICode):
+class Plugin(mpp.api.Plugin, mpp.api.Parent, mpp.api.IParser, mpp.api.IConfigurable, mpp.api.ICode):
     
     def declare_configuration(self, parser):
         parser.add_option("--std.code.cpp.files", default="*.c,*.h,*.cpp,*.hpp,*.cc,*.hh,*.cxx,*.hxx",
@@ -35,7 +35,7 @@ class Plugin(core.api.Plugin, core.api.Parent, core.api.IParser, core.api.IConfi
         self.files.sort() # sorted list goes to properties
         
     def initialize(self):
-        core.api.Plugin.initialize(self, properties=[
+        mpp.api.Plugin.initialize(self, properties=[
             self.Property('files', ','.join(self.files))
         ])
         self.get_plugin_loader().register_parser(self.files, self)
@@ -228,9 +228,9 @@ class CppCodeParser(object):
                 if blocks[curblk]['indent_start'] == indent_current:
                     next_block = reset_next_block(m.end())
                     if curblk == 0:
-                        core.cout.notify(data.get_path(),
+                        mpp.cout.notify(data.get_path(),
                                          cursor_current + len(self.regex_ln.findall(text, cursor_last_pos, m.start())),
-                                         core.cout.SEVERITY_WARNING,
+                                         mpp.cout.SEVERITY_WARNING,
                                          "Non-matching closing bracket '}' detected.")
                         count_mismatched_brackets += 1
                         continue
@@ -245,9 +245,9 @@ class CppCodeParser(object):
                 # shift indent left
                 indent_current -= 1
                 if indent_current < 0:
-                    core.cout.notify(data.get_path(),
+                    mpp.cout.notify(data.get_path(),
                                      cursor_current + len(self.regex_ln.findall(text, cursor_last_pos, m.start())),
-                                     core.cout.SEVERITY_WARNING,
+                                     mpp.cout.SEVERITY_WARNING,
                                      "Non-matching closing bracket '}' detected.")
                     count_mismatched_brackets += 1
                     indent_current = 0
@@ -289,9 +289,9 @@ class CppCodeParser(object):
 
         while indent_current > 0:
             # log all
-            core.cout.notify(data.get_path(),
+            mpp.cout.notify(data.get_path(),
                              cursor_current + len(self.regex_ln.findall(text, cursor_last_pos, len(text))),
-                             core.cout.SEVERITY_WARNING,
+                             mpp.cout.SEVERITY_WARNING,
                              "Non-matching opening bracket '{' detected.")
             count_mismatched_brackets += 1
             indent_current -= 1

+ 10 - 10
mainline/ext/std/code/cs.py

@@ -21,10 +21,10 @@
 import re
 import binascii
 
-import core.api
-import core.cout
+import mpp.api
+import mpp.cout
 
-class Plugin(core.api.Plugin, core.api.Parent, core.api.IParser, core.api.IConfigurable, core.api.ICode):
+class Plugin(mpp.api.Plugin, mpp.api.Parent, mpp.api.IParser, mpp.api.IConfigurable, mpp.api.ICode):
     
     def declare_configuration(self, parser):
         parser.add_option("--std.code.cs.files", default="*.cs",
@@ -35,7 +35,7 @@ class Plugin(core.api.Plugin, core.api.Parent, core.api.IParser, core.api.IConfi
         self.files.sort() # sorted list goes to properties
         
     def initialize(self):
-        core.api.Plugin.initialize(self, properties=[
+        mpp.api.Plugin.initialize(self, properties=[
             self.Property('files', ','.join(self.files))
         ])
         self.get_plugin_loader().register_parser(self.files, self)
@@ -244,9 +244,9 @@ class CsCodeParser(object):
                 if blocks[curblk]['indent_start'] == indent_current:
                     next_block = reset_next_block(m.end())
                     if curblk == 0:
-                        core.cout.notify(data.get_path(),
+                        mpp.cout.notify(data.get_path(),
                                          cursor_current + len(self.regex_ln.findall(text, cursor_last_pos, m.start())),
-                                         core.cout.SEVERITY_WARNING,
+                                         mpp.cout.SEVERITY_WARNING,
                                          "Non-matching closing bracket '}' detected.")
                         count_mismatched_brackets += 1
                         continue
@@ -261,9 +261,9 @@ class CsCodeParser(object):
                 # shift indent left
                 indent_current -= 1
                 if indent_current < 0:
-                    core.cout.notify(data.get_path(),
+                    mpp.cout.notify(data.get_path(),
                                      cursor_current + len(self.regex_ln.findall(text, cursor_last_pos, m.start())),
-                                     core.cout.SEVERITY_WARNING,
+                                     mpp.cout.SEVERITY_WARNING,
                                      "Non-matching closing bracket '}' detected.")
                     count_mismatched_brackets += 1
                     indent_current = 0
@@ -304,9 +304,9 @@ class CsCodeParser(object):
 
         while indent_current > 0:
             # log all
-            core.cout.notify(data.get_path(),
+            mpp.cout.notify(data.get_path(),
                              cursor_current + len(self.regex_ln.findall(text, cursor_last_pos, len(text))),
-                             core.cout.SEVERITY_WARNING,
+                             mpp.cout.SEVERITY_WARNING,
                              "Non-matching opening bracket '{' detected.")
             count_mismatched_brackets += 1
             indent_current -= 1

+ 10 - 10
mainline/ext/std/code/java.py

@@ -21,10 +21,10 @@
 import re
 import binascii
 
-import core.api
-import core.cout
+import mpp.api
+import mpp.cout
 
-class Plugin(core.api.Plugin, core.api.Parent, core.api.IParser, core.api.IConfigurable, core.api.ICode):
+class Plugin(mpp.api.Plugin, mpp.api.Parent, mpp.api.IParser, mpp.api.IConfigurable, mpp.api.ICode):
     
     def declare_configuration(self, parser):
         parser.add_option("--std.code.java.files", default="*.java",
@@ -35,7 +35,7 @@ class Plugin(core.api.Plugin, core.api.Parent, core.api.IParser, core.api.IConfi
         self.files.sort() # sorted list goes to properties
         
     def initialize(self):
-        core.api.Plugin.initialize(self, properties=[
+        mpp.api.Plugin.initialize(self, properties=[
             self.Property('files', ','.join(self.files))
         ])
         self.get_plugin_loader().register_parser(self.files, self)
@@ -204,9 +204,9 @@ class JavaCodeParser(object):
                 if blocks[curblk]['indent_start'] == indent_current:
                     next_block = reset_next_block(m.end())
                     if curblk == 0:
-                        core.cout.notify(data.get_path(),
+                        mpp.cout.notify(data.get_path(),
                                          cursor_current + len(self.regex_ln.findall(text, cursor_last_pos, m.start())),
-                                         core.cout.SEVERITY_WARNING,
+                                         mpp.cout.SEVERITY_WARNING,
                                          "Non-matching closing bracket '}' detected.")
                         count_mismatched_brackets += 1
                         continue
@@ -221,9 +221,9 @@ class JavaCodeParser(object):
                 # shift indent left
                 indent_current -= 1
                 if indent_current < 0:
-                    core.cout.notify(data.get_path(),
+                    mpp.cout.notify(data.get_path(),
                                      cursor_current + len(self.regex_ln.findall(text, cursor_last_pos, m.start())),
-                                     core.cout.SEVERITY_WARNING,
+                                     mpp.cout.SEVERITY_WARNING,
                                      "Non-matching closing bracket '}' detected.")
                     count_mismatched_brackets += 1
                     indent_current = 0
@@ -263,9 +263,9 @@ class JavaCodeParser(object):
 
         while indent_current > 0:
             # log all
-            core.cout.notify(data.get_path(),
+            mpp.cout.notify(data.get_path(),
                              cursor_current + len(self.regex_ln.findall(text, cursor_last_pos, len(text))),
-                             core.cout.SEVERITY_WARNING,
+                             mpp.cout.SEVERITY_WARNING,
                              "Non-matching opening bracket '{' detected.")
             count_mismatched_brackets += 1
             indent_current -= 1

+ 4 - 4
mainline/ext/std/code/length.py

@@ -17,9 +17,9 @@
 #    along with Metrix++.  If not, see <http://www.gnu.org/licenses/>.
 #
 
-import core.api
+import mpp.api
 
-class Plugin(core.api.Plugin, core.api.Child, core.api.IConfigurable):
+class Plugin(mpp.api.Plugin, mpp.api.Child, mpp.api.IConfigurable):
     
     def declare_configuration(self, parser):
         parser.add_option("--std.code.length.total", "--sclent", action="store_true", default=False,
@@ -32,10 +32,10 @@ class Plugin(core.api.Plugin, core.api.Child, core.api.IConfigurable):
         fields = []
         if self.is_active == True:
             fields.append(self.Field('total', int))
-        core.api.Plugin.initialize(self, fields=fields)
+        mpp.api.Plugin.initialize(self, fields=fields)
         
         if len(fields) != 0:
-            self.subscribe_by_parents_interface(core.api.ICode, 'callback')
+            self.subscribe_by_parents_interface(mpp.api.ICode, 'callback')
 
     def callback(self, parent, data, is_updated):
         is_updated = is_updated or self.is_updated

+ 7 - 7
mainline/ext/std/code/lines.py

@@ -17,10 +17,10 @@
 #    along with Metrix++.  If not, see <http://www.gnu.org/licenses/>.
 #
 
-import core.api
+import mpp.api
 import re
 
-class Plugin(core.api.Plugin, core.api.SimpleMetricMixin, core.api.Child, core.api.IConfigurable):
+class Plugin(mpp.api.Plugin, mpp.api.SimpleMetricMixin, mpp.api.Child, mpp.api.IConfigurable):
     
     def declare_configuration(self, parser):
         parser.add_option("--std.code.lines.code", "--sclc", action="store_true", default=False,
@@ -52,25 +52,25 @@ class Plugin(core.api.Plugin, core.api.SimpleMetricMixin, core.api.Child, core.a
         self.declare_metric(self.is_active_code,
                        self.Field('code', int),
                        self.pattern_line,
-                       core.api.Marker.T.CODE)
+                       mpp.api.Marker.T.CODE)
         self.declare_metric(self.is_active_preprocessor,
                        self.Field('preprocessor', int),
                        self.pattern_line,
-                       core.api.Marker.T.PREPROCESSOR)
+                       mpp.api.Marker.T.PREPROCESSOR)
         self.declare_metric(self.is_active_comments,
                        self.Field('comments', int),
                        self.pattern_line,
-                       core.api.Marker.T.COMMENT)
+                       mpp.api.Marker.T.COMMENT)
         self.declare_metric(self.is_active_total,
                        self.Field('total', int),
                        self.pattern_line,
-                       core.api.Marker.T.ANY,
+                       mpp.api.Marker.T.ANY,
                        merge_markers=True)
 
         super(Plugin, self).initialize(fields=self.get_fields())
 
         if self.is_active() == True:
-            self.subscribe_by_parents_interface(core.api.ICode, 'callback')
+            self.subscribe_by_parents_interface(mpp.api.ICode, 'callback')
 
     def callback(self, parent, data, is_updated):
         is_updated = is_updated or self.is_updated

+ 3 - 3
mainline/ext/std/code/test.py

@@ -17,16 +17,16 @@
 #    along with Metrix++.  If not, see <http://www.gnu.org/licenses/>.
 #
 
-import core.api
+import mpp.api
 import logging
 
 # used for testing and development purposes
-class Plugin(core.api.Plugin, core.api.Child):
+class Plugin(mpp.api.Plugin, mpp.api.Child):
     
     def initialize(self):
         return
         # do not trigger version property set, it is a module for testing purposes
-        self.subscribe_by_parents_interface(core.api.ICode)
+        self.subscribe_by_parents_interface(mpp.api.ICode)
 
     def callback(self, parent, data, is_updated):
 

+ 14 - 14
mainline/ext/std/suppress.py

@@ -17,12 +17,12 @@
 #    along with Metrix++.  If not, see <http://www.gnu.org/licenses/>.
 #
 
-import core.api
-import core.cout
+import mpp.api
+import mpp.cout
 
 import re
 
-class Plugin(core.api.Plugin, core.api.Child, core.api.IConfigurable):
+class Plugin(mpp.api.Plugin, mpp.api.Child, mpp.api.IConfigurable):
     
     def declare_configuration(self, parser):
         parser.add_option("--std.suppress", "--ss", action="store_true", default=False,
@@ -42,15 +42,15 @@ class Plugin(core.api.Plugin, core.api.Child, core.api.IConfigurable):
             fields.append(self.Field('count', int, non_zero=True))
             fields.append(self.Field('list', str))
         # - init per regions table
-        core.api.Plugin.initialize(self, fields=fields)
+        mpp.api.Plugin.initialize(self, fields=fields)
         # - init per file table
-        core.api.Plugin.initialize(self,
+        mpp.api.Plugin.initialize(self,
                                    namespace = self.get_name() + '.file',
                                    support_regions = False,
                                    fields=fields)
         
         if len(fields) != 0:
-            self.subscribe_by_parents_interface(core.api.ICode)
+            self.subscribe_by_parents_interface(mpp.api.ICode)
 
     # suppress pattern
     pattern = re.compile(r'''metrix[+][+][:][ \t]+suppress[ \t]+([^ \t\r\n\*]+)''')
@@ -81,8 +81,8 @@ class Plugin(core.api.Plugin, core.api.Child, core.api.IConfigurable):
                         namespace_name, field = m.split(':')
                         namespace = self.get_plugin_loader().get_database_loader().get_namespace(namespace_name)
                         if namespace == None or namespace.get_field_packager(field) == None:
-                            core.cout.notify(data.get_path(), region.get_cursor(),
-                                                  core.cout.SEVERITY_WARNING,
+                            mpp.cout.notify(data.get_path(), region.get_cursor(),
+                                                  mpp.cout.SEVERITY_WARNING,
                                                   "Suppressed metric '" + namespace_name + ":" + field +
                                                     "' is not being collected",
                                                   [("Metric name", namespace_name + ":" + field),
@@ -90,8 +90,8 @@ class Plugin(core.api.Plugin, core.api.Child, core.api.IConfigurable):
                             continue
                         if namespace.are_regions_supported() == False:
                             if region.get_id() != 1:
-                                core.cout.notify(data.get_path(), region.get_cursor(),
-                                                  core.cout.SEVERITY_WARNING,
+                                mpp.cout.notify(data.get_path(), region.get_cursor(),
+                                                  mpp.cout.SEVERITY_WARNING,
                                                   "Suppressed metric '" + namespace_name + ":" + field +
                                                     "' is attributed to a file, not a region. "
                                                     "Remove it or move to the beginning of the file.",
@@ -100,8 +100,8 @@ class Plugin(core.api.Plugin, core.api.Child, core.api.IConfigurable):
                                 continue
                             
                             if m in file_list_text:
-                                core.cout.notify(data.get_path(), region.get_cursor(),
-                                              core.cout.SEVERITY_WARNING,
+                                mpp.cout.notify(data.get_path(), region.get_cursor(),
+                                              mpp.cout.SEVERITY_WARNING,
                                               "Duplicate suppression of the metric '" +
                                                namespace_name + ":" + field + "'",
                                               [("Metric name", namespace_name + ":" + field),
@@ -113,8 +113,8 @@ class Plugin(core.api.Plugin, core.api.Child, core.api.IConfigurable):
                             continue
                         
                         if m in list_text:
-                            core.cout.notify(data.get_path(), region.get_cursor(),
-                                          core.cout.SEVERITY_WARNING,
+                            mpp.cout.notify(data.get_path(), region.get_cursor(),
+                                          mpp.cout.SEVERITY_WARNING,
                                           "Duplicate suppression of the metric '" +
                                            namespace_name + ":" + field + "'",
                                           [("Metric name", namespace_name + ":" + field),

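For reference, the suppression pattern compiled above matches in-source markers of the form "metrix++: suppress <namespace>:<field>"; a quick sketch of the match (the metric name is illustrative):

    import re

    # same pattern as declared in the plugin above
    pattern = re.compile(r'''metrix[+][+][:][ \t]+suppress[ \t]+([^ \t\r\n\*]+)''')
    comment = "/* metrix++: suppress std.code.complexity:cyclomatic */"
    # group(1) carries the 'namespace:field' pair that is checked against the database
    print pattern.search(comment).group(1)  # std.code.complexity:cyclomatic
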
+ 18 - 0
mainline/mpp/__init__.py

@@ -0,0 +1,18 @@
+#
+#    Metrix++, Copyright 2009-2013, Metrix++ Project
+#    Link: http://metrixplusplus.sourceforge.net
+#    
+#    This file is a part of Metrix++ Tool.
+#    
+#    Metrix++ is free software: you can redistribute it and/or modify
+#    it under the terms of the GNU General Public License as published by
+#    the Free Software Foundation, version 3 of the License.
+#    
+#    Metrix++ is distributed in the hope that it will be useful,
+#    but WITHOUT ANY WARRANTY; without even the implied warranty of
+#    MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+#    GNU General Public License for more details.
+#    
+#    You should have received a copy of the GNU General Public License
+#    along with Metrix++.  If not, see <http://www.gnu.org/licenses/>.
+#

File diff suppressed because it is too large
+ 1225 - 0
mainline/mpp/api.py


+ 39 - 0
mainline/mpp/cmdparser.py

@@ -0,0 +1,39 @@
+#
+#    Metrix++, Copyright 2009-2013, Metrix++ Project
+#    Link: http://metrixplusplus.sourceforge.net
+#    
+#    This file is a part of Metrix++ Tool.
+#    
+#    Metrix++ is free software: you can redistribute it and/or modify
+#    it under the terms of the GNU General Public License as published by
+#    the Free Software Foundation, version 3 of the License.
+#    
+#    Metrix++ is distributed in the hope that it will be useful,
+#    but WITHOUT ANY WARRANTY; without even the implied warranty of
+#    MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+#    GNU General Public License for more details.
+#    
+#    You should have received a copy of the GNU General Public License
+#    along with Metrix++.  If not, see <http://www.gnu.org/licenses/>.
+#
+
+import optparse
+
+class MultiOptionParser(optparse.OptionParser):
+    
+    class MultipleOption(optparse.Option):
+        ACTIONS = optparse.Option.ACTIONS + ("multiopt",)
+        STORE_ACTIONS = optparse.Option.STORE_ACTIONS + ("multiopt",)
+        TYPED_ACTIONS = optparse.Option.TYPED_ACTIONS + ("multiopt",)
+        ALWAYS_TYPED_ACTIONS = optparse.Option.ALWAYS_TYPED_ACTIONS + ("multiopt",)
+    
+        def take_action(self, action, dest, opt, value, values, parser):
+            if action == "multiopt":
+                values.ensure_value(dest, []).append(value)
+            else:
+                optparse.Option.take_action(self, action, dest, opt, value, values, parser)
+
+    
+    def __init__(self, *args, **kwargs):
+        optparse.OptionParser.__init__(self, *args, option_class=self.MultipleOption, **kwargs)
+        

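A usage sketch for the new "multiopt" action: each occurrence of the option appends to a list instead of replacing the previous value (the --tag option is illustrative):

    import mpp.cmdparser

    parser = mpp.cmdparser.MultiOptionParser()
    # "multiopt" is in ALWAYS_TYPED_ACTIONS, so the value defaults to type "string"
    parser.add_option("--tag", action="multiopt", dest="tags")
    (options, args) = parser.parse_args(["--tag", "v1.0", "--tag", "v1.1"])
    print options.tags  # ['v1.0', 'v1.1']
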
+ 40 - 0
mainline/mpp/cout.py

@@ -0,0 +1,40 @@
+#
+#    Metrix++, Copyright 2009-2013, Metrix++ Project
+#    Link: http://metrixplusplus.sourceforge.net
+#    
+#    This file is a part of Metrix++ Tool.
+#    
+#    Metrix++ is free software: you can redistribute it and/or modify
+#    it under the terms of the GNU General Public License as published by
+#    the Free Software Foundation, version 3 of the License.
+#    
+#    Metrix++ is distributed in the hope that it will be useful,
+#    but WITHOUT ANY WARRANTY; without even the implied warranty of
+#    MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+#    GNU General Public License for more details.
+#    
+#    You should have received a copy of the GNU General Public License
+#    along with Metrix++.  If not, see <http://www.gnu.org/licenses/>.
+#
+
+SEVERITY_INFO    = 0x01
+SEVERITY_WARNING = 0x02
+SEVERITY_ERROR   = 0x03
+
+def notify(path, cursor, level, message, details = []):
+    notification = path + ":" + str(cursor) + ": "
+    if level == SEVERITY_INFO:
+        notification += "info: "
+    elif level == SEVERITY_WARNING:
+        notification += "warning: "
+    elif level == SEVERITY_ERROR:
+        notification += "error: "
+    else:
+        assert False, "Invalid message severity level specified"
+    notification += message + "\n"
+
+    DETAILS_OFFSET = 15
+    for each in details:
+        notification += "\t" + str(each[0]) + (" " * (DETAILS_OFFSET - len(each[0]))) + ": " + str(each[1]) + "\n"
+        
+    print notification

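The notify() function above formats gcc-style diagnostics; a small sketch of a call, with the printed output approximated in the trailing comments (detail rows are tab-indented and padded to DETAILS_OFFSET):

    import mpp.cout

    mpp.cout.notify("src/main.cpp", 42, mpp.cout.SEVERITY_WARNING,
                    "Non-matching closing bracket '}' detected.",
                    [("Metric name", "std.code.complexity:cyclomatic")])
    # src/main.cpp:42: warning: Non-matching closing bracket '}' detected.
    #     Metric name    : std.code.complexity:cyclomatic
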
+ 18 - 0
mainline/mpp/db/__init__.py

@@ -0,0 +1,18 @@
+#
+#    Metrix++, Copyright 2009-2013, Metrix++ Project
+#    Link: http://metrixplusplus.sourceforge.net
+#    
+#    This file is a part of Metrix++ Tool.
+#    
+#    Metrix++ is free software: you can redistribute it and/or modify
+#    it under the terms of the GNU General Public License as published by
+#    the Free Software Foundation, version 3 of the License.
+#    
+#    Metrix++ is distributed in the hope that it will be useful,
+#    but WITHOUT ANY WARRANTY; without even the implied warranty of
+#    MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+#    GNU General Public License for more details.
+#    
+#    You should have received a copy of the GNU General Public License
+#    along with Metrix++.  If not, see <http://www.gnu.org/licenses/>.
+#

+ 83 - 0
mainline/mpp/db/post.py

@@ -0,0 +1,83 @@
+#
+#    Metrix++, Copyright 2009-2013, Metrix++ Project
+#    Link: http://metrixplusplus.sourceforge.net
+#    
+#    This file is a part of Metrix++ Tool.
+#    
+#    Metrix++ is free software: you can redistribute it and/or modify
+#    it under the terms of the GNU General Public License as published by
+#    the Free Software Foundation, version 3 of the License.
+#    
+#    Metrix++ is distributed in the hope that it will be useful,
+#    but WITHOUT ANY WARRANTY; without even the implied warranty of
+#    MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+#    GNU General Public License for more details.
+#    
+#    You should have received a copy of the GNU General Public License
+#    along with Metrix++.  If not, see <http://www.gnu.org/licenses/>.
+#
+
+import mpp.api
+
+import os.path
+import re
+
+import logging
+
+class Plugin(mpp.api.Plugin, mpp.api.IConfigurable):
+    
+    def declare_configuration(self, parser):
+        parser.add_option("--db-file", "--dbf", default='./metrixpp.db',
+                         help="Primary database file to write (by the collector) and post-process (by other tools) [default: %default]")
+        parser.add_option("--db-file-prev", "--dbfp", default=None,
+                         help="Database file with data collected for the past/previous revision."
+                             " If it is set for the collector tool to perform an incremental/iterative collection,"
+                             " it may reduce the processing time significantly."
+                             " Post-processing tools use it in order to recognise/evaluate change trends. [default: %default].")
+        self.parser = parser
+    
+    def configure(self, options):
+        self.dbfile = options.__dict__['db_file']
+        self.dbfile_prev = options.__dict__['db_file_prev']
+        
+        if self.dbfile_prev != None and os.path.exists(self.dbfile_prev) == False:
+            self.parser.error("File does not exist: " + self.dbfile_prev)
+
+        
+    def initialize(self):
+        
+        if self.get_plugin_loader() != None:
+            if os.path.exists(self.dbfile):
+                logging.warn("Removing existing file: " + self.dbfile)
+                # TODO can reuse existing db file to speed up the processing?
+                # TODO add option to choose to remove or to overwrite?
+                try:
+                    os.unlink(self.dbfile)
+                except:
+                    logging.warn("Failure in removing file: " + self.dbfile)
+    
+            created = self.get_plugin_loader().get_database_loader().create_database(self.dbfile, previous_db = self.dbfile_prev)
+            if created == False:
+                self.parser.error("Failure in creating file: " + self.dbfile)
+            
+            # do not process files dumped by this module
+            self.get_plugin_loader().get_plugin('mpp.dir').add_exclude_rule(re.compile(r'^' + os.path.basename(self.dbfile) + r'$'))
+            if self.dbfile_prev != None:
+                self.get_plugin_loader().get_plugin('mpp.dir').add_exclude_rule(re.compile(r'^' + os.path.basename(self.dbfile_prev) + r'$'))
+
+        else:
+            self.loader_prev = mpp.api.Loader()
+            if self.dbfile_prev != None:
+                if self.loader_prev.open_database(self.dbfile_prev) == False:
+                    self.parser.error("Can not open file: " + self.dbfile_prev)
+            self.loader = mpp.api.Loader()
+            if self.loader.open_database(self.dbfile) == False:
+                self.parser.error("Can not open file: " + self.dbfile)
+
+    def get_loader(self):
+        return self.loader
+
+    def get_loader_prev(self, none_if_empty=False):
+        if none_if_empty == True and self.dbfile_prev == None:
+            return None
+        return self.loader_prev

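Post-processing tools would reach these loaders through the framework's plugin loader; a hedged sketch, where the 'mpp.dbf' lookup name is an assumption modeled on the get_plugin('mpp.dir') call above:

    def get_loaders(plugin_loader):
        # 'mpp.dbf' is an assumed plugin name, by analogy with 'mpp.dir' above
        db_plugin = plugin_loader.get_plugin('mpp.dbf')
        loader = db_plugin.get_loader()
        # None when --db-file-prev was not supplied
        loader_prev = db_plugin.get_loader_prev(none_if_empty=True)
        return (loader, loader_prev)
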
+ 696 - 0
mainline/mpp/db/sqlite.py

@@ -0,0 +1,696 @@
+#
+#    Metrix++, Copyright 2009-2013, Metrix++ Project
+#    Link: http://metrixplusplus.sourceforge.net
+#    
+#    This file is a part of Metrix++ Tool.
+#    
+#    Metrix++ is free software: you can redistribute it and/or modify
+#    it under the terms of the GNU General Public License as published by
+#    the Free Software Foundation, version 3 of the License.
+#    
+#    Metrix++ is distributed in the hope that it will be useful,
+#    but WITHOUT ANY WARRANTY; without even the implied warranty of
+#    MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+#    GNU General Public License for more details.
+#    
+#    You should have received a copy of the GNU General Public License
+#    along with Metrix++.  If not, see <http://www.gnu.org/licenses/>.
+#
+
+import sqlite3
+import re
+import os.path
+import logging
+import itertools 
+import shutil
+import traceback
+
+class Database(object):
+    
+    last_used_id = 0
+    version = "1.0"
+    
+    class TableData(object):
+        def __init__(self, table_id, name, support_regions):
+            self.id = table_id
+            self.name = name
+            self.support_regions = support_regions
+    
+    class ColumnData(object):
+        def __init__(self, column_id, name, sql_type, non_zero):
+            self.id = column_id
+            self.name = name
+            self.sql_type = sql_type
+            self.non_zero = non_zero
+
+    class TagData(object):
+        def __init__(self, tag_id, name):
+            self.id = tag_id
+            self.name = name
+
+    class PropertyData(object):
+        def __init__(self, property_id, name, value):
+            self.id = property_id
+            self.name = name
+            self.value = value
+
+    class FileData(object):
+        def __init__(self, file_id, path, checksum):
+            self.id = file_id
+            self.path = path
+            self.checksum = checksum
+
+    class RegionData(object):
+        def __init__(self, file_id, region_id, name, begin, end, line_begin, line_end, cursor, group, checksum):
+            self.file_id = file_id
+            self.region_id = region_id
+            self.name = name
+            self.begin = begin
+            self.end = end
+            self.line_begin = line_begin
+            self.line_end = line_end
+            self.cursor = cursor
+            self.group = group
+            self.checksum = checksum
+
+    class MarkerData(object):
+        def __init__(self, file_id, begin, end, group):
+            self.file_id = file_id
+            self.begin = begin
+            self.end = end
+            self.group = group
+
+    def __init__(self):
+        self.read_only = False
+        self.conn = None
+        self.dirs = None
+        self.is_cloned = False
+        
+        self.last_used_id += 1
+        self.id = self.last_used_id
+    
+    def __del__(self):
+        if self.conn != None:
+            if self.is_cloned == True:
+                logging.debug("Cleaning up database file")
+                self.InternalCleanUpUtils().clean_up_not_confirmed(self)
+            logging.debug("Committing database file")
+            self.conn.commit()
+    
+    class InternalCleanUpUtils(object):
+        
+        def clean_up_not_confirmed(self, db_loader):
+            sql = "DELETE FROM __info__ WHERE (confirmed = 0)"
+            db_loader.log(sql)
+            db_loader.conn.execute(sql)
+            sql = "DELETE FROM __tags__ WHERE (confirmed = 0)"
+            db_loader.log(sql)
+            db_loader.conn.execute(sql)
+
+            sql = "SELECT * FROM __tables__ WHERE (confirmed = 0)"
+            db_loader.log(sql)
+            for table in db_loader.conn.execute(sql).fetchall():
+                sql = "DELETE FROM __columns__ WHERE table_id = '" + str(table['id']) + "'"
+                db_loader.log(sql)
+                db_loader.conn.execute(sql)
+                sql = "DELETE FROM __tables__ WHERE id = '" + str(table['id']) + "'"
+                db_loader.log(sql)
+                db_loader.conn.execute(sql)
+                sql = "DROP TABLE '" + table['name'] + "'"
+                db_loader.log(sql)
+                db_loader.conn.execute(sql)
+
+            sql = "SELECT __columns__.name AS column_name, __tables__.name AS table_name, __columns__.id AS column_id FROM __columns__, __tables__ WHERE (__columns__.confirmed = 0 AND __columns__.table_id = __tables__.id)"
+            db_loader.log(sql)
+            for column in db_loader.conn.execute(sql).fetchall():
+                logging.info("New database file inherits useless column: '" + column['table_name'] + "'.'" + column['column_name'] + "'")
+                sql = "DELETE FROM __columns__ WHERE id = '" + str(column['column_id']) + "'"
+                db_loader.log(sql)
+                db_loader.conn.execute(sql)
+                sql = "UPDATE '" + column['table_name'] + "' SET '" + column['column_name'] + "' = NULL"
+                db_loader.log(sql)
+                db_loader.conn.execute(sql)
+            
+            self.clean_up_file(db_loader)
+
+        def clean_up_file(self, db_loader, file_id = None):
+            sql = "SELECT * FROM __tables__"
+            db_loader.log(sql)
+            for table in itertools.chain(db_loader.conn.execute(sql).fetchall(), [{'name':'__regions__'}, {'name':'__markers__'}]):
+                sql = ""
+                if file_id == None:
+                    sql = "DELETE FROM '" + table['name'] + "' WHERE file_id IN (SELECT __files__.id FROM __files__ WHERE __files__.confirmed = 0)"
+                else:
+                    sql = "DELETE FROM '" + table['name'] + "' WHERE (file_id = " + str(file_id) + ")"
+                db_loader.log(sql)
+                db_loader.conn.execute(sql)
+            
+    class InternalPathUtils(object):
+        
+        def iterate_heads(self, path):
+            dirs = []
+            head = os.path.dirname(path)
+            last_head = None # to process Windows drives
+            while (head != "" and last_head != head):
+                dirs.append(os.path.basename(head))
+                last_head = head
+                head = os.path.dirname(head)
+            dirs.reverse()
+            for each in dirs:
+                yield each
+                
+        def normalize_path(self, path):
+            if path == None:
+                return None
+            return re.sub(r'''[\\]''', "/", path)
+        
+        def update_dirs(self, db_loader, path = None):
+            if db_loader.dirs == None:
+                if path == None:
+                    db_loader.dirs = {} # initial construction
+                else:
+                    return # avoid useless cache updates 
+            elif path == None:
+                return # avoid multiple initial constructions
+            
+            path = self.normalize_path(path)
+            rows = None
+            if path == None:
+                sql = "SELECT * FROM __files__"
+                db_loader.log(sql)
+                rows = db_loader.conn.execute(sql).fetchall()
+            else:
+                rows = [{"path": path}]
+            for row in rows:
+                cur_head = db_loader.dirs
+                for dir_name in self.iterate_heads(row["path"]):
+                    if dir_name not in cur_head.keys():
+                        cur_head[dir_name] = {}
+                    cur_head = cur_head[dir_name]
+                cur_head[os.path.basename(row["path"])] = None
+
+
+    def create(self, file_name, clone_from = None):
+        if clone_from != None:
+            self.is_cloned = True
+            logging.debug("Cloning database file: " + clone_from)
+            shutil.copy2(clone_from, file_name)
+            logging.debug("Connecting database file: " + file_name)
+            self.conn = sqlite3.connect(file_name)
+            self.conn.row_factory = sqlite3.Row
+            self.read_only = False
+            
+            sql = "UPDATE __tables__ SET confirmed = 0"
+            self.log(sql)
+            self.conn.execute(sql)
+            sql = "UPDATE __columns__ SET confirmed = 0"
+            self.log(sql)
+            self.conn.execute(sql)
+            sql = "UPDATE __tags__ SET confirmed = 0"
+            self.log(sql)
+            self.conn.execute(sql)
+            sql = "UPDATE __files__ SET confirmed = 0"
+            self.log(sql)
+            self.conn.execute(sql)
+                
+        else:
+            self.connect(file_name)
+        
+    def connect(self, file_name, read_only = False):
+        logging.debug("Connecting database file: " + file_name)
+        self.conn = sqlite3.connect(file_name)
+        self.conn.row_factory = sqlite3.Row
+        self.read_only = read_only
+        if self.read_only == False:
+            try:
+                sql = "CREATE TABLE __info__ (id integer NOT NULL PRIMARY KEY AUTOINCREMENT, property text NOT NULL, value text, confirmed integer NOT NULL, UNIQUE (property) ON CONFLICT REPLACE)"
+                self.log(sql)
+                self.conn.execute(sql)
+                sql = "INSERT INTO __info__ (property, value, confirmed) VALUES ('version', '" + self.version + "', 1)"
+                self.log(sql)
+                self.conn.execute(sql)
+                sql = "CREATE TABLE __tables__ (id integer NOT NULL PRIMARY KEY, name text NOT NULL, version text NOT NULL, support_regions integer NOT NULL, confirmed integer NOT NULL, UNIQUE (name))"
+                self.log(sql)
+                self.conn.execute(sql)
+                sql = "CREATE TABLE __columns__ (id integer NOT NULL PRIMARY KEY, name text NOT NULL, type text NOT NULL, table_id integer NOT_NULL, non_zero integer NOT NULL, confirmed integer NOT NULL, UNIQUE (name, table_id))"
+                self.log(sql)
+                self.conn.execute(sql)
+                sql = "CREATE TABLE __tags__ (id integer NOT NULL PRIMARY KEY, name text NOT NULL UNIQUE, confirmed integer NOT NULL)"
+                self.log(sql)
+                self.conn.execute(sql)
+                sql = "CREATE TABLE __files__ (id integer NOT NULL PRIMARY KEY AUTOINCREMENT, path text NOT NULL, checksum integer NOT NULL, tag1 integer, tag2 integer, tag3 integer, confirmed integer NOT NULL, UNIQUE(path))"
+                self.log(sql)
+                self.conn.execute(sql)
+                sql = "CREATE TABLE __regions__ (file_id integer NOT NULL, region_id integer NOT NULL, name text NOT NULL, begin integer NOT NULL, end integer NOT NULL, line_begin integer NOT NULL, line_end integer NOT NULL, cursor integer NOT NULL, group_id integer NOT NULL, checksum integer NOT NULL, PRIMARY KEY (file_id, region_id))"
+                self.log(sql)
+                self.conn.execute(sql)
+                sql = "CREATE TABLE __markers__ (id integer NOT NULL PRIMARY KEY, file_id integer NOT NULL, begin integer NOT NULL, end integer NOT NULL, group_id integer NOT NULL)"
+                self.log(sql)
+                self.conn.execute(sql)
+            except sqlite3.OperationalError as e:
+                logging.debug("sqlite3.OperationalError: " + str(e))
+                
+    def set_property(self, property_name, value):
+        ret_val = None
+        sql = "SELECT * FROM __info__ WHERE (property = '" + property_name + "')"
+        self.log(sql)
+        result = self.conn.execute(sql).fetchall()
+        if len(result) != 0:
+            ret_val = result[0]['value']
+
+        sql = "INSERT INTO __info__ (property, value, confirmed) VALUES ('" + property_name + "', '" + value + "', 1)"
+        self.log(sql)
+        self.conn.execute(sql)
+        return ret_val
+        
+    def get_property(self, property_name):
+        ret_val = None
+        sql = "SELECT * FROM __info__ WHERE (property = '" + property_name + "' AND confirmed = 1)"
+        self.log(sql)
+        result = self.conn.execute(sql).fetchall()
+        if len(result) != 0:
+            ret_val = result[0]['value']
+        return ret_val
+
+    def iterate_properties(self):
+        sql = "SELECT * FROM __info__ WHERE (confirmed = 1)"
+        self.log(sql)
+        for each in self.conn.execute(sql).fetchall():
+            yield self.PropertyData(each['id'], each['property'], each['value'])
+
+    def create_table(self, table_name, support_regions = False, version='1.0'):
+        assert(self.read_only == False)
+
+        sql = "SELECT * FROM __tables__ WHERE (name = '" + table_name + "'AND confirmed == 0)"
+        self.log(sql)
+        result = self.conn.execute(sql).fetchall()
+        if len(result) != 0:
+            if result[0]['version'] != version:
+                # in case of changes in version, drop existing table data
+                sql = "DELETE FROM __columns__ WHERE table_id = '" + str(result[0]['id']) + "'"
+                self.log(sql)
+                self.conn.execute(sql)
+                sql = "DELETE FROM __tables__ WHERE id = '" + str(result[0]['id']) + "'"
+                self.log(sql)
+                self.conn.execute(sql)
+                sql = "DROP TABLE '" + result[0]['name'] + "'"
+                self.log(sql)
+                self.conn.execute(sql)
+            else:                
+                sql = "UPDATE __tables__ SET confirmed = 1 WHERE (name = '" + table_name + "')"
+                self.log(sql)
+                self.conn.execute(sql)
+                return False      
+        
+        sql = "CREATE TABLE '" + table_name + "' (file_id integer NOT NULL PRIMARY KEY)"
+        if support_regions == True:
+            sql = str("CREATE TABLE '" + table_name + "' (file_id integer NOT NULL, region_id integer NOT NULL, "
+                      + "PRIMARY KEY (file_id, region_id))")
+            
+        self.log(sql)
+        self.conn.execute(sql)
+        sql = "INSERT INTO __tables__ (name, version, support_regions, confirmed) VALUES ('" + table_name + "', '" + version + "', '" + str(int(support_regions)) + "', 1)"
+        self.log(sql)
+        self.conn.execute(sql)
+        return True
+
+    def iterate_tables(self):
+        sql = "SELECT * FROM __tables__ WHERE (confirmed = 1)"
+        self.log(sql)
+        result = self.conn.execute(sql).fetchall()
+        for row in result:
+            yield self.TableData(int(row["id"]), str(row["name"]), bool(row["support_regions"]))
+            
+    def check_table(self, table_name):
+        sql = "SELECT * FROM __tables__ WHERE (name = '" + table_name + "' AND confirmed = 1)"
+        self.log(sql)
+        result = self.conn.execute(sql).fetchall()
+        if len(result) == 0:
+            return False
+        return True
+
+    def create_column(self, table_name, column_name, column_type, non_zero=False):
+        assert(self.read_only == False)
+        if column_type == None:
+            logging.debug("Skipping column '" + column_name + "' creation for table '" + table_name + "'")
+            return
+        
+        sql = "SELECT id FROM __tables__ WHERE (name = '" + table_name + "')"
+        self.log(sql)
+        table_id = self.conn.execute(sql).next()['id']
+
+        sql = "SELECT * FROM __columns__ WHERE (table_id = '" + str(table_id) + "' AND name = '" + column_name + "' AND confirmed == 0)"
+        self.log(sql)
+        result = self.conn.execute(sql).fetchall()
+        if len(result) != 0:
+            # Major changes in columns should result in a step up of the table version,
+            # which causes the table to be dropped in case of database reuse
+            assert(result[0]['type'] == column_type)
+            assert(result[0]['non_zero'] == non_zero)
+            sql = "UPDATE __columns__ SET confirmed = 1 WHERE (table_id = '" + str(table_id) + "' AND name = '" + column_name + "')"
+            self.log(sql)
+            self.conn.execute(sql)
+            return False       
+        
+        sql = "ALTER TABLE '" + table_name + "' ADD COLUMN '" + column_name + "' " + column_type
+        self.log(sql)
+        self.conn.execute(sql)
+        sql = "SELECT id FROM __tables__ WHERE (name = '" + table_name + "')"
+        self.log(sql)
+        table_id = self.conn.execute(sql).next()['id']
+        sql = "INSERT INTO __columns__ (name, type, table_id, non_zero, confirmed) VALUES ('" + column_name + "', '" + column_type + "', '" + str(table_id) + "', '" + str(int(non_zero)) + "', 1)"
+        self.log(sql)
+        self.conn.execute(sql)
+        return True        
+
+    def iterate_columns(self, table_name):
+        sql = "SELECT id FROM __tables__ WHERE (name = '" + table_name + "')"
+        self.log(sql)
+        table_id = self.conn.execute(sql).next()['id']
+        sql = "SELECT * FROM __columns__ WHERE (table_id = '" + str(table_id) + "' AND confirmed = 1)"
+        self.log(sql)
+        result = self.conn.execute(sql).fetchall()
+        for row in result:
+            yield self.ColumnData(int(row["id"]), str(row["name"]), str(row["type"]), bool(row["non_zero"]))
+
+    def check_column(self, table_name, column_name):
+        sql = "SELECT id FROM __tables__ WHERE (name = '" + table_name + "')"
+        self.log(sql)
+        table_id = self.conn.execute(sql).next()['id']
+        sql = "SELECT * FROM __columns__ WHERE (table_id = '" + str(table_id) + "' AND name = '" + column_name + "' AND confirmed = 1)"
+        self.log(sql)
+        result = self.conn.execute(sql).fetchall()
+        if len(result) == 0:
+            return False
+        return True
+    
+    def create_tag(self, tag_name):
+        assert(self.read_only == False)
+        
+        sql = "SELECT * FROM __tags__ WHERE (name = '" + tag_name + "' AND confirmed == 0)"
+        self.log(sql)
+        result = self.conn.execute(sql).fetchall()
+        if len(result) != 0:
+            sql = "UPDATE __tags__ SET confirmed = 1 WHERE (name = '" + tag_name + "')"
+            self.log(sql)
+            self.conn.execute(sql)
+            return        
+        
+        sql = "INSERT INTO __tags__ (name, confirmed) VALUES ('" + tag_name + "', 1)"
+        self.log(sql)
+        self.conn.execute(sql)        
+
+    def iterate_tags(self):
+        sql = "SELECT * FROM __tags__ WHERE (confirmed = 1)"
+        self.log(sql)
+        result = self.conn.execute(sql).fetchall()
+        for row in result:
+            yield self.TagData(int(row["id"]), str(row["name"]))
+
+    def check_tag(self, tag_name):
+        sql = "SELECT * FROM __tags__ WHERE (name = '" + tag_name + "' AND confirmed = 1)"
+        self.log(sql)
+        result = self.conn.execute(sql).fetchall()
+        if len(result) == 0:
+            return False
+        return True
+
+    # TODO activate usage of tags
+    def create_file(self, path, checksum):
+        assert(self.read_only == False)
+        path = self.InternalPathUtils().normalize_path(path)
+
+        if self.is_cloned == True:
+            sql = "SELECT * FROM __files__ WHERE (path = '" + path + "')"
+            self.log(sql)
+            result = self.conn.execute(sql).fetchall()
+            if len(result) != 0:
+                if result[0]['checksum'] == checksum:
+                    old_id = result[0]['id']
+                    sql = "UPDATE __files__ SET confirmed = 1 WHERE (id = " + str(old_id) +")"
+                    self.log(sql)
+                    self.conn.execute(sql)
+                    return (old_id, False)
+                else:
+                    self.InternalCleanUpUtils().clean_up_file(self, result[0]['id'])
+        
+        sql = "INSERT OR REPLACE INTO __files__ (path, checksum, confirmed) VALUES (?, ?, 1)"
+        column_data = [path, checksum]
+        self.log(sql + " /with arguments: " + str(column_data))
+        cur = self.conn.cursor()
+        cur.execute(sql, column_data)
+        self.InternalPathUtils().update_dirs(self, path=path)
+        return (cur.lastrowid, True)
+    
+    def iterate_dircontent(self, path, include_subdirs = True, include_subfiles = True):
+        self.InternalPathUtils().update_dirs(self)
+        path = self.InternalPathUtils().normalize_path(path)
+        cur_head = self.dirs
+        valid = True
+        if path != "":
+            for head in self.InternalPathUtils().iterate_heads(path):
+                if head not in cur_head.keys():
+                    # non-existent directory
+                    valid = False
+                else:
+                    cur_head = cur_head[head]
+            basename = os.path.basename(path)
+            if basename not in cur_head.keys() or cur_head[basename] == None:
+                # does not exist or points to a file
+                valid = False
+            else:
+                cur_head = cur_head[basename]
+        if valid == True:
+            for elem in cur_head.keys():
+                if include_subdirs == True and cur_head[elem] != None:
+                    yield elem
+                if include_subfiles == True and cur_head[elem] == None:
+                    yield elem
+
+    def check_file(self, path):
+        return self.get_file(path) != None
+
+    def check_dir(self, path):
+        for each in self.iterate_dircontent(path):
+            each = each # used
+            return True # there is at least one item
+        return False
+
+    def get_file(self, path):
+        path = self.InternalPathUtils().normalize_path(path)
+        result = self.select_rows("__files__", filters = [("path", "=", path), ("confirmed", "=", 1)])
+        if len(result) == 0:
+            return None
+        assert(len(result) == 1)
+        return self.FileData(result[0]['id'], result[0]['path'], result[0]['checksum'])
+
+    def iterate_files(self, path_like = None):
+        for row in self.select_rows('__files__', path_like=path_like, filters=[('confirmed','=','1')]): 
+            yield self.FileData(row['id'], row['path'], row['checksum'])
+
+    def create_region(self, file_id, region_id, name, begin, end, line_begin, line_end, cursor, group, checksum):
+        assert(self.read_only == False)
+        sql = "INSERT OR REPLACE INTO __regions__ (file_id, region_id, name, begin, end, line_begin, line_end, cursor, group_id, checksum) VALUES (?, ?, ?, ?, ?, ?, ?, ?, ?, ?)"
+        column_data = [file_id, region_id, name, begin, end, line_begin, line_end, cursor, group, checksum]
+        self.log(sql + " /with arguments: " + str(column_data))
+        cur = self.conn.cursor()
+        cur.execute(sql, column_data)
+        return cur.lastrowid
+    
+    def get_region(self, file_id, region_id):
+        result = self.select_rows("__regions__", filters = [("file_id", "=", file_id), ("region_id", "=", region_id)])
+        if len(result) == 0:
+            return None
+        return self.RegionData(result[0]['file_id'],
+                               result[0]['region_id'],
+                               result[0]['name'],
+                               result[0]['begin'],
+                               result[0]['end'],
+                               result[0]['line_begin'],
+                               result[0]['line_end'],
+                               result[0]['cursor'],
+                               result[0]['group_id'],
+                               result[0]['checksum'])
+
+    def iterate_regions(self, file_id):
+        for each in self.select_rows("__regions__", filters = [("file_id", "=", file_id)]):
+            yield self.RegionData(each['file_id'],
+                                  each['region_id'],
+                                  each['name'],
+                                  each['begin'],
+                                  each['end'],
+                                  each['line_begin'],
+                                  each['line_end'],
+                                  each['cursor'],
+                                  each['group_id'],
+                                  each['checksum'])
+    
+    def create_marker(self, file_id, begin, end, group):
+        assert(self.read_only == False)
+        sql = "INSERT OR REPLACE INTO __markers__ (file_id, begin, end, group_id) VALUES (?, ?, ?, ?)"
+        column_data = [file_id, begin, end, group]
+        self.log(sql + " /with arguments: " + str(column_data))
+        cur = self.conn.cursor()
+        cur.execute(sql, column_data)
+        return cur.lastrowid
+    
+    def iterate_markers(self, file_id):
+        for each in self.select_rows("__markers__", filters = [("file_id", "=", file_id)]):
+            yield self.MarkerData(each['file_id'],
+                                  each['begin'],
+                                  each['end'],
+                                  each['group_id'])
+
+    def add_row(self, table_name, file_id, region_id, array_data):
+        assert(self.read_only == False)
+        column_names = "'file_id'"
+        column_values = "?"
+        column_data = [file_id]
+        if region_id != None:
+            column_names += ", 'region_id'"
+            column_values += ", ?"
+            column_data.append(region_id)
+        useful_data = 0
+        for each in array_data:
+            column_names +=  ", '" + each[0] + "'"
+            column_values += ", ?"
+            column_data.append(each[1])
+            useful_data += 1
+        if useful_data == 0:
+            return
+        sql = "INSERT OR REPLACE INTO '" + table_name + "' (" + column_names + ") VALUES (" + column_values + ")"
+        self.log(sql + " /with arguments: " + str(column_data))
+        cur = self.conn.cursor()
+        cur.execute(sql, column_data)
+        return cur.lastrowid
+
+    def select_rows(self, table_name, path_like = None, column_names = [], filters = [], order_by = None, limit_by = None):
+        safe_column_names = []
+        for each in column_names:
+            safe_column_names.append("'" + each + "'")
+        return self.select_rows_unsafe(table_name, path_like = path_like,
+                                       column_names = safe_column_names, filters = filters,
+                                       order_by = order_by, limit_by = limit_by)
+
+    def select_rows_unsafe(self, table_name, path_like = None, column_names = [], filters = [], 
+                           group_by = None, order_by = None, limit_by = None):
+        path_like = self.InternalPathUtils().normalize_path(path_like)
+        if self.conn == None:
+            return []
+
+        table_stmt = "'" + table_name + "'"
+
+        what_stmt = ", ".join(column_names)
+        if len(what_stmt) == 0:
+            what_stmt = "*"
+        elif path_like != None and table_name != '__files__' and group_by == None:
+            what_stmt += ", '__files__'.'path', '__files__'.'id'"
+        inner_stmt = ""
+        if path_like != None and table_name != '__files__':
+            inner_stmt = " INNER JOIN '__files__' ON '__files__'.'id' = '" + table_name + "'.'file_id' "
+
+        where_stmt = " "
+        values = ()
+        if len(filters) != 0:
+            if filters[0][1] == 'IN':
+                where_stmt = " WHERE (`" + filters[0][0] + "` " + filters[0][1] + " " + filters[0][2]
+            else:    
+                where_stmt = " WHERE (`" + filters[0][0] + "` " + filters[0][1] + " ?"
+                values = (filters[0][2],)
+            for each in filters[1:]:
+                if each[1] == 'IN':
+                    where_stmt += " AND `" + each[0] + "` " + each[1] + " " + each[2]
+                else:
+                    where_stmt += " AND `" + each[0] + "` " + each[1] + " ?"
+                    values += (each[2], )
+            if path_like != None:
+                where_stmt += " AND '__files__'.'path' LIKE ?"
+                values += (path_like, )
+            where_stmt += ")"
+        elif path_like != None:
+            where_stmt = " WHERE '__files__'.'path' LIKE ?"
+            values += (path_like, )
+        
+        group_stmt = ""
+        if group_by != None:
+            group_stmt = " GROUP BY (`" + group_by + "`)"
+
+        order_stmt = ""
+        if order_by != None:
+            if order_by.startswith("-"):
+                order_stmt = " ORDER BY (`" + order_by[1:] + "`) DESC "
+            else:
+                order_stmt = " ORDER BY (`" + order_by + "`) "
+
+        limit_stmt = ""
+        if limit_by != None:
+            limit_stmt = " LIMIT " + str(limit_by)
+
+        sql = "SELECT " + what_stmt + " FROM " + table_stmt + inner_stmt + where_stmt + group_stmt + order_stmt + limit_stmt
+        self.log(sql + " /with arguments: " + str(values))
+        return self.conn.execute(sql, values).fetchall()
+
+    def get_row(self, table_name, file_id, region_id):
+        selected = self.get_rows(table_name, file_id, region_id)
+        # ensures there is at most one matching row in the database;
+        # if this assertion fires, the caller should use get_rows instead
+        assert(len(selected) == 0 or len(selected) == 1)
+        if len(selected) == 0:
+            return None
+        return selected[0]
+
+    def get_rows(self, table_name, file_id, region_id):
+        filters = [("file_id", '=', file_id)]
+        if region_id != None:
+            filters.append(("region_id", '=', region_id))
+        return self.select_rows(table_name, filters=filters)
+    
+    def aggregate_rows(self, table_name, path_like = None, column_names = None, filters = []):
+        
+        if column_names == None:
+            column_names = []
+            for column in self.iterate_columns(table_name):
+                column_names.append(column.name)
+                
+        if len(column_names) == 0:
+            # it is possible that a table does not have meaningful columns
+            return {} 
+        
+        total_column_names = []
+        for column_name in column_names:
+            for func in ['max', 'min', 'avg', 'total', 'count']:
+                total_column_names.append(func + "('" + table_name + "'.'" + column_name + "') AS " + "'" + column_name + "_" + func + "'")
+             
+        data = self.select_rows_unsafe(table_name, path_like = path_like, column_names = total_column_names, filters = filters)
+        assert(len(data) == 1)
+        result = {}
+        for column_name in column_names:
+            result[column_name] = {}
+            for func in ['max', 'min', 'avg', 'total', 'count']:
+                result[column_name][func] = data[0][column_name + "_" + func]
+        return result
+    
+    def count_rows(self, table_name, path_like = None, group_by_column = None, filters = []):
+        
+        count_column_names = None
+        
+        if group_by_column != None:
+            for column in self.iterate_columns(table_name):
+                if group_by_column == column.name:
+                    count_column_names = ["`" + group_by_column + "`", "COUNT(`" + group_by_column + "`)"]
+                    break
+        else:
+            count_column_names = ["COUNT(*)"]
+            
+        if count_column_names == None:
+            return []
+             
+        data = self.select_rows_unsafe(table_name, path_like = path_like, column_names = count_column_names,
+                                       filters = filters, group_by = group_by_column)
+        return data
+
+    def log(self, sql):
+        if logging.getLogger().getEffectiveLevel() <= logging.DEBUG:
+            logging.debug("[" + str(self.id) + "] Executing query: " + sql)
+            traceback.print_stack()
+        
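
A minimal usage sketch for the row API above; the names here are illustrative and
assume a Loader instance `loader` with an open, writable database and an existing
table 'std.code.complexity' with a 'cyclomatic' column:

    loader.add_row('std.code.complexity', file_id=1, region_id=2,
                   array_data=[('cyclomatic', 7)])
    rows = loader.select_rows('std.code.complexity',
                              filters=[('cyclomatic', '>', 5)],
                              order_by='-cyclomatic', limit_by=10)
    stats = loader.aggregate_rows('std.code.complexity',
                                  column_names=['cyclomatic'])
    # stats['cyclomatic'] maps 'max', 'min', 'avg', 'total' and 'count' to values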

+ 143 - 0
mainline/mpp/dir.py

@@ -0,0 +1,143 @@
+#
+#    Metrix++, Copyright 2009-2013, Metrix++ Project
+#    Link: http://metrixplusplus.sourceforge.net
+#    
+#    This file is a part of Metrix++ Tool.
+#    
+#    Metrix++ is free software: you can redistribute it and/or modify
+#    it under the terms of the GNU General Public License as published by
+#    the Free Software Foundation, version 3 of the License.
+#    
+#    Metrix++ is distributed in the hope that it will be useful,
+#    but WITHOUT ANY WARRANTY; without even the implied warranty of
+#    MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+#    GNU General Public License for more details.
+#    
+#    You should have received a copy of the GNU General Public License
+#    along with Metrix++.  If not, see <http://www.gnu.org/licenses/>.
+#
+
+import mpp.api
+
+import re
+import os
+import logging
+import time
+import binascii
+
+class Plugin(mpp.api.Plugin, mpp.api.Parent, mpp.api.IConfigurable, mpp.api.IRunable):
+    
+    def __init__(self):
+        self.reader = DirectoryReader()
+        self.exclude_rules = []
+    
+    def declare_configuration(self, parser):
+        parser.add_option("--non-recursively", "--nr", action="store_true", default=False,
+                         help="If the option is set (True), sub-directories are not processed [default: %default]")
+        parser.add_option("--exclude-files", "--ef", default=r'^[.]',
+                         help="Defines the pattern to exclude files from processing [default: %default]")
+        parser.add_option("--std.general.proctime", "--sgpt", action="store_true", default=False,
+                         help="If the option is set (True), the tool measures processing time per file [default: %default]")
+        parser.add_option("--std.general.procerrors", "--sgpe", action="store_true", default=False,
+                         help="If the option is set (True), the tool counts number of processing/parsing errors per file [default: %default]")
+        parser.add_option("--std.general.size", "--sgs", action="store_true", default=False,
+                         help="If the option is set (True), the tool collects file size metric (in bytes) [default: %default]")
+    
+    def configure(self, options):
+        self.non_recursively = options.__dict__['non_recursively']
+        self.add_exclude_rule(re.compile(options.__dict__['exclude_files']))
+        self.is_proctime_enabled = options.__dict__['std.general.proctime']
+        self.is_procerrors_enabled = options.__dict__['std.general.procerrors']
+        self.is_size_enabled = options.__dict__['std.general.size']
+
+    def initialize(self):
+        fields = []
+        if self.is_proctime_enabled == True:
+            fields.append(self.Field('proctime', float))
+        if self.is_procerrors_enabled == True:
+            fields.append(self.Field('procerrors', int))
+        if self.is_size_enabled == True:
+            fields.append(self.Field('size', int))
+        mpp.api.Plugin.initialize(self, namespace='std.general', support_regions=False, fields=fields)
+        
+    def run(self, args):
+        if len(args) == 0:
+            args = ["./"]
+        # accumulate exit codes over all given directories
+        return sum(self.reader.run(self, directory) for directory in args)
+        
+    def add_exclude_rule(self, re_compiled_pattern):
+        # TODO: the file name may contain special regexp symbols, which causes an exception
+        # (for example, run a collection with the "--db-file=metrix++" option)
+        self.exclude_rules.append(re_compiled_pattern)
+        
+    def is_file_excluded(self, file_name):
+        for each in self.exclude_rules:
+            if re.match(each, file_name) != None:
+                return True
+        return False 
+        
+class DirectoryReader():
+    
+    def run(self, plugin, directory):
+        
+        IS_TEST_MODE = False
+        if 'METRIXPLUSPLUS_TEST_MODE' in os.environ.keys():
+            IS_TEST_MODE = True
+        
+        def run_per_file(plugin, fname, full_path):
+            exit_code = 0
+            norm_path = re.sub(r'''[\\]''', "/", full_path)
+            if plugin.is_file_excluded(fname) == False:
+                if os.path.isdir(full_path):
+                    if plugin.non_recursively == False:
+                        exit_code += run_recursively(plugin, full_path)
+                else:
+                    parser = plugin.get_plugin_loader().get_parser(full_path)
+                    if parser == None:
+                        logging.info("Skipping: " + norm_path)
+                    else:
+                        logging.info("Processing: " + norm_path)
+                        ts = time.time()
+                        f = open(full_path, 'r')
+                        text = f.read()
+                        f.close()
+                        checksum = binascii.crc32(text) & 0xffffffff # to match python 3
+
+                        (data, is_updated) = plugin.get_plugin_loader().get_database_loader().create_file_data(norm_path, checksum, text)
+                        procerrors = parser.process(plugin, data, is_updated)
+                        if plugin.is_proctime_enabled == True:
+                            data.set_data('std.general', 'proctime',
+                                          (time.time() - ts) if IS_TEST_MODE == False else 0.01)
+                        if plugin.is_procerrors_enabled == True and procerrors != None and procerrors != 0:
+                            data.set_data('std.general', 'procerrors', procerrors)
+                        if plugin.is_size_enabled == True:
+                            data.set_data('std.general', 'size', len(text))
+                        plugin.get_plugin_loader().get_database_loader().save_file_data(data)
+                        logging.debug("-" * 60)
+                        exit_code += procerrors
+            else:
+                logging.info("Excluding: " + norm_path)
+            return exit_code
+        
+        def run_recursively(plugin, directory):
+            exit_code = 0
+            for fname in os.listdir(directory):
+                full_path = os.path.join(directory, fname)
+                exit_code += run_per_file(plugin, fname, full_path)
+            return exit_code
+        
+        if os.path.exists(directory) == False:
+            logging.error("Skipping (does not exist): " + directory)
+            return 1
+        
+        if os.path.isdir(directory):
+            total_errors = run_recursively(plugin, directory)
+        else:
+            total_errors = run_per_file(plugin, os.path.basename(directory), directory)
+        # errors are reported per file above; total_errors is informational only
+        return 0 # ignore errors, collection is successful anyway
+    
+
+
+    
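
A short sketch of the exclusion logic above (standalone; the pattern matches the
--exclude-files default):

    import re
    plugin = Plugin()
    plugin.add_exclude_rule(re.compile(r'^[.]'))
    plugin.is_file_excluded('.hidden.cpp')   # True: leading dot is excluded
    plugin.is_file_excluded('main.cpp')      # False: processed normally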

+ 18 - 0
mainline/mpp/export/__init__.py

@@ -0,0 +1,18 @@
+#
+#    Metrix++, Copyright 2009-2013, Metrix++ Project
+#    Link: http://metrixplusplus.sourceforge.net
+#    
+#    This file is a part of Metrix++ Tool.
+#    
+#    Metrix++ is free software: you can redistribute it and/or modify
+#    it under the terms of the GNU General Public License as published by
+#    the Free Software Foundation, version 3 of the License.
+#    
+#    Metrix++ is distributed in the hope that it will be useful,
+#    but WITHOUT ANY WARRANTY; without even the implied warranty of
+#    MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+#    GNU General Public License for more details.
+#    
+#    You should have received a copy of the GNU General Public License
+#    along with Metrix++.  If not, see <http://www.gnu.org/licenses/>.
+#

+ 38 - 0
mainline/mpp/export/convert.py

@@ -0,0 +1,38 @@
+#
+#    Metrix++, Copyright 2009-2013, Metrix++ Project
+#    Link: http://metrixplusplus.sourceforge.net
+#    
+#    This file is a part of Metrix++ Tool.
+#    
+#    Metrix++ is free software: you can redistribute it and/or modify
+#    it under the terms of the GNU General Public License as published by
+#    the Free Software Foundation, version 3 of the License.
+#    
+#    Metrix++ is distributed in the hope that it will be useful,
+#    but WITHOUT ANY WARRANTY; without even the implied warranty of
+#    MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+#    GNU General Public License for more details.
+#    
+#    You should have received a copy of the GNU General Public License
+#    along with Metrix++.  If not, see <http://www.gnu.org/licenses/>.
+#
+
+
+import mpp.export.utils.py2xml
+import mpp.export.utils.py2txt
+
+def to_xml(data, root_name = None):
+    serializer = mpp.export.utils.py2xml.Py2XML()
+    return serializer.parse(data, objName=root_name)
+
+def to_python(data, root_name = None):
+    prefix = ""
+    postfix = ""
+    if root_name != None:
+        prefix = "{'" + root_name + ": " 
+        postfix = "}"
+    return prefix + data.__repr__() + postfix
+
+def to_txt(data, root_name = None):
+    serializer = mpp.export.utils.py2txt.Py2TXT()
+    return serializer.parse(data, objName=root_name, indent = -1)
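
A usage sketch for the converters above (the data shape is illustrative; the
serializers render scalar dictionary values as attributes and lists as children):

    import mpp.export.convert
    data = {'file': {'path': 'main.cpp', 'regions': [{'name': 'main'}]}}
    print mpp.export.convert.to_xml(data, root_name='export')
    print mpp.export.convert.to_python(data, root_name='export')
    print mpp.export.convert.to_txt(data, root_name='export')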

+ 18 - 0
mainline/mpp/export/utils/__init__.py

@@ -0,0 +1,18 @@
+#
+#    Metrix++, Copyright 2009-2013, Metrix++ Project
+#    Link: http://metrixplusplus.sourceforge.net
+#    
+#    This file is a part of Metrix++ Tool.
+#    
+#    Metrix++ is free software: you can redistribute it and/or modify
+#    it under the terms of the GNU General Public License as published by
+#    the Free Software Foundation, version 3 of the License.
+#    
+#    Metrix++ is distributed in the hope that it will be useful,
+#    but WITHOUT ANY WARRANTY; without even the implied warranty of
+#    MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+#    GNU General Public License for more details.
+#    
+#    You should have received a copy of the GNU General Public License
+#    along with Metrix++.  If not, see <http://www.gnu.org/licenses/>.
+#

+ 138 - 0
mainline/mpp/export/utils/py2txt.py

@@ -0,0 +1,138 @@
+#
+#    Metrix++, Copyright 2009-2013, Metrix++ Project
+#    Link: http://metrixplusplus.sourceforge.net
+#    
+#    This file is a part of Metrix++ Tool.
+#    
+#    Metrix++ is free software: you can redistribute it and/or modify
+#    it under the terms of the GNU General Public License as published by
+#    the Free Software Foundation, version 3 of the License.
+#    
+#    Metrix++ is distributed in the hope that it will be useful,
+#    but WITHOUT ANY WARRANTY; without even the implied warranty of
+#    MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+#    GNU General Public License for more details.
+#    
+#    You should have received a copy of the GNU General Public License
+#    along with Metrix++.  If not, see <http://www.gnu.org/licenses/>.
+#
+
+# Copied from http://code.activestate.com/recipes/577268-python-data-structure-to-TXT-serialization/ and modified
+
+'''
+Py2TXT - Python to TXT serialization
+
+This code transforms a Python data structure into a TXT document
+
+Usage:
+    serializer = Py2TXT()
+    txt_string = serializer.parse( python_object )
+    print python_object
+    print txt_string
+'''
+
+INDENT_SPACE_SYMBOL = ".   " 
+
+class Py2TXT():
+
+    def __init__( self ):
+
+        self.data = "" # where we store the processed TXT string
+
+    def parse( self, pythonObj, objName=None, indent = 0 ):
+        '''
+        processes Python data structure into TXT string
+        needs objName if pythonObj is a List
+        '''
+        if pythonObj == None:
+            return "\n" + (INDENT_SPACE_SYMBOL * indent) + ""
+
+        if isinstance( pythonObj, dict ):
+            self.data = self._PyDict2TXT( pythonObj, objName, indent = indent + 1 )
+            
+        elif isinstance( pythonObj, list ):
+            # we need name for List object
+            self.data = self._PyList2TXT( pythonObj, objName, indent = indent + 1 )
+            
+        else:
+            self.data = "\n" + (INDENT_SPACE_SYMBOL * indent) + "%(n)s: %(o)s" % { 'n':objName, 'o':str( pythonObj ) }
+            
+        self.data = (INDENT_SPACE_SYMBOL * (indent + 1)) + "-" * 80 + self.data + "\n" + (INDENT_SPACE_SYMBOL * (indent + 1)) + "=" * 80 
+        return self.data
+
+    def _PyDict2TXT( self, pyDictObj, objName=None, indent = 0 ):
+        '''
+        process Python Dict objects
+        They can store TXT attributes and/or children
+        '''
+        tagStr = ""     # TXT string for this level
+        attributes = {} # attribute key/value pairs
+        attrStr = ""    # attribute string of this level
+        childStr = ""   # TXT string of this level's children
+
+        for k, v in pyDictObj.items():
+
+            if isinstance( v, dict ):
+                # child tags, with attributes
+                childStr += self._PyDict2TXT( v, k, indent = indent + 1 )
+
+            elif isinstance( v, list ):
+                # child tags, list of children
+                childStr += self._PyList2TXT( v, k, indent = indent + 1 )
+
+            else:
+                # tag could have many attributes, let's save until later
+                attributes.update( { k:v } )
+
+        if objName == None:
+            return childStr
+
+        # create TXT string for attributes
+        attrStr += ""
+        for k, v in attributes.items():
+            attrStr += "\n" + (INDENT_SPACE_SYMBOL * (indent + 1)) + "%s=\"%s\"" % ( k, v )
+
+        # let's assemble our tag string
+        if childStr == "":
+            tagStr += "\n" + (INDENT_SPACE_SYMBOL * indent) + "%(n)s: %(a)s" % { 'n':objName, 'a':attrStr }
+        else:
+            tagStr += "\n" + (INDENT_SPACE_SYMBOL * indent) + "%(n)s: %(a)s %(c)s" % { 'n':objName, 'a':attrStr, 'c':childStr }
+
+        return tagStr
+
+    def _PyList2TXT( self, pyListObj, objName=None, indent = 0 ):
+        '''
+        process Python List objects
+        They have no attributes, just children
+        Lists only hold Dicts or Strings
+        '''
+        tagStr = ""    # TXT string for this level
+        childStr = ""  # TXT string of children
+
+        for childObj in pyListObj:
+            
+            if isinstance( childObj, dict ):
+                # assume the list's parent key is the plural of its children's
+                # name (e.g. persons > person), so cut off the last character;
+                # this only really works for one level, but is fine in practice
+                childStr += "\n" + (INDENT_SPACE_SYMBOL * indent) + self._PyDict2TXT( childObj, objName[:-1], indent = indent + 1 )
+            elif isinstance( childObj, list ):
+                # same pluralization assumption as above for nested lists
+                childStr += self._PyList2TXT( childObj, objName[:-1], indent = indent + 1 )
+            else:
+                childStr += "\n" + (INDENT_SPACE_SYMBOL * (indent + 1))
+                for string in childObj:
+                    childStr += str(string)
+
+        if objName == None:
+            return childStr
+
+        tagStr += "\n" + (INDENT_SPACE_SYMBOL * indent) + "%(n)s:%(c)s" % { 'n':objName, 'c':childStr }
+
+        return tagStr

+ 141 - 0
mainline/mpp/export/utils/py2xml.py

@@ -0,0 +1,141 @@
+#
+#    Metrix++, Copyright 2009-2013, Metrix++ Project
+#    Link: http://metrixplusplus.sourceforge.net
+#    
+#    This file is a part of Metrix++ Tool.
+#    
+#    Metrix++ is free software: you can redistribute it and/or modify
+#    it under the terms of the GNU General Public License as published by
+#    the Free Software Foundation, version 3 of the License.
+#    
+#    Metrix++ is distributed in the hope that it will be useful,
+#    but WITHOUT ANY WARRANTY; without even the implied warranty of
+#    MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+#    GNU General Public License for more details.
+#    
+#    You should have received a copy of the GNU General Public License
+#    along with Metrix++.  If not, see <http://www.gnu.org/licenses/>.
+#
+
+# Copied from http://code.activestate.com/recipes/577268-python-data-structure-to-xml-serialization/
+# - indent feature and better formatting added
+# - fixed handling of lists in lists
+# - fixed root object name for dictionaries
+
+INDENT_SPACE_SYMBOL = "    " 
+
+'''
+Py2XML - Python to XML serialization
+
+This code transforms a Python data structure into an XML document
+
+Usage:
+    serializer = Py2XML()
+    xml_string = serializer.parse( python_object )
+    print python_object
+    print xml_string
+'''
+
+class Py2XML():
+
+    def __init__( self ):
+
+        self.data = "" # where we store the processed XML string
+
+    def parse( self, pythonObj, objName=None, indent = 0 ):
+        '''
+        processes Python data structure into XML string
+        needs objName if pythonObj is a List
+        '''
+        if pythonObj == None:
+            return "\n" + (INDENT_SPACE_SYMBOL * indent) + ""
+
+        if isinstance( pythonObj, dict ):
+            self.data = self._PyDict2XML( pythonObj, objName, indent=indent+1 )
+            
+        elif isinstance( pythonObj, list ):
+            # we need name for List object
+            self.data = self._PyList2XML( pythonObj, objName, indent=indent+1 )
+            
+        else:
+            self.data = "\n" + (INDENT_SPACE_SYMBOL * indent) + "<%(n)s>%(o)s</%(n)s>" % { 'n':objName, 'o':str( pythonObj ) }
+            
+        return self.data
+
+    def _PyDict2XML( self, pyDictObj, objName=None, indent = 0 ):
+        '''
+        process Python Dict objects
+        They can store XML attributes and/or children
+        '''
+        tagStr = ""     # XML string for this level
+        attributes = {} # attribute key/value pairs
+        attrStr = ""    # attribute string of this level
+        childStr = ""   # XML string of this level's children
+
+        for k, v in pyDictObj.items():
+
+            if isinstance( v, dict ):
+                # child tags, with attributes
+                childStr += self._PyDict2XML( v, k, indent=indent+1 )
+
+            elif isinstance( v, list ):
+                # child tags, list of children
+                childStr += self._PyList2XML( v, k, indent=indent+1 )
+
+            else:
+                # tag could have many attributes, let's save until later
+                attributes.update( { k:v } )
+
+        if objName == None:
+            return childStr
+
+        # create XML string for attributes
+        for k, v in attributes.items():
+            attrStr += " %s=\"%s\"" % ( k, v )
+
+        # let's assemble our tag string
+        if childStr == "":
+            tagStr += "\n" + (INDENT_SPACE_SYMBOL * indent) + "<%(n)s%(a)s />" % { 'n':objName, 'a':attrStr }
+        else:
+            tagStr += ("\n" + (INDENT_SPACE_SYMBOL * indent) + "<%(n)s%(a)s>%(c)s" + "\n" + (INDENT_SPACE_SYMBOL * indent) + "</%(n)s>") % { 'n':objName, 'a':attrStr, 'c':childStr }
+
+        return tagStr
+
+    def _PyList2XML( self, pyListObj, objName=None, indent = 0 ):
+        '''
+        process Python List objects
+        They have no attributes, just children
+        Lists only hold Dicts or Strings
+        '''
+        tagStr = ""    # XML string for this level
+        childStr = ""  # XML string of children
+
+        for childObj in pyListObj:
+            
+            if isinstance( childObj, dict ):
+                # assume the list's parent key is the plural of its children's
+                # name (e.g. persons > person), so cut off the last character;
+                # this only really works for one level, but is fine in practice
+                childStr += self._PyDict2XML( childObj, objName[:-1], indent=indent+1 )
+            elif isinstance( childObj, list ):
+                # same pluralization assumption as above for nested lists
+                childStr += self._PyList2XML( childObj, objName[:-1], indent=indent+1 )
+            else:
+                childStr += "\n" + (INDENT_SPACE_SYMBOL * (indent + 1)) + "<" + objName[:-1] + ">"
+                for string in childObj:
+                    childStr += str(string)
+                childStr += "</" + objName[:-1] + ">"
+                
+        if objName == None:
+            return childStr
+
+        tagStr += ("\n" + (INDENT_SPACE_SYMBOL * indent) + "<%(n)s>%(c)s" + "\n" + (INDENT_SPACE_SYMBOL * indent) + "</%(n)s>") % { 'n':objName, 'c':childStr }
+
+        return tagStr

+ 26 - 0
mainline/mpp/ext-priority/core.db.post.ini

@@ -0,0 +1,26 @@
+;
+;    Metrix++, Copyright 2009-2013, Metrix++ Project
+;    Link: http://metrixplusplus.sourceforge.net
+;    
+;    This file is a part of Metrix++ Tool.
+;    
+;    Metrix++ is free software: you can redistribute it and/or modify
+;    it under the terms of the GNU General Public License as published by
+;    the Free Software Foundation, version 3 of the License.
+;    
+;    Metrix++ is distributed in the hope that it will be useful,
+;    but WITHOUT ANY WARRANTY; without even the implied warranty of
+;    MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+;    GNU General Public License for more details.
+;    
+;    You should have received a copy of the GNU General Public License
+;    along with Metrix++.  If not, see <http://www.gnu.org/licenses/>.
+;
+
+[Plugin]
+version: 1.0
+package: mpp.db
+module:  post
+class:   Plugin
+depends: None
+enabled: True

+ 26 - 0
mainline/mpp/ext-priority/core.dir.ini

@@ -0,0 +1,26 @@
+;
+;    Metrix++, Copyright 2009-2013, Metrix++ Project
+;    Link: http://metrixplusplus.sourceforge.net
+;    
+;    This file is a part of Metrix++ Tool.
+;    
+;    Metrix++ is free software: you can redistribute it and/or modify
+;    it under the terms of the GNU General Public License as published by
+;    the Free Software Foundation, version 3 of the License.
+;    
+;    Metrix++ is distributed in the hope that it will be useful,
+;    but WITHOUT ANY WARRANTY; without even the implied warranty of
+;    MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+;    GNU General Public License for more details.
+;    
+;    You should have received a copy of the GNU General Public License
+;    along with Metrix++.  If not, see <http://www.gnu.org/licenses/>.
+;
+
+[Plugin]
+version: 1.0
+package: mpp
+module:  dir
+class:   Plugin
+depends: None
+enabled: True

+ 26 - 0
mainline/mpp/ext-priority/core.log.ini

@@ -0,0 +1,26 @@
+;
+;    Metrix++, Copyright 2009-2013, Metrix++ Project
+;    Link: http://metrixplusplus.sourceforge.net
+;    
+;    This file is a part of Metrix++ Tool.
+;    
+;    Metrix++ is free software: you can redistribute it and/or modify
+;    it under the terms of the GNU General Public License as published by
+;    the Free Software Foundation, version 3 of the License.
+;    
+;    Metrix++ is distributed in the hope that it will be useful,
+;    but WITHOUT ANY WARRANTY; without even the implied warranty of
+;    MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+;    GNU General Public License for more details.
+;    
+;    You should have received a copy of the GNU General Public License
+;    along with Metrix++.  If not, see <http://www.gnu.org/licenses/>.
+;
+
+[Plugin]
+version: 1.0
+package: mpp
+module:  log
+class:   Plugin
+depends: None
+enabled: True

+ 132 - 0
mainline/mpp/loader.py

@@ -0,0 +1,132 @@
+#
+#    Metrix++, Copyright 2009-2013, Metrix++ Project
+#    Link: http://metrixplusplus.sourceforge.net
+#    
+#    This file is a part of Metrix++ Tool.
+#    
+#    Metrix++ is free software: you can redistribute it and/or modify
+#    it under the terms of the GNU General Public License as published by
+#    the Free Software Foundation, version 3 of the License.
+#    
+#    Metrix++ is distributed in the hope that it will be useful,
+#    but WITHOUT ANY WARRANTY; without even the implied warranty of
+#    MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+#    GNU General Public License for more details.
+#    
+#    You should have received a copy of the GNU General Public License
+#    along with Metrix++.  If not, see <http://www.gnu.org/licenses/>.
+#
+
+import mpp.api
+
+import os
+import fnmatch
+
+class Loader(object):
+
+    def __init__(self):
+        self.plugins = []
+        self.parsers = []
+        self.hash    = {}
+        self.db = mpp.api.Loader()
+        
+    def get_database_loader(self):
+        return self.db
+
+    def get_plugin(self, name):
+        return self.hash[name]['instance']
+    
+    def iterate_plugins(self, is_reversed = False):
+        if is_reversed == False:
+            for item in self.plugins:
+                yield item['instance']
+        else:
+            for item in reversed(self.plugins):
+                yield item['instance']
+            
+    def register_parser(self, fnmatch_exp_list, parser):
+        self.parsers.append((fnmatch_exp_list, parser))
+
+    def get_parser(self, file_path):
+        for parser in self.parsers:
+            for fnmatch_exp in parser[0]:
+                if fnmatch.fnmatch(file_path, fnmatch_exp):
+                    return parser[1]
+        return None
+
+    def load(self, directory, optparser, args):
+        import sys
+        sys.path.append(directory)
+        
+        def load_recursively(manager, directory):
+            import ConfigParser
+            import re
+        
+            pattern = re.compile(r'.*[.]ini$', flags=re.IGNORECASE)
+        
+            dirList = os.listdir(directory)
+            for fname in dirList:
+                fname = os.path.join(directory, fname)
+                if os.path.isdir(fname):
+                    load_recursively(manager, fname)
+                elif re.match(pattern, fname):
+                    config = ConfigParser.ConfigParser()
+                    config.read(fname)
+                    item = {'package': config.get('Plugin', 'package'),
+                            'module': config.get('Plugin', 'module'),
+                            'class': config.get('Plugin', 'class'),
+                            'version': config.get('Plugin', 'version'),
+                            'depends': config.get('Plugin', 'depends'),
+                            'enabled': config.getboolean('Plugin', 'enabled')}
+                    if item['enabled']:
+                        manager.plugins.append(item)
+                        manager.hash[item['package'] + '.' + item['module']] = item
+
+        load_recursively(self, os.path.join(os.path.dirname(os.path.abspath(__file__)), 'ext-priority'))
+        load_recursively(self, directory)
+        # TODO check dependencies
+        for item in self.plugins:
+            plugin = __import__(item['package'], globals(), locals(), [item['module']], -1)
+            module_attr = plugin.__getattribute__(item['module'])
+            class_attr = module_attr.__getattribute__(item['class'])
+            item['instance'] = class_attr.__new__(class_attr)
+            item['instance'].__init__()
+            item['instance'].set_name(item['package'] + "." + item['module'])
+            item['instance'].set_version(item['version'])
+            item['instance'].set_plugin_loader(self)
+
+        for item in self.iterate_plugins():
+            if (isinstance(item, mpp.api.IConfigurable)):
+                item.declare_configuration(optparser)
+
+        (options, args) = optparser.parse_args(args)
+        for item in self.iterate_plugins():
+            if (isinstance(item, mpp.api.IConfigurable)):
+                item.configure(options)
+
+        for item in self.iterate_plugins():
+            item.initialize()
+            
+        return args
+
+    def unload(self):
+        for item in self.iterate_plugins(is_reversed = True):
+            item.terminate()
+
+    def run(self, args):
+        exit_code = 0
+        for item in self.iterate_plugins():
+            if (isinstance(item, mpp.api.IRunable)):
+                exit_code += item.run(args)
+        return exit_code
+
+    def __repr__(self):
+        result = object.__repr__(self) + ' with loaded:'
+        for item in self.iterate_plugins():
+            result += '\n\t' + item.__repr__()
+            if isinstance(item, mpp.api.Parent):
+                result += ' with subscribed:'
+                for child in item.iterate_children():
+                    result += '\n\t\t' + child.__repr__()
+        return result
+
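
A sketch of the parser registry above (cpp_parser stands in for any IParser
plugin; the first matching fnmatch expression wins):

    loader = Loader()
    loader.register_parser(['*.cpp', '*.h'], cpp_parser)
    parser = loader.get_parser('src/main.cpp')   # returns cpp_parser
    parser = loader.get_parser('readme.txt')     # returns None, file is skipped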

+ 58 - 0
mainline/mpp/log.py

@@ -0,0 +1,58 @@
+#
+#    Metrix++, Copyright 2009-2013, Metrix++ Project
+#    Link: http://metrixplusplus.sourceforge.net
+#    
+#    This file is a part of Metrix++ Tool.
+#    
+#    Metrix++ is free software: you can redistribute it and/or modify
+#    it under the terms of the GNU General Public License as published by
+#    the Free Software Foundation, version 3 of the License.
+#    
+#    Metrix++ is distributed in the hope that it will be useful,
+#    but WITHOUT ANY WARRANTY; without even the implied warranty of
+#    MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+#    GNU General Public License for more details.
+#    
+#    You should have received a copy of the GNU General Public License
+#    along with Metrix++.  If not, see <http://www.gnu.org/licenses/>.
+#
+
+import mpp.api
+import logging
+import os
+
+class Plugin(mpp.api.BasePlugin, mpp.api.IConfigurable):
+    
+    def declare_configuration(self, parser, default_value='INFO'):
+        allowed_values = ['DEBUG','INFO','WARNING','ERROR']
+        default_value_cur = default_value
+        if os.environ.has_key('METRIXPLUSPLUS_LOG_LEVEL') and os.environ['METRIXPLUSPLUS_LOG_LEVEL'] in allowed_values:
+            default_value_cur = os.environ['METRIXPLUSPLUS_LOG_LEVEL']
+        parser.add_option("--log-level", "--ll", default=default_value_cur, choices=allowed_values,
+                         help="Defines log level. Possible values are 'DEBUG','INFO','WARNING' or 'ERROR'. "
+                         "Default value is inherited from environment variable 'METRIXPLUSPLUS_LOG_LEVEL' if set. "
+                         "[default: " + default_value + "]")
+    
+    def configure(self, options):
+        if options.__dict__['log_level'] == 'ERROR':
+            log_level = logging.ERROR
+        elif options.__dict__['log_level'] == 'WARNING':
+            log_level = logging.WARNING
+        elif options.__dict__['log_level'] == 'INFO':
+            log_level = logging.INFO
+        elif options.__dict__['log_level'] == 'DEBUG':
+            log_level = logging.DEBUG
+        else:
+            raise AssertionError("Unhandled choice of log level")
+        
+        self.level = log_level
+        logging.getLogger().setLevel(self.level)
+        os.environ['METRIXPLUSPLUS_LOG_LEVEL'] = options.__dict__['log_level']
+        logging.warn("Logging enabled with " + options.__dict__['log_level'] + " level")
+
+    def initialize(self):
+        super(Plugin, self).initialize()
+        set_default_format()
+
+def set_default_format():
+    logging.basicConfig(format="[LOG]: %(levelname)s:\t%(message)s", level=logging.WARN)

+ 114 - 0
mainline/mpp/utils.py

@@ -0,0 +1,114 @@
+#
+#    Metrix++, Copyright 2009-2013, Metrix++ Project
+#    Link: http://metrixplusplus.sourceforge.net
+#    
+#    This file is a part of Metrix++ Tool.
+#    
+#    Metrix++ is free software: you can redistribute it and/or modify
+#    it under the terms of the GNU General Public License as published by
+#    the Free Software Foundation, version 3 of the License.
+#    
+#    Metrix++ is distributed in the hope that it will be useful,
+#    but WITHOUT ANY WARRANTY; without even the implied warranty of
+#    MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+#    GNU General Public License for more details.
+#    
+#    You should have received a copy of the GNU General Public License
+#    along with Metrix++.  If not, see <http://www.gnu.org/licenses/>.
+#
+
+import logging
+import re
+
+class FileRegionsMatcher(object):
+
+    class FileRegionsDisposableGetter(object):
+        
+        def __init__(self, file_data):
+            self.checksums = {}
+            self.names = {}
+            
+            for each in file_data.iterate_regions():
+                if each.get_checksum() not in self.checksums:
+                    self.checksums[each.get_checksum()] = []
+                self.checksums[each.get_checksum()].append((each.get_id(), each.get_name())) 
+                
+                if each.get_name() not in self.names:
+                    self.names[each.get_name()] = []
+                self.names[each.get_name()].append((each.get_id(), each.get_checksum())) 
+            
+        def get_next_id_once_by_checksum(self, checksum):
+            if checksum not in self.checksums.keys():
+                return None
+    
+            if len(self.checksums[checksum]) == 0:
+                return None
+            
+            elem = self.checksums[checksum].pop(0)
+            next_id = elem[0]
+            next_name = elem[1]
+    
+            self.names[next_name].remove((next_id, checksum))
+            return next_id
+    
+        def get_next_id_once_by_name(self, name):
+            if name not in self.names.keys():
+                return None
+            
+            if len(self.names[name]) == 0:
+                return None
+            
+            elem = self.names[name].pop(0)
+            next_id = elem[0]
+            next_checksum = elem[1]
+    
+            self.checksums[next_checksum].remove((next_id, name))
+            return next_id
+    
+    def __init__(self, file_data, prev_file_data):
+        self.ids = [None] # add one to shift id from zero
+        
+        once_filter = self.FileRegionsDisposableGetter(prev_file_data)
+        unmatched_region_ids = []
+        for (ind, region) in enumerate(file_data.iterate_regions()):
+            assert(ind + 1 == region.get_id())
+            # Identify corresponding region in previous database (attempt by checksum)
+            prev_id = once_filter.get_next_id_once_by_checksum(region.checksum)
+            if prev_id != None:
+                self.ids.append((prev_id, False))
+            else:
+                unmatched_region_ids.append(region.get_id())
+                self.ids.append((None, True))
+                            
+        # Identify corresponding region in previous database (attempt by name)
+        for region_id in unmatched_region_ids: 
+            prev_id = once_filter.get_next_id_once_by_name(file_data.get_region(region_id).name)
+            if prev_id != None:
+                self.ids[region_id] = (prev_id, True)
+    
+    def get_prev_id(self, curr_id):
+        return self.ids[curr_id][0]
+
+    def is_matched(self, curr_id):
+        return (self.ids[curr_id][0] != None)
+
+    def is_modified(self, curr_id):
+        return self.ids[curr_id][1]
+
+def check_db_metadata(loader, loader_prev):
+    for each in loader.iterate_properties():
+        prev = loader_prev.get_property(each.name)
+        if prev != each.value:
+            logging.warn("Previous data file has got different metadata:")
+            logging.warn(" - identification of change trends can be not reliable")
+            logging.warn(" - use 'info' tool to view more details")
+            return 1
+    return 0
+
+def preprocess_path(path):
+    path = re.sub(r'''[\\]+''', "/", path)
+    logging.info("Processing: " + path)
+    return path
+
+def report_bad_path(path):
+    logging.error("Specified path '" + path + "' is invalid: not found in the database records.")

+ 130 - 0
mainline/mpp/warn.py

@@ -0,0 +1,130 @@
+#
+#    Metrix++, Copyright 2009-2013, Metrix++ Project
+#    Link: http://metrixplusplus.sourceforge.net
+#    
+#    This file is a part of Metrix++ Tool.
+#    
+#    Metrix++ is free software: you can redistribute it and/or modify
+#    it under the terms of the GNU General Public License as published by
+#    the Free Software Foundation, version 3 of the License.
+#    
+#    Metrix++ is distributed in the hope that it will be useful,
+#    but WITHOUT ANY WARRANTY; without even the implied warranty of
+#    MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+#    GNU General Public License for more details.
+#    
+#    You should have received a copy of the GNU General Public License
+#    along with Metrix++.  If not, see <http://www.gnu.org/licenses/>.
+#
+
+import re
+
+import mpp.api
+
+class Plugin(mpp.api.Plugin, mpp.api.IConfigurable):
+    
+    MODE_NEW     = 0x01
+    MODE_TREND   = 0x03
+    MODE_TOUCHED = 0x07
+    MODE_ALL     = 0x15
+    
+    
+    def declare_configuration(self, parser):
+        self.parser = parser
+        parser.add_option("--warn-mode", "--wm", default='all', choices=['new', 'trend', 'touched', 'all'],
+                         help="Defines the warnings mode. "
+                         "'new' - warnings for new regions only, "
+                         "'trend' - warnings for new regions and for bad trend of modified regions, "
+                         "'touched' - warnings for new regions and modified regions, "
+                         "'all' - all warnings active "
+                         "[default: %default]")
+
+        parser.add_option("--min-limit", "--min", action="multiopt",
+                          help="A threshold per 'namespace:field' metric in order to select regions, "
+                          "which have got metric value less than the specified limit. "
+                          "This option can be specified multiple times, if it is necessary to apply several limits. "
+                          "Should be in the format: <namespace>:<field>:<limit-value>, for example: "
+                          "'std.code.lines:comments:1'.")
+        parser.add_option("--max-limit", "--max", action="multiopt",
+                          help="A threshold per 'namespace:field' metric in order to select regions, "
+                          "which have got metric value more than the specified limit. "
+                          "This option can be specified multiple times, if it is necessary to apply several limits. "
+                          "Should be in the format: <namespace>:<field>:<limit-value>, for example: "
+                          "'std.code.complexity:cyclomatic:7'.")
+        
+    def configure(self, options):
+        if options.__dict__['warn_mode'] == 'new':
+            self.mode = self.MODE_NEW
+        elif options.__dict__['warn_mode'] == 'trend':
+            self.mode = self.MODE_TREND
+        elif options.__dict__['warn_mode'] == 'touched':
+            self.mode = self.MODE_TOUCHED
+        elif options.__dict__['warn_mode'] == 'all':
+            self.mode = self.MODE_ALL
+            
+        if self.mode != self.MODE_ALL and options.__dict__['db_file_prev'] == None:
+            self.parser.error("The mode '" + options.__dict__['warn_mode'] + "' for 'general.warn' option requires '--db-file-prev' option set")
+
+        class Limit(object):
+            def __init__(self, limit_type, limit, namespace, field, db_filter):
+                self.type = limit_type
+                self.limit = limit
+                self.namespace = namespace
+                self.field = field
+                self.filter = db_filter
+                
+            def __repr__(self):
+                return "namespace '" + self.namespace + "', filter '" + str(self.filter) + "'"
+        
+        self.limits = []
+        pattern = re.compile(r'''([^:]+)[:]([^:]+)[:]([-+]?[0-9]+(?:[.][0-9]+)?)''')
+        if options.__dict__['max_limit'] != None:
+            for each in options.__dict__['max_limit']:
+                match = re.match(pattern, each)
+                if match == None:
+                    self.parser.error("Invalid format of the '--max-limit' option: " + each)
+                limit = Limit("max", float(match.group(3)), match.group(1), match.group(2), (match.group(2), '>', float(match.group(3))))
+                self.limits.append(limit)
+        if options.__dict__['min_limit'] != None:
+            for each in options.__dict__['min_limit']:  
+                match = re.match(pattern, each)
+                if match == None:
+                    self.parser.error("Invalid format of the '--min-limit' option: " + each)
+                limit = Limit("min", float(match.group(3)), match.group(1), match.group(2), (match.group(2), '<', float(match.group(3))))
+                self.limits.append(limit)
+                
+    def verify_namespaces(self, valid_namespaces):
+        valid = []
+        for each in valid_namespaces:
+            valid.append(each)
+        for each in self.limits:
+            if each.namespace not in valid:
+                self.parser.error("Invalid limit option (namespace does not exist): " + each.namespace)
+
+    def verify_fields(self, namespace, valid_fields):
+        valid = []
+        for each in valid_fields:
+            valid.append(each)
+        for each in self.limits:
+            if each.namespace == namespace:
+                if each.field not in valid:
+                    self.parser.error("Invalid limit option (field does not exist): " + each.namespace + ":" + each.field)
+                    
+    def iterate_limits(self):
+        for each in self.limits:
+            yield each   
+
+    def is_mode_matched(self, limit, value, diff, is_modified):
+        if is_modified == None:
+            return True
+        if self.mode == self.MODE_ALL:
+            return True 
+        if self.mode == self.MODE_TOUCHED and is_modified == True:
+            return True 
+        if self.mode == self.MODE_TREND and is_modified == True:
+            if limit < value and diff > 0:
+                return True
+            if limit > value and diff < 0:
+                return True
+        return False
+        
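
The limit parser above turns a command-line threshold into a database filter;
for example, '--max-limit std.code.complexity:cyclomatic:7' splits as follows:

    import re
    pattern = re.compile(r'''([^:]+)[:]([^:]+)[:]([-+]?[0-9]+(?:[.][0-9]+)?)''')
    m = re.match(pattern, 'std.code.complexity:cyclomatic:7')
    m.group(1), m.group(2), float(m.group(3))
    # -> ('std.code.complexity', 'cyclomatic', 7.0),
    #    stored with the db filter ('cyclomatic', '>', 7.0)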

+ 6 - 6
mainline/tools/collect.py

@@ -20,17 +20,17 @@
 
 import os.path
 
-import core.loader
-import core.cmdparser
+import mpp.loader
+import mpp.cmdparser
 
-import core.api
-class Tool(core.api.ITool):
+import mpp.api
+class Tool(mpp.api.ITool):
     def run(self, tool_args):
         return main(tool_args)
 
 def main(tool_args):
-    loader = core.loader.Loader()
-    parser =core.cmdparser.MultiOptionParser(usage="Usage: %prog collect [options] -- [path 1] ... [path N]")
+    loader = mpp.loader.Loader()
+    parser =mpp.cmdparser.MultiOptionParser(usage="Usage: %prog collect [options] -- [path 1] ... [path N]")
     args = loader.load(os.path.join(os.environ['METRIXPLUSPLUS_INSTALL_DIR'], 'ext'), parser, tool_args)
     exit_code = loader.run(args)
     loader.unload()

+ 15 - 14
mainline/tools/debug.py

@@ -21,22 +21,22 @@
 import logging
 import cgi
 
-import core.api
-import core.log
-import core.cmdparser
-import core.db.post
+import mpp.api
+import mpp.log
+import mpp.cmdparser
+import mpp.db.post
 
-import core.utils
+import mpp.utils
 
-class Tool(core.api.ITool):
+class Tool(mpp.api.ITool):
     def run(self, tool_args):
         return main(tool_args)
 
 def main(tool_args):
-    log_plugin = core.log.Plugin()
-    db_plugin = core.db.post.Plugin()
+    log_plugin = mpp.log.Plugin()
+    db_plugin = mpp.db.post.Plugin()
 
-    parser = core.cmdparser.MultiOptionParser(usage="Usage: %prog debug [options] -- [path 1] ... [path N]")
+    parser = mpp.cmdparser.MultiOptionParser(usage="Usage: %prog debug [options] -- [path 1] ... [path N]")
     log_plugin.declare_configuration(parser)
     db_plugin.declare_configuration(parser)
     parser.add_option("-m", "--mode", default='dumphtml', choices=['dumphtml'],
@@ -46,9 +46,10 @@ def main(tool_args):
     log_plugin.configure(options)
     db_plugin.configure(options)
 
-    loader = core.api.Loader()
-    if loader.open_database(db_plugin.dbfile) == False:
-        parser.error("Can not open file: " + db_plugin.dbfile)
+    log_plugin.initialize()
+    db_plugin.initialize()
+
+    loader = db_plugin.get_loader()
 
     if options.__dict__['mode'] == 'dumphtml':
         return dumphtml(args, loader)
@@ -60,11 +61,11 @@ def dumphtml(args, loader):
     result = ""
     result += '<html><body>'
     for path in args:
-        path = core.utils.preprocess_path(path)
+        path = mpp.utils.preprocess_path(path)
         
         data = loader.load_file_data(path)
         if data == None:
-            core.utils.report_bad_path(path)
+            mpp.utils.report_bad_path(path)
             exit_code += 1
             continue
         

+ 15 - 19
mainline/tools/export.py

@@ -22,23 +22,23 @@
 import logging
 import csv
 
-import core.api
-import core.log
-import core.db.post
-import core.cmdparser
+import mpp.api
+import mpp.log
+import mpp.db.post
+import mpp.cmdparser
 
-import core.utils
+import mpp.utils
 
-class Tool(core.api.ITool):
+class Tool(mpp.api.ITool):
     def run(self, tool_args):
         return main(tool_args)
 
 def main(tool_args):
     
-    log_plugin = core.log.Plugin()
-    db_plugin = core.db.post.Plugin()
+    log_plugin = mpp.log.Plugin()
+    db_plugin = mpp.db.post.Plugin()
 
-    parser = core.cmdparser.MultiOptionParser(usage="Usage: %prog export [options] -- [path 1] ... [path N]")
+    parser = mpp.cmdparser.MultiOptionParser(usage="Usage: %prog export [options] -- [path 1] ... [path N]")
     log_plugin.declare_configuration(parser)
     db_plugin.declare_configuration(parser)
     parser.add_option("--format", "--ft", default='csv', choices=['csv', 'xml'], help="Format of the output data. "
@@ -49,15 +49,11 @@ def main(tool_args):
     db_plugin.configure(options)
     out_format = options.__dict__['format']
 
-    loader_prev = core.api.Loader()
-    if db_plugin.dbfile_prev != None:
-        if loader_prev.open_database(db_plugin.dbfile_prev) == False:
-            parser.error("Can not open file: " + db_plugin.dbfile_prev)
+    log_plugin.initialize()
+    db_plugin.initialize()
 
-
-    loader = core.api.Loader()
-    if loader.open_database(db_plugin.dbfile) == False:
-        parser.error("Can not open file: " + db_plugin.dbfile)
+    loader_prev = db_plugin.get_loader_prev()
+    loader = db_plugin.get_loader()
     
     # Check for versions consistency
     for each in loader.iterate_properties():
@@ -106,7 +102,7 @@ def export_to_stdout(out_format, paths, loader, loader_prev):
         assert False, "Unknown output format " + out_format
 
     for path in paths:
-        path = core.utils.preprocess_path(path)
+        path = mpp.utils.preprocess_path(path)
         
         files = loader.iterate_file_data(path)
         if files != None:
@@ -121,7 +117,7 @@ def export_to_stdout(out_format, paths, loader, loader_prev):
                     per_file_data.append(file_data.get_data(column[0], column[1]))
                 csvWriter.writerow([file_data.get_path(), None] + per_file_data)
         else:
-            core.utils.report_bad_path(path)
+            mpp.utils.report_bad_path(path)
             exit_code += 1
 
     if out_format == 'xml':

+ 15 - 19
mainline/tools/info.py

@@ -18,23 +18,23 @@
 #
 
 
-import core.api
-import core.db.post
-import core.log
-import core.cmdparser
+import mpp.api
+import mpp.db.post
+import mpp.log
+import mpp.cmdparser
 
-import core.utils
+import mpp.utils
 
-class Tool(core.api.ITool):
+class Tool(mpp.api.ITool):
     def run(self, tool_args):
         return main(tool_args)
 
 def main(tool_args):
     exit_code = 0
-    log_plugin = core.log.Plugin()
-    db_plugin = core.db.post.Plugin()
+    log_plugin = mpp.log.Plugin()
+    db_plugin = mpp.db.post.Plugin()
 
-    parser = core.cmdparser.MultiOptionParser(usage="Usage: %prog info [options] -- [path 1] ... [path N]")
+    parser = mpp.cmdparser.MultiOptionParser(usage="Usage: %prog info [options] -- [path 1] ... [path N]")
     log_plugin.declare_configuration(parser)
     db_plugin.declare_configuration(parser)
 
@@ -42,15 +42,11 @@ def main(tool_args):
     log_plugin.configure(options)
     db_plugin.configure(options)
     
-    loader = core.api.Loader()
-    if loader.open_database(db_plugin.dbfile) == False:
-        parser.error("Can not open file: " + db_plugin.dbfile)
+    log_plugin.initialize()
+    db_plugin.initialize()
 
-    loader_prev = None
-    if db_plugin.dbfile_prev != None:
-        loader_prev = core.api.Loader()
-        if loader_prev.open_database(db_plugin.dbfile_prev) == False:
-            parser.error("Can not open file: " + db_plugin.dbfile_prev)
+    loader_prev = db_plugin.get_loader_prev(none_if_empty=True)
+    loader = db_plugin.get_loader()
 
     print "Properties:"
     for each in loader.iterate_properties():
@@ -86,11 +82,11 @@ def main(tool_args):
     else:
         paths = args
     for path in paths:
-        path = core.utils.preprocess_path(path)
+        path = mpp.utils.preprocess_path(path)
 
         file_iterator = loader.iterate_file_data(path=path)
         if file_iterator == None:
-            core.utils.report_bad_path(path)
+            mpp.utils.report_bad_path(path)
             exit_code += 1
             continue
         for each in file_iterator:

+ 23 - 26
mainline/tools/limit.py

@@ -19,28 +19,28 @@
 
 import logging
 
-import core.log
-import core.db.post
-import core.utils
-import core.cout
-import core.warn
-import core.cmdparser
+import mpp.log
+import mpp.db.post
+import mpp.utils
+import mpp.cout
+import mpp.warn
+import mpp.cmdparser
 
-import core.utils
+import mpp.utils
 
-import core.api
-class Tool(core.api.ITool):
+import mpp.api
+class Tool(mpp.api.ITool):
     def run(self, tool_args):
         return main(tool_args)
 
 def main(tool_args):
     
     exit_code = 0
-    log_plugin = core.log.Plugin()
-    db_plugin = core.db.post.Plugin()
-    warn_plugin = core.warn.Plugin()
+    log_plugin = mpp.log.Plugin()
+    db_plugin = mpp.db.post.Plugin()
+    warn_plugin = mpp.warn.Plugin()
 
-    parser = core.cmdparser.MultiOptionParser(usage="Usage: %prog limit [options] -- [path 1] ... [path N]")
+    parser = mpp.cmdparser.MultiOptionParser(usage="Usage: %prog limit [options] -- [path 1] ... [path N]")
     log_plugin.declare_configuration(parser)
     db_plugin.declare_configuration(parser)
     warn_plugin.declare_configuration(parser)
@@ -58,14 +58,11 @@ def main(tool_args):
     hotspots = options.__dict__['hotspots']
     no_suppress = options.__dict__['disable_suppressions']
 
-    loader_prev = core.api.Loader()
-    if db_plugin.dbfile_prev != None:
-        if loader_prev.open_database(db_plugin.dbfile_prev) == False:
-            parser.error("Can not open file: " + db_plugin.dbfile_prev)
+    log_plugin.initialize()
+    db_plugin.initialize()
 
-    loader = core.api.Loader()
-    if loader.open_database(db_plugin.dbfile) == False:
-        parser.error("Can not open file: " + db_plugin.dbfile)
+    loader_prev = db_plugin.get_loader_prev()
+    loader = db_plugin.get_loader()
     
     warn_plugin.verify_namespaces(loader.iterate_namespace_names())
     for each in loader.iterate_namespace_names():
@@ -73,7 +70,7 @@ def main(tool_args):
     
     # Check for versions consistency
     if db_plugin.dbfile_prev != None:
-        core.utils.check_db_metadata(loader, loader_prev)
+        mpp.utils.check_db_metadata(loader, loader_prev)
     
     paths = None
     if len(args) == 0:
@@ -87,7 +84,7 @@ def main(tool_args):
         modified_file_ids = get_list_of_modified_files(loader, loader_prev)
         
     for path in paths:
-        path = core.utils.preprocess_path(path)
+        path = mpp.utils.preprocess_path(path)
         
         for limit in warn_plugin.iterate_limits():
             logging.info("Applying limit: " + str(limit))
@@ -108,7 +105,7 @@ def main(tool_args):
                                                    sort_by=sort_by,
                                                    limit_by=limit_by)
             if selected_data == None:
-                core.utils.report_bad_path(path)
+                mpp.utils.report_bad_path(path)
                 exit_code += 1
                 continue
             
@@ -122,14 +119,14 @@ def main(tool_args):
                         diff = 0
                         is_modified = False
                     else:
-                        matcher = core.utils.FileRegionsMatcher(file_data, file_data_prev)
+                        matcher = mpp.utils.FileRegionsMatcher(file_data, file_data_prev)
                         prev_id = matcher.get_prev_id(select_data.get_region().get_id())
                         if matcher.is_matched(select_data.get_region().get_id()):
                             if matcher.is_modified(select_data.get_region().get_id()):
                                 is_modified = True
                             else:
                                 is_modified = False
-                            diff = core.api.DiffData(select_data,
+                            diff = mpp.api.DiffData(select_data,
                                                            file_data_prev.get_region(prev_id)).get_data(limit.namespace, limit.field)
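
For context, this hunk touches the cross-snapshot region diff used by the limit
tool: FileRegionsMatcher pairs a region in the current file_data with its
counterpart in file_data_prev, and DiffData computes the per-field delta. A
condensed restatement of the pattern, with names taken from the hunk (the
matcher internals live in mpp.utils and are not shown in this commit):

# Pair each region in the current snapshot with its counterpart in the
# previous one, then compute the delta for the limited metric field.
matcher = mpp.utils.FileRegionsMatcher(file_data, file_data_prev)
region_id = select_data.get_region().get_id()
prev_id = matcher.get_prev_id(region_id)
if matcher.is_matched(region_id):
    is_modified = matcher.is_modified(region_id)
    diff = mpp.api.DiffData(select_data,
                            file_data_prev.get_region(prev_id)).get_data(limit.namespace, limit.field)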
 
                 if (warn_plugin.is_mode_matched(limit.limit,
@@ -211,7 +208,7 @@ def report_limit_exceeded(path, cursor, namespace, field, region_name,
                ("Change trend", '{0:{1}}'.format(trend_value, '+' if trend_value else '')),
                ("Limit", stat_limit),
                ("Suppressed", is_suppressed)]
-    core.cout.notify(path, cursor, core.cout.SEVERITY_WARNING, message, details)
+    mpp.cout.notify(path, cursor, mpp.cout.SEVERITY_WARNING, message, details)
 
     
     

+ 6 - 6
mainline/tools/test.py

@@ -22,19 +22,19 @@ import subprocess
 import os.path
 import itertools
 
-import core.log
-import core.cmdparser
+import mpp.log
+import mpp.cmdparser
 
-import core.api
-class Tool(core.api.ITool):
+import mpp.api
+class Tool(mpp.api.ITool):
     def run(self, tool_args):
         return main(tool_args)
 
 def main(tool_args):
     exit_code = 0
-    log_plugin = core.log.Plugin()
+    log_plugin = mpp.log.Plugin()
 
-    parser = core.cmdparser.MultiOptionParser(usage="Usage: %prog test [options] -- [testgroup-dir-path-1[/testsuite-file-path-1]] ... [...path-N]")
+    parser = mpp.cmdparser.MultiOptionParser(usage="Usage: %prog test [options] -- [testgroup-dir-path-1[/testsuite-file-path-1]] ... [...path-N]")
     log_plugin.declare_configuration(parser, default_value='ERROR')
     parser.add_option("-g", "--generate-golds", "--gg", action="store_true", default=False,
                          help="If the option is set (True), new gold files are generated (replacing existing) [default: %default]")

+ 18 - 18
mainline/tools/view.py

@@ -18,25 +18,25 @@
 #
 
 
-import core.log
-import core.db.post
-import core.utils
-import core.cmdparser
-import core.export.convert
+import mpp.log
+import mpp.db.post
+import mpp.utils
+import mpp.cmdparser
+import mpp.export.convert
 
-import core.utils
+import mpp.utils
 
-import core.api
-class Tool(core.api.ITool):
+import mpp.api
+class Tool(mpp.api.ITool):
     def run(self, tool_args):
         return main(tool_args)
 
 def main(tool_args):
     
-    log_plugin = core.log.Plugin()
-    db_plugin = core.db.post.Plugin()
+    log_plugin = mpp.log.Plugin()
+    db_plugin = mpp.db.post.Plugin()
 
-    parser = core.cmdparser.MultiOptionParser(usage="Usage: %prog view [options] -- [path 1] ... [path N]")
+    parser = mpp.cmdparser.MultiOptionParser(usage="Usage: %prog view [options] -- [path 1] ... [path N]")
     log_plugin.declare_configuration(parser)
     db_plugin.declare_configuration(parser)
     parser.add_option("--format", "--ft", default='xml', choices=['txt', 'xml', 'python'], help="Format of the output data. "
@@ -59,7 +59,7 @@ def main(tool_args):
 
     # Check for versions consistency
     if db_plugin.dbfile_prev != None:
-        core.utils.check_db_metadata(loader, loader_prev)
+        mpp.utils.check_db_metadata(loader, loader_prev)
     
     paths = None
     if len(args) == 0:
@@ -82,7 +82,7 @@ def export_to_str(out_format, paths, loader, loader_prev, nest_regions):
         result += "{'export': ["
 
     for (ind, path) in enumerate(paths):
-        path = core.utils.preprocess_path(path)
+        path = mpp.utils.preprocess_path(path)
         
         aggregated_data = loader.load_aggregated_data(path)
         aggregated_data_tree = {}
@@ -93,7 +93,7 @@ def export_to_str(out_format, paths, loader, loader_prev, nest_regions):
             subdirs = aggregated_data.get_subdirs()
             subfiles = aggregated_data.get_subfiles()
         else:
-            core.utils.report_bad_path(path)
+            mpp.utils.report_bad_path(path)
             exit_code += 1
         aggregated_data_prev = loader_prev.load_aggregated_data(path)
         if aggregated_data_prev != None:
@@ -114,14 +114,14 @@ def export_to_str(out_format, paths, loader, loader_prev, nest_regions):
                 "subfiles": subfiles}
 
         if out_format == 'txt':
-            result += core.export.convert.to_txt(data, root_name = "data") + "\n"
+            result += mpp.export.convert.to_txt(data, root_name = "data") + "\n"
         elif out_format == 'xml':
-            result += core.export.convert.to_xml(data, root_name = "data") + "\n"
+            result += mpp.export.convert.to_xml(data, root_name = "data") + "\n"
         elif out_format == 'python':
             postfix = ""
             if ind < len(paths) - 1:
                 postfix = ", "
-            result += core.export.convert.to_python(data, root_name = "data") + postfix
+            result += mpp.export.convert.to_python(data, root_name = "data") + postfix
 
     if out_format == 'txt':
         result += "\n"
@@ -137,7 +137,7 @@ def append_regions(file_data_tree, file_data, file_data_prev, nest_regions):
     if file_data_prev != None:
         file_data_tree = append_diff(file_data_tree,
                                      file_data_prev.get_data_tree())
-        regions_matcher = core.utils.FileRegionsMatcher(file_data, file_data_prev)
+        regions_matcher = mpp.utils.FileRegionsMatcher(file_data, file_data_prev)
     
     if nest_regions == False:
         regions = []