
api improvement

avkonst 11 years ago
Parent
Current commit
3c692286e3

+ 560 - 22
mainline/core/api.py

@@ -18,6 +18,14 @@
 #
 
 import logging
+import os.path
+import core.db.sqlite
+
+##############################################################################
+#
+# 
+#
+##############################################################################
 
 class Data(object):
 
@@ -101,7 +109,7 @@ class LoadableData(Data):
             self.load_namespace(each)
         return Data.get_data_tree(self)
     
-class FileRegionData(LoadableData):
+class Region(LoadableData):
     
     class T(object):
         NONE      = 0x00
@@ -239,7 +247,7 @@ class FileData(LoadableData):
     def get_content(self):
         return self.content
 
-    def internal_append_region(self, region):
+    def _internal_append_region(self, region):
         # here we apply some magic - we rely on special ordering of coming regions,
         # which is supported by code parsers
         prev_id = None
@@ -259,7 +267,7 @@ class FileData(LoadableData):
         if self.regions == None:
             self.regions = []
             for each in self.loader.db.iterate_regions(self.get_id()):
-                self.internal_append_region(FileRegionData(self.loader,
+                self._internal_append_region(Region(self.loader,
                                                    self.get_id(),
                                                    each.region_id,
                                                    each.name,
@@ -276,7 +284,7 @@ class FileData(LoadableData):
         if self.regions == None:
             self.regions = [] # do not load in time of collection
         new_id = len(self.regions) + 1
-        self.internal_append_region(FileRegionData(self.loader, self.get_id(), new_id, region_name, offset_begin, offset_end, line_begin, line_end, cursor_line, group, checksum))
+        self._internal_append_region(Region(self.loader, self.get_id(), new_id, region_name, offset_begin, offset_end, line_begin, line_end, cursor_line, group, checksum))
         self.loader.db.create_region(self.file_id, new_id, region_name, offset_begin, offset_end, line_begin, line_end, cursor_line, group, checksum)
         return new_id
         
@@ -285,9 +293,9 @@ class FileData(LoadableData):
         return self.regions[region_id - 1]
     
     def get_region_types(self):
-        return FileRegionData.T
+        return Region.T
 
-    def iterate_regions(self, filter_group = FileRegionData.T.ANY):
+    def iterate_regions(self, filter_group = Region.T.ANY):
         self.load_regions()
         for each in self.regions:
             if each.group & filter_group:
@@ -455,6 +463,551 @@ class FileData(LoadableData):
     def __repr__(self):
         return Data.__repr__(self) + " and regions " + self.regions.__repr__()
 
+class AggregatedData(Data):
+    
+    def __init__(self, loader, path):
+        Data.__init__(self)
+        self.path = path
+        self.loader = loader
+        self.subdirs = None
+        self.subfiles = None
+        
+    def get_subdirs(self):
+        if self.subdirs != None:
+            return self.subdirs
+        self.subdirs = []
+        if self.path != None:
+            for subdir in self.loader.db.iterate_dircontent(self.path, include_subdirs = True, include_subfiles = False):
+                self.subdirs.append(subdir)
+        return self.subdirs
+    
+    def get_subfiles(self):
+        if self.subfiles != None:
+            return self.subfiles
+        self.subfiles = []
+        if self.path != None:
+            for subfile in self.loader.db.iterate_dircontent(self.path, include_subdirs = False, include_subfiles = True):
+                self.subfiles.append(subfile)
+        return self.subfiles
+
+
+class SelectData(Data):
+
+    def __init__(self, loader, path, file_id, region_id):
+        Data.__init__(self)
+        self.loader = loader
+        self.path = path
+        self.file_id = file_id
+        self.region_id = region_id
+        self.region = None
+    
+    def get_path(self):
+        return self.path
+    
+    def get_region(self):
+        if self.region == None and self.region_id != None:
+            row = self.loader.db.get_region(self.file_id, self.region_id)
+            if row != None:
+                self.region = Region(self.loader,
+                                             self.file_id,
+                                             self.region_id,
+                                             row.name,
+                                             row.begin,
+                                             row.end,
+                                             row.line_begin,
+                                             row.line_end,
+                                             row.cursor,
+                                             row.group,
+                                             row.checksum)
+        return self.region
+
+
+class DiffData(Data):
+    
+    def __init__(self, new_data, old_data):
+        Data.__init__(self)
+        self.new_data = new_data
+        self.old_data = old_data
+    
+    def get_data(self, namespace, field):
+        new_data = self.new_data.get_data(namespace, field)
+        old_data = self.old_data.get_data(namespace, field)
+        if new_data == None:
+            return None
+        if old_data == None:
+            # non_zero fields has got zero value by default if missed
+            # the data can be also unavailable,
+            # because previous collection does not include that
+            # but external tools (like limit.py) should warn about this,
+            # using list of registered database properties
+            old_data = 0
+        return new_data - old_data
+
+####################################
+# Packager Interface
+####################################
+
+class PackagerError(Exception):
+    def __init__(self):
+        Exception.__init__(self, "Failed to pack or unpack.")
+
+class PackagerFactory(object):
+
+    def create(self, python_type, non_zero):
+        if python_type == None:
+            return PackagerFactory.SkipPackager()
+        if python_type == int:
+            if non_zero == False:
+                return PackagerFactory.IntPackager()
+            else:
+                return PackagerFactory.IntNonZeroPackager()
+        if python_type == float and non_zero == False:
+            return PackagerFactory.FloatPackager()
+        if python_type == str:
+            return PackagerFactory.StringPackager()
+        
+        class PackagerFactoryError(Exception):
+            def __init__(self, python_type):
+                Exception.__init__(self, "Python type '" + str(python_type) + "' is not supported by the factory.")
+        raise PackagerFactoryError(python_type)
+    
+    def get_python_type(self, sql_type):
+        if sql_type == "integer":
+            return int
+        if sql_type == "real":
+            return float
+        if sql_type == "text":
+            return str
+
+        class PackagerFactoryError(Exception):
+            def __init__(self, sql_type):
+                Exception.__init__(self, "SQL type '" + str(sql_type) + "' is not supported by the factory.")
+        raise PackagerFactoryError(sql_type)
+
+    class IPackager(object):
+        def pack(self, unpacked_data):
+            raise core.api.InterfaceNotImplemented(self)
+        def unpack(self, packed_data):
+            raise core.api.InterfaceNotImplemented(self)
+        def get_sql_type(self):
+            raise core.api.InterfaceNotImplemented(self)
+        def get_python_type(self):
+            raise core.api.InterfaceNotImplemented(self)
+        def is_non_zero(self):
+            return False
+        
+    class IntPackager(IPackager):
+        def pack(self, unpacked_data):
+            if not isinstance(unpacked_data, int):
+                raise PackagerError()
+            return str(unpacked_data)
+            
+        def unpack(self, packed_data): 
+            try:
+                return int(packed_data)
+            except ValueError:
+                raise PackagerError()
+    
+        def get_sql_type(self):
+            return "integer"
+        
+        def get_python_type(self):
+            return int
+    
+    class IntNonZeroPackager(IntPackager):
+        def pack(self, unpacked_data):
+            if unpacked_data == 0:
+                raise PackagerError()
+            return PackagerFactory.IntPackager.pack(self, unpacked_data)
+        def is_non_zero(self):
+            return True
+
+    class FloatPackager(IPackager):
+        def pack(self, unpacked_data):
+            if not isinstance(unpacked_data, float):
+                raise PackagerError()
+            return str(unpacked_data)
+            
+        def unpack(self, packed_data): 
+            try:
+                return float(packed_data)
+            except ValueError:
+                raise PackagerError()
+    
+        def get_sql_type(self):
+            return "real"
+
+        def get_python_type(self):
+            return float
+
+    class FloatNonZeroPackager(FloatPackager):
+        def pack(self, unpacked_data):
+            if unpacked_data == 0:
+                raise PackagerError()
+            return PackagerFactory.FloatPackager.pack(self, unpacked_data)
+        def is_non_zero(self):
+            return True
+
+    class StringPackager(IPackager):
+        def pack(self, unpacked_data):
+            if not isinstance(unpacked_data, str):
+                raise PackagerError()
+            return str(unpacked_data)
+            
+        def unpack(self, packed_data): 
+            try:
+                return str(packed_data)
+            except ValueError:
+                raise PackagerError()
+    
+        def get_sql_type(self):
+            return "text"
+
+        def get_python_type(self):
+            return str
+    
+    class SkipPackager(IPackager):
+        def pack(self, unpacked_data):
+            return None
+            
+        def unpack(self, packed_data): 
+            return None
+    
+        def get_sql_type(self):
+            return None
+            
+        def get_python_type(self):
+            return None
+            
+####################################
+# Loader
+####################################
+
+class NamespaceError(Exception):
+    def __init__(self, namespace, reason):
+        Exception.__init__(self, "Namespace '"
+                        + namespace 
+                        + "': '"
+                        + reason
+                        + "'")
+
+class FieldError(Exception):
+    def __init__(self, field, reason):
+        Exception.__init__(self, "Field '"
+                    + field 
+                    + "': '"
+                    + reason
+                    + "'")
+
+class Namespace(object):
+    
+    def __init__(self, db_handle, name, support_regions = False, version='1.0'):
+        if not isinstance(name, str):
+            raise NamespaceError(name, "name not a string")
+        self.name = name
+        self.support_regions = support_regions
+        self.fields = {}
+        self.db = db_handle
+        
+        if self.db.check_table(name) == False:        
+            self.db.create_table(name, support_regions, version)
+        else:
+            for column in self.db.iterate_columns(name):
+                self.add_field(column.name, PackagerFactory().get_python_type(column.sql_type), non_zero=column.non_zero)
+        
+    def get_name(self):
+        return self.name
+
+    def are_regions_supported(self):
+        return self.support_regions
+    
+    def add_field(self, field_name, python_type, non_zero=False):
+        if not isinstance(field_name, str):
+            raise FieldError(field_name, "field_name not a string")
+        packager = PackagerFactory().create(python_type, non_zero)
+        if field_name in self.fields.keys():
+            raise FieldError(field_name, "double used")
+        self.fields[field_name] = packager
+        
+        if self.db.check_column(self.get_name(), field_name) == False:        
+            # - False if cloned
+            # - True if created
+            return self.db.create_column(self.name, field_name, packager.get_sql_type(), non_zero=non_zero)
+        return None # if double request
+    
+    def iterate_field_names(self):
+        for name in self.fields.keys():
+            yield name
+    
+    def get_field_packager(self, field_name):
+        if field_name in self.fields.keys():
+            return self.fields[field_name]
+        else:
+            return None
+        
+    def get_field_sql_type(self, field_name):
+        return self.get_field_packager(field_name).get_sql_type()
+
+    def get_field_python_type(self, field_name):
+        return self.get_field_packager(field_name).get_python_type()
+    
+class DataNotPackable(Exception):
+    def __init__(self, namespace, field, value, packager, extra_message):
+        Exception.__init__(self, "Data '"
+                           + str(value)
+                           + "' of type "
+                           + str(value.__class__) 
+                           + " referred by '"
+                           + namespace
+                           + "=>"
+                           + field
+                           + "' is not packable by registered packager '"
+                           + str(packager.__class__)
+                           + "': " + extra_message)
+
+class Loader(object):
+    
+    def __init__(self):
+        self.namespaces = {}
+        self.db = None
+        self.last_file_data = None # for performance boost reasons
+    
+    def create_database(self, dbfile, previous_db = None):
+        self.db = core.db.sqlite.Database()
+        try:
+            self.db.create(dbfile, clone_from=previous_db)
+        except:
+            return False
+        return True
+        
+    def open_database(self, dbfile, read_only = True):
+        self.db = core.db.sqlite.Database()
+        if os.path.exists(dbfile) == False:
+            return False
+        try:
+            self.db.connect(dbfile, read_only=read_only)
+        except:
+            return False
+        
+        for table in self.db.iterate_tables():
+            self.create_namespace(table.name, table.support_regions)
+
+        return True
+
+    def set_property(self, property_name, value):
+        if self.db == None:
+            return None
+        return self.db.set_property(property_name, value)
+    
+    def get_property(self, property_name):
+        if self.db == None:
+            return None
+        return self.db.get_property(property_name)
+
+    def iterate_properties(self):
+        if self.db == None:
+            return None
+        return self.db.iterate_properties()
+            
+    def create_namespace(self, name, support_regions = False, version='1.0'):
+        if self.db == None:
+            return None
+        
+        if name in self.namespaces.keys():
+            raise NamespaceError(name, "double used")
+        new_namespace = Namespace(self.db, name, support_regions, version)
+        self.namespaces[name] = new_namespace
+        return new_namespace
+    
+    def iterate_namespace_names(self):
+        for name in self.namespaces.keys():
+            yield name
+
+    def get_namespace(self, name):
+        if name in self.namespaces.keys():
+            return self.namespaces[name]
+        else:
+            return None
+
+    def create_file_data(self, path, checksum, content):
+        if self.db == None:
+            return None
+
+        (new_id, is_updated) = self.db.create_file(path, checksum)
+        result = FileData(self, path, new_id, checksum, content) 
+        self.last_file_data = result
+        return (result, is_updated)
+
+    def load_file_data(self, path):
+        if self.db == None:
+            return None
+
+        if self.last_file_data != None and self.last_file_data.get_path() == path:
+            return self.last_file_data
+        
+        data = self.db.get_file(path)
+        if data == None:
+            return None
+        
+        result = FileData(self, data.path, data.id, data.checksum, None)
+        self.last_file_data = result
+        return result
+
+    def save_file_data(self, file_data):
+        if self.db == None:
+            return None
+
+        class DataIterator(object):
+
+            def iterate_packed_values(self, data, namespace, support_regions = False):
+                for each in data.iterate_fields(namespace):
+                    space = self.loader.get_namespace(namespace)
+                    if space == None:
+                        raise DataNotPackable(namespace, each[0], each[1], None, "The namespace has not been found")
+                    
+                    packager = space.get_field_packager(each[0])
+                    if packager == None:
+                        raise DataNotPackable(namespace, each[0], each[1], None, "The field has not been found")
+        
+                    if space.support_regions != support_regions:
+                        raise DataNotPackable(namespace, each[0], each[1], packager, "Incompatible support for regions")
+                    
+                    try:
+                        packed_data = packager.pack(each[1])
+                        if packed_data == None:
+                            continue
+                    except PackagerError:
+                        raise DataNotPackable(namespace, each[0], each[1], packager, "Packager raised exception")
+                    
+                    yield (each[0], packed_data)
+            
+            def __init__(self, loader, data, namespace, support_regions = False):
+                self.loader = loader
+                self.iterator = self.iterate_packed_values(data, namespace, support_regions)
+    
+            def __iter__(self):
+                return self.iterator
+        
+        for namespace in file_data.iterate_namespaces():
+            if file_data.is_namespace_updated(namespace) == False:
+                continue
+            self.db.add_row(namespace,
+                            file_data.get_id(),
+                            None,
+                            DataIterator(self, file_data, namespace))
+        
+        if file_data.are_regions_loaded():
+            for region in file_data.iterate_regions():
+                for namespace in region.iterate_namespaces():
+                    if region.is_namespace_updated(namespace) == False:
+                        continue
+                    self.db.add_row(namespace,
+                                    file_data.get_id(),
+                                    region.get_id(),
+                                    DataIterator(self, region, namespace, support_regions = True))
+
+    def iterate_file_data(self, path = None, path_like_filter = "%"):
+        if self.db == None:
+            return None
+        
+        final_path_like = path_like_filter
+        if path != None:
+            if self.db.check_dir(path) == False and self.db.check_file(path) == False:
+                return None
+            final_path_like = path + path_like_filter
+
+        class FileDataIterator(object):
+            def iterate_file_data(self, loader, final_path_like):
+                for data in loader.db.iterate_files(path_like=final_path_like):
+                    yield FileData(loader, data.path, data.id, data.checksum, None)
+            
+            def __init__(self, loader, final_path_like):
+                self.iterator = self.iterate_file_data(loader, final_path_like)
+    
+            def __iter__(self):
+                return self.iterator
+
+        if self.db == None:
+            return None
+        return FileDataIterator(self, final_path_like)
+
+    def load_aggregated_data(self, path = None, path_like_filter = "%", namespaces = None):
+        if self.db == None:
+            return None
+
+        final_path_like = path_like_filter
+        if path != None:
+            if self.db.check_dir(path) == False and self.db.check_file(path) == False:
+                return None
+            final_path_like = path + path_like_filter
+        
+        if namespaces == None:
+            namespaces = self.namespaces.keys()
+        
+        result = AggregatedData(self, path)
+        for name in namespaces:
+            namespace = self.get_namespace(name)
+            data = self.db.aggregate_rows(name, path_like = final_path_like)
+            for field in data.keys():
+                if namespace.get_field_packager(field).get_python_type() == str:
+                    continue
+                if namespace.get_field_packager(field).is_non_zero() == True:
+                    data[field]['min'] = None
+                    data[field]['avg'] = None
+                distribution = self.db.count_rows(name, path_like = final_path_like, group_by_column = field)
+                data[field]['distribution-bars'] = []
+                for each in distribution:
+                    if each[0] == None:
+                        continue
+                    assert(float(data[field]['count'] != 0))
+                    data[field]['distribution-bars'].append({'metric': each[0],
+                                                             'count': each[1],
+                                                             'ratio': round((float(each[1]) / float(data[field]['count'])), 4)})
+                result.set_data(name, field, data[field])
+        return result
+    
+    def load_selected_data(self, namespace, fields = None, path = None, path_like_filter = "%", filters = [],
+                           sort_by = None, limit_by = None):
+        if self.db == None:
+            return None
+        
+        final_path_like = path_like_filter
+        if path != None:
+            if self.db.check_dir(path) == False and self.db.check_file(path) == False:
+                return None
+            final_path_like = path + path_like_filter
+        
+        namespace_obj = self.get_namespace(namespace)
+        if namespace_obj == None:
+            return None
+        
+        class SelectDataIterator(object):
+        
+            def iterate_selected_values(self, loader, namespace_obj, final_path_like, fields, filters, sort_by, limit_by):
+                for row in loader.db.select_rows(namespace_obj.get_name(), path_like=final_path_like, filters=filters,
+                                                 order_by=sort_by, limit_by=limit_by):
+                    region_id = None
+                    if namespace_obj.are_regions_supported() == True:
+                        region_id = row['region_id']
+                    data = SelectData(loader, row['path'], row['id'], region_id)
+                    field_names = fields
+                    if fields == None:
+                        field_names = namespace_obj.iterate_field_names()
+                    for field in field_names:
+                        data.set_data(namespace, field, row[field])
+                    yield data
+            
+            def __init__(self, loader, namespace_obj, final_path_like, fields, filters, sort_by, limit_by):
+                self.iterator = self.iterate_selected_values(loader, namespace_obj, final_path_like, fields, filters, sort_by, limit_by)
+    
+            def __iter__(self):
+                return self.iterator
+
+        return SelectDataIterator(self, namespace_obj, final_path_like, fields, filters, sort_by, limit_by)
+
+
+
+
 
 class BasePlugin(object):
     
@@ -539,7 +1092,7 @@ class SimpleMetricMixin(object):
     def declare_metric(self, is_active, field,
                        pattern_to_search_or_map_of_patterns,
                        marker_type_mask=Marker.T.ANY,
-                       region_type_mask=FileRegionData.T.ANY,
+                       region_type_mask=Region.T.ANY,
                        exclude_subregions=True,
                        merge_markers=False):
         if hasattr(self, '_fields') == False:
@@ -669,19 +1222,4 @@ class Parent(object):
         for child in self.children:
             yield child
 
-# TODO re-factor and remove this
-class ExitError(Exception):
-    def __init__(self, plugin, reason):
-        if plugin != None:
-            Exception.__init__(self, "Plugin '"
-                               + plugin.get_name()
-                               + "' requested abnormal termination: "
-                               + reason)
-        else:
-            Exception.__init__(self, "'Abnormal termination requested: "
-                               + reason)
-            
-
-
-
 
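Note: with the data-access classes relocated from core.db.loader into core.api, the former FileRegionData is now exposed as core.api.Region, and its type masks (Region.T) are unchanged. A minimal sketch of the relocated API, assuming an existing database file and source path (both names are illustrative only):

    import core.api

    loader = core.api.Loader()
    # open_database() now reports failure by returning False instead of raising ExitError
    if loader.open_database('metrixpp.db') == False:
        print "Can not open file: metrixpp.db"
    else:
        file_data = loader.load_file_data('src/main.cpp')
        if file_data != None:
            # Region.T.FUNCTION replaces the former FileRegionData.T.FUNCTION mask
            for region in file_data.iterate_regions(filter_group=core.api.Region.T.FUNCTION):
                print "function region id:", region.get_id()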

+ 0 - 572
mainline/core/db/loader.py

@@ -1,572 +0,0 @@
-#
-#    Metrix++, Copyright 2009-2013, Metrix++ Project
-#    Link: http://metrixplusplus.sourceforge.net
-#    
-#    This file is a part of Metrix++ Tool.
-#    
-#    Metrix++ is free software: you can redistribute it and/or modify
-#    it under the terms of the GNU General Public License as published by
-#    the Free Software Foundation, version 3 of the License.
-#    
-#    Metrix++ is distributed in the hope that it will be useful,
-#    but WITHOUT ANY WARRANTY; without even the implied warranty of
-#    MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
-#    GNU General Public License for more details.
-#    
-#    You should have received a copy of the GNU General Public License
-#    along with Metrix++.  If not, see <http://www.gnu.org/licenses/>.
-#
-
-import logging
-import os.path
-
-import core.api
-import core.db.sqlite
-
-####################################
-# Data Interface
-####################################
-
-from core.api import Data, FileRegionData, Marker, FileData
-
-class AggregatedData(Data):
-    
-    def __init__(self, loader, path):
-        Data.__init__(self)
-        self.path = path
-        self.loader = loader
-        self.subdirs = None
-        self.subfiles = None
-        
-    def get_subdirs(self):
-        if self.subdirs != None:
-            return self.subdirs
-        self.subdirs = []
-        if self.path != None:
-            for subdir in self.loader.db.iterate_dircontent(self.path, include_subdirs = True, include_subfiles = False):
-                self.subdirs.append(subdir)
-        return self.subdirs
-    
-    def get_subfiles(self):
-        if self.subfiles != None:
-            return self.subfiles
-        self.subfiles = []
-        if self.path != None:
-            for subfile in self.loader.db.iterate_dircontent(self.path, include_subdirs = False, include_subfiles = True):
-                self.subfiles.append(subfile)
-        return self.subfiles
-
-
-class SelectData(Data):
-
-    def __init__(self, loader, path, file_id, region_id):
-        Data.__init__(self)
-        self.loader = loader
-        self.path = path
-        self.file_id = file_id
-        self.region_id = region_id
-        self.region = None
-    
-    def get_path(self):
-        return self.path
-    
-    def get_region(self):
-        if self.region == None and self.region_id != None:
-            row = self.loader.db.get_region(self.file_id, self.region_id)
-            if row != None:
-                self.region = FileRegionData(self.loader,
-                                             self.file_id,
-                                             self.region_id,
-                                             row.name,
-                                             row.begin,
-                                             row.end,
-                                             row.line_begin,
-                                             row.line_end,
-                                             row.cursor,
-                                             row.group,
-                                             row.checksum)
-        return self.region
-
-
-class DiffData(Data):
-    
-    def __init__(self, new_data, old_data):
-        Data.__init__(self)
-        self.new_data = new_data
-        self.old_data = old_data
-    
-    def get_data(self, namespace, field):
-        new_data = self.new_data.get_data(namespace, field)
-        old_data = self.old_data.get_data(namespace, field)
-        if new_data == None:
-            return None
-        if old_data == None:
-            # non_zero fields has got zero value by default if missed
-            # the data can be also unavailable,
-            # because previous collection does not include that
-            # but external tools (like limit.py) should warn about this,
-            # using list of registered database properties
-            old_data = 0
-        return new_data - old_data
-
-####################################
-# Packager Interface
-####################################
-
-class PackagerError(Exception):
-    def __init__(self):
-        Exception.__init__(self, "Failed to pack or unpack.")
-
-class PackagerFactory(object):
-
-    def create(self, python_type, non_zero):
-        if python_type == None:
-            return PackagerFactory.SkipPackager()
-        if python_type == int:
-            if non_zero == False:
-                return PackagerFactory.IntPackager()
-            else:
-                return PackagerFactory.IntNonZeroPackager()
-        if python_type == float and non_zero == False:
-            return PackagerFactory.FloatPackager()
-        if python_type == str:
-            return PackagerFactory.StringPackager()
-        
-        class PackagerFactoryError(Exception):
-            def __init__(self, python_type):
-                Exception.__init__(self, "Python type '" + str(python_type) + "' is not supported by the factory.")
-        raise PackagerFactoryError(python_type)
-    
-    def get_python_type(self, sql_type):
-        if sql_type == "integer":
-            return int
-        if sql_type == "real":
-            return float
-        if sql_type == "text":
-            return str
-
-        class PackagerFactoryError(Exception):
-            def __init__(self, sql_type):
-                Exception.__init__(self, "SQL type '" + str(sql_type) + "' is not supported by the factory.")
-        raise PackagerFactoryError(sql_type)
-
-    class IPackager(object):
-        def pack(self, unpacked_data):
-            raise core.api.InterfaceNotImplemented(self)
-        def unpack(self, packed_data):
-            raise core.api.InterfaceNotImplemented(self)
-        def get_sql_type(self):
-            raise core.api.InterfaceNotImplemented(self)
-        def get_python_type(self):
-            raise core.api.InterfaceNotImplemented(self)
-        def is_non_zero(self):
-            return False
-        
-    class IntPackager(IPackager):
-        def pack(self, unpacked_data):
-            if not isinstance(unpacked_data, int):
-                raise PackagerError()
-            return str(unpacked_data)
-            
-        def unpack(self, packed_data): 
-            try:
-                return int(packed_data)
-            except ValueError:
-                raise PackagerError()
-    
-        def get_sql_type(self):
-            return "integer"
-        
-        def get_python_type(self):
-            return int
-    
-    class IntNonZeroPackager(IntPackager):
-        def pack(self, unpacked_data):
-            if unpacked_data == 0:
-                raise PackagerError()
-            return PackagerFactory.IntPackager.pack(self, unpacked_data)
-        def is_non_zero(self):
-            return True
-
-    class FloatPackager(IPackager):
-        def pack(self, unpacked_data):
-            if not isinstance(unpacked_data, float):
-                raise PackagerError()
-            return str(unpacked_data)
-            
-        def unpack(self, packed_data): 
-            try:
-                return float(packed_data)
-            except ValueError:
-                raise PackagerError()
-    
-        def get_sql_type(self):
-            return "real"
-
-        def get_python_type(self):
-            return float
-
-    class FloatNonZeroPackager(FloatPackager):
-        def pack(self, unpacked_data):
-            if unpacked_data == 0:
-                raise PackagerError()
-            return PackagerFactory.FloatPackager.pack(self, unpacked_data)
-        def is_non_zero(self):
-            return True
-
-    class StringPackager(IPackager):
-        def pack(self, unpacked_data):
-            if not isinstance(unpacked_data, str):
-                raise PackagerError()
-            return str(unpacked_data)
-            
-        def unpack(self, packed_data): 
-            try:
-                return str(packed_data)
-            except ValueError:
-                raise PackagerError()
-    
-        def get_sql_type(self):
-            return "text"
-
-        def get_python_type(self):
-            return str
-    
-    class SkipPackager(IPackager):
-        def pack(self, unpacked_data):
-            return None
-            
-        def unpack(self, packed_data): 
-            return None
-    
-        def get_sql_type(self):
-            return None
-            
-        def get_python_type(self):
-            return None
-            
-####################################
-# Loader
-####################################
-
-class NamespaceError(Exception):
-    def __init__(self, namespace, reason):
-        Exception.__init__(self, "Namespace '"
-                        + namespace 
-                        + "': '"
-                        + reason
-                        + "'")
-
-class FieldError(Exception):
-    def __init__(self, field, reason):
-        Exception.__init__(self, "Field '"
-                    + field 
-                    + "': '"
-                    + reason
-                    + "'")
-
-class Namespace(object):
-    
-    def __init__(self, db_handle, name, support_regions = False, version='1.0'):
-        if not isinstance(name, str):
-            raise NamespaceError(name, "name not a string")
-        self.name = name
-        self.support_regions = support_regions
-        self.fields = {}
-        self.db = db_handle
-        
-        if self.db.check_table(name) == False:        
-            self.db.create_table(name, support_regions, version)
-        else:
-            for column in self.db.iterate_columns(name):
-                self.add_field(column.name, PackagerFactory().get_python_type(column.sql_type), non_zero=column.non_zero)
-        
-    def get_name(self):
-        return self.name
-
-    def are_regions_supported(self):
-        return self.support_regions
-    
-    def add_field(self, field_name, python_type, non_zero=False):
-        if not isinstance(field_name, str):
-            raise FieldError(field_name, "field_name not a string")
-        packager = PackagerFactory().create(python_type, non_zero)
-        if field_name in self.fields.keys():
-            raise FieldError(field_name, "double used")
-        self.fields[field_name] = packager
-        
-        if self.db.check_column(self.get_name(), field_name) == False:        
-            # - False if cloned
-            # - True if created
-            return self.db.create_column(self.name, field_name, packager.get_sql_type(), non_zero=non_zero)
-        return None # if double request
-    
-    def iterate_field_names(self):
-        for name in self.fields.keys():
-            yield name
-    
-    def get_field_packager(self, field_name):
-        if field_name in self.fields.keys():
-            return self.fields[field_name]
-        else:
-            return None
-        
-    def get_field_sql_type(self, field_name):
-        return self.get_field_packager(field_name).get_sql_type()
-
-    def get_field_python_type(self, field_name):
-        return self.get_field_packager(field_name).get_python_type()
-    
-class DataNotPackable(Exception):
-    def __init__(self, namespace, field, value, packager, extra_message):
-        Exception.__init__(self, "Data '"
-                           + str(value)
-                           + "' of type "
-                           + str(value.__class__) 
-                           + " referred by '"
-                           + namespace
-                           + "=>"
-                           + field
-                           + "' is not packable by registered packager '"
-                           + str(packager.__class__)
-                           + "': " + extra_message)
-
-class Loader(object):
-    
-    def __init__(self):
-        self.namespaces = {}
-        self.db = None
-        self.last_file_data = None # for performance boost reasons
-    
-    def create_database(self, dbfile, previous_db = None):
-        self.db = core.db.sqlite.Database()
-        if os.path.exists(dbfile):
-            logging.warn("Removing existing file: " + dbfile)
-            # TODO can reuse existing db file to speed up the processing?
-            # TODO add option to choose to remove or to overwrite?
-            os.unlink(dbfile)
-        if previous_db != None and os.path.exists(previous_db) == False:
-            raise core.api.ExitError(None, "Database file '" + previous_db + "'  does not exist")
-
-        self.db.create(dbfile, clone_from=previous_db)
-        
-    def open_database(self, dbfile, read_only = True):
-        self.db = core.db.sqlite.Database()
-        if os.path.exists(dbfile) == False:
-            raise core.api.ExitError(None, "Database file '" + dbfile + "'  does not exist")
-        self.db.connect(dbfile, read_only=read_only)
-        
-        for table in self.db.iterate_tables():
-            self.create_namespace(table.name, table.support_regions)
-            
-    def set_property(self, property_name, value):
-        if self.db == None:
-            return None
-        return self.db.set_property(property_name, value)
-    
-    def get_property(self, property_name):
-        if self.db == None:
-            return None
-        return self.db.get_property(property_name)
-
-    def iterate_properties(self):
-        if self.db == None:
-            return None
-        return self.db.iterate_properties()
-            
-    def create_namespace(self, name, support_regions = False, version='1.0'):
-        if self.db == None:
-            return None
-        
-        if name in self.namespaces.keys():
-            raise NamespaceError(name, "double used")
-        new_namespace = Namespace(self.db, name, support_regions, version)
-        self.namespaces[name] = new_namespace
-        return new_namespace
-    
-    def iterate_namespace_names(self):
-        for name in self.namespaces.keys():
-            yield name
-
-    def get_namespace(self, name):
-        if name in self.namespaces.keys():
-            return self.namespaces[name]
-        else:
-            return None
-
-    def create_file_data(self, path, checksum, content):
-        if self.db == None:
-            return None
-
-        (new_id, is_updated) = self.db.create_file(path, checksum)
-        result = FileData(self, path, new_id, checksum, content) 
-        self.last_file_data = result
-        return (result, is_updated)
-
-    def load_file_data(self, path):
-        if self.db == None:
-            return None
-
-        if self.last_file_data != None and self.last_file_data.get_path() == path:
-            return self.last_file_data
-        
-        data = self.db.get_file(path)
-        if data == None:
-            return None
-        
-        result = FileData(self, data.path, data.id, data.checksum, None)
-        self.last_file_data = result
-        return result
-
-    def save_file_data(self, file_data):
-        if self.db == None:
-            return None
-
-        class DataIterator(object):
-
-            def iterate_packed_values(self, data, namespace, support_regions = False):
-                for each in data.iterate_fields(namespace):
-                    space = self.loader.get_namespace(namespace)
-                    if space == None:
-                        raise DataNotPackable(namespace, each[0], each[1], None, "The namespace has not been found")
-                    
-                    packager = space.get_field_packager(each[0])
-                    if packager == None:
-                        raise DataNotPackable(namespace, each[0], each[1], None, "The field has not been found")
-        
-                    if space.support_regions != support_regions:
-                        raise DataNotPackable(namespace, each[0], each[1], packager, "Incompatible support for regions")
-                    
-                    try:
-                        packed_data = packager.pack(each[1])
-                        if packed_data == None:
-                            continue
-                    except PackagerError:
-                        raise DataNotPackable(namespace, each[0], each[1], packager, "Packager raised exception")
-                    
-                    yield (each[0], packed_data)
-            
-            def __init__(self, loader, data, namespace, support_regions = False):
-                self.loader = loader
-                self.iterator = self.iterate_packed_values(data, namespace, support_regions)
-    
-            def __iter__(self):
-                return self.iterator
-        
-        for namespace in file_data.iterate_namespaces():
-            if file_data.is_namespace_updated(namespace) == False:
-                continue
-            self.db.add_row(namespace,
-                            file_data.get_id(),
-                            None,
-                            DataIterator(self, file_data, namespace))
-        
-        if file_data.are_regions_loaded():
-            for region in file_data.iterate_regions():
-                for namespace in region.iterate_namespaces():
-                    if region.is_namespace_updated(namespace) == False:
-                        continue
-                    self.db.add_row(namespace,
-                                    file_data.get_id(),
-                                    region.get_id(),
-                                    DataIterator(self, region, namespace, support_regions = True))
-
-    def iterate_file_data(self, path = None, path_like_filter = "%"):
-        if self.db == None:
-            return None
-        
-        final_path_like = path_like_filter
-        if path != None:
-            if self.db.check_dir(path) == False and self.db.check_file(path) == False:
-                return None
-            final_path_like = path + path_like_filter
-
-        class FileDataIterator(object):
-            def iterate_file_data(self, loader, final_path_like):
-                for data in loader.db.iterate_files(path_like=final_path_like):
-                    yield FileData(loader, data.path, data.id, data.checksum, None)
-            
-            def __init__(self, loader, final_path_like):
-                self.iterator = self.iterate_file_data(loader, final_path_like)
-    
-            def __iter__(self):
-                return self.iterator
-
-        if self.db == None:
-            return None
-        return FileDataIterator(self, final_path_like)
-
-    def load_aggregated_data(self, path = None, path_like_filter = "%", namespaces = None):
-        if self.db == None:
-            return None
-
-        final_path_like = path_like_filter
-        if path != None:
-            if self.db.check_dir(path) == False and self.db.check_file(path) == False:
-                return None
-            final_path_like = path + path_like_filter
-        
-        if namespaces == None:
-            namespaces = self.namespaces.keys()
-        
-        result = AggregatedData(self, path)
-        for name in namespaces:
-            namespace = self.get_namespace(name)
-            data = self.db.aggregate_rows(name, path_like = final_path_like)
-            for field in data.keys():
-                if namespace.get_field_packager(field).get_python_type() == str:
-                    continue
-                if namespace.get_field_packager(field).is_non_zero() == True:
-                    data[field]['min'] = None
-                    data[field]['avg'] = None
-                distribution = self.db.count_rows(name, path_like = final_path_like, group_by_column = field)
-                data[field]['distribution-bars'] = []
-                for each in distribution:
-                    if each[0] == None:
-                        continue
-                    assert(float(data[field]['count'] != 0))
-                    data[field]['distribution-bars'].append({'metric': each[0],
-                                                             'count': each[1],
-                                                             'ratio': round((float(each[1]) / float(data[field]['count'])), 4)})
-                result.set_data(name, field, data[field])
-        return result
-    
-    def load_selected_data(self, namespace, fields = None, path = None, path_like_filter = "%", filters = [],
-                           sort_by = None, limit_by = None):
-        if self.db == None:
-            return None
-        
-        final_path_like = path_like_filter
-        if path != None:
-            if self.db.check_dir(path) == False and self.db.check_file(path) == False:
-                return None
-            final_path_like = path + path_like_filter
-        
-        namespace_obj = self.get_namespace(namespace)
-        if namespace_obj == None:
-            return None
-        
-        class SelectDataIterator(object):
-        
-            def iterate_selected_values(self, loader, namespace_obj, final_path_like, fields, filters, sort_by, limit_by):
-                for row in loader.db.select_rows(namespace_obj.get_name(), path_like=final_path_like, filters=filters,
-                                                 order_by=sort_by, limit_by=limit_by):
-                    region_id = None
-                    if namespace_obj.are_regions_supported() == True:
-                        region_id = row['region_id']
-                    data = SelectData(loader, row['path'], row['id'], region_id)
-                    field_names = fields
-                    if fields == None:
-                        field_names = namespace_obj.iterate_field_names()
-                    for field in field_names:
-                        data.set_data(namespace, field, row[field])
-                    yield data
-            
-            def __init__(self, loader, namespace_obj, final_path_like, fields, filters, sort_by, limit_by):
-                self.iterator = self.iterate_selected_values(loader, namespace_obj, final_path_like, fields, filters, sort_by, limit_by)
-    
-            def __iter__(self):
-                return self.iterator
-
-        return SelectDataIterator(self, namespace_obj, final_path_like, fields, filters, sort_by, limit_by)
-    
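Note: the removed module handled missing files itself by raising core.api.ExitError; the Loader that replaces it in core.api only returns False, and the file checks move to the callers (core.db.post and the tools below). A minimal sketch of the migrated call pattern, with an illustrative file name:

    import core.api

    loader = core.api.Loader()
    # create_database() no longer deletes an existing file or validates previous_db;
    # it only reports success, so the caller decides how to fail
    if loader.create_database('metrixpp.db', previous_db=None) == False:
        print "Failure in creating file: metrixpp.db"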

+ 38 - 9
mainline/core/db/post.py

@@ -22,6 +22,8 @@ import core.api
 import os.path
 import re
 
+import logging
+
 class Plugin(core.api.Plugin, core.api.IConfigurable):
     
     def declare_configuration(self, parser):
@@ -32,21 +34,48 @@ class Plugin(core.api.Plugin, core.api.IConfigurable):
                              " If it is set for the collector tool to perform an incremental/iterative collection,"
                              " it may reduce the processing time significantly."
                              " Post-processing tools use it in order to recognise/evaluate change trends. [default: %default].")
+        self.parser = parser
     
     def configure(self, options):
         self.dbfile = options.__dict__['db_file']
         self.dbfile_prev = options.__dict__['db_file_prev']
         
-    def initialize(self):
-        
-        self.get_plugin_loader().get_database_loader().create_database(self.dbfile, previous_db = self.dbfile_prev)    
-        
-        # do not process files dumped by this module
-        self.get_plugin_loader().get_plugin('core.dir').add_exclude_rule(re.compile(r'^' + os.path.basename(self.dbfile) + r'$'))
-        if self.dbfile_prev != None:
-            self.get_plugin_loader().get_plugin('core.dir').add_exclude_rule(re.compile(r'^' + os.path.basename(self.dbfile_prev) + r'$'))
+        if self.dbfile_prev != None and os.path.exists(self.dbfile_prev) == False:
+            self.parser.error("File does not exist:" + self.dbfile_prev)
+
         
+    def initialize(self):
         
+        if self.get_plugin_loader() != None:
+            if os.path.exists(self.dbfile):
+                logging.warn("Removing existing file: " + self.dbfile)
+                # TODO can reuse existing db file to speed up the processing?
+                # TODO add option to choose to remove or to overwrite?
+                try:
+                    os.unlink(self.dbfile)
+                except:
+                    logging.warn("Failure in removing file: " + self.dbfile)
+    
+            created = self.get_plugin_loader().get_database_loader().create_database(self.dbfile, previous_db = self.dbfile_prev)
+            if created == False:
+                self.parser.error("Failure in creating file: " + self.dbfile)
+            
+            # do not process files dumped by this module
+            self.get_plugin_loader().get_plugin('core.dir').add_exclude_rule(re.compile(r'^' + os.path.basename(self.dbfile) + r'$'))
+            if self.dbfile_prev != None:
+                self.get_plugin_loader().get_plugin('core.dir').add_exclude_rule(re.compile(r'^' + os.path.basename(self.dbfile_prev) + r'$'))
+
+        else:
+            self.loader_prev = core.api.Loader()
+            if self.dbfile_prev != None:
+                if self.loader_prev.open_database(self.dbfile_prev) == False:
+                    self.parser.error("Can not open file: " + self.dbfile_prev)
+            self.loader = core.api.Loader()
+            if self.loader.open_database(self.dbfile) == False:
+                self.parser.error("Can not open file: " + self.dbfile)
 
+    def get_loader(self):
+        return self.loader
 
-    
+    def get_loader_prev(self):
+        return self.loader_prev

+ 1 - 2
mainline/core/loader.py

@@ -18,7 +18,6 @@
 #
 
 import core.api
-import core.db.loader
 
 import os
 import fnmatch
@@ -29,7 +28,7 @@ class Loader(object):
         self.plugins = []
         self.parsers = []
         self.hash    = {}
-        self.db = core.db.loader.Loader()
+        self.db = core.api.Loader()
         
     def get_database_loader(self):
         return self.db

+ 5 - 2
mainline/core/log.py

@@ -21,7 +21,7 @@ import core.api
 import logging
 import os
 
-class Plugin(core.api.Plugin, core.api.IConfigurable):
+class Plugin(core.api.BasePlugin, core.api.IConfigurable):
     
     def declare_configuration(self, parser, default_value='INFO'):
         allowed_values = ['DEBUG','INFO','WARNING','ERROR']
@@ -50,6 +50,9 @@ class Plugin(core.api.Plugin, core.api.IConfigurable):
         os.environ['METRIXPLUSPLUS_LOG_LEVEL'] = options.__dict__['log_level']
         logging.warn("Logging enabled with " + options.__dict__['log_level'] + " level")
 
+    def initialize(self):
+        super(Plugin, self).initialize()
+        set_default_format()
 
 def set_default_format():
-    logging.basicConfig(format="[LOG]: %(levelname)s:\t%(message)s", level=logging.WARN)
+    logging.basicConfig(format="[LOG]: %(levelname)s:\t%(message)s", level=logging.WARN)

+ 1 - 1
mainline/core/warn.py

@@ -44,7 +44,7 @@ class Plugin(core.api.Plugin, core.api.IConfigurable):
                           "which have got metric value less than the specified limit. "
                           "This option can be specified multiple times, if it is necessary to apply several limits. "
                           "Should be in the format: <namespace>:<field>:<limit-value>, for example: "
-                          "'std.code.complexity:cyclomatic:7'.") # TODO think about better example
+                          "'std.code.lines:comments:1'.")
         parser.add_option("--max-limit", "--max", action="multiopt",
                           help="A threshold per 'namespace:field' metric in order to select regions, "
                           "which have got metric value more than the specified limit. "

+ 1 - 1
mainline/ext/std/code/complexity.py

@@ -48,7 +48,7 @@ class Plugin(core.api.Plugin, core.api.SimpleMetricMixin, core.api.Child, core.a
                                 'java': self.pattern_java
                             },
                             marker_type_mask=core.api.Marker.T.CODE,
-                            region_type_mask=core.api.FileRegionData.T.FUNCTION)
+                            region_type_mask=core.api.Region.T.FUNCTION)
         
         super(Plugin, self).initialize(fields=self.get_fields())
         

+ 1 - 1
mainline/tests/general/test_basic/test_help_limit_default_stdout.gold.txt

@@ -30,7 +30,7 @@ Options:
                         multiple times, if it is necessary to apply several
                         limits. Should be in the format: <namespace>:<field
                         >:<limit-value>, for example:
-                        'std.code.complexity:cyclomatic:7'.
+                        'std.code.lines:comments:1'.
   --max-limit=MAX_LIMIT, --max=MAX_LIMIT
                         A threshold per 'namespace:field' metric in order to
                         select regions, which have got metric value more than

+ 4 - 4
mainline/tools/debug.py

@@ -21,14 +21,13 @@
 import logging
 import cgi
 
+import core.api
 import core.log
 import core.cmdparser
 import core.db.post
-import core.db.loader
 
 import tools.utils
 
-import core.api
 class Tool(core.api.ITool):
     def run(self, tool_args):
         return main(tool_args)
@@ -47,8 +46,9 @@ def main(tool_args):
     log_plugin.configure(options)
     db_plugin.configure(options)
 
-    loader = core.db.loader.Loader()
-    loader.open_database(db_plugin.dbfile)
+    loader = core.api.Loader()
+    if loader.open_database(db_plugin.dbfile) == False:
+        parser.error("Can not open file: " + db_plugin.dbfile)
 
     if options.__dict__['mode'] == 'dumphtml':
         return dumphtml(args, loader)

+ 8 - 6
mainline/tools/export.py

@@ -22,14 +22,13 @@
 import logging
 import csv
 
+import core.api
 import core.log
-import core.db.loader
 import core.db.post
 import core.cmdparser
 
 import tools.utils
 
-import core.api
 class Tool(core.api.ITool):
     def run(self, tool_args):
         return main(tool_args)
@@ -50,12 +49,15 @@ def main(tool_args):
     db_plugin.configure(options)
     out_format = options.__dict__['format']
 
-    loader_prev = core.db.loader.Loader()
+    loader_prev = core.api.Loader()
     if db_plugin.dbfile_prev != None:
-        loader_prev.open_database(db_plugin.dbfile_prev)
+        if loader_prev.open_database(db_plugin.dbfile_prev) == False:
+            parser.error("Can not open file: " + db_plugin.dbfile_prev)
+
 
-    loader = core.db.loader.Loader()
-    loader.open_database(db_plugin.dbfile)
+    loader = core.api.Loader()
+    if loader.open_database(db_plugin.dbfile) == False:
+        parser.error("Can not open file: " + db_plugin.dbfile)
     
     # Check for versions consistency
     for each in loader.iterate_properties():

+ 8 - 6
mainline/tools/info.py

@@ -18,14 +18,13 @@
 #
 
 
-import core.db.loader
+import core.api
 import core.db.post
 import core.log
 import core.cmdparser
 
 import tools.utils
 
-import core.api
 class Tool(core.api.ITool):
     def run(self, tool_args):
         return main(tool_args)
@@ -43,12 +42,15 @@ def main(tool_args):
     log_plugin.configure(options)
     db_plugin.configure(options)
     
-    loader = core.db.loader.Loader()
-    loader.open_database(db_plugin.dbfile)
+    loader = core.api.Loader()
+    if loader.open_database(db_plugin.dbfile) == False:
+        parser.error("Can not open file: " + db_plugin.dbfile)
+
     loader_prev = None
     if db_plugin.dbfile_prev != None:
-        loader_prev = core.db.loader.Loader()
-        loader_prev.open_database(db_plugin.dbfile_prev)
+        loader_prev = core.api.Loader()
+        if loader_prev.open_database(db_plugin.dbfile_prev) == False:
+            parser.error("Can not open file: " + db_plugin.dbfile_prev)
 
     print "Properties:"
     for each in loader.iterate_properties():

+ 7 - 6
mainline/tools/limit.py

@@ -20,7 +20,6 @@
 import logging
 
 import core.log
-import core.db.loader
 import core.db.post
 import core.db.utils
 import core.cout
@@ -59,12 +58,14 @@ def main(tool_args):
     hotspots = options.__dict__['hotspots']
     no_suppress = options.__dict__['disable_suppressions']
 
-    loader_prev = core.db.loader.Loader()
+    loader_prev = core.api.Loader()
     if db_plugin.dbfile_prev != None:
-        loader_prev.open_database(db_plugin.dbfile_prev)
+        if loader_prev.open_database(db_plugin.dbfile_prev) == False:
+            parser.error("Can not open file: " + db_plugin.dbfile_prev)
 
-    loader = core.db.loader.Loader()
-    loader.open_database(db_plugin.dbfile)
+    loader = core.api.Loader()
+    if loader.open_database(db_plugin.dbfile) == False:
+        parser.error("Can not open file: " + db_plugin.dbfile)
     
     warn_plugin.verify_namespaces(loader.iterate_namespace_names())
     for each in loader.iterate_namespace_names():
@@ -128,7 +129,7 @@ def main(tool_args):
                                 is_modified = True
                             else:
                                 is_modified = False
-                            diff = core.db.loader.DiffData(select_data,
+                            diff = core.api.DiffData(select_data,
                                                            file_data_prev.get_region(prev_id)).get_data(limit.namespace, limit.field)
 
                 if (warn_plugin.is_mode_matched(limit.limit,
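Note: limit.py now takes the trend value from core.api.DiffData, whose get_data() returns the new value minus the old one and substitutes zero when the old value is missing. A small self-contained sketch of that behaviour; the namespace and field are taken from the --min-limit example in this commit, and the numbers are illustrative:

    import core.api

    # stand-ins for a current and a previous measurement of the same region;
    # set_data()/get_data() per namespace and field are the Data accessors used throughout this diff
    new_data = core.api.Data()
    old_data = core.api.Data()
    new_data.set_data('std.code.lines', 'comments', 12)
    old_data.set_data('std.code.lines', 'comments', 10)

    diff = core.api.DiffData(new_data, old_data)
    print diff.get_data('std.code.lines', 'comments')  # prints 2 (new minus old)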

+ 7 - 6
mainline/tools/view.py

@@ -19,7 +19,6 @@
 
 
 import core.log
-import core.db.loader
 import core.db.post
 import core.db.utils
 import core.cmdparser
@@ -52,13 +51,15 @@ def main(tool_args):
     out_format = options.__dict__['format']
     nest_regions = options.__dict__['nest_regions']
 
-    loader_prev = core.db.loader.Loader()
+    loader_prev = core.api.Loader()
     if db_plugin.dbfile_prev != None:
-        loader_prev.open_database(db_plugin.dbfile_prev)
+        if loader_prev.open_database(db_plugin.dbfile_prev) == False:
+            parser.error("Can not open file: " + db_plugin.dbfile_prev)
+
+    loader = core.api.Loader()
+    if loader.open_database(db_plugin.dbfile) == False:
+        parser.error("Can not open file: " + db_plugin.dbfile)
 
-    loader = core.db.loader.Loader()
-    loader.open_database(db_plugin.dbfile)
-    
     # Check for versions consistency
     if db_plugin.dbfile_prev != None:
         tools.utils.check_db_metadata(loader, loader_prev)