export.py

#
# Metrix++, Copyright 2009-2013, Metrix++ Project
# Link: http://metrixplusplus.sourceforge.net
#
# This file is a part of Metrix++ Tool.
#
# Metrix++ is free software: you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation, version 3 of the License.
#
# Metrix++ is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with Metrix++. If not, see <http://www.gnu.org/licenses/>.
#
import logging
import time
import re

import core.log
import core.db.loader
import core.db.post
import core.db.utils
import core.cmdparser
import core.export.convert
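
# Command-line entry point: parses the options, opens the current metrics
# database (and the previous one, if configured, for trend analysis), and
# prints the export result to stdout.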
def main():
    log_plugin = core.log.Plugin()
    db_plugin = core.db.post.Plugin()

    parser = core.cmdparser.MultiOptionParser(usage="Usage: %prog [options] -- [path 1] ... [path N]")
    log_plugin.declare_configuration(parser)
    db_plugin.declare_configuration(parser)
    parser.add_option("--general.format", default='xml', choices=['txt', 'xml', 'python'],
                      help="Format of the output data. "
                           "Possible values are 'xml', 'txt' or 'python' [default: %default]")
    parser.add_option("--general.namespaces", default=None,
                      help="Allows enumerating the namespaces of interest."
                           " If not set, all namespaces available in the database file are processed."
                           " Separate namespaces by commas, for example 'general,std.code.complexity'"
                           " [default: %default]")
    parser.add_option("--general.nest-regions", action="store_true", default=False,
                      help="If the option is set, data for regions is exported in the form of a tree. "
                           "Otherwise, all regions are exported as a plain list. [default: %default]")
    (options, args) = parser.parse_args()
    log_plugin.configure(options)
    db_plugin.configure(options)
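
    # The option names contain dots, so they cannot be read as plain
    # attributes; the values are fetched via the options object's __dict__.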
    out_format = options.__dict__['general.format']
    nest_regions = options.__dict__['general.nest_regions']
    namespaces = None
    if options.__dict__['general.namespaces'] != None:
        namespaces = re.split(',', options.__dict__['general.namespaces'])

    loader_prev = core.db.loader.Loader()
    if db_plugin.dbfile_prev != None:
        loader_prev.open_database(db_plugin.dbfile_prev)

    loader = core.db.loader.Loader()
    loader.open_database(db_plugin.dbfile)

    # Check the versions of both databases for consistency
    for each in loader.iterate_properties():
        if db_plugin.dbfile_prev != None:
            prev = loader_prev.get_property(each.name)
            if prev != each.value:
                logging.warning("Previous data has different metadata:")
                logging.warning(" - identification of change trends may not be reliable")
                logging.warning(" - use the 'info' tool to get more details")
                break

    paths = None
    if len(args) == 0:
        paths = [""]
    else:
        paths = args

    (result, exit_code) = export_to_str(out_format, paths, loader, loader_prev, namespaces, nest_regions)
    print result
    return exit_code
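
# Renders the export of the given paths into a single string and returns a
# (result, exit_code) tuple; the exit code counts the paths that were not
# found in the database. A sketch of the 'xml' output shape (assuming the
# converter emits one <data> element per path under the <export> root):
#
#   <export>
#   <data>...</data>
#   ...
#   </export>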
def export_to_str(out_format, paths, loader, loader_prev, namespaces, nest_regions):
    exit_code = 0
    result = ""
    if out_format == 'txt':
        result += "=" * 80 + "\n" + "Export" + "\n" + "_" * 80 + "\n\n"
    elif out_format == 'xml':
        result += "<export>\n"
    elif out_format == 'python':
        result += "{'export': ["

    for (ind, path) in enumerate(paths):
        logging.info("Processing: " + re.sub(r'''[\\]''', "/", path))

        aggregated_data = loader.load_aggregated_data(path, namespaces=namespaces)
        aggregated_data_tree = {}
        subdirs = []
        subfiles = []
        if aggregated_data != None:
            aggregated_data_tree = aggregated_data.get_data_tree(namespaces=namespaces)
            subdirs = aggregated_data.get_subdirs()
            subfiles = aggregated_data.get_subfiles()
        else:
            logging.error("Specified path '" + path + "' is invalid (not found in the database records)")
            exit_code += 1

        aggregated_data_prev = loader_prev.load_aggregated_data(path, namespaces=namespaces)
        if aggregated_data_prev != None:
            aggregated_data_tree = append_diff(aggregated_data_tree,
                                               aggregated_data_prev.get_data_tree(namespaces=namespaces))

        file_data = loader.load_file_data(path)
        file_data_tree = {}
        if file_data != None:
            file_data_tree = file_data.get_data_tree(namespaces=namespaces)
            file_data_prev = loader_prev.load_file_data(path)
            append_regions(file_data_tree, file_data, file_data_prev, namespaces, nest_regions)

        data = {"info": {"path": path, "id": ind + 1},
                "aggregated-data": aggregated_data_tree,
                "file-data": file_data_tree,
                "subdirs": subdirs,
                "subfiles": subfiles}

        if out_format == 'txt':
            result += core.export.convert.to_txt(data, root_name="data") + "\n"
        elif out_format == 'xml':
            result += core.export.convert.to_xml(data, root_name="data") + "\n"
        elif out_format == 'python':
            postfix = ""
            if ind < len(paths) - 1:
                postfix = ", "
            result += core.export.convert.to_python(data, root_name="data") + postfix

    if out_format == 'txt':
        result += "\n"
    elif out_format == 'xml':
        result += "</export>"
    elif out_format == 'python':
        result += "]}"
    return (result, exit_code)
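
# Attaches per-region data to the file's data tree: either as a flat
# 'regions' list or, when nest_regions is set, as a tree of regions with
# 'subregions' following the code nesting; in the nested case the recursion
# starts from region id 1 (the file-level root region). When previous data
# is available, matched regions also get '__diff__' trends via append_diff().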
def append_regions(file_data_tree, file_data, file_data_prev, namespaces, nest_regions):
    regions_matcher = None
    if file_data_prev != None:
        file_data_tree = append_diff(file_data_tree,
                                     file_data_prev.get_data_tree(namespaces=namespaces))
        regions_matcher = core.db.utils.FileRegionsMatcher(file_data, file_data_prev)
    if nest_regions == False:
        regions = []
        for region in file_data.iterate_regions():
            region_data_tree = region.get_data_tree(namespaces=namespaces)
            if regions_matcher != None and regions_matcher.is_matched(region.get_id()):
                region_data_prev = file_data_prev.get_region(regions_matcher.get_prev_id(region.get_id()))
                region_data_tree = append_diff(region_data_tree,
                                               region_data_prev.get_data_tree(namespaces=namespaces))
            regions.append({"info": {"name": region.name,
                                     'type': file_data.get_region_types()().to_str(region.get_type()),
                                     "cursor": region.cursor,
                                     'line_begin': region.line_begin,
                                     'line_end': region.line_end,
                                     'offset_begin': region.begin,
                                     'offset_end': region.end},
                            "data": region_data_tree})
        file_data_tree['regions'] = regions
    else:
        def append_rec(region_id, file_data_tree, file_data, file_data_prev, namespaces):
            region = file_data.get_region(region_id)
            region_data_tree = region.get_data_tree(namespaces=namespaces)
            if regions_matcher != None and regions_matcher.is_matched(region.get_id()):
                region_data_prev = file_data_prev.get_region(regions_matcher.get_prev_id(region.get_id()))
                region_data_tree = append_diff(region_data_tree,
                                               region_data_prev.get_data_tree(namespaces=namespaces))
            result = {"info": {"name": region.name,
                               'type': file_data.get_region_types()().to_str(region.get_type()),
                               "cursor": region.cursor,
                               'line_begin': region.line_begin,
                               'line_end': region.line_end,
                               'offset_begin': region.begin,
                               'offset_end': region.end},
                      "data": region_data_tree,
                      "subregions": []}
            for sub_id in file_data.get_region(region_id).iterate_subregion_ids():
                result['subregions'].append(append_rec(sub_id, file_data_tree, file_data, file_data_prev, namespaces))
            return result
        file_data_tree['regions'] = []
        file_data_tree['regions'].append(append_rec(1, file_data_tree, file_data, file_data_prev, namespaces))
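
# Computes change trends: for every value present in both the current and
# the previous data tree, the difference (current minus previous) is stored
# under a '__diff__' key next to the original values; within nested counter
# dictionaries, None values are treated as 0.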
def append_diff(main_tree, prev_tree):
    assert(main_tree != None)
    assert(prev_tree != None)
    for name in main_tree.keys():
        if name not in prev_tree.keys():
            continue
        for field in main_tree[name].keys():
            if field not in prev_tree[name].keys():
                continue
            if isinstance(main_tree[name][field], dict) and isinstance(prev_tree[name][field], dict):
                diff = {}
                for key in main_tree[name][field].keys():
                    if key not in prev_tree[name][field].keys():
                        continue
                    main_val = main_tree[name][field][key]
                    prev_val = prev_tree[name][field][key]
                    if main_val == None:
                        main_val = 0
                    if prev_val == None:
                        prev_val = 0
                    diff[key] = main_val - prev_val
                main_tree[name][field]['__diff__'] = diff
            elif (not isinstance(main_tree[name][field], dict)) and (not isinstance(prev_tree[name][field], dict)):
                if '__diff__' not in main_tree[name]:
                    main_tree[name]['__diff__'] = {}
                main_tree[name]['__diff__'][field] = main_tree[name][field] - prev_tree[name][field]
    return main_tree
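
# For example (a sketch; the namespace and field names are illustrative):
#
#   append_diff({'std.code.lines': {'total': 120}},
#               {'std.code.lines': {'total': 100}})
#
# returns {'std.code.lines': {'total': 120, '__diff__': {'total': 20}}}.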

if __name__ == '__main__':
    ts = time.time()
    core.log.set_default_format()
    exit_code = main()
    logging.warning("Exit code: " + str(exit_code) + ". Time spent: "
                    + str(round((time.time() - ts), 2)) + " seconds. Done")
    exit(exit_code)