#
# Metrix++, Copyright 2009-2013, Metrix++ Project
# Link: http://metrixplusplus.sourceforge.net
#
# This file is a part of Metrix++ Tool.
#
# Metrix++ is free software: you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation, version 3 of the License.
#
# Metrix++ is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with Metrix++. If not, see <http://www.gnu.org/licenses/>.
#
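
# This module implements the 'view' tool of Metrix++: it loads the metrics
# data collected in the database (optionally together with a previous
# snapshot to diff against) and renders it as plain text, XML or a Python
# literal.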
import mpp.api
import mpp.utils
import mpp.cout
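
# The plugin behind the 'view' action: it declares the tool's command line
# options, picks them up in configure() and exports the loaded data in run().
# An illustrative invocation (the launcher script name is an assumption; the
# options are the ones declared below):
#
#   python metrix++.py view --format=txt --max-distribution-rows=20 ./src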
class Plugin(mpp.api.Plugin, mpp.api.IConfigurable, mpp.api.IRunable):

    def declare_configuration(self, parser):
        parser.add_option("--format", "--ft", default='txt', choices=['txt', 'xml', 'python'],
                          help="Format of the output data. "
                          "Possible values are 'xml', 'txt' or 'python' [default: %default]")
        parser.add_option("--nest-regions", "--nr", action="store_true", default=False,
                          help="If the option is set (True), data for regions is exported in the form of a tree. "
                          "Otherwise, all regions are exported as a plain list. [default: %default]")
        parser.add_option("--max-distribution-rows", "--mdr", type=int, default=20,
                          help="Maximum number of rows in distribution tables. "
                          "If it is set to 0, the tool does not optimize the size of distribution tables. [default: %default]")

    def configure(self, options):
        self.out_format = options.__dict__['format']
        self.nest_regions = options.__dict__['nest_regions']
        self.dist_columns = options.__dict__['max_distribution_rows']

    def run(self, args):
        loader_prev = self.get_plugin_loader().get_plugin('mpp.dbf').get_loader_prev()
        loader = self.get_plugin_loader().get_plugin('mpp.dbf').get_loader()

        paths = None
        if len(args) == 0:
            paths = [""]
        else:
            paths = args

        (result, exit_code) = export_to_str(self.out_format,
                                            paths,
                                            loader,
                                            loader_prev,
                                            self.nest_regions,
                                            self.dist_columns)
        print result
        return exit_code
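
# Serializes the data of every requested path into the selected format and
# returns a (result, exit_code) tuple; exit_code counts the paths which could
# not be resolved in the database. Note that in 'txt' mode the report is
# printed directly by cout_txt() and the returned string stays empty.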
def export_to_str(out_format, paths, loader, loader_prev, nest_regions, dist_columns):
    exit_code = 0
    result = ""
    if out_format == 'xml':
        result += "<export>\n"
    elif out_format == 'python':
        result += "{'export': ["

    for (ind, path) in enumerate(paths):
        path = mpp.utils.preprocess_path(path)

        aggregated_data = loader.load_aggregated_data(path)
        aggregated_data_tree = {}
        subdirs = []
        subfiles = []
        if aggregated_data != None:
            aggregated_data_tree = aggregated_data.get_data_tree()
            subdirs = aggregated_data.get_subdirs()
            subfiles = aggregated_data.get_subfiles()
        else:
            mpp.utils.report_bad_path(path)
            exit_code += 1
        aggregated_data_prev = loader_prev.load_aggregated_data(path)
        if aggregated_data_prev != None:
            aggregated_data_tree = append_diff(aggregated_data_tree,
                                               aggregated_data_prev.get_data_tree())
        aggregated_data_tree = append_suppressions(path, aggregated_data_tree, loader)
        aggregated_data_tree = compress_dist(aggregated_data_tree, dist_columns)

        file_data = loader.load_file_data(path)
        file_data_tree = {}
        if file_data != None:
            file_data_tree = file_data.get_data_tree()
            file_data_prev = loader_prev.load_file_data(path)
            append_regions(file_data_tree, file_data, file_data_prev, nest_regions)

        data = {"info": {"path": path, "id": ind + 1},
                "aggregated-data": aggregated_data_tree,
                "file-data": file_data_tree,
                "subdirs": subdirs,
                "subfiles": subfiles}

        if out_format == 'txt':
            cout_txt(data, loader)
        elif out_format == 'xml':
            result += mpp.utils.serialize_to_xml(data, root_name = "data") + "\n"
        elif out_format == 'python':
            postfix = ""
            if ind < len(paths) - 1:
                postfix = ", "
            result += mpp.utils.serialize_to_python(data, root_name = "data") + postfix

    if out_format == 'xml':
        result += "</export>"
    elif out_format == 'python':
        result += "]}"

    return (result, exit_code)
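
# Attaches per-region data to file_data_tree. When a previous snapshot is
# available, regions are matched against it with mpp.utils.FileRegionsMatcher
# and '__diff__' entries are appended. With nest_regions set, regions are
# emitted as a tree of 'subregions' starting from region id 1 (assumed to be
# the file-level scope); otherwise they are emitted as a flat list.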
def append_regions(file_data_tree, file_data, file_data_prev, nest_regions):
    regions_matcher = None
    if file_data_prev != None:
        file_data_tree = append_diff(file_data_tree,
                                     file_data_prev.get_data_tree())
        regions_matcher = mpp.utils.FileRegionsMatcher(file_data, file_data_prev)

    if nest_regions == False:
        regions = []
        for region in file_data.iterate_regions():
            region_data_tree = region.get_data_tree()
            if regions_matcher != None and regions_matcher.is_matched(region.get_id()):
                region_data_prev = file_data_prev.get_region(regions_matcher.get_prev_id(region.get_id()))
                region_data_tree = append_diff(region_data_tree,
                                               region_data_prev.get_data_tree())
            regions.append({"info": {"name": region.name,
                                     'type': file_data.get_region_types()().to_str(region.get_type()),
                                     "cursor": region.cursor,
                                     'line_begin': region.line_begin,
                                     'line_end': region.line_end,
                                     'offset_begin': region.begin,
                                     'offset_end': region.end},
                            "data": region_data_tree})
        file_data_tree['regions'] = regions
    else:
        def append_rec(region_id, file_data_tree, file_data, file_data_prev):
            region = file_data.get_region(region_id)
            region_data_tree = region.get_data_tree()
            if regions_matcher != None and regions_matcher.is_matched(region.get_id()):
                region_data_prev = file_data_prev.get_region(regions_matcher.get_prev_id(region.get_id()))
                region_data_tree = append_diff(region_data_tree,
                                               region_data_prev.get_data_tree())
            result = {"info": {"name": region.name,
                               'type': file_data.get_region_types()().to_str(region.get_type()),
                               "cursor": region.cursor,
                               'line_begin': region.line_begin,
                               'line_end': region.line_end,
                               'offset_begin': region.begin,
                               'offset_end': region.end},
                      "data": region_data_tree,
                      "subregions": []}
            for sub_id in file_data.get_region(region_id).iterate_subregion_ids():
                result['subregions'].append(append_rec(sub_id, file_data_tree, file_data, file_data_prev))
            return result
        file_data_tree['regions'] = []
        file_data_tree['regions'].append(append_rec(1, file_data_tree, file_data, file_data_prev))
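
# Annotates main_tree with '__diff__' entries holding the numeric deltas
# against prev_tree. Keys missing on either side are skipped, None values
# count as 0, and lists (distribution bars) are merged via append_diff_list().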
def append_diff(main_tree, prev_tree):
    assert(main_tree != None)
    assert(prev_tree != None)

    for name in main_tree.keys():
        if name not in prev_tree.keys():
            continue
        for field in main_tree[name].keys():
            if field not in prev_tree[name].keys():
                continue
            if isinstance(main_tree[name][field], dict) and isinstance(prev_tree[name][field], dict):
                diff = {}
                for key in main_tree[name][field].keys():
                    if key not in prev_tree[name][field].keys():
                        continue
                    main_val = main_tree[name][field][key]
                    prev_val = prev_tree[name][field][key]
                    if main_val == None:
                        main_val = 0
                    if prev_val == None:
                        prev_val = 0
                    if isinstance(main_val, list) and isinstance(prev_val, list):
                        main_tree[name][field][key] = append_diff_list(main_val, prev_val)
                    else:
                        diff[key] = main_val - prev_val
                main_tree[name][field]['__diff__'] = diff
            elif (not isinstance(main_tree[name][field], dict)) and (not isinstance(prev_tree[name][field], dict)):
                if '__diff__' not in main_tree[name]:
                    main_tree[name]['__diff__'] = {}
                main_tree[name]['__diff__'][field] = main_tree[name][field] - prev_tree[name][field]
    return main_tree
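
# Merges two lists of distribution bars by 'metric' value and records the
# delta of the counts per bar in '__diff__'. Bars present only in prev_list
# are kept with a count of 0 and a negative delta. An illustrative example
# (made-up bars):
#
#   main_list: [{'metric': 1, 'count': 4, 'ratio': 0.8}]
#   prev_list: [{'metric': 1, 'count': 3, ...}, {'metric': 2, 'count': 1, ...}]
#   result:    [{'metric': 1, 'count': 4, 'ratio': 0.8, '__diff__': 1},
#               {'metric': 2, 'count': 0, 'ratio': 0, '__diff__': -1}]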
def append_diff_list(main_list, prev_list):
    merged_list = {}
    for bar in main_list:
        merged_list[bar['metric']] = {'count': bar['count'], '__diff__': 0, 'ratio': bar['ratio']}
    for bar in prev_list:
        if bar['metric'] in merged_list.keys():
            merged_list[bar['metric']]['__diff__'] = \
                merged_list[bar['metric']]['count'] - bar['count']
        else:
            merged_list[bar['metric']] = {'count': 0, '__diff__': -bar['count'], 'ratio': 0}
    result = []
    for metric in sorted(merged_list.keys()):
        result.append({'metric': metric,
                       'count': merged_list[metric]['count'],
                       'ratio': merged_list[metric]['ratio'],
                       '__diff__': merged_list[metric]['__diff__']})
    return result
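
# Counts, for every namespace:field pair, the suppressions recorded in the
# 'std.suppress' namespace and stores the number in a 'sup' entry of the
# data tree.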
def append_suppressions(path, data, loader):
    for namespace in data.keys():
        for field in data[namespace].keys():
            selected_data = loader.load_selected_data('std.suppress',
                                                      fields = ['list'],
                                                      path=path,
                                                      filters = [('list', 'LIKE', '%[{0}:{1}]%'.format(namespace, field))])
            if selected_data == None:
                data[namespace][field]['sup'] = 0
            else:
                count = 0
                for each in selected_data:
                    each = each # used
                    count += 1
                data[namespace][field]['sup'] = count
    return data
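
# Compresses 'distribution-bars' tables down to at most 'columns' rows by
# merging adjacent bars until each merged bar has consumed roughly an equal
# share of the remaining count. If the merged bars come out nearly even
# (spread below 5% of the total), the table is rebuilt instead by merging
# over equal metric-value intervals (the 'linear compression' branch below).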
def compress_dist(data, columns):
    if columns == 0:
        return data

    for namespace in data.keys():
        for field in data[namespace].keys():
            metric_data = data[namespace][field]
            distr = metric_data['distribution-bars']
            columns = float(columns) # to trigger floating calculations

            if metric_data['count'] == 0:
                continue

            new_dist = []
            remaining_count = metric_data['count']
            next_consume = None
            next_bar = None
            max_count = 0
            min_count = 0xFFFFFFFF
            sum_ratio = 0
            for (ind, bar) in enumerate(distr):
                if next_bar == None:
                    # start new bar
                    next_bar = {'count': bar['count'],
                                'ratio': bar['ratio'],
                                'metric_s': bar['metric'],
                                'metric_f': bar['metric']}
                    if '__diff__' in bar.keys():
                        next_bar['__diff__'] = bar['__diff__']
                    next_consume = int(round(remaining_count / (columns - len(new_dist))))
                else:
                    # merge to existing bar
                    next_bar['count'] += bar['count']
                    next_bar['ratio'] += bar['ratio']
                    next_bar['metric_f'] = bar['metric']
                    if '__diff__' in bar.keys():
                        next_bar['__diff__'] += bar['__diff__']
                next_consume -= bar['count']
                if (next_consume <= 0 # consumed enough
                    or (ind + 1) == len(distr)): # or the last bar
                    # append to new distribution
                    if isinstance(next_bar['metric_s'], float):
                        next_bar['metric_s'] = "{0:.4f}".format(next_bar['metric_s'])
                        next_bar['metric_f'] = "{0:.4f}".format(next_bar['metric_f'])
                    else:
                        next_bar['metric_s'] = str(next_bar['metric_s'])
                        next_bar['metric_f'] = str(next_bar['metric_f'])
                    if next_bar['metric_s'] == next_bar['metric_f']:
                        next_bar['metric'] = next_bar['metric_s']
                    else:
                        next_bar['metric'] = next_bar['metric_s'] + "-" + next_bar['metric_f']
                    del next_bar['metric_s']
                    del next_bar['metric_f']
                    new_dist.append(next_bar)
                    sum_ratio += next_bar['ratio']
                    if max_count < next_bar['count']:
                        max_count = next_bar['count']
                    if min_count > next_bar['count'] and next_bar['count'] != 0:
                        min_count = next_bar['count']
                    remaining_count -= next_bar['count']
                    next_bar = None
                    # check that consumed all
                    assert((ind + 1) != len(distr) or remaining_count == 0)
                    if remaining_count == 0:
                        break

            if (float(max_count - min_count) / metric_data['count'] < 0.05 and
                metric_data['count'] > 1 and
                len(new_dist) > 1):
                # trick here: if all bars are even in the new distribution
                # it is better to do linear compression instead
                new_dist = []
                step = int(round(float(metric_data['max'] - metric_data['min']) / columns))
                next_end_limit = metric_data['min']
                next_bar = None
                for (ind, bar) in enumerate(distr):
                    if next_bar == None:
                        # start new bar
                        next_bar = {'count': bar['count'],
                                    'ratio': bar['ratio'],
                                    'metric_s': next_end_limit,
                                    'metric_f': bar['metric']}
                        if '__diff__' in bar.keys():
                            next_bar['__diff__'] = bar['__diff__']
                        next_end_limit += step
                    else:
                        # merge to existing bar
                        next_bar['count'] += bar['count']
                        next_bar['ratio'] += bar['ratio']
                        next_bar['metric_f'] = bar['metric']
                        if '__diff__' in bar.keys():
                            next_bar['__diff__'] += bar['__diff__']
                    if (next_bar['metric_f'] >= next_end_limit # consumed enough
                        or (ind + 1) == len(distr)): # or the last bar
                        if (ind + 1) != len(distr):
                            next_bar['metric_f'] = next_end_limit
                        # append to new distribution
                        if isinstance(next_bar['metric_s'], float):
                            next_bar['metric_s'] = "{0:.4f}".format(next_bar['metric_s'])
                            next_bar['metric_f'] = "{0:.4f}".format(next_bar['metric_f'])
                        else:
                            next_bar['metric_s'] = str(next_bar['metric_s'])
                            next_bar['metric_f'] = str(next_bar['metric_f'])
                        next_bar['metric'] = next_bar['metric_s'] + "-" + next_bar['metric_f']
                        del next_bar['metric_s']
                        del next_bar['metric_f']
                        new_dist.append(next_bar)
                        next_bar = None

            data[namespace][field]['distribution-bars'] = new_dist
    return data
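
# Prints the metrics of each region via mpp.cout.notify(), recursing into
# 'subregions' with one extra level of indentation.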
def cout_txt_regions(path, regions, indent = 0):
    for region in regions:
        details = [
            ('Region name', region['info']['name']),
            ('Region type', region['info']['type']),
            ('Offsets', str(region['info']['offset_begin']) + "-" + str(region['info']['offset_end'])),
            ('Line numbers', str(region['info']['line_begin']) + "-" + str(region['info']['line_end']))
        ]
        for namespace in region['data'].keys():
            diff_data = {}
            if '__diff__' in region['data'][namespace].keys():
                diff_data = region['data'][namespace]['__diff__']
            for field in region['data'][namespace].keys():
                diff_str = ""
                if field == '__diff__':
                    continue
                if field in diff_data.keys():
                    diff_str = " [" + ("+" if diff_data[field] >= 0 else "") + str(diff_data[field]) + "]"
                details.append((namespace + ":" + field, str(region['data'][namespace][field]) + diff_str))
        mpp.cout.notify(path,
                        region['info']['cursor'],
                        mpp.cout.SEVERITY_INFO,
                        "Metrics per '" + region['info']['name'] + "' region",
                        details,
                        indent=indent)
        if 'subregions' in region.keys():
            cout_txt_regions(path, region['subregions'], indent=indent+1)
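
# Prints the text report for a single path: per-file metrics, aggregated
# metrics with a distribution table per 'namespace:field' metric, and the
# directory content listing.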
def cout_txt(data, loader):
    details = []
    for key in data['file-data'].keys():
        if key == 'regions':
            cout_txt_regions(data['info']['path'], data['file-data'][key])
        else:
            namespace = key
            diff_data = {}
            if '__diff__' in data['file-data'][namespace].keys():
                diff_data = data['file-data'][namespace]['__diff__']
            for field in data['file-data'][namespace].keys():
                diff_str = ""
                if field == '__diff__':
                    continue
                if field in diff_data.keys():
                    diff_str = " [" + ("+" if diff_data[field] >= 0 else "") + str(diff_data[field]) + "]"
                details.append((namespace + ":" + field, str(data['file-data'][namespace][field]) + diff_str))
    if len(details) > 0:
        mpp.cout.notify(data['info']['path'],
                        0,
                        mpp.cout.SEVERITY_INFO,
                        "Metrics per file",
                        details)

    attr_map = {'total': 'Total',
                'avg': 'Average',
                'min': 'Minimum',
                'max': 'Maximum',
                }
    for namespace in data['aggregated-data'].keys():
        for field in data['aggregated-data'][namespace].keys():
            details = []
            diff_data = {}
            if '__diff__' in data['aggregated-data'][namespace][field].keys():
                diff_data = data['aggregated-data'][namespace][field]['__diff__']
            for attr in ['avg', 'min', 'max', 'total']:
                diff_str = ""
                if attr in diff_data.keys():
                    diff_str = " [" + ("+" if diff_data[attr] >= 0 else "") + str(diff_data[attr]) + "]"
                if attr == 'avg' and data['aggregated-data'][namespace][field]['nonzero'] == True:
                    diff_str += " (excluding zero metric values)"
                details.append((attr_map[attr], str(data['aggregated-data'][namespace][field][attr]) + diff_str))

            measured = data['aggregated-data'][namespace][field]['count']
            if 'count' in diff_data.keys():
                diff_str = ' [{0:{1}}]'.format(diff_data['count'], '+' if diff_data['count'] >= 0 else '')
            count_str_len = len(str(measured))
            elem_name = 'regions'
            if loader.get_namespace(namespace).are_regions_supported() == False:
                elem_name = 'files'
            details.append(('Distribution',
                            '{0}{1} {2} in total (including {3} suppressed)'.format(measured,
                                                                                    diff_str,
                                                                                    elem_name,
                                                                                    data['aggregated-data'][namespace][field]['sup'])))
            details.append((' Metric value', 'Ratio : R-sum : Number of ' + elem_name))
            sum_ratio = 0
            for bar in data['aggregated-data'][namespace][field]['distribution-bars']:
                sum_ratio += bar['ratio']
                diff_str = ""
                if '__diff__' in bar.keys():
                    diff_str = ' [{0:{1}}]'.format(bar['__diff__'], '+' if bar['__diff__'] >= 0 else '')
                if isinstance(bar['metric'], float):
                    metric_str = "{0:.4f}".format(bar['metric'])
                else:
                    metric_str = str(bar['metric'])
                metric_str = (" " * (mpp.cout.DETAILS_OFFSET - len(metric_str) - 1)) + metric_str
                count_str = str(bar['count'])
                count_str = ((" " * (count_str_len - len(count_str))) + count_str + diff_str + "\t")
                details.append((metric_str,
                                "{0:.3f}".format(bar['ratio']) + " : " + "{0:.3f}".format(sum_ratio) + " : " +
                                count_str + ('|' * int(round(bar['ratio'] * 100)))))
            mpp.cout.notify(data['info']['path'],
                            '', # no line number
                            mpp.cout.SEVERITY_INFO,
                            "Overall metrics for '" + namespace + ":" + field + "' metric",
                            details)

    details = []
    for each in data['subdirs']:
        details.append(('Directory', each))
    for each in data['subfiles']:
        details.append(('File', each))
    if len(details) > 0:
        mpp.cout.notify(data['info']['path'],
                        '', # no line number
                        mpp.cout.SEVERITY_INFO,
                        "Directory content:",
                        details)