  1. #
  2. # Metrix++, Copyright 2009-2019, Metrix++ Project
  3. # Link: https://github.com/metrixplusplus/metrixplusplus
  4. #
  5. # This file is a part of Metrix++ Tool.
  6. #
  7. import logging
  8. import re
  9. import os
  10. import pytablewriter
  11. import mpp.api
  12. import mpp.utils
  13. import mpp.cout
  14. DIGIT_COUNT = 8
  15. class Plugin(mpp.api.Plugin, mpp.api.IConfigurable, mpp.api.IRunable):
  16. def declare_configuration(self, parser):
  17. self.parser = parser
  18. parser.add_option("--output-dir", "--od", default='./metrixpp/',
  19. help="Set the output folder. [default: %default].")
  20. parser.add_option("--format", "--ft", default='txt', choices=['txt', 'md', 'html', 'rst', 'latex', 'xlsx', 'doxygen'],
  21. help="Format of the output data. "
  22. "Possible values are 'txt', 'md', 'html', 'rst', 'latex', 'xlsx' or 'doxygen' [default: %default]")
  23. def configure(self, options):
  24. self.out_dir = options.__dict__['output_dir']
  25. self.out_format = options.__dict__['format']
  26. def initialize(self):
  27. super(Plugin, self).initialize()
  28. def loadSubdirs(self, loader, path, subdirs, subfiles):
  29. aggregated_data = loader.load_aggregated_data(path)
  30. if not aggregated_data:
  31. return subdirs, subfiles
  32. for subfile in aggregated_data.get_subfiles():
  33. subfiles.append(aggregated_data.path + "/" + subfile)
  34. for subdir in aggregated_data.get_subdirs():
  35. subdir = aggregated_data.path + "/" + subdir
  36. subdirs.append(subdir)
  37. subdirs, subfiles = self.loadSubdirs(loader, subdir, subdirs, subfiles)
  38. return subdirs, subfiles
  39. def create_doxygen_report(self, paths, output_dir, overview_data, data, loader, loader_prev):
  40. exit_code = 1
  41. if output_dir:
  42. os.makedirs(output_dir, exist_ok=True)
  43. with open(os.path.join(output_dir, "metrixpp.dox"), mode="w+") as file:
  44. file.write("/* this file is autogenerated by metrix++ - changes will be overwritten */\n")
  45. file.write("/*!\n")
  46. file.write("\\page metrix_overview Metrix overview\n\n")
  47. file.write("\\section metrix_sec Metrix Warnings\n\n")
  48. file.write("Metrix Limits exceeded {} times.\n\n".format(len(overview_data["warnings"])))
  49. if len(overview_data["warnings"]) > 0:
  50. file.write("Warning list: \\ref metrix_warnings\n\n")
  51. for file_data in overview_data["matrix"]:
  52. file_data[0] = "\\ref " + file_data[0]
  53. writer = pytablewriter.MarkdownTableWriter()
  54. writer.table_name = "metrix overview"
  55. writer.headers = overview_data["fields"]
  56. writer.value_matrix = overview_data["matrix"]
  57. writer.margin = 1
  58. writer.stream = file
  59. writer.write_table()
  60. file.write("\n\n")
  61. for path in paths:
  62. file.write("\\file {}\n\n".format(path))
  63. writer = pytablewriter.MarkdownTableWriter()
  64. writer.table_name = "metrix"
  65. writer.headers = data[path]["file_fields"]
  66. writer.value_matrix = data[path]["file_matrix"]
  67. writer.margin = 1
  68. writer.stream = file
  69. writer.write_table()
  70. file.write("\n")
  71. for region in data[path]["region_matrix"]:
  72. if region[0] != "-" and region[0] != "__global__":
  73. region[0] = "\\ref " + region[0]
  74. writer = pytablewriter.MarkdownTableWriter()
  75. writer.table_name = "region metrix"
  76. writer.headers = data[path]["region_fields"]
  77. writer.value_matrix = data[path]["region_matrix"]
  78. writer.margin = 1
  79. writer.stream = file
  80. writer.write_table()
  81. file.write("\n")
  82. # add warnings as list items
  83. for warning in data[path]["warnings"]:
  84. warning_text = "Metric '" + warning.namespace + ":" + warning.field + "'"
  85. if warning.region_name and warning.region_name != "__global__":
  86. warning_text = warning_text + " for region \\ref " + warning.region_name
  87. elif warning.region_name == "__global__":
  88. warning_text = warning_text + " for region " + warning.region_name
  89. else:
  90. warning_text = warning_text + " for the file \\ref " + warning.path
  91. warning_text = warning_text + " exceeds the limit."
  92. warning_text = warning_text + " (value: {} - limit: {})".format(warning.stat_level, warning.stat_limit)
  93. file.write("\\xrefitem metrix_warnings \"Metrix Warning\" \"Metrix Warnings\" {}\n".format(warning_text))
  94. file.write("\n\n")
  95. file.write("*/\n")
  96. exit_code = 0
  97. else:
  98. logging.error("no output directory set")
  99. return exit_code
  100. def run(self, args):
  101. exit_code = 0
  102. data = {}
  103. overview_data = {}
  104. warnings = []
  105. loader_prev = self.get_plugin('mpp.dbf').get_loader_prev()
  106. loader = self.get_plugin('mpp.dbf').get_loader()
  107. limit_backend = self.get_plugin('std.tools.limit_backend')
  108. paths = None
  109. if len(args) == 0:
  110. subdirs, paths = self.loadSubdirs(loader, ".", [], [])
  111. else:
  112. paths = args
  113. for path in paths:
  114. path = mpp.utils.preprocess_path(path)
  115. data[path] = {}
  116. data[path]["file_data"] = {}
  117. data[path]["file_fields"] = ["warnings"]
  118. data[path]["file_matrix"] = [[]]
  119. data[path]["regions"] = {}
  120. data[path]["region_fields"] = ["region", "warnings"]
  121. data[path]["region_matrix"] = []
  122. data[path]["warnings"] = []
  123. file_data = loader.load_file_data(path)
  124. # get warnings from limit plugin
  125. data[path]["warnings"] = limit_backend.get_all_warnings(path)
  126. # convert paths to increase readability
  127. for warning in data[path]["warnings"]:
  128. warning.path = os.path.relpath(warning.path)
  129. # load file based data
  130. data_tree = file_data.get_data_tree()
  131. for namespace in file_data.iterate_namespaces():
  132. for field in file_data.iterate_fields(namespace):
  133. data[path]["file_data"][namespace + "." + field[0]] = field[1]
  134. data[path]["file_fields"].append(namespace + "." + field[0])
  135. for field in data[path]["file_fields"]:
  136. if field == "warnings":
  137. data[path]["file_matrix"][0].append(len(data[path]["warnings"]))
  138. else:
  139. data[path]["file_matrix"][0].append(data[path]["file_data"][field])
  140. # load region based data
  141. file_data.load_regions()
  142. for region in file_data.regions:
  143. data[path]["regions"][region.name] = {}
  144. data_tree = region.get_data_tree()
  145. for namespace in region.iterate_namespaces():
  146. for field in region.iterate_fields(namespace):
  147. data[path]["regions"][region.name][namespace + "." + field[0]] = field[1]
  148. if not (namespace + "." + field[0]) in data[path]["region_fields"]:
  149. data[path]["region_fields"].append(namespace + "." + field[0])
  150. # iterate over all found regions in the file
  151. for region in data[path]["regions"]:
  152. # add static columns with region name and warning count
  153. warning_count = sum(warning.region_name == region for warning in data[path]["warnings"])
  154. region_row = [region, str(warning_count)]
  155. # start iterating after the static fields
  156. for field in data[path]["region_fields"][2:]:
  157. if field in data[path]["regions"][region]:
  158. region_row.append(data[path]["regions"][region][field])
  159. else:
  160. region_row.append("-")
  161. data[path]["region_matrix"].append(region_row)
  162. # assemble overview table
  163. overview_data["warnings"] = []
  164. overview_data["fields"] = ["file", "warnings"]
  165. overview_data["matrix"] = []
  166. for key, value in data.items():
  167. for field in value["file_fields"]:
  168. if not field in overview_data["fields"]:
  169. overview_data["fields"].append(field)
  170. for key, value in data.items():
  171. overview_data["warnings"] = overview_data["warnings"] + value["warnings"]
  172. row = [os.path.relpath(key), len(value["warnings"])]
  173. for field in overview_data["fields"][2:]:
  174. if field in value["file_data"]:
  175. row.append(value["file_data"][field])
  176. else:
  177. row.append("-")
  178. overview_data["matrix"].append(row)
  179. if self.out_format == "doxygen":
  180. exit_code = self.create_doxygen_report(paths,
  181. self.out_dir,
  182. overview_data,
  183. data,
  184. loader,
  185. loader_prev)
  186. else:
  187. logging.error("unknown or no output format set")
  188. exit_code = 1
  189. # should default to simple text i guess
  190. return exit_code