2
0

results_to_text.py 3.7 KB

123456789101112131415161718192021222324252627282930313233343536373839404142434445464748495051525354555657585960616263646566676869707172737475767778798081828384858687888990919293949596979899100101102103104105106107108109110111112113114115116117118119120121122123124125126
  1. #!/usr/bin/env python3
  2. #
  3. # Simple benchmarking framework
  4. #
  5. # Copyright (c) 2019 Virtuozzo International GmbH.
  6. #
  7. # This program is free software; you can redistribute it and/or modify
  8. # it under the terms of the GNU General Public License as published by
  9. # the Free Software Foundation; either version 2 of the License, or
  10. # (at your option) any later version.
  11. #
  12. # This program is distributed in the hope that it will be useful,
  13. # but WITHOUT ANY WARRANTY; without even the implied warranty of
  14. # MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
  15. # GNU General Public License for more details.
  16. #
  17. # You should have received a copy of the GNU General Public License
  18. # along with this program. If not, see <http://www.gnu.org/licenses/>.
  19. #
  20. import math
  21. import tabulate
  22. # We want leading whitespace for difference row cells (see below)
  23. tabulate.PRESERVE_WHITESPACE = True
  24. def format_value(x, stdev):
  25. stdev_pr = stdev / x * 100
  26. if stdev_pr < 1.5:
  27. # don't care too much
  28. return f'{x:.2g}'
  29. else:
  30. return f'{x:.2g} ± {math.ceil(stdev_pr)}%'
  31. def result_to_text(result):
  32. """Return text representation of bench_one() returned dict."""
  33. if 'average' in result:
  34. s = format_value(result['average'], result['stdev'])
  35. if 'n-failed' in result:
  36. s += '\n({} failed)'.format(result['n-failed'])
  37. return s
  38. else:
  39. return 'FAILED'
  40. def results_dimension(results):
  41. dim = None
  42. for case in results['cases']:
  43. for env in results['envs']:
  44. res = results['tab'][case['id']][env['id']]
  45. if dim is None:
  46. dim = res['dimension']
  47. else:
  48. assert dim == res['dimension']
  49. assert dim in ('iops', 'seconds')
  50. return dim
def results_to_text(results):
    """Return text representation of bench() returned dict.

    Builds a table with one column per environment and, for each test
    case, one row of measured values followed by one row showing the
    percentage difference between environment columns.
    """
    n_columns = len(results['envs'])
    # With more than two environments the difference cells need column
    # labels to say which column each percentage is relative to.
    named_columns = n_columns > 2
    dim = results_dimension(results)
    tab = []

    if named_columns:
        # Environment columns are named A, B, ...
        tab.append([''] + [chr(ord('A') + i) for i in range(n_columns)])

    # Header row with environment ids.
    tab.append([''] + [c['id'] for c in results['envs']])

    for case in results['cases']:
        row = [case['id']]
        case_results = results['tab'][case['id']]
        for env in results['envs']:
            res = case_results[env['id']]
            row.append(result_to_text(res))
        tab.append(row)

        # Add row of difference between columns. For each column starting from
        # B we calculate difference with all previous columns.
        row = ['', '']  # case name and first column
        for i in range(1, n_columns):
            cell = ''
            env = results['envs'][i]
            res = case_results[env['id']]
            if 'average' not in res:
                # Failed result
                row.append(cell)
                continue
            for j in range(0, i):
                env_j = results['envs'][j]
                res_j = case_results[env_j['id']]
                # Leading space is significant: tabulate.PRESERVE_WHITESPACE
                # is set at module level so these cells keep their alignment.
                cell += ' '
                if 'average' not in res_j:
                    # Failed result
                    cell += '--'
                    continue
                # Column label of the reference column (empty when columns
                # are unnamed, i.e. two environments or fewer).
                col_j = tab[0][j + 1] if named_columns else ''
                diff_pr = round((res['average'] - res_j['average']) /
                                res_j['average'] * 100)
                cell += f' {col_j}{diff_pr:+}%'
            row.append(cell)
        tab.append(row)

    return f'All results are in {dim}\n\n' + tabulate.tabulate(tab)
  94. if __name__ == '__main__':
  95. import sys
  96. import json
  97. if len(sys.argv) < 2:
  98. print(f'USAGE: {sys.argv[0]} results.json')
  99. exit(1)
  100. with open(sys.argv[1]) as f:
  101. print(results_to_text(json.load(f)))