123456789101112131415161718192021222324252627282930313233343536373839404142434445464748495051525354555657585960616263646566676869707172737475767778798081828384858687888990919293949596979899100101102103104105106107108109110111112113114115116117118119120121122123124125126127128129130131132133134135136137138139140141142143144145146147148149150151152153154155156157158159160161162163164165166167168169170171172173174175176177178179180181182183184185186187188189190191192193194195196197198199200201202203204205206207208209210211212213214215216217218219220221222223224225226227228229230231232233234235236237238239240241242243244245246247248249250251252253254255256257258259260261262263264265266267268269270271272273274275276277278279280281282283284285286287288289290291292293294295296297298299300301302303304305306307308309310311312313314315316317318319320321322323324325326327328329330331332333334335336337338339340341342343344345346347348349350351352353354355356357358359360361362363364365366367368369370371372373374375376377378379380381382383384385386387388389390391392393394395396397398399400401402403404405406407408409410411412413414415416417418419420421422423424425426427428429430431432433434435436437438439440441442443444445446447448449450451452453454455456457458459460461462463464465466467468469470471472473474475476477478479480481482483484485486487488489490491492493494495496497498499500501502503504505506507508509510511512513514515516517518519520521522523524525526527528529530531532533534535536537538539540541542543544545546547548549550551552553554555556557558559560561562563564565566567568569570571572573574575576577578579580581582583584585586587588589590591592593594595596597598599600601602603604605606607608609610611612613614615616617618619620621622623 |
- #
- # Migration test graph plotting
- #
- # Copyright (c) 2016 Red Hat, Inc.
- #
- # This library is free software; you can redistribute it and/or
- # modify it under the terms of the GNU Lesser General Public
- # License as published by the Free Software Foundation; either
- # version 2.1 of the License, or (at your option) any later version.
- #
- # This library is distributed in the hope that it will be useful,
- # but WITHOUT ANY WARRANTY; without even the implied warranty of
- # MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
- # Lesser General Public License for more details.
- #
- # You should have received a copy of the GNU Lesser General Public
- # License along with this library; if not, see <http://www.gnu.org/licenses/>.
- #
- import sys
class Plot(object):
    """Renders migration test results as an HTML report with plotly charts."""

    # Palette generated with http://tools.medialab.sciences-po.fr/iwanthue/
    COLORS = ["#CD54D0", "#79D94C", "#7470CD", "#D2D251", "#863D79",
              "#76DDA6", "#D4467B", "#61923D", "#CB9CCA", "#D98F36",
              "#8CC8DA", "#CE4831", "#5E7693", "#9B803F", "#412F4C",
              "#CECBA6", "#6D3229", "#598B73", "#C8827C", "#394427"]

    def __init__(self,
                 reports,
                 migration_iters,
                 total_guest_cpu,
                 split_guest_cpu,
                 qemu_cpu,
                 vcpu_cpu):
        """Capture the reports to plot and the per-trace-type enable flags.

        :param reports: list of report objects to render
        :param migration_iters: bool, plot migration iteration markers
        :param total_guest_cpu: bool, plot aggregate guest CPU trace
        :param split_guest_cpu: bool, plot one trace per guest thread
        :param qemu_cpu: bool, plot QEMU main-process utilization trace
        :param vcpu_cpu: bool, plot one utilization trace per VCPU thread
        """
        self._reports = reports
        self._migration_iters = migration_iters
        self._total_guest_cpu = total_guest_cpu
        self._split_guest_cpu = split_guest_cpu
        self._qemu_cpu = qemu_cpu
        self._vcpu_cpu = vcpu_cpu
        # Rotating index into COLORS used by _next_color()
        self._color_idx = 0
- def _next_color(self):
- color = self.COLORS[self._color_idx]
- self._color_idx += 1
- if self._color_idx >= len(self.COLORS):
- self._color_idx = 0
- return color
- def _get_progress_label(self, progress):
- if progress:
- return "\n\n" + "\n".join(
- ["Status: %s" % progress._status,
- "Iteration: %d" % progress._ram._iterations,
- "Throttle: %02d%%" % progress._throttle_pcent,
- "Dirty rate: %dMB/s" % (progress._ram._dirty_rate_pps * 4 / 1024.0)])
- else:
- return "\n\n" + "\n".join(
- ["Status: %s" % "none",
- "Iteration: %d" % 0])
- def _find_start_time(self, report):
- startqemu = report._qemu_timings._records[0]._timestamp
- startguest = report._guest_timings._records[0]._timestamp
- if startqemu < startguest:
- return startqemu
- else:
- return stasrtguest
- def _get_guest_max_value(self, report):
- maxvalue = 0
- for record in report._guest_timings._records:
- if record._value > maxvalue:
- maxvalue = record._value
- return maxvalue
- def _get_qemu_max_value(self, report):
- maxvalue = 0
- oldvalue = None
- oldtime = None
- for record in report._qemu_timings._records:
- if oldvalue is not None:
- cpudelta = (record._value - oldvalue) / 1000.0
- timedelta = record._timestamp - oldtime
- if timedelta == 0:
- continue
- util = cpudelta / timedelta * 100.0
- else:
- util = 0
- oldvalue = record._value
- oldtime = record._timestamp
- if util > maxvalue:
- maxvalue = util
- return maxvalue
def _get_total_guest_cpu_graph(self, report, starttime):
    """Build a plotly line trace of the aggregate guest timing samples.

    Each point's hover text carries the most recent migration progress
    snapshot taken at or before the sample's timestamp.
    """
    times = []
    values = []
    hover = []

    history = report._progress_history
    idx = -1
    for rec in report._guest_timings._records:
        # Advance to the newest progress entry preceding this sample.
        while (idx + 1) < len(history) and history[idx + 1]._now < rec._timestamp:
            idx += 1
        progress = history[idx] if idx >= 0 else None

        times.append(rec._timestamp - starttime)
        values.append(rec._value)
        hover.append(self._get_progress_label(progress))

    from plotly import graph_objs as go
    return go.Scatter(x=times,
                      y=values,
                      name="Guest PIDs: %s" % report._scenario._name,
                      mode='lines',
                      line={
                          "dash": "solid",
                          "color": self._next_color(),
                          "shape": "linear",
                          "width": 1
                      },
                      text=hover)
def _get_split_guest_cpu_graphs(self, report, starttime):
    """Build one plotly line trace per guest thread (keyed by TID).

    Hover text on each point carries the most recent migration progress
    snapshot at or before the sample's timestamp.
    """
    per_tid = {}
    for rec in report._guest_timings._records:
        per_tid.setdefault(rec._tid, {
            "xaxis": [],
            "yaxis": [],
            "labels": [],
        })

    history = report._progress_history
    idx = -1
    for rec in report._guest_timings._records:
        while (idx + 1) < len(history) and history[idx + 1]._now < rec._timestamp:
            idx += 1
        progress = history[idx] if idx >= 0 else None

        data = per_tid[rec._tid]
        data["xaxis"].append(rec._timestamp - starttime)
        data["yaxis"].append(rec._value)
        data["labels"].append(self._get_progress_label(progress))

    from plotly import graph_objs as go
    traces = []
    for tid, data in per_tid.items():
        traces.append(
            go.Scatter(x=data["xaxis"],
                       y=data["yaxis"],
                       name="PID %s: %s" % (tid, report._scenario._name),
                       mode="lines",
                       line={
                           "dash": "solid",
                           "color": self._next_color(),
                           "shape": "linear",
                           "width": 1
                       },
                       text=data["labels"]))
    return traces
def _get_migration_iters_graph(self, report, starttime):
    """Build a plotly marker trace with one star per progress sample.

    All markers sit at y=0 so they line up along the time axis; hover
    text shows the progress details for each sample.
    """
    times = []
    zeros = []
    hover = []
    for progress in report._progress_history:
        times.append(progress._now - starttime)
        zeros.append(0)
        hover.append(self._get_progress_label(progress))

    from plotly import graph_objs as go
    return go.Scatter(x=times,
                      y=zeros,
                      text=hover,
                      name="Migration iterations",
                      mode="markers",
                      marker={
                          "color": self._next_color(),
                          "symbol": "star",
                          "size": 5
                      })
def _get_qemu_cpu_graph(self, report, starttime):
    """Build a plotly line trace of QEMU main-process CPU utilization (%).

    Utilization is derived from consecutive cumulative CPU-time samples;
    samples with a zero time delta against the last accepted sample are
    skipped. The trace is plotted on the secondary y-axis ("y2").
    """
    times = []
    utils = []
    hover = []

    records = report._qemu_timings._records
    history = report._progress_history
    idx = -1

    # Last accepted sample, used as the baseline for the next delta.
    accepted_times = [records[0]._timestamp]
    accepted_values = [records[0]._value]

    for rec in records[1:]:
        while (idx + 1) < len(history) and history[idx + 1]._now < rec._timestamp:
            idx += 1
        progress = history[idx] if idx >= 0 else None

        elapsed = rec._timestamp - accepted_times[-1]
        if elapsed == 0:
            continue
        util = ((rec._value - accepted_values[-1]) / 1000.0) / elapsed * 100.0

        accepted_times.append(rec._timestamp)
        accepted_values.append(rec._value)

        times.append(rec._timestamp - starttime)
        utils.append(util)
        hover.append(self._get_progress_label(progress))

    from plotly import graph_objs as go
    return go.Scatter(x=times,
                      y=utils,
                      yaxis="y2",
                      name="QEMU: %s" % report._scenario._name,
                      mode='lines',
                      line={
                          "dash": "solid",
                          "color": self._next_color(),
                          "shape": "linear",
                          "width": 1
                      },
                      text=hover)
def _get_vcpu_cpu_graphs(self, report, starttime):
    """Build one plotly line trace per VCPU thread showing utilization (%).

    Utilization is computed from consecutive cumulative CPU-time samples
    per thread, clamped to 100%; samples with a zero time delta are
    skipped. Traces are plotted on the secondary y-axis ("y2").
    """
    per_tid = {}
    for rec in report._vcpu_timings._records:
        if rec._tid not in per_tid:
            # Seed the per-thread baselines with its first sample.
            per_tid[rec._tid] = {
                "xaxis": [],
                "yaxis": [],
                "labels": [],
                "absvalue": [rec._value],
                "abstime": [rec._timestamp],
            }

    history = report._progress_history
    idx = -1
    for rec in report._vcpu_timings._records:
        while (idx + 1) < len(history) and history[idx + 1]._now < rec._timestamp:
            idx += 1
        progress = history[idx] if idx >= 0 else None

        data = per_tid[rec._tid]
        elapsed = rec._timestamp - data["abstime"][-1]
        if elapsed == 0:
            continue
        util = ((rec._value - data["absvalue"][-1]) / 1000.0) / elapsed * 100.0
        util = min(util, 100)

        data["absvalue"].append(rec._value)
        data["abstime"].append(rec._timestamp)
        data["xaxis"].append(rec._timestamp - starttime)
        data["yaxis"].append(util)
        data["labels"].append(self._get_progress_label(progress))

    from plotly import graph_objs as go
    traces = []
    for tid, data in per_tid.items():
        traces.append(
            go.Scatter(x=data["xaxis"],
                       y=data["yaxis"],
                       yaxis="y2",
                       name="VCPU %s: %s" % (tid, report._scenario._name),
                       mode="lines",
                       line={
                           "dash": "solid",
                           "color": self._next_color(),
                           "shape": "linear",
                           "width": 1
                       },
                       text=data["labels"]))
    return traces
- def _generate_chart_report(self, report):
- graphs = []
- starttime = self._find_start_time(report)
- if self._total_guest_cpu:
- graphs.append(self._get_total_guest_cpu_graph(report, starttime))
- if self._split_guest_cpu:
- graphs.extend(self._get_split_guest_cpu_graphs(report, starttime))
- if self._qemu_cpu:
- graphs.append(self._get_qemu_cpu_graph(report, starttime))
- if self._vcpu_cpu:
- graphs.extend(self._get_vcpu_cpu_graphs(report, starttime))
- if self._migration_iters:
- graphs.append(self._get_migration_iters_graph(report, starttime))
- return graphs
- def _generate_annotation(self, starttime, progress):
- return {
- "text": progress._status,
- "x": progress._now - starttime,
- "y": 10,
- }
- def _generate_annotations(self, report):
- starttime = self._find_start_time(report)
- annotations = {}
- started = False
- for progress in report._progress_history:
- if progress._status == "setup":
- continue
- if progress._status not in annotations:
- annotations[progress._status] = self._generate_annotation(starttime, progress)
- return annotations.values()
def _generate_chart(self):
    """Render every report's traces into a single offline plotly <div>.

    The primary y-axis scales to the largest guest sample plus headroom;
    the secondary y-axis (utilization %) scales to the largest QEMU
    utilization, or a fixed 110 when the QEMU trace is disabled.

    Fix: the secondary-axis title read "Hostutilization (%)" — missing
    the space between the words; corrected to "Host utilization (%)".
    """
    from plotly.offline import plot
    from plotly import graph_objs as go

    graphs = []
    yaxismax = 0
    yaxismax2 = 0
    for report in self._reports:
        graphs.extend(self._generate_chart_report(report))

        maxvalue = self._get_guest_max_value(report)
        if maxvalue > yaxismax:
            yaxismax = maxvalue
        maxvalue = self._get_qemu_max_value(report)
        if maxvalue > yaxismax2:
            yaxismax2 = maxvalue

    # Headroom so traces do not touch the top of the frame.
    yaxismax += 100
    if not self._qemu_cpu:
        yaxismax2 = 110
    yaxismax2 += 10

    annotations = []
    if self._migration_iters:
        for report in self._reports:
            annotations.extend(self._generate_annotations(report))

    layout = go.Layout(title="Migration comparison",
                       xaxis={
                           "title": "Wallclock time (secs)",
                           "showgrid": False,
                       },
                       yaxis={
                           "title": "Memory update speed (ms/GB)",
                           "showgrid": False,
                           "range": [0, yaxismax],
                       },
                       yaxis2={
                           "title": "Host utilization (%)",
                           "overlaying": "y",
                           "side": "right",
                           "range": [0, yaxismax2],
                           "showgrid": False,
                       },
                       annotations=annotations)

    figure = go.Figure(data=graphs, layout=layout)

    return plot(figure,
                show_link=False,
                include_plotlyjs=False,
                output_type="div")
- def _generate_report(self):
- pieces = []
- for report in self._reports:
- pieces.append("""
- <h3>Report %s</h3>
- <table>
- """ % report._scenario._name)
- pieces.append("""
- <tr class="subhead">
- <th colspan="2">Test config</th>
- </tr>
- <tr>
- <th>Emulator:</th>
- <td>%s</td>
- </tr>
- <tr>
- <th>Kernel:</th>
- <td>%s</td>
- </tr>
- <tr>
- <th>Ramdisk:</th>
- <td>%s</td>
- </tr>
- <tr>
- <th>Transport:</th>
- <td>%s</td>
- </tr>
- <tr>
- <th>Host:</th>
- <td>%s</td>
- </tr>
- """ % (report._binary, report._kernel,
- report._initrd, report._transport, report._dst_host))
- hardware = report._hardware
- pieces.append("""
- <tr class="subhead">
- <th colspan="2">Hardware config</th>
- </tr>
- <tr>
- <th>CPUs:</th>
- <td>%d</td>
- </tr>
- <tr>
- <th>RAM:</th>
- <td>%d GB</td>
- </tr>
- <tr>
- <th>Source CPU bind:</th>
- <td>%s</td>
- </tr>
- <tr>
- <th>Source RAM bind:</th>
- <td>%s</td>
- </tr>
- <tr>
- <th>Dest CPU bind:</th>
- <td>%s</td>
- </tr>
- <tr>
- <th>Dest RAM bind:</th>
- <td>%s</td>
- </tr>
- <tr>
- <th>Preallocate RAM:</th>
- <td>%s</td>
- </tr>
- <tr>
- <th>Locked RAM:</th>
- <td>%s</td>
- </tr>
- <tr>
- <th>Huge pages:</th>
- <td>%s</td>
- </tr>
- """ % (hardware._cpus, hardware._mem,
- ",".join(hardware._src_cpu_bind),
- ",".join(hardware._src_mem_bind),
- ",".join(hardware._dst_cpu_bind),
- ",".join(hardware._dst_mem_bind),
- "yes" if hardware._prealloc_pages else "no",
- "yes" if hardware._locked_pages else "no",
- "yes" if hardware._huge_pages else "no"))
- scenario = report._scenario
- pieces.append("""
- <tr class="subhead">
- <th colspan="2">Scenario config</th>
- </tr>
- <tr>
- <th>Max downtime:</th>
- <td>%d milli-sec</td>
- </tr>
- <tr>
- <th>Max bandwidth:</th>
- <td>%d MB/sec</td>
- </tr>
- <tr>
- <th>Max iters:</th>
- <td>%d</td>
- </tr>
- <tr>
- <th>Max time:</th>
- <td>%d secs</td>
- </tr>
- <tr>
- <th>Pause:</th>
- <td>%s</td>
- </tr>
- <tr>
- <th>Pause iters:</th>
- <td>%d</td>
- </tr>
- <tr>
- <th>Post-copy:</th>
- <td>%s</td>
- </tr>
- <tr>
- <th>Post-copy iters:</th>
- <td>%d</td>
- </tr>
- <tr>
- <th>Auto-converge:</th>
- <td>%s</td>
- </tr>
- <tr>
- <th>Auto-converge iters:</th>
- <td>%d</td>
- </tr>
- <tr>
- <th>MT compression:</th>
- <td>%s</td>
- </tr>
- <tr>
- <th>MT compression threads:</th>
- <td>%d</td>
- </tr>
- <tr>
- <th>XBZRLE compression:</th>
- <td>%s</td>
- </tr>
- <tr>
- <th>XBZRLE compression cache:</th>
- <td>%d%% of RAM</td>
- </tr>
- """ % (scenario._downtime, scenario._bandwidth,
- scenario._max_iters, scenario._max_time,
- "yes" if scenario._pause else "no", scenario._pause_iters,
- "yes" if scenario._post_copy else "no", scenario._post_copy_iters,
- "yes" if scenario._auto_converge else "no", scenario._auto_converge_step,
- "yes" if scenario._compression_mt else "no", scenario._compression_mt_threads,
- "yes" if scenario._compression_xbzrle else "no", scenario._compression_xbzrle_cache))
- pieces.append("""
- </table>
- """)
- return "\n".join(pieces)
- def _generate_style(self):
- return """
- #report table tr th {
- text-align: right;
- }
- #report table tr td {
- text-align: left;
- }
- #report table tr.subhead th {
- background: rgb(192, 192, 192);
- text-align: center;
- }
- """
def generate_html(self, fh):
    """Write the complete HTML report (chart plus detail tables) to fh.

    :param fh: a writable text file handle
    """
    header = """<html>
<head>
<script type="text/javascript" src="plotly.min.js">
</script>
<style type="text/css">
%s
</style>
<title>Migration report</title>
</head>
<body>
<h1>Migration report</h1>
<h2>Chart summary</h2>
<div id="chart">
""" % self._generate_style()
    print(header, file=fh)
    print(self._generate_chart(), file=fh)
    print("""
</div>
<h2>Report details</h2>
<div id="report">
""", file=fh)
    print(self._generate_report(), file=fh)
    print("""
</div>
</body>
</html>
""", file=fh)
def generate(self, filename):
    """Write the HTML report to filename, or to stdout when filename is None."""
    if filename is None:
        self.generate_html(sys.stdout)
        return
    with open(filename, "w") as fh:
        self.generate_html(fh)
|