/*
 * RISC-V IOMMU - Hardware Performance Monitor (HPM) helpers
 *
 * Copyright (C) 2022-2023 Rivos Inc.
 *
 * This program is free software; you can redistribute it and/or modify it
 * under the terms and conditions of the GNU General Public License,
 * version 2 or later, as published by the Free Software Foundation.
 *
 * This program is distributed in the hope that it will be useful,
 * but WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
 * GNU General Public License for more details.
 *
 * You should have received a copy of the GNU General Public License along
 * with this program; if not, see <http://www.gnu.org/licenses/>.
 */

#include "qemu/osdep.h"
#include "qemu/timer.h"
#include "cpu_bits.h"
#include "riscv-iommu-hpm.h"
#include "riscv-iommu.h"
#include "riscv-iommu-bits.h"
#include "trace.h"

/* For now assume the IOMMU HPM frequency is 1GHz, so one cycle is 1ns. */
static inline uint64_t get_cycles(void)
{
    return qemu_clock_get_ns(QEMU_CLOCK_VIRTUAL);
}

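/*
 * Compute the current IOHPMCYCLES value from the software snapshot
 * (hpmcycle_val/hpmcycle_prev): the last programmed value plus the
 * virtual-clock time elapsed since the snapshot, or the stored value
 * unchanged when counting is inhibited. The OVF bit is carried over
 * from the register in either case.
 */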
uint64_t riscv_iommu_hpmcycle_read(RISCVIOMMUState *s)
{
    const uint64_t cycle = riscv_iommu_reg_get64(
        s, RISCV_IOMMU_REG_IOHPMCYCLES);
    const uint32_t inhibit = riscv_iommu_reg_get32(
        s, RISCV_IOMMU_REG_IOCOUNTINH);
    const uint64_t ctr_prev = s->hpmcycle_prev;
    const uint64_t ctr_val = s->hpmcycle_val;

    trace_riscv_iommu_hpm_read(cycle, inhibit, ctr_prev, ctr_val);

    if (get_field(inhibit, RISCV_IOMMU_IOCOUNTINH_CY)) {
        /*
         * Counter should not increment if inhibit bit is set. We can't really
         * stop the QEMU_CLOCK_VIRTUAL, so we just return the last updated
         * counter value to indicate that counter was not incremented.
         */
        return (ctr_val & RISCV_IOMMU_IOHPMCYCLES_COUNTER) |
               (cycle & RISCV_IOMMU_IOHPMCYCLES_OVF);
    }

    return (ctr_val + get_cycles() - ctr_prev) |
           (cycle & RISCV_IOMMU_IOHPMCYCLES_OVF);
}

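/*
 * Increment the hardware counter selected by ctr_idx by one. On wrap-around,
 * latch the overflow bits and raise the performance-monitoring interrupt if
 * the counter's OF bit was not already set.
 */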
static void hpm_incr_ctr(RISCVIOMMUState *s, uint32_t ctr_idx)
{
    const uint32_t off = ctr_idx << 3;
    uint64_t cntr_val;

    cntr_val = ldq_le_p(&s->regs_rw[RISCV_IOMMU_REG_IOHPMCTR_BASE + off]);
    stq_le_p(&s->regs_rw[RISCV_IOMMU_REG_IOHPMCTR_BASE + off], cntr_val + 1);

    trace_riscv_iommu_hpm_incr_ctr(cntr_val);

    /* Handle the overflow scenario. */
    if (cntr_val == UINT64_MAX) {
        /*
         * Generate interrupt only if OF bit is clear. +1 to offset the cycle
         * register OF bit.
         */
        const uint32_t ovf =
            riscv_iommu_reg_mod32(s, RISCV_IOMMU_REG_IOCOUNTOVF,
                                  BIT(ctr_idx + 1), 0);
        if (!get_field(ovf, BIT(ctr_idx + 1))) {
            riscv_iommu_reg_mod64(s,
                                  RISCV_IOMMU_REG_IOHPMEVT_BASE + off,
                                  RISCV_IOMMU_IOHPMEVT_OF,
                                  0);
            riscv_iommu_notify(s, RISCV_IOMMU_INTR_PM);
        }
    }
}

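/*
 * Count one occurrence of event_id: look up the set of programmable counters
 * bound to this event in hpm_event_ctr_map and increment each counter that is
 * not inhibited and whose DID_GSCID / PID_PSCID filters match the translation
 * context.
 */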
void riscv_iommu_hpm_incr_ctr(RISCVIOMMUState *s, RISCVIOMMUContext *ctx,
                              unsigned event_id)
{
    const uint32_t inhibit = riscv_iommu_reg_get32(
        s, RISCV_IOMMU_REG_IOCOUNTINH);
    uint32_t did_gscid;
    uint32_t pid_pscid;
    uint32_t ctr_idx;
    gpointer value;
    uint32_t ctrs;
    uint64_t evt;

    if (!(s->cap & RISCV_IOMMU_CAP_HPM)) {
        return;
    }

    value = g_hash_table_lookup(s->hpm_event_ctr_map,
                                GUINT_TO_POINTER(event_id));
    if (value == NULL) {
        return;
    }

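    /*
     * 'ctrs' is a bitmask of the counters programmed with this event;
     * 'ctrs &= ctrs - 1' clears the lowest set bit on each iteration so
     * every matching counter is visited exactly once.
     */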
    for (ctrs = GPOINTER_TO_UINT(value); ctrs != 0; ctrs &= ctrs - 1) {
        ctr_idx = ctz32(ctrs);
        if (get_field(inhibit, BIT(ctr_idx + 1))) {
            continue;
        }

        evt = riscv_iommu_reg_get64(s,
            RISCV_IOMMU_REG_IOHPMEVT_BASE + (ctr_idx << 3));

        /*
         * It's quite possible that event ID has been changed in counter
         * but hashtable hasn't been updated yet. We don't want to increment
         * counter for the old event ID.
         */
        if (event_id != get_field(evt, RISCV_IOMMU_IOHPMEVT_EVENT_ID)) {
            continue;
        }

        if (get_field(evt, RISCV_IOMMU_IOHPMEVT_IDT)) {
            did_gscid = get_field(ctx->gatp, RISCV_IOMMU_DC_IOHGATP_GSCID);
            pid_pscid = get_field(ctx->ta, RISCV_IOMMU_DC_TA_PSCID);
        } else {
            did_gscid = ctx->devid;
            pid_pscid = ctx->process_id;
        }

        if (get_field(evt, RISCV_IOMMU_IOHPMEVT_PV_PSCV)) {
            /*
             * If the transaction does not have a valid process_id, counter
             * increments if device_id matches DID_GSCID. If the transaction
             * has a valid process_id, counter increments if device_id
             * matches DID_GSCID and process_id matches PID_PSCID. See
             * IOMMU Specification, Chapter 5.23. Performance-monitoring
             * event selector.
             */
            if (ctx->process_id &&
                get_field(evt, RISCV_IOMMU_IOHPMEVT_PID_PSCID) != pid_pscid) {
                continue;
            }
        }

        if (get_field(evt, RISCV_IOMMU_IOHPMEVT_DV_GSCV)) {
            uint32_t mask = ~0;

            if (get_field(evt, RISCV_IOMMU_IOHPMEVT_DMASK)) {
                /*
                 * 1001 1011  mask = GSCID
                 * 0000 0111  mask = mask ^ (mask + 1)
                 * 1111 1000  mask = ~mask;
                 */
                mask = get_field(evt, RISCV_IOMMU_IOHPMEVT_DID_GSCID);
                mask = mask ^ (mask + 1);
                mask = ~mask;
            }

            if ((get_field(evt, RISCV_IOMMU_IOHPMEVT_DID_GSCID) & mask) !=
                (did_gscid & mask)) {
                continue;
            }
        }

        hpm_incr_ctr(s, ctr_idx);
    }
}

/* Timer callback for cycle counter overflow. */
void riscv_iommu_hpm_timer_cb(void *priv)
{
    RISCVIOMMUState *s = priv;
    const uint32_t inhibit = riscv_iommu_reg_get32(
        s, RISCV_IOMMU_REG_IOCOUNTINH);
    uint32_t ovf;

    if (get_field(inhibit, RISCV_IOMMU_IOCOUNTINH_CY)) {
        return;
    }

    if (s->irq_overflow_left > 0) {
        uint64_t irq_trigger_at =
            qemu_clock_get_ns(QEMU_CLOCK_VIRTUAL) + s->irq_overflow_left;
        timer_mod_anticipate_ns(s->hpm_timer, irq_trigger_at);
        s->irq_overflow_left = 0;
        return;
    }

    ovf = riscv_iommu_reg_get32(s, RISCV_IOMMU_REG_IOCOUNTOVF);
    if (!get_field(ovf, RISCV_IOMMU_IOCOUNTOVF_CY)) {
        /*
         * We don't need to set hpmcycle_val to zero and update hpmcycle_prev
         * to the current clock value. The way we calculate iohpmcycles will
         * overflow and return the correct value. This avoids the need to
         * synchronize the timer callback and the write callback.
         */
        riscv_iommu_reg_mod32(s, RISCV_IOMMU_REG_IOCOUNTOVF,
                              RISCV_IOMMU_IOCOUNTOVF_CY, 0);
        riscv_iommu_reg_mod64(s, RISCV_IOMMU_REG_IOHPMCYCLES,
                              RISCV_IOMMU_IOHPMCYCLES_OVF, 0);
        riscv_iommu_notify(s, RISCV_IOMMU_INTR_PM);
    }
}

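/*
 * Arm hpm_timer to fire when the 63-bit cycle counter, counting up from
 * 'value', would overflow. If the expiry would exceed what the virtual
 * clock can represent, the remainder is deferred via irq_overflow_left
 * and handled in riscv_iommu_hpm_timer_cb().
 */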
static void hpm_setup_timer(RISCVIOMMUState *s, uint64_t value)
{
    const uint32_t inhibit = riscv_iommu_reg_get32(
        s, RISCV_IOMMU_REG_IOCOUNTINH);
    uint64_t overflow_at, overflow_ns;

    if (get_field(inhibit, RISCV_IOMMU_IOCOUNTINH_CY)) {
        return;
    }

    /*
     * We are using INT64_MAX here instead of UINT64_MAX because the cycle
     * counter has 63-bit precision and INT64_MAX is the maximum it can store.
     */
    if (value) {
        overflow_ns = INT64_MAX - value + 1;
    } else {
        overflow_ns = INT64_MAX;
    }

    overflow_at = (uint64_t)qemu_clock_get_ns(QEMU_CLOCK_VIRTUAL) + overflow_ns;

    if (overflow_at > INT64_MAX) {
        s->irq_overflow_left = overflow_at - INT64_MAX;
        overflow_at = INT64_MAX;
    }

    timer_mod_anticipate_ns(s->hpm_timer, overflow_at);
}

/* Updates the internal cycle counter state when iocntinh:CY is changed. */
void riscv_iommu_process_iocntinh_cy(RISCVIOMMUState *s, bool prev_cy_inh)
{
    const uint32_t inhibit = riscv_iommu_reg_get32(
        s, RISCV_IOMMU_REG_IOCOUNTINH);

    /* We only need to process CY bit toggle. */
    if (!(inhibit ^ prev_cy_inh)) {
        return;
    }

    trace_riscv_iommu_hpm_iocntinh_cy(prev_cy_inh);

    if (!(inhibit & RISCV_IOMMU_IOCOUNTINH_CY)) {
        /*
         * Cycle counter is enabled. Just start the timer again and update
         * the clock snapshot value to point to the current time to make
         * sure iohpmcycles read is correct.
         */
        s->hpmcycle_prev = get_cycles();
        hpm_setup_timer(s, s->hpmcycle_val);
    } else {
        /*
         * Cycle counter is disabled. Stop the timer and update the cycle
         * counter to record the current value which is last programmed
         * value + the cycles passed so far.
         */
        s->hpmcycle_val = s->hpmcycle_val + (get_cycles() - s->hpmcycle_prev);
        timer_del(s->hpm_timer);
    }
}

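/*
 * Handle a guest write to IOHPMCYCLES: acknowledge an overflow if software
 * cleared the OVF bit, then restart counting from the newly written value
 * by taking a fresh clock snapshot and re-arming the overflow timer.
 */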
void riscv_iommu_process_hpmcycle_write(RISCVIOMMUState *s)
{
    const uint64_t val = riscv_iommu_reg_get64(s, RISCV_IOMMU_REG_IOHPMCYCLES);
    const uint32_t ovf = riscv_iommu_reg_get32(s, RISCV_IOMMU_REG_IOCOUNTOVF);

    trace_riscv_iommu_hpm_cycle_write(ovf, val);

    /*
     * Clear OF bit in IOCNTOVF if it's being cleared in IOHPMCYCLES register.
     */
    if (get_field(ovf, RISCV_IOMMU_IOCOUNTOVF_CY) &&
        !get_field(val, RISCV_IOMMU_IOHPMCYCLES_OVF)) {
        riscv_iommu_reg_mod32(s, RISCV_IOMMU_REG_IOCOUNTOVF, 0,
                              RISCV_IOMMU_IOCOUNTOVF_CY);
    }

    s->hpmcycle_val = val & ~RISCV_IOMMU_IOHPMCYCLES_OVF;
    s->hpmcycle_prev = get_cycles();
    hpm_setup_timer(s, s->hpmcycle_val);
}

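/*
 * A valid EventID lies strictly between RISCV_IOMMU_HPMEVENT_INVALID and
 * RISCV_IOMMU_HPMEVENT_MAX.
 */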
static inline bool check_valid_event_id(unsigned event_id)
{
    return event_id > RISCV_IOMMU_HPMEVENT_INVALID &&
           event_id < RISCV_IOMMU_HPMEVENT_MAX;
}

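/*
 * g_hash_table_find() callback: udata points to a {ctr_idx, event_id} pair.
 * Match the entry whose counter bitmask contains pair[0] and report the
 * associated event ID back through pair[1].
 */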
static gboolean hpm_event_equal(gpointer key, gpointer value, gpointer udata)
{
    uint32_t *pair = udata;

    if (GPOINTER_TO_UINT(value) & (1 << pair[0])) {
        pair[1] = GPOINTER_TO_UINT(key);
        return true;
    }

    return false;
}

/* Caller must check ctr_idx against hpm_cntrs to see if it's supported. */
static void update_event_map(RISCVIOMMUState *s, uint64_t value,
                             uint32_t ctr_idx)
{
    unsigned event_id = get_field(value, RISCV_IOMMU_IOHPMEVT_EVENT_ID);
    uint32_t pair[2] = { ctr_idx, RISCV_IOMMU_HPMEVENT_INVALID };
    uint32_t new_value = 1 << ctr_idx;
    gpointer data;

    /*
     * If EventID field is RISCV_IOMMU_HPMEVENT_INVALID
     * remove the current mapping.
     */
    if (event_id == RISCV_IOMMU_HPMEVENT_INVALID) {
        data = g_hash_table_find(s->hpm_event_ctr_map, hpm_event_equal, pair);

        new_value = GPOINTER_TO_UINT(data) & ~(new_value);
        if (new_value != 0) {
            g_hash_table_replace(s->hpm_event_ctr_map,
                                 GUINT_TO_POINTER(pair[1]),
                                 GUINT_TO_POINTER(new_value));
        } else {
            g_hash_table_remove(s->hpm_event_ctr_map,
                                GUINT_TO_POINTER(pair[1]));
        }

        return;
    }

    /* Update the counter mask if the event is already enabled. */
    if (g_hash_table_lookup_extended(s->hpm_event_ctr_map,
                                     GUINT_TO_POINTER(event_id),
                                     NULL,
                                     &data)) {
        new_value |= GPOINTER_TO_UINT(data);
    }

    g_hash_table_insert(s->hpm_event_ctr_map,
                        GUINT_TO_POINTER(event_id),
                        GUINT_TO_POINTER(new_value));
}

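/*
 * Handle a guest write to an IOHPMEVT register: clear the matching overflow
 * bit in IOCOUNTOVF when software clears OF, force an out-of-range EventID
 * back to the invalid value (the field is WARL), and refresh the
 * event-to-counter map used by riscv_iommu_hpm_incr_ctr().
 */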
void riscv_iommu_process_hpmevt_write(RISCVIOMMUState *s, uint32_t evt_reg)
{
    const uint32_t ctr_idx = (evt_reg - RISCV_IOMMU_REG_IOHPMEVT_BASE) >> 3;
    const uint32_t ovf = riscv_iommu_reg_get32(s, RISCV_IOMMU_REG_IOCOUNTOVF);
    uint64_t val = riscv_iommu_reg_get64(s, evt_reg);

    if (ctr_idx >= s->hpm_cntrs) {
        return;
    }

    trace_riscv_iommu_hpm_evt_write(ctr_idx, ovf, val);

    /* Clear OF bit in IOCNTOVF if it's being cleared in IOHPMEVT register. */
    if (get_field(ovf, BIT(ctr_idx + 1)) &&
        !get_field(val, RISCV_IOMMU_IOHPMEVT_OF)) {
        /* +1 to offset CYCLE register OF bit. */
        riscv_iommu_reg_mod32(
            s, RISCV_IOMMU_REG_IOCOUNTOVF, 0, BIT(ctr_idx + 1));
    }

    if (!check_valid_event_id(get_field(val, RISCV_IOMMU_IOHPMEVT_EVENT_ID))) {
        /* Reset EventID (WARL) field to invalid. */
        val = set_field(val, RISCV_IOMMU_IOHPMEVT_EVENT_ID,
                        RISCV_IOMMU_HPMEVENT_INVALID);
        riscv_iommu_reg_set64(s, evt_reg, val);
    }

    update_event_map(s, val, ctr_idx);
}