2
0

arm_gicv3_redist.c 19 KB

123456789101112131415161718192021222324252627282930313233343536373839404142434445464748495051525354555657585960616263646566676869707172737475767778798081828384858687888990919293949596979899100101102103104105106107108109110111112113114115116117118119120121122123124125126127128129130131132133134135136137138139140141142143144145146147148149150151152153154155156157158159160161162163164165166167168169170171172173174175176177178179180181182183184185186187188189190191192193194195196197198199200201202203204205206207208209210211212213214215216217218219220221222223224225226227228229230231232233234235236237238239240241242243244245246247248249250251252253254255256257258259260261262263264265266267268269270271272273274275276277278279280281282283284285286287288289290291292293294295296297298299300301302303304305306307308309310311312313314315316317318319320321322323324325326327328329330331332333334335336337338339340341342343344345346347348349350351352353354355356357358359360361362363364365366367368369370371372373374375376377378379380381382383384385386387388389390391392393394395396397398399400401402403404405406407408409410411412413414415416417418419420421422423424425426427428429430431432433434435436437438439440441442443444445446447448449450451452453454455456457458459460461462463464465466467468469470471472473474475476477478479480481482483484485486487488489490491492493494495496497498499500501502503504505506507508509510511512513514515516517518519520521522523524525526527528529530531532533534535536537538539540541542543544545546547548549550551552553554555556557558559560561562563564565566567568569570571572573574575576577578579580581
  1. /*
  2. * ARM GICv3 emulation: Redistributor
  3. *
  4. * Copyright (c) 2015 Huawei.
  5. * Copyright (c) 2016 Linaro Limited.
  6. * Written by Shlomo Pongratz, Peter Maydell
  7. *
  8. * This code is licensed under the GPL, version 2 or (at your option)
  9. * any later version.
  10. */
  11. #include "qemu/osdep.h"
  12. #include "qemu/log.h"
  13. #include "trace.h"
  14. #include "gicv3_internal.h"
  15. static uint32_t mask_group(GICv3CPUState *cs, MemTxAttrs attrs)
  16. {
  17. /* Return a 32-bit mask which should be applied for this set of 32
  18. * interrupts; each bit is 1 if access is permitted by the
  19. * combination of attrs.secure and GICR_GROUPR. (GICR_NSACR does
  20. * not affect config register accesses, unlike GICD_NSACR.)
  21. */
  22. if (!attrs.secure && !(cs->gic->gicd_ctlr & GICD_CTLR_DS)) {
  23. /* bits for Group 0 or Secure Group 1 interrupts are RAZ/WI */
  24. return cs->gicr_igroupr0;
  25. }
  26. return 0xFFFFFFFFU;
  27. }
  28. static int gicr_ns_access(GICv3CPUState *cs, int irq)
  29. {
  30. /* Return the 2 bit NSACR.NS_access field for this SGI */
  31. assert(irq < 16);
  32. return extract32(cs->gicr_nsacr, irq * 2, 2);
  33. }
  34. static void gicr_write_set_bitmap_reg(GICv3CPUState *cs, MemTxAttrs attrs,
  35. uint32_t *reg, uint32_t val)
  36. {
  37. /* Helper routine to implement writing to a "set-bitmap" register */
  38. val &= mask_group(cs, attrs);
  39. *reg |= val;
  40. gicv3_redist_update(cs);
  41. }
  42. static void gicr_write_clear_bitmap_reg(GICv3CPUState *cs, MemTxAttrs attrs,
  43. uint32_t *reg, uint32_t val)
  44. {
  45. /* Helper routine to implement writing to a "clear-bitmap" register */
  46. val &= mask_group(cs, attrs);
  47. *reg &= ~val;
  48. gicv3_redist_update(cs);
  49. }
  50. static uint32_t gicr_read_bitmap_reg(GICv3CPUState *cs, MemTxAttrs attrs,
  51. uint32_t reg)
  52. {
  53. reg &= mask_group(cs, attrs);
  54. return reg;
  55. }
  56. static uint8_t gicr_read_ipriorityr(GICv3CPUState *cs, MemTxAttrs attrs,
  57. int irq)
  58. {
  59. /* Read the value of GICR_IPRIORITYR<n> for the specified interrupt,
  60. * honouring security state (these are RAZ/WI for Group 0 or Secure
  61. * Group 1 interrupts).
  62. */
  63. uint32_t prio;
  64. prio = cs->gicr_ipriorityr[irq];
  65. if (!attrs.secure && !(cs->gic->gicd_ctlr & GICD_CTLR_DS)) {
  66. if (!(cs->gicr_igroupr0 & (1U << irq))) {
  67. /* Fields for Group 0 or Secure Group 1 interrupts are RAZ/WI */
  68. return 0;
  69. }
  70. /* NS view of the interrupt priority */
  71. prio = (prio << 1) & 0xff;
  72. }
  73. return prio;
  74. }
  75. static void gicr_write_ipriorityr(GICv3CPUState *cs, MemTxAttrs attrs, int irq,
  76. uint8_t value)
  77. {
  78. /* Write the value of GICD_IPRIORITYR<n> for the specified interrupt,
  79. * honouring security state (these are RAZ/WI for Group 0 or Secure
  80. * Group 1 interrupts).
  81. */
  82. if (!attrs.secure && !(cs->gic->gicd_ctlr & GICD_CTLR_DS)) {
  83. if (!(cs->gicr_igroupr0 & (1U << irq))) {
  84. /* Fields for Group 0 or Secure Group 1 interrupts are RAZ/WI */
  85. return;
  86. }
  87. /* NS view of the interrupt priority */
  88. value = 0x80 | (value >> 1);
  89. }
  90. cs->gicr_ipriorityr[irq] = value;
  91. }
  92. static MemTxResult gicr_readb(GICv3CPUState *cs, hwaddr offset,
  93. uint64_t *data, MemTxAttrs attrs)
  94. {
  95. switch (offset) {
  96. case GICR_IPRIORITYR ... GICR_IPRIORITYR + 0x1f:
  97. *data = gicr_read_ipriorityr(cs, attrs, offset - GICR_IPRIORITYR);
  98. return MEMTX_OK;
  99. default:
  100. return MEMTX_ERROR;
  101. }
  102. }
  103. static MemTxResult gicr_writeb(GICv3CPUState *cs, hwaddr offset,
  104. uint64_t value, MemTxAttrs attrs)
  105. {
  106. switch (offset) {
  107. case GICR_IPRIORITYR ... GICR_IPRIORITYR + 0x1f:
  108. gicr_write_ipriorityr(cs, attrs, offset - GICR_IPRIORITYR, value);
  109. gicv3_redist_update(cs);
  110. return MEMTX_OK;
  111. default:
  112. return MEMTX_ERROR;
  113. }
  114. }
static MemTxResult gicr_readl(GICv3CPUState *cs, hwaddr offset,
                              uint64_t *data, MemTxAttrs attrs)
{
    /*
     * Handle a 32-bit read of a redistributor register at @offset
     * (relative to this CPU's redistributor pages). Returns MEMTX_ERROR
     * for reserved/unimplemented offsets; the caller converts that into
     * RAZ plus guest-error logging rather than a data abort.
     */
    switch (offset) {
    case GICR_CTLR:
        *data = cs->gicr_ctlr;
        return MEMTX_OK;
    case GICR_IIDR:
        *data = gicv3_iidr();
        return MEMTX_OK;
    case GICR_TYPER:
        /* 64-bit register read 32 bits at a time: low half here... */
        *data = extract64(cs->gicr_typer, 0, 32);
        return MEMTX_OK;
    case GICR_TYPER + 4:
        /* ...high half here */
        *data = extract64(cs->gicr_typer, 32, 32);
        return MEMTX_OK;
    case GICR_STATUSR:
        /* RAZ/WI for us (this is an optional register and our implementation
         * does not track RO/WO/reserved violations to report them to the guest)
         */
        *data = 0;
        return MEMTX_OK;
    case GICR_WAKER:
        *data = cs->gicr_waker;
        return MEMTX_OK;
    case GICR_PROPBASER:
        *data = extract64(cs->gicr_propbaser, 0, 32);
        return MEMTX_OK;
    case GICR_PROPBASER + 4:
        *data = extract64(cs->gicr_propbaser, 32, 32);
        return MEMTX_OK;
    case GICR_PENDBASER:
        *data = extract64(cs->gicr_pendbaser, 0, 32);
        return MEMTX_OK;
    case GICR_PENDBASER + 4:
        *data = extract64(cs->gicr_pendbaser, 32, 32);
        return MEMTX_OK;
    case GICR_IGROUPR0:
        /* RAZ for a Non-secure access when security is enabled */
        if (!attrs.secure && !(cs->gic->gicd_ctlr & GICD_CTLR_DS)) {
            *data = 0;
            return MEMTX_OK;
        }
        *data = cs->gicr_igroupr0;
        return MEMTX_OK;
    case GICR_ISENABLER0:
    case GICR_ICENABLER0:
        /* Set- and clear-enable registers read the same underlying state */
        *data = gicr_read_bitmap_reg(cs, attrs, cs->gicr_ienabler0);
        return MEMTX_OK;
    case GICR_ISPENDR0:
    case GICR_ICPENDR0:
    {
        /* The pending register reads as the logical OR of the pending
         * latch and the input line level for level-triggered interrupts.
         */
        uint32_t val = cs->gicr_ipendr0 | (~cs->edge_trigger & cs->level);
        *data = gicr_read_bitmap_reg(cs, attrs, val);
        return MEMTX_OK;
    }
    case GICR_ISACTIVER0:
    case GICR_ICACTIVER0:
        *data = gicr_read_bitmap_reg(cs, attrs, cs->gicr_iactiver0);
        return MEMTX_OK;
    case GICR_IPRIORITYR ... GICR_IPRIORITYR + 0x1f:
    {
        /* Assemble the 32-bit word from four per-irq byte reads,
         * each of which applies its own security-state filtering.
         */
        int i, irq = offset - GICR_IPRIORITYR;
        uint32_t value = 0;
        for (i = irq + 3; i >= irq; i--) {
            value <<= 8;
            value |= gicr_read_ipriorityr(cs, attrs, i);
        }
        *data = value;
        return MEMTX_OK;
    }
    case GICR_ICFGR0:
    case GICR_ICFGR1:
    {
        /* Our edge_trigger bitmap is one bit per irq; take the correct
         * half of it, and spread it out into the odd bits.
         */
        uint32_t value;
        value = cs->edge_trigger & mask_group(cs, attrs);
        value = extract32(value, (offset == GICR_ICFGR1) ? 16 : 0, 16);
        value = half_shuffle32(value) << 1;
        *data = value;
        return MEMTX_OK;
    }
    case GICR_IGRPMODR0:
        if ((cs->gic->gicd_ctlr & GICD_CTLR_DS) || !attrs.secure) {
            /* RAZ/WI if security disabled, or if
             * security enabled and this is an NS access
             */
            *data = 0;
            return MEMTX_OK;
        }
        *data = cs->gicr_igrpmodr0;
        return MEMTX_OK;
    case GICR_NSACR:
        if ((cs->gic->gicd_ctlr & GICD_CTLR_DS) || !attrs.secure) {
            /* RAZ/WI if security disabled, or if
             * security enabled and this is an NS access
             */
            *data = 0;
            return MEMTX_OK;
        }
        *data = cs->gicr_nsacr;
        return MEMTX_OK;
    case GICR_IDREGS ... GICR_IDREGS + 0x2f:
        *data = gicv3_idreg(offset - GICR_IDREGS);
        return MEMTX_OK;
    default:
        return MEMTX_ERROR;
    }
}
static MemTxResult gicr_writel(GICv3CPUState *cs, hwaddr offset,
                               uint64_t value, MemTxAttrs attrs)
{
    /*
     * Handle a 32-bit write of @value to the redistributor register at
     * @offset. Returns MEMTX_ERROR for reserved/unimplemented offsets;
     * the caller converts that into WI plus guest-error logging.
     */
    switch (offset) {
    case GICR_CTLR:
        /* For our implementation, GICR_TYPER.DPGS is 0 and so all
         * the DPG bits are RAZ/WI. We don't do anything asynchronously,
         * so UWP and RWP are RAZ/WI. And GICR_TYPER.LPIS is 0 (we don't
         * implement LPIs) so Enable_LPIs is RES0. So there are no writable
         * bits for us.
         */
        return MEMTX_OK;
    case GICR_STATUSR:
        /* RAZ/WI for our implementation */
        return MEMTX_OK;
    case GICR_WAKER:
        /* Only the ProcessorSleep bit is writeable. When the guest sets
         * it it requests that we transition the channel between the
         * redistributor and the cpu interface to quiescent, and that
         * we set the ChildrenAsleep bit once the interface has reached the
         * quiescent state.
         * Setting the ProcessorSleep to 0 reverses the quiescing, and
         * ChildrenAsleep is cleared once the transition is complete.
         * Since our interface is not asynchronous, we complete these
         * transitions instantaneously, so we set ChildrenAsleep to the
         * same value as ProcessorSleep here.
         */
        value &= GICR_WAKER_ProcessorSleep;
        if (value & GICR_WAKER_ProcessorSleep) {
            value |= GICR_WAKER_ChildrenAsleep;
        }
        cs->gicr_waker = value;
        return MEMTX_OK;
    case GICR_PROPBASER:
        /* 64-bit register written 32 bits at a time */
        cs->gicr_propbaser = deposit64(cs->gicr_propbaser, 0, 32, value);
        return MEMTX_OK;
    case GICR_PROPBASER + 4:
        cs->gicr_propbaser = deposit64(cs->gicr_propbaser, 32, 32, value);
        return MEMTX_OK;
    case GICR_PENDBASER:
        cs->gicr_pendbaser = deposit64(cs->gicr_pendbaser, 0, 32, value);
        return MEMTX_OK;
    case GICR_PENDBASER + 4:
        cs->gicr_pendbaser = deposit64(cs->gicr_pendbaser, 32, 32, value);
        return MEMTX_OK;
    case GICR_IGROUPR0:
        /* WI for a Non-secure access when security is enabled */
        if (!attrs.secure && !(cs->gic->gicd_ctlr & GICD_CTLR_DS)) {
            return MEMTX_OK;
        }
        cs->gicr_igroupr0 = value;
        gicv3_redist_update(cs);
        return MEMTX_OK;
    case GICR_ISENABLER0:
        gicr_write_set_bitmap_reg(cs, attrs, &cs->gicr_ienabler0, value);
        return MEMTX_OK;
    case GICR_ICENABLER0:
        gicr_write_clear_bitmap_reg(cs, attrs, &cs->gicr_ienabler0, value);
        return MEMTX_OK;
    case GICR_ISPENDR0:
        gicr_write_set_bitmap_reg(cs, attrs, &cs->gicr_ipendr0, value);
        return MEMTX_OK;
    case GICR_ICPENDR0:
        gicr_write_clear_bitmap_reg(cs, attrs, &cs->gicr_ipendr0, value);
        return MEMTX_OK;
    case GICR_ISACTIVER0:
        gicr_write_set_bitmap_reg(cs, attrs, &cs->gicr_iactiver0, value);
        return MEMTX_OK;
    case GICR_ICACTIVER0:
        gicr_write_clear_bitmap_reg(cs, attrs, &cs->gicr_iactiver0, value);
        return MEMTX_OK;
    case GICR_IPRIORITYR ... GICR_IPRIORITYR + 0x1f:
    {
        /* Split the 32-bit word into four per-irq byte writes, each of
         * which applies its own security-state filtering.
         */
        int i, irq = offset - GICR_IPRIORITYR;
        for (i = irq; i < irq + 4; i++, value >>= 8) {
            gicr_write_ipriorityr(cs, attrs, i, value);
        }
        gicv3_redist_update(cs);
        return MEMTX_OK;
    }
    case GICR_ICFGR0:
        /* Register is all RAZ/WI or RAO/WI bits */
        return MEMTX_OK;
    case GICR_ICFGR1:
    {
        uint32_t mask;
        /* Since our edge_trigger bitmap is one bit per irq, our input
         * 32-bits will compress down into 16 bits which we need
         * to write into the bitmap.
         */
        value = half_unshuffle32(value >> 1) << 16;
        mask = mask_group(cs, attrs) & 0xffff0000U;
        cs->edge_trigger &= ~mask;
        cs->edge_trigger |= (value & mask);
        gicv3_redist_update(cs);
        return MEMTX_OK;
    }
    case GICR_IGRPMODR0:
        if ((cs->gic->gicd_ctlr & GICD_CTLR_DS) || !attrs.secure) {
            /* RAZ/WI if security disabled, or if
             * security enabled and this is an NS access
             */
            return MEMTX_OK;
        }
        cs->gicr_igrpmodr0 = value;
        gicv3_redist_update(cs);
        return MEMTX_OK;
    case GICR_NSACR:
        if ((cs->gic->gicd_ctlr & GICD_CTLR_DS) || !attrs.secure) {
            /* RAZ/WI if security disabled, or if
             * security enabled and this is an NS access
             */
            return MEMTX_OK;
        }
        cs->gicr_nsacr = value;
        /* no update required as this only affects access permission checks */
        return MEMTX_OK;
    case GICR_IIDR:
    case GICR_TYPER:
    case GICR_IDREGS ... GICR_IDREGS + 0x2f:
        /* RO registers, ignore the write */
        qemu_log_mask(LOG_GUEST_ERROR,
                      "%s: invalid guest write to RO register at offset "
                      TARGET_FMT_plx "\n", __func__, offset);
        return MEMTX_OK;
    default:
        return MEMTX_ERROR;
    }
}
  356. static MemTxResult gicr_readll(GICv3CPUState *cs, hwaddr offset,
  357. uint64_t *data, MemTxAttrs attrs)
  358. {
  359. switch (offset) {
  360. case GICR_TYPER:
  361. *data = cs->gicr_typer;
  362. return MEMTX_OK;
  363. case GICR_PROPBASER:
  364. *data = cs->gicr_propbaser;
  365. return MEMTX_OK;
  366. case GICR_PENDBASER:
  367. *data = cs->gicr_pendbaser;
  368. return MEMTX_OK;
  369. default:
  370. return MEMTX_ERROR;
  371. }
  372. }
  373. static MemTxResult gicr_writell(GICv3CPUState *cs, hwaddr offset,
  374. uint64_t value, MemTxAttrs attrs)
  375. {
  376. switch (offset) {
  377. case GICR_PROPBASER:
  378. cs->gicr_propbaser = value;
  379. return MEMTX_OK;
  380. case GICR_PENDBASER:
  381. cs->gicr_pendbaser = value;
  382. return MEMTX_OK;
  383. case GICR_TYPER:
  384. /* RO register, ignore the write */
  385. qemu_log_mask(LOG_GUEST_ERROR,
  386. "%s: invalid guest write to RO register at offset "
  387. TARGET_FMT_plx "\n", __func__, offset);
  388. return MEMTX_OK;
  389. default:
  390. return MEMTX_ERROR;
  391. }
  392. }
MemTxResult gicv3_redist_read(void *opaque, hwaddr offset, uint64_t *data,
                              unsigned size, MemTxAttrs attrs)
{
    /*
     * MMIO read entry point for the whole redistributor region:
     * locate the per-CPU redistributor the offset falls in, then
     * dispatch on access size. Reserved registers read as zero
     * (never a bus error) but are logged as guest errors.
     */
    GICv3State *s = opaque;
    GICv3CPUState *cs;
    MemTxResult r;
    int cpuidx;

    /* The memory API guarantees size-aligned accesses */
    assert((offset & (size - 1)) == 0);

    /* This region covers all the redistributor pages; there are
     * (for GICv3) two 64K pages per CPU. At the moment they are
     * all contiguous (ie in this one region), though we might later
     * want to allow splitting of redistributor pages into several
     * blocks so we can support more CPUs.
     */
    cpuidx = offset / 0x20000;
    offset %= 0x20000;
    assert(cpuidx < s->num_cpu);

    cs = &s->cpu[cpuidx];

    switch (size) {
    case 1:
        r = gicr_readb(cs, offset, data, attrs);
        break;
    case 4:
        r = gicr_readl(cs, offset, data, attrs);
        break;
    case 8:
        r = gicr_readll(cs, offset, data, attrs);
        break;
    default:
        /* 2-byte accesses are not supported by any register */
        r = MEMTX_ERROR;
        break;
    }

    if (r == MEMTX_ERROR) {
        qemu_log_mask(LOG_GUEST_ERROR,
                      "%s: invalid guest read at offset " TARGET_FMT_plx
                      "size %u\n", __func__, offset, size);
        trace_gicv3_redist_badread(gicv3_redist_affid(cs), offset,
                                   size, attrs.secure);
        /* The spec requires that reserved registers are RAZ/WI;
         * so use MEMTX_ERROR returns from leaf functions as a way to
         * trigger the guest-error logging but don't return it to
         * the caller, or we'll cause a spurious guest data abort.
         */
        r = MEMTX_OK;
        *data = 0;
    } else {
        trace_gicv3_redist_read(gicv3_redist_affid(cs), offset, *data,
                                size, attrs.secure);
    }
    return r;
}
MemTxResult gicv3_redist_write(void *opaque, hwaddr offset, uint64_t data,
                               unsigned size, MemTxAttrs attrs)
{
    /*
     * MMIO write entry point for the whole redistributor region:
     * locate the per-CPU redistributor the offset falls in, then
     * dispatch on access size. Writes to reserved registers are
     * ignored (never a bus error) but are logged as guest errors.
     */
    GICv3State *s = opaque;
    GICv3CPUState *cs;
    MemTxResult r;
    int cpuidx;

    /* The memory API guarantees size-aligned accesses */
    assert((offset & (size - 1)) == 0);

    /* This region covers all the redistributor pages; there are
     * (for GICv3) two 64K pages per CPU. At the moment they are
     * all contiguous (ie in this one region), though we might later
     * want to allow splitting of redistributor pages into several
     * blocks so we can support more CPUs.
     */
    cpuidx = offset / 0x20000;
    offset %= 0x20000;
    assert(cpuidx < s->num_cpu);

    cs = &s->cpu[cpuidx];

    switch (size) {
    case 1:
        r = gicr_writeb(cs, offset, data, attrs);
        break;
    case 4:
        r = gicr_writel(cs, offset, data, attrs);
        break;
    case 8:
        r = gicr_writell(cs, offset, data, attrs);
        break;
    default:
        /* 2-byte accesses are not supported by any register */
        r = MEMTX_ERROR;
        break;
    }

    if (r == MEMTX_ERROR) {
        qemu_log_mask(LOG_GUEST_ERROR,
                      "%s: invalid guest write at offset " TARGET_FMT_plx
                      "size %u\n", __func__, offset, size);
        trace_gicv3_redist_badwrite(gicv3_redist_affid(cs), offset, data,
                                    size, attrs.secure);
        /* The spec requires that reserved registers are RAZ/WI;
         * so use MEMTX_ERROR returns from leaf functions as a way to
         * trigger the guest-error logging but don't return it to
         * the caller, or we'll cause a spurious guest data abort.
         */
        r = MEMTX_OK;
    } else {
        trace_gicv3_redist_write(gicv3_redist_affid(cs), offset, data,
                                 size, attrs.secure);
    }
    return r;
}
  494. void gicv3_redist_set_irq(GICv3CPUState *cs, int irq, int level)
  495. {
  496. /* Update redistributor state for a change in an external PPI input line */
  497. if (level == extract32(cs->level, irq, 1)) {
  498. return;
  499. }
  500. trace_gicv3_redist_set_irq(gicv3_redist_affid(cs), irq, level);
  501. cs->level = deposit32(cs->level, irq, 1, level);
  502. if (level) {
  503. /* 0->1 edges latch the pending bit for edge-triggered interrupts */
  504. if (extract32(cs->edge_trigger, irq, 1)) {
  505. cs->gicr_ipendr0 = deposit32(cs->gicr_ipendr0, irq, 1, 1);
  506. }
  507. }
  508. gicv3_redist_update(cs);
  509. }
  510. void gicv3_redist_send_sgi(GICv3CPUState *cs, int grp, int irq, bool ns)
  511. {
  512. /* Update redistributor state for a generated SGI */
  513. int irqgrp = gicv3_irq_group(cs->gic, cs, irq);
  514. /* If we are asked for a Secure Group 1 SGI and it's actually
  515. * configured as Secure Group 0 this is OK (subject to the usual
  516. * NSACR checks).
  517. */
  518. if (grp == GICV3_G1 && irqgrp == GICV3_G0) {
  519. grp = GICV3_G0;
  520. }
  521. if (grp != irqgrp) {
  522. return;
  523. }
  524. if (ns && !(cs->gic->gicd_ctlr & GICD_CTLR_DS)) {
  525. /* If security is enabled we must test the NSACR bits */
  526. int nsaccess = gicr_ns_access(cs, irq);
  527. if ((irqgrp == GICV3_G0 && nsaccess < 1) ||
  528. (irqgrp == GICV3_G1 && nsaccess < 2)) {
  529. return;
  530. }
  531. }
  532. /* OK, we can accept the SGI */
  533. trace_gicv3_redist_send_sgi(gicv3_redist_affid(cs), irq);
  534. cs->gicr_ipendr0 = deposit32(cs->gicr_ipendr0, irq, 1, 1);
  535. gicv3_redist_update(cs);
  536. }