sh_intc.c

/*
 * SuperH interrupt controller module
 *
 * Copyright (c) 2007 Magnus Damm
 * Based on sh_timer.c and arm_timer.c by Paul Brook
 * Copyright (c) 2005-2006 CodeSourcery.
 *
 * This code is licensed under the GPL.
 */

#include "qemu/osdep.h"
#include "qemu/log.h"
#include "cpu.h"
#include "hw/sh4/sh_intc.h"
#include "hw/irq.h"
#include "hw/sh4/sh.h"
#include "trace.h"
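
/*
 * Adjust a source's enable refcount and/or assertion count by the given
 * deltas (+1, 0 or -1), recompute its pending state, and raise or clear
 * the CPU's hard interrupt line when the controller-wide pending count
 * crosses zero.
 */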
void sh_intc_toggle_source(struct intc_source *source,
                           int enable_adj, int assert_adj)
{
    int enable_changed = 0;
    int pending_changed = 0;
    int old_pending;

    if (source->enable_count == source->enable_max && enable_adj == -1) {
        enable_changed = -1;
    }
    source->enable_count += enable_adj;

    if (source->enable_count == source->enable_max) {
        enable_changed = 1;
    }
    source->asserted += assert_adj;

    old_pending = source->pending;
    source->pending = source->asserted &&
        (source->enable_count == source->enable_max);

    if (old_pending != source->pending) {
        pending_changed = 1;
    }
    if (pending_changed) {
        if (source->pending) {
            source->parent->pending++;
            if (source->parent->pending == 1) {
                cpu_interrupt(first_cpu, CPU_INTERRUPT_HARD);
            }
        } else {
            source->parent->pending--;
            if (source->parent->pending == 0) {
                cpu_reset_interrupt(first_cpu, CPU_INTERRUPT_HARD);
            }
        }
    }

    if (enable_changed || assert_adj || pending_changed) {
        trace_sh_intc_sources(source->parent->pending, source->asserted,
                              source->enable_count, source->enable_max,
                              source->vect, source->asserted ? "asserted " :
                              assert_adj ? "deasserted" : "",
                              enable_changed == 1 ? "enabled " :
                              enable_changed == -1 ? "disabled " : "",
                              source->pending ? "pending" : "");
    }
}
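
/* qemu_irq input handler: turn a level change on line n into an assert delta */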
static void sh_intc_set_irq(void *opaque, int n, int level)
{
    struct intc_desc *desc = opaque;
    struct intc_source *source = &desc->sources[n];

    if (level && !source->asserted) {
        sh_intc_toggle_source(source, 0, 1);
    } else if (!level && source->asserted) {
        sh_intc_toggle_source(source, 0, -1);
    }
}
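
/*
 * Return the vector of the first pending source; -1 if all interrupt
 * levels are masked (imask == 0x0f). The function asserts that a pending
 * source exists in every other case.
 */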
int sh_intc_get_pending_vector(struct intc_desc *desc, int imask)
{
    unsigned int i;

    /* slow: use a linked list of pending sources instead */
    /* wrong: should take interrupt priority into account (one list per priority) */

    if (imask == 0x0f) {
        return -1; /* FIXME, update code to include priority per source */
    }

    for (i = 0; i < desc->nr_sources; i++) {
        struct intc_source *source = &desc->sources[i];

        if (source->pending) {
            trace_sh_intc_pending(desc->pending, source->vect);
            return source->vect;
        }
    }
    g_assert_not_reached();
}
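
/*
 * Decide how a write to 'address' behaves: half of a dual set/clear
 * register pair, a plain enable register, or a plain mask register.
 * Register addresses are compared in the A7 (physical) address segment.
 */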
typedef enum {
    INTC_MODE_NONE,
    INTC_MODE_DUAL_SET,
    INTC_MODE_DUAL_CLR,
    INTC_MODE_ENABLE_REG,
    INTC_MODE_MASK_REG,
} SHIntCMode;

#define INTC_MODE_IS_PRIO 0x80

static SHIntCMode sh_intc_mode(unsigned long address, unsigned long set_reg,
                               unsigned long clr_reg)
{
    if (address != A7ADDR(set_reg) && address != A7ADDR(clr_reg)) {
        return INTC_MODE_NONE;
    }
    if (set_reg && clr_reg) {
        return address == A7ADDR(set_reg) ?
               INTC_MODE_DUAL_SET : INTC_MODE_DUAL_CLR;
    }
    return set_reg ? INTC_MODE_ENABLE_REG : INTC_MODE_MASK_REG;
}
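
/*
 * Find the mask or priority register that decodes 'address' and return
 * its backing value, enum_id table, bitfield layout (field count minus
 * one, and field width) and access mode.
 */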
static void sh_intc_locate(struct intc_desc *desc,
                           unsigned long address,
                           unsigned long **datap,
                           intc_enum **enums,
                           unsigned int *first,
                           unsigned int *width,
                           unsigned int *modep)
{
    SHIntCMode mode;
    unsigned int i;

    /* this is slow but works for now */

    if (desc->mask_regs) {
        for (i = 0; i < desc->nr_mask_regs; i++) {
            struct intc_mask_reg *mr = &desc->mask_regs[i];

            mode = sh_intc_mode(address, mr->set_reg, mr->clr_reg);
            if (mode != INTC_MODE_NONE) {
                *modep = mode;
                *datap = &mr->value;
                *enums = mr->enum_ids;
                *first = mr->reg_width - 1;
                *width = 1;
                return;
            }
        }
    }
    if (desc->prio_regs) {
        for (i = 0; i < desc->nr_prio_regs; i++) {
            struct intc_prio_reg *pr = &desc->prio_regs[i];

            mode = sh_intc_mode(address, pr->set_reg, pr->clr_reg);
            if (mode != INTC_MODE_NONE) {
                *modep = mode | INTC_MODE_IS_PRIO;
                *datap = &pr->value;
                *enums = pr->enum_ids;
                *first = pr->reg_width / pr->field_width - 1;
                *width = pr->field_width;
                return;
            }
        }
    }
    g_assert_not_reached();
}
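
/*
 * Enable or disable a source by enum id. Group pseudo-sources carry no
 * vector and chain their members through next_enum_id, so the toggle
 * recurses across the whole group.
 */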
static void sh_intc_toggle_mask(struct intc_desc *desc, intc_enum id,
                                int enable, int is_group)
{
    struct intc_source *source = &desc->sources[id];

    if (!id) {
        return;
    }
    if (!source->next_enum_id && (!source->enable_max || !source->vect)) {
        qemu_log_mask(LOG_UNIMP,
                      "sh_intc: reserved interrupt source %d modified\n", id);
        return;
    }
    if (source->vect) {
        sh_intc_toggle_source(source, enable ? 1 : -1, 0);
    }
    if ((is_group || !source->vect) && source->next_enum_id) {
        sh_intc_toggle_mask(desc, source->next_enum_id, enable, 1);
    }
    if (!source->vect) {
        trace_sh_intc_set(id, !!enable);
    }
}
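
/* MMIO read: return the stored value of whichever register decodes the offset */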
static uint64_t sh_intc_read(void *opaque, hwaddr offset, unsigned size)
{
    struct intc_desc *desc = opaque;
    intc_enum *enum_ids;
    unsigned int first;
    unsigned int width;
    unsigned int mode;
    unsigned long *valuep;

    sh_intc_locate(desc, (unsigned long)offset, &valuep,
                   &enum_ids, &first, &width, &mode);
    trace_sh_intc_read(size, (uint64_t)offset, *valuep);
    return *valuep;
}
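
/*
 * MMIO write: fold the written value into the stored one according to the
 * register mode (dual set/clear registers read-modify-write), then toggle
 * every source whose bitfield actually changed.
 */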
static void sh_intc_write(void *opaque, hwaddr offset,
                          uint64_t value, unsigned size)
{
    struct intc_desc *desc = opaque;
    intc_enum *enum_ids;
    unsigned int first;
    unsigned int width;
    unsigned int mode;
    unsigned long *valuep;
    unsigned int k;
    unsigned long mask;

    trace_sh_intc_write(size, (uint64_t)offset, value);
    sh_intc_locate(desc, (unsigned long)offset, &valuep,
                   &enum_ids, &first, &width, &mode);
    switch (mode) {
    case INTC_MODE_ENABLE_REG | INTC_MODE_IS_PRIO:
        break;
    case INTC_MODE_DUAL_SET:
        value |= *valuep;
        break;
    case INTC_MODE_DUAL_CLR:
        value = *valuep & ~value;
        break;
    default:
        g_assert_not_reached();
    }

    for (k = 0; k <= first; k++) {
        mask = (1 << width) - 1;
        mask <<= (first - k) * width;

        if ((*valuep & mask) != (value & mask)) {
            sh_intc_toggle_mask(desc, enum_ids[k], value & mask, 0);
        }
    }
    *valuep = value;
}

static const MemoryRegionOps sh_intc_ops = {
    .read = sh_intc_read,
    .write = sh_intc_write,
    .endianness = DEVICE_NATIVE_ENDIAN,
};
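
/*
 * Count the mask, priority and group slots that refer to 'source'; the
 * total becomes enable_max, the refcount the source needs before it is
 * considered enabled.
 */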
static void sh_intc_register_source(struct intc_desc *desc,
                                    intc_enum source,
                                    struct intc_group *groups,
                                    int nr_groups)
{
    unsigned int i, k;
    intc_enum id;

    if (desc->mask_regs) {
        for (i = 0; i < desc->nr_mask_regs; i++) {
            struct intc_mask_reg *mr = &desc->mask_regs[i];

            for (k = 0; k < ARRAY_SIZE(mr->enum_ids); k++) {
                id = mr->enum_ids[k];
                if (id && id == source) {
                    desc->sources[id].enable_max++;
                }
            }
        }
    }
    if (desc->prio_regs) {
        for (i = 0; i < desc->nr_prio_regs; i++) {
            struct intc_prio_reg *pr = &desc->prio_regs[i];

            for (k = 0; k < ARRAY_SIZE(pr->enum_ids); k++) {
                id = pr->enum_ids[k];
                if (id && id == source) {
                    desc->sources[id].enable_max++;
                }
            }
        }
    }
    if (groups) {
        for (i = 0; i < nr_groups; i++) {
            struct intc_group *gr = &groups[i];

            for (k = 0; k < ARRAY_SIZE(gr->enum_ids); k++) {
                id = gr->enum_ids[k];
                if (id && id == source) {
                    desc->sources[id].enable_max++;
                }
            }
        }
    }
}
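
/*
 * Assign vector numbers to sources and link each group's members into a
 * next_enum_id chain so a group toggle reaches every member.
 */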
void sh_intc_register_sources(struct intc_desc *desc,
                              struct intc_vect *vectors,
                              int nr_vectors,
                              struct intc_group *groups,
                              int nr_groups)
{
    unsigned int i, k;
    intc_enum id;
    struct intc_source *s;

    for (i = 0; i < nr_vectors; i++) {
        struct intc_vect *vect = &vectors[i];

        sh_intc_register_source(desc, vect->enum_id, groups, nr_groups);
        id = vect->enum_id;
        if (id) {
            s = &desc->sources[id];
            s->vect = vect->vect;
            trace_sh_intc_register("source", vect->enum_id, s->vect,
                                   s->enable_count, s->enable_max);
        }
    }

    if (groups) {
        for (i = 0; i < nr_groups; i++) {
            struct intc_group *gr = &groups[i];

            id = gr->enum_id;
            s = &desc->sources[id];
            s->next_enum_id = gr->enum_ids[0];

            for (k = 1; k < ARRAY_SIZE(gr->enum_ids); k++) {
                if (gr->enum_ids[k]) {
                    id = gr->enum_ids[k - 1];
                    s = &desc->sources[id];
                    s->next_enum_id = gr->enum_ids[k];
                }
            }
            trace_sh_intc_register("group", gr->enum_id, 0xffff,
                                   s->enable_count, s->enable_max);
        }
    }
}
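
/*
 * Map a single register at both its P4 and A7 addresses by adding two
 * 4-byte aliases of the controller's I/O region to the system memory map.
 */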
static unsigned int sh_intc_register(MemoryRegion *sysmem,
                                     struct intc_desc *desc,
                                     const unsigned long address,
                                     const char *type,
                                     const char *action,
                                     const unsigned int index)
{
    char name[60];
    MemoryRegion *iomem, *iomem_p4, *iomem_a7;

    if (!address) {
        return 0;
    }

    iomem = &desc->iomem;
    iomem_p4 = &desc->iomem_aliases[index];
    iomem_a7 = iomem_p4 + 1;

    snprintf(name, sizeof(name), "intc-%s-%s-%s", type, action, "p4");
    memory_region_init_alias(iomem_p4, NULL, name, iomem, A7ADDR(address), 4);
    memory_region_add_subregion(sysmem, P4ADDR(address), iomem_p4);

    snprintf(name, sizeof(name), "intc-%s-%s-%s", type, action, "a7");
    memory_region_init_alias(iomem_a7, NULL, name, iomem, A7ADDR(address), 4);
    memory_region_add_subregion(sysmem, A7ADDR(address), iomem_a7);

    /* the caller advances its alias index by the return value */
    return 2;
}
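
/*
 * Initialise controller state, allocate per-source bookkeeping and qemu_irq
 * inputs, and map every configured mask and priority register.
 */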
int sh_intc_init(MemoryRegion *sysmem,
                 struct intc_desc *desc,
                 int nr_sources,
                 struct intc_mask_reg *mask_regs,
                 int nr_mask_regs,
                 struct intc_prio_reg *prio_regs,
                 int nr_prio_regs)
{
    unsigned int i, j;

    desc->pending = 0;
    desc->nr_sources = nr_sources;
    desc->mask_regs = mask_regs;
    desc->nr_mask_regs = nr_mask_regs;
    desc->prio_regs = prio_regs;
    desc->nr_prio_regs = nr_prio_regs;
    /* Allocate 4 MemoryRegions per register (2 actions * 2 aliases) */
    desc->iomem_aliases = g_new0(MemoryRegion,
                                 (nr_mask_regs + nr_prio_regs) * 4);
    desc->sources = g_new0(struct intc_source, nr_sources);
    for (i = 0; i < nr_sources; i++) {
        desc->sources[i].parent = desc;
    }
    desc->irqs = qemu_allocate_irqs(sh_intc_set_irq, desc, nr_sources);
    memory_region_init_io(&desc->iomem, NULL, &sh_intc_ops, desc, "intc",
                          0x100000000ULL);

    j = 0;
    if (desc->mask_regs) {
        for (i = 0; i < desc->nr_mask_regs; i++) {
            struct intc_mask_reg *mr = &desc->mask_regs[i];

            j += sh_intc_register(sysmem, desc, mr->set_reg, "mask", "set", j);
            j += sh_intc_register(sysmem, desc, mr->clr_reg, "mask", "clr", j);
        }
    }
    if (desc->prio_regs) {
        for (i = 0; i < desc->nr_prio_regs; i++) {
            struct intc_prio_reg *pr = &desc->prio_regs[i];

            j += sh_intc_register(sysmem, desc, pr->set_reg, "prio", "set", j);
            j += sh_intc_register(sysmem, desc, pr->clr_reg, "prio", "clr", j);
        }
    }
    return 0;
}

/*
 * Assert level <n> IRL interrupt.
 * 0: deassert. 1: lowest priority, ... 15: highest priority.
 */
void sh_intc_set_irl(void *opaque, int n, int level)
{
    struct intc_source *s = opaque;
    int i, irl = level ^ 15;
    intc_enum id = s->next_enum_id;

    for (i = 0; id; id = s->next_enum_id, i++) {
        s = &s->parent->sources[id];
        if (i == irl) {
            sh_intc_toggle_source(s, s->enable_count ? 0 : 1,
                                  s->asserted ? 0 : 1);
        } else if (s->asserted) {
            sh_intc_toggle_source(s, 0, -1);
        }
    }
}