/*
 * ASPEED XDMA Controller
 * Eddie James <eajames@linux.ibm.com>
 *
 * Copyright (C) 2019 IBM Corp
 * SPDX-License-Identifier: GPL-2.0-or-later
 */
#include "qemu/osdep.h"
#include "qemu/log.h"
#include "qemu/error-report.h"
#include "hw/irq.h"
#include "hw/misc/aspeed_xdma.h"
#include "migration/vmstate.h"
#include "qapi/error.h"
#include "trace.h"
/* BMC command queue registers (offsets into the XDMA MMIO window). */
#define XDMA_BMC_CMDQ_ADDR 0x10
#define XDMA_BMC_CMDQ_ENDP 0x14
#define XDMA_BMC_CMDQ_WRP 0x18
/* Writable bits of the command-queue endpoint/write/read pointers. */
#define XDMA_BMC_CMDQ_W_MASK 0x0003FFFF
#define XDMA_BMC_CMDQ_RDP 0x1C
/* Writing this magic value to RDP suppresses the next completion IRQ. */
#define XDMA_BMC_CMDQ_RDP_MAGIC 0xEE882266

/* Interrupt enable/control register and its upstream/downstream bits. */
#define XDMA_IRQ_ENG_CTRL 0x20
#define XDMA_IRQ_ENG_CTRL_US_COMP BIT(4)
#define XDMA_IRQ_ENG_CTRL_DS_COMP BIT(5)
#define XDMA_IRQ_ENG_CTRL_W_MASK 0xBFEFF07F

/* Interrupt status register: upstream/downstream completion flags. */
#define XDMA_IRQ_ENG_STAT 0x24
#define XDMA_IRQ_ENG_STAT_US_COMP BIT(4)
#define XDMA_IRQ_ENG_STAT_DS_COMP BIT(5)
#define XDMA_IRQ_ENG_STAT_RESET 0xF8000000

/* Size of the MMIO region exposed by this device. */
#define XDMA_MEM_SIZE 0x1000

/* Convert a byte offset into an index into the 32-bit register array. */
#define TO_REG(addr) ((addr) / sizeof(uint32_t))
  32. static uint64_t aspeed_xdma_read(void *opaque, hwaddr addr, unsigned int size)
  33. {
  34. uint32_t val = 0;
  35. AspeedXDMAState *xdma = opaque;
  36. if (addr < ASPEED_XDMA_REG_SIZE) {
  37. val = xdma->regs[TO_REG(addr)];
  38. }
  39. return (uint64_t)val;
  40. }
/*
 * MMIO write handler.
 *
 * The model does not perform real DMA. Instead, a write to the command
 * queue write pointer (WRP) is treated as "all queued commands complete
 * immediately": the read pointer is snapped to the write pointer and the
 * completion status bits are latched, raising the IRQ if enabled. Writing
 * the magic value to RDP arms a one-shot suppression of that completion
 * (the firmware uses this handshake to reset the queue pointers).
 */
static void aspeed_xdma_write(void *opaque, hwaddr addr, uint64_t val,
                              unsigned int size)
{
    unsigned int idx;
    uint32_t val32 = (uint32_t)val;
    AspeedXDMAState *xdma = opaque;

    /* Ignore writes outside the modelled register file. */
    if (addr >= ASPEED_XDMA_REG_SIZE) {
        return;
    }

    switch (addr) {
    case XDMA_BMC_CMDQ_ENDP:
        /* Only the low 18 bits of the endpoint pointer are writable. */
        xdma->regs[TO_REG(addr)] = val32 & XDMA_BMC_CMDQ_W_MASK;
        break;
    case XDMA_BMC_CMDQ_WRP:
        idx = TO_REG(addr);
        xdma->regs[idx] = val32 & XDMA_BMC_CMDQ_W_MASK;
        /* Commands "complete" instantly: read pointer catches up to WRP. */
        xdma->regs[TO_REG(XDMA_BMC_CMDQ_RDP)] = xdma->regs[idx];

        trace_aspeed_xdma_write(addr, val);

        if (xdma->bmc_cmdq_readp_set) {
            /* One-shot suppression armed by the RDP magic write: consume it. */
            xdma->bmc_cmdq_readp_set = 0;
        } else {
            /* Latch both completion bits and raise the IRQ if unmasked. */
            xdma->regs[TO_REG(XDMA_IRQ_ENG_STAT)] |=
                XDMA_IRQ_ENG_STAT_US_COMP | XDMA_IRQ_ENG_STAT_DS_COMP;
            if (xdma->regs[TO_REG(XDMA_IRQ_ENG_CTRL)] &
                (XDMA_IRQ_ENG_CTRL_US_COMP | XDMA_IRQ_ENG_CTRL_DS_COMP))
                qemu_irq_raise(xdma->irq);
        }
        break;
    case XDMA_BMC_CMDQ_RDP:
        trace_aspeed_xdma_write(addr, val);

        /* Magic value arms suppression of the next WRP completion IRQ. */
        if (val32 == XDMA_BMC_CMDQ_RDP_MAGIC) {
            xdma->bmc_cmdq_readp_set = 1;
        }
        break;
    case XDMA_IRQ_ENG_CTRL:
        /* Mask off the read-only/reserved bits before storing. */
        xdma->regs[TO_REG(addr)] = val32 & XDMA_IRQ_ENG_CTRL_W_MASK;
        break;
    case XDMA_IRQ_ENG_STAT:
        trace_aspeed_xdma_write(addr, val);

        idx = TO_REG(addr);
        /* Status bits are write-1-to-clear; clearing them drops the IRQ. */
        if (val32 & (XDMA_IRQ_ENG_STAT_US_COMP | XDMA_IRQ_ENG_STAT_DS_COMP)) {
            xdma->regs[idx] &=
                ~(XDMA_IRQ_ENG_STAT_US_COMP | XDMA_IRQ_ENG_STAT_DS_COMP);
            qemu_irq_lower(xdma->irq);
        }
        break;
    default:
        /* All other registers are plain read/write storage. */
        xdma->regs[TO_REG(addr)] = val32;
        break;
    }
}
/* MMIO access dispatch table: 32-bit aligned accesses only. */
static const MemoryRegionOps aspeed_xdma_ops = {
    .read = aspeed_xdma_read,
    .write = aspeed_xdma_write,
    .endianness = DEVICE_NATIVE_ENDIAN,
    .valid.min_access_size = 4,
    .valid.max_access_size = 4,
};
  99. static void aspeed_xdma_realize(DeviceState *dev, Error **errp)
  100. {
  101. SysBusDevice *sbd = SYS_BUS_DEVICE(dev);
  102. AspeedXDMAState *xdma = ASPEED_XDMA(dev);
  103. sysbus_init_irq(sbd, &xdma->irq);
  104. memory_region_init_io(&xdma->iomem, OBJECT(xdma), &aspeed_xdma_ops, xdma,
  105. TYPE_ASPEED_XDMA, XDMA_MEM_SIZE);
  106. sysbus_init_mmio(sbd, &xdma->iomem);
  107. }
  108. static void aspeed_xdma_reset(DeviceState *dev)
  109. {
  110. AspeedXDMAState *xdma = ASPEED_XDMA(dev);
  111. xdma->bmc_cmdq_readp_set = 0;
  112. memset(xdma->regs, 0, ASPEED_XDMA_REG_SIZE);
  113. xdma->regs[TO_REG(XDMA_IRQ_ENG_STAT)] = XDMA_IRQ_ENG_STAT_RESET;
  114. qemu_irq_lower(xdma->irq);
  115. }
/* Migration state: the raw register array is the only persistent state. */
static const VMStateDescription aspeed_xdma_vmstate = {
    .name = TYPE_ASPEED_XDMA,
    .version_id = 1,
    .fields = (VMStateField[]) {
        VMSTATE_UINT32_ARRAY(regs, AspeedXDMAState, ASPEED_XDMA_NUM_REGS),
        VMSTATE_END_OF_LIST(),
    },
};
  124. static void aspeed_xdma_class_init(ObjectClass *classp, void *data)
  125. {
  126. DeviceClass *dc = DEVICE_CLASS(classp);
  127. dc->realize = aspeed_xdma_realize;
  128. dc->reset = aspeed_xdma_reset;
  129. dc->vmsd = &aspeed_xdma_vmstate;
  130. }
/* QOM type descriptor for the XDMA sysbus device. */
static const TypeInfo aspeed_xdma_info = {
    .name = TYPE_ASPEED_XDMA,
    .parent = TYPE_SYS_BUS_DEVICE,
    .instance_size = sizeof(AspeedXDMAState),
    .class_init = aspeed_xdma_class_init,
};
/* Register the type with QOM at module-init time. */
static void aspeed_xdma_register_type(void)
{
    type_register_static(&aspeed_xdma_info);
}
type_init(aspeed_xdma_register_type);