xilinx_axidma.c

/*
 * QEMU model of Xilinx AXI-DMA block.
 *
 * Copyright (c) 2011 Edgar E. Iglesias.
 *
 * Permission is hereby granted, free of charge, to any person obtaining a copy
 * of this software and associated documentation files (the "Software"), to deal
 * in the Software without restriction, including without limitation the rights
 * to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
 * copies of the Software, and to permit persons to whom the Software is
 * furnished to do so, subject to the following conditions:
 *
 * The above copyright notice and this permission notice shall be included in
 * all copies or substantial portions of the Software.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
 * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
 * THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
 * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
 * OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN
 * THE SOFTWARE.
 */

#include "qemu/osdep.h"
#include "hw/sysbus.h"
#include "qapi/error.h"
#include "qemu/timer.h"
#include "hw/hw.h"
#include "hw/irq.h"
#include "hw/ptimer.h"
#include "hw/qdev-properties.h"
#include "qemu/log.h"
#include "qemu/module.h"
#include "hw/stream.h"

#define D(x)

#define TYPE_XILINX_AXI_DMA "xlnx.axi-dma"
#define TYPE_XILINX_AXI_DMA_DATA_STREAM "xilinx-axi-dma-data-stream"
#define TYPE_XILINX_AXI_DMA_CONTROL_STREAM "xilinx-axi-dma-control-stream"

#define XILINX_AXI_DMA(obj) \
    OBJECT_CHECK(XilinxAXIDMA, (obj), TYPE_XILINX_AXI_DMA)
#define XILINX_AXI_DMA_DATA_STREAM(obj) \
    OBJECT_CHECK(XilinxAXIDMAStreamSlave, (obj), \
    TYPE_XILINX_AXI_DMA_DATA_STREAM)
#define XILINX_AXI_DMA_CONTROL_STREAM(obj) \
    OBJECT_CHECK(XilinxAXIDMAStreamSlave, (obj), \
    TYPE_XILINX_AXI_DMA_CONTROL_STREAM)

#define R_DMACR     (0x00 / 4)
#define R_DMASR     (0x04 / 4)
#define R_CURDESC   (0x08 / 4)
#define R_TAILDESC  (0x10 / 4)
#define R_MAX       (0x30 / 4)

#define CONTROL_PAYLOAD_WORDS 5
#define CONTROL_PAYLOAD_SIZE (CONTROL_PAYLOAD_WORDS * (sizeof(uint32_t)))

typedef struct XilinxAXIDMA XilinxAXIDMA;
typedef struct XilinxAXIDMAStreamSlave XilinxAXIDMAStreamSlave;

enum {
    DMACR_RUNSTOP = 1,
    DMACR_TAILPTR_MODE = 2,
    DMACR_RESET = 4
};

enum {
    DMASR_HALTED = 1,
    DMASR_IDLE = 2,
    DMASR_IOC_IRQ = 1 << 12,
    DMASR_DLY_IRQ = 1 << 13,

    DMASR_IRQ_MASK = 7 << 12
};
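
/*
 * Scatter-gather descriptor as laid out in guest memory. Fields are stored
 * little-endian and converted on load/store (see stream_desc_load and
 * stream_desc_store below). The app[] words carry the per-packet
 * control/status payload exchanged with the attached stream device.
 */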
struct SDesc {
    uint64_t nxtdesc;
    uint64_t buffer_address;
    uint64_t reserved;
    uint32_t control;
    uint32_t status;
    uint8_t app[CONTROL_PAYLOAD_SIZE];
};

enum {
    SDESC_CTRL_EOF = (1 << 26),
    SDESC_CTRL_SOF = (1 << 27),

    SDESC_CTRL_LEN_MASK = (1 << 23) - 1
};

enum {
    SDESC_STATUS_EOF = (1 << 26),
    SDESC_STATUS_SOF_BIT = 27,
    SDESC_STATUS_SOF = (1 << SDESC_STATUS_SOF_BIT),
    SDESC_STATUS_COMPLETE = (1 << 31)
};

struct Stream {
    ptimer_state *ptimer;
    qemu_irq irq;

    int nr;

    struct SDesc desc;
    int pos;
    unsigned int complete_cnt;
    uint32_t regs[R_MAX];
    uint8_t app[20];
    unsigned char txbuf[16 * 1024];
};

struct XilinxAXIDMAStreamSlave {
    Object parent;

    struct XilinxAXIDMA *dma;
};

struct XilinxAXIDMA {
    SysBusDevice busdev;
    MemoryRegion iomem;
    uint32_t freqhz;
    StreamSlave *tx_data_dev;
    StreamSlave *tx_control_dev;
    XilinxAXIDMAStreamSlave rx_data_dev;
    XilinxAXIDMAStreamSlave rx_control_dev;

    struct Stream streams[2];

    StreamCanPushNotifyFn notify;
    void *notify_opaque;
};

/*
 * Helper calls to extract info from descriptors and other trivial
 * state from regs.
 */
static inline int stream_desc_sof(struct SDesc *d)
{
    return d->control & SDESC_CTRL_SOF;
}

static inline int stream_desc_eof(struct SDesc *d)
{
    return d->control & SDESC_CTRL_EOF;
}

static inline int stream_resetting(struct Stream *s)
{
    return !!(s->regs[R_DMACR] & DMACR_RESET);
}

static inline int stream_running(struct Stream *s)
{
    return s->regs[R_DMACR] & DMACR_RUNSTOP;
}

static inline int stream_idle(struct Stream *s)
{
    return !!(s->regs[R_DMASR] & DMASR_IDLE);
}

static void stream_reset(struct Stream *s)
{
    s->regs[R_DMASR] = DMASR_HALTED;  /* Starts up halted. */
    s->regs[R_DMACR] = 1 << 16; /* Starts with one in compl threshold. */
}

/* Map an offset addr into a channel index. */
static inline int streamid_from_addr(hwaddr addr)
{
    int sid;

    sid = addr / (0x30);
    sid &= 1;
    return sid;
}

static void stream_desc_load(struct Stream *s, hwaddr addr)
{
    struct SDesc *d = &s->desc;

    cpu_physical_memory_read(addr, d, sizeof *d);

    /* Convert from LE into host endianness. */
    d->buffer_address = le64_to_cpu(d->buffer_address);
    d->nxtdesc = le64_to_cpu(d->nxtdesc);
    d->control = le32_to_cpu(d->control);
    d->status = le32_to_cpu(d->status);
}

static void stream_desc_store(struct Stream *s, hwaddr addr)
{
    struct SDesc *d = &s->desc;

    /* Convert from host endianness into LE. */
    d->buffer_address = cpu_to_le64(d->buffer_address);
    d->nxtdesc = cpu_to_le64(d->nxtdesc);
    d->control = cpu_to_le32(d->control);
    d->status = cpu_to_le32(d->status);
    cpu_physical_memory_write(addr, d, sizeof *d);
}

static void stream_update_irq(struct Stream *s)
{
    unsigned int pending, mask, irq;

    pending = s->regs[R_DMASR] & DMASR_IRQ_MASK;
    mask = s->regs[R_DMACR] & DMASR_IRQ_MASK;

    irq = pending & mask;

    qemu_set_irq(s->irq, !!irq);
}

static void stream_reload_complete_cnt(struct Stream *s)
{
    unsigned int comp_th;

    comp_th = (s->regs[R_DMACR] >> 16) & 0xff;
    s->complete_cnt = comp_th;
}

static void timer_hit(void *opaque)
{
    struct Stream *s = opaque;

    stream_reload_complete_cnt(s);
    s->regs[R_DMASR] |= DMASR_DLY_IRQ;
    stream_update_irq(s);
}
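
/*
 * Called when a packet completes on a channel. Decrements the
 * interrupt-coalescing counter, sets the IOC status bit once the threshold
 * is reached (the caller raises the IRQ line via stream_update_irq), and
 * (re)arms the delay timer if a completion delay is programmed in DMACR.
 */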
static void stream_complete(struct Stream *s)
{
    unsigned int comp_delay;

    /* Start the delayed timer. */
    ptimer_transaction_begin(s->ptimer);
    comp_delay = s->regs[R_DMACR] >> 24;
    if (comp_delay) {
        ptimer_stop(s->ptimer);
        ptimer_set_count(s->ptimer, comp_delay);
        ptimer_run(s->ptimer, 1);
    }

    s->complete_cnt--;
    if (s->complete_cnt == 0) {
        /* Raise the IOC irq. */
        s->regs[R_DMASR] |= DMASR_IOC_IRQ;
        stream_reload_complete_cnt(s);
    }
    ptimer_transaction_commit(s->ptimer);
}
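
/*
 * MM2S path: walk the descriptor ring starting at CURDESC, gather the
 * buffer contents into txbuf, push the app words to the control stream at
 * SOF and the assembled packet to the data stream at EOF. Stops on an
 * already-completed descriptor or once the tail descriptor is processed.
 */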
static void stream_process_mem2s(struct Stream *s, StreamSlave *tx_data_dev,
                                 StreamSlave *tx_control_dev)
{
    uint32_t prev_d;
    unsigned int txlen;

    if (!stream_running(s) || stream_idle(s)) {
        return;
    }

    while (1) {
        stream_desc_load(s, s->regs[R_CURDESC]);

        if (s->desc.status & SDESC_STATUS_COMPLETE) {
            s->regs[R_DMASR] |= DMASR_HALTED;
            break;
        }

        if (stream_desc_sof(&s->desc)) {
            s->pos = 0;
            stream_push(tx_control_dev, s->desc.app, sizeof(s->desc.app));
        }

        txlen = s->desc.control & SDESC_CTRL_LEN_MASK;
        if ((txlen + s->pos) > sizeof s->txbuf) {
            hw_error("%s: too small internal txbuf! %d\n", __func__,
                     txlen + s->pos);
        }

        cpu_physical_memory_read(s->desc.buffer_address,
                                 s->txbuf + s->pos, txlen);
        s->pos += txlen;
        if (stream_desc_eof(&s->desc)) {
            stream_push(tx_data_dev, s->txbuf, s->pos);
            s->pos = 0;
            stream_complete(s);
        }

        /* Update the descriptor. */
        s->desc.status = txlen | SDESC_STATUS_COMPLETE;
        stream_desc_store(s, s->regs[R_CURDESC]);

        /* Advance. */
        prev_d = s->regs[R_CURDESC];
        s->regs[R_CURDESC] = s->desc.nxtdesc;
        if (prev_d == s->regs[R_TAILDESC]) {
            s->regs[R_DMASR] |= DMASR_IDLE;
            break;
        }
    }
}
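
/*
 * S2MM path: scatter an incoming stream packet into the buffers described
 * by the descriptor ring, marking SOF on the first descriptor and
 * EOF/COMPLETE as appropriate. Returns the number of bytes consumed.
 */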
static size_t stream_process_s2mem(struct Stream *s, unsigned char *buf,
                                   size_t len)
{
    uint32_t prev_d;
    unsigned int rxlen;
    size_t pos = 0;
    int sof = 1;

    if (!stream_running(s) || stream_idle(s)) {
        return 0;
    }

    while (len) {
        stream_desc_load(s, s->regs[R_CURDESC]);

        if (s->desc.status & SDESC_STATUS_COMPLETE) {
            s->regs[R_DMASR] |= DMASR_HALTED;
            break;
        }

        rxlen = s->desc.control & SDESC_CTRL_LEN_MASK;
        if (rxlen > len) {
            /* The remaining data fits in this descriptor's buffer. */
            rxlen = len;
        }

        cpu_physical_memory_write(s->desc.buffer_address, buf + pos, rxlen);
        len -= rxlen;
        pos += rxlen;

        /* Update the descriptor. */
        if (!len) {
            stream_complete(s);
            memcpy(s->desc.app, s->app, sizeof(s->desc.app));
            s->desc.status |= SDESC_STATUS_EOF;
        }

        s->desc.status |= sof << SDESC_STATUS_SOF_BIT;
        s->desc.status |= SDESC_STATUS_COMPLETE;
        stream_desc_store(s, s->regs[R_CURDESC]);
        sof = 0;

        /* Advance. */
        prev_d = s->regs[R_CURDESC];
        s->regs[R_CURDESC] = s->desc.nxtdesc;
        if (prev_d == s->regs[R_TAILDESC]) {
            s->regs[R_DMASR] |= DMASR_IDLE;
            break;
        }
    }

    return pos;
}

static void xilinx_axidma_reset(DeviceState *dev)
{
    int i;
    XilinxAXIDMA *s = XILINX_AXI_DMA(dev);

    for (i = 0; i < 2; i++) {
        stream_reset(&s->streams[i]);
    }
}
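
/*
 * Sink for the control stream of the S2MM channel: latches the fixed-size
 * app payload so it can be copied into the descriptor of the next
 * received packet.
 */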
static size_t
xilinx_axidma_control_stream_push(StreamSlave *obj, unsigned char *buf,
                                  size_t len)
{
    XilinxAXIDMAStreamSlave *cs = XILINX_AXI_DMA_CONTROL_STREAM(obj);
    struct Stream *s = &cs->dma->streams[1];

    if (len != CONTROL_PAYLOAD_SIZE) {
        hw_error("AXI DMA requires %d byte control stream payload\n",
                 (int)CONTROL_PAYLOAD_SIZE);
    }

    memcpy(s->app, buf, len);
    return len;
}
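
/*
 * Backpressure for the S2MM data stream: if the channel is not ready,
 * remember the notify callback so the producer can be woken up once the
 * guest pokes the S2MM register bank (see axidma_write).
 */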
static bool
xilinx_axidma_data_stream_can_push(StreamSlave *obj,
                                   StreamCanPushNotifyFn notify,
                                   void *notify_opaque)
{
    XilinxAXIDMAStreamSlave *ds = XILINX_AXI_DMA_DATA_STREAM(obj);
    struct Stream *s = &ds->dma->streams[1];

    if (!stream_running(s) || stream_idle(s)) {
        ds->dma->notify = notify;
        ds->dma->notify_opaque = notify_opaque;
        return false;
    }

    return true;
}

static size_t
xilinx_axidma_data_stream_push(StreamSlave *obj, unsigned char *buf, size_t len)
{
    XilinxAXIDMAStreamSlave *ds = XILINX_AXI_DMA_DATA_STREAM(obj);
    struct Stream *s = &ds->dma->streams[1];
    size_t ret;

    ret = stream_process_s2mem(s, buf, len);
    stream_update_irq(s);
    return ret;
}
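
/*
 * MMIO register accessors. The region holds two 0x30-byte banks:
 * offsets 0x00-0x2f address channel 0 (MM2S) and offsets 0x30-0x5f
 * address channel 1 (S2MM).
 */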
static uint64_t axidma_read(void *opaque, hwaddr addr,
                            unsigned size)
{
    XilinxAXIDMA *d = opaque;
    struct Stream *s;
    uint32_t r = 0;
    int sid;

    sid = streamid_from_addr(addr);
    s = &d->streams[sid];

    addr = addr % 0x30;
    addr >>= 2;
    switch (addr) {
    case R_DMACR:
        /* Simulate a one-cycle reset delay. */
        s->regs[addr] &= ~DMACR_RESET;
        r = s->regs[addr];
        break;
    case R_DMASR:
        s->regs[addr] &= 0xffff;
        s->regs[addr] |= (s->complete_cnt & 0xff) << 16;
        s->regs[addr] |= (ptimer_get_count(s->ptimer) & 0xff) << 24;
        r = s->regs[addr];
        break;
    default:
        r = s->regs[addr];
        D(qemu_log("%s ch=%d addr=" TARGET_FMT_plx " v=%x\n",
                   __func__, sid, addr * 4, r));
        break;
    }
    return r;
}
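
/*
 * Register writes. Writing TAILDESC on the MM2S channel kicks off
 * descriptor processing; any write to the S2MM bank re-notifies a stream
 * producer that was previously blocked in can_push().
 */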
static void axidma_write(void *opaque, hwaddr addr,
                         uint64_t value, unsigned size)
{
    XilinxAXIDMA *d = opaque;
    struct Stream *s;
    int sid;

    sid = streamid_from_addr(addr);
    s = &d->streams[sid];

    addr = addr % 0x30;
    addr >>= 2;
    switch (addr) {
    case R_DMACR:
        /* Tailptr mode is always on. */
        value |= DMACR_TAILPTR_MODE;
        /* Remember our previous reset state. */
        value |= (s->regs[addr] & DMACR_RESET);
        s->regs[addr] = value;

        if (value & DMACR_RESET) {
            stream_reset(s);
        }

        if ((value & 1) && !stream_resetting(s)) {
            /* Start processing. */
            s->regs[R_DMASR] &= ~(DMASR_HALTED | DMASR_IDLE);
        }
        stream_reload_complete_cnt(s);
        break;
    case R_DMASR:
        /* Mask away write to clear irq lines. */
        value &= ~(value & DMASR_IRQ_MASK);
        s->regs[addr] = value;
        break;
    case R_TAILDESC:
        s->regs[addr] = value;
        s->regs[R_DMASR] &= ~DMASR_IDLE; /* Not idle. */
        if (!sid) {
            stream_process_mem2s(s, d->tx_data_dev, d->tx_control_dev);
        }
        break;
    default:
        D(qemu_log("%s: ch=%d addr=" TARGET_FMT_plx " v=%x\n",
                   __func__, sid, addr * 4, (unsigned)value));
        s->regs[addr] = value;
        break;
    }

    if (sid == 1 && d->notify) {
        StreamCanPushNotifyFn notifytmp = d->notify;

        d->notify = NULL;
        notifytmp(d->notify_opaque);
    }
    stream_update_irq(s);
}

static const MemoryRegionOps axidma_ops = {
    .read = axidma_read,
    .write = axidma_write,
    .endianness = DEVICE_NATIVE_ENDIAN,
};
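
/*
 * Realize: expose a "dma" link property on the two rx stream sink objects,
 * point it back at this device, and set up the per-channel ptimers used
 * for delayed-interrupt emulation.
 */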
static void xilinx_axidma_realize(DeviceState *dev, Error **errp)
{
    XilinxAXIDMA *s = XILINX_AXI_DMA(dev);
    XilinxAXIDMAStreamSlave *ds = XILINX_AXI_DMA_DATA_STREAM(&s->rx_data_dev);
    XilinxAXIDMAStreamSlave *cs = XILINX_AXI_DMA_CONTROL_STREAM(
                                                            &s->rx_control_dev);
    Error *local_err = NULL;
    int i;

    object_property_add_link(OBJECT(ds), "dma", TYPE_XILINX_AXI_DMA,
                             (Object **)&ds->dma,
                             object_property_allow_set_link,
                             OBJ_PROP_LINK_STRONG,
                             &local_err);
    object_property_add_link(OBJECT(cs), "dma", TYPE_XILINX_AXI_DMA,
                             (Object **)&cs->dma,
                             object_property_allow_set_link,
                             OBJ_PROP_LINK_STRONG,
                             &local_err);
    if (local_err) {
        goto xilinx_axidma_realize_fail;
    }
    object_property_set_link(OBJECT(ds), OBJECT(s), "dma", &local_err);
    object_property_set_link(OBJECT(cs), OBJECT(s), "dma", &local_err);
    if (local_err) {
        goto xilinx_axidma_realize_fail;
    }

    for (i = 0; i < 2; i++) {
        struct Stream *st = &s->streams[i];

        st->nr = i;
        st->ptimer = ptimer_init(timer_hit, st, PTIMER_POLICY_DEFAULT);
        ptimer_transaction_begin(st->ptimer);
        ptimer_set_freq(st->ptimer, s->freqhz);
        ptimer_transaction_commit(st->ptimer);
    }
    return;

xilinx_axidma_realize_fail:
    error_propagate(errp, local_err);
}

static void xilinx_axidma_init(Object *obj)
{
    XilinxAXIDMA *s = XILINX_AXI_DMA(obj);
    SysBusDevice *sbd = SYS_BUS_DEVICE(obj);

    object_initialize_child(OBJECT(s), "axistream-connected-target",
                            &s->rx_data_dev, sizeof(s->rx_data_dev),
                            TYPE_XILINX_AXI_DMA_DATA_STREAM, &error_abort,
                            NULL);
    object_initialize_child(OBJECT(s), "axistream-control-connected-target",
                            &s->rx_control_dev, sizeof(s->rx_control_dev),
                            TYPE_XILINX_AXI_DMA_CONTROL_STREAM, &error_abort,
                            NULL);

    sysbus_init_irq(sbd, &s->streams[0].irq);
    sysbus_init_irq(sbd, &s->streams[1].irq);

    memory_region_init_io(&s->iomem, obj, &axidma_ops, s,
                          "xlnx.axi-dma", R_MAX * 4 * 2);
    sysbus_init_mmio(sbd, &s->iomem);
}

static Property axidma_properties[] = {
    DEFINE_PROP_UINT32("freqhz", XilinxAXIDMA, freqhz, 50000000),
    DEFINE_PROP_LINK("axistream-connected", XilinxAXIDMA,
                     tx_data_dev, TYPE_STREAM_SLAVE, StreamSlave *),
    DEFINE_PROP_LINK("axistream-control-connected", XilinxAXIDMA,
                     tx_control_dev, TYPE_STREAM_SLAVE, StreamSlave *),
    DEFINE_PROP_END_OF_LIST(),
};

static void axidma_class_init(ObjectClass *klass, void *data)
{
    DeviceClass *dc = DEVICE_CLASS(klass);

    dc->realize = xilinx_axidma_realize;
    dc->reset = xilinx_axidma_reset;
    dc->props = axidma_properties;
}

static StreamSlaveClass xilinx_axidma_data_stream_class = {
    .push = xilinx_axidma_data_stream_push,
    .can_push = xilinx_axidma_data_stream_can_push,
};

static StreamSlaveClass xilinx_axidma_control_stream_class = {
    .push = xilinx_axidma_control_stream_push,
};

static void xilinx_axidma_stream_class_init(ObjectClass *klass, void *data)
{
    StreamSlaveClass *ssc = STREAM_SLAVE_CLASS(klass);

    ssc->push = ((StreamSlaveClass *)data)->push;
    ssc->can_push = ((StreamSlaveClass *)data)->can_push;
}

static const TypeInfo axidma_info = {
    .name          = TYPE_XILINX_AXI_DMA,
    .parent        = TYPE_SYS_BUS_DEVICE,
    .instance_size = sizeof(XilinxAXIDMA),
    .class_init    = axidma_class_init,
    .instance_init = xilinx_axidma_init,
};

static const TypeInfo xilinx_axidma_data_stream_info = {
    .name          = TYPE_XILINX_AXI_DMA_DATA_STREAM,
    .parent        = TYPE_OBJECT,
    .instance_size = sizeof(struct XilinxAXIDMAStreamSlave),
    .class_init    = xilinx_axidma_stream_class_init,
    .class_data    = &xilinx_axidma_data_stream_class,
    .interfaces = (InterfaceInfo[]) {
        { TYPE_STREAM_SLAVE },
        { }
    }
};

static const TypeInfo xilinx_axidma_control_stream_info = {
    .name          = TYPE_XILINX_AXI_DMA_CONTROL_STREAM,
    .parent        = TYPE_OBJECT,
    .instance_size = sizeof(struct XilinxAXIDMAStreamSlave),
    .class_init    = xilinx_axidma_stream_class_init,
    .class_data    = &xilinx_axidma_control_stream_class,
    .interfaces = (InterfaceInfo[]) {
        { TYPE_STREAM_SLAVE },
        { }
    }
};

static void xilinx_axidma_register_types(void)
{
    type_register_static(&axidma_info);
    type_register_static(&xilinx_axidma_data_stream_info);
    type_register_static(&xilinx_axidma_control_stream_info);
}

type_init(xilinx_axidma_register_types)