/* npcm_gmac.c */
  1. /*
  2. * Nuvoton NPCM7xx/8xx GMAC Module
  3. *
  4. * Copyright 2024 Google LLC
  5. * Authors:
  6. * Hao Wu <wuhaotsh@google.com>
  7. * Nabih Estefan <nabihestefan@google.com>
  8. *
  9. * This program is free software; you can redistribute it and/or modify it
  10. * under the terms of the GNU General Public License as published by the
  11. * Free Software Foundation; either version 2 of the License, or
  12. * (at your option) any later version.
  13. *
  14. * This program is distributed in the hope that it will be useful, but WITHOUT
  15. * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
  16. * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License
  17. * for more details.
  18. *
  19. * Unsupported/unimplemented features:
  20. * - MII is not implemented, MII_ADDR.BUSY and MII_DATA always return zero
  21. * - Precision timestamp (PTP) is not implemented.
  22. */
  23. #include "qemu/osdep.h"
  24. #include "hw/registerfields.h"
  25. #include "hw/net/mii.h"
  26. #include "hw/net/npcm_gmac.h"
  27. #include "migration/vmstate.h"
  28. #include "net/checksum.h"
  29. #include "net/eth.h"
  30. #include "net/net.h"
  31. #include "qemu/cutils.h"
  32. #include "qemu/log.h"
  33. #include "qemu/units.h"
  34. #include "system/dma.h"
  35. #include "trace.h"
/* DMA engine registers (0x1000 block). */
REG32(NPCM_DMA_BUS_MODE, 0x1000)
REG32(NPCM_DMA_XMT_POLL_DEMAND, 0x1004)
REG32(NPCM_DMA_RCV_POLL_DEMAND, 0x1008)
REG32(NPCM_DMA_RX_BASE_ADDR, 0x100c)
REG32(NPCM_DMA_TX_BASE_ADDR, 0x1010)
REG32(NPCM_DMA_STATUS, 0x1014)
REG32(NPCM_DMA_CONTROL, 0x1018)
REG32(NPCM_DMA_INTR_ENA, 0x101c)
REG32(NPCM_DMA_MISSED_FRAME_CTR, 0x1020)
REG32(NPCM_DMA_HOST_TX_DESC, 0x1048)
REG32(NPCM_DMA_HOST_RX_DESC, 0x104c)
REG32(NPCM_DMA_CUR_TX_BUF_ADDR, 0x1050)
REG32(NPCM_DMA_CUR_RX_BUF_ADDR, 0x1054)
REG32(NPCM_DMA_HW_FEATURE, 0x1058)
/* MAC registers. */
REG32(NPCM_GMAC_MAC_CONFIG, 0x0)
REG32(NPCM_GMAC_FRAME_FILTER, 0x4)
REG32(NPCM_GMAC_HASH_HIGH, 0x8)
REG32(NPCM_GMAC_HASH_LOW, 0xc)
REG32(NPCM_GMAC_MII_ADDR, 0x10)
REG32(NPCM_GMAC_MII_DATA, 0x14)
REG32(NPCM_GMAC_FLOW_CTRL, 0x18)
REG32(NPCM_GMAC_VLAN_FLAG, 0x1c)
REG32(NPCM_GMAC_VERSION, 0x20)
REG32(NPCM_GMAC_WAKEUP_FILTER, 0x28)
REG32(NPCM_GMAC_PMT, 0x2c)
REG32(NPCM_GMAC_LPI_CTRL, 0x30)
REG32(NPCM_GMAC_TIMER_CTRL, 0x34)
REG32(NPCM_GMAC_INT_STATUS, 0x38)
REG32(NPCM_GMAC_INT_MASK, 0x3c)
REG32(NPCM_GMAC_MAC0_ADDR_HI, 0x40)
REG32(NPCM_GMAC_MAC0_ADDR_LO, 0x44)
REG32(NPCM_GMAC_MAC1_ADDR_HI, 0x48)
REG32(NPCM_GMAC_MAC1_ADDR_LO, 0x4c)
REG32(NPCM_GMAC_MAC2_ADDR_HI, 0x50)
REG32(NPCM_GMAC_MAC2_ADDR_LO, 0x54)
REG32(NPCM_GMAC_MAC3_ADDR_HI, 0x58)
REG32(NPCM_GMAC_MAC3_ADDR_LO, 0x5c)
REG32(NPCM_GMAC_RGMII_STATUS, 0xd8)
REG32(NPCM_GMAC_WATCHDOG, 0xdc)
/* PTP timestamp registers (PTP itself is unimplemented, see file header). */
REG32(NPCM_GMAC_PTP_TCR, 0x700)
REG32(NPCM_GMAC_PTP_SSIR, 0x704)
REG32(NPCM_GMAC_PTP_STSR, 0x708)
REG32(NPCM_GMAC_PTP_STNSR, 0x70c)
REG32(NPCM_GMAC_PTP_STSUR, 0x710)
REG32(NPCM_GMAC_PTP_STNSUR, 0x714)
REG32(NPCM_GMAC_PTP_TAR, 0x718)
REG32(NPCM_GMAC_PTP_TTSR, 0x71c)

/* Register Fields */
#define NPCM_GMAC_MII_ADDR_BUSY BIT(0)  /* MDIO access in progress */
#define NPCM_GMAC_MII_ADDR_WRITE BIT(1) /* 1 = MDIO write, 0 = MDIO read */
#define NPCM_GMAC_MII_ADDR_GR(rv) extract16((rv), 6, 5)  /* PHY register */
#define NPCM_GMAC_MII_ADDR_PA(rv) extract16((rv), 11, 5) /* PHY address */

#define NPCM_GMAC_INT_MASK_LPIIM BIT(10)
#define NPCM_GMAC_INT_MASK_PMTM BIT(3)
#define NPCM_GMAC_INT_MASK_RGIM BIT(0)

#define NPCM_DMA_BUS_MODE_SWR BIT(0)    /* software reset request */
/*
 * Cold-reset values for the register bank; copied into gmac->regs by
 * npcm_gmac_soft_reset().  Unlisted registers reset to zero.
 */
static const uint32_t npcm_gmac_cold_reset_values[NPCM_GMAC_NR_REGS] = {
    /* Reduce version to 3.2 so that the kernel can enable interrupt. */
    [R_NPCM_GMAC_VERSION]         = 0x00001032,
    [R_NPCM_GMAC_TIMER_CTRL]      = 0x03e80000,
    /* MAC address 0 defaults to all-ones with the top "address enable" bit. */
    [R_NPCM_GMAC_MAC0_ADDR_HI]    = 0x8000ffff,
    [R_NPCM_GMAC_MAC0_ADDR_LO]    = 0xffffffff,
    [R_NPCM_GMAC_MAC1_ADDR_HI]    = 0x0000ffff,
    [R_NPCM_GMAC_MAC1_ADDR_LO]    = 0xffffffff,
    [R_NPCM_GMAC_MAC2_ADDR_HI]    = 0x0000ffff,
    [R_NPCM_GMAC_MAC2_ADDR_LO]    = 0xffffffff,
    [R_NPCM_GMAC_MAC3_ADDR_HI]    = 0x0000ffff,
    [R_NPCM_GMAC_MAC3_ADDR_LO]    = 0xffffffff,
    [R_NPCM_GMAC_PTP_TCR]         = 0x00002000,
    [R_NPCM_DMA_BUS_MODE]         = 0x00020101,
    [R_NPCM_DMA_HW_FEATURE]       = 0x100d4f37,
};
/*
 * Reset values for the emulated PHY register file (loaded into
 * phy_regs[0] at device reset): advertises 10/100/1000 full/half duplex
 * with autonegotiation enabled and reports link up.
 */
static const uint16_t phy_reg_init[] = {
    [MII_BMCR]      = MII_BMCR_AUTOEN | MII_BMCR_FD | MII_BMCR_SPEED1000,
    [MII_BMSR]      = MII_BMSR_100TX_FD | MII_BMSR_100TX_HD | MII_BMSR_10T_FD |
                      MII_BMSR_10T_HD | MII_BMSR_EXTSTAT | MII_BMSR_AUTONEG |
                      MII_BMSR_LINK_ST | MII_BMSR_EXTCAP,
    [MII_PHYID1]    = 0x0362,
    [MII_PHYID2]    = 0x5e6a,
    [MII_ANAR]      = MII_ANAR_TXFD | MII_ANAR_TX | MII_ANAR_10FD |
                      MII_ANAR_10 | MII_ANAR_CSMACD,
    [MII_ANLPAR]    = MII_ANLPAR_ACK | MII_ANLPAR_PAUSE |
                      MII_ANLPAR_TXFD | MII_ANLPAR_TX | MII_ANLPAR_10FD |
                      MII_ANLPAR_10 | MII_ANLPAR_CSMACD,
    [MII_ANER]      = 0x64 | MII_ANER_NWAY,
    [MII_ANNP]      = 0x2001,
    [MII_CTRL1000]  = MII_CTRL1000_FULL,
    [MII_STAT1000]  = MII_STAT1000_FULL,
    [MII_EXTSTAT]   = 0x3000, /* 1000BASE-T full-duplex capable */
};
  126. static void npcm_gmac_soft_reset(NPCMGMACState *gmac)
  127. {
  128. memcpy(gmac->regs, npcm_gmac_cold_reset_values,
  129. NPCM_GMAC_NR_REGS * sizeof(uint32_t));
  130. /* Clear reset bits */
  131. gmac->regs[R_NPCM_DMA_BUS_MODE] &= ~NPCM_DMA_BUS_MODE_SWR;
  132. }
  133. static void gmac_phy_set_link(NPCMGMACState *gmac, bool active)
  134. {
  135. /* Autonegotiation status mirrors link status. */
  136. if (active) {
  137. gmac->phy_regs[0][MII_BMSR] |= (MII_BMSR_LINK_ST | MII_BMSR_AN_COMP);
  138. } else {
  139. gmac->phy_regs[0][MII_BMSR] &= ~(MII_BMSR_LINK_ST | MII_BMSR_AN_COMP);
  140. }
  141. }
  142. static bool gmac_can_receive(NetClientState *nc)
  143. {
  144. NPCMGMACState *gmac = NPCM_GMAC(qemu_get_nic_opaque(nc));
  145. /* If GMAC receive is disabled. */
  146. if (!(gmac->regs[R_NPCM_GMAC_MAC_CONFIG] & NPCM_GMAC_MAC_CONFIG_RX_EN)) {
  147. return false;
  148. }
  149. /* If GMAC DMA RX is stopped. */
  150. if (!(gmac->regs[R_NPCM_DMA_CONTROL] & NPCM_DMA_CONTROL_START_STOP_RX)) {
  151. return false;
  152. }
  153. return true;
  154. }
  155. /*
  156. * Function that updates the GMAC IRQ
  157. * It find the logical OR of the enabled bits for NIS (if enabled)
  158. * It find the logical OR of the enabled bits for AIS (if enabled)
  159. */
  160. static void gmac_update_irq(NPCMGMACState *gmac)
  161. {
  162. /*
  163. * Check if the normal interrupts summary is enabled
  164. * if so, add the bits for the summary that are enabled
  165. */
  166. if (gmac->regs[R_NPCM_DMA_INTR_ENA] & gmac->regs[R_NPCM_DMA_STATUS] &
  167. (NPCM_DMA_INTR_ENAB_NIE_BITS)) {
  168. gmac->regs[R_NPCM_DMA_STATUS] |= NPCM_DMA_STATUS_NIS;
  169. }
  170. /*
  171. * Check if the abnormal interrupts summary is enabled
  172. * if so, add the bits for the summary that are enabled
  173. */
  174. if (gmac->regs[R_NPCM_DMA_INTR_ENA] & gmac->regs[R_NPCM_DMA_STATUS] &
  175. (NPCM_DMA_INTR_ENAB_AIE_BITS)) {
  176. gmac->regs[R_NPCM_DMA_STATUS] |= NPCM_DMA_STATUS_AIS;
  177. }
  178. /* Get the logical OR of both normal and abnormal interrupts */
  179. int level = !!((gmac->regs[R_NPCM_DMA_STATUS] &
  180. gmac->regs[R_NPCM_DMA_INTR_ENA] &
  181. NPCM_DMA_STATUS_NIS) |
  182. (gmac->regs[R_NPCM_DMA_STATUS] &
  183. gmac->regs[R_NPCM_DMA_INTR_ENA] &
  184. NPCM_DMA_STATUS_AIS));
  185. /* Set the IRQ */
  186. trace_npcm_gmac_update_irq(DEVICE(gmac)->canonical_path,
  187. gmac->regs[R_NPCM_DMA_STATUS],
  188. gmac->regs[R_NPCM_DMA_INTR_ENA],
  189. level);
  190. qemu_set_irq(gmac->irq, level);
  191. }
  192. static int gmac_read_rx_desc(dma_addr_t addr, struct NPCMGMACRxDesc *desc)
  193. {
  194. if (dma_memory_read(&address_space_memory, addr, desc,
  195. sizeof(*desc), MEMTXATTRS_UNSPECIFIED)) {
  196. qemu_log_mask(LOG_GUEST_ERROR, "%s: Failed to read descriptor @ 0x%"
  197. HWADDR_PRIx "\n", __func__, addr);
  198. return -1;
  199. }
  200. desc->rdes0 = le32_to_cpu(desc->rdes0);
  201. desc->rdes1 = le32_to_cpu(desc->rdes1);
  202. desc->rdes2 = le32_to_cpu(desc->rdes2);
  203. desc->rdes3 = le32_to_cpu(desc->rdes3);
  204. return 0;
  205. }
  206. static int gmac_write_rx_desc(dma_addr_t addr, struct NPCMGMACRxDesc *desc)
  207. {
  208. struct NPCMGMACRxDesc le_desc;
  209. le_desc.rdes0 = cpu_to_le32(desc->rdes0);
  210. le_desc.rdes1 = cpu_to_le32(desc->rdes1);
  211. le_desc.rdes2 = cpu_to_le32(desc->rdes2);
  212. le_desc.rdes3 = cpu_to_le32(desc->rdes3);
  213. if (dma_memory_write(&address_space_memory, addr, &le_desc,
  214. sizeof(le_desc), MEMTXATTRS_UNSPECIFIED)) {
  215. qemu_log_mask(LOG_GUEST_ERROR, "%s: Failed to write descriptor @ 0x%"
  216. HWADDR_PRIx "\n", __func__, addr);
  217. return -1;
  218. }
  219. return 0;
  220. }
  221. static int gmac_read_tx_desc(dma_addr_t addr, struct NPCMGMACTxDesc *desc)
  222. {
  223. if (dma_memory_read(&address_space_memory, addr, desc,
  224. sizeof(*desc), MEMTXATTRS_UNSPECIFIED)) {
  225. qemu_log_mask(LOG_GUEST_ERROR, "%s: Failed to read descriptor @ 0x%"
  226. HWADDR_PRIx "\n", __func__, addr);
  227. return -1;
  228. }
  229. desc->tdes0 = le32_to_cpu(desc->tdes0);
  230. desc->tdes1 = le32_to_cpu(desc->tdes1);
  231. desc->tdes2 = le32_to_cpu(desc->tdes2);
  232. desc->tdes3 = le32_to_cpu(desc->tdes3);
  233. return 0;
  234. }
  235. static int gmac_write_tx_desc(dma_addr_t addr, struct NPCMGMACTxDesc *desc)
  236. {
  237. struct NPCMGMACTxDesc le_desc;
  238. le_desc.tdes0 = cpu_to_le32(desc->tdes0);
  239. le_desc.tdes1 = cpu_to_le32(desc->tdes1);
  240. le_desc.tdes2 = cpu_to_le32(desc->tdes2);
  241. le_desc.tdes3 = cpu_to_le32(desc->tdes3);
  242. if (dma_memory_write(&address_space_memory, addr, &le_desc,
  243. sizeof(le_desc), MEMTXATTRS_UNSPECIFIED)) {
  244. qemu_log_mask(LOG_GUEST_ERROR, "%s: Failed to write descriptor @ 0x%"
  245. HWADDR_PRIx "\n", __func__, addr);
  246. return -1;
  247. }
  248. return 0;
  249. }
  250. static int gmac_rx_transfer_frame_to_buffer(uint32_t rx_buf_len,
  251. uint32_t *left_frame,
  252. uint32_t rx_buf_addr,
  253. bool *eof_transferred,
  254. const uint8_t **frame_ptr,
  255. uint16_t *transferred)
  256. {
  257. uint32_t to_transfer;
  258. /*
  259. * Check that buffer is bigger than the frame being transfered
  260. * If bigger then transfer only whats left of frame
  261. * Else, fill frame with all the content possible
  262. */
  263. if (rx_buf_len >= *left_frame) {
  264. to_transfer = *left_frame;
  265. *eof_transferred = true;
  266. } else {
  267. to_transfer = rx_buf_len;
  268. }
  269. /* write frame part to memory */
  270. if (dma_memory_write(&address_space_memory, (uint64_t) rx_buf_addr,
  271. *frame_ptr, to_transfer, MEMTXATTRS_UNSPECIFIED)) {
  272. return -1;
  273. }
  274. /* update frame pointer and size of whats left of frame */
  275. *frame_ptr += to_transfer;
  276. *left_frame -= to_transfer;
  277. *transferred += to_transfer;
  278. return 0;
  279. }
/* Write a 3-bit DMA process-state field (Rx or Tx) into DMA_STATUS. */
static void gmac_dma_set_state(NPCMGMACState *gmac, int shift, uint32_t state)
{
    gmac->regs[R_NPCM_DMA_STATUS] = deposit32(gmac->regs[R_NPCM_DMA_STATUS],
                                              shift, 3, state);
}
/*
 * NetClientInfo.receive hook: DMA an incoming Ethernet frame into the Rx
 * descriptor ring and raise the completion interrupt.  Returns len when
 * the frame was consumed (even if dropped mid-transfer), -1 otherwise.
 */
static ssize_t gmac_receive(NetClientState *nc, const uint8_t *buf, size_t len)
{
    /*
     * Comments have steps that relate to the
     * receiving process steps in pg 386
     */
    NPCMGMACState *gmac = NPCM_GMAC(qemu_get_nic_opaque(nc));
    uint32_t left_frame = len;       /* bytes of the frame not yet DMAed */
    const uint8_t *frame_ptr = buf;  /* read cursor into the frame */
    uint32_t desc_addr;
    uint32_t rx_buf_len, rx_buf_addr;
    struct NPCMGMACRxDesc rx_desc;
    uint16_t transferred = 0;        /* total bytes written to guest RAM */
    bool eof_transferred = false;

    trace_npcm_gmac_packet_receive(DEVICE(gmac)->canonical_path, len);
    if (!gmac_can_receive(nc)) {
        qemu_log_mask(LOG_GUEST_ERROR, "GMAC Currently is not able for Rx");
        return -1;
    }
    /* Latch the descriptor pointer from the ring base on first use. */
    if (!gmac->regs[R_NPCM_DMA_HOST_RX_DESC]) {
        gmac->regs[R_NPCM_DMA_HOST_RX_DESC] =
            NPCM_DMA_HOST_RX_DESC_MASK(gmac->regs[R_NPCM_DMA_RX_BASE_ADDR]);
    }
    desc_addr = NPCM_DMA_HOST_RX_DESC_MASK(gmac->regs[R_NPCM_DMA_HOST_RX_DESC]);

    /* step 1 */
    gmac_dma_set_state(gmac, NPCM_DMA_STATUS_RX_PROCESS_STATE_SHIFT,
                       NPCM_DMA_STATUS_RX_RUNNING_FETCHING_STATE);
    trace_npcm_gmac_packet_desc_read(DEVICE(gmac)->canonical_path, desc_addr);
    if (gmac_read_rx_desc(desc_addr, &rx_desc)) {
        qemu_log_mask(LOG_GUEST_ERROR, "RX Descriptor @ 0x%x cant be read\n",
                      desc_addr);
        gmac_dma_set_state(gmac, NPCM_DMA_STATUS_RX_PROCESS_STATE_SHIFT,
                           NPCM_DMA_STATUS_RX_SUSPENDED_STATE);
        return -1;
    }

    /* step 2: the descriptor must be owned by the DMA (OWN bit set). */
    if (!(rx_desc.rdes0 & RX_DESC_RDES0_OWN)) {
        qemu_log_mask(LOG_GUEST_ERROR,
                      "RX Descriptor @ 0x%x is owned by software\n",
                      desc_addr);
        /* Report "receive buffer unavailable" and suspend the Rx engine. */
        gmac->regs[R_NPCM_DMA_STATUS] |= NPCM_DMA_STATUS_RU;
        gmac->regs[R_NPCM_DMA_STATUS] |= NPCM_DMA_STATUS_RI;
        gmac_dma_set_state(gmac, NPCM_DMA_STATUS_RX_PROCESS_STATE_SHIFT,
                           NPCM_DMA_STATUS_RX_SUSPENDED_STATE);
        gmac_update_irq(gmac);
        return len;
    }
    /* step 3 */
    /*
     * TODO --
     * Implement all frame filtering and processing (with its own interrupts)
     */
    trace_npcm_gmac_debug_desc_data(DEVICE(gmac)->canonical_path, &rx_desc,
                                    rx_desc.rdes0, rx_desc.rdes1, rx_desc.rdes2,
                                    rx_desc.rdes3);
    /* Clear rdes0 for the incoming descriptor and set FS in first descriptor.*/
    rx_desc.rdes0 = RX_DESC_RDES0_FIRST_DESC_MASK;

    gmac_dma_set_state(gmac, NPCM_DMA_STATUS_RX_PROCESS_STATE_SHIFT,
                       NPCM_DMA_STATUS_RX_RUNNING_TRANSFERRING_STATE);

    /* Pad the frame with FCS as the kernel driver will strip it away. */
    left_frame += ETH_FCS_LEN;

    /* repeat while we still have frame to transfer to memory */
    while (!eof_transferred) {
        /* Return descriptor no matter what happens */
        rx_desc.rdes0 &= ~RX_DESC_RDES0_OWN;
        /* Set the frame to be an IPv4/IPv6 frame. */
        rx_desc.rdes0 |= RX_DESC_RDES0_FRM_TYPE_MASK;

        /* step 4: fill buffer 1. */
        rx_buf_len = RX_DESC_RDES1_BFFR1_SZ_MASK(rx_desc.rdes1);
        rx_buf_addr = rx_desc.rdes2;
        gmac->regs[R_NPCM_DMA_CUR_RX_BUF_ADDR] = rx_buf_addr;
        /*
         * NOTE(review): the return value is ignored here, so a failed DMA
         * write is silently treated as success -- confirm this is intended.
         */
        gmac_rx_transfer_frame_to_buffer(rx_buf_len, &left_frame, rx_buf_addr,
                                         &eof_transferred, &frame_ptr,
                                         &transferred);
        trace_npcm_gmac_packet_receiving_buffer(DEVICE(gmac)->canonical_path,
                                                rx_buf_len, rx_buf_addr);
        /* if we still have frame left and the second buffer is not chained */
        if (!(rx_desc.rdes1 & RX_DESC_RDES1_SEC_ADDR_CHND_MASK) && \
            !eof_transferred) {
            /* repeat process from above on buffer 2 */
            rx_buf_len = RX_DESC_RDES1_BFFR2_SZ_MASK(rx_desc.rdes1);
            rx_buf_addr = rx_desc.rdes3;
            gmac->regs[R_NPCM_DMA_CUR_RX_BUF_ADDR] = rx_buf_addr;
            gmac_rx_transfer_frame_to_buffer(rx_buf_len, &left_frame,
                                             rx_buf_addr, &eof_transferred,
                                             &frame_ptr, &transferred);
            trace_npcm_gmac_packet_receiving_buffer( \
                DEVICE(gmac)->canonical_path,
                rx_buf_len, rx_buf_addr);
        }
        /* update address for descriptor */
        /*
         * NOTE(review): this stores a *buffer* address in the descriptor
         * pointer register; it is overwritten with a real descriptor
         * address at the end of the function -- verify the intermediate
         * value is intentional.
         */
        gmac->regs[R_NPCM_DMA_HOST_RX_DESC] = rx_buf_addr;
        /* Return descriptor */
        rx_desc.rdes0 &= ~RX_DESC_RDES0_OWN;
        /* Update frame length transferred */
        rx_desc.rdes0 |= ((uint32_t)transferred)
            << RX_DESC_RDES0_FRAME_LEN_SHIFT;
        trace_npcm_gmac_debug_desc_data(DEVICE(gmac)->canonical_path, &rx_desc,
                                        rx_desc.rdes0, rx_desc.rdes1,
                                        rx_desc.rdes2, rx_desc.rdes3);

        /* step 5: write the closed descriptor back to guest memory. */
        gmac_write_rx_desc(desc_addr, &rx_desc);
        trace_npcm_gmac_debug_desc_data(DEVICE(gmac)->canonical_path,
                                        &rx_desc, rx_desc.rdes0,
                                        rx_desc.rdes1, rx_desc.rdes2,
                                        rx_desc.rdes3);

        /* read new descriptor into rx_desc if needed*/
        if (!eof_transferred) {
            /* Get next descriptor address (chained or sequential) */
            if (rx_desc.rdes1 & RX_DESC_RDES1_RC_END_RING_MASK) {
                desc_addr = gmac->regs[R_NPCM_DMA_RX_BASE_ADDR];
            } else if (rx_desc.rdes1 & RX_DESC_RDES1_SEC_ADDR_CHND_MASK) {
                desc_addr = rx_desc.rdes3;
            } else {
                desc_addr += sizeof(rx_desc);
            }
            trace_npcm_gmac_packet_desc_read(DEVICE(gmac)->canonical_path,
                                             desc_addr);
            if (gmac_read_rx_desc(desc_addr, &rx_desc)) {
                qemu_log_mask(LOG_GUEST_ERROR,
                              "RX Descriptor @ 0x%x cant be read\n",
                              desc_addr);
                gmac->regs[R_NPCM_DMA_STATUS] |= NPCM_DMA_STATUS_RU;
                gmac_update_irq(gmac);
                return len;
            }

            /* step 6: next descriptor still owned by software mid-frame. */
            if (!(rx_desc.rdes0 & RX_DESC_RDES0_OWN)) {
                if (!(gmac->regs[R_NPCM_DMA_CONTROL] & \
                      NPCM_DMA_CONTROL_FLUSH_MASK)) {
                    rx_desc.rdes0 |= RX_DESC_RDES0_DESC_ERR_MASK;
                }
                /* Frame is truncated here; stop transferring. */
                eof_transferred = true;
            }
            /* Clear rdes0 for the incoming descriptor */
            rx_desc.rdes0 = 0;
        }
    }
    gmac_dma_set_state(gmac, NPCM_DMA_STATUS_RX_PROCESS_STATE_SHIFT,
                       NPCM_DMA_STATUS_RX_RUNNING_CLOSING_STATE);
    /* Mark the final descriptor and raise RI unless completion IRQ is off. */
    rx_desc.rdes0 |= RX_DESC_RDES0_LAST_DESC_MASK;
    if (!(rx_desc.rdes1 & RX_DESC_RDES1_DIS_INTR_COMP_MASK)) {
        gmac->regs[R_NPCM_DMA_STATUS] |= NPCM_DMA_STATUS_RI;
        gmac_update_irq(gmac);
    }
    trace_npcm_gmac_debug_desc_data(DEVICE(gmac)->canonical_path, &rx_desc,
                                    rx_desc.rdes0, rx_desc.rdes1, rx_desc.rdes2,
                                    rx_desc.rdes3);

    /* step 8 */
    gmac->regs[R_NPCM_DMA_CONTROL] |= NPCM_DMA_CONTROL_FLUSH_MASK;
    /* step 9 */
    trace_npcm_gmac_packet_received(DEVICE(gmac)->canonical_path, left_frame);
    gmac_dma_set_state(gmac, NPCM_DMA_STATUS_RX_PROCESS_STATE_SHIFT,
                       NPCM_DMA_STATUS_RX_RUNNING_WAITING_STATE);
    gmac_write_rx_desc(desc_addr, &rx_desc);

    /* Get next descriptor address (chained or sequential) */
    if (rx_desc.rdes1 & RX_DESC_RDES1_RC_END_RING_MASK) {
        desc_addr = gmac->regs[R_NPCM_DMA_RX_BASE_ADDR];
    } else if (rx_desc.rdes1 & RX_DESC_RDES1_SEC_ADDR_CHND_MASK) {
        desc_addr = rx_desc.rdes3;
    } else {
        desc_addr += sizeof(rx_desc);
    }
    gmac->regs[R_NPCM_DMA_HOST_RX_DESC] = desc_addr;
    return len;
}
  451. static int gmac_tx_get_csum(uint32_t tdes1)
  452. {
  453. uint32_t mask = TX_DESC_TDES1_CHKSM_INS_CTRL_MASK(tdes1);
  454. int csum = 0;
  455. if (likely(mask > 0)) {
  456. csum |= CSUM_IP;
  457. }
  458. if (likely(mask > 1)) {
  459. csum |= CSUM_TCP | CSUM_UDP;
  460. }
  461. return csum;
  462. }
  463. static void gmac_try_send_next_packet(NPCMGMACState *gmac)
  464. {
  465. /*
  466. * Comments about steps refer to steps for
  467. * transmitting in page 384 of datasheet
  468. */
  469. uint16_t tx_buffer_size = 2048;
  470. g_autofree uint8_t *tx_send_buffer = g_malloc(tx_buffer_size);
  471. uint32_t desc_addr;
  472. struct NPCMGMACTxDesc tx_desc;
  473. uint32_t tx_buf_addr, tx_buf_len;
  474. uint16_t length = 0;
  475. uint8_t *buf = tx_send_buffer;
  476. uint32_t prev_buf_size = 0;
  477. int csum = 0;
  478. /* steps 1&2 */
  479. if (!gmac->regs[R_NPCM_DMA_HOST_TX_DESC]) {
  480. gmac->regs[R_NPCM_DMA_HOST_TX_DESC] =
  481. NPCM_DMA_HOST_TX_DESC_MASK(gmac->regs[R_NPCM_DMA_TX_BASE_ADDR]);
  482. }
  483. desc_addr = gmac->regs[R_NPCM_DMA_HOST_TX_DESC];
  484. while (true) {
  485. gmac_dma_set_state(gmac, NPCM_DMA_STATUS_TX_PROCESS_STATE_SHIFT,
  486. NPCM_DMA_STATUS_TX_RUNNING_FETCHING_STATE);
  487. if (gmac_read_tx_desc(desc_addr, &tx_desc)) {
  488. qemu_log_mask(LOG_GUEST_ERROR,
  489. "TX Descriptor @ 0x%x can't be read\n",
  490. desc_addr);
  491. return;
  492. }
  493. /* step 3 */
  494. trace_npcm_gmac_packet_desc_read(DEVICE(gmac)->canonical_path,
  495. desc_addr);
  496. trace_npcm_gmac_debug_desc_data(DEVICE(gmac)->canonical_path, &tx_desc,
  497. tx_desc.tdes0, tx_desc.tdes1, tx_desc.tdes2, tx_desc.tdes3);
  498. /* 1 = DMA Owned, 0 = Software Owned */
  499. if (!(tx_desc.tdes0 & TX_DESC_TDES0_OWN)) {
  500. trace_npcm_gmac_tx_desc_owner(DEVICE(gmac)->canonical_path,
  501. desc_addr);
  502. gmac->regs[R_NPCM_DMA_STATUS] |= NPCM_DMA_STATUS_TU;
  503. gmac_dma_set_state(gmac, NPCM_DMA_STATUS_TX_PROCESS_STATE_SHIFT,
  504. NPCM_DMA_STATUS_TX_SUSPENDED_STATE);
  505. gmac_update_irq(gmac);
  506. return;
  507. }
  508. gmac_dma_set_state(gmac, NPCM_DMA_STATUS_TX_PROCESS_STATE_SHIFT,
  509. NPCM_DMA_STATUS_TX_RUNNING_READ_STATE);
  510. /* Give the descriptor back regardless of what happens. */
  511. tx_desc.tdes0 &= ~TX_DESC_TDES0_OWN;
  512. if (tx_desc.tdes1 & TX_DESC_TDES1_FIRST_SEG_MASK) {
  513. csum = gmac_tx_get_csum(tx_desc.tdes1);
  514. }
  515. /* step 4 */
  516. tx_buf_addr = tx_desc.tdes2;
  517. gmac->regs[R_NPCM_DMA_CUR_TX_BUF_ADDR] = tx_buf_addr;
  518. tx_buf_len = TX_DESC_TDES1_BFFR1_SZ_MASK(tx_desc.tdes1);
  519. buf = &tx_send_buffer[prev_buf_size];
  520. if ((prev_buf_size + tx_buf_len) > sizeof(buf)) {
  521. tx_buffer_size = prev_buf_size + tx_buf_len;
  522. tx_send_buffer = g_realloc(tx_send_buffer, tx_buffer_size);
  523. buf = &tx_send_buffer[prev_buf_size];
  524. }
  525. /* step 5 */
  526. if (dma_memory_read(&address_space_memory, tx_buf_addr, buf,
  527. tx_buf_len, MEMTXATTRS_UNSPECIFIED)) {
  528. qemu_log_mask(LOG_GUEST_ERROR, "%s: Failed to read packet @ 0x%x\n",
  529. __func__, tx_buf_addr);
  530. return;
  531. }
  532. length += tx_buf_len;
  533. prev_buf_size += tx_buf_len;
  534. /* If not chained we'll have a second buffer. */
  535. if (!(tx_desc.tdes1 & TX_DESC_TDES1_SEC_ADDR_CHND_MASK)) {
  536. tx_buf_addr = tx_desc.tdes3;
  537. gmac->regs[R_NPCM_DMA_CUR_TX_BUF_ADDR] = tx_buf_addr;
  538. tx_buf_len = TX_DESC_TDES1_BFFR2_SZ_MASK(tx_desc.tdes1);
  539. buf = &tx_send_buffer[prev_buf_size];
  540. if ((prev_buf_size + tx_buf_len) > sizeof(buf)) {
  541. tx_buffer_size = prev_buf_size + tx_buf_len;
  542. tx_send_buffer = g_realloc(tx_send_buffer, tx_buffer_size);
  543. buf = &tx_send_buffer[prev_buf_size];
  544. }
  545. if (dma_memory_read(&address_space_memory, tx_buf_addr, buf,
  546. tx_buf_len, MEMTXATTRS_UNSPECIFIED)) {
  547. qemu_log_mask(LOG_GUEST_ERROR,
  548. "%s: Failed to read packet @ 0x%x\n",
  549. __func__, tx_buf_addr);
  550. return;
  551. }
  552. length += tx_buf_len;
  553. prev_buf_size += tx_buf_len;
  554. }
  555. if (tx_desc.tdes1 & TX_DESC_TDES1_LAST_SEG_MASK) {
  556. net_checksum_calculate(tx_send_buffer, length, csum);
  557. qemu_send_packet(qemu_get_queue(gmac->nic), tx_send_buffer, length);
  558. trace_npcm_gmac_packet_sent(DEVICE(gmac)->canonical_path, length);
  559. buf = tx_send_buffer;
  560. length = 0;
  561. }
  562. /* step 6 */
  563. gmac_dma_set_state(gmac, NPCM_DMA_STATUS_TX_PROCESS_STATE_SHIFT,
  564. NPCM_DMA_STATUS_TX_RUNNING_CLOSING_STATE);
  565. gmac_write_tx_desc(desc_addr, &tx_desc);
  566. if (tx_desc.tdes1 & TX_DESC_TDES1_TX_END_RING_MASK) {
  567. desc_addr = gmac->regs[R_NPCM_DMA_TX_BASE_ADDR];
  568. } else if (tx_desc.tdes1 & TX_DESC_TDES1_SEC_ADDR_CHND_MASK) {
  569. desc_addr = tx_desc.tdes3;
  570. } else {
  571. desc_addr += sizeof(tx_desc);
  572. }
  573. gmac->regs[R_NPCM_DMA_HOST_TX_DESC] = desc_addr;
  574. /* step 7 */
  575. if (tx_desc.tdes1 & TX_DESC_TDES1_INTERR_COMP_MASK) {
  576. gmac->regs[R_NPCM_DMA_STATUS] |= NPCM_DMA_STATUS_TI;
  577. gmac_update_irq(gmac);
  578. }
  579. }
  580. }
/* NetClientInfo.cleanup hook: no per-device teardown is required yet. */
static void gmac_cleanup(NetClientState *nc)
{
    /* Nothing to do yet. */
}
  585. static void gmac_set_link(NetClientState *nc)
  586. {
  587. NPCMGMACState *gmac = qemu_get_nic_opaque(nc);
  588. trace_npcm_gmac_set_link(!nc->link_down);
  589. gmac_phy_set_link(gmac, !nc->link_down);
  590. }
/*
 * Handle a write to the MII address register: perform the requested MDIO
 * read or write against the emulated PHY register file, then clear the
 * BUSY bit so the access always appears to complete immediately.
 */
static void npcm_gmac_mdio_access(NPCMGMACState *gmac, uint16_t v)
{
    bool busy = v & NPCM_GMAC_MII_ADDR_BUSY;
    uint8_t is_write;
    uint8_t pa, gr;
    uint16_t data;

    /* Only act when the guest actually started an access (BUSY set). */
    if (busy) {
        is_write = v & NPCM_GMAC_MII_ADDR_WRITE;
        pa = NPCM_GMAC_MII_ADDR_PA(v);
        gr = NPCM_GMAC_MII_ADDR_GR(v);
        /* Both pa and gr are 5 bits, so they are less than 32. */
        g_assert(pa < NPCM_GMAC_MAX_PHYS);
        g_assert(gr < NPCM_GMAC_MAX_PHY_REGS);
        if (v & NPCM_GMAC_MII_ADDR_WRITE) {
            /* MDIO write: take the payload from MII_DATA. */
            data = gmac->regs[R_NPCM_GMAC_MII_DATA];
            /* Clear reset bit for BMCR register */
            switch (gr) {
            case MII_BMCR:
                data &= ~MII_BMCR_RESET;
                /* Autonegotiation is a W1C bit*/
                if (data & MII_BMCR_ANRESTART) {
                    /* Tells autonegotiation to not restart again */
                    data &= ~MII_BMCR_ANRESTART;
                }
                if ((data & MII_BMCR_AUTOEN) &&
                    !(gmac->phy_regs[pa][MII_BMSR] & MII_BMSR_AN_COMP)) {
                    /* sets autonegotiation as complete */
                    gmac->phy_regs[pa][MII_BMSR] |= MII_BMSR_AN_COMP;
                    /* Resolve AN automatically->need to set this */
                    /*
                     * NOTE(review): indexes PHY 0 here while the rest of
                     * this branch uses pa -- confirm this is intentional.
                     */
                    gmac->phy_regs[0][MII_ANLPAR] = 0x0000;
                }
            }
            gmac->phy_regs[pa][gr] = data;
        } else {
            /* MDIO read: return the PHY register through MII_DATA. */
            data = gmac->phy_regs[pa][gr];
            gmac->regs[R_NPCM_GMAC_MII_DATA] = data;
        }
        trace_npcm_gmac_mdio_access(DEVICE(gmac)->canonical_path, is_write, pa,
                                    gr, data);
    }
    /* The emulated access completes instantly; BUSY always reads clear. */
    gmac->regs[R_NPCM_GMAC_MII_ADDR] = v & ~NPCM_GMAC_MII_ADDR_BUSY;
}
  633. static uint64_t npcm_gmac_read(void *opaque, hwaddr offset, unsigned size)
  634. {
  635. NPCMGMACState *gmac = opaque;
  636. uint32_t v = 0;
  637. switch (offset) {
  638. /* Write only registers */
  639. case A_NPCM_DMA_XMT_POLL_DEMAND:
  640. case A_NPCM_DMA_RCV_POLL_DEMAND:
  641. qemu_log_mask(LOG_GUEST_ERROR,
  642. "%s: Read of write-only reg: offset: 0x%04" HWADDR_PRIx
  643. "\n", DEVICE(gmac)->canonical_path, offset);
  644. break;
  645. default:
  646. v = gmac->regs[offset / sizeof(uint32_t)];
  647. }
  648. trace_npcm_gmac_reg_read(DEVICE(gmac)->canonical_path, offset, v);
  649. return v;
  650. }
/*
 * MMIO write handler for the GMAC register bank.  Registers with side
 * effects (MDIO, MAC address, DMA control/status, soft reset, poll
 * demand) are handled explicitly; everything else is stored verbatim.
 * The IRQ line is re-evaluated after every write.
 */
static void npcm_gmac_write(void *opaque, hwaddr offset,
                            uint64_t v, unsigned size)
{
    NPCMGMACState *gmac = opaque;

    trace_npcm_gmac_reg_write(DEVICE(gmac)->canonical_path, offset, v);

    switch (offset) {
    /* Read only registers */
    case A_NPCM_GMAC_VERSION:
    case A_NPCM_GMAC_INT_STATUS:
    case A_NPCM_GMAC_RGMII_STATUS:
    case A_NPCM_GMAC_PTP_STSR:
    case A_NPCM_GMAC_PTP_STNSR:
    case A_NPCM_DMA_MISSED_FRAME_CTR:
    case A_NPCM_DMA_HOST_TX_DESC:
    case A_NPCM_DMA_HOST_RX_DESC:
    case A_NPCM_DMA_CUR_TX_BUF_ADDR:
    case A_NPCM_DMA_CUR_RX_BUF_ADDR:
    case A_NPCM_DMA_HW_FEATURE:
        qemu_log_mask(LOG_GUEST_ERROR,
                      "%s: Write of read-only reg: offset: 0x%04" HWADDR_PRIx
                      ", value: 0x%04" PRIx64 "\n",
                      DEVICE(gmac)->canonical_path, offset, v);
        break;

    case A_NPCM_GMAC_MAC_CONFIG:
        gmac->regs[offset / sizeof(uint32_t)] = v;
        break;

    case A_NPCM_GMAC_MII_ADDR:
        /* Kicks off an emulated MDIO access; see npcm_gmac_mdio_access(). */
        npcm_gmac_mdio_access(gmac, v);
        break;

    /* Writing MAC address 0 also updates the NIC's MAC address. */
    case A_NPCM_GMAC_MAC0_ADDR_HI:
        gmac->regs[offset / sizeof(uint32_t)] = v;
        gmac->conf.macaddr.a[0] = v >> 8;
        gmac->conf.macaddr.a[1] = v >> 0;
        break;

    case A_NPCM_GMAC_MAC0_ADDR_LO:
        gmac->regs[offset / sizeof(uint32_t)] = v;
        gmac->conf.macaddr.a[2] = v >> 24;
        gmac->conf.macaddr.a[3] = v >> 16;
        gmac->conf.macaddr.a[4] = v >> 8;
        gmac->conf.macaddr.a[5] = v >> 0;
        break;

    /* MAC addresses 1-3 are stored but have no effect on the backend. */
    case A_NPCM_GMAC_MAC1_ADDR_HI:
    case A_NPCM_GMAC_MAC1_ADDR_LO:
    case A_NPCM_GMAC_MAC2_ADDR_HI:
    case A_NPCM_GMAC_MAC2_ADDR_LO:
    case A_NPCM_GMAC_MAC3_ADDR_HI:
    case A_NPCM_GMAC_MAC3_ADDR_LO:
        gmac->regs[offset / sizeof(uint32_t)] = v;
        qemu_log_mask(LOG_UNIMP,
                      "%s: Only MAC Address 0 is supported. This request "
                      "is ignored.\n", DEVICE(gmac)->canonical_path);
        break;

    case A_NPCM_DMA_BUS_MODE:
        gmac->regs[offset / sizeof(uint32_t)] = v;
        /* Setting SWR triggers a full soft reset of the register bank. */
        if (v & NPCM_DMA_BUS_MODE_SWR) {
            npcm_gmac_soft_reset(gmac);
        }
        break;

    case A_NPCM_DMA_RCV_POLL_DEMAND:
        /* We dont actually care about the value */
        gmac_dma_set_state(gmac, NPCM_DMA_STATUS_RX_PROCESS_STATE_SHIFT,
                           NPCM_DMA_STATUS_RX_RUNNING_WAITING_STATE);
        break;

    case A_NPCM_DMA_XMT_POLL_DEMAND:
        /* We dont actually care about the value */
        gmac_try_send_next_packet(gmac);
        break;

    case A_NPCM_DMA_CONTROL:
        gmac->regs[offset / sizeof(uint32_t)] = v;
        /* Start/stop the Tx engine; starting also drains the Tx ring. */
        if (v & NPCM_DMA_CONTROL_START_STOP_TX) {
            gmac_try_send_next_packet(gmac);
        } else {
            gmac_dma_set_state(gmac, NPCM_DMA_STATUS_TX_PROCESS_STATE_SHIFT,
                               NPCM_DMA_STATUS_TX_STOPPED_STATE);
        }
        /* Start/stop the Rx engine; starting flushes queued packets. */
        if (v & NPCM_DMA_CONTROL_START_STOP_RX) {
            gmac_dma_set_state(gmac, NPCM_DMA_STATUS_RX_PROCESS_STATE_SHIFT,
                               NPCM_DMA_STATUS_RX_RUNNING_WAITING_STATE);
            qemu_flush_queued_packets(qemu_get_queue(gmac->nic));
        } else {
            gmac_dma_set_state(gmac, NPCM_DMA_STATUS_RX_PROCESS_STATE_SHIFT,
                               NPCM_DMA_STATUS_RX_STOPPED_STATE);
        }
        break;

    case A_NPCM_DMA_STATUS:
        /* Check that RO bits are not written to */
        if (NPCM_DMA_STATUS_RO_MASK(v)) {
            qemu_log_mask(LOG_GUEST_ERROR,
                          "%s: Write of read-only bits of reg: offset: 0x%04"
                          HWADDR_PRIx ", value: 0x%04" PRIx64 "\n",
                          DEVICE(gmac)->canonical_path, offset, v);
        }
        /* for W1C bits, implement W1C */
        gmac->regs[offset / sizeof(uint32_t)] &= ~NPCM_DMA_STATUS_W1C_MASK(v);
        if (v & NPCM_DMA_STATUS_RU) {
            /* Clearing RU bit indicates descriptor is owned by DMA again. */
            gmac_dma_set_state(gmac, NPCM_DMA_STATUS_RX_PROCESS_STATE_SHIFT,
                               NPCM_DMA_STATUS_RX_RUNNING_WAITING_STATE);
            qemu_flush_queued_packets(qemu_get_queue(gmac->nic));
        }
        break;

    default:
        gmac->regs[offset / sizeof(uint32_t)] = v;
        break;
    }

    gmac_update_irq(gmac);
}
  758. static void npcm_gmac_reset(DeviceState *dev)
  759. {
  760. NPCMGMACState *gmac = NPCM_GMAC(dev);
  761. npcm_gmac_soft_reset(gmac);
  762. memcpy(gmac->phy_regs[0], phy_reg_init, sizeof(phy_reg_init));
  763. trace_npcm_gmac_reset(DEVICE(gmac)->canonical_path,
  764. gmac->phy_regs[0][MII_BMSR]);
  765. }
  766. static NetClientInfo net_npcm_gmac_info = {
  767. .type = NET_CLIENT_DRIVER_NIC,
  768. .size = sizeof(NICState),
  769. .can_receive = gmac_can_receive,
  770. .receive = gmac_receive,
  771. .cleanup = gmac_cleanup,
  772. .link_status_changed = gmac_set_link,
  773. };
  774. static const struct MemoryRegionOps npcm_gmac_ops = {
  775. .read = npcm_gmac_read,
  776. .write = npcm_gmac_write,
  777. .endianness = DEVICE_LITTLE_ENDIAN,
  778. .valid = {
  779. .min_access_size = 4,
  780. .max_access_size = 4,
  781. .unaligned = false,
  782. },
  783. };
  784. static void npcm_gmac_realize(DeviceState *dev, Error **errp)
  785. {
  786. NPCMGMACState *gmac = NPCM_GMAC(dev);
  787. SysBusDevice *sbd = SYS_BUS_DEVICE(dev);
  788. memory_region_init_io(&gmac->iomem, OBJECT(gmac), &npcm_gmac_ops, gmac,
  789. TYPE_NPCM_GMAC, 8 * KiB);
  790. sysbus_init_mmio(sbd, &gmac->iomem);
  791. sysbus_init_irq(sbd, &gmac->irq);
  792. qemu_macaddr_default_if_unset(&gmac->conf.macaddr);
  793. gmac->nic = qemu_new_nic(&net_npcm_gmac_info, &gmac->conf, TYPE_NPCM_GMAC,
  794. dev->id, &dev->mem_reentrancy_guard, gmac);
  795. qemu_format_nic_info_str(qemu_get_queue(gmac->nic), gmac->conf.macaddr.a);
  796. gmac->regs[R_NPCM_GMAC_MAC0_ADDR_HI] = (gmac->conf.macaddr.a[0] << 8) + \
  797. gmac->conf.macaddr.a[1];
  798. gmac->regs[R_NPCM_GMAC_MAC0_ADDR_LO] = (gmac->conf.macaddr.a[2] << 24) + \
  799. (gmac->conf.macaddr.a[3] << 16) + \
  800. (gmac->conf.macaddr.a[4] << 8) + \
  801. gmac->conf.macaddr.a[5];
  802. }
  803. static void npcm_gmac_unrealize(DeviceState *dev)
  804. {
  805. NPCMGMACState *gmac = NPCM_GMAC(dev);
  806. qemu_del_nic(gmac->nic);
  807. }
  808. static const VMStateDescription vmstate_npcm_gmac = {
  809. .name = TYPE_NPCM_GMAC,
  810. .version_id = 0,
  811. .minimum_version_id = 0,
  812. .fields = (VMStateField[]) {
  813. VMSTATE_UINT32_ARRAY(regs, NPCMGMACState, NPCM_GMAC_NR_REGS),
  814. VMSTATE_END_OF_LIST(),
  815. },
  816. };
/* User-configurable properties: the standard NIC set (mac, netdev, ...). */
static const Property npcm_gmac_properties[] = {
    DEFINE_NIC_PROPERTIES(NPCMGMACState, conf),
};
  820. static void npcm_gmac_class_init(ObjectClass *klass, void *data)
  821. {
  822. DeviceClass *dc = DEVICE_CLASS(klass);
  823. set_bit(DEVICE_CATEGORY_NETWORK, dc->categories);
  824. dc->desc = "NPCM GMAC Controller";
  825. dc->realize = npcm_gmac_realize;
  826. dc->unrealize = npcm_gmac_unrealize;
  827. device_class_set_legacy_reset(dc, npcm_gmac_reset);
  828. dc->vmsd = &vmstate_npcm_gmac;
  829. device_class_set_props(dc, npcm_gmac_properties);
  830. }
/* QOM type registration: one sysbus device type for the GMAC model. */
static const TypeInfo npcm_gmac_types[] = {
    {
        .name = TYPE_NPCM_GMAC,
        .parent = TYPE_SYS_BUS_DEVICE,
        .instance_size = sizeof(NPCMGMACState),
        .class_init = npcm_gmac_class_init,
    },
};
DEFINE_TYPES(npcm_gmac_types)