2
0

sungem.c 43 KB

12345678910111213141516171819202122232425262728293031323334353637383940414243444546474849505152535455565758596061626364656667686970717273747576777879808182838485868788899091929394959697989910010110210310410510610710810911011111211311411511611711811912012112212312412512612712812913013113213313413513613713813914014114214314414514614714814915015115215315415515615715815916016116216316416516616716816917017117217317417517617717817918018118218318418518618718818919019119219319419519619719819920020120220320420520620720820921021121221321421521621721821922022122222322422522622722822923023123223323423523623723823924024124224324424524624724824925025125225325425525625725825926026126226326426526626726826927027127227327427527627727827928028128228328428528628728828929029129229329429529629729829930030130230330430530630730830931031131231331431531631731831932032132232332432532632732832933033133233333433533633733833934034134234334434534634734834935035135235335435535635735835936036136236336436536636736836937037137237337437537637737837938038138238338438538638738838939039139239339439539639739839940040140240340440540640740840941041141241341441541641741841942042142242342442542642742842943043143243343443543643743843944044144244344444544644744844945045145245345445545645745845946046146246346446546646746846947047147247347447547647747847948048148248348448548648748848949049149249349449549649749849950050150250350450550650750850951051151251351451551651751851952052152252352452552652752852953053153253353453553653753853954054154254354454554654754854955055155255355455555655755855956056156256356456556656756856957057157257357457557657757857958058158258358458558658758858959059159259359459559659759859960060160260360460560660760860961061161261361461561661761861962062162262362462562662762862963063163263363463563663763863964064164264364464564664764864965065165265365465565665765865966066166266366466566666766866967067167267367467567667767867968068168268368468568668768868969069169269369469569669769869970070170270
37047057067077087097107117127137147157167177187197207217227237247257267277287297307317327337347357367377387397407417427437447457467477487497507517527537547557567577587597607617627637647657667677687697707717727737747757767777787797807817827837847857867877887897907917927937947957967977987998008018028038048058068078088098108118128138148158168178188198208218228238248258268278288298308318328338348358368378388398408418428438448458468478488498508518528538548558568578588598608618628638648658668678688698708718728738748758768778788798808818828838848858868878888898908918928938948958968978988999009019029039049059069079089099109119129139149159169179189199209219229239249259269279289299309319329339349359369379389399409419429439449459469479489499509519529539549559569579589599609619629639649659669679689699709719729739749759769779789799809819829839849859869879889899909919929939949959969979989991000100110021003100410051006100710081009101010111012101310141015101610171018101910201021102210231024102510261027102810291030103110321033103410351036103710381039104010411042104310441045104610471048104910501051105210531054105510561057105810591060106110621063106410651066106710681069107010711072107310741075107610771078107910801081108210831084108510861087108810891090109110921093109410951096109710981099110011011102110311041105110611071108110911101111111211131114111511161117111811191120112111221123112411251126112711281129113011311132113311341135113611371138113911401141114211431144114511461147114811491150115111521153115411551156115711581159116011611162116311641165116611671168116911701171117211731174117511761177117811791180118111821183118411851186118711881189119011911192119311941195119611971198119912001201120212031204120512061207120812091210121112121213121412151216121712181219122012211222122312241225122612271228122912301231123212331234123512361237123812391240124112421243124412451246124712481249125012511252125312541255125612571258125912601261126212631264126512661267126812691270127112721273127412751276127
71278127912801281128212831284128512861287128812891290129112921293129412951296129712981299130013011302130313041305130613071308130913101311131213131314131513161317131813191320132113221323132413251326132713281329133013311332133313341335133613371338133913401341134213431344134513461347134813491350135113521353135413551356135713581359136013611362136313641365136613671368136913701371137213731374137513761377137813791380138113821383138413851386138713881389139013911392139313941395139613971398139914001401140214031404140514061407140814091410141114121413141414151416141714181419142014211422142314241425142614271428142914301431143214331434143514361437143814391440144114421443144414451446144714481449145014511452145314541455145614571458145914601461146214631464146514661467146814691470147114721473147414751476147714781479148014811482148314841485148614871488148914901491
  1. /*
  2. * QEMU model of SUN GEM ethernet controller
  3. *
  4. * As found in Apple ASICs among others
  5. *
  6. * Copyright 2016 Ben Herrenschmidt
  7. * Copyright 2017 Mark Cave-Ayland
  8. */
  9. #include "qemu/osdep.h"
  10. #include "hw/pci/pci_device.h"
  11. #include "hw/qdev-properties.h"
  12. #include "migration/vmstate.h"
  13. #include "qemu/log.h"
  14. #include "qemu/module.h"
  15. #include "net/net.h"
  16. #include "net/eth.h"
  17. #include "net/checksum.h"
  18. #include "hw/net/mii.h"
  19. #include "system/system.h"
  20. #include "trace.h"
  21. #include "qom/object.h"
  22. #define TYPE_SUNGEM "sungem"
  23. OBJECT_DECLARE_SIMPLE_TYPE(SunGEMState, SUNGEM)
  24. #define MAX_PACKET_SIZE 9016
  25. #define SUNGEM_MMIO_SIZE 0x200000
  26. /* Global registers */
  27. #define SUNGEM_MMIO_GREG_SIZE 0x2000
  28. #define GREG_SEBSTATE 0x0000UL /* SEB State Register */
  29. #define GREG_STAT 0x000CUL /* Status Register */
  30. #define GREG_STAT_TXINTME 0x00000001 /* TX INTME frame transferred */
  31. #define GREG_STAT_TXALL 0x00000002 /* All TX frames transferred */
  32. #define GREG_STAT_TXDONE 0x00000004 /* One TX frame transferred */
  33. #define GREG_STAT_RXDONE 0x00000010 /* One RX frame arrived */
  34. #define GREG_STAT_RXNOBUF 0x00000020 /* No free RX buffers available */
  35. #define GREG_STAT_RXTAGERR 0x00000040 /* RX tag framing is corrupt */
  36. #define GREG_STAT_TXMAC 0x00004000 /* TX MAC signalled interrupt */
  37. #define GREG_STAT_RXMAC 0x00008000 /* RX MAC signalled interrupt */
  38. #define GREG_STAT_MAC 0x00010000 /* MAC Control signalled irq */
  39. #define GREG_STAT_TXNR 0xfff80000 /* == TXDMA_TXDONE reg val */
  40. #define GREG_STAT_TXNR_SHIFT 19
  41. /* These interrupts are edge latches in the status register,
  42. * reading it (or writing the corresponding bit in IACK) will
  43. * clear them
  44. */
  45. #define GREG_STAT_LATCH (GREG_STAT_TXALL | GREG_STAT_TXINTME | \
  46. GREG_STAT_RXDONE | GREG_STAT_RXDONE | \
  47. GREG_STAT_RXNOBUF | GREG_STAT_RXTAGERR)
  48. #define GREG_IMASK 0x0010UL /* Interrupt Mask Register */
  49. #define GREG_IACK 0x0014UL /* Interrupt ACK Register */
  50. #define GREG_STAT2 0x001CUL /* Alias of GREG_STAT */
  51. #define GREG_PCIESTAT 0x1000UL /* PCI Error Status Register */
  52. #define GREG_PCIEMASK 0x1004UL /* PCI Error Mask Register */
  53. #define GREG_SWRST 0x1010UL /* Software Reset Register */
  54. #define GREG_SWRST_TXRST 0x00000001 /* TX Software Reset */
  55. #define GREG_SWRST_RXRST 0x00000002 /* RX Software Reset */
  56. #define GREG_SWRST_RSTOUT 0x00000004 /* Force RST# pin active */
  57. /* TX DMA Registers */
  58. #define SUNGEM_MMIO_TXDMA_SIZE 0x1000
  59. #define TXDMA_KICK 0x0000UL /* TX Kick Register */
  60. #define TXDMA_CFG 0x0004UL /* TX Configuration Register */
  61. #define TXDMA_CFG_ENABLE 0x00000001 /* Enable TX DMA channel */
  62. #define TXDMA_CFG_RINGSZ 0x0000001e /* TX descriptor ring size */
  63. #define TXDMA_DBLOW 0x0008UL /* TX Desc. Base Low */
  64. #define TXDMA_DBHI 0x000CUL /* TX Desc. Base High */
  65. #define TXDMA_PCNT 0x0024UL /* TX FIFO Packet Counter */
  66. #define TXDMA_SMACHINE 0x0028UL /* TX State Machine Register */
  67. #define TXDMA_DPLOW 0x0030UL /* TX Data Pointer Low */
  68. #define TXDMA_DPHI 0x0034UL /* TX Data Pointer High */
  69. #define TXDMA_TXDONE 0x0100UL /* TX Completion Register */
  70. #define TXDMA_FTAG 0x0108UL /* TX FIFO Tag */
  71. #define TXDMA_FSZ 0x0118UL /* TX FIFO Size */
  72. /* Receive DMA Registers */
  73. #define SUNGEM_MMIO_RXDMA_SIZE 0x2000
  74. #define RXDMA_CFG 0x0000UL /* RX Configuration Register */
  75. #define RXDMA_CFG_ENABLE 0x00000001 /* Enable RX DMA channel */
  76. #define RXDMA_CFG_RINGSZ 0x0000001e /* RX descriptor ring size */
  77. #define RXDMA_CFG_FBOFF 0x00001c00 /* Offset of first data byte */
  78. #define RXDMA_CFG_CSUMOFF 0x000fe000 /* Skip bytes before csum calc */
  79. #define RXDMA_DBLOW 0x0004UL /* RX Descriptor Base Low */
  80. #define RXDMA_DBHI 0x0008UL /* RX Descriptor Base High */
  81. #define RXDMA_PCNT 0x0018UL /* RX FIFO Packet Counter */
  82. #define RXDMA_SMACHINE 0x001CUL /* RX State Machine Register */
  83. #define RXDMA_PTHRESH 0x0020UL /* Pause Thresholds */
  84. #define RXDMA_DPLOW 0x0024UL /* RX Data Pointer Low */
  85. #define RXDMA_DPHI 0x0028UL /* RX Data Pointer High */
  86. #define RXDMA_KICK 0x0100UL /* RX Kick Register */
  87. #define RXDMA_DONE 0x0104UL /* RX Completion Register */
  88. #define RXDMA_BLANK 0x0108UL /* RX Blanking Register */
  89. #define RXDMA_FTAG 0x0110UL /* RX FIFO Tag */
  90. #define RXDMA_FSZ 0x0120UL /* RX FIFO Size */
  91. /* WOL Registers */
  92. #define SUNGEM_MMIO_WOL_SIZE 0x14
  93. #define WOL_MATCH0 0x0000UL
  94. #define WOL_MATCH1 0x0004UL
  95. #define WOL_MATCH2 0x0008UL
  96. #define WOL_MCOUNT 0x000CUL
  97. #define WOL_WAKECSR 0x0010UL
  98. /* MAC Registers */
  99. #define SUNGEM_MMIO_MAC_SIZE 0x200
  100. #define MAC_TXRST 0x0000UL /* TX MAC Software Reset Command */
  101. #define MAC_RXRST 0x0004UL /* RX MAC Software Reset Command */
  102. #define MAC_TXSTAT 0x0010UL /* TX MAC Status Register */
  103. #define MAC_RXSTAT 0x0014UL /* RX MAC Status Register */
  104. #define MAC_CSTAT 0x0018UL /* MAC Control Status Register */
  105. #define MAC_CSTAT_PTR 0xffff0000 /* Pause Time Received */
  106. #define MAC_TXMASK 0x0020UL /* TX MAC Mask Register */
  107. #define MAC_RXMASK 0x0024UL /* RX MAC Mask Register */
  108. #define MAC_MCMASK 0x0028UL /* MAC Control Mask Register */
  109. #define MAC_TXCFG 0x0030UL /* TX MAC Configuration Register */
  110. #define MAC_TXCFG_ENAB 0x00000001 /* TX MAC Enable */
  111. #define MAC_RXCFG 0x0034UL /* RX MAC Configuration Register */
  112. #define MAC_RXCFG_ENAB 0x00000001 /* RX MAC Enable */
  113. #define MAC_RXCFG_SFCS 0x00000004 /* Strip FCS */
  114. #define MAC_RXCFG_PROM 0x00000008 /* Promiscuous Mode */
  115. #define MAC_RXCFG_PGRP 0x00000010 /* Promiscuous Group */
  116. #define MAC_RXCFG_HFE 0x00000020 /* Hash Filter Enable */
  117. #define MAC_XIFCFG 0x003CUL /* XIF Configuration Register */
  118. #define MAC_XIFCFG_LBCK 0x00000002 /* Loopback TX to RX */
  119. #define MAC_MINFSZ 0x0050UL /* MinFrameSize Register */
  120. #define MAC_MAXFSZ 0x0054UL /* MaxFrameSize Register */
  121. #define MAC_ADDR0 0x0080UL /* MAC Address 0 Register */
  122. #define MAC_ADDR1 0x0084UL /* MAC Address 1 Register */
  123. #define MAC_ADDR2 0x0088UL /* MAC Address 2 Register */
  124. #define MAC_ADDR3 0x008CUL /* MAC Address 3 Register */
  125. #define MAC_ADDR4 0x0090UL /* MAC Address 4 Register */
  126. #define MAC_ADDR5 0x0094UL /* MAC Address 5 Register */
  127. #define MAC_HASH0 0x00C0UL /* Hash Table 0 Register */
  128. #define MAC_PATMPS 0x0114UL /* Peak Attempts Register */
  129. #define MAC_SMACHINE 0x0134UL /* State Machine Register */
  130. /* MIF Registers */
  131. #define SUNGEM_MMIO_MIF_SIZE 0x20
  132. #define MIF_FRAME 0x000CUL /* MIF Frame/Output Register */
  133. #define MIF_FRAME_OP 0x30000000 /* OPcode */
  134. #define MIF_FRAME_PHYAD 0x0f800000 /* PHY ADdress */
  135. #define MIF_FRAME_REGAD 0x007c0000 /* REGister ADdress */
  136. #define MIF_FRAME_TALSB 0x00010000 /* Turn Around LSB */
  137. #define MIF_FRAME_DATA 0x0000ffff /* Instruction Payload */
  138. #define MIF_CFG 0x0010UL /* MIF Configuration Register */
  139. #define MIF_CFG_MDI0 0x00000100 /* MDIO_0 present or read-bit */
  140. #define MIF_CFG_MDI1 0x00000200 /* MDIO_1 present or read-bit */
  141. #define MIF_STATUS 0x0018UL /* MIF Status Register */
  142. #define MIF_SMACHINE 0x001CUL /* MIF State Machine Register */
  143. /* PCS/Serialink Registers */
  144. #define SUNGEM_MMIO_PCS_SIZE 0x60
  145. #define PCS_MIISTAT 0x0004UL /* PCS MII Status Register */
  146. #define PCS_ISTAT 0x0018UL /* PCS Interrupt Status Reg */
  147. #define PCS_SSTATE 0x005CUL /* Serialink State Register */
  148. /* Descriptors */
/* TX descriptor as it appears in the guest's ring (little-endian on
 * the wire; byteswapped with le64_to_cpu() when fetched).
 */
struct gem_txd {
    uint64_t control_word;  /* TXDCTRL_* flags, length and csum offsets */
    uint64_t buffer;        /* guest physical address of the fragment */
};
  153. #define TXDCTRL_BUFSZ 0x0000000000007fffULL /* Buffer Size */
  154. #define TXDCTRL_CSTART 0x00000000001f8000ULL /* CSUM Start Offset */
  155. #define TXDCTRL_COFF 0x000000001fe00000ULL /* CSUM Stuff Offset */
  156. #define TXDCTRL_CENAB 0x0000000020000000ULL /* CSUM Enable */
  157. #define TXDCTRL_EOF 0x0000000040000000ULL /* End of Frame */
  158. #define TXDCTRL_SOF 0x0000000080000000ULL /* Start of Frame */
  159. #define TXDCTRL_INTME 0x0000000100000000ULL /* "Interrupt Me" */
/* RX descriptor as it appears in the guest's ring (little-endian on
 * the wire; byteswapped with le64_to_cpu()/cpu_to_le64() on access).
 */
struct gem_rxd {
    uint64_t status_word;   /* size, checksum, hash and RXDCTRL_* flags */
    uint64_t buffer;        /* guest physical address of the RX buffer */
};
  164. #define RXDCTRL_HPASS 0x1000000000000000ULL /* Passed Hash Filter */
  165. #define RXDCTRL_ALTMAC 0x2000000000000000ULL /* Matched ALT MAC */
/* Device state for a single SUN GEM NIC instance */
struct SunGEMState {
    PCIDevice pdev;

    /* MMIO container and one sub-region per register bank */
    MemoryRegion sungem;
    MemoryRegion greg;
    MemoryRegion txdma;
    MemoryRegion rxdma;
    MemoryRegion wol;
    MemoryRegion mac;
    MemoryRegion mif;
    MemoryRegion pcs;
    NICState *nic;
    NICConf conf;
    uint32_t phy_addr;   /* MII address the emulated PHY answers to */

    /* Raw register files, one word per 32-bit register (offset >> 2) */
    uint32_t gregs[SUNGEM_MMIO_GREG_SIZE >> 2];
    uint32_t txdmaregs[SUNGEM_MMIO_TXDMA_SIZE >> 2];
    uint32_t rxdmaregs[SUNGEM_MMIO_RXDMA_SIZE >> 2];
    uint32_t macregs[SUNGEM_MMIO_MAC_SIZE >> 2];
    uint32_t mifregs[SUNGEM_MMIO_MIF_SIZE >> 2];
    uint32_t pcsregs[SUNGEM_MMIO_PCS_SIZE >> 2];

    /* Cache some useful things */
    uint32_t rx_mask;    /* RX ring index mask (ring size - 1) */
    uint32_t tx_mask;    /* TX ring index mask (ring size - 1) */

    /* Current tx packet */
    uint8_t tx_data[MAX_PACKET_SIZE];  /* frame assembly buffer */
    uint32_t tx_size;                  /* bytes accumulated so far */
    uint64_t tx_first_ctl;             /* control word of the SOF descriptor */
};
  193. static void sungem_eval_irq(SunGEMState *s)
  194. {
  195. uint32_t stat, mask;
  196. mask = s->gregs[GREG_IMASK >> 2];
  197. stat = s->gregs[GREG_STAT >> 2] & ~GREG_STAT_TXNR;
  198. if (stat & ~mask) {
  199. pci_set_irq(PCI_DEVICE(s), 1);
  200. } else {
  201. pci_set_irq(PCI_DEVICE(s), 0);
  202. }
  203. }
  204. static void sungem_update_status(SunGEMState *s, uint32_t bits, bool val)
  205. {
  206. uint32_t stat;
  207. stat = s->gregs[GREG_STAT >> 2];
  208. if (val) {
  209. stat |= bits;
  210. } else {
  211. stat &= ~bits;
  212. }
  213. s->gregs[GREG_STAT >> 2] = stat;
  214. sungem_eval_irq(s);
  215. }
  216. static void sungem_eval_cascade_irq(SunGEMState *s)
  217. {
  218. uint32_t stat, mask;
  219. mask = s->macregs[MAC_TXSTAT >> 2];
  220. stat = s->macregs[MAC_TXMASK >> 2];
  221. if (stat & ~mask) {
  222. sungem_update_status(s, GREG_STAT_TXMAC, true);
  223. } else {
  224. sungem_update_status(s, GREG_STAT_TXMAC, false);
  225. }
  226. mask = s->macregs[MAC_RXSTAT >> 2];
  227. stat = s->macregs[MAC_RXMASK >> 2];
  228. if (stat & ~mask) {
  229. sungem_update_status(s, GREG_STAT_RXMAC, true);
  230. } else {
  231. sungem_update_status(s, GREG_STAT_RXMAC, false);
  232. }
  233. mask = s->macregs[MAC_CSTAT >> 2];
  234. stat = s->macregs[MAC_MCMASK >> 2] & ~MAC_CSTAT_PTR;
  235. if (stat & ~mask) {
  236. sungem_update_status(s, GREG_STAT_MAC, true);
  237. } else {
  238. sungem_update_status(s, GREG_STAT_MAC, false);
  239. }
  240. }
  241. static void sungem_do_tx_csum(SunGEMState *s)
  242. {
  243. uint16_t start, off;
  244. uint32_t csum;
  245. start = (s->tx_first_ctl & TXDCTRL_CSTART) >> 15;
  246. off = (s->tx_first_ctl & TXDCTRL_COFF) >> 21;
  247. trace_sungem_tx_checksum(start, off);
  248. if (start > (s->tx_size - 2) || off > (s->tx_size - 2)) {
  249. trace_sungem_tx_checksum_oob();
  250. return;
  251. }
  252. csum = net_raw_checksum(s->tx_data + start, s->tx_size - start);
  253. stw_be_p(s->tx_data + off, csum);
  254. }
  255. static void sungem_send_packet(SunGEMState *s, const uint8_t *buf,
  256. int size)
  257. {
  258. NetClientState *nc = qemu_get_queue(s->nic);
  259. if (s->macregs[MAC_XIFCFG >> 2] & MAC_XIFCFG_LBCK) {
  260. qemu_receive_packet(nc, buf, size);
  261. } else {
  262. qemu_send_packet(nc, buf, size);
  263. }
  264. }
  265. static void sungem_process_tx_desc(SunGEMState *s, struct gem_txd *desc)
  266. {
  267. PCIDevice *d = PCI_DEVICE(s);
  268. uint32_t len;
  269. /* If it's a start of frame, discard anything we had in the
  270. * buffer and start again. This should be an error condition
  271. * if we had something ... for now we ignore it
  272. */
  273. if (desc->control_word & TXDCTRL_SOF) {
  274. if (s->tx_first_ctl) {
  275. trace_sungem_tx_unfinished();
  276. }
  277. s->tx_size = 0;
  278. s->tx_first_ctl = desc->control_word;
  279. }
  280. /* Grab data size */
  281. len = desc->control_word & TXDCTRL_BUFSZ;
  282. /* Clamp it to our max size */
  283. if ((s->tx_size + len) > MAX_PACKET_SIZE) {
  284. trace_sungem_tx_overflow();
  285. len = MAX_PACKET_SIZE - s->tx_size;
  286. }
  287. /* Read the data */
  288. pci_dma_read(d, desc->buffer, &s->tx_data[s->tx_size], len);
  289. s->tx_size += len;
  290. /* If end of frame, send packet */
  291. if (desc->control_word & TXDCTRL_EOF) {
  292. trace_sungem_tx_finished(s->tx_size);
  293. /* Handle csum */
  294. if (s->tx_first_ctl & TXDCTRL_CENAB) {
  295. sungem_do_tx_csum(s);
  296. }
  297. /* Send it */
  298. sungem_send_packet(s, s->tx_data, s->tx_size);
  299. /* No more pending packet */
  300. s->tx_size = 0;
  301. s->tx_first_ctl = 0;
  302. }
  303. }
/*
 * Walk the TX descriptor ring from the completion pointer
 * (TXDMA_TXDONE) to the kick pointer (TXDMA_KICK), processing and
 * transmitting every descriptor found, raising per-frame interrupts
 * along the way and GREG_STAT_TXALL once the ring is drained.
 */
static void sungem_tx_kick(SunGEMState *s)
{
    PCIDevice *d = PCI_DEVICE(s);
    uint32_t comp, kick;
    uint32_t txdma_cfg, txmac_cfg, ints;
    uint64_t dbase;

    trace_sungem_tx_kick();

    /* Check that both TX MAC and TX DMA are enabled. We don't
     * handle DMA-less direct FIFO operations (we don't emulate
     * the FIFO at all).
     *
     * A write to TXDMA_KICK while DMA isn't enabled can happen
     * when the driver is resetting the pointer.
     */
    txdma_cfg = s->txdmaregs[TXDMA_CFG >> 2];
    txmac_cfg = s->macregs[MAC_TXCFG >> 2];
    if (!(txdma_cfg & TXDMA_CFG_ENABLE) ||
        !(txmac_cfg & MAC_TXCFG_ENAB)) {
        trace_sungem_tx_disabled();
        return;
    }

    /* XXX Test min frame size register ? */
    /* XXX Test max frame size register ? */

    /* 64-bit ring base assembled from the high/low base registers */
    dbase = s->txdmaregs[TXDMA_DBHI >> 2];
    dbase = (dbase << 32) | s->txdmaregs[TXDMA_DBLOW >> 2];

    comp = s->txdmaregs[TXDMA_TXDONE >> 2] & s->tx_mask;
    kick = s->txdmaregs[TXDMA_KICK >> 2] & s->tx_mask;

    trace_sungem_tx_process(comp, kick, s->tx_mask + 1);

    /* This is rather primitive for now, we just send everything we
     * can in one go, like e1000. Ideally we should do the sending
     * from some kind of background task
     */
    while (comp != kick) {
        struct gem_txd desc;

        /* Read the next descriptor */
        pci_dma_read(d, dbase + comp * sizeof(desc), &desc, sizeof(desc));

        /* Byteswap descriptor */
        desc.control_word = le64_to_cpu(desc.control_word);
        desc.buffer = le64_to_cpu(desc.buffer);
        trace_sungem_tx_desc(comp, desc.control_word, desc.buffer);

        /* Send it for processing */
        sungem_process_tx_desc(s, &desc);

        /* Interrupt: per-frame TXDONE, plus TXINTME if requested */
        ints = GREG_STAT_TXDONE;
        if (desc.control_word & TXDCTRL_INTME) {
            ints |= GREG_STAT_TXINTME;
        }
        sungem_update_status(s, ints, true);

        /* Next ! Completion pointer is published after each frame */
        comp = (comp + 1) & s->tx_mask;
        s->txdmaregs[TXDMA_TXDONE >> 2] = comp;
    }

    /* We sent everything, set status/irq bit */
    sungem_update_status(s, GREG_STAT_TXALL, true);
}
  359. static bool sungem_rx_full(SunGEMState *s, uint32_t kick, uint32_t done)
  360. {
  361. return kick == ((done + 1) & s->rx_mask);
  362. }
  363. static bool sungem_can_receive(NetClientState *nc)
  364. {
  365. SunGEMState *s = qemu_get_nic_opaque(nc);
  366. uint32_t kick, done, rxdma_cfg, rxmac_cfg;
  367. bool full;
  368. rxmac_cfg = s->macregs[MAC_RXCFG >> 2];
  369. rxdma_cfg = s->rxdmaregs[RXDMA_CFG >> 2];
  370. /* If MAC disabled, can't receive */
  371. if ((rxmac_cfg & MAC_RXCFG_ENAB) == 0) {
  372. trace_sungem_rx_mac_disabled();
  373. return false;
  374. }
  375. if ((rxdma_cfg & RXDMA_CFG_ENABLE) == 0) {
  376. trace_sungem_rx_txdma_disabled();
  377. return false;
  378. }
  379. /* Check RX availability */
  380. kick = s->rxdmaregs[RXDMA_KICK >> 2];
  381. done = s->rxdmaregs[RXDMA_DONE >> 2];
  382. full = sungem_rx_full(s, kick, done);
  383. trace_sungem_rx_check(!full, kick, done);
  384. return !full;
  385. }
/* Result of RX destination-address filtering; anything other than
 * rx_no_match accepts the frame. mcast/altmac results are also
 * reported back to the guest via RXDCTRL_HPASS/RXDCTRL_ALTMAC.
 */
enum {
    rx_no_match,        /* no filter accepted the frame: drop it */
    rx_match_promisc,   /* promiscuous mode is on */
    rx_match_bcast,     /* broadcast destination address */
    rx_match_allmcast,  /* promiscuous-group (all multicast) */
    rx_match_mcast,     /* multicast hash filter hit */
    rx_match_mac,       /* primary station address */
    rx_match_altmac,    /* alternate station address */
};
  395. static int sungem_check_rx_mac(SunGEMState *s, const uint8_t *mac, uint32_t crc)
  396. {
  397. uint32_t rxcfg = s->macregs[MAC_RXCFG >> 2];
  398. uint32_t mac0, mac1, mac2;
  399. /* Promisc enabled ? */
  400. if (rxcfg & MAC_RXCFG_PROM) {
  401. return rx_match_promisc;
  402. }
  403. /* Format MAC address into dwords */
  404. mac0 = (mac[4] << 8) | mac[5];
  405. mac1 = (mac[2] << 8) | mac[3];
  406. mac2 = (mac[0] << 8) | mac[1];
  407. trace_sungem_rx_mac_check(mac0, mac1, mac2);
  408. /* Is this a broadcast frame ? */
  409. if (mac0 == 0xffff && mac1 == 0xffff && mac2 == 0xffff) {
  410. return rx_match_bcast;
  411. }
  412. /* TODO: Implement address filter registers (or we don't care ?) */
  413. /* Is this a multicast frame ? */
  414. if (mac[0] & 1) {
  415. trace_sungem_rx_mac_multicast();
  416. /* Promisc group enabled ? */
  417. if (rxcfg & MAC_RXCFG_PGRP) {
  418. return rx_match_allmcast;
  419. }
  420. /* TODO: Check MAC control frames (or we don't care) ? */
  421. /* Check hash filter (somebody check that's correct ?) */
  422. if (rxcfg & MAC_RXCFG_HFE) {
  423. uint32_t hash, idx;
  424. crc >>= 24;
  425. idx = (crc >> 2) & 0x3c;
  426. hash = s->macregs[(MAC_HASH0 + idx) >> 2];
  427. if (hash & (1 << (15 - (crc & 0xf)))) {
  428. return rx_match_mcast;
  429. }
  430. }
  431. return rx_no_match;
  432. }
  433. /* Main MAC check */
  434. trace_sungem_rx_mac_compare(s->macregs[MAC_ADDR0 >> 2],
  435. s->macregs[MAC_ADDR1 >> 2],
  436. s->macregs[MAC_ADDR2 >> 2]);
  437. if (mac0 == s->macregs[MAC_ADDR0 >> 2] &&
  438. mac1 == s->macregs[MAC_ADDR1 >> 2] &&
  439. mac2 == s->macregs[MAC_ADDR2 >> 2]) {
  440. return rx_match_mac;
  441. }
  442. /* Alt MAC check */
  443. if (mac0 == s->macregs[MAC_ADDR3 >> 2] &&
  444. mac1 == s->macregs[MAC_ADDR4 >> 2] &&
  445. mac2 == s->macregs[MAC_ADDR5 >> 2]) {
  446. return rx_match_altmac;
  447. }
  448. return rx_no_match;
  449. }
/*
 * net layer receive callback: filter the incoming frame, DMA it into
 * the guest buffer described by the next RX descriptor, write back an
 * updated status word, advance the completion pointer and raise the
 * RX interrupts. Returns @size when the frame was consumed (including
 * deliberate drops) or 0 to ask the net layer to retry later.
 */
static ssize_t sungem_receive(NetClientState *nc, const uint8_t *buf,
                              size_t size)
{
    SunGEMState *s = qemu_get_nic_opaque(nc);
    PCIDevice *d = PCI_DEVICE(s);
    uint32_t mac_crc, done, kick, max_fsize;
    uint32_t fcs_size, ints, rxdma_cfg, rxmac_cfg, csum, coff;
    struct gem_rxd desc;
    uint64_t dbase, baddr;
    unsigned int rx_cond;

    trace_sungem_rx_packet(size);

    rxmac_cfg = s->macregs[MAC_RXCFG >> 2];
    rxdma_cfg = s->rxdmaregs[RXDMA_CFG >> 2];
    max_fsize = s->macregs[MAC_MAXFSZ >> 2] & 0x7fff;

    /* If MAC or DMA disabled, can't receive */
    if (!(rxdma_cfg & RXDMA_CFG_ENABLE) ||
        !(rxmac_cfg & MAC_RXCFG_ENAB)) {
        trace_sungem_rx_disabled();
        return 0;
    }

    /* Size adjustment for FCS: 4 bytes are accounted for unless the
     * guest asked us to strip the FCS.
     */
    if (rxmac_cfg & MAC_RXCFG_SFCS) {
        fcs_size = 0;
    } else {
        fcs_size = 4;
    }

    /* Discard frame smaller than a MAC or larger than max frame size
     * (when accounting for FCS)
     */
    if (size < 6 || (size + 4) > max_fsize) {
        trace_sungem_rx_bad_frame_size(size);
        /* XXX Increment error statistics ? */
        return size;
    }

    /* Get MAC crc (of the destination address, for the hash filter) */
    mac_crc = net_crc32_le(buf, ETH_ALEN);

    /* Packet isn't for me ? */
    rx_cond = sungem_check_rx_mac(s, buf, mac_crc);
    if (rx_cond == rx_no_match) {
        /* Just drop it */
        trace_sungem_rx_unmatched();
        return size;
    }

    /* Get ring pointers */
    kick = s->rxdmaregs[RXDMA_KICK >> 2] & s->rx_mask;
    done = s->rxdmaregs[RXDMA_DONE >> 2] & s->rx_mask;

    trace_sungem_rx_process(done, kick, s->rx_mask + 1);

    /* Ring full ? Can't receive */
    if (sungem_rx_full(s, kick, done)) {
        trace_sungem_rx_ringfull();
        return 0;
    }

    /* Note: The real GEM will fetch descriptors in blocks of 4,
     * for now we handle them one at a time, I think the driver will
     * cope
     */
    dbase = s->rxdmaregs[RXDMA_DBHI >> 2];
    dbase = (dbase << 32) | s->rxdmaregs[RXDMA_DBLOW >> 2];

    /* Read the next descriptor */
    pci_dma_read(d, dbase + done * sizeof(desc), &desc, sizeof(desc));

    trace_sungem_rx_desc(le64_to_cpu(desc.status_word),
                         le64_to_cpu(desc.buffer));

    /* Effective buffer address: 8-byte aligned, plus the configured
     * first-byte offset.
     */
    baddr = le64_to_cpu(desc.buffer) & ~7ull;
    baddr |= (rxdma_cfg & RXDMA_CFG_FBOFF) >> 10;

    /* Write buffer out */
    pci_dma_write(d, baddr, buf, size);

    if (fcs_size) {
        /* Should we add an FCS ? Linux doesn't ask us to strip it,
         * however I believe nothing checks it... For now we just
         * do nothing. It's faster this way.
         */
    }

    /* Calculate the checksum, skipping the configured offset */
    coff = (rxdma_cfg & RXDMA_CFG_CSUMOFF) >> 13;
    csum = net_raw_checksum((uint8_t *)buf + coff, size - coff);

    /* Build the updated descriptor: size, top CRC bits for the hash,
     * checksum and match flags.
     */
    desc.status_word = (size + fcs_size) << 16;
    desc.status_word |= ((uint64_t)(mac_crc >> 16)) << 44;
    desc.status_word |= csum;
    if (rx_cond == rx_match_mcast) {
        desc.status_word |= RXDCTRL_HPASS;
    }
    if (rx_cond == rx_match_altmac) {
        desc.status_word |= RXDCTRL_ALTMAC;
    }
    desc.status_word = cpu_to_le64(desc.status_word);

    pci_dma_write(d, dbase + done * sizeof(desc), &desc, sizeof(desc));

    done = (done + 1) & s->rx_mask;
    s->rxdmaregs[RXDMA_DONE >> 2] = done;

    /* XXX Unconditionally set RX interrupt for now. The interrupt
     * mitigation timer might well end up adding more overhead than
     * helping here...
     */
    ints = GREG_STAT_RXDONE;
    if (sungem_rx_full(s, kick, done)) {
        ints |= GREG_STAT_RXNOBUF;
    }
    sungem_update_status(s, ints, true);

    return size;
}
/*
 * net layer link-status callback. Intentionally empty: link state is
 * only reported through the emulated PHY's MII_BMSR (see
 * __sungem_mii_read), which is re-read by the guest on demand.
 */
static void sungem_set_link_status(NetClientState *nc)
{
    /* We don't do anything for now as I believe none of the OSes
     * drivers use the MIF autopoll feature nor the PHY interrupt
     */
}
  557. static void sungem_update_masks(SunGEMState *s)
  558. {
  559. uint32_t sz;
  560. sz = 1 << (((s->rxdmaregs[RXDMA_CFG >> 2] & RXDMA_CFG_RINGSZ) >> 1) + 5);
  561. s->rx_mask = sz - 1;
  562. sz = 1 << (((s->txdmaregs[TXDMA_CFG >> 2] & TXDMA_CFG_RINGSZ) >> 1) + 5);
  563. s->tx_mask = sz - 1;
  564. }
  565. static void sungem_reset_rx(SunGEMState *s)
  566. {
  567. trace_sungem_rx_reset();
  568. /* XXX Do RXCFG */
  569. /* XXX Check value */
  570. s->rxdmaregs[RXDMA_FSZ >> 2] = 0x140;
  571. s->rxdmaregs[RXDMA_DONE >> 2] = 0;
  572. s->rxdmaregs[RXDMA_KICK >> 2] = 0;
  573. s->rxdmaregs[RXDMA_CFG >> 2] = 0x1000010;
  574. s->rxdmaregs[RXDMA_PTHRESH >> 2] = 0xf8;
  575. s->rxdmaregs[RXDMA_BLANK >> 2] = 0;
  576. sungem_update_masks(s);
  577. }
  578. static void sungem_reset_tx(SunGEMState *s)
  579. {
  580. trace_sungem_tx_reset();
  581. /* XXX Do TXCFG */
  582. /* XXX Check value */
  583. s->txdmaregs[TXDMA_FSZ >> 2] = 0x90;
  584. s->txdmaregs[TXDMA_TXDONE >> 2] = 0;
  585. s->txdmaregs[TXDMA_KICK >> 2] = 0;
  586. s->txdmaregs[TXDMA_CFG >> 2] = 0x118010;
  587. sungem_update_masks(s);
  588. s->tx_size = 0;
  589. s->tx_first_ctl = 0;
  590. }
  591. static void sungem_reset_all(SunGEMState *s, bool pci_reset)
  592. {
  593. trace_sungem_reset(pci_reset);
  594. sungem_reset_rx(s);
  595. sungem_reset_tx(s);
  596. s->gregs[GREG_IMASK >> 2] = 0xFFFFFFF;
  597. s->gregs[GREG_STAT >> 2] = 0;
  598. if (pci_reset) {
  599. uint8_t *ma = s->conf.macaddr.a;
  600. s->gregs[GREG_SWRST >> 2] = 0;
  601. s->macregs[MAC_ADDR0 >> 2] = (ma[4] << 8) | ma[5];
  602. s->macregs[MAC_ADDR1 >> 2] = (ma[2] << 8) | ma[3];
  603. s->macregs[MAC_ADDR2 >> 2] = (ma[0] << 8) | ma[1];
  604. } else {
  605. s->gregs[GREG_SWRST >> 2] &= GREG_SWRST_RSTOUT;
  606. }
  607. s->mifregs[MIF_CFG >> 2] = MIF_CFG_MDI0;
  608. }
/*
 * MII (PHY) register write. Currently a traced no-op: no writable PHY
 * state is modelled yet.
 */
static void sungem_mii_write(SunGEMState *s, uint8_t phy_addr,
                             uint8_t reg_addr, uint16_t val)
{
    trace_sungem_mii_write(phy_addr, reg_addr, val);

    /* XXX TODO */
}
  615. static uint16_t __sungem_mii_read(SunGEMState *s, uint8_t phy_addr,
  616. uint8_t reg_addr)
  617. {
  618. if (phy_addr != s->phy_addr) {
  619. return 0xffff;
  620. }
  621. /* Primitive emulation of a BCM5201 to please the driver,
  622. * ID is 0x00406210. TODO: Do a gigabit PHY like BCM5400
  623. */
  624. switch (reg_addr) {
  625. case MII_BMCR:
  626. return 0;
  627. case MII_PHYID1:
  628. return 0x0040;
  629. case MII_PHYID2:
  630. return 0x6210;
  631. case MII_BMSR:
  632. if (qemu_get_queue(s->nic)->link_down) {
  633. return MII_BMSR_100TX_FD | MII_BMSR_AUTONEG;
  634. } else {
  635. return MII_BMSR_100TX_FD | MII_BMSR_AN_COMP |
  636. MII_BMSR_AUTONEG | MII_BMSR_LINK_ST;
  637. }
  638. case MII_ANLPAR:
  639. case MII_ANAR:
  640. return MII_ANLPAR_TXFD;
  641. case 0x18: /* 5201 AUX status */
  642. return 3; /* 100FD */
  643. default:
  644. return 0;
  645. };
  646. }
  647. static uint16_t sungem_mii_read(SunGEMState *s, uint8_t phy_addr,
  648. uint8_t reg_addr)
  649. {
  650. uint16_t val;
  651. val = __sungem_mii_read(s, phy_addr, reg_addr);
  652. trace_sungem_mii_read(phy_addr, reg_addr, val);
  653. return val;
  654. }
  655. static uint32_t sungem_mii_op(SunGEMState *s, uint32_t val)
  656. {
  657. uint8_t phy_addr, reg_addr, op;
  658. /* Ignore not start of frame */
  659. if ((val >> 30) != 1) {
  660. trace_sungem_mii_invalid_sof(val >> 30);
  661. return 0xffff;
  662. }
  663. phy_addr = (val & MIF_FRAME_PHYAD) >> 23;
  664. reg_addr = (val & MIF_FRAME_REGAD) >> 18;
  665. op = (val & MIF_FRAME_OP) >> 28;
  666. switch (op) {
  667. case 1:
  668. sungem_mii_write(s, phy_addr, reg_addr, val & MIF_FRAME_DATA);
  669. return val | MIF_FRAME_TALSB;
  670. case 2:
  671. return sungem_mii_read(s, phy_addr, reg_addr) | MIF_FRAME_TALSB;
  672. default:
  673. trace_sungem_mii_invalid_op(op);
  674. }
  675. return 0xffff | MIF_FRAME_TALSB;
  676. }
/*
 * MMIO write handler for the global register bank. Read-only registers
 * are silently dropped; IACK is a write-1-to-clear view of the latched
 * status bits; IMASK and SWRST writes trigger side effects after the
 * register value is stored.
 */
static void sungem_mmio_greg_write(void *opaque, hwaddr addr, uint64_t val,
                                   unsigned size)
{
    SunGEMState *s = opaque;

    /* Valid offsets: 0x00-0x1f (interrupt block) and 0x1000-0x1010
     * (PCI error / reset block); anything else is a guest error.
     */
    if (!(addr < 0x20) && !(addr >= 0x1000 && addr <= 0x1010)) {
        qemu_log_mask(LOG_GUEST_ERROR,
                      "Write to unknown GREG register 0x%"HWADDR_PRIx"\n",
                      addr);
        return;
    }

    trace_sungem_mmio_greg_write(addr, val);

    /* Pre-write filter */
    switch (addr) {
    /* Read only registers */
    case GREG_SEBSTATE:
    case GREG_STAT:
    case GREG_STAT2:
    case GREG_PCIESTAT:
        return; /* No actual write */
    case GREG_IACK:
        /* Write-1-to-clear for the latched status bits */
        val &= GREG_STAT_LATCH;
        s->gregs[GREG_STAT >> 2] &= ~val;
        sungem_eval_irq(s);
        return; /* No actual write */
    case GREG_PCIEMASK:
        /* Only the low three mask bits are implemented */
        val &= 0x7;
        break;
    }

    s->gregs[addr >> 2] = val;

    /* Post write action */
    switch (addr) {
    case GREG_IMASK:
        /* Re-evaluate interrupt */
        sungem_eval_irq(s);
        break;
    case GREG_SWRST:
        switch (val & (GREG_SWRST_TXRST | GREG_SWRST_RXRST)) {
        case GREG_SWRST_RXRST:
            sungem_reset_rx(s);
            break;
        case GREG_SWRST_TXRST:
            sungem_reset_tx(s);
            break;
        case GREG_SWRST_RXRST | GREG_SWRST_TXRST:
            sungem_reset_all(s, false);
        }
        break;
    }
}
/*
 * MMIO read handler for the global (GREG) register bank.
 *
 * GREG_STAT is clear-on-read for its latched bits; GREG_STAT2 returns
 * the same data without that side effect.  Both reads fold the current
 * TX completion index (TXDMA_TXDONE) into the returned value.
 */
static uint64_t sungem_mmio_greg_read(void *opaque, hwaddr addr, unsigned size)
{
    SunGEMState *s = opaque;
    uint32_t val;

    /* Implemented windows: 0x0-0x1f and 0x1000-0x1010 */
    if (!(addr < 0x20) && !(addr >= 0x1000 && addr <= 0x1010)) {
        qemu_log_mask(LOG_GUEST_ERROR,
                      "Read from unknown GREG register 0x%"HWADDR_PRIx"\n",
                      addr);
        return 0;
    }

    val = s->gregs[addr >> 2];

    trace_sungem_mmio_greg_read(addr, val);

    switch (addr) {
    case GREG_STAT:
        /* Side effect, clear bottom 7 bits */
        s->gregs[GREG_STAT >> 2] &= ~GREG_STAT_LATCH;
        sungem_eval_irq(s);

        /* Inject TX completion in returned value */
        val = (val & ~GREG_STAT_TXNR) |
            (s->txdmaregs[TXDMA_TXDONE >> 2] << GREG_STAT_TXNR_SHIFT);
        break;
    case GREG_STAT2:
        /* Return the status reg without side effect
         * (and inject TX completion in returned value)
         */
        val = (s->gregs[GREG_STAT >> 2] & ~GREG_STAT_TXNR) |
            (s->txdmaregs[TXDMA_TXDONE >> 2] << GREG_STAT_TXNR_SHIFT);
        break;
    }

    return val;
}
/* GREG bank MMIO ops: little-endian, implemented as 32-bit accesses only */
static const MemoryRegionOps sungem_mmio_greg_ops = {
    .read = sungem_mmio_greg_read,
    .write = sungem_mmio_greg_write,
    .endianness = DEVICE_LITTLE_ENDIAN,
    .impl = {
        .min_access_size = 4,
        .max_access_size = 4,
    },
};
/*
 * MMIO write handler for the TXDMA register bank.
 *
 * Discards writes to the status/read-only registers.  A write to
 * TXDMA_KICK starts transmit descriptor processing; TXDMA_CFG
 * refreshes the cached ring masks.
 */
static void sungem_mmio_txdma_write(void *opaque, hwaddr addr, uint64_t val,
                                    unsigned size)
{
    SunGEMState *s = opaque;

    /* Implemented windows: 0x0-0x37 and 0x100-0x118 */
    if (!(addr < 0x38) && !(addr >= 0x100 && addr <= 0x118)) {
        qemu_log_mask(LOG_GUEST_ERROR,
                      "Write to unknown TXDMA register 0x%"HWADDR_PRIx"\n",
                      addr);
        return;
    }

    trace_sungem_mmio_txdma_write(addr, val);

    /* Pre-write filter */
    switch (addr) {
    /* Read only registers */
    case TXDMA_TXDONE:
    case TXDMA_PCNT:
    case TXDMA_SMACHINE:
    case TXDMA_DPLOW:
    case TXDMA_DPHI:
    case TXDMA_FSZ:
    case TXDMA_FTAG:
        return; /* No actual write */
    }

    s->txdmaregs[addr >> 2] = val;

    /* Post write action */
    switch (addr) {
    case TXDMA_KICK:
        /* Guest advanced the TX producer index: process descriptors */
        sungem_tx_kick(s);
        break;
    case TXDMA_CFG:
        sungem_update_masks(s);
        break;
    }
}
  800. static uint64_t sungem_mmio_txdma_read(void *opaque, hwaddr addr, unsigned size)
  801. {
  802. SunGEMState *s = opaque;
  803. uint32_t val;
  804. if (!(addr < 0x38) && !(addr >= 0x100 && addr <= 0x118)) {
  805. qemu_log_mask(LOG_GUEST_ERROR,
  806. "Read from unknown TXDMA register 0x%"HWADDR_PRIx"\n",
  807. addr);
  808. return 0;
  809. }
  810. val = s->txdmaregs[addr >> 2];
  811. trace_sungem_mmio_txdma_read(addr, val);
  812. return val;
  813. }
/* TXDMA bank MMIO ops: little-endian, implemented as 32-bit accesses only */
static const MemoryRegionOps sungem_mmio_txdma_ops = {
    .read = sungem_mmio_txdma_read,
    .write = sungem_mmio_txdma_write,
    .endianness = DEVICE_LITTLE_ENDIAN,
    .impl = {
        .min_access_size = 4,
        .max_access_size = 4,
    },
};
/*
 * MMIO write handler for the RXDMA register bank.
 *
 * Discards writes to the status/read-only registers.  RXDMA_KICK
 * writes are only traced here (descriptors are consumed on packet
 * arrival); an RXDMA_CFG write that completes enabling of the RX
 * path flushes any packets queued while it was disabled.
 */
static void sungem_mmio_rxdma_write(void *opaque, hwaddr addr, uint64_t val,
                                    unsigned size)
{
    SunGEMState *s = opaque;

    /* Implemented windows: 0x0-0x28 and 0x100-0x120 */
    if (!(addr <= 0x28) && !(addr >= 0x100 && addr <= 0x120)) {
        qemu_log_mask(LOG_GUEST_ERROR,
                      "Write to unknown RXDMA register 0x%"HWADDR_PRIx"\n",
                      addr);
        return;
    }

    trace_sungem_mmio_rxdma_write(addr, val);

    /* Pre-write filter */
    switch (addr) {
    /* Read only registers */
    case RXDMA_DONE:
    case RXDMA_PCNT:
    case RXDMA_SMACHINE:
    case RXDMA_DPLOW:
    case RXDMA_DPHI:
    case RXDMA_FSZ:
    case RXDMA_FTAG:
        return; /* No actual write */
    }

    s->rxdmaregs[addr >> 2] = val;

    /* Post write action */
    switch (addr) {
    case RXDMA_KICK:
        trace_sungem_rx_kick(val);
        break;
    case RXDMA_CFG:
        sungem_update_masks(s);
        /* If both MAC RX and RXDMA are now enabled, drain packets
         * queued while the RX path was down.
         */
        if ((s->macregs[MAC_RXCFG >> 2] & MAC_RXCFG_ENAB) != 0 &&
            (s->rxdmaregs[RXDMA_CFG >> 2] & RXDMA_CFG_ENABLE) != 0) {
            qemu_flush_queued_packets(qemu_get_queue(s->nic));
        }
        break;
    }
}
  861. static uint64_t sungem_mmio_rxdma_read(void *opaque, hwaddr addr, unsigned size)
  862. {
  863. SunGEMState *s = opaque;
  864. uint32_t val;
  865. if (!(addr <= 0x28) && !(addr >= 0x100 && addr <= 0x120)) {
  866. qemu_log_mask(LOG_GUEST_ERROR,
  867. "Read from unknown RXDMA register 0x%"HWADDR_PRIx"\n",
  868. addr);
  869. return 0;
  870. }
  871. val = s->rxdmaregs[addr >> 2];
  872. trace_sungem_mmio_rxdma_read(addr, val);
  873. return val;
  874. }
/* RXDMA bank MMIO ops: little-endian, implemented as 32-bit accesses only */
static const MemoryRegionOps sungem_mmio_rxdma_ops = {
    .read = sungem_mmio_rxdma_read,
    .write = sungem_mmio_rxdma_write,
    .endianness = DEVICE_LITTLE_ENDIAN,
    .impl = {
        .min_access_size = 4,
        .max_access_size = 4,
    },
};
  884. static void sungem_mmio_wol_write(void *opaque, hwaddr addr, uint64_t val,
  885. unsigned size)
  886. {
  887. trace_sungem_mmio_wol_write(addr, val);
  888. switch (addr) {
  889. case WOL_WAKECSR:
  890. if (val != 0) {
  891. qemu_log_mask(LOG_UNIMP, "sungem: WOL not supported\n");
  892. }
  893. break;
  894. default:
  895. qemu_log_mask(LOG_UNIMP, "sungem: WOL not supported\n");
  896. }
  897. }
  898. static uint64_t sungem_mmio_wol_read(void *opaque, hwaddr addr, unsigned size)
  899. {
  900. uint32_t val = -1;
  901. qemu_log_mask(LOG_UNIMP, "sungem: WOL not supported\n");
  902. trace_sungem_mmio_wol_read(addr, val);
  903. return val;
  904. }
/* WOL bank MMIO ops: little-endian, implemented as 32-bit accesses only */
static const MemoryRegionOps sungem_mmio_wol_ops = {
    .read = sungem_mmio_wol_read,
    .write = sungem_mmio_wol_write,
    .endianness = DEVICE_LITTLE_ENDIAN,
    .impl = {
        .min_access_size = 4,
        .max_access_size = 4,
    },
};
/*
 * MMIO write handler for the MAC register bank.
 *
 * Filters writes to read-only registers, masks MAC_MINFSZ to its
 * implemented width, then applies post-write side effects: interrupt
 * mask re-evaluation and RX-enable handling.
 */
static void sungem_mmio_mac_write(void *opaque, hwaddr addr, uint64_t val,
                                  unsigned size)
{
    SunGEMState *s = opaque;

    if (!(addr <= 0x134)) {
        qemu_log_mask(LOG_GUEST_ERROR,
                      "Write to unknown MAC register 0x%"HWADDR_PRIx"\n",
                      addr);
        return;
    }

    trace_sungem_mmio_mac_write(addr, val);

    /* Pre-write filter */
    switch (addr) {
    /* Read only registers */
    case MAC_TXRST: /* Not technically read-only but will do for now */
    case MAC_RXRST: /* Not technically read-only but will do for now */
    case MAC_TXSTAT:
    case MAC_RXSTAT:
    case MAC_CSTAT:
    case MAC_PATMPS:
    case MAC_SMACHINE:
        return; /* No actual write */
    case MAC_MINFSZ:
        /* 10-bits implemented */
        val &= 0x3ff;
        break;
    }

    s->macregs[addr >> 2] = val;

    /* Post write action */
    switch (addr) {
    case MAC_TXMASK:
    case MAC_RXMASK:
    case MAC_MCMASK:
        /* A mask change can raise or drop the cascaded MAC interrupt */
        sungem_eval_cascade_irq(s);
        break;
    case MAC_RXCFG:
        sungem_update_masks(s);
        /* If both MAC RX and RXDMA are now enabled, drain packets
         * queued while the RX path was down.
         */
        if ((s->macregs[MAC_RXCFG >> 2] & MAC_RXCFG_ENAB) != 0 &&
            (s->rxdmaregs[RXDMA_CFG >> 2] & RXDMA_CFG_ENABLE) != 0) {
            qemu_flush_queued_packets(qemu_get_queue(s->nic));
        }
        break;
    }
}
/*
 * MMIO read handler for the MAC register bank.
 *
 * The status registers are clear-on-read: reading them clears their
 * latched bits and drops the corresponding summary bit in the global
 * status register.
 */
static uint64_t sungem_mmio_mac_read(void *opaque, hwaddr addr, unsigned size)
{
    SunGEMState *s = opaque;
    uint32_t val;

    if (!(addr <= 0x134)) {
        qemu_log_mask(LOG_GUEST_ERROR,
                      "Read from unknown MAC register 0x%"HWADDR_PRIx"\n",
                      addr);
        return 0;
    }

    val = s->macregs[addr >> 2];
    trace_sungem_mmio_mac_read(addr, val);

    switch (addr) {
    case MAC_TXSTAT:
        /* Side effect, clear all */
        s->macregs[addr >> 2] = 0;
        sungem_update_status(s, GREG_STAT_TXMAC, false);
        break;
    case MAC_RXSTAT:
        /* Side effect, clear all */
        s->macregs[addr >> 2] = 0;
        sungem_update_status(s, GREG_STAT_RXMAC, false);
        break;
    case MAC_CSTAT:
        /* Side effect, interrupt bits: only the PTR field survives */
        s->macregs[addr >> 2] &= MAC_CSTAT_PTR;
        sungem_update_status(s, GREG_STAT_MAC, false);
        break;
    }

    return val;
}
/* MAC bank MMIO ops: little-endian, implemented as 32-bit accesses only */
static const MemoryRegionOps sungem_mmio_mac_ops = {
    .read = sungem_mmio_mac_read,
    .write = sungem_mmio_mac_write,
    .endianness = DEVICE_LITTLE_ENDIAN,
    .impl = {
        .min_access_size = 4,
        .max_access_size = 4,
    },
};
/*
 * MMIO write handler for the MIF (MDIO interface) register bank.
 *
 * A write to MIF_FRAME triggers an MDIO bus operation; the operation's
 * result replaces the register contents so the guest can poll it back.
 * MIF_CFG writes keep the read-only MDI bits advertising a PHY on MDI0.
 */
static void sungem_mmio_mif_write(void *opaque, hwaddr addr, uint64_t val,
                                  unsigned size)
{
    SunGEMState *s = opaque;

    if (!(addr <= 0x1c)) {
        qemu_log_mask(LOG_GUEST_ERROR,
                      "Write to unknown MIF register 0x%"HWADDR_PRIx"\n",
                      addr);
        return;
    }

    trace_sungem_mmio_mif_write(addr, val);

    /* Pre-write filter */
    switch (addr) {
    /* Read only registers */
    case MIF_STATUS:
    case MIF_SMACHINE:
        return; /* No actual write */
    case MIF_CFG:
        /* Maintain the RO MDI bits to advertise an MDIO PHY on MDI0 */
        val &= ~MIF_CFG_MDI1;
        val |= MIF_CFG_MDI0;
        break;
    }

    s->mifregs[addr >> 2] = val;

    /* Post write action */
    switch (addr) {
    case MIF_FRAME:
        /* Run the MDIO operation and latch its result */
        s->mifregs[addr >> 2] = sungem_mii_op(s, val);
        break;
    }
}
  1029. static uint64_t sungem_mmio_mif_read(void *opaque, hwaddr addr, unsigned size)
  1030. {
  1031. SunGEMState *s = opaque;
  1032. uint32_t val;
  1033. if (!(addr <= 0x1c)) {
  1034. qemu_log_mask(LOG_GUEST_ERROR,
  1035. "Read from unknown MIF register 0x%"HWADDR_PRIx"\n",
  1036. addr);
  1037. return 0;
  1038. }
  1039. val = s->mifregs[addr >> 2];
  1040. trace_sungem_mmio_mif_read(addr, val);
  1041. return val;
  1042. }
/* MIF bank MMIO ops: little-endian, implemented as 32-bit accesses only */
static const MemoryRegionOps sungem_mmio_mif_ops = {
    .read = sungem_mmio_mif_read,
    .write = sungem_mmio_mif_write,
    .endianness = DEVICE_LITTLE_ENDIAN,
    .impl = {
        .min_access_size = 4,
        .max_access_size = 4,
    },
};
  1052. static void sungem_mmio_pcs_write(void *opaque, hwaddr addr, uint64_t val,
  1053. unsigned size)
  1054. {
  1055. SunGEMState *s = opaque;
  1056. if (!(addr <= 0x18) && !(addr >= 0x50 && addr <= 0x5c)) {
  1057. qemu_log_mask(LOG_GUEST_ERROR,
  1058. "Write to unknown PCS register 0x%"HWADDR_PRIx"\n",
  1059. addr);
  1060. return;
  1061. }
  1062. trace_sungem_mmio_pcs_write(addr, val);
  1063. /* Pre-write filter */
  1064. switch (addr) {
  1065. /* Read only registers */
  1066. case PCS_MIISTAT:
  1067. case PCS_ISTAT:
  1068. case PCS_SSTATE:
  1069. return; /* No actual write */
  1070. }
  1071. s->pcsregs[addr >> 2] = val;
  1072. }
  1073. static uint64_t sungem_mmio_pcs_read(void *opaque, hwaddr addr, unsigned size)
  1074. {
  1075. SunGEMState *s = opaque;
  1076. uint32_t val;
  1077. if (!(addr <= 0x18) && !(addr >= 0x50 && addr <= 0x5c)) {
  1078. qemu_log_mask(LOG_GUEST_ERROR,
  1079. "Read from unknown PCS register 0x%"HWADDR_PRIx"\n",
  1080. addr);
  1081. return 0;
  1082. }
  1083. val = s->pcsregs[addr >> 2];
  1084. trace_sungem_mmio_pcs_read(addr, val);
  1085. return val;
  1086. }
/* PCS bank MMIO ops: little-endian, implemented as 32-bit accesses only */
static const MemoryRegionOps sungem_mmio_pcs_ops = {
    .read = sungem_mmio_pcs_read,
    .write = sungem_mmio_pcs_write,
    .endianness = DEVICE_LITTLE_ENDIAN,
    .impl = {
        .min_access_size = 4,
        .max_access_size = 4,
    },
};
  1096. static void sungem_uninit(PCIDevice *dev)
  1097. {
  1098. SunGEMState *s = SUNGEM(dev);
  1099. qemu_del_nic(s->nic);
  1100. }
/* Network backend callbacks for the emulated NIC */
static NetClientInfo net_sungem_info = {
    .type = NET_CLIENT_DRIVER_NIC,
    .size = sizeof(NICState),
    .can_receive = sungem_can_receive,
    .receive = sungem_receive,
    .link_status_changed = sungem_set_link_status,
};
/*
 * PCI realize: set up config space, reset the chip, build the MMIO
 * region hierarchy (one subregion per register bank) and create the
 * NIC backend.
 */
static void sungem_realize(PCIDevice *pci_dev, Error **errp)
{
    DeviceState *dev = DEVICE(pci_dev);
    SunGEMState *s = SUNGEM(pci_dev);
    uint8_t *pci_conf;

    pci_conf = pci_dev->config;

    pci_set_word(pci_conf + PCI_STATUS,
                 PCI_STATUS_FAST_BACK |
                 PCI_STATUS_DEVSEL_MEDIUM |
                 PCI_STATUS_66MHZ);

    pci_set_word(pci_conf + PCI_SUBSYSTEM_VENDOR_ID, 0x0);
    pci_set_word(pci_conf + PCI_SUBSYSTEM_ID, 0x0);

    pci_conf[PCI_INTERRUPT_PIN] = 1; /* interrupt pin A */
    pci_conf[PCI_MIN_GNT] = 0x40;
    pci_conf[PCI_MAX_LAT] = 0x40;

    /* Bring the chip to its power-on state */
    sungem_reset_all(s, true);

    /* Container region; each register bank is a subregion at its
     * hardware offset within BAR 0.
     */
    memory_region_init(&s->sungem, OBJECT(s), "sungem", SUNGEM_MMIO_SIZE);

    memory_region_init_io(&s->greg, OBJECT(s), &sungem_mmio_greg_ops, s,
                          "sungem.greg", SUNGEM_MMIO_GREG_SIZE);
    memory_region_add_subregion(&s->sungem, 0, &s->greg);

    memory_region_init_io(&s->txdma, OBJECT(s), &sungem_mmio_txdma_ops, s,
                          "sungem.txdma", SUNGEM_MMIO_TXDMA_SIZE);
    memory_region_add_subregion(&s->sungem, 0x2000, &s->txdma);

    memory_region_init_io(&s->rxdma, OBJECT(s), &sungem_mmio_rxdma_ops, s,
                          "sungem.rxdma", SUNGEM_MMIO_RXDMA_SIZE);
    memory_region_add_subregion(&s->sungem, 0x4000, &s->rxdma);

    memory_region_init_io(&s->wol, OBJECT(s), &sungem_mmio_wol_ops, s,
                          "sungem.wol", SUNGEM_MMIO_WOL_SIZE);
    memory_region_add_subregion(&s->sungem, 0x3000, &s->wol);

    memory_region_init_io(&s->mac, OBJECT(s), &sungem_mmio_mac_ops, s,
                          "sungem.mac", SUNGEM_MMIO_MAC_SIZE);
    memory_region_add_subregion(&s->sungem, 0x6000, &s->mac);

    memory_region_init_io(&s->mif, OBJECT(s), &sungem_mmio_mif_ops, s,
                          "sungem.mif", SUNGEM_MMIO_MIF_SIZE);
    memory_region_add_subregion(&s->sungem, 0x6200, &s->mif);

    memory_region_init_io(&s->pcs, OBJECT(s), &sungem_mmio_pcs_ops, s,
                          "sungem.pcs", SUNGEM_MMIO_PCS_SIZE);
    memory_region_add_subregion(&s->sungem, 0x9000, &s->pcs);

    pci_register_bar(pci_dev, 0, PCI_BASE_ADDRESS_SPACE_MEMORY, &s->sungem);

    /* Assign a default MAC address if the user did not provide one */
    qemu_macaddr_default_if_unset(&s->conf.macaddr);

    s->nic = qemu_new_nic(&net_sungem_info, &s->conf,
                          object_get_typename(OBJECT(dev)),
                          dev->id, &dev->mem_reentrancy_guard, s);
    qemu_format_nic_info_str(qemu_get_queue(s->nic),
                             s->conf.macaddr.a);
}
  1154. static void sungem_reset(DeviceState *dev)
  1155. {
  1156. SunGEMState *s = SUNGEM(dev);
  1157. sungem_reset_all(s, true);
  1158. }
  1159. static void sungem_instance_init(Object *obj)
  1160. {
  1161. SunGEMState *s = SUNGEM(obj);
  1162. device_add_bootindex_property(obj, &s->conf.bootindex,
  1163. "bootindex", "/ethernet-phy@0",
  1164. DEVICE(obj));
  1165. }
/* User-configurable device properties */
static const Property sungem_properties[] = {
    DEFINE_NIC_PROPERTIES(SunGEMState, conf),
    /* Phy address should be 0 for most Apple machines except
     * for K2 in which case it's 1. Will be set by a machine
     * override.
     */
    DEFINE_PROP_UINT32("phy_addr", SunGEMState, phy_addr, 0),
};
/*
 * Migration state: PCI core state, MAC address, all register banks,
 * and the in-flight TX packet assembly buffer.  Field order and
 * sizes are part of the migration stream format — do not reorder.
 */
static const VMStateDescription vmstate_sungem = {
    .name = "sungem",
    .version_id = 0,
    .minimum_version_id = 0,
    .fields = (const VMStateField[]) {
        VMSTATE_PCI_DEVICE(pdev, SunGEMState),
        VMSTATE_MACADDR(conf.macaddr, SunGEMState),
        VMSTATE_UINT32(phy_addr, SunGEMState),
        VMSTATE_UINT32_ARRAY(gregs, SunGEMState, (SUNGEM_MMIO_GREG_SIZE >> 2)),
        VMSTATE_UINT32_ARRAY(txdmaregs, SunGEMState,
                             (SUNGEM_MMIO_TXDMA_SIZE >> 2)),
        VMSTATE_UINT32_ARRAY(rxdmaregs, SunGEMState,
                             (SUNGEM_MMIO_RXDMA_SIZE >> 2)),
        VMSTATE_UINT32_ARRAY(macregs, SunGEMState, (SUNGEM_MMIO_MAC_SIZE >> 2)),
        VMSTATE_UINT32_ARRAY(mifregs, SunGEMState, (SUNGEM_MMIO_MIF_SIZE >> 2)),
        VMSTATE_UINT32_ARRAY(pcsregs, SunGEMState, (SUNGEM_MMIO_PCS_SIZE >> 2)),
        VMSTATE_UINT32(rx_mask, SunGEMState),
        VMSTATE_UINT32(tx_mask, SunGEMState),
        VMSTATE_UINT8_ARRAY(tx_data, SunGEMState, MAX_PACKET_SIZE),
        VMSTATE_UINT32(tx_size, SunGEMState),
        VMSTATE_UINT64(tx_first_ctl, SunGEMState),
        VMSTATE_END_OF_LIST()
    }
};
/* Class init: PCI identity (Apple UniNorth GMAC) and device callbacks */
static void sungem_class_init(ObjectClass *klass, void *data)
{
    DeviceClass *dc = DEVICE_CLASS(klass);
    PCIDeviceClass *k = PCI_DEVICE_CLASS(klass);

    k->realize = sungem_realize;
    k->exit = sungem_uninit;
    k->vendor_id = PCI_VENDOR_ID_APPLE;
    k->device_id = PCI_DEVICE_ID_APPLE_UNI_N_GMAC;
    k->revision = 0x01;
    k->class_id = PCI_CLASS_NETWORK_ETHERNET;
    dc->vmsd = &vmstate_sungem;
    device_class_set_legacy_reset(dc, sungem_reset);
    device_class_set_props(dc, sungem_properties);
    set_bit(DEVICE_CATEGORY_NETWORK, dc->categories);
}
/* QOM type description: conventional (non-express) PCI device */
static const TypeInfo sungem_info = {
    .name = TYPE_SUNGEM,
    .parent = TYPE_PCI_DEVICE,
    .instance_size = sizeof(SunGEMState),
    .class_init = sungem_class_init,
    .instance_init = sungem_instance_init,
    .interfaces = (InterfaceInfo[]) {
        { INTERFACE_CONVENTIONAL_PCI_DEVICE },
        { }
    }
};
/* Register the sungem QOM type with the type system at startup */
static void sungem_register_types(void)
{
    type_register_static(&sungem_info);
}

type_init(sungem_register_types)