cadence_gem.c

/*
 * QEMU Cadence GEM emulation
 *
 * Copyright (c) 2011 Xilinx, Inc.
 *
 * Permission is hereby granted, free of charge, to any person obtaining a copy
 * of this software and associated documentation files (the "Software"), to deal
 * in the Software without restriction, including without limitation the rights
 * to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
 * copies of the Software, and to permit persons to whom the Software is
 * furnished to do so, subject to the following conditions:
 *
 * The above copyright notice and this permission notice shall be included in
 * all copies or substantial portions of the Software.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
 * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
 * THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
 * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
 * OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN
 * THE SOFTWARE.
 */

#include "qemu/osdep.h"
#include <zlib.h> /* for crc32 */

#include "hw/irq.h"
#include "hw/net/cadence_gem.h"
#include "hw/qdev-properties.h"
#include "hw/registerfields.h"
#include "migration/vmstate.h"
#include "qapi/error.h"
#include "qemu/log.h"
#include "qemu/module.h"
#include "system/dma.h"
#include "net/checksum.h"
#include "net/eth.h"

#define CADENCE_GEM_ERR_DEBUG 0
#define DB_PRINT(...) do { \
    if (CADENCE_GEM_ERR_DEBUG) { \
        qemu_log(": %s: ", __func__); \
        qemu_log(__VA_ARGS__); \
    } \
} while (0)
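
/*
 * Debug tracing is compiled out by default; rebuilding with
 * CADENCE_GEM_ERR_DEBUG set to 1 routes every DB_PRINT() through
 * qemu_log(), prefixed with the calling function's name.  For example
 * (illustrative only), DB_PRINT("q %d\n", q) inside gem_transmit()
 * would log ": gem_transmit: q 0".
 */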

REG32(NWCTRL, 0x0) /* Network Control reg */
    FIELD(NWCTRL, LOOPBACK, 0, 1)
    FIELD(NWCTRL, LOOPBACK_LOCAL, 1, 1)
    FIELD(NWCTRL, ENABLE_RECEIVE, 2, 1)
    FIELD(NWCTRL, ENABLE_TRANSMIT, 3, 1)
    FIELD(NWCTRL, MAN_PORT_EN, 4, 1)
    FIELD(NWCTRL, CLEAR_ALL_STATS_REGS, 5, 1)
    FIELD(NWCTRL, INC_ALL_STATS_REGS, 6, 1)
    FIELD(NWCTRL, STATS_WRITE_EN, 7, 1)
    FIELD(NWCTRL, BACK_PRESSURE, 8, 1)
    FIELD(NWCTRL, TRANSMIT_START, 9, 1)
    FIELD(NWCTRL, TRANSMIT_HALT, 10, 1)
    FIELD(NWCTRL, TX_PAUSE_FRAME_RE, 11, 1)
    FIELD(NWCTRL, TX_PAUSE_FRAME_ZE, 12, 1)
    FIELD(NWCTRL, STATS_TAKE_SNAP, 13, 1)
    FIELD(NWCTRL, STATS_READ_SNAP, 14, 1)
    FIELD(NWCTRL, STORE_RX_TS, 15, 1)
    FIELD(NWCTRL, PFC_ENABLE, 16, 1)
    FIELD(NWCTRL, PFC_PRIO_BASED, 17, 1)
    FIELD(NWCTRL, FLUSH_RX_PKT_PCLK, 18, 1)
    FIELD(NWCTRL, TX_LPI_EN, 19, 1)
    FIELD(NWCTRL, PTP_UNICAST_ENA, 20, 1)
    FIELD(NWCTRL, ALT_SGMII_MODE, 21, 1)
    FIELD(NWCTRL, STORE_UDP_OFFSET, 22, 1)
    FIELD(NWCTRL, EXT_TSU_PORT_EN, 23, 1)
    FIELD(NWCTRL, ONE_STEP_SYNC_MO, 24, 1)
    FIELD(NWCTRL, PFC_CTRL, 25, 1)
    FIELD(NWCTRL, EXT_RXQ_SEL_EN, 26, 1)
    FIELD(NWCTRL, OSS_CORRECTION_FIELD, 27, 1)
    FIELD(NWCTRL, SEL_MII_ON_RGMII, 28, 1)
    FIELD(NWCTRL, TWO_PT_FIVE_GIG, 29, 1)
    FIELD(NWCTRL, IFG_EATS_QAV_CREDIT, 30, 1)

REG32(NWCFG, 0x4) /* Network Config reg */
    FIELD(NWCFG, SPEED, 0, 1)
    FIELD(NWCFG, FULL_DUPLEX, 1, 1)
    FIELD(NWCFG, DISCARD_NON_VLAN_FRAMES, 2, 1)
    FIELD(NWCFG, JUMBO_FRAMES, 3, 1)
    FIELD(NWCFG, PROMISC, 4, 1)
    FIELD(NWCFG, NO_BROADCAST, 5, 1)
    FIELD(NWCFG, MULTICAST_HASH_EN, 6, 1)
    FIELD(NWCFG, UNICAST_HASH_EN, 7, 1)
    FIELD(NWCFG, RECV_1536_BYTE_FRAMES, 8, 1)
    FIELD(NWCFG, EXTERNAL_ADDR_MATCH_EN, 9, 1)
    FIELD(NWCFG, GIGABIT_MODE_ENABLE, 10, 1)
    FIELD(NWCFG, PCS_SELECT, 11, 1)
    FIELD(NWCFG, RETRY_TEST, 12, 1)
    FIELD(NWCFG, PAUSE_ENABLE, 13, 1)
    FIELD(NWCFG, RECV_BUF_OFFSET, 14, 2)
    FIELD(NWCFG, LEN_ERR_DISCARD, 16, 1)
    FIELD(NWCFG, FCS_REMOVE, 17, 1)
    FIELD(NWCFG, MDC_CLOCK_DIV, 18, 3)
    FIELD(NWCFG, DATA_BUS_WIDTH, 21, 2)
    FIELD(NWCFG, DISABLE_COPY_PAUSE_FRAMES, 23, 1)
    FIELD(NWCFG, RECV_CSUM_OFFLOAD_EN, 24, 1)
    FIELD(NWCFG, EN_HALF_DUPLEX_RX, 25, 1)
    FIELD(NWCFG, IGNORE_RX_FCS, 26, 1)
    FIELD(NWCFG, SGMII_MODE_ENABLE, 27, 1)
    FIELD(NWCFG, IPG_STRETCH_ENABLE, 28, 1)
    FIELD(NWCFG, NSP_ACCEPT, 29, 1)
    FIELD(NWCFG, IGNORE_IPG_RX_ER, 30, 1)
    FIELD(NWCFG, UNI_DIRECTION_ENABLE, 31, 1)

REG32(NWSTATUS, 0x8) /* Network Status reg */
REG32(USERIO, 0xc) /* User IO reg */

REG32(DMACFG, 0x10) /* DMA Control reg */
    FIELD(DMACFG, SEND_BCAST_TO_ALL_QS, 31, 1)
    FIELD(DMACFG, DMA_ADDR_BUS_WIDTH, 30, 1)
    FIELD(DMACFG, TX_BD_EXT_MODE_EN, 29, 1)
    FIELD(DMACFG, RX_BD_EXT_MODE_EN, 28, 1)
    FIELD(DMACFG, FORCE_MAX_AMBA_BURST_TX, 26, 1)
    FIELD(DMACFG, FORCE_MAX_AMBA_BURST_RX, 25, 1)
    FIELD(DMACFG, FORCE_DISCARD_ON_ERR, 24, 1)
    FIELD(DMACFG, RX_BUF_SIZE, 16, 8)
    FIELD(DMACFG, CRC_ERROR_REPORT, 13, 1)
    FIELD(DMACFG, INF_LAST_DBUF_SIZE_EN, 12, 1)
    FIELD(DMACFG, TX_PBUF_CSUM_OFFLOAD, 11, 1)
    FIELD(DMACFG, TX_PBUF_SIZE, 10, 1)
    FIELD(DMACFG, RX_PBUF_SIZE, 8, 2)
    FIELD(DMACFG, ENDIAN_SWAP_PACKET, 7, 1)
    FIELD(DMACFG, ENDIAN_SWAP_MGNT, 6, 1)
    FIELD(DMACFG, HDR_DATA_SPLIT_EN, 5, 1)
    FIELD(DMACFG, AMBA_BURST_LEN, 0, 5)
#define GEM_DMACFG_RBUFSZ_MUL 64 /* DMA RX Buffer Size multiplier */

REG32(TXSTATUS, 0x14) /* TX Status reg */
    FIELD(TXSTATUS, TX_USED_BIT_READ_MIDFRAME, 12, 1)
    FIELD(TXSTATUS, TX_FRAME_TOO_LARGE, 11, 1)
    FIELD(TXSTATUS, TX_DMA_LOCKUP, 10, 1)
    FIELD(TXSTATUS, TX_MAC_LOCKUP, 9, 1)
    FIELD(TXSTATUS, RESP_NOT_OK, 8, 1)
    FIELD(TXSTATUS, LATE_COLLISION, 7, 1)
    FIELD(TXSTATUS, TRANSMIT_UNDER_RUN, 6, 1)
    FIELD(TXSTATUS, TRANSMIT_COMPLETE, 5, 1)
    FIELD(TXSTATUS, AMBA_ERROR, 4, 1)
    FIELD(TXSTATUS, TRANSMIT_GO, 3, 1)
    FIELD(TXSTATUS, RETRY_LIMIT, 2, 1)
    FIELD(TXSTATUS, COLLISION, 1, 1)
    FIELD(TXSTATUS, USED_BIT_READ, 0, 1)

REG32(RXQBASE, 0x18) /* RX Q Base address reg */
REG32(TXQBASE, 0x1c) /* TX Q Base address reg */

REG32(RXSTATUS, 0x20) /* RX Status reg */
    FIELD(RXSTATUS, RX_DMA_LOCKUP, 5, 1)
    FIELD(RXSTATUS, RX_MAC_LOCKUP, 4, 1)
    FIELD(RXSTATUS, RESP_NOT_OK, 3, 1)
    FIELD(RXSTATUS, RECEIVE_OVERRUN, 2, 1)
    FIELD(RXSTATUS, FRAME_RECEIVED, 1, 1)
    FIELD(RXSTATUS, BUF_NOT_AVAILABLE, 0, 1)

REG32(ISR, 0x24) /* Interrupt Status reg */
    FIELD(ISR, TX_LOCKUP, 31, 1)
    FIELD(ISR, RX_LOCKUP, 30, 1)
    FIELD(ISR, TSU_TIMER, 29, 1)
    FIELD(ISR, WOL, 28, 1)
    FIELD(ISR, RECV_LPI, 27, 1)
    FIELD(ISR, TSU_SEC_INCR, 26, 1)
    FIELD(ISR, PTP_PDELAY_RESP_XMIT, 25, 1)
    FIELD(ISR, PTP_PDELAY_REQ_XMIT, 24, 1)
    FIELD(ISR, PTP_PDELAY_RESP_RECV, 23, 1)
    FIELD(ISR, PTP_PDELAY_REQ_RECV, 22, 1)
    FIELD(ISR, PTP_SYNC_XMIT, 21, 1)
    FIELD(ISR, PTP_DELAY_REQ_XMIT, 20, 1)
    FIELD(ISR, PTP_SYNC_RECV, 19, 1)
    FIELD(ISR, PTP_DELAY_REQ_RECV, 18, 1)
    FIELD(ISR, PCS_LP_PAGE_RECV, 17, 1)
    FIELD(ISR, PCS_AN_COMPLETE, 16, 1)
    FIELD(ISR, EXT_IRQ, 15, 1)
    FIELD(ISR, PAUSE_FRAME_XMIT, 14, 1)
    FIELD(ISR, PAUSE_TIME_ELAPSED, 13, 1)
    FIELD(ISR, PAUSE_FRAME_RECV, 12, 1)
    FIELD(ISR, RESP_NOT_OK, 11, 1)
    FIELD(ISR, RECV_OVERRUN, 10, 1)
    FIELD(ISR, LINK_CHANGE, 9, 1)
    FIELD(ISR, USXGMII_INT, 8, 1)
    FIELD(ISR, XMIT_COMPLETE, 7, 1)
    FIELD(ISR, AMBA_ERROR, 6, 1)
    FIELD(ISR, RETRY_EXCEEDED, 5, 1)
    FIELD(ISR, XMIT_UNDER_RUN, 4, 1)
    FIELD(ISR, TX_USED, 3, 1)
    FIELD(ISR, RX_USED, 2, 1)
    FIELD(ISR, RECV_COMPLETE, 1, 1)
    FIELD(ISR, MGNT_FRAME_SENT, 0, 1)
REG32(IER, 0x28) /* Interrupt Enable reg */
REG32(IDR, 0x2c) /* Interrupt Disable reg */
REG32(IMR, 0x30) /* Interrupt Mask reg */

REG32(PHYMNTNC, 0x34) /* Phy Maintenance reg */
    FIELD(PHYMNTNC, DATA, 0, 16)
    FIELD(PHYMNTNC, REG_ADDR, 18, 5)
    FIELD(PHYMNTNC, PHY_ADDR, 23, 5)
    FIELD(PHYMNTNC, OP, 28, 2)
    FIELD(PHYMNTNC, ST, 30, 2)
#define MDIO_OP_READ 0x2
#define MDIO_OP_WRITE 0x1
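
/*
 * These opcodes follow IEEE 802.3 clause 22: OP = 0b10 requests a read
 * and OP = 0b01 a write of the 16-bit DATA field.  Other OP values
 * (e.g. clause 45 indirect accesses) fall through the switch in
 * gem_handle_phy_access() below and are silently ignored.
 */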

REG32(RXPAUSE, 0x38) /* RX Pause Time reg */
REG32(TXPAUSE, 0x3c) /* TX Pause Time reg */
REG32(TXPARTIALSF, 0x40) /* TX Partial Store and Forward */
REG32(RXPARTIALSF, 0x44) /* RX Partial Store and Forward */
REG32(JUMBO_MAX_LEN, 0x48) /* Max Jumbo Frame Size */
REG32(HASHLO, 0x80) /* Hash Low address reg */
REG32(HASHHI, 0x84) /* Hash High address reg */
REG32(SPADDR1LO, 0x88) /* Specific addr 1 low reg */
REG32(SPADDR1HI, 0x8c) /* Specific addr 1 high reg */
REG32(SPADDR2LO, 0x90) /* Specific addr 2 low reg */
REG32(SPADDR2HI, 0x94) /* Specific addr 2 high reg */
REG32(SPADDR3LO, 0x98) /* Specific addr 3 low reg */
REG32(SPADDR3HI, 0x9c) /* Specific addr 3 high reg */
REG32(SPADDR4LO, 0xa0) /* Specific addr 4 low reg */
REG32(SPADDR4HI, 0xa4) /* Specific addr 4 high reg */
REG32(TIDMATCH1, 0xa8) /* Type ID1 Match reg */
REG32(TIDMATCH2, 0xac) /* Type ID2 Match reg */
REG32(TIDMATCH3, 0xb0) /* Type ID3 Match reg */
REG32(TIDMATCH4, 0xb4) /* Type ID4 Match reg */
REG32(WOLAN, 0xb8) /* Wake on LAN reg */
REG32(IPGSTRETCH, 0xbc) /* IPG Stretch reg */
REG32(SVLAN, 0xc0) /* Stacked VLAN reg */
REG32(MODID, 0xfc) /* Module ID reg */
REG32(OCTTXLO, 0x100) /* Octets transmitted Low reg */
REG32(OCTTXHI, 0x104) /* Octets transmitted High reg */
REG32(TXCNT, 0x108) /* Error-free Frames transmitted */
REG32(TXBCNT, 0x10c) /* Error-free Broadcast Frames */
REG32(TXMCNT, 0x110) /* Error-free Multicast Frame */
REG32(TXPAUSECNT, 0x114) /* Pause Frames Transmitted */
REG32(TX64CNT, 0x118) /* Error-free 64 TX */
REG32(TX65CNT, 0x11c) /* Error-free 65-127 TX */
REG32(TX128CNT, 0x120) /* Error-free 128-255 TX */
REG32(TX256CNT, 0x124) /* Error-free 256-511 */
REG32(TX512CNT, 0x128) /* Error-free 512-1023 TX */
REG32(TX1024CNT, 0x12c) /* Error-free 1024-1518 TX */
REG32(TX1519CNT, 0x130) /* Error-free larger than 1519 TX */
REG32(TXURUNCNT, 0x134) /* TX under run error counter */
REG32(SINGLECOLLCNT, 0x138) /* Single Collision Frames */
REG32(MULTCOLLCNT, 0x13c) /* Multiple Collision Frames */
REG32(EXCESSCOLLCNT, 0x140) /* Excessive Collision Frames */
REG32(LATECOLLCNT, 0x144) /* Late Collision Frames */
REG32(DEFERTXCNT, 0x148) /* Deferred Transmission Frames */
REG32(CSENSECNT, 0x14c) /* Carrier Sense Error Counter */
REG32(OCTRXLO, 0x150) /* Octets Received register Low */
REG32(OCTRXHI, 0x154) /* Octets Received register High */
REG32(RXCNT, 0x158) /* Error-free Frames Received */
REG32(RXBROADCNT, 0x15c) /* Error-free Broadcast Frames RX */
REG32(RXMULTICNT, 0x160) /* Error-free Multicast Frames RX */
REG32(RXPAUSECNT, 0x164) /* Pause Frames Received Counter */
REG32(RX64CNT, 0x168) /* Error-free 64 byte Frames RX */
REG32(RX65CNT, 0x16c) /* Error-free 65-127B Frames RX */
REG32(RX128CNT, 0x170) /* Error-free 128-255B Frames RX */
REG32(RX256CNT, 0x174) /* Error-free 256-512B Frames RX */
REG32(RX512CNT, 0x178) /* Error-free 512-1023B Frames RX */
REG32(RX1024CNT, 0x17c) /* Error-free 1024-1518B Frames RX */
REG32(RX1519CNT, 0x180) /* Error-free 1519-max Frames RX */
REG32(RXUNDERCNT, 0x184) /* Undersize Frames Received */
REG32(RXOVERCNT, 0x188) /* Oversize Frames Received */
REG32(RXJABCNT, 0x18c) /* Jabbers Received Counter */
REG32(RXFCSCNT, 0x190) /* Frame Check seq. Error Counter */
REG32(RXLENERRCNT, 0x194) /* Length Field Error Counter */
REG32(RXSYMERRCNT, 0x198) /* Symbol Error Counter */
REG32(RXALIGNERRCNT, 0x19c) /* Alignment Error Counter */
REG32(RXRSCERRCNT, 0x1a0) /* Receive Resource Error Counter */
REG32(RXORUNCNT, 0x1a4) /* Receive Overrun Counter */
REG32(RXIPCSERRCNT, 0x1a8) /* IP header Checksum Err Counter */
REG32(RXTCPCCNT, 0x1ac) /* TCP Checksum Error Counter */
REG32(RXUDPCCNT, 0x1b0) /* UDP Checksum Error Counter */

REG32(1588S, 0x1d0) /* 1588 Timer Seconds */
REG32(1588NS, 0x1d4) /* 1588 Timer Nanoseconds */
REG32(1588ADJ, 0x1d8) /* 1588 Timer Adjust */
REG32(1588INC, 0x1dc) /* 1588 Timer Increment */
REG32(PTPETXS, 0x1e0) /* PTP Event Frame Transmitted (s) */
REG32(PTPETXNS, 0x1e4) /* PTP Event Frame Transmitted (ns) */
REG32(PTPERXS, 0x1e8) /* PTP Event Frame Received (s) */
REG32(PTPERXNS, 0x1ec) /* PTP Event Frame Received (ns) */
REG32(PTPPTXS, 0x1e0) /* PTP Peer Frame Transmitted (s) */
REG32(PTPPTXNS, 0x1e4) /* PTP Peer Frame Transmitted (ns) */
REG32(PTPPRXS, 0x1e8) /* PTP Peer Frame Received (s) */
REG32(PTPPRXNS, 0x1ec) /* PTP Peer Frame Received (ns) */

/* Design Configuration Registers */
REG32(DESCONF, 0x280)
REG32(DESCONF2, 0x284)
REG32(DESCONF3, 0x288)
REG32(DESCONF4, 0x28c)
REG32(DESCONF5, 0x290)
REG32(DESCONF6, 0x294)
    FIELD(DESCONF6, DMA_ADDR_64B, 23, 1)
REG32(DESCONF7, 0x298)

REG32(INT_Q1_STATUS, 0x400)
REG32(INT_Q1_MASK, 0x640)

REG32(TRANSMIT_Q1_PTR, 0x440)
REG32(TRANSMIT_Q7_PTR, 0x458)

REG32(RECEIVE_Q1_PTR, 0x480)
REG32(RECEIVE_Q7_PTR, 0x498)

REG32(TBQPH, 0x4c8)
REG32(RBQPH, 0x4d4)

REG32(INT_Q1_ENABLE, 0x600)
REG32(INT_Q7_ENABLE, 0x618)

REG32(INT_Q1_DISABLE, 0x620)
REG32(INT_Q7_DISABLE, 0x638)

REG32(SCREENING_TYPE1_REG0, 0x500)
    FIELD(SCREENING_TYPE1_REG0, QUEUE_NUM, 0, 4)
    FIELD(SCREENING_TYPE1_REG0, DSTC_MATCH, 4, 8)
    FIELD(SCREENING_TYPE1_REG0, UDP_PORT_MATCH, 12, 16)
    FIELD(SCREENING_TYPE1_REG0, DSTC_ENABLE, 28, 1)
    FIELD(SCREENING_TYPE1_REG0, UDP_PORT_MATCH_EN, 29, 1)
    FIELD(SCREENING_TYPE1_REG0, DROP_ON_MATCH, 30, 1)

REG32(SCREENING_TYPE2_REG0, 0x540)
    FIELD(SCREENING_TYPE2_REG0, QUEUE_NUM, 0, 4)
    FIELD(SCREENING_TYPE2_REG0, VLAN_PRIORITY, 4, 3)
    FIELD(SCREENING_TYPE2_REG0, VLAN_ENABLE, 8, 1)
    FIELD(SCREENING_TYPE2_REG0, ETHERTYPE_REG_INDEX, 9, 3)
    FIELD(SCREENING_TYPE2_REG0, ETHERTYPE_ENABLE, 12, 1)
    FIELD(SCREENING_TYPE2_REG0, COMPARE_A, 13, 5)
    FIELD(SCREENING_TYPE2_REG0, COMPARE_A_ENABLE, 18, 1)
    FIELD(SCREENING_TYPE2_REG0, COMPARE_B, 19, 5)
    FIELD(SCREENING_TYPE2_REG0, COMPARE_B_ENABLE, 24, 1)
    FIELD(SCREENING_TYPE2_REG0, COMPARE_C, 25, 5)
    FIELD(SCREENING_TYPE2_REG0, COMPARE_C_ENABLE, 30, 1)
    FIELD(SCREENING_TYPE2_REG0, DROP_ON_MATCH, 31, 1)

REG32(SCREENING_TYPE2_ETHERTYPE_REG0, 0x6e0)

REG32(TYPE2_COMPARE_0_WORD_0, 0x700)
    FIELD(TYPE2_COMPARE_0_WORD_0, MASK_VALUE, 0, 16)
    FIELD(TYPE2_COMPARE_0_WORD_0, COMPARE_VALUE, 16, 16)

REG32(TYPE2_COMPARE_0_WORD_1, 0x704)
    FIELD(TYPE2_COMPARE_0_WORD_1, OFFSET_VALUE, 0, 7)
    FIELD(TYPE2_COMPARE_0_WORD_1, COMPARE_OFFSET, 7, 2)
    FIELD(TYPE2_COMPARE_0_WORD_1, DISABLE_MASK, 9, 1)
    FIELD(TYPE2_COMPARE_0_WORD_1, COMPARE_VLAN_ID, 10, 1)

/*****************************************/
/* Marvell PHY definitions */
#define BOARD_PHY_ADDRESS 0 /* PHY address we will emulate a device at */

#define PHY_REG_CONTROL 0
#define PHY_REG_STATUS 1
#define PHY_REG_PHYID1 2
#define PHY_REG_PHYID2 3
#define PHY_REG_ANEGADV 4
#define PHY_REG_LINKPABIL 5
#define PHY_REG_ANEGEXP 6
#define PHY_REG_NEXTP 7
#define PHY_REG_LINKPNEXTP 8
#define PHY_REG_100BTCTRL 9
#define PHY_REG_1000BTSTAT 10
#define PHY_REG_EXTSTAT 15
#define PHY_REG_PHYSPCFC_CTL 16
#define PHY_REG_PHYSPCFC_ST 17
#define PHY_REG_INT_EN 18
#define PHY_REG_INT_ST 19
#define PHY_REG_EXT_PHYSPCFC_CTL 20
#define PHY_REG_RXERR 21
#define PHY_REG_EACD 22
#define PHY_REG_LED 24
#define PHY_REG_LED_OVRD 25
#define PHY_REG_EXT_PHYSPCFC_CTL2 26
#define PHY_REG_EXT_PHYSPCFC_ST 27
#define PHY_REG_CABLE_DIAG 28

#define PHY_REG_CONTROL_RST 0x8000
#define PHY_REG_CONTROL_LOOP 0x4000
#define PHY_REG_CONTROL_ANEG 0x1000
#define PHY_REG_CONTROL_ANRESTART 0x0200

#define PHY_REG_STATUS_LINK 0x0004
#define PHY_REG_STATUS_ANEGCMPL 0x0020

#define PHY_REG_INT_ST_ANEGCMPL 0x0800
#define PHY_REG_INT_ST_LINKC 0x0400
#define PHY_REG_INT_ST_ENERGY 0x0010

/***********************************************************************/
#define GEM_RX_REJECT (-1)
#define GEM_RX_PROMISCUOUS_ACCEPT (-2)
#define GEM_RX_BROADCAST_ACCEPT (-3)
#define GEM_RX_MULTICAST_HASH_ACCEPT (-4)
#define GEM_RX_UNICAST_HASH_ACCEPT (-5)

#define GEM_RX_SAR_ACCEPT 0

/***********************************************************************/

#define DESC_1_USED 0x80000000
#define DESC_1_LENGTH 0x00001FFF

#define DESC_1_TX_WRAP 0x40000000
#define DESC_1_TX_LAST 0x00008000

#define DESC_0_RX_WRAP 0x00000002
#define DESC_0_RX_OWNERSHIP 0x00000001

#define R_DESC_1_RX_SAR_SHIFT 25
#define R_DESC_1_RX_SAR_LENGTH 2
#define R_DESC_1_RX_SAR_MATCH (1 << 27)
#define R_DESC_1_RX_UNICAST_HASH (1 << 29)
#define R_DESC_1_RX_MULTICAST_HASH (1 << 30)
#define R_DESC_1_RX_BROADCAST (1 << 31)

#define DESC_1_RX_SOF 0x00004000
#define DESC_1_RX_EOF 0x00008000

#define GEM_MODID_VALUE 0x00020118
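
/*
 * Descriptor word layout, as used by the accessors below: word 0 holds the
 * buffer address (RX descriptors keep the wrap and ownership flags in its
 * two low bits), word 1 holds the control/status flags plus the length,
 * and word 2 supplies the upper 32 address bits when
 * DMACFG.DMA_ADDR_BUS_WIDTH selects 64-bit DMA addressing.
 */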

static inline uint64_t tx_desc_get_buffer(CadenceGEMState *s, uint32_t *desc)
{
    uint64_t ret = desc[0];

    if (FIELD_EX32(s->regs[R_DMACFG], DMACFG, DMA_ADDR_BUS_WIDTH)) {
        ret |= (uint64_t)desc[2] << 32;
    }
    return ret;
}

static inline unsigned tx_desc_get_used(uint32_t *desc)
{
    return (desc[1] & DESC_1_USED) ? 1 : 0;
}

static inline void tx_desc_set_used(uint32_t *desc)
{
    desc[1] |= DESC_1_USED;
}

static inline unsigned tx_desc_get_wrap(uint32_t *desc)
{
    return (desc[1] & DESC_1_TX_WRAP) ? 1 : 0;
}

static inline unsigned tx_desc_get_last(uint32_t *desc)
{
    return (desc[1] & DESC_1_TX_LAST) ? 1 : 0;
}

static inline unsigned tx_desc_get_length(uint32_t *desc)
{
    return desc[1] & DESC_1_LENGTH;
}

static inline void print_gem_tx_desc(uint32_t *desc, uint8_t queue)
{
    DB_PRINT("TXDESC (queue %" PRId8 "):\n", queue);
    DB_PRINT("bufaddr: 0x%08x\n", *desc);
    DB_PRINT("used_hw: %d\n", tx_desc_get_used(desc));
    DB_PRINT("wrap: %d\n", tx_desc_get_wrap(desc));
    DB_PRINT("last: %d\n", tx_desc_get_last(desc));
    DB_PRINT("length: %d\n", tx_desc_get_length(desc));
}

static inline uint64_t rx_desc_get_buffer(CadenceGEMState *s, uint32_t *desc)
{
    uint64_t ret = desc[0] & ~0x3UL;

    if (FIELD_EX32(s->regs[R_DMACFG], DMACFG, DMA_ADDR_BUS_WIDTH)) {
        ret |= (uint64_t)desc[2] << 32;
    }
    return ret;
}

static inline int gem_get_desc_len(CadenceGEMState *s, bool rx_n_tx)
{
    int ret = 2;

    if (FIELD_EX32(s->regs[R_DMACFG], DMACFG, DMA_ADDR_BUS_WIDTH)) {
        ret += 2;
    }
    if (s->regs[R_DMACFG] & (rx_n_tx ? R_DMACFG_RX_BD_EXT_MODE_EN_MASK
                                     : R_DMACFG_TX_BD_EXT_MODE_EN_MASK)) {
        ret += 2;
    }

    assert(ret <= DESC_MAX_NUM_WORDS);
    return ret;
}
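
/*
 * Worked example: with 64-bit DMA addressing and the relevant extended
 * buffer descriptor mode both enabled, a descriptor occupies
 * 2 + 2 + 2 = 6 words (24 bytes); the plain 32-bit case is the minimal
 * 2 words.  The assert above checks DESC_MAX_NUM_WORDS accordingly.
 */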

static inline unsigned rx_desc_get_wrap(uint32_t *desc)
{
    return desc[0] & DESC_0_RX_WRAP ? 1 : 0;
}

static inline unsigned rx_desc_get_ownership(uint32_t *desc)
{
    return desc[0] & DESC_0_RX_OWNERSHIP ? 1 : 0;
}

static inline void rx_desc_set_ownership(uint32_t *desc)
{
    desc[0] |= DESC_0_RX_OWNERSHIP;
}

static inline void rx_desc_set_sof(uint32_t *desc)
{
    desc[1] |= DESC_1_RX_SOF;
}

static inline void rx_desc_clear_control(uint32_t *desc)
{
    desc[1] = 0;
}

static inline void rx_desc_set_eof(uint32_t *desc)
{
    desc[1] |= DESC_1_RX_EOF;
}

static inline void rx_desc_set_length(uint32_t *desc, unsigned len)
{
    desc[1] &= ~DESC_1_LENGTH;
    desc[1] |= len;
}

static inline void rx_desc_set_broadcast(uint32_t *desc)
{
    desc[1] |= R_DESC_1_RX_BROADCAST;
}

static inline void rx_desc_set_unicast_hash(uint32_t *desc)
{
    desc[1] |= R_DESC_1_RX_UNICAST_HASH;
}

static inline void rx_desc_set_multicast_hash(uint32_t *desc)
{
    desc[1] |= R_DESC_1_RX_MULTICAST_HASH;
}

static inline void rx_desc_set_sar(uint32_t *desc, int sar_idx)
{
    desc[1] = deposit32(desc[1], R_DESC_1_RX_SAR_SHIFT, R_DESC_1_RX_SAR_LENGTH,
                        sar_idx);
    desc[1] |= R_DESC_1_RX_SAR_MATCH;
}

/* The broadcast MAC address: 0xFFFFFFFFFFFF */
static const uint8_t broadcast_addr[] = { 0xFF, 0xFF, 0xFF, 0xFF, 0xFF, 0xFF };

static uint32_t gem_get_max_buf_len(CadenceGEMState *s, bool tx)
{
    uint32_t size;

    if (FIELD_EX32(s->regs[R_NWCFG], NWCFG, JUMBO_FRAMES)) {
        size = s->regs[R_JUMBO_MAX_LEN];
        if (size > s->jumbo_max_len) {
            size = s->jumbo_max_len;
            qemu_log_mask(LOG_GUEST_ERROR, "GEM_JUMBO_MAX_LEN reg cannot be"
                          " greater than 0x%" PRIx32 "\n", s->jumbo_max_len);
        }
    } else if (tx) {
        size = 1518;
    } else {
        size = FIELD_EX32(s->regs[R_NWCFG],
                          NWCFG, RECV_1536_BYTE_FRAMES) ? 1538 : 1518;
    }
    return size;
}
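
/*
 * Summary of the limits computed above: 1518 bytes for standard frames,
 * 1538 on receive when NWCFG.RECV_1536_BYTE_FRAMES is set, and the
 * JUMBO_MAX_LEN register value (clamped to the s->jumbo_max_len limit)
 * when jumbo frames are enabled.
 */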

static void gem_set_isr(CadenceGEMState *s, int q, uint32_t flag)
{
    if (q == 0) {
        s->regs[R_ISR] |= flag & ~(s->regs[R_IMR]);
    } else {
        s->regs[R_INT_Q1_STATUS + q - 1] |= flag &
                                            ~(s->regs[R_INT_Q1_MASK + q - 1]);
    }
}
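
/*
 * Note that masking happens when the event is latched: a bit disabled in
 * IMR (or in INT_Q1_MASK and friends for the higher queues) is never
 * recorded in the corresponding status register at all, rather than being
 * hidden at read time.
 */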

/*
 * gem_init_register_masks:
 * One time initialization.
 * Set masks to identify which register bits have magical clear properties
 */
static void gem_init_register_masks(CadenceGEMState *s)
{
    unsigned int i;

    /* Mask of register bits which are read only */
    memset(&s->regs_ro[0], 0, sizeof(s->regs_ro));
    s->regs_ro[R_NWCTRL] = 0xFFF80000;
    s->regs_ro[R_NWSTATUS] = 0xFFFFFFFF;
    s->regs_ro[R_DMACFG] = 0x8E00F000;
    s->regs_ro[R_TXSTATUS] = 0xFFFFFE08;
    s->regs_ro[R_RXQBASE] = 0x00000003;
    s->regs_ro[R_TXQBASE] = 0x00000003;
    s->regs_ro[R_RXSTATUS] = 0xFFFFFFF0;
    s->regs_ro[R_ISR] = 0xFFFFFFFF;
    s->regs_ro[R_IMR] = 0xFFFFFFFF;
    s->regs_ro[R_MODID] = 0xFFFFFFFF;
    for (i = 0; i < s->num_priority_queues; i++) {
        s->regs_ro[R_INT_Q1_STATUS + i] = 0xFFFFFFFF;
        s->regs_ro[R_INT_Q1_ENABLE + i] = 0xFFFFF319;
        s->regs_ro[R_INT_Q1_DISABLE + i] = 0xFFFFF319;
        s->regs_ro[R_INT_Q1_MASK + i] = 0xFFFFFFFF;
    }

    /* Mask of register bits which are clear on read */
    memset(&s->regs_rtc[0], 0, sizeof(s->regs_rtc));
    s->regs_rtc[R_ISR] = 0xFFFFFFFF;
    for (i = 0; i < s->num_priority_queues; i++) {
        s->regs_rtc[R_INT_Q1_STATUS + i] = 0x00000CE6;
    }

    /* Mask of register bits which are write 1 to clear */
    memset(&s->regs_w1c[0], 0, sizeof(s->regs_w1c));
    s->regs_w1c[R_TXSTATUS] = 0x000001F7;
    s->regs_w1c[R_RXSTATUS] = 0x0000000F;

    /* Mask of register bits which are write only */
    memset(&s->regs_wo[0], 0, sizeof(s->regs_wo));
    s->regs_wo[R_NWCTRL] = 0x00073E60;
    s->regs_wo[R_IER] = 0x07FFFFFF;
    s->regs_wo[R_IDR] = 0x07FFFFFF;
    for (i = 0; i < s->num_priority_queues; i++) {
        s->regs_wo[R_INT_Q1_ENABLE + i] = 0x00000CE6;
        s->regs_wo[R_INT_Q1_DISABLE + i] = 0x00000CE6;
    }
}

/*
 * phy_update_link:
 * Make the emulated PHY link state match the QEMU "interface" state.
 */
static void phy_update_link(CadenceGEMState *s)
{
    DB_PRINT("down %d\n", qemu_get_queue(s->nic)->link_down);

    /* Autonegotiation status mirrors link status. */
    if (qemu_get_queue(s->nic)->link_down) {
        s->phy_regs[PHY_REG_STATUS] &= ~(PHY_REG_STATUS_ANEGCMPL |
                                         PHY_REG_STATUS_LINK);
        s->phy_regs[PHY_REG_INT_ST] |= PHY_REG_INT_ST_LINKC;
    } else {
        s->phy_regs[PHY_REG_STATUS] |= (PHY_REG_STATUS_ANEGCMPL |
                                        PHY_REG_STATUS_LINK);
        s->phy_regs[PHY_REG_INT_ST] |= (PHY_REG_INT_ST_LINKC |
                                        PHY_REG_INT_ST_ANEGCMPL |
                                        PHY_REG_INT_ST_ENERGY);
    }
}

static bool gem_can_receive(NetClientState *nc)
{
    CadenceGEMState *s;
    int i;

    s = qemu_get_nic_opaque(nc);

    /* Do nothing if receive is not enabled. */
    if (!FIELD_EX32(s->regs[R_NWCTRL], NWCTRL, ENABLE_RECEIVE)) {
        if (s->can_rx_state != 1) {
            s->can_rx_state = 1;
            DB_PRINT("can't receive - no enable\n");
        }
        return false;
    }

    for (i = 0; i < s->num_priority_queues; i++) {
        if (rx_desc_get_ownership(s->rx_desc[i]) != 1) {
            break;
        }
    }

    if (i == s->num_priority_queues) {
        if (s->can_rx_state != 2) {
            s->can_rx_state = 2;
            DB_PRINT("can't receive - all the buffer descriptors are busy\n");
        }
        return false;
    }

    if (s->can_rx_state != 0) {
        s->can_rx_state = 0;
        DB_PRINT("can receive\n");
    }
    return true;
}

/*
 * gem_update_int_status:
 * Raise or lower interrupt based on current status.
 */
static void gem_update_int_status(CadenceGEMState *s)
{
    int i;

    qemu_set_irq(s->irq[0], !!s->regs[R_ISR]);

    for (i = 1; i < s->num_priority_queues; ++i) {
        qemu_set_irq(s->irq[i], !!s->regs[R_INT_Q1_STATUS + i - 1]);
    }
}

/*
 * gem_receive_updatestats:
 * Increment receive statistics.
 */
static void gem_receive_updatestats(CadenceGEMState *s, const uint8_t *packet,
                                    unsigned bytes)
{
    uint64_t octets;

    /* Total octets (bytes) received */
    octets = ((uint64_t)(s->regs[R_OCTRXLO]) << 32) |
             s->regs[R_OCTRXHI];
    octets += bytes;
    s->regs[R_OCTRXLO] = octets >> 32;
    s->regs[R_OCTRXHI] = octets;

    /* Error-free Frames received */
    s->regs[R_RXCNT]++;

    /* Error-free Broadcast Frames counter */
    if (!memcmp(packet, broadcast_addr, 6)) {
        s->regs[R_RXBROADCNT]++;
    }

    /* Error-free Multicast Frames counter */
    if (packet[0] == 0x01) {
        s->regs[R_RXMULTICNT]++;
    }

    if (bytes <= 64) {
        s->regs[R_RX64CNT]++;
    } else if (bytes <= 127) {
        s->regs[R_RX65CNT]++;
    } else if (bytes <= 255) {
        s->regs[R_RX128CNT]++;
    } else if (bytes <= 511) {
        s->regs[R_RX256CNT]++;
    } else if (bytes <= 1023) {
        s->regs[R_RX512CNT]++;
    } else if (bytes <= 1518) {
        s->regs[R_RX1024CNT]++;
    } else {
        s->regs[R_RX1519CNT]++;
    }
}

/*
 * Get the MAC Address bit from the specified position
 */
static unsigned get_bit(const uint8_t *mac, unsigned bit)
{
    unsigned byte;

    byte = mac[bit / 8];
    byte >>= (bit & 0x7);
    byte &= 1;

    return byte;
}

/*
 * Calculate a GEM MAC Address hash index
 */
static unsigned calc_mac_hash(const uint8_t *mac)
{
    int index_bit, mac_bit;
    unsigned hash_index;

    hash_index = 0;
    mac_bit = 5;
    for (index_bit = 5; index_bit >= 0; index_bit--) {
        hash_index |= (get_bit(mac, mac_bit) ^
                       get_bit(mac, mac_bit + 6) ^
                       get_bit(mac, mac_bit + 12) ^
                       get_bit(mac, mac_bit + 18) ^
                       get_bit(mac, mac_bit + 24) ^
                       get_bit(mac, mac_bit + 30) ^
                       get_bit(mac, mac_bit + 36) ^
                       get_bit(mac, mac_bit + 42)) << index_bit;
        mac_bit--;
    }

    return hash_index;
}
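
/*
 * The hash above folds the 48-bit address into a 6-bit index by XOR-ing
 * eight interleaved 6-bit slices of the address.  As a worked example,
 * the broadcast address ff:ff:ff:ff:ff:ff XORs eight 1s in every index
 * bit position, so it hashes to index 0.
 */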

/*
 * gem_mac_address_filter:
 * Accept or reject this destination address?
 * Returns:
 * GEM_RX_REJECT: reject
 * >= 0: Specific address accept (which matched SAR is returned)
 * others for various other modes of accept:
 * GEM_RX_PROMISCUOUS_ACCEPT, GEM_RX_BROADCAST_ACCEPT,
 * GEM_RX_MULTICAST_HASH_ACCEPT or GEM_RX_UNICAST_HASH_ACCEPT
 */
static int gem_mac_address_filter(CadenceGEMState *s, const uint8_t *packet)
{
    uint8_t *gem_spaddr;
    int i, is_mc;

    /* Promiscuous mode? */
    if (FIELD_EX32(s->regs[R_NWCFG], NWCFG, PROMISC)) {
        return GEM_RX_PROMISCUOUS_ACCEPT;
    }

    if (!memcmp(packet, broadcast_addr, 6)) {
        /* Reject broadcast packets? */
        if (FIELD_EX32(s->regs[R_NWCFG], NWCFG, NO_BROADCAST)) {
            return GEM_RX_REJECT;
        }
        return GEM_RX_BROADCAST_ACCEPT;
    }

    /* Accept packets with a hash match? */
    is_mc = is_multicast_ether_addr(packet);
    if ((is_mc && (FIELD_EX32(s->regs[R_NWCFG], NWCFG, MULTICAST_HASH_EN))) ||
        (!is_mc && FIELD_EX32(s->regs[R_NWCFG], NWCFG, UNICAST_HASH_EN))) {
        uint64_t buckets;
        unsigned hash_index;

        hash_index = calc_mac_hash(packet);
        buckets = ((uint64_t)s->regs[R_HASHHI] << 32) | s->regs[R_HASHLO];
        if ((buckets >> hash_index) & 1) {
            return is_mc ? GEM_RX_MULTICAST_HASH_ACCEPT
                         : GEM_RX_UNICAST_HASH_ACCEPT;
        }
    }

    /* Check all 4 specific addresses */
    gem_spaddr = (uint8_t *)&(s->regs[R_SPADDR1LO]);
    for (i = 3; i >= 0; i--) {
        if (s->sar_active[i] && !memcmp(packet, gem_spaddr + 8 * i, 6)) {
            return GEM_RX_SAR_ACCEPT + i;
        }
    }

    /* No address match; reject the packet */
    return GEM_RX_REJECT;
}

/* Figure out which queue the received data should be sent to */
static int get_queue_from_screen(CadenceGEMState *s, uint8_t *rxbuf_ptr,
                                 unsigned rxbufsize)
{
    uint32_t reg;
    bool matched, mismatched;
    int i, j;

    for (i = 0; i < s->num_type1_screeners; i++) {
        reg = s->regs[R_SCREENING_TYPE1_REG0 + i];
        matched = false;
        mismatched = false;

        /* Screening is based on UDP Port */
        if (FIELD_EX32(reg, SCREENING_TYPE1_REG0, UDP_PORT_MATCH_EN)) {
            uint16_t udp_port = rxbuf_ptr[14 + 22] << 8 | rxbuf_ptr[14 + 23];
            if (udp_port == FIELD_EX32(reg, SCREENING_TYPE1_REG0,
                                       UDP_PORT_MATCH)) {
                matched = true;
            } else {
                mismatched = true;
            }
        }

        /* Screening is based on DS/TC */
        if (FIELD_EX32(reg, SCREENING_TYPE1_REG0, DSTC_ENABLE)) {
            uint8_t dscp = rxbuf_ptr[14 + 1];
            if (dscp == FIELD_EX32(reg, SCREENING_TYPE1_REG0, DSTC_MATCH)) {
                matched = true;
            } else {
                mismatched = true;
            }
        }

        if (matched && !mismatched) {
            return FIELD_EX32(reg, SCREENING_TYPE1_REG0, QUEUE_NUM);
        }
    }

    for (i = 0; i < s->num_type2_screeners; i++) {
        reg = s->regs[R_SCREENING_TYPE2_REG0 + i];
        matched = false;
        mismatched = false;

        if (FIELD_EX32(reg, SCREENING_TYPE2_REG0, ETHERTYPE_ENABLE)) {
            uint16_t type = rxbuf_ptr[12] << 8 | rxbuf_ptr[13];
            int et_idx = FIELD_EX32(reg, SCREENING_TYPE2_REG0,
                                    ETHERTYPE_REG_INDEX);

            if (et_idx > s->num_type2_screeners) {
                qemu_log_mask(LOG_GUEST_ERROR, "Out of range ethertype "
                              "register index: %d\n", et_idx);
            }
            if (type == s->regs[R_SCREENING_TYPE2_ETHERTYPE_REG0 + et_idx]) {
                matched = true;
            } else {
                mismatched = true;
            }
        }

        /* Compare A, B, C */
        for (j = 0; j < 3; j++) {
            uint32_t cr0, cr1, mask, compare, disable_mask;
            uint32_t rx_cmp;
            int offset;
            int cr_idx = extract32(reg,
                                   R_SCREENING_TYPE2_REG0_COMPARE_A_SHIFT +
                                   j * 6,
                                   R_SCREENING_TYPE2_REG0_COMPARE_A_LENGTH);

            if (!extract32(reg,
                           R_SCREENING_TYPE2_REG0_COMPARE_A_ENABLE_SHIFT +
                           j * 6,
                           R_SCREENING_TYPE2_REG0_COMPARE_A_ENABLE_LENGTH)) {
                continue;
            }

            if (cr_idx > s->num_type2_screeners) {
                qemu_log_mask(LOG_GUEST_ERROR, "Out of range compare "
                              "register index: %d\n", cr_idx);
            }

            cr0 = s->regs[R_TYPE2_COMPARE_0_WORD_0 + cr_idx * 2];
            cr1 = s->regs[R_TYPE2_COMPARE_0_WORD_1 + cr_idx * 2];
            offset = FIELD_EX32(cr1, TYPE2_COMPARE_0_WORD_1, OFFSET_VALUE);

            switch (FIELD_EX32(cr1, TYPE2_COMPARE_0_WORD_1, COMPARE_OFFSET)) {
            case 3: /* Skip UDP header */
                qemu_log_mask(LOG_UNIMP, "TCP compare offsets "
                              "unimplemented - assuming UDP\n");
                offset += 8;
                /* Fallthrough */
            case 2: /* skip the IP header */
                offset += 20;
                /* Fallthrough */
            case 1: /* Count from after the ethertype */
                offset += 14;
                break;
            case 0:
                /* Offset from start of frame */
                break;
            }

            disable_mask =
                FIELD_EX32(cr1, TYPE2_COMPARE_0_WORD_1, DISABLE_MASK);
            if (disable_mask) {
                /*
                 * If disable_mask is set, mask_value is used as an
                 * additional 2 byte Compare Value; that is equivalent
                 * to using the whole cr0 register as the comparison value.
                 * Load 32 bits of data from rx_buf, and set mask to
                 * all-ones so we compare all 32 bits.
                 */
                rx_cmp = ldl_le_p(rxbuf_ptr + offset);
                mask = 0xFFFFFFFF;
                compare = cr0;
            } else {
                rx_cmp = lduw_le_p(rxbuf_ptr + offset);
                mask = FIELD_EX32(cr0, TYPE2_COMPARE_0_WORD_0, MASK_VALUE);
                compare =
                    FIELD_EX32(cr0, TYPE2_COMPARE_0_WORD_0, COMPARE_VALUE);
            }

            if ((rx_cmp & mask) == (compare & mask)) {
                matched = true;
            } else {
                mismatched = true;
            }
        }

        if (matched && !mismatched) {
            return FIELD_EX32(reg, SCREENING_TYPE2_REG0, QUEUE_NUM);
        }
    }

    /* We made it here, assume it's queue 0 */
    return 0;
}

static uint32_t gem_get_queue_base_addr(CadenceGEMState *s, bool tx, int q)
{
    uint32_t base_addr = 0;

    switch (q) {
    case 0:
        base_addr = s->regs[tx ? R_TXQBASE : R_RXQBASE];
        break;
    case 1 ... (MAX_PRIORITY_QUEUES - 1):
        base_addr = s->regs[(tx ? R_TRANSMIT_Q1_PTR :
                                  R_RECEIVE_Q1_PTR) + q - 1];
        break;
    default:
        g_assert_not_reached();
    }

    return base_addr;
}

static inline uint32_t gem_get_tx_queue_base_addr(CadenceGEMState *s, int q)
{
    return gem_get_queue_base_addr(s, true, q);
}

static inline uint32_t gem_get_rx_queue_base_addr(CadenceGEMState *s, int q)
{
    return gem_get_queue_base_addr(s, false, q);
}

static hwaddr gem_get_desc_addr(CadenceGEMState *s, bool tx, int q)
{
    hwaddr desc_addr = 0;

    if (FIELD_EX32(s->regs[R_DMACFG], DMACFG, DMA_ADDR_BUS_WIDTH)) {
        desc_addr = s->regs[tx ? R_TBQPH : R_RBQPH];
    }
    desc_addr <<= 32;
    desc_addr |= tx ? s->tx_desc_addr[q] : s->rx_desc_addr[q];
    return desc_addr;
}
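
/*
 * Note that the single TBQPH/RBQPH register pair supplies the upper
 * address bits for every queue, so with 64-bit DMA addressing all of a
 * direction's descriptor rings must sit within the same 4 GiB window.
 */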

static hwaddr gem_get_tx_desc_addr(CadenceGEMState *s, int q)
{
    return gem_get_desc_addr(s, true, q);
}

static hwaddr gem_get_rx_desc_addr(CadenceGEMState *s, int q)
{
    return gem_get_desc_addr(s, false, q);
}

static void gem_get_rx_desc(CadenceGEMState *s, int q)
{
    hwaddr desc_addr = gem_get_rx_desc_addr(s, q);

    DB_PRINT("read descriptor 0x%" HWADDR_PRIx "\n", desc_addr);

    /* read current descriptor */
    address_space_read(&s->dma_as, desc_addr, MEMTXATTRS_UNSPECIFIED,
                       s->rx_desc[q],
                       sizeof(uint32_t) * gem_get_desc_len(s, true));

    /* Descriptor owned by software ? */
    if (rx_desc_get_ownership(s->rx_desc[q]) == 1) {
        DB_PRINT("descriptor 0x%" HWADDR_PRIx " owned by sw.\n", desc_addr);
        s->regs[R_RXSTATUS] |= R_RXSTATUS_BUF_NOT_AVAILABLE_MASK;
        gem_set_isr(s, q, R_ISR_RX_USED_MASK);
        /* Handle interrupt consequences */
        gem_update_int_status(s);
    }
}

/*
 * gem_receive:
 * Fit a packet handed to us by QEMU into the receive descriptor ring.
 */
static ssize_t gem_receive(NetClientState *nc, const uint8_t *buf, size_t size)
{
    CadenceGEMState *s = qemu_get_nic_opaque(nc);
    unsigned rxbufsize, bytes_to_copy;
    unsigned rxbuf_offset;
    uint8_t *rxbuf_ptr;
    bool first_desc = true;
    int maf;
    int q = 0;

    /* Is this destination MAC address "for us" ? */
    maf = gem_mac_address_filter(s, buf);
    if (maf == GEM_RX_REJECT) {
        return size;  /* no, drop it silently; this is not an error */
    }

    /* Discard packets with receive length error enabled ? */
    if (FIELD_EX32(s->regs[R_NWCFG], NWCFG, LEN_ERR_DISCARD)) {
        unsigned type_len;

        /* Fish the ethertype / length field out of the RX packet */
        type_len = buf[12] << 8 | buf[13];
        /* It is a length field, not an ethertype */
        if (type_len < 0x600) {
            if (size < type_len) {
                /* discard */
                return -1;
            }
        }
    }

    /*
     * Determine configured receive buffer offset (probably 0)
     */
    rxbuf_offset = FIELD_EX32(s->regs[R_NWCFG], NWCFG, RECV_BUF_OFFSET);

    /* The configured size of each receive buffer determines how many
     * buffers are needed to hold this packet.
     */
    rxbufsize = FIELD_EX32(s->regs[R_DMACFG], DMACFG, RX_BUF_SIZE);
    rxbufsize *= GEM_DMACFG_RBUFSZ_MUL;

    bytes_to_copy = size;

    /* Hardware allows a zero value here but warns against it. To avoid QEMU
     * looping indefinitely we enforce a minimum value here.
     */
    if (rxbufsize < GEM_DMACFG_RBUFSZ_MUL) {
        rxbufsize = GEM_DMACFG_RBUFSZ_MUL;
    }

    /* Pad to minimum length. Assume the FCS field is stripped; the logic
     * below will grow this to the real minimum of 64 when the FCS is not
     * being stripped.
     */
    if (size < 60) {
        size = 60;
    }

    /* Strip off the FCS field ? (usually yes) */
    if (FIELD_EX32(s->regs[R_NWCFG], NWCFG, FCS_REMOVE)) {
        rxbuf_ptr = (void *)buf;
    } else {
        uint32_t crc_val;

        if (size > MAX_FRAME_SIZE - sizeof(crc_val)) {
            size = MAX_FRAME_SIZE - sizeof(crc_val);
        }
        bytes_to_copy = size;
        /* The application wants the FCS field, which QEMU does not provide.
         * We must try and calculate one.
         */
        memcpy(s->rx_packet, buf, size);
        memset(s->rx_packet + size, 0, MAX_FRAME_SIZE - size);
        rxbuf_ptr = s->rx_packet;
        crc_val = cpu_to_le32(crc32(0, s->rx_packet, MAX(size, 60)));
        memcpy(s->rx_packet + size, &crc_val, sizeof(crc_val));

        bytes_to_copy += 4;
        size += 4;
    }

    DB_PRINT("config bufsize: %u packet size: %zd\n", rxbufsize, size);

    /* Find which queue we are targeting */
    q = get_queue_from_screen(s, rxbuf_ptr, rxbufsize);

    if (size > gem_get_max_buf_len(s, false)) {
        qemu_log_mask(LOG_GUEST_ERROR, "rx frame too long\n");
        gem_set_isr(s, q, R_ISR_AMBA_ERROR_MASK);
        return -1;
    }

    while (bytes_to_copy) {
        hwaddr desc_addr;

        /* Do nothing if receive is not enabled. */
        if (!gem_can_receive(nc)) {
            return -1;
        }

        DB_PRINT("copy %" PRIu32 " bytes to 0x%" PRIx64 "\n",
                 MIN(bytes_to_copy, rxbufsize),
                 rx_desc_get_buffer(s, s->rx_desc[q]));

        /* Copy packet data to emulated DMA buffer */
        address_space_write(&s->dma_as, rx_desc_get_buffer(s, s->rx_desc[q]) +
                                        rxbuf_offset,
                            MEMTXATTRS_UNSPECIFIED, rxbuf_ptr,
                            MIN(bytes_to_copy, rxbufsize));
        rxbuf_ptr += MIN(bytes_to_copy, rxbufsize);
        bytes_to_copy -= MIN(bytes_to_copy, rxbufsize);

        rx_desc_clear_control(s->rx_desc[q]);

        /* Update the descriptor. */
        if (first_desc) {
            rx_desc_set_sof(s->rx_desc[q]);
            first_desc = false;
        }
        if (bytes_to_copy == 0) {
            rx_desc_set_eof(s->rx_desc[q]);
            rx_desc_set_length(s->rx_desc[q], size);
        }
        rx_desc_set_ownership(s->rx_desc[q]);

        switch (maf) {
        case GEM_RX_PROMISCUOUS_ACCEPT:
            break;
        case GEM_RX_BROADCAST_ACCEPT:
            rx_desc_set_broadcast(s->rx_desc[q]);
            break;
        case GEM_RX_UNICAST_HASH_ACCEPT:
            rx_desc_set_unicast_hash(s->rx_desc[q]);
            break;
        case GEM_RX_MULTICAST_HASH_ACCEPT:
            rx_desc_set_multicast_hash(s->rx_desc[q]);
            break;
        case GEM_RX_REJECT:
            abort();
        default: /* SAR */
            rx_desc_set_sar(s->rx_desc[q], maf);
        }

        /* Descriptor write-back. */
        desc_addr = gem_get_rx_desc_addr(s, q);
        address_space_write(&s->dma_as, desc_addr, MEMTXATTRS_UNSPECIFIED,
                            s->rx_desc[q],
                            sizeof(uint32_t) * gem_get_desc_len(s, true));

        /* Next descriptor */
        if (rx_desc_get_wrap(s->rx_desc[q])) {
            DB_PRINT("wrapping RX descriptor list\n");
            s->rx_desc_addr[q] = gem_get_rx_queue_base_addr(s, q);
        } else {
            DB_PRINT("incrementing RX descriptor list\n");
            s->rx_desc_addr[q] += 4 * gem_get_desc_len(s, true);
        }

        gem_get_rx_desc(s, q);
    }

    /* Count it */
    gem_receive_updatestats(s, buf, size);

    s->regs[R_RXSTATUS] |= R_RXSTATUS_FRAME_RECEIVED_MASK;
    gem_set_isr(s, q, R_ISR_RECV_COMPLETE_MASK);

    /* Handle interrupt consequences */
    gem_update_int_status(s);

    return size;
}

/*
 * gem_transmit_updatestats:
 * Increment transmit statistics.
 */
static void gem_transmit_updatestats(CadenceGEMState *s, const uint8_t *packet,
                                     unsigned bytes)
{
    uint64_t octets;

    /* Total octets (bytes) transmitted */
    octets = ((uint64_t)(s->regs[R_OCTTXLO]) << 32) |
             s->regs[R_OCTTXHI];
    octets += bytes;
    s->regs[R_OCTTXLO] = octets >> 32;
    s->regs[R_OCTTXHI] = octets;

    /* Error-free Frames transmitted */
    s->regs[R_TXCNT]++;

    /* Error-free Broadcast Frames counter */
    if (!memcmp(packet, broadcast_addr, 6)) {
        s->regs[R_TXBCNT]++;
    }

    /* Error-free Multicast Frames counter */
    if (packet[0] == 0x01) {
        s->regs[R_TXMCNT]++;
    }

    if (bytes <= 64) {
        s->regs[R_TX64CNT]++;
    } else if (bytes <= 127) {
        s->regs[R_TX65CNT]++;
    } else if (bytes <= 255) {
        s->regs[R_TX128CNT]++;
    } else if (bytes <= 511) {
        s->regs[R_TX256CNT]++;
    } else if (bytes <= 1023) {
        s->regs[R_TX512CNT]++;
    } else if (bytes <= 1518) {
        s->regs[R_TX1024CNT]++;
    } else {
        s->regs[R_TX1519CNT]++;
    }
}

/*
 * gem_transmit:
 * Fish packets out of the descriptor ring and feed them to QEMU
 */
static void gem_transmit(CadenceGEMState *s)
{
    uint32_t desc[DESC_MAX_NUM_WORDS];
    hwaddr packet_desc_addr;
    uint8_t *p;
    unsigned total_bytes;
    int q = 0;

    /* Do nothing if transmit is not enabled. */
    if (!FIELD_EX32(s->regs[R_NWCTRL], NWCTRL, ENABLE_TRANSMIT)) {
        return;
    }

    DB_PRINT("\n");

    /* The packet we will hand off to QEMU.
     * Packets scattered across multiple descriptors are gathered to this
     * one contiguous buffer first.
     */
    p = s->tx_packet;
    total_bytes = 0;

    for (q = s->num_priority_queues - 1; q >= 0; q--) {
        /* read current descriptor */
        packet_desc_addr = gem_get_tx_desc_addr(s, q);

        DB_PRINT("read descriptor 0x%" HWADDR_PRIx "\n", packet_desc_addr);
        address_space_read(&s->dma_as, packet_desc_addr,
                           MEMTXATTRS_UNSPECIFIED, desc,
                           sizeof(uint32_t) * gem_get_desc_len(s, false));

        /* Handle all descriptors owned by hardware */
        while (tx_desc_get_used(desc) == 0) {
            /* Do nothing if transmit is not enabled. */
            if (!FIELD_EX32(s->regs[R_NWCTRL], NWCTRL, ENABLE_TRANSMIT)) {
                return;
            }
            print_gem_tx_desc(desc, q);

            /* The real hardware would eat this (and possibly crash).
             * For QEMU let's lend a helping hand.
             */
            if ((tx_desc_get_buffer(s, desc) == 0) ||
                (tx_desc_get_length(desc) == 0)) {
                DB_PRINT("Invalid TX descriptor @ 0x%" HWADDR_PRIx "\n",
                         packet_desc_addr);
                break;
            }

            if (tx_desc_get_length(desc) > gem_get_max_buf_len(s, true) -
                                               (p - s->tx_packet)) {
                qemu_log_mask(LOG_GUEST_ERROR, "TX descriptor @ 0x%" \
                              HWADDR_PRIx " too large: size 0x%x space 0x%zx\n",
                              packet_desc_addr, tx_desc_get_length(desc),
                              gem_get_max_buf_len(s, true) -
                              (p - s->tx_packet));
                gem_set_isr(s, q, R_ISR_AMBA_ERROR_MASK);
                break;
            }

            /* Gather this fragment of the packet from "dma memory" to our
             * contig buffer.
             */
            address_space_read(&s->dma_as, tx_desc_get_buffer(s, desc),
                               MEMTXATTRS_UNSPECIFIED,
                               p, tx_desc_get_length(desc));
            p += tx_desc_get_length(desc);
            total_bytes += tx_desc_get_length(desc);

            /* Last descriptor for this packet; hand the whole thing off */
            if (tx_desc_get_last(desc)) {
                uint32_t desc_first[DESC_MAX_NUM_WORDS];
                hwaddr desc_addr = gem_get_tx_desc_addr(s, q);

                /* Modify the 1st descriptor of this packet to be owned by
                 * the processor.
                 */
                address_space_read(&s->dma_as, desc_addr,
                                   MEMTXATTRS_UNSPECIFIED, desc_first,
                                   sizeof(desc_first));
                tx_desc_set_used(desc_first);
                address_space_write(&s->dma_as, desc_addr,
                                    MEMTXATTRS_UNSPECIFIED, desc_first,
                                    sizeof(desc_first));
                /* Advance the hardware current descriptor past this packet */
                if (tx_desc_get_wrap(desc)) {
                    s->tx_desc_addr[q] = gem_get_tx_queue_base_addr(s, q);
                } else {
                    s->tx_desc_addr[q] = packet_desc_addr +
                                         4 * gem_get_desc_len(s, false);
                }
                DB_PRINT("TX descriptor next: 0x%08x\n", s->tx_desc_addr[q]);

                s->regs[R_TXSTATUS] |= R_TXSTATUS_TRANSMIT_COMPLETE_MASK;
                gem_set_isr(s, q, R_ISR_XMIT_COMPLETE_MASK);

                /* Handle interrupt consequences */
                gem_update_int_status(s);

                /* Is checksum offload enabled? */
                if (FIELD_EX32(s->regs[R_DMACFG], DMACFG,
                               TX_PBUF_CSUM_OFFLOAD)) {
                    net_checksum_calculate(s->tx_packet, total_bytes,
                                           CSUM_ALL);
                }

                /* Update MAC statistics */
                gem_transmit_updatestats(s, s->tx_packet, total_bytes);

                /* Send the packet somewhere */
                if (s->phy_loop || FIELD_EX32(s->regs[R_NWCTRL], NWCTRL,
                                              LOOPBACK_LOCAL)) {
                    qemu_receive_packet(qemu_get_queue(s->nic), s->tx_packet,
                                        total_bytes);
                } else {
                    qemu_send_packet(qemu_get_queue(s->nic), s->tx_packet,
                                     total_bytes);
                }

                /* Prepare for next packet */
                p = s->tx_packet;
                total_bytes = 0;
            }

            /* read next descriptor */
            if (tx_desc_get_wrap(desc)) {
                if (FIELD_EX32(s->regs[R_DMACFG], DMACFG,
                               DMA_ADDR_BUS_WIDTH)) {
                    packet_desc_addr = s->regs[R_TBQPH];
                    packet_desc_addr <<= 32;
                } else {
                    packet_desc_addr = 0;
                }
                packet_desc_addr |= gem_get_tx_queue_base_addr(s, q);
            } else {
                packet_desc_addr += 4 * gem_get_desc_len(s, false);
            }
            DB_PRINT("read descriptor 0x%" HWADDR_PRIx "\n",
                     packet_desc_addr);
            address_space_read(&s->dma_as, packet_desc_addr,
                               MEMTXATTRS_UNSPECIFIED, desc,
                               sizeof(uint32_t) * gem_get_desc_len(s, false));
        }

        if (tx_desc_get_used(desc)) {
            s->regs[R_TXSTATUS] |= R_TXSTATUS_USED_BIT_READ_MASK;
            /* IRQ TXUSED is defined only for queue 0 */
            if (q == 0) {
                gem_set_isr(s, 0, R_ISR_TX_USED_MASK);
            }
            gem_update_int_status(s);
        }
    }
}
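
/*
 * Note on gem_transmit(): queues are walked from the highest-numbered
 * priority queue down to queue 0, gathering scattered fragments into
 * s->tx_packet until a descriptor with the "last" bit is seen; the
 * TXUSED interrupt is only ever raised for queue 0.
 */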

static void gem_phy_reset(CadenceGEMState *s)
{
    memset(&s->phy_regs[0], 0, sizeof(s->phy_regs));
    s->phy_regs[PHY_REG_CONTROL] = 0x1140;
    s->phy_regs[PHY_REG_STATUS] = 0x7969;
    s->phy_regs[PHY_REG_PHYID1] = 0x0141;
    s->phy_regs[PHY_REG_PHYID2] = 0x0CC2;
    s->phy_regs[PHY_REG_ANEGADV] = 0x01E1;
    s->phy_regs[PHY_REG_LINKPABIL] = 0xCDE1;
    s->phy_regs[PHY_REG_ANEGEXP] = 0x000F;
    s->phy_regs[PHY_REG_NEXTP] = 0x2001;
    s->phy_regs[PHY_REG_LINKPNEXTP] = 0x40E6;
    s->phy_regs[PHY_REG_100BTCTRL] = 0x0300;
    s->phy_regs[PHY_REG_1000BTSTAT] = 0x7C00;
    s->phy_regs[PHY_REG_EXTSTAT] = 0x3000;
    s->phy_regs[PHY_REG_PHYSPCFC_CTL] = 0x0078;
    s->phy_regs[PHY_REG_PHYSPCFC_ST] = 0x7C00;
    s->phy_regs[PHY_REG_EXT_PHYSPCFC_CTL] = 0x0C60;
    s->phy_regs[PHY_REG_LED] = 0x4100;
    s->phy_regs[PHY_REG_EXT_PHYSPCFC_CTL2] = 0x000A;
    s->phy_regs[PHY_REG_EXT_PHYSPCFC_ST] = 0x848B;

    phy_update_link(s);
}
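
/*
 * The identifier values programmed above (PHYID1 0x0141, PHYID2 0x0CC2)
 * present the emulated PHY as a Marvell part, matching the "Marvell PHY
 * definitions" register map earlier in this file.
 */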
static void gem_reset(DeviceState *d)
{
    int i;
    CadenceGEMState *s = CADENCE_GEM(d);
    const uint8_t *a;
    uint32_t queues_mask = 0;

    DB_PRINT("\n");

    /* Set post reset register values */
    memset(&s->regs[0], 0, sizeof(s->regs));
    s->regs[R_NWCFG] = 0x00080000;
    s->regs[R_NWSTATUS] = 0x00000006;
    s->regs[R_DMACFG] = 0x00020784;
    s->regs[R_IMR] = 0x07ffffff;
    s->regs[R_TXPAUSE] = 0x0000ffff;
    s->regs[R_TXPARTIALSF] = 0x000003ff;
    s->regs[R_RXPARTIALSF] = 0x000003ff;
    s->regs[R_MODID] = s->revision;
    s->regs[R_DESCONF] = 0x02D00111;
    s->regs[R_DESCONF2] = 0x2ab10000 | s->jumbo_max_len;
    s->regs[R_DESCONF5] = 0x002f2045;
    s->regs[R_DESCONF6] = R_DESCONF6_DMA_ADDR_64B_MASK;
    s->regs[R_INT_Q1_MASK] = 0x00000CE6;
    s->regs[R_JUMBO_MAX_LEN] = s->jumbo_max_len;

    if (s->num_priority_queues > 1) {
        queues_mask = MAKE_64BIT_MASK(1, s->num_priority_queues - 1);
        s->regs[R_DESCONF6] |= queues_mask;
    }

    /* Set MAC address */
    a = &s->conf.macaddr.a[0];
    s->regs[R_SPADDR1LO] = a[0] | (a[1] << 8) | (a[2] << 16) | (a[3] << 24);
    s->regs[R_SPADDR1HI] = a[4] | (a[5] << 8);

    for (i = 0; i < 4; i++) {
        s->sar_active[i] = false;
    }

    gem_phy_reset(s);

    gem_update_int_status(s);
}

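/*
 * MDIO helpers: the PHY register file is modelled as a plain array.
 * Reads have no side effects; writes implement the control-register
 * behaviour below (soft reset, instant autonegotiation completion,
 * loopback).
 */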
static uint16_t gem_phy_read(CadenceGEMState *s, unsigned reg_num)
{
    DB_PRINT("reg: %d value: 0x%04x\n", reg_num, s->phy_regs[reg_num]);
    return s->phy_regs[reg_num];
}

static void gem_phy_write(CadenceGEMState *s, unsigned reg_num, uint16_t val)
{
    DB_PRINT("reg: %d value: 0x%04x\n", reg_num, val);

    switch (reg_num) {
    case PHY_REG_CONTROL:
        if (val & PHY_REG_CONTROL_RST) {
            /* Phy reset */
            gem_phy_reset(s);
            val &= ~(PHY_REG_CONTROL_RST | PHY_REG_CONTROL_LOOP);
            s->phy_loop = 0;
        }
        if (val & PHY_REG_CONTROL_ANEG) {
            /* Complete autonegotiation immediately */
            val &= ~(PHY_REG_CONTROL_ANEG | PHY_REG_CONTROL_ANRESTART);
            s->phy_regs[PHY_REG_STATUS] |= PHY_REG_STATUS_ANEGCMPL;
        }
        if (val & PHY_REG_CONTROL_LOOP) {
            DB_PRINT("PHY placed in loopback\n");
            s->phy_loop = 1;
        } else {
            s->phy_loop = 0;
        }
        break;
    }
    s->phy_regs[reg_num] = val;
}

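/*
 * Service an MDIO transaction latched in the PHYMNTNC shuttle register.
 * Only IEEE 802.3 clause 22 operations are handled; a read addressed to
 * an absent PHY returns all-ones in the DATA field, matching an idle
 * (pulled-up) MDIO bus.
 */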
static void gem_handle_phy_access(CadenceGEMState *s)
{
    uint32_t val = s->regs[R_PHYMNTNC];
    uint32_t phy_addr, reg_num;

    phy_addr = FIELD_EX32(val, PHYMNTNC, PHY_ADDR);

    if (phy_addr != s->phy_addr) {
        /* no phy at this address */
        if (FIELD_EX32(val, PHYMNTNC, OP) == MDIO_OP_READ) {
            s->regs[R_PHYMNTNC] = FIELD_DP32(val, PHYMNTNC, DATA, 0xffff);
        }
        return;
    }

    reg_num = FIELD_EX32(val, PHYMNTNC, REG_ADDR);

    switch (FIELD_EX32(val, PHYMNTNC, OP)) {
    case MDIO_OP_READ:
        s->regs[R_PHYMNTNC] = FIELD_DP32(val, PHYMNTNC, DATA,
                                         gem_phy_read(s, reg_num));
        break;
    case MDIO_OP_WRITE:
        gem_phy_write(s, reg_num, val);
        break;
    default:
        break; /* only clause 22 operations are supported */
    }
}

/*
 * gem_read:
 * Read a GEM register.
 */
static uint64_t gem_read(void *opaque, hwaddr offset, unsigned size)
{
    CadenceGEMState *s;
    uint32_t retval;

    s = opaque;

    offset >>= 2;
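    /*
     * offset is now a regs[] index: registers are 32 bits wide, so e.g.
     * an access at byte offset 0x10 reads regs[4].
     */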
    retval = s->regs[offset];

    DB_PRINT("offset: 0x%04x read: 0x%08x\n", (unsigned)offset * 4, retval);

    switch (offset) {
    case R_ISR:
        DB_PRINT("lowering irqs on ISR read\n");
        /* The interrupts get updated at the end of the function. */
        break;
    }

    /* Squash read to clear bits */
    s->regs[offset] &= ~(s->regs_rtc[offset]);

    /* Do not provide write only bits */
    retval &= ~(s->regs_wo[offset]);

    DB_PRINT("0x%08x\n", retval);
    gem_update_int_status(s);
    return retval;
}

/*
 * gem_write:
 * Write a GEM register.
 */
static void gem_write(void *opaque, hwaddr offset, uint64_t val,
                      unsigned size)
{
    CadenceGEMState *s = (CadenceGEMState *)opaque;
    uint32_t readonly;
    int i;

    DB_PRINT("offset: 0x%04x write: 0x%08x ", (unsigned)offset, (unsigned)val);
    offset >>= 2;

    /* Squash bits which are read only in write value */
    val &= ~(s->regs_ro[offset]);
    /* Preserve (only) bits which are read only and w1c in register */
    readonly = s->regs[offset] & (s->regs_ro[offset] | s->regs_w1c[offset]);
    /* Copy register write to backing store */
    s->regs[offset] = (val & ~s->regs_w1c[offset]) | readonly;
    /* do w1c */
    s->regs[offset] &= ~(s->regs_w1c[offset] & val);
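
    /*
     * Worked example of the masking above, using hypothetical masks:
     * with regs_ro = 0xff000000, regs_w1c = 0x000000ff, an old value of
     * 0xab0000ff and a guest write of 0x1234567f:
     *   val &= ~regs_ro          -> 0x0034567f (read-only bits dropped)
     *   readonly                 =  0xab0000ff (old RO + W1C bits kept)
     *   regs = (val & ~w1c) | ro -> 0xab3456ff
     *   regs &= ~(w1c & val)     -> 0xab345680 (W1C bits written as 1 clear)
     */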

    /* Handle register write side effects */
    switch (offset) {
    case R_NWCTRL:
        if (FIELD_EX32(val, NWCTRL, ENABLE_RECEIVE)) {
            for (i = 0; i < s->num_priority_queues; ++i) {
                gem_get_rx_desc(s, i);
            }
        }
        if (FIELD_EX32(val, NWCTRL, TRANSMIT_START)) {
            gem_transmit(s);
        }
        if (!(FIELD_EX32(val, NWCTRL, ENABLE_TRANSMIT))) {
            /* Reset to start of Q when transmit disabled. */
            for (i = 0; i < s->num_priority_queues; i++) {
                s->tx_desc_addr[i] = gem_get_tx_queue_base_addr(s, i);
            }
        }
        if (gem_can_receive(qemu_get_queue(s->nic))) {
            qemu_flush_queued_packets(qemu_get_queue(s->nic));
        }
        break;
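    /*
     * TXSTATUS and RXSTATUS are write-one-to-clear; the generic w1c
     * handling above has already cleared the written bits, so the only
     * side effect left is to recompute the interrupt lines.
     */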
    case R_TXSTATUS:
        gem_update_int_status(s);
        break;
    case R_RXQBASE:
        s->rx_desc_addr[0] = val;
        break;
    case R_RECEIVE_Q1_PTR ... R_RECEIVE_Q7_PTR:
        s->rx_desc_addr[offset - R_RECEIVE_Q1_PTR + 1] = val;
        break;
    case R_TXQBASE:
        s->tx_desc_addr[0] = val;
        break;
    case R_TRANSMIT_Q1_PTR ... R_TRANSMIT_Q7_PTR:
        s->tx_desc_addr[offset - R_TRANSMIT_Q1_PTR + 1] = val;
        break;
    case R_RXSTATUS:
        gem_update_int_status(s);
        break;
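    /*
     * IMR is a mask, not an enable: a 0 bit unmasks the interrupt.
     * Writing IER therefore clears mask bits and writing IDR sets them;
     * the same convention applies to the per-queue enable/disable pairs.
     */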
    case R_IER:
        s->regs[R_IMR] &= ~val;
        gem_update_int_status(s);
        break;
    case R_JUMBO_MAX_LEN:
        s->regs[R_JUMBO_MAX_LEN] = val & MAX_JUMBO_FRAME_SIZE_MASK;
        break;
    case R_INT_Q1_ENABLE ... R_INT_Q7_ENABLE:
        s->regs[R_INT_Q1_MASK + offset - R_INT_Q1_ENABLE] &= ~val;
        gem_update_int_status(s);
        break;
    case R_IDR:
        s->regs[R_IMR] |= val;
        gem_update_int_status(s);
        break;
    case R_INT_Q1_DISABLE ... R_INT_Q7_DISABLE:
        s->regs[R_INT_Q1_MASK + offset - R_INT_Q1_DISABLE] |= val;
        gem_update_int_status(s);
        break;
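    /*
     * Writing the low half of a specific-address register deactivates
     * filtering on that address until the high half is written, so a
     * 48-bit MAC address update never matches against a torn value.
     */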
    case R_SPADDR1LO:
    case R_SPADDR2LO:
    case R_SPADDR3LO:
    case R_SPADDR4LO:
        s->sar_active[(offset - R_SPADDR1LO) / 2] = false;
        break;
    case R_SPADDR1HI:
    case R_SPADDR2HI:
    case R_SPADDR3HI:
    case R_SPADDR4HI:
        s->sar_active[(offset - R_SPADDR1HI) / 2] = true;
        break;
    case R_PHYMNTNC:
        gem_handle_phy_access(s);
        break;
    }

    DB_PRINT("newval: 0x%08x\n", s->regs[offset]);
}

static const MemoryRegionOps gem_ops = {
    .read = gem_read,
    .write = gem_write,
    .endianness = DEVICE_LITTLE_ENDIAN,
};

static void gem_set_link(NetClientState *nc)
{
    CadenceGEMState *s = qemu_get_nic_opaque(nc);

    DB_PRINT("\n");
    phy_update_link(s);
    gem_update_int_status(s);
}

static NetClientInfo net_gem_info = {
    .type = NET_CLIENT_DRIVER_NIC,
    .size = sizeof(NICState),
    .can_receive = gem_can_receive,
    .receive = gem_receive,
    .link_status_changed = gem_set_link,
};

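/*
 * realize: validate properties and create the backend objects. DMA goes
 * through the "dma" link property when the board sets one, otherwise
 * straight to the system memory address space.
 */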
static void gem_realize(DeviceState *dev, Error **errp)
{
    CadenceGEMState *s = CADENCE_GEM(dev);
    int i;

    address_space_init(&s->dma_as,
                       s->dma_mr ? s->dma_mr : get_system_memory(), "dma");

    if (s->num_priority_queues == 0 ||
        s->num_priority_queues > MAX_PRIORITY_QUEUES) {
        error_setg(errp, "Invalid num-priority-queues value: %" PRIx8,
                   s->num_priority_queues);
        return;
    } else if (s->num_type1_screeners > MAX_TYPE1_SCREENERS) {
        error_setg(errp, "Invalid num-type1-screeners value: %" PRIx8,
                   s->num_type1_screeners);
        return;
    } else if (s->num_type2_screeners > MAX_TYPE2_SCREENERS) {
        error_setg(errp, "Invalid num-type2-screeners value: %" PRIx8,
                   s->num_type2_screeners);
        return;
    }

    /*
     * Validate jumbo-max-len here, before the NIC backend is created, so
     * a failed realize does not leave a dangling NIC behind.
     */
    if (s->jumbo_max_len > MAX_FRAME_SIZE) {
        error_setg(errp, "jumbo-max-len is greater than %d", MAX_FRAME_SIZE);
        return;
    }

    for (i = 0; i < s->num_priority_queues; ++i) {
        sysbus_init_irq(SYS_BUS_DEVICE(dev), &s->irq[i]);
    }

    qemu_macaddr_default_if_unset(&s->conf.macaddr);

    s->nic = qemu_new_nic(&net_gem_info, &s->conf,
                          object_get_typename(OBJECT(dev)), dev->id,
                          &dev->mem_reentrancy_guard, s);
}

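/*
 * instance_init: per QOM convention, only side-effect-free setup happens
 * here (register masks, MMIO region); anything that can fail is deferred
 * to gem_realize() above.
 */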
static void gem_init(Object *obj)
{
    CadenceGEMState *s = CADENCE_GEM(obj);
    DeviceState *dev = DEVICE(obj);

    DB_PRINT("\n");

    gem_init_register_masks(s);
    memory_region_init_io(&s->iomem, OBJECT(s), &gem_ops, s,
                          "enet", sizeof(s->regs));

    sysbus_init_mmio(SYS_BUS_DEVICE(dev), &s->iomem);
}

static const VMStateDescription vmstate_cadence_gem = {
    .name = "cadence_gem",
    .version_id = 4,
    .minimum_version_id = 4,
    .fields = (const VMStateField[]) {
        VMSTATE_UINT32_ARRAY(regs, CadenceGEMState, CADENCE_GEM_MAXREG),
        VMSTATE_UINT16_ARRAY(phy_regs, CadenceGEMState, 32),
        VMSTATE_UINT8(phy_loop, CadenceGEMState),
        VMSTATE_UINT32_ARRAY(rx_desc_addr, CadenceGEMState,
                             MAX_PRIORITY_QUEUES),
        VMSTATE_UINT32_ARRAY(tx_desc_addr, CadenceGEMState,
                             MAX_PRIORITY_QUEUES),
        VMSTATE_BOOL_ARRAY(sar_active, CadenceGEMState, 4),
        VMSTATE_END_OF_LIST(),
    }
};

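/*
 * Migration note: version_id and minimum_version_id are both 4, so this
 * device will not accept a stream recorded with an older vmstate layout;
 * any change to the fields above requires bumping them.
 */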
static const Property gem_properties[] = {
    DEFINE_NIC_PROPERTIES(CadenceGEMState, conf),
    DEFINE_PROP_UINT32("revision", CadenceGEMState, revision,
                       GEM_MODID_VALUE),
    DEFINE_PROP_UINT8("phy-addr", CadenceGEMState, phy_addr, BOARD_PHY_ADDRESS),
    DEFINE_PROP_UINT8("num-priority-queues", CadenceGEMState,
                      num_priority_queues, 1),
    DEFINE_PROP_UINT8("num-type1-screeners", CadenceGEMState,
                      num_type1_screeners, 4),
    DEFINE_PROP_UINT8("num-type2-screeners", CadenceGEMState,
                      num_type2_screeners, 4),
    DEFINE_PROP_UINT16("jumbo-max-len", CadenceGEMState,
                       jumbo_max_len, 10240),
    DEFINE_PROP_LINK("dma", CadenceGEMState, dma_mr,
                     TYPE_MEMORY_REGION, MemoryRegion *),
};

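/*
 * Board code sets these properties before realize; a hypothetical caller
 * might look like:
 *
 *   DeviceState *dev = qdev_new(TYPE_CADENCE_GEM);
 *   object_property_set_int(OBJECT(dev), "num-priority-queues", 2,
 *                           &error_fatal);
 *   sysbus_realize_and_unref(SYS_BUS_DEVICE(dev), &error_fatal);
 */
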
static void gem_class_init(ObjectClass *klass, void *data)
{
    DeviceClass *dc = DEVICE_CLASS(klass);

    dc->realize = gem_realize;
    device_class_set_props(dc, gem_properties);
    dc->vmsd = &vmstate_cadence_gem;
    device_class_set_legacy_reset(dc, gem_reset);
}

static const TypeInfo gem_info = {
    .name = TYPE_CADENCE_GEM,
    .parent = TYPE_SYS_BUS_DEVICE,
    .instance_size = sizeof(CadenceGEMState),
    .instance_init = gem_init,
    .class_init = gem_class_init,
};

static void gem_register_types(void)
{
    type_register_static(&gem_info);
}

type_init(gem_register_types)