  1. /*
  2. * QEMU Cadence GEM emulation
  3. *
  4. * Copyright (c) 2011 Xilinx, Inc.
  5. *
  6. * Permission is hereby granted, free of charge, to any person obtaining a copy
  7. * of this software and associated documentation files (the "Software"), to deal
  8. * in the Software without restriction, including without limitation the rights
  9. * to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
  10. * copies of the Software, and to permit persons to whom the Software is
  11. * furnished to do so, subject to the following conditions:
  12. *
  13. * The above copyright notice and this permission notice shall be included in
  14. * all copies or substantial portions of the Software.
  15. *
  16. * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
  17. * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
  18. * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
  19. * THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
  20. * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
  21. * OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN
  22. * THE SOFTWARE.
  23. */
  24. #include "qemu/osdep.h"
  25. #include <zlib.h> /* For crc32 */
  26. #include "hw/irq.h"
  27. #include "hw/net/cadence_gem.h"
  28. #include "hw/qdev-properties.h"
  29. #include "migration/vmstate.h"
  30. #include "qapi/error.h"
  31. #include "qemu/log.h"
  32. #include "qemu/module.h"
  33. #include "sysemu/dma.h"
  34. #include "net/checksum.h"
  35. #include "net/eth.h"
  36. #define CADENCE_GEM_ERR_DEBUG 0
  37. #define DB_PRINT(...) do {\
  38. if (CADENCE_GEM_ERR_DEBUG) { \
  39. qemu_log(": %s: ", __func__); \
  40. qemu_log(__VA_ARGS__); \
  41. } \
  42. } while (0)
  43. #define GEM_NWCTRL (0x00000000 / 4) /* Network Control reg */
  44. #define GEM_NWCFG (0x00000004 / 4) /* Network Config reg */
  45. #define GEM_NWSTATUS (0x00000008 / 4) /* Network Status reg */
  46. #define GEM_USERIO (0x0000000C / 4) /* User IO reg */
  47. #define GEM_DMACFG (0x00000010 / 4) /* DMA Control reg */
  48. #define GEM_TXSTATUS (0x00000014 / 4) /* TX Status reg */
  49. #define GEM_RXQBASE (0x00000018 / 4) /* RX Q Base address reg */
  50. #define GEM_TXQBASE (0x0000001C / 4) /* TX Q Base address reg */
  51. #define GEM_RXSTATUS (0x00000020 / 4) /* RX Status reg */
  52. #define GEM_ISR (0x00000024 / 4) /* Interrupt Status reg */
  53. #define GEM_IER (0x00000028 / 4) /* Interrupt Enable reg */
  54. #define GEM_IDR (0x0000002C / 4) /* Interrupt Disable reg */
  55. #define GEM_IMR (0x00000030 / 4) /* Interrupt Mask reg */
  56. #define GEM_PHYMNTNC (0x00000034 / 4) /* Phy Maintenance reg */
  57. #define GEM_RXPAUSE (0x00000038 / 4) /* RX Pause Time reg */
  58. #define GEM_TXPAUSE (0x0000003C / 4) /* TX Pause Time reg */
  59. #define GEM_TXPARTIALSF (0x00000040 / 4) /* TX Partial Store and Forward */
  60. #define GEM_RXPARTIALSF (0x00000044 / 4) /* RX Partial Store and Forward */
  61. #define GEM_JUMBO_MAX_LEN (0x00000048 / 4) /* Max Jumbo Frame Size */
  62. #define GEM_HASHLO (0x00000080 / 4) /* Hash Low address reg */
  63. #define GEM_HASHHI (0x00000084 / 4) /* Hash High address reg */
  64. #define GEM_SPADDR1LO (0x00000088 / 4) /* Specific addr 1 low reg */
  65. #define GEM_SPADDR1HI (0x0000008C / 4) /* Specific addr 1 high reg */
  66. #define GEM_SPADDR2LO (0x00000090 / 4) /* Specific addr 2 low reg */
  67. #define GEM_SPADDR2HI (0x00000094 / 4) /* Specific addr 2 high reg */
  68. #define GEM_SPADDR3LO (0x00000098 / 4) /* Specific addr 3 low reg */
  69. #define GEM_SPADDR3HI (0x0000009C / 4) /* Specific addr 3 high reg */
  70. #define GEM_SPADDR4LO (0x000000A0 / 4) /* Specific addr 4 low reg */
  71. #define GEM_SPADDR4HI (0x000000A4 / 4) /* Specific addr 4 high reg */
  72. #define GEM_TIDMATCH1 (0x000000A8 / 4) /* Type ID1 Match reg */
  73. #define GEM_TIDMATCH2 (0x000000AC / 4) /* Type ID2 Match reg */
  74. #define GEM_TIDMATCH3 (0x000000B0 / 4) /* Type ID3 Match reg */
  75. #define GEM_TIDMATCH4 (0x000000B4 / 4) /* Type ID4 Match reg */
  76. #define GEM_WOLAN (0x000000B8 / 4) /* Wake on LAN reg */
  77. #define GEM_IPGSTRETCH (0x000000BC / 4) /* IPG Stretch reg */
  78. #define GEM_SVLAN (0x000000C0 / 4) /* Stacked VLAN reg */
  79. #define GEM_MODID (0x000000FC / 4) /* Module ID reg */
  80. #define GEM_OCTTXLO (0x00000100 / 4) /* Octects transmitted Low reg */
  81. #define GEM_OCTTXHI (0x00000104 / 4) /* Octects transmitted High reg */
  82. #define GEM_TXCNT (0x00000108 / 4) /* Error-free Frames transmitted */
  83. #define GEM_TXBCNT (0x0000010C / 4) /* Error-free Broadcast Frames */
  84. #define GEM_TXMCNT (0x00000110 / 4) /* Error-free Multicast Frame */
  85. #define GEM_TXPAUSECNT (0x00000114 / 4) /* Pause Frames Transmitted */
  86. #define GEM_TX64CNT (0x00000118 / 4) /* Error-free 64 TX */
  87. #define GEM_TX65CNT (0x0000011C / 4) /* Error-free 65-127 TX */
  88. #define GEM_TX128CNT (0x00000120 / 4) /* Error-free 128-255 TX */
  89. #define GEM_TX256CNT (0x00000124 / 4) /* Error-free 256-511 */
  90. #define GEM_TX512CNT (0x00000128 / 4) /* Error-free 512-1023 TX */
  91. #define GEM_TX1024CNT (0x0000012C / 4) /* Error-free 1024-1518 TX */
  92. #define GEM_TX1519CNT (0x00000130 / 4) /* Error-free larger than 1519 TX */
  93. #define GEM_TXURUNCNT (0x00000134 / 4) /* TX under run error counter */
  94. #define GEM_SINGLECOLLCNT (0x00000138 / 4) /* Single Collision Frames */
  95. #define GEM_MULTCOLLCNT (0x0000013C / 4) /* Multiple Collision Frames */
  96. #define GEM_EXCESSCOLLCNT (0x00000140 / 4) /* Excessive Collision Frames */
  97. #define GEM_LATECOLLCNT (0x00000144 / 4) /* Late Collision Frames */
  98. #define GEM_DEFERTXCNT (0x00000148 / 4) /* Deferred Transmission Frames */
  99. #define GEM_CSENSECNT (0x0000014C / 4) /* Carrier Sense Error Counter */
  100. #define GEM_OCTRXLO (0x00000150 / 4) /* Octects Received register Low */
  101. #define GEM_OCTRXHI (0x00000154 / 4) /* Octects Received register High */
  102. #define GEM_RXCNT (0x00000158 / 4) /* Error-free Frames Received */
  103. #define GEM_RXBROADCNT (0x0000015C / 4) /* Error-free Broadcast Frames RX */
  104. #define GEM_RXMULTICNT (0x00000160 / 4) /* Error-free Multicast Frames RX */
  105. #define GEM_RXPAUSECNT (0x00000164 / 4) /* Pause Frames Received Counter */
  106. #define GEM_RX64CNT (0x00000168 / 4) /* Error-free 64 byte Frames RX */
  107. #define GEM_RX65CNT (0x0000016C / 4) /* Error-free 65-127B Frames RX */
  108. #define GEM_RX128CNT (0x00000170 / 4) /* Error-free 128-255B Frames RX */
  109. #define GEM_RX256CNT (0x00000174 / 4) /* Error-free 256-512B Frames RX */
  110. #define GEM_RX512CNT (0x00000178 / 4) /* Error-free 512-1023B Frames RX */
  111. #define GEM_RX1024CNT (0x0000017C / 4) /* Error-free 1024-1518B Frames RX */
  112. #define GEM_RX1519CNT (0x00000180 / 4) /* Error-free 1519-max Frames RX */
  113. #define GEM_RXUNDERCNT (0x00000184 / 4) /* Undersize Frames Received */
  114. #define GEM_RXOVERCNT (0x00000188 / 4) /* Oversize Frames Received */
  115. #define GEM_RXJABCNT (0x0000018C / 4) /* Jabbers Received Counter */
  116. #define GEM_RXFCSCNT (0x00000190 / 4) /* Frame Check seq. Error Counter */
  117. #define GEM_RXLENERRCNT (0x00000194 / 4) /* Length Field Error Counter */
  118. #define GEM_RXSYMERRCNT (0x00000198 / 4) /* Symbol Error Counter */
  119. #define GEM_RXALIGNERRCNT (0x0000019C / 4) /* Alignment Error Counter */
  120. #define GEM_RXRSCERRCNT (0x000001A0 / 4) /* Receive Resource Error Counter */
  121. #define GEM_RXORUNCNT (0x000001A4 / 4) /* Receive Overrun Counter */
  122. #define GEM_RXIPCSERRCNT (0x000001A8 / 4) /* IP header Checksum Err Counter */
  123. #define GEM_RXTCPCCNT (0x000001AC / 4) /* TCP Checksum Error Counter */
  124. #define GEM_RXUDPCCNT (0x000001B0 / 4) /* UDP Checksum Error Counter */
  125. #define GEM_1588S (0x000001D0 / 4) /* 1588 Timer Seconds */
  126. #define GEM_1588NS (0x000001D4 / 4) /* 1588 Timer Nanoseconds */
  127. #define GEM_1588ADJ (0x000001D8 / 4) /* 1588 Timer Adjust */
  128. #define GEM_1588INC (0x000001DC / 4) /* 1588 Timer Increment */
  129. #define GEM_PTPETXS (0x000001E0 / 4) /* PTP Event Frame Transmitted (s) */
  130. #define GEM_PTPETXNS (0x000001E4 / 4) /*
  131. * PTP Event Frame Transmitted (ns)
  132. */
  133. #define GEM_PTPERXS (0x000001E8 / 4) /* PTP Event Frame Received (s) */
  134. #define GEM_PTPERXNS (0x000001EC / 4) /* PTP Event Frame Received (ns) */
  135. #define GEM_PTPPTXS (0x000001E0 / 4) /* PTP Peer Frame Transmitted (s) */
  136. #define GEM_PTPPTXNS (0x000001E4 / 4) /* PTP Peer Frame Transmitted (ns) */
  137. #define GEM_PTPPRXS (0x000001E8 / 4) /* PTP Peer Frame Received (s) */
  138. #define GEM_PTPPRXNS (0x000001EC / 4) /* PTP Peer Frame Received (ns) */
  139. /* Design Configuration Registers */
  140. #define GEM_DESCONF (0x00000280 / 4)
  141. #define GEM_DESCONF2 (0x00000284 / 4)
  142. #define GEM_DESCONF3 (0x00000288 / 4)
  143. #define GEM_DESCONF4 (0x0000028C / 4)
  144. #define GEM_DESCONF5 (0x00000290 / 4)
  145. #define GEM_DESCONF6 (0x00000294 / 4)
  146. #define GEM_DESCONF6_64B_MASK (1U << 23)
  147. #define GEM_DESCONF7 (0x00000298 / 4)
  148. #define GEM_INT_Q1_STATUS (0x00000400 / 4)
  149. #define GEM_INT_Q1_MASK (0x00000640 / 4)
  150. #define GEM_TRANSMIT_Q1_PTR (0x00000440 / 4)
  151. #define GEM_TRANSMIT_Q7_PTR (GEM_TRANSMIT_Q1_PTR + 6)
  152. #define GEM_RECEIVE_Q1_PTR (0x00000480 / 4)
  153. #define GEM_RECEIVE_Q7_PTR (GEM_RECEIVE_Q1_PTR + 6)
  154. #define GEM_TBQPH (0x000004C8 / 4)
  155. #define GEM_RBQPH (0x000004D4 / 4)
  156. #define GEM_INT_Q1_ENABLE (0x00000600 / 4)
  157. #define GEM_INT_Q7_ENABLE (GEM_INT_Q1_ENABLE + 6)
  158. #define GEM_INT_Q1_DISABLE (0x00000620 / 4)
  159. #define GEM_INT_Q7_DISABLE (GEM_INT_Q1_DISABLE + 6)
  160. #define GEM_INT_Q1_MASK (0x00000640 / 4)
  161. #define GEM_INT_Q7_MASK (GEM_INT_Q1_MASK + 6)
  162. #define GEM_SCREENING_TYPE1_REGISTER_0 (0x00000500 / 4)
  163. #define GEM_ST1R_UDP_PORT_MATCH_ENABLE (1 << 29)
  164. #define GEM_ST1R_DSTC_ENABLE (1 << 28)
  165. #define GEM_ST1R_UDP_PORT_MATCH_SHIFT (12)
  166. #define GEM_ST1R_UDP_PORT_MATCH_WIDTH (27 - GEM_ST1R_UDP_PORT_MATCH_SHIFT + 1)
  167. #define GEM_ST1R_DSTC_MATCH_SHIFT (4)
  168. #define GEM_ST1R_DSTC_MATCH_WIDTH (11 - GEM_ST1R_DSTC_MATCH_SHIFT + 1)
  169. #define GEM_ST1R_QUEUE_SHIFT (0)
  170. #define GEM_ST1R_QUEUE_WIDTH (3 - GEM_ST1R_QUEUE_SHIFT + 1)
  171. #define GEM_SCREENING_TYPE2_REGISTER_0 (0x00000540 / 4)
  172. #define GEM_ST2R_COMPARE_A_ENABLE (1 << 18)
  173. #define GEM_ST2R_COMPARE_A_SHIFT (13)
  174. #define GEM_ST2R_COMPARE_WIDTH (17 - GEM_ST2R_COMPARE_A_SHIFT + 1)
  175. #define GEM_ST2R_ETHERTYPE_ENABLE (1 << 12)
  176. #define GEM_ST2R_ETHERTYPE_INDEX_SHIFT (9)
  177. #define GEM_ST2R_ETHERTYPE_INDEX_WIDTH (11 - GEM_ST2R_ETHERTYPE_INDEX_SHIFT \
  178. + 1)
  179. #define GEM_ST2R_QUEUE_SHIFT (0)
  180. #define GEM_ST2R_QUEUE_WIDTH (3 - GEM_ST2R_QUEUE_SHIFT + 1)
  181. #define GEM_SCREENING_TYPE2_ETHERTYPE_REG_0 (0x000006e0 / 4)
  182. #define GEM_TYPE2_COMPARE_0_WORD_0 (0x00000700 / 4)
  183. #define GEM_T2CW1_COMPARE_OFFSET_SHIFT (7)
  184. #define GEM_T2CW1_COMPARE_OFFSET_WIDTH (8 - GEM_T2CW1_COMPARE_OFFSET_SHIFT + 1)
  185. #define GEM_T2CW1_OFFSET_VALUE_SHIFT (0)
  186. #define GEM_T2CW1_OFFSET_VALUE_WIDTH (6 - GEM_T2CW1_OFFSET_VALUE_SHIFT + 1)
  187. /*****************************************/
  188. #define GEM_NWCTRL_TXSTART 0x00000200 /* Transmit Enable */
  189. #define GEM_NWCTRL_TXENA 0x00000008 /* Transmit Enable */
  190. #define GEM_NWCTRL_RXENA 0x00000004 /* Receive Enable */
  191. #define GEM_NWCTRL_LOCALLOOP 0x00000002 /* Local Loopback */
  192. #define GEM_NWCFG_STRIP_FCS 0x00020000 /* Strip FCS field */
  193. #define GEM_NWCFG_LERR_DISC 0x00010000 /* Discard RX frames with len err */
  194. #define GEM_NWCFG_BUFF_OFST_M 0x0000C000 /* Receive buffer offset mask */
  195. #define GEM_NWCFG_BUFF_OFST_S 14 /* Receive buffer offset shift */
  196. #define GEM_NWCFG_RCV_1538 0x00000100 /* Receive 1538 bytes frame */
  197. #define GEM_NWCFG_UCAST_HASH 0x00000080 /* accept unicast if hash match */
  198. #define GEM_NWCFG_MCAST_HASH 0x00000040 /* accept multicast if hash match */
  199. #define GEM_NWCFG_BCAST_REJ 0x00000020 /* Reject broadcast packets */
  200. #define GEM_NWCFG_PROMISC 0x00000010 /* Accept all packets */
  201. #define GEM_NWCFG_JUMBO_FRAME 0x00000008 /* Jumbo Frames enable */
  202. #define GEM_DMACFG_ADDR_64B (1U << 30)
  203. #define GEM_DMACFG_TX_BD_EXT (1U << 29)
  204. #define GEM_DMACFG_RX_BD_EXT (1U << 28)
  205. #define GEM_DMACFG_RBUFSZ_M 0x00FF0000 /* DMA RX Buffer Size mask */
  206. #define GEM_DMACFG_RBUFSZ_S 16 /* DMA RX Buffer Size shift */
  207. #define GEM_DMACFG_RBUFSZ_MUL 64 /* DMA RX Buffer Size multiplier */
  208. #define GEM_DMACFG_TXCSUM_OFFL 0x00000800 /* Transmit checksum offload */
  209. #define GEM_TXSTATUS_TXCMPL 0x00000020 /* Transmit Complete */
  210. #define GEM_TXSTATUS_USED 0x00000001 /* sw owned descriptor encountered */
  211. #define GEM_RXSTATUS_FRMRCVD 0x00000002 /* Frame received */
  212. #define GEM_RXSTATUS_NOBUF 0x00000001 /* Buffer unavailable */
  213. /* GEM_ISR GEM_IER GEM_IDR GEM_IMR */
  214. #define GEM_INT_TXCMPL 0x00000080 /* Transmit Complete */
  215. #define GEM_INT_AMBA_ERR 0x00000040
  216. #define GEM_INT_TXUSED 0x00000008
  217. #define GEM_INT_RXUSED 0x00000004
  218. #define GEM_INT_RXCMPL 0x00000002
  219. #define GEM_PHYMNTNC_OP_R 0x20000000 /* read operation */
  220. #define GEM_PHYMNTNC_OP_W 0x10000000 /* write operation */
  221. #define GEM_PHYMNTNC_ADDR 0x0F800000 /* Address bits */
  222. #define GEM_PHYMNTNC_ADDR_SHFT 23
  223. #define GEM_PHYMNTNC_REG 0x007C0000 /* register bits */
  224. #define GEM_PHYMNTNC_REG_SHIFT 18
  225. /* Marvell PHY definitions */
  226. #define BOARD_PHY_ADDRESS 0 /* PHY address we will emulate a device at */
  227. #define PHY_REG_CONTROL 0
  228. #define PHY_REG_STATUS 1
  229. #define PHY_REG_PHYID1 2
  230. #define PHY_REG_PHYID2 3
  231. #define PHY_REG_ANEGADV 4
  232. #define PHY_REG_LINKPABIL 5
  233. #define PHY_REG_ANEGEXP 6
  234. #define PHY_REG_NEXTP 7
  235. #define PHY_REG_LINKPNEXTP 8
  236. #define PHY_REG_100BTCTRL 9
  237. #define PHY_REG_1000BTSTAT 10
  238. #define PHY_REG_EXTSTAT 15
  239. #define PHY_REG_PHYSPCFC_CTL 16
  240. #define PHY_REG_PHYSPCFC_ST 17
  241. #define PHY_REG_INT_EN 18
  242. #define PHY_REG_INT_ST 19
  243. #define PHY_REG_EXT_PHYSPCFC_CTL 20
  244. #define PHY_REG_RXERR 21
  245. #define PHY_REG_EACD 22
  246. #define PHY_REG_LED 24
  247. #define PHY_REG_LED_OVRD 25
  248. #define PHY_REG_EXT_PHYSPCFC_CTL2 26
  249. #define PHY_REG_EXT_PHYSPCFC_ST 27
  250. #define PHY_REG_CABLE_DIAG 28
  251. #define PHY_REG_CONTROL_RST 0x8000
  252. #define PHY_REG_CONTROL_LOOP 0x4000
  253. #define PHY_REG_CONTROL_ANEG 0x1000
  254. #define PHY_REG_CONTROL_ANRESTART 0x0200
  255. #define PHY_REG_STATUS_LINK 0x0004
  256. #define PHY_REG_STATUS_ANEGCMPL 0x0020
  257. #define PHY_REG_INT_ST_ANEGCMPL 0x0800
  258. #define PHY_REG_INT_ST_LINKC 0x0400
  259. #define PHY_REG_INT_ST_ENERGY 0x0010
  260. /***********************************************************************/
  261. #define GEM_RX_REJECT (-1)
  262. #define GEM_RX_PROMISCUOUS_ACCEPT (-2)
  263. #define GEM_RX_BROADCAST_ACCEPT (-3)
  264. #define GEM_RX_MULTICAST_HASH_ACCEPT (-4)
  265. #define GEM_RX_UNICAST_HASH_ACCEPT (-5)
  266. #define GEM_RX_SAR_ACCEPT 0
  267. /***********************************************************************/
  268. #define DESC_1_USED 0x80000000
  269. #define DESC_1_LENGTH 0x00001FFF
  270. #define DESC_1_TX_WRAP 0x40000000
  271. #define DESC_1_TX_LAST 0x00008000
  272. #define DESC_0_RX_WRAP 0x00000002
  273. #define DESC_0_RX_OWNERSHIP 0x00000001
  274. #define R_DESC_1_RX_SAR_SHIFT 25
  275. #define R_DESC_1_RX_SAR_LENGTH 2
  276. #define R_DESC_1_RX_SAR_MATCH (1 << 27)
  277. #define R_DESC_1_RX_UNICAST_HASH (1 << 29)
  278. #define R_DESC_1_RX_MULTICAST_HASH (1 << 30)
  279. #define R_DESC_1_RX_BROADCAST (1 << 31)
  280. #define DESC_1_RX_SOF 0x00004000
  281. #define DESC_1_RX_EOF 0x00008000
  282. #define GEM_MODID_VALUE 0x00020118
  283. static inline uint64_t tx_desc_get_buffer(CadenceGEMState *s, uint32_t *desc)
  284. {
  285. uint64_t ret = desc[0];
  286. if (s->regs[GEM_DMACFG] & GEM_DMACFG_ADDR_64B) {
  287. ret |= (uint64_t)desc[2] << 32;
  288. }
  289. return ret;
  290. }
  291. static inline unsigned tx_desc_get_used(uint32_t *desc)
  292. {
  293. return (desc[1] & DESC_1_USED) ? 1 : 0;
  294. }
  295. static inline void tx_desc_set_used(uint32_t *desc)
  296. {
  297. desc[1] |= DESC_1_USED;
  298. }
  299. static inline unsigned tx_desc_get_wrap(uint32_t *desc)
  300. {
  301. return (desc[1] & DESC_1_TX_WRAP) ? 1 : 0;
  302. }
  303. static inline unsigned tx_desc_get_last(uint32_t *desc)
  304. {
  305. return (desc[1] & DESC_1_TX_LAST) ? 1 : 0;
  306. }
  307. static inline unsigned tx_desc_get_length(uint32_t *desc)
  308. {
  309. return desc[1] & DESC_1_LENGTH;
  310. }
/* Debug helper: dump a TX descriptor's decoded fields for the given queue.
 * Output only appears when CADENCE_GEM_ERR_DEBUG is non-zero (DB_PRINT is
 * a no-op otherwise).
 */
static inline void print_gem_tx_desc(uint32_t *desc, uint8_t queue)
{
    DB_PRINT("TXDESC (queue %" PRId8 "):\n", queue);
    DB_PRINT("bufaddr: 0x%08x\n", *desc);
    DB_PRINT("used_hw: %d\n", tx_desc_get_used(desc));
    DB_PRINT("wrap: %d\n", tx_desc_get_wrap(desc));
    DB_PRINT("last: %d\n", tx_desc_get_last(desc));
    DB_PRINT("length: %d\n", tx_desc_get_length(desc));
}
  320. static inline uint64_t rx_desc_get_buffer(CadenceGEMState *s, uint32_t *desc)
  321. {
  322. uint64_t ret = desc[0] & ~0x3UL;
  323. if (s->regs[GEM_DMACFG] & GEM_DMACFG_ADDR_64B) {
  324. ret |= (uint64_t)desc[2] << 32;
  325. }
  326. return ret;
  327. }
  328. static inline int gem_get_desc_len(CadenceGEMState *s, bool rx_n_tx)
  329. {
  330. int ret = 2;
  331. if (s->regs[GEM_DMACFG] & GEM_DMACFG_ADDR_64B) {
  332. ret += 2;
  333. }
  334. if (s->regs[GEM_DMACFG] & (rx_n_tx ? GEM_DMACFG_RX_BD_EXT
  335. : GEM_DMACFG_TX_BD_EXT)) {
  336. ret += 2;
  337. }
  338. assert(ret <= DESC_MAX_NUM_WORDS);
  339. return ret;
  340. }
  341. static inline unsigned rx_desc_get_wrap(uint32_t *desc)
  342. {
  343. return desc[0] & DESC_0_RX_WRAP ? 1 : 0;
  344. }
  345. static inline unsigned rx_desc_get_ownership(uint32_t *desc)
  346. {
  347. return desc[0] & DESC_0_RX_OWNERSHIP ? 1 : 0;
  348. }
  349. static inline void rx_desc_set_ownership(uint32_t *desc)
  350. {
  351. desc[0] |= DESC_0_RX_OWNERSHIP;
  352. }
  353. static inline void rx_desc_set_sof(uint32_t *desc)
  354. {
  355. desc[1] |= DESC_1_RX_SOF;
  356. }
  357. static inline void rx_desc_clear_control(uint32_t *desc)
  358. {
  359. desc[1] = 0;
  360. }
  361. static inline void rx_desc_set_eof(uint32_t *desc)
  362. {
  363. desc[1] |= DESC_1_RX_EOF;
  364. }
  365. static inline void rx_desc_set_length(uint32_t *desc, unsigned len)
  366. {
  367. desc[1] &= ~DESC_1_LENGTH;
  368. desc[1] |= len;
  369. }
  370. static inline void rx_desc_set_broadcast(uint32_t *desc)
  371. {
  372. desc[1] |= R_DESC_1_RX_BROADCAST;
  373. }
  374. static inline void rx_desc_set_unicast_hash(uint32_t *desc)
  375. {
  376. desc[1] |= R_DESC_1_RX_UNICAST_HASH;
  377. }
  378. static inline void rx_desc_set_multicast_hash(uint32_t *desc)
  379. {
  380. desc[1] |= R_DESC_1_RX_MULTICAST_HASH;
  381. }
  382. static inline void rx_desc_set_sar(uint32_t *desc, int sar_idx)
  383. {
  384. desc[1] = deposit32(desc[1], R_DESC_1_RX_SAR_SHIFT, R_DESC_1_RX_SAR_LENGTH,
  385. sar_idx);
  386. desc[1] |= R_DESC_1_RX_SAR_MATCH;
  387. }
  388. /* The broadcast MAC address: 0xFFFFFFFFFFFF */
  389. static const uint8_t broadcast_addr[] = { 0xFF, 0xFF, 0xFF, 0xFF, 0xFF, 0xFF };
  390. static uint32_t gem_get_max_buf_len(CadenceGEMState *s, bool tx)
  391. {
  392. uint32_t size;
  393. if (s->regs[GEM_NWCFG] & GEM_NWCFG_JUMBO_FRAME) {
  394. size = s->regs[GEM_JUMBO_MAX_LEN];
  395. if (size > s->jumbo_max_len) {
  396. size = s->jumbo_max_len;
  397. qemu_log_mask(LOG_GUEST_ERROR, "GEM_JUMBO_MAX_LEN reg cannot be"
  398. " greater than 0x%" PRIx32 "\n", s->jumbo_max_len);
  399. }
  400. } else if (tx) {
  401. size = 1518;
  402. } else {
  403. size = s->regs[GEM_NWCFG] & GEM_NWCFG_RCV_1538 ? 1538 : 1518;
  404. }
  405. return size;
  406. }
  407. static void gem_set_isr(CadenceGEMState *s, int q, uint32_t flag)
  408. {
  409. if (q == 0) {
  410. s->regs[GEM_ISR] |= flag & ~(s->regs[GEM_IMR]);
  411. } else {
  412. s->regs[GEM_INT_Q1_STATUS + q - 1] |= flag &
  413. ~(s->regs[GEM_INT_Q1_MASK + q - 1]);
  414. }
  415. }
  416. /*
  417. * gem_init_register_masks:
  418. * One time initialization.
  419. * Set masks to identify which register bits have magical clear properties
  420. */
  421. static void gem_init_register_masks(CadenceGEMState *s)
  422. {
  423. unsigned int i;
  424. /* Mask of register bits which are read only */
  425. memset(&s->regs_ro[0], 0, sizeof(s->regs_ro));
  426. s->regs_ro[GEM_NWCTRL] = 0xFFF80000;
  427. s->regs_ro[GEM_NWSTATUS] = 0xFFFFFFFF;
  428. s->regs_ro[GEM_DMACFG] = 0x8E00F000;
  429. s->regs_ro[GEM_TXSTATUS] = 0xFFFFFE08;
  430. s->regs_ro[GEM_RXQBASE] = 0x00000003;
  431. s->regs_ro[GEM_TXQBASE] = 0x00000003;
  432. s->regs_ro[GEM_RXSTATUS] = 0xFFFFFFF0;
  433. s->regs_ro[GEM_ISR] = 0xFFFFFFFF;
  434. s->regs_ro[GEM_IMR] = 0xFFFFFFFF;
  435. s->regs_ro[GEM_MODID] = 0xFFFFFFFF;
  436. for (i = 0; i < s->num_priority_queues; i++) {
  437. s->regs_ro[GEM_INT_Q1_STATUS + i] = 0xFFFFFFFF;
  438. s->regs_ro[GEM_INT_Q1_ENABLE + i] = 0xFFFFF319;
  439. s->regs_ro[GEM_INT_Q1_DISABLE + i] = 0xFFFFF319;
  440. s->regs_ro[GEM_INT_Q1_MASK + i] = 0xFFFFFFFF;
  441. }
  442. /* Mask of register bits which are clear on read */
  443. memset(&s->regs_rtc[0], 0, sizeof(s->regs_rtc));
  444. s->regs_rtc[GEM_ISR] = 0xFFFFFFFF;
  445. for (i = 0; i < s->num_priority_queues; i++) {
  446. s->regs_rtc[GEM_INT_Q1_STATUS + i] = 0x00000CE6;
  447. }
  448. /* Mask of register bits which are write 1 to clear */
  449. memset(&s->regs_w1c[0], 0, sizeof(s->regs_w1c));
  450. s->regs_w1c[GEM_TXSTATUS] = 0x000001F7;
  451. s->regs_w1c[GEM_RXSTATUS] = 0x0000000F;
  452. /* Mask of register bits which are write only */
  453. memset(&s->regs_wo[0], 0, sizeof(s->regs_wo));
  454. s->regs_wo[GEM_NWCTRL] = 0x00073E60;
  455. s->regs_wo[GEM_IER] = 0x07FFFFFF;
  456. s->regs_wo[GEM_IDR] = 0x07FFFFFF;
  457. for (i = 0; i < s->num_priority_queues; i++) {
  458. s->regs_wo[GEM_INT_Q1_ENABLE + i] = 0x00000CE6;
  459. s->regs_wo[GEM_INT_Q1_DISABLE + i] = 0x00000CE6;
  460. }
  461. }
  462. /*
  463. * phy_update_link:
  464. * Make the emulated PHY link state match the QEMU "interface" state.
  465. */
  466. static void phy_update_link(CadenceGEMState *s)
  467. {
  468. DB_PRINT("down %d\n", qemu_get_queue(s->nic)->link_down);
  469. /* Autonegotiation status mirrors link status. */
  470. if (qemu_get_queue(s->nic)->link_down) {
  471. s->phy_regs[PHY_REG_STATUS] &= ~(PHY_REG_STATUS_ANEGCMPL |
  472. PHY_REG_STATUS_LINK);
  473. s->phy_regs[PHY_REG_INT_ST] |= PHY_REG_INT_ST_LINKC;
  474. } else {
  475. s->phy_regs[PHY_REG_STATUS] |= (PHY_REG_STATUS_ANEGCMPL |
  476. PHY_REG_STATUS_LINK);
  477. s->phy_regs[PHY_REG_INT_ST] |= (PHY_REG_INT_ST_LINKC |
  478. PHY_REG_INT_ST_ANEGCMPL |
  479. PHY_REG_INT_ST_ENERGY);
  480. }
  481. }
  482. static bool gem_can_receive(NetClientState *nc)
  483. {
  484. CadenceGEMState *s;
  485. int i;
  486. s = qemu_get_nic_opaque(nc);
  487. /* Do nothing if receive is not enabled. */
  488. if (!(s->regs[GEM_NWCTRL] & GEM_NWCTRL_RXENA)) {
  489. if (s->can_rx_state != 1) {
  490. s->can_rx_state = 1;
  491. DB_PRINT("can't receive - no enable\n");
  492. }
  493. return false;
  494. }
  495. for (i = 0; i < s->num_priority_queues; i++) {
  496. if (rx_desc_get_ownership(s->rx_desc[i]) != 1) {
  497. break;
  498. }
  499. };
  500. if (i == s->num_priority_queues) {
  501. if (s->can_rx_state != 2) {
  502. s->can_rx_state = 2;
  503. DB_PRINT("can't receive - all the buffer descriptors are busy\n");
  504. }
  505. return false;
  506. }
  507. if (s->can_rx_state != 0) {
  508. s->can_rx_state = 0;
  509. DB_PRINT("can receive\n");
  510. }
  511. return true;
  512. }
  513. /*
  514. * gem_update_int_status:
  515. * Raise or lower interrupt based on current status.
  516. */
  517. static void gem_update_int_status(CadenceGEMState *s)
  518. {
  519. int i;
  520. qemu_set_irq(s->irq[0], !!s->regs[GEM_ISR]);
  521. for (i = 1; i < s->num_priority_queues; ++i) {
  522. qemu_set_irq(s->irq[i], !!s->regs[GEM_INT_Q1_STATUS + i - 1]);
  523. }
  524. }
  525. /*
  526. * gem_receive_updatestats:
  527. * Increment receive statistics.
  528. */
  529. static void gem_receive_updatestats(CadenceGEMState *s, const uint8_t *packet,
  530. unsigned bytes)
  531. {
  532. uint64_t octets;
  533. /* Total octets (bytes) received */
  534. octets = ((uint64_t)(s->regs[GEM_OCTRXLO]) << 32) |
  535. s->regs[GEM_OCTRXHI];
  536. octets += bytes;
  537. s->regs[GEM_OCTRXLO] = octets >> 32;
  538. s->regs[GEM_OCTRXHI] = octets;
  539. /* Error-free Frames received */
  540. s->regs[GEM_RXCNT]++;
  541. /* Error-free Broadcast Frames counter */
  542. if (!memcmp(packet, broadcast_addr, 6)) {
  543. s->regs[GEM_RXBROADCNT]++;
  544. }
  545. /* Error-free Multicast Frames counter */
  546. if (packet[0] == 0x01) {
  547. s->regs[GEM_RXMULTICNT]++;
  548. }
  549. if (bytes <= 64) {
  550. s->regs[GEM_RX64CNT]++;
  551. } else if (bytes <= 127) {
  552. s->regs[GEM_RX65CNT]++;
  553. } else if (bytes <= 255) {
  554. s->regs[GEM_RX128CNT]++;
  555. } else if (bytes <= 511) {
  556. s->regs[GEM_RX256CNT]++;
  557. } else if (bytes <= 1023) {
  558. s->regs[GEM_RX512CNT]++;
  559. } else if (bytes <= 1518) {
  560. s->regs[GEM_RX1024CNT]++;
  561. } else {
  562. s->regs[GEM_RX1519CNT]++;
  563. }
  564. }
  565. /*
  566. * Get the MAC Address bit from the specified position
  567. */
  568. static unsigned get_bit(const uint8_t *mac, unsigned bit)
  569. {
  570. unsigned byte;
  571. byte = mac[bit / 8];
  572. byte >>= (bit & 0x7);
  573. byte &= 1;
  574. return byte;
  575. }
  576. /*
  577. * Calculate a GEM MAC Address hash index
  578. */
  579. static unsigned calc_mac_hash(const uint8_t *mac)
  580. {
  581. int index_bit, mac_bit;
  582. unsigned hash_index;
  583. hash_index = 0;
  584. mac_bit = 5;
  585. for (index_bit = 5; index_bit >= 0; index_bit--) {
  586. hash_index |= (get_bit(mac, mac_bit) ^
  587. get_bit(mac, mac_bit + 6) ^
  588. get_bit(mac, mac_bit + 12) ^
  589. get_bit(mac, mac_bit + 18) ^
  590. get_bit(mac, mac_bit + 24) ^
  591. get_bit(mac, mac_bit + 30) ^
  592. get_bit(mac, mac_bit + 36) ^
  593. get_bit(mac, mac_bit + 42)) << index_bit;
  594. mac_bit--;
  595. }
  596. return hash_index;
  597. }
  598. /*
  599. * gem_mac_address_filter:
  600. * Accept or reject this destination address?
  601. * Returns:
  602. * GEM_RX_REJECT: reject
  603. * >= 0: Specific address accept (which matched SAR is returned)
  604. * others for various other modes of accept:
  605. * GEM_RM_PROMISCUOUS_ACCEPT, GEM_RX_BROADCAST_ACCEPT,
  606. * GEM_RX_MULTICAST_HASH_ACCEPT or GEM_RX_UNICAST_HASH_ACCEPT
  607. */
  608. static int gem_mac_address_filter(CadenceGEMState *s, const uint8_t *packet)
  609. {
  610. uint8_t *gem_spaddr;
  611. int i, is_mc;
  612. /* Promiscuous mode? */
  613. if (s->regs[GEM_NWCFG] & GEM_NWCFG_PROMISC) {
  614. return GEM_RX_PROMISCUOUS_ACCEPT;
  615. }
  616. if (!memcmp(packet, broadcast_addr, 6)) {
  617. /* Reject broadcast packets? */
  618. if (s->regs[GEM_NWCFG] & GEM_NWCFG_BCAST_REJ) {
  619. return GEM_RX_REJECT;
  620. }
  621. return GEM_RX_BROADCAST_ACCEPT;
  622. }
  623. /* Accept packets -w- hash match? */
  624. is_mc = is_multicast_ether_addr(packet);
  625. if ((is_mc && (s->regs[GEM_NWCFG] & GEM_NWCFG_MCAST_HASH)) ||
  626. (!is_mc && (s->regs[GEM_NWCFG] & GEM_NWCFG_UCAST_HASH))) {
  627. uint64_t buckets;
  628. unsigned hash_index;
  629. hash_index = calc_mac_hash(packet);
  630. buckets = ((uint64_t)s->regs[GEM_HASHHI] << 32) | s->regs[GEM_HASHLO];
  631. if ((buckets >> hash_index) & 1) {
  632. return is_mc ? GEM_RX_MULTICAST_HASH_ACCEPT
  633. : GEM_RX_UNICAST_HASH_ACCEPT;
  634. }
  635. }
  636. /* Check all 4 specific addresses */
  637. gem_spaddr = (uint8_t *)&(s->regs[GEM_SPADDR1LO]);
  638. for (i = 3; i >= 0; i--) {
  639. if (s->sar_active[i] && !memcmp(packet, gem_spaddr + 8 * i, 6)) {
  640. return GEM_RX_SAR_ACCEPT + i;
  641. }
  642. }
  643. /* No address match; reject the packet */
  644. return GEM_RX_REJECT;
  645. }
  646. /* Figure out which queue the received data should be sent to */
  647. static int get_queue_from_screen(CadenceGEMState *s, uint8_t *rxbuf_ptr,
  648. unsigned rxbufsize)
  649. {
  650. uint32_t reg;
  651. bool matched, mismatched;
  652. int i, j;
  653. for (i = 0; i < s->num_type1_screeners; i++) {
  654. reg = s->regs[GEM_SCREENING_TYPE1_REGISTER_0 + i];
  655. matched = false;
  656. mismatched = false;
  657. /* Screening is based on UDP Port */
  658. if (reg & GEM_ST1R_UDP_PORT_MATCH_ENABLE) {
  659. uint16_t udp_port = rxbuf_ptr[14 + 22] << 8 | rxbuf_ptr[14 + 23];
  660. if (udp_port == extract32(reg, GEM_ST1R_UDP_PORT_MATCH_SHIFT,
  661. GEM_ST1R_UDP_PORT_MATCH_WIDTH)) {
  662. matched = true;
  663. } else {
  664. mismatched = true;
  665. }
  666. }
  667. /* Screening is based on DS/TC */
  668. if (reg & GEM_ST1R_DSTC_ENABLE) {
  669. uint8_t dscp = rxbuf_ptr[14 + 1];
  670. if (dscp == extract32(reg, GEM_ST1R_DSTC_MATCH_SHIFT,
  671. GEM_ST1R_DSTC_MATCH_WIDTH)) {
  672. matched = true;
  673. } else {
  674. mismatched = true;
  675. }
  676. }
  677. if (matched && !mismatched) {
  678. return extract32(reg, GEM_ST1R_QUEUE_SHIFT, GEM_ST1R_QUEUE_WIDTH);
  679. }
  680. }
  681. for (i = 0; i < s->num_type2_screeners; i++) {
  682. reg = s->regs[GEM_SCREENING_TYPE2_REGISTER_0 + i];
  683. matched = false;
  684. mismatched = false;
  685. if (reg & GEM_ST2R_ETHERTYPE_ENABLE) {
  686. uint16_t type = rxbuf_ptr[12] << 8 | rxbuf_ptr[13];
  687. int et_idx = extract32(reg, GEM_ST2R_ETHERTYPE_INDEX_SHIFT,
  688. GEM_ST2R_ETHERTYPE_INDEX_WIDTH);
  689. if (et_idx > s->num_type2_screeners) {
  690. qemu_log_mask(LOG_GUEST_ERROR, "Out of range ethertype "
  691. "register index: %d\n", et_idx);
  692. }
  693. if (type == s->regs[GEM_SCREENING_TYPE2_ETHERTYPE_REG_0 +
  694. et_idx]) {
  695. matched = true;
  696. } else {
  697. mismatched = true;
  698. }
  699. }
  700. /* Compare A, B, C */
  701. for (j = 0; j < 3; j++) {
  702. uint32_t cr0, cr1, mask;
  703. uint16_t rx_cmp;
  704. int offset;
  705. int cr_idx = extract32(reg, GEM_ST2R_COMPARE_A_SHIFT + j * 6,
  706. GEM_ST2R_COMPARE_WIDTH);
  707. if (!(reg & (GEM_ST2R_COMPARE_A_ENABLE << (j * 6)))) {
  708. continue;
  709. }
  710. if (cr_idx > s->num_type2_screeners) {
  711. qemu_log_mask(LOG_GUEST_ERROR, "Out of range compare "
  712. "register index: %d\n", cr_idx);
  713. }
  714. cr0 = s->regs[GEM_TYPE2_COMPARE_0_WORD_0 + cr_idx * 2];
  715. cr1 = s->regs[GEM_TYPE2_COMPARE_0_WORD_0 + cr_idx * 2 + 1];
  716. offset = extract32(cr1, GEM_T2CW1_OFFSET_VALUE_SHIFT,
  717. GEM_T2CW1_OFFSET_VALUE_WIDTH);
  718. switch (extract32(cr1, GEM_T2CW1_COMPARE_OFFSET_SHIFT,
  719. GEM_T2CW1_COMPARE_OFFSET_WIDTH)) {
  720. case 3: /* Skip UDP header */
  721. qemu_log_mask(LOG_UNIMP, "TCP compare offsets"
  722. "unimplemented - assuming UDP\n");
  723. offset += 8;
  724. /* Fallthrough */
  725. case 2: /* skip the IP header */
  726. offset += 20;
  727. /* Fallthrough */
  728. case 1: /* Count from after the ethertype */
  729. offset += 14;
  730. break;
  731. case 0:
  732. /* Offset from start of frame */
  733. break;
  734. }
  735. rx_cmp = rxbuf_ptr[offset] << 8 | rxbuf_ptr[offset];
  736. mask = extract32(cr0, 0, 16);
  737. if ((rx_cmp & mask) == (extract32(cr0, 16, 16) & mask)) {
  738. matched = true;
  739. } else {
  740. mismatched = true;
  741. }
  742. }
  743. if (matched && !mismatched) {
  744. return extract32(reg, GEM_ST2R_QUEUE_SHIFT, GEM_ST2R_QUEUE_WIDTH);
  745. }
  746. }
  747. /* We made it here, assume it's queue 0 */
  748. return 0;
  749. }
  750. static uint32_t gem_get_queue_base_addr(CadenceGEMState *s, bool tx, int q)
  751. {
  752. uint32_t base_addr = 0;
  753. switch (q) {
  754. case 0:
  755. base_addr = s->regs[tx ? GEM_TXQBASE : GEM_RXQBASE];
  756. break;
  757. case 1 ... (MAX_PRIORITY_QUEUES - 1):
  758. base_addr = s->regs[(tx ? GEM_TRANSMIT_Q1_PTR :
  759. GEM_RECEIVE_Q1_PTR) + q - 1];
  760. break;
  761. default:
  762. g_assert_not_reached();
  763. };
  764. return base_addr;
  765. }
/* Convenience wrapper: base address register of TX descriptor queue q. */
static inline uint32_t gem_get_tx_queue_base_addr(CadenceGEMState *s, int q)
{
    return gem_get_queue_base_addr(s, true, q);
}
/* Convenience wrapper: base address register of RX descriptor queue q. */
static inline uint32_t gem_get_rx_queue_base_addr(CadenceGEMState *s, int q)
{
    return gem_get_queue_base_addr(s, false, q);
}
/*
 * Build the full bus address of queue q's current descriptor: the 32-bit
 * ring pointer, extended with the upper-address register when the DMA is
 * configured for 64-bit addressing.
 */
static hwaddr gem_get_desc_addr(CadenceGEMState *s, bool tx, int q)
{
    hwaddr desc_addr = 0;

    if (s->regs[GEM_DMACFG] & GEM_DMACFG_ADDR_64B) {
        /* NOTE(review): a single GEM_TBQPH/GEM_RBQPH value is applied to
         * every priority queue here - confirm whether the hardware has
         * per-queue upper-address registers.
         */
        desc_addr = s->regs[tx ? GEM_TBQPH : GEM_RBQPH];
    }
    desc_addr <<= 32;
    desc_addr |= tx ? s->tx_desc_addr[q] : s->rx_desc_addr[q];
    return desc_addr;
}
/* Full bus address of queue q's current TX descriptor. */
static hwaddr gem_get_tx_desc_addr(CadenceGEMState *s, int q)
{
    return gem_get_desc_addr(s, true, q);
}
/* Full bus address of queue q's current RX descriptor. */
static hwaddr gem_get_rx_desc_addr(CadenceGEMState *s, int q)
{
    return gem_get_desc_addr(s, false, q);
}
/*
 * Fetch queue q's current RX descriptor from guest memory into
 * s->rx_desc[q].  If software still owns it there is no buffer to receive
 * into: flag "no buffer" status and raise the RX-used interrupt.
 */
static void gem_get_rx_desc(CadenceGEMState *s, int q)
{
    hwaddr desc_addr = gem_get_rx_desc_addr(s, q);

    DB_PRINT("read descriptor 0x%" HWADDR_PRIx "\n", desc_addr);

    /* read current descriptor */
    address_space_read(&s->dma_as, desc_addr, MEMTXATTRS_UNSPECIFIED,
                       s->rx_desc[q],
                       sizeof(uint32_t) * gem_get_desc_len(s, true));

    /* Descriptor owned by software ? */
    if (rx_desc_get_ownership(s->rx_desc[q]) == 1) {
        DB_PRINT("descriptor 0x%" HWADDR_PRIx " owned by sw.\n", desc_addr);
        s->regs[GEM_RXSTATUS] |= GEM_RXSTATUS_NOBUF;
        gem_set_isr(s, q, GEM_INT_RXUSED);
        /* Handle interrupt consequences */
        gem_update_int_status(s);
    }
}
  809. /*
  810. * gem_receive:
  811. * Fit a packet handed to us by QEMU into the receive descriptor ring.
  812. */
  813. static ssize_t gem_receive(NetClientState *nc, const uint8_t *buf, size_t size)
  814. {
  815. CadenceGEMState *s = qemu_get_nic_opaque(nc);
  816. unsigned rxbufsize, bytes_to_copy;
  817. unsigned rxbuf_offset;
  818. uint8_t *rxbuf_ptr;
  819. bool first_desc = true;
  820. int maf;
  821. int q = 0;
  822. /* Is this destination MAC address "for us" ? */
  823. maf = gem_mac_address_filter(s, buf);
  824. if (maf == GEM_RX_REJECT) {
  825. return size; /* no, drop siliently b/c it's not an error */
  826. }
  827. /* Discard packets with receive length error enabled ? */
  828. if (s->regs[GEM_NWCFG] & GEM_NWCFG_LERR_DISC) {
  829. unsigned type_len;
  830. /* Fish the ethertype / length field out of the RX packet */
  831. type_len = buf[12] << 8 | buf[13];
  832. /* It is a length field, not an ethertype */
  833. if (type_len < 0x600) {
  834. if (size < type_len) {
  835. /* discard */
  836. return -1;
  837. }
  838. }
  839. }
  840. /*
  841. * Determine configured receive buffer offset (probably 0)
  842. */
  843. rxbuf_offset = (s->regs[GEM_NWCFG] & GEM_NWCFG_BUFF_OFST_M) >>
  844. GEM_NWCFG_BUFF_OFST_S;
  845. /* The configure size of each receive buffer. Determines how many
  846. * buffers needed to hold this packet.
  847. */
  848. rxbufsize = ((s->regs[GEM_DMACFG] & GEM_DMACFG_RBUFSZ_M) >>
  849. GEM_DMACFG_RBUFSZ_S) * GEM_DMACFG_RBUFSZ_MUL;
  850. bytes_to_copy = size;
  851. /* Hardware allows a zero value here but warns against it. To avoid QEMU
  852. * indefinite loops we enforce a minimum value here
  853. */
  854. if (rxbufsize < GEM_DMACFG_RBUFSZ_MUL) {
  855. rxbufsize = GEM_DMACFG_RBUFSZ_MUL;
  856. }
  857. /* Pad to minimum length. Assume FCS field is stripped, logic
  858. * below will increment it to the real minimum of 64 when
  859. * not FCS stripping
  860. */
  861. if (size < 60) {
  862. size = 60;
  863. }
  864. /* Strip of FCS field ? (usually yes) */
  865. if (s->regs[GEM_NWCFG] & GEM_NWCFG_STRIP_FCS) {
  866. rxbuf_ptr = (void *)buf;
  867. } else {
  868. unsigned crc_val;
  869. if (size > MAX_FRAME_SIZE - sizeof(crc_val)) {
  870. size = MAX_FRAME_SIZE - sizeof(crc_val);
  871. }
  872. bytes_to_copy = size;
  873. /* The application wants the FCS field, which QEMU does not provide.
  874. * We must try and calculate one.
  875. */
  876. memcpy(s->rx_packet, buf, size);
  877. memset(s->rx_packet + size, 0, MAX_FRAME_SIZE - size);
  878. rxbuf_ptr = s->rx_packet;
  879. crc_val = cpu_to_le32(crc32(0, s->rx_packet, MAX(size, 60)));
  880. memcpy(s->rx_packet + size, &crc_val, sizeof(crc_val));
  881. bytes_to_copy += 4;
  882. size += 4;
  883. }
  884. DB_PRINT("config bufsize: %u packet size: %zd\n", rxbufsize, size);
  885. /* Find which queue we are targeting */
  886. q = get_queue_from_screen(s, rxbuf_ptr, rxbufsize);
  887. if (size > gem_get_max_buf_len(s, false)) {
  888. qemu_log_mask(LOG_GUEST_ERROR, "rx frame too long\n");
  889. gem_set_isr(s, q, GEM_INT_AMBA_ERR);
  890. return -1;
  891. }
  892. while (bytes_to_copy) {
  893. hwaddr desc_addr;
  894. /* Do nothing if receive is not enabled. */
  895. if (!gem_can_receive(nc)) {
  896. return -1;
  897. }
  898. DB_PRINT("copy %" PRIu32 " bytes to 0x%" PRIx64 "\n",
  899. MIN(bytes_to_copy, rxbufsize),
  900. rx_desc_get_buffer(s, s->rx_desc[q]));
  901. /* Copy packet data to emulated DMA buffer */
  902. address_space_write(&s->dma_as, rx_desc_get_buffer(s, s->rx_desc[q]) +
  903. rxbuf_offset,
  904. MEMTXATTRS_UNSPECIFIED, rxbuf_ptr,
  905. MIN(bytes_to_copy, rxbufsize));
  906. rxbuf_ptr += MIN(bytes_to_copy, rxbufsize);
  907. bytes_to_copy -= MIN(bytes_to_copy, rxbufsize);
  908. rx_desc_clear_control(s->rx_desc[q]);
  909. /* Update the descriptor. */
  910. if (first_desc) {
  911. rx_desc_set_sof(s->rx_desc[q]);
  912. first_desc = false;
  913. }
  914. if (bytes_to_copy == 0) {
  915. rx_desc_set_eof(s->rx_desc[q]);
  916. rx_desc_set_length(s->rx_desc[q], size);
  917. }
  918. rx_desc_set_ownership(s->rx_desc[q]);
  919. switch (maf) {
  920. case GEM_RX_PROMISCUOUS_ACCEPT:
  921. break;
  922. case GEM_RX_BROADCAST_ACCEPT:
  923. rx_desc_set_broadcast(s->rx_desc[q]);
  924. break;
  925. case GEM_RX_UNICAST_HASH_ACCEPT:
  926. rx_desc_set_unicast_hash(s->rx_desc[q]);
  927. break;
  928. case GEM_RX_MULTICAST_HASH_ACCEPT:
  929. rx_desc_set_multicast_hash(s->rx_desc[q]);
  930. break;
  931. case GEM_RX_REJECT:
  932. abort();
  933. default: /* SAR */
  934. rx_desc_set_sar(s->rx_desc[q], maf);
  935. }
  936. /* Descriptor write-back. */
  937. desc_addr = gem_get_rx_desc_addr(s, q);
  938. address_space_write(&s->dma_as, desc_addr, MEMTXATTRS_UNSPECIFIED,
  939. s->rx_desc[q],
  940. sizeof(uint32_t) * gem_get_desc_len(s, true));
  941. /* Next descriptor */
  942. if (rx_desc_get_wrap(s->rx_desc[q])) {
  943. DB_PRINT("wrapping RX descriptor list\n");
  944. s->rx_desc_addr[q] = gem_get_rx_queue_base_addr(s, q);
  945. } else {
  946. DB_PRINT("incrementing RX descriptor list\n");
  947. s->rx_desc_addr[q] += 4 * gem_get_desc_len(s, true);
  948. }
  949. gem_get_rx_desc(s, q);
  950. }
  951. /* Count it */
  952. gem_receive_updatestats(s, buf, size);
  953. s->regs[GEM_RXSTATUS] |= GEM_RXSTATUS_FRMRCVD;
  954. gem_set_isr(s, q, GEM_INT_RXCMPL);
  955. /* Handle interrupt consequences */
  956. gem_update_int_status(s);
  957. return size;
  958. }
  959. /*
  960. * gem_transmit_updatestats:
  961. * Increment transmit statistics.
  962. */
  963. static void gem_transmit_updatestats(CadenceGEMState *s, const uint8_t *packet,
  964. unsigned bytes)
  965. {
  966. uint64_t octets;
  967. /* Total octets (bytes) transmitted */
  968. octets = ((uint64_t)(s->regs[GEM_OCTTXLO]) << 32) |
  969. s->regs[GEM_OCTTXHI];
  970. octets += bytes;
  971. s->regs[GEM_OCTTXLO] = octets >> 32;
  972. s->regs[GEM_OCTTXHI] = octets;
  973. /* Error-free Frames transmitted */
  974. s->regs[GEM_TXCNT]++;
  975. /* Error-free Broadcast Frames counter */
  976. if (!memcmp(packet, broadcast_addr, 6)) {
  977. s->regs[GEM_TXBCNT]++;
  978. }
  979. /* Error-free Multicast Frames counter */
  980. if (packet[0] == 0x01) {
  981. s->regs[GEM_TXMCNT]++;
  982. }
  983. if (bytes <= 64) {
  984. s->regs[GEM_TX64CNT]++;
  985. } else if (bytes <= 127) {
  986. s->regs[GEM_TX65CNT]++;
  987. } else if (bytes <= 255) {
  988. s->regs[GEM_TX128CNT]++;
  989. } else if (bytes <= 511) {
  990. s->regs[GEM_TX256CNT]++;
  991. } else if (bytes <= 1023) {
  992. s->regs[GEM_TX512CNT]++;
  993. } else if (bytes <= 1518) {
  994. s->regs[GEM_TX1024CNT]++;
  995. } else {
  996. s->regs[GEM_TX1519CNT]++;
  997. }
  998. }
  999. /*
  1000. * gem_transmit:
  1001. * Fish packets out of the descriptor ring and feed them to QEMU
  1002. */
  1003. static void gem_transmit(CadenceGEMState *s)
  1004. {
  1005. uint32_t desc[DESC_MAX_NUM_WORDS];
  1006. hwaddr packet_desc_addr;
  1007. uint8_t *p;
  1008. unsigned total_bytes;
  1009. int q = 0;
  1010. /* Do nothing if transmit is not enabled. */
  1011. if (!(s->regs[GEM_NWCTRL] & GEM_NWCTRL_TXENA)) {
  1012. return;
  1013. }
  1014. DB_PRINT("\n");
  1015. /* The packet we will hand off to QEMU.
  1016. * Packets scattered across multiple descriptors are gathered to this
  1017. * one contiguous buffer first.
  1018. */
  1019. p = s->tx_packet;
  1020. total_bytes = 0;
  1021. for (q = s->num_priority_queues - 1; q >= 0; q--) {
  1022. /* read current descriptor */
  1023. packet_desc_addr = gem_get_tx_desc_addr(s, q);
  1024. DB_PRINT("read descriptor 0x%" HWADDR_PRIx "\n", packet_desc_addr);
  1025. address_space_read(&s->dma_as, packet_desc_addr,
  1026. MEMTXATTRS_UNSPECIFIED, desc,
  1027. sizeof(uint32_t) * gem_get_desc_len(s, false));
  1028. /* Handle all descriptors owned by hardware */
  1029. while (tx_desc_get_used(desc) == 0) {
  1030. /* Do nothing if transmit is not enabled. */
  1031. if (!(s->regs[GEM_NWCTRL] & GEM_NWCTRL_TXENA)) {
  1032. return;
  1033. }
  1034. print_gem_tx_desc(desc, q);
  1035. /* The real hardware would eat this (and possibly crash).
  1036. * For QEMU let's lend a helping hand.
  1037. */
  1038. if ((tx_desc_get_buffer(s, desc) == 0) ||
  1039. (tx_desc_get_length(desc) == 0)) {
  1040. DB_PRINT("Invalid TX descriptor @ 0x%" HWADDR_PRIx "\n",
  1041. packet_desc_addr);
  1042. break;
  1043. }
  1044. if (tx_desc_get_length(desc) > gem_get_max_buf_len(s, true) -
  1045. (p - s->tx_packet)) {
  1046. qemu_log_mask(LOG_GUEST_ERROR, "TX descriptor @ 0x%" \
  1047. HWADDR_PRIx " too large: size 0x%x space 0x%zx\n",
  1048. packet_desc_addr, tx_desc_get_length(desc),
  1049. gem_get_max_buf_len(s, true) - (p - s->tx_packet));
  1050. gem_set_isr(s, q, GEM_INT_AMBA_ERR);
  1051. break;
  1052. }
  1053. /* Gather this fragment of the packet from "dma memory" to our
  1054. * contig buffer.
  1055. */
  1056. address_space_read(&s->dma_as, tx_desc_get_buffer(s, desc),
  1057. MEMTXATTRS_UNSPECIFIED,
  1058. p, tx_desc_get_length(desc));
  1059. p += tx_desc_get_length(desc);
  1060. total_bytes += tx_desc_get_length(desc);
  1061. /* Last descriptor for this packet; hand the whole thing off */
  1062. if (tx_desc_get_last(desc)) {
  1063. uint32_t desc_first[DESC_MAX_NUM_WORDS];
  1064. hwaddr desc_addr = gem_get_tx_desc_addr(s, q);
  1065. /* Modify the 1st descriptor of this packet to be owned by
  1066. * the processor.
  1067. */
  1068. address_space_read(&s->dma_as, desc_addr,
  1069. MEMTXATTRS_UNSPECIFIED, desc_first,
  1070. sizeof(desc_first));
  1071. tx_desc_set_used(desc_first);
  1072. address_space_write(&s->dma_as, desc_addr,
  1073. MEMTXATTRS_UNSPECIFIED, desc_first,
  1074. sizeof(desc_first));
  1075. /* Advance the hardware current descriptor past this packet */
  1076. if (tx_desc_get_wrap(desc)) {
  1077. s->tx_desc_addr[q] = gem_get_tx_queue_base_addr(s, q);
  1078. } else {
  1079. s->tx_desc_addr[q] = packet_desc_addr +
  1080. 4 * gem_get_desc_len(s, false);
  1081. }
  1082. DB_PRINT("TX descriptor next: 0x%08x\n", s->tx_desc_addr[q]);
  1083. s->regs[GEM_TXSTATUS] |= GEM_TXSTATUS_TXCMPL;
  1084. gem_set_isr(s, q, GEM_INT_TXCMPL);
  1085. /* Handle interrupt consequences */
  1086. gem_update_int_status(s);
  1087. /* Is checksum offload enabled? */
  1088. if (s->regs[GEM_DMACFG] & GEM_DMACFG_TXCSUM_OFFL) {
  1089. net_checksum_calculate(s->tx_packet, total_bytes, CSUM_ALL);
  1090. }
  1091. /* Update MAC statistics */
  1092. gem_transmit_updatestats(s, s->tx_packet, total_bytes);
  1093. /* Send the packet somewhere */
  1094. if (s->phy_loop || (s->regs[GEM_NWCTRL] &
  1095. GEM_NWCTRL_LOCALLOOP)) {
  1096. qemu_receive_packet(qemu_get_queue(s->nic), s->tx_packet,
  1097. total_bytes);
  1098. } else {
  1099. qemu_send_packet(qemu_get_queue(s->nic), s->tx_packet,
  1100. total_bytes);
  1101. }
  1102. /* Prepare for next packet */
  1103. p = s->tx_packet;
  1104. total_bytes = 0;
  1105. }
  1106. /* read next descriptor */
  1107. if (tx_desc_get_wrap(desc)) {
  1108. if (s->regs[GEM_DMACFG] & GEM_DMACFG_ADDR_64B) {
  1109. packet_desc_addr = s->regs[GEM_TBQPH];
  1110. packet_desc_addr <<= 32;
  1111. } else {
  1112. packet_desc_addr = 0;
  1113. }
  1114. packet_desc_addr |= gem_get_tx_queue_base_addr(s, q);
  1115. } else {
  1116. packet_desc_addr += 4 * gem_get_desc_len(s, false);
  1117. }
  1118. DB_PRINT("read descriptor 0x%" HWADDR_PRIx "\n", packet_desc_addr);
  1119. address_space_read(&s->dma_as, packet_desc_addr,
  1120. MEMTXATTRS_UNSPECIFIED, desc,
  1121. sizeof(uint32_t) * gem_get_desc_len(s, false));
  1122. }
  1123. if (tx_desc_get_used(desc)) {
  1124. s->regs[GEM_TXSTATUS] |= GEM_TXSTATUS_USED;
  1125. /* IRQ TXUSED is defined only for queue 0 */
  1126. if (q == 0) {
  1127. gem_set_isr(s, 0, GEM_INT_TXUSED);
  1128. }
  1129. gem_update_int_status(s);
  1130. }
  1131. }
  1132. }
/*
 * Reset the emulated PHY registers to their power-on values and refresh
 * the link state.  The ID registers (0x0141 / 0x0CC2) look like a Marvell
 * PHY identity - presumably chosen to satisfy guest PHY drivers; confirm
 * before changing.
 */
static void gem_phy_reset(CadenceGEMState *s)
{
    memset(&s->phy_regs[0], 0, sizeof(s->phy_regs));
    s->phy_regs[PHY_REG_CONTROL] = 0x1140;
    s->phy_regs[PHY_REG_STATUS] = 0x7969;
    s->phy_regs[PHY_REG_PHYID1] = 0x0141;
    s->phy_regs[PHY_REG_PHYID2] = 0x0CC2;
    s->phy_regs[PHY_REG_ANEGADV] = 0x01E1;
    s->phy_regs[PHY_REG_LINKPABIL] = 0xCDE1;
    s->phy_regs[PHY_REG_ANEGEXP] = 0x000F;
    s->phy_regs[PHY_REG_NEXTP] = 0x2001;
    s->phy_regs[PHY_REG_LINKPNEXTP] = 0x40E6;
    s->phy_regs[PHY_REG_100BTCTRL] = 0x0300;
    s->phy_regs[PHY_REG_1000BTSTAT] = 0x7C00;
    s->phy_regs[PHY_REG_EXTSTAT] = 0x3000;
    s->phy_regs[PHY_REG_PHYSPCFC_CTL] = 0x0078;
    s->phy_regs[PHY_REG_PHYSPCFC_ST] = 0x7C00;
    s->phy_regs[PHY_REG_EXT_PHYSPCFC_CTL] = 0x0C60;
    s->phy_regs[PHY_REG_LED] = 0x4100;
    s->phy_regs[PHY_REG_EXT_PHYSPCFC_CTL2] = 0x000A;
    s->phy_regs[PHY_REG_EXT_PHYSPCFC_ST] = 0x848B;

    /* Reflect the current QEMU link state into the status registers */
    phy_update_link(s);
}
/* Device reset: restore registers, PHY and SAR state to power-on values. */
static void gem_reset(DeviceState *d)
{
    int i;
    CadenceGEMState *s = CADENCE_GEM(d);
    const uint8_t *a;
    uint32_t queues_mask = 0;

    DB_PRINT("\n");

    /* Set post reset register values */
    memset(&s->regs[0], 0, sizeof(s->regs));
    s->regs[GEM_NWCFG] = 0x00080000;
    s->regs[GEM_NWSTATUS] = 0x00000006;
    s->regs[GEM_DMACFG] = 0x00020784;
    s->regs[GEM_IMR] = 0x07ffffff;
    s->regs[GEM_TXPAUSE] = 0x0000ffff;
    s->regs[GEM_TXPARTIALSF] = 0x000003ff;
    s->regs[GEM_RXPARTIALSF] = 0x000003ff;
    s->regs[GEM_MODID] = s->revision;
    s->regs[GEM_DESCONF] = 0x02D00111;
    s->regs[GEM_DESCONF2] = 0x2ab10000 | s->jumbo_max_len;
    s->regs[GEM_DESCONF5] = 0x002f2045;
    s->regs[GEM_DESCONF6] = GEM_DESCONF6_64B_MASK;
    s->regs[GEM_INT_Q1_MASK] = 0x00000CE6;
    s->regs[GEM_JUMBO_MAX_LEN] = s->jumbo_max_len;

    /* Advertise the extra priority queues (1..N-1) in DESCONF6 */
    if (s->num_priority_queues > 1) {
        queues_mask = MAKE_64BIT_MASK(1, s->num_priority_queues - 1);
        s->regs[GEM_DESCONF6] |= queues_mask;
    }

    /* Set MAC address */
    a = &s->conf.macaddr.a[0];
    s->regs[GEM_SPADDR1LO] = a[0] | (a[1] << 8) | (a[2] << 16) | (a[3] << 24);
    s->regs[GEM_SPADDR1HI] = a[4] | (a[5] << 8);

    /* All specific-address (SAR) filters start out disabled */
    for (i = 0; i < 4; i++) {
        s->sar_active[i] = false;
    }

    gem_phy_reset(s);

    gem_update_int_status(s);
}
/* Read an emulated PHY (MDIO) register.  Assumes reg_num < 32 - callers
 * extract it from a 5-bit register field; TODO confirm.
 */
static uint16_t gem_phy_read(CadenceGEMState *s, unsigned reg_num)
{
    DB_PRINT("reg: %d value: 0x%04x\n", reg_num, s->phy_regs[reg_num]);
    return s->phy_regs[reg_num];
}
/*
 * Write an emulated PHY (MDIO) register, modelling the self-clearing
 * control bits (reset, autoneg restart) before storing the value.
 */
static void gem_phy_write(CadenceGEMState *s, unsigned reg_num, uint16_t val)
{
    DB_PRINT("reg: %d value: 0x%04x\n", reg_num, val);

    switch (reg_num) {
    case PHY_REG_CONTROL:
        if (val & PHY_REG_CONTROL_RST) {
            /* Phy reset; the reset and loopback bits self-clear */
            gem_phy_reset(s);
            val &= ~(PHY_REG_CONTROL_RST | PHY_REG_CONTROL_LOOP);
            s->phy_loop = 0;
        }
        if (val & PHY_REG_CONTROL_ANEG) {
            /* Complete autonegotiation immediately */
            val &= ~(PHY_REG_CONTROL_ANEG | PHY_REG_CONTROL_ANRESTART);
            s->phy_regs[PHY_REG_STATUS] |= PHY_REG_STATUS_ANEGCMPL;
        }
        if (val & PHY_REG_CONTROL_LOOP) {
            DB_PRINT("PHY placed in loopback\n");
            s->phy_loop = 1;
        } else {
            s->phy_loop = 0;
        }
        break;
    }
    /* Store the (possibly adjusted) value */
    s->phy_regs[reg_num] = val;
}
  1224. /*
  1225. * gem_read32:
  1226. * Read a GEM register.
  1227. */
  1228. static uint64_t gem_read(void *opaque, hwaddr offset, unsigned size)
  1229. {
  1230. CadenceGEMState *s;
  1231. uint32_t retval;
  1232. s = opaque;
  1233. offset >>= 2;
  1234. retval = s->regs[offset];
  1235. DB_PRINT("offset: 0x%04x read: 0x%08x\n", (unsigned)offset*4, retval);
  1236. switch (offset) {
  1237. case GEM_ISR:
  1238. DB_PRINT("lowering irqs on ISR read\n");
  1239. /* The interrupts get updated at the end of the function. */
  1240. break;
  1241. case GEM_PHYMNTNC:
  1242. if (retval & GEM_PHYMNTNC_OP_R) {
  1243. uint32_t phy_addr, reg_num;
  1244. phy_addr = (retval & GEM_PHYMNTNC_ADDR) >> GEM_PHYMNTNC_ADDR_SHFT;
  1245. if (phy_addr == s->phy_addr) {
  1246. reg_num = (retval & GEM_PHYMNTNC_REG) >> GEM_PHYMNTNC_REG_SHIFT;
  1247. retval &= 0xFFFF0000;
  1248. retval |= gem_phy_read(s, reg_num);
  1249. } else {
  1250. retval |= 0xFFFF; /* No device at this address */
  1251. }
  1252. }
  1253. break;
  1254. }
  1255. /* Squash read to clear bits */
  1256. s->regs[offset] &= ~(s->regs_rtc[offset]);
  1257. /* Do not provide write only bits */
  1258. retval &= ~(s->regs_wo[offset]);
  1259. DB_PRINT("0x%08x\n", retval);
  1260. gem_update_int_status(s);
  1261. return retval;
  1262. }
  1263. /*
  1264. * gem_write32:
  1265. * Write a GEM register.
  1266. */
  1267. static void gem_write(void *opaque, hwaddr offset, uint64_t val,
  1268. unsigned size)
  1269. {
  1270. CadenceGEMState *s = (CadenceGEMState *)opaque;
  1271. uint32_t readonly;
  1272. int i;
  1273. DB_PRINT("offset: 0x%04x write: 0x%08x ", (unsigned)offset, (unsigned)val);
  1274. offset >>= 2;
  1275. /* Squash bits which are read only in write value */
  1276. val &= ~(s->regs_ro[offset]);
  1277. /* Preserve (only) bits which are read only and wtc in register */
  1278. readonly = s->regs[offset] & (s->regs_ro[offset] | s->regs_w1c[offset]);
  1279. /* Copy register write to backing store */
  1280. s->regs[offset] = (val & ~s->regs_w1c[offset]) | readonly;
  1281. /* do w1c */
  1282. s->regs[offset] &= ~(s->regs_w1c[offset] & val);
  1283. /* Handle register write side effects */
  1284. switch (offset) {
  1285. case GEM_NWCTRL:
  1286. if (val & GEM_NWCTRL_RXENA) {
  1287. for (i = 0; i < s->num_priority_queues; ++i) {
  1288. gem_get_rx_desc(s, i);
  1289. }
  1290. }
  1291. if (val & GEM_NWCTRL_TXSTART) {
  1292. gem_transmit(s);
  1293. }
  1294. if (!(val & GEM_NWCTRL_TXENA)) {
  1295. /* Reset to start of Q when transmit disabled. */
  1296. for (i = 0; i < s->num_priority_queues; i++) {
  1297. s->tx_desc_addr[i] = gem_get_tx_queue_base_addr(s, i);
  1298. }
  1299. }
  1300. if (gem_can_receive(qemu_get_queue(s->nic))) {
  1301. qemu_flush_queued_packets(qemu_get_queue(s->nic));
  1302. }
  1303. break;
  1304. case GEM_TXSTATUS:
  1305. gem_update_int_status(s);
  1306. break;
  1307. case GEM_RXQBASE:
  1308. s->rx_desc_addr[0] = val;
  1309. break;
  1310. case GEM_RECEIVE_Q1_PTR ... GEM_RECEIVE_Q7_PTR:
  1311. s->rx_desc_addr[offset - GEM_RECEIVE_Q1_PTR + 1] = val;
  1312. break;
  1313. case GEM_TXQBASE:
  1314. s->tx_desc_addr[0] = val;
  1315. break;
  1316. case GEM_TRANSMIT_Q1_PTR ... GEM_TRANSMIT_Q7_PTR:
  1317. s->tx_desc_addr[offset - GEM_TRANSMIT_Q1_PTR + 1] = val;
  1318. break;
  1319. case GEM_RXSTATUS:
  1320. gem_update_int_status(s);
  1321. break;
  1322. case GEM_IER:
  1323. s->regs[GEM_IMR] &= ~val;
  1324. gem_update_int_status(s);
  1325. break;
  1326. case GEM_JUMBO_MAX_LEN:
  1327. s->regs[GEM_JUMBO_MAX_LEN] = val & MAX_JUMBO_FRAME_SIZE_MASK;
  1328. break;
  1329. case GEM_INT_Q1_ENABLE ... GEM_INT_Q7_ENABLE:
  1330. s->regs[GEM_INT_Q1_MASK + offset - GEM_INT_Q1_ENABLE] &= ~val;
  1331. gem_update_int_status(s);
  1332. break;
  1333. case GEM_IDR:
  1334. s->regs[GEM_IMR] |= val;
  1335. gem_update_int_status(s);
  1336. break;
  1337. case GEM_INT_Q1_DISABLE ... GEM_INT_Q7_DISABLE:
  1338. s->regs[GEM_INT_Q1_MASK + offset - GEM_INT_Q1_DISABLE] |= val;
  1339. gem_update_int_status(s);
  1340. break;
  1341. case GEM_SPADDR1LO:
  1342. case GEM_SPADDR2LO:
  1343. case GEM_SPADDR3LO:
  1344. case GEM_SPADDR4LO:
  1345. s->sar_active[(offset - GEM_SPADDR1LO) / 2] = false;
  1346. break;
  1347. case GEM_SPADDR1HI:
  1348. case GEM_SPADDR2HI:
  1349. case GEM_SPADDR3HI:
  1350. case GEM_SPADDR4HI:
  1351. s->sar_active[(offset - GEM_SPADDR1HI) / 2] = true;
  1352. break;
  1353. case GEM_PHYMNTNC:
  1354. if (val & GEM_PHYMNTNC_OP_W) {
  1355. uint32_t phy_addr, reg_num;
  1356. phy_addr = (val & GEM_PHYMNTNC_ADDR) >> GEM_PHYMNTNC_ADDR_SHFT;
  1357. if (phy_addr == s->phy_addr) {
  1358. reg_num = (val & GEM_PHYMNTNC_REG) >> GEM_PHYMNTNC_REG_SHIFT;
  1359. gem_phy_write(s, reg_num, val);
  1360. }
  1361. }
  1362. break;
  1363. }
  1364. DB_PRINT("newval: 0x%08x\n", s->regs[offset]);
  1365. }
/* MMIO ops for the GEM register bank (little-endian 32-bit registers). */
static const MemoryRegionOps gem_ops = {
    .read = gem_read,
    .write = gem_write,
    .endianness = DEVICE_LITTLE_ENDIAN,
};
/*
 * Net-layer link status callback: mirror QEMU's link state into the PHY
 * registers and re-evaluate interrupt lines.
 */
static void gem_set_link(NetClientState *nc)
{
    CadenceGEMState *s = qemu_get_nic_opaque(nc);

    DB_PRINT("\n");
    phy_update_link(s);
    gem_update_int_status(s);
}
/* NIC callbacks registered with the QEMU net layer. */
static NetClientInfo net_gem_info = {
    .type = NET_CLIENT_DRIVER_NIC,
    .size = sizeof(NICState),
    .can_receive = gem_can_receive,
    .receive = gem_receive,
    .link_status_changed = gem_set_link,
};
  1385. static void gem_realize(DeviceState *dev, Error **errp)
  1386. {
  1387. CadenceGEMState *s = CADENCE_GEM(dev);
  1388. int i;
  1389. address_space_init(&s->dma_as,
  1390. s->dma_mr ? s->dma_mr : get_system_memory(), "dma");
  1391. if (s->num_priority_queues == 0 ||
  1392. s->num_priority_queues > MAX_PRIORITY_QUEUES) {
  1393. error_setg(errp, "Invalid num-priority-queues value: %" PRIx8,
  1394. s->num_priority_queues);
  1395. return;
  1396. } else if (s->num_type1_screeners > MAX_TYPE1_SCREENERS) {
  1397. error_setg(errp, "Invalid num-type1-screeners value: %" PRIx8,
  1398. s->num_type1_screeners);
  1399. return;
  1400. } else if (s->num_type2_screeners > MAX_TYPE2_SCREENERS) {
  1401. error_setg(errp, "Invalid num-type2-screeners value: %" PRIx8,
  1402. s->num_type2_screeners);
  1403. return;
  1404. }
  1405. for (i = 0; i < s->num_priority_queues; ++i) {
  1406. sysbus_init_irq(SYS_BUS_DEVICE(dev), &s->irq[i]);
  1407. }
  1408. qemu_macaddr_default_if_unset(&s->conf.macaddr);
  1409. s->nic = qemu_new_nic(&net_gem_info, &s->conf,
  1410. object_get_typename(OBJECT(dev)), dev->id, s);
  1411. if (s->jumbo_max_len > MAX_FRAME_SIZE) {
  1412. error_setg(errp, "jumbo-max-len is greater than %d",
  1413. MAX_FRAME_SIZE);
  1414. return;
  1415. }
  1416. }
/*
 * Instance init: initialise the register masks, map the MMIO register
 * region, and expose the optional "dma" link property that lets a board
 * target DMA at a custom memory region instead of system memory.
 */
static void gem_init(Object *obj)
{
    CadenceGEMState *s = CADENCE_GEM(obj);
    DeviceState *dev = DEVICE(obj);

    DB_PRINT("\n");

    gem_init_register_masks(s);
    memory_region_init_io(&s->iomem, OBJECT(s), &gem_ops, s,
                          "enet", sizeof(s->regs));

    sysbus_init_mmio(SYS_BUS_DEVICE(dev), &s->iomem);

    object_property_add_link(obj, "dma", TYPE_MEMORY_REGION,
                             (Object **)&s->dma_mr,
                             qdev_prop_allow_set_link_before_realize,
                             OBJ_PROP_LINK_STRONG);
}
/* Migration state.  Bump version_id/minimum_version_id if fields change. */
static const VMStateDescription vmstate_cadence_gem = {
    .name = "cadence_gem",
    .version_id = 4,
    .minimum_version_id = 4,
    .fields = (VMStateField[]) {
        VMSTATE_UINT32_ARRAY(regs, CadenceGEMState, CADENCE_GEM_MAXREG),
        VMSTATE_UINT16_ARRAY(phy_regs, CadenceGEMState, 32),
        VMSTATE_UINT8(phy_loop, CadenceGEMState),
        VMSTATE_UINT32_ARRAY(rx_desc_addr, CadenceGEMState,
                             MAX_PRIORITY_QUEUES),
        VMSTATE_UINT32_ARRAY(tx_desc_addr, CadenceGEMState,
                             MAX_PRIORITY_QUEUES),
        VMSTATE_BOOL_ARRAY(sar_active, CadenceGEMState, 4),
        VMSTATE_END_OF_LIST(),
    }
};
/* User-settable device properties; counts are validated in gem_realize(). */
static Property gem_properties[] = {
    DEFINE_NIC_PROPERTIES(CadenceGEMState, conf),
    DEFINE_PROP_UINT32("revision", CadenceGEMState, revision,
                       GEM_MODID_VALUE),
    DEFINE_PROP_UINT8("phy-addr", CadenceGEMState, phy_addr, BOARD_PHY_ADDRESS),
    DEFINE_PROP_UINT8("num-priority-queues", CadenceGEMState,
                      num_priority_queues, 1),
    DEFINE_PROP_UINT8("num-type1-screeners", CadenceGEMState,
                      num_type1_screeners, 4),
    DEFINE_PROP_UINT8("num-type2-screeners", CadenceGEMState,
                      num_type2_screeners, 4),
    DEFINE_PROP_UINT16("jumbo-max-len", CadenceGEMState,
                       jumbo_max_len, 10240),
    DEFINE_PROP_END_OF_LIST(),
};
/* QOM class init: wire up realize, properties, migration and reset. */
static void gem_class_init(ObjectClass *klass, void *data)
{
    DeviceClass *dc = DEVICE_CLASS(klass);

    dc->realize = gem_realize;
    device_class_set_props(dc, gem_properties);
    dc->vmsd = &vmstate_cadence_gem;
    dc->reset = gem_reset;
}
/* QOM type registration record for the Cadence GEM sysbus device. */
static const TypeInfo gem_info = {
    .name  = TYPE_CADENCE_GEM,
    .parent = TYPE_SYS_BUS_DEVICE,
    .instance_size  = sizeof(CadenceGEMState),
    .instance_init = gem_init,
    .class_init = gem_class_init,
};
/* Register the GEM type with QOM at startup. */
static void gem_register_types(void)
{
    type_register_static(&gem_info);
}

type_init(gem_register_types)