sunhme.c 28 KB

12345678910111213141516171819202122232425262728293031323334353637383940414243444546474849505152535455565758596061626364656667686970717273747576777879808182838485868788899091929394959697989910010110210310410510610710810911011111211311411511611711811912012112212312412512612712812913013113213313413513613713813914014114214314414514614714814915015115215315415515615715815916016116216316416516616716816917017117217317417517617717817918018118218318418518618718818919019119219319419519619719819920020120220320420520620720820921021121221321421521621721821922022122222322422522622722822923023123223323423523623723823924024124224324424524624724824925025125225325425525625725825926026126226326426526626726826927027127227327427527627727827928028128228328428528628728828929029129229329429529629729829930030130230330430530630730830931031131231331431531631731831932032132232332432532632732832933033133233333433533633733833934034134234334434534634734834935035135235335435535635735835936036136236336436536636736836937037137237337437537637737837938038138238338438538638738838939039139239339439539639739839940040140240340440540640740840941041141241341441541641741841942042142242342442542642742842943043143243343443543643743843944044144244344444544644744844945045145245345445545645745845946046146246346446546646746846947047147247347447547647747847948048148248348448548648748848949049149249349449549649749849950050150250350450550650750850951051151251351451551651751851952052152252352452552652752852953053153253353453553653753853954054154254354454554654754854955055155255355455555655755855956056156256356456556656756856957057157257357457557657757857958058158258358458558658758858959059159259359459559659759859960060160260360460560660760860961061161261361461561661761861962062162262362462562662762862963063163263363463563663763863964064164264364464564664764864965065165265365465565665765865966066166266366466566666766866967067167267367467567667767867968068168268368468568668768868969069169269369469569669769869970070170270
3704705706707708709710711712713714715716717718719720721722723724725726727728729730731732733734735736737738739740741742743744745746747748749750751752753754755756757758759760761762763764765766767768769770771772773774775776777778779780781782783784785786787788789790791792793794795796797798799800801802803804805806807808809810811812813814815816817818819820821822823824825826827828829830831832833834835836837838839840841842843844845846847848849850851852853854855856857858859860861862863864865866867868869870871872873874875876877878879880881882883884885886887888889890891892893894895896897898899900901902903904905906907908909910911912913914915916917918919920921922923924925926927928929930931932933934935936937938939940941942943944945946947948949950951952953954955956957958959960961962963964965966967968969970971972973974975976977978979980981982983
  1. /*
  2. * QEMU Sun Happy Meal Ethernet emulation
  3. *
  4. * Copyright (c) 2017 Mark Cave-Ayland
  5. *
  6. * Permission is hereby granted, free of charge, to any person obtaining a copy
  7. * of this software and associated documentation files (the "Software"), to deal
  8. * in the Software without restriction, including without limitation the rights
  9. * to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
  10. * copies of the Software, and to permit persons to whom the Software is
  11. * furnished to do so, subject to the following conditions:
  12. *
  13. * The above copyright notice and this permission notice shall be included in
  14. * all copies or substantial portions of the Software.
  15. *
  16. * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
  17. * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
  18. * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
  19. * THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
  20. * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
  21. * OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN
  22. * THE SOFTWARE.
  23. */
  24. #include "qemu/osdep.h"
  25. #include "hw/pci/pci_device.h"
  26. #include "hw/qdev-properties.h"
  27. #include "migration/vmstate.h"
  28. #include "hw/net/mii.h"
  29. #include "net/net.h"
  30. #include "qemu/module.h"
  31. #include "net/checksum.h"
  32. #include "net/eth.h"
  33. #include "sysemu/sysemu.h"
  34. #include "trace.h"
  35. #include "qom/object.h"
/* Total size of the HME register window (PCI BAR 0) */
#define HME_REG_SIZE 0x8000

/* SEB (Shared Ethernet Block): global reset, status and interrupt mask */
#define HME_SEB_REG_SIZE 0x2000
#define HME_SEBI_RESET 0x0
#define HME_SEB_RESET_ETX 0x1
#define HME_SEB_RESET_ERX 0x2
#define HME_SEBI_STAT 0x100
/* Offset used (wrongly) by Linux drivers before 4.13 for the STAT register */
#define HME_SEBI_STAT_LINUXBUG 0x108
#define HME_SEB_STAT_RXTOHOST 0x10000
#define HME_SEB_STAT_NORXD 0x20000
#define HME_SEB_STAT_MIFIRQ 0x800000
#define HME_SEB_STAT_HOSTTOTX 0x1000000
#define HME_SEB_STAT_TXALL 0x2000000
#define HME_SEBI_IMASK 0x104
/* Offset used (wrongly) by Linux drivers before 4.13 for the IMASK register */
#define HME_SEBI_IMASK_LINUXBUG 0x10c

/* ETX: transmit DMA engine registers */
#define HME_ETX_REG_SIZE 0x2000
#define HME_ETXI_PENDING 0x0
#define HME_ETXI_RING 0x8
#define HME_ETXI_RING_ADDR 0xffffff00
#define HME_ETXI_RING_OFFSET 0xff
#define HME_ETXI_RSIZE 0x2c

/* ERX: receive DMA engine registers */
#define HME_ERX_REG_SIZE 0x2000
#define HME_ERXI_CFG 0x0
#define HME_ERX_CFG_RINGSIZE 0x600
#define HME_ERX_CFG_RINGSIZE_SHIFT 9
#define HME_ERX_CFG_BYTEOFFSET 0x38
#define HME_ERX_CFG_BYTEOFFSET_SHIFT 3
#define HME_ERX_CFG_CSUMSTART 0x7f0000
#define HME_ERX_CFG_CSUMSHIFT 16
#define HME_ERXI_RING 0x4
#define HME_ERXI_RING_ADDR 0xffffff00
#define HME_ERXI_RING_OFFSET 0xff

/* MAC: frame filtering, MAC address and hash table registers */
#define HME_MAC_REG_SIZE 0x1000
#define HME_MACI_TXCFG 0x20c
#define HME_MAC_TXCFG_ENABLE 0x1
#define HME_MACI_RXCFG 0x30c
#define HME_MAC_RXCFG_ENABLE 0x1
#define HME_MAC_RXCFG_PMISC 0x40
#define HME_MAC_RXCFG_HENABLE 0x800
#define HME_MACI_MACADDR2 0x318
#define HME_MACI_MACADDR1 0x31c
#define HME_MACI_MACADDR0 0x320
#define HME_MACI_HASHTAB3 0x340
#define HME_MACI_HASHTAB2 0x344
#define HME_MACI_HASHTAB1 0x348
#define HME_MACI_HASHTAB0 0x34c

/* MIF: MII management interface (frame output register bitfields) */
#define HME_MIF_REG_SIZE 0x20
#define HME_MIFI_FO 0xc
#define HME_MIF_FO_ST 0xc0000000
#define HME_MIF_FO_ST_SHIFT 30
#define HME_MIF_FO_OPC 0x30000000
#define HME_MIF_FO_OPC_SHIFT 28
#define HME_MIF_FO_PHYAD 0x0f800000
#define HME_MIF_FO_PHYAD_SHIFT 23
#define HME_MIF_FO_REGAD 0x007c0000
#define HME_MIF_FO_REGAD_SHIFT 18
#define HME_MIF_FO_TAMSB 0x20000
#define HME_MIF_FO_TALSB 0x10000
#define HME_MIF_FO_DATA 0xffff
#define HME_MIFI_CFG 0x10
#define HME_MIF_CFG_MDI0 0x100
#define HME_MIF_CFG_MDI1 0x200
#define HME_MIFI_IMASK 0x14
#define HME_MIFI_STAT 0x18

/* Wired HME PHY addresses */
#define HME_PHYAD_INTERNAL 1
#define HME_PHYAD_EXTERNAL 0

/* MII frame field values (start delimiter and opcodes) */
#define MII_COMMAND_START 0x1
#define MII_COMMAND_READ 0x2
#define MII_COMMAND_WRITE 0x1

#define TYPE_SUNHME "sunhme"
OBJECT_DECLARE_SIMPLE_TYPE(SunHMEState, SUNHME)

/* Maximum size of buffer */
#define HME_FIFO_SIZE 0x800

/* Size of TX/RX descriptor */
#define HME_DESC_SIZE 0x8

/* Descriptor status bits; note OFL (RX overflow) and SOP (TX start of
   packet) share bit 30 — the meaning depends on ring direction */
#define HME_XD_OWN 0x80000000
#define HME_XD_OFL 0x40000000
#define HME_XD_SOP 0x40000000
#define HME_XD_EOP 0x20000000
#define HME_XD_RXLENMSK 0x3fff0000
#define HME_XD_RXLENSHIFT 16
#define HME_XD_RXCKSUM 0xffff
#define HME_XD_TXLENMSK 0x00001fff
#define HME_XD_TXCKSUM 0x10000000
#define HME_XD_TXCSSTUFF 0xff00000
#define HME_XD_TXCSSTUFFSHIFT 20
#define HME_XD_TXCSSTART 0xfc000
#define HME_XD_TXCSSTARTSHIFT 14

/* Number of 16-bit PHY (MII) registers emulated */
#define HME_MII_REGS_SIZE 0x20
/* Device state for the emulated Happy Meal Ethernet PCI NIC */
struct SunHMEState {
    /*< private >*/
    PCIDevice parent_obj;

    NICState *nic;       /* network backend queue */
    NICConf conf;        /* user-configurable NIC properties (MAC etc.) */

    MemoryRegion hme;    /* container region for BAR 0 */
    MemoryRegion sebreg; /* SEB sub-region at offset 0x0 */
    MemoryRegion etxreg; /* ETX sub-region at offset 0x2000 */
    MemoryRegion erxreg; /* ERX sub-region at offset 0x4000 */
    MemoryRegion macreg; /* MAC sub-region at offset 0x6000 */
    MemoryRegion mifreg; /* MIF sub-region at offset 0x7000 */

    /* Register banks, one 32-bit word per 4-byte register offset */
    uint32_t sebregs[HME_SEB_REG_SIZE >> 2];
    uint32_t etxregs[HME_ETX_REG_SIZE >> 2];
    uint32_t erxregs[HME_ERX_REG_SIZE >> 2];
    uint32_t macregs[HME_MAC_REG_SIZE >> 2];
    uint32_t mifregs[HME_MIF_REG_SIZE >> 2];

    /* Internal PHY register file, indexed by MII register number */
    uint16_t miiregs[HME_MII_REGS_SIZE];
};
/* qdev properties: the standard NIC property set (mac, netdev, ...) */
static Property sunhme_properties[] = {
    DEFINE_NIC_PROPERTIES(SunHMEState, conf),
    DEFINE_PROP_END_OF_LIST(),
};
  147. static void sunhme_reset_tx(SunHMEState *s)
  148. {
  149. /* Indicate TX reset complete */
  150. s->sebregs[HME_SEBI_RESET] &= ~HME_SEB_RESET_ETX;
  151. }
  152. static void sunhme_reset_rx(SunHMEState *s)
  153. {
  154. /* Indicate RX reset complete */
  155. s->sebregs[HME_SEBI_RESET] &= ~HME_SEB_RESET_ERX;
  156. }
  157. static void sunhme_update_irq(SunHMEState *s)
  158. {
  159. PCIDevice *d = PCI_DEVICE(s);
  160. int level;
  161. /* MIF interrupt mask (16-bit) */
  162. uint32_t mifmask = ~(s->mifregs[HME_MIFI_IMASK >> 2]) & 0xffff;
  163. uint32_t mif = s->mifregs[HME_MIFI_STAT >> 2] & mifmask;
  164. /* Main SEB interrupt mask (include MIF status from above) */
  165. uint32_t sebmask = ~(s->sebregs[HME_SEBI_IMASK >> 2]) &
  166. ~HME_SEB_STAT_MIFIRQ;
  167. uint32_t seb = s->sebregs[HME_SEBI_STAT >> 2] & sebmask;
  168. if (mif) {
  169. seb |= HME_SEB_STAT_MIFIRQ;
  170. }
  171. level = (seb ? 1 : 0);
  172. trace_sunhme_update_irq(mifmask, mif, sebmask, seb, level);
  173. pci_set_irq(d, level);
  174. }
  175. static void sunhme_seb_write(void *opaque, hwaddr addr,
  176. uint64_t val, unsigned size)
  177. {
  178. SunHMEState *s = SUNHME(opaque);
  179. trace_sunhme_seb_write(addr, val);
  180. /* Handly buggy Linux drivers before 4.13 which have
  181. the wrong offsets for HME_SEBI_STAT and HME_SEBI_IMASK */
  182. switch (addr) {
  183. case HME_SEBI_STAT_LINUXBUG:
  184. addr = HME_SEBI_STAT;
  185. break;
  186. case HME_SEBI_IMASK_LINUXBUG:
  187. addr = HME_SEBI_IMASK;
  188. break;
  189. default:
  190. break;
  191. }
  192. switch (addr) {
  193. case HME_SEBI_RESET:
  194. if (val & HME_SEB_RESET_ETX) {
  195. sunhme_reset_tx(s);
  196. }
  197. if (val & HME_SEB_RESET_ERX) {
  198. sunhme_reset_rx(s);
  199. }
  200. val = s->sebregs[HME_SEBI_RESET >> 2];
  201. break;
  202. }
  203. s->sebregs[addr >> 2] = val;
  204. }
  205. static uint64_t sunhme_seb_read(void *opaque, hwaddr addr,
  206. unsigned size)
  207. {
  208. SunHMEState *s = SUNHME(opaque);
  209. uint64_t val;
  210. /* Handly buggy Linux drivers before 4.13 which have
  211. the wrong offsets for HME_SEBI_STAT and HME_SEBI_IMASK */
  212. switch (addr) {
  213. case HME_SEBI_STAT_LINUXBUG:
  214. addr = HME_SEBI_STAT;
  215. break;
  216. case HME_SEBI_IMASK_LINUXBUG:
  217. addr = HME_SEBI_IMASK;
  218. break;
  219. default:
  220. break;
  221. }
  222. val = s->sebregs[addr >> 2];
  223. switch (addr) {
  224. case HME_SEBI_STAT:
  225. /* Autoclear status (except MIF) */
  226. s->sebregs[HME_SEBI_STAT >> 2] &= HME_SEB_STAT_MIFIRQ;
  227. sunhme_update_irq(s);
  228. break;
  229. }
  230. trace_sunhme_seb_read(addr, val);
  231. return val;
  232. }
/* SEB register bank: 32-bit little-endian accesses only */
static const MemoryRegionOps sunhme_seb_ops = {
    .read = sunhme_seb_read,
    .write = sunhme_seb_write,
    .endianness = DEVICE_LITTLE_ENDIAN,
    .valid = {
        .min_access_size = 4,
        .max_access_size = 4,
    },
};
  242. static void sunhme_transmit(SunHMEState *s);
  243. static void sunhme_etx_write(void *opaque, hwaddr addr,
  244. uint64_t val, unsigned size)
  245. {
  246. SunHMEState *s = SUNHME(opaque);
  247. trace_sunhme_etx_write(addr, val);
  248. switch (addr) {
  249. case HME_ETXI_PENDING:
  250. if (val) {
  251. sunhme_transmit(s);
  252. }
  253. break;
  254. }
  255. s->etxregs[addr >> 2] = val;
  256. }
  257. static uint64_t sunhme_etx_read(void *opaque, hwaddr addr,
  258. unsigned size)
  259. {
  260. SunHMEState *s = SUNHME(opaque);
  261. uint64_t val;
  262. val = s->etxregs[addr >> 2];
  263. trace_sunhme_etx_read(addr, val);
  264. return val;
  265. }
/* ETX register bank: 32-bit little-endian accesses only */
static const MemoryRegionOps sunhme_etx_ops = {
    .read = sunhme_etx_read,
    .write = sunhme_etx_write,
    .endianness = DEVICE_LITTLE_ENDIAN,
    .valid = {
        .min_access_size = 4,
        .max_access_size = 4,
    },
};
  275. static void sunhme_erx_write(void *opaque, hwaddr addr,
  276. uint64_t val, unsigned size)
  277. {
  278. SunHMEState *s = SUNHME(opaque);
  279. trace_sunhme_erx_write(addr, val);
  280. s->erxregs[addr >> 2] = val;
  281. }
  282. static uint64_t sunhme_erx_read(void *opaque, hwaddr addr,
  283. unsigned size)
  284. {
  285. SunHMEState *s = SUNHME(opaque);
  286. uint64_t val;
  287. val = s->erxregs[addr >> 2];
  288. trace_sunhme_erx_read(addr, val);
  289. return val;
  290. }
/* ERX register bank: 32-bit little-endian accesses only */
static const MemoryRegionOps sunhme_erx_ops = {
    .read = sunhme_erx_read,
    .write = sunhme_erx_write,
    .endianness = DEVICE_LITTLE_ENDIAN,
    .valid = {
        .min_access_size = 4,
        .max_access_size = 4,
    },
};
  300. static void sunhme_mac_write(void *opaque, hwaddr addr,
  301. uint64_t val, unsigned size)
  302. {
  303. SunHMEState *s = SUNHME(opaque);
  304. uint64_t oldval = s->macregs[addr >> 2];
  305. trace_sunhme_mac_write(addr, val);
  306. s->macregs[addr >> 2] = val;
  307. switch (addr) {
  308. case HME_MACI_RXCFG:
  309. if (!(oldval & HME_MAC_RXCFG_ENABLE) &&
  310. (val & HME_MAC_RXCFG_ENABLE)) {
  311. qemu_flush_queued_packets(qemu_get_queue(s->nic));
  312. }
  313. break;
  314. }
  315. }
  316. static uint64_t sunhme_mac_read(void *opaque, hwaddr addr,
  317. unsigned size)
  318. {
  319. SunHMEState *s = SUNHME(opaque);
  320. uint64_t val;
  321. val = s->macregs[addr >> 2];
  322. trace_sunhme_mac_read(addr, val);
  323. return val;
  324. }
/* MAC register bank: 32-bit little-endian accesses only */
static const MemoryRegionOps sunhme_mac_ops = {
    .read = sunhme_mac_read,
    .write = sunhme_mac_write,
    .endianness = DEVICE_LITTLE_ENDIAN,
    .valid = {
        .min_access_size = 4,
        .max_access_size = 4,
    },
};
  334. static void sunhme_mii_write(SunHMEState *s, uint8_t reg, uint16_t data)
  335. {
  336. trace_sunhme_mii_write(reg, data);
  337. switch (reg) {
  338. case MII_BMCR:
  339. if (data & MII_BMCR_RESET) {
  340. /* Autoclear reset bit, enable auto negotiation */
  341. data &= ~MII_BMCR_RESET;
  342. data |= MII_BMCR_AUTOEN;
  343. }
  344. if (data & MII_BMCR_ANRESTART) {
  345. /* Autoclear auto negotiation restart */
  346. data &= ~MII_BMCR_ANRESTART;
  347. /* Indicate negotiation complete */
  348. s->miiregs[MII_BMSR] |= MII_BMSR_AN_COMP;
  349. if (!qemu_get_queue(s->nic)->link_down) {
  350. s->miiregs[MII_ANLPAR] |= MII_ANLPAR_TXFD;
  351. s->miiregs[MII_BMSR] |= MII_BMSR_LINK_ST;
  352. }
  353. }
  354. break;
  355. }
  356. s->miiregs[reg] = data;
  357. }
  358. static uint16_t sunhme_mii_read(SunHMEState *s, uint8_t reg)
  359. {
  360. uint16_t data = s->miiregs[reg];
  361. trace_sunhme_mii_read(reg, data);
  362. return data;
  363. }
  364. static void sunhme_mif_write(void *opaque, hwaddr addr,
  365. uint64_t val, unsigned size)
  366. {
  367. SunHMEState *s = SUNHME(opaque);
  368. uint8_t cmd, reg;
  369. uint16_t data;
  370. trace_sunhme_mif_write(addr, val);
  371. switch (addr) {
  372. case HME_MIFI_CFG:
  373. /* Mask the read-only bits */
  374. val &= ~(HME_MIF_CFG_MDI0 | HME_MIF_CFG_MDI1);
  375. val |= s->mifregs[HME_MIFI_CFG >> 2] &
  376. (HME_MIF_CFG_MDI0 | HME_MIF_CFG_MDI1);
  377. break;
  378. case HME_MIFI_FO:
  379. /* Detect start of MII command */
  380. if ((val & HME_MIF_FO_ST) >> HME_MIF_FO_ST_SHIFT
  381. != MII_COMMAND_START) {
  382. val |= HME_MIF_FO_TALSB;
  383. break;
  384. }
  385. /* Internal phy only */
  386. if ((val & HME_MIF_FO_PHYAD) >> HME_MIF_FO_PHYAD_SHIFT
  387. != HME_PHYAD_INTERNAL) {
  388. val |= HME_MIF_FO_TALSB;
  389. break;
  390. }
  391. cmd = (val & HME_MIF_FO_OPC) >> HME_MIF_FO_OPC_SHIFT;
  392. reg = (val & HME_MIF_FO_REGAD) >> HME_MIF_FO_REGAD_SHIFT;
  393. data = (val & HME_MIF_FO_DATA);
  394. switch (cmd) {
  395. case MII_COMMAND_WRITE:
  396. sunhme_mii_write(s, reg, data);
  397. break;
  398. case MII_COMMAND_READ:
  399. val &= ~HME_MIF_FO_DATA;
  400. val |= sunhme_mii_read(s, reg);
  401. break;
  402. }
  403. val |= HME_MIF_FO_TALSB;
  404. break;
  405. }
  406. s->mifregs[addr >> 2] = val;
  407. }
  408. static uint64_t sunhme_mif_read(void *opaque, hwaddr addr,
  409. unsigned size)
  410. {
  411. SunHMEState *s = SUNHME(opaque);
  412. uint64_t val;
  413. val = s->mifregs[addr >> 2];
  414. switch (addr) {
  415. case HME_MIFI_STAT:
  416. /* Autoclear MIF interrupt status */
  417. s->mifregs[HME_MIFI_STAT >> 2] = 0;
  418. sunhme_update_irq(s);
  419. break;
  420. }
  421. trace_sunhme_mif_read(addr, val);
  422. return val;
  423. }
/* MIF register bank: 32-bit little-endian accesses only */
static const MemoryRegionOps sunhme_mif_ops = {
    .read = sunhme_mif_read,
    .write = sunhme_mif_write,
    .endianness = DEVICE_LITTLE_ENDIAN,
    .valid = {
        .min_access_size = 4,
        .max_access_size = 4,
    },
};
  433. static void sunhme_transmit_frame(SunHMEState *s, uint8_t *buf, int size)
  434. {
  435. qemu_send_packet(qemu_get_queue(s->nic), buf, size);
  436. }
  437. static inline int sunhme_get_tx_ring_count(SunHMEState *s)
  438. {
  439. return (s->etxregs[HME_ETXI_RSIZE >> 2] + 1) << 4;
  440. }
  441. static inline int sunhme_get_tx_ring_nr(SunHMEState *s)
  442. {
  443. return s->etxregs[HME_ETXI_RING >> 2] & HME_ETXI_RING_OFFSET;
  444. }
  445. static inline void sunhme_set_tx_ring_nr(SunHMEState *s, int i)
  446. {
  447. uint32_t ring = s->etxregs[HME_ETXI_RING >> 2] & ~HME_ETXI_RING_OFFSET;
  448. ring |= i & HME_ETXI_RING_OFFSET;
  449. s->etxregs[HME_ETXI_RING >> 2] = ring;
  450. }
/*
 * Walk the TX descriptor ring, assembling descriptor fragments into a
 * frame buffer and transmitting each completed frame (with optional
 * TX checksum stuffing).  Stops at the first descriptor not owned by
 * the device, then raises the TXALL interrupt.
 */
static void sunhme_transmit(SunHMEState *s)
{
    PCIDevice *d = PCI_DEVICE(s);
    dma_addr_t tb, addr;
    uint32_t intstatus, status, buffer, sum = 0;
    int cr, nr, len, xmit_pos, csum_offset = 0, csum_stuff_offset = 0;
    uint16_t csum = 0;
    uint8_t xmit_buffer[HME_FIFO_SIZE];

    /* Ring base address and current/total descriptor counts */
    tb = s->etxregs[HME_ETXI_RING >> 2] & HME_ETXI_RING_ADDR;
    nr = sunhme_get_tx_ring_count(s);
    cr = sunhme_get_tx_ring_nr(s);

    pci_dma_read(d, tb + cr * HME_DESC_SIZE, &status, 4);
    pci_dma_read(d, tb + cr * HME_DESC_SIZE + 4, &buffer, 4);

    xmit_pos = 0;
    while (status & HME_XD_OWN) {
        trace_sunhme_tx_desc(buffer, status, cr, nr);

        /* Copy data into transmit buffer, clamped to the FIFO size */
        addr = buffer;
        len = status & HME_XD_TXLENMSK;

        if (xmit_pos + len > HME_FIFO_SIZE) {
            len = HME_FIFO_SIZE - xmit_pos;
        }

        pci_dma_read(d, addr, &xmit_buffer[xmit_pos], len);
        xmit_pos += len;

        /* Detect start of packet for TX checksum: latch the checksum
           start and stuff offsets from the descriptor */
        if (status & HME_XD_SOP) {
            sum = 0;
            csum_offset = (status & HME_XD_TXCSSTART) >> HME_XD_TXCSSTARTSHIFT;
            csum_stuff_offset = (status & HME_XD_TXCSSTUFF) >>
                                HME_XD_TXCSSTUFFSHIFT;
        }

        if (status & HME_XD_TXCKSUM) {
            /* Only start calculation from csum_offset */
            if (xmit_pos - len <= csum_offset && xmit_pos > csum_offset) {
                /* csum_offset falls within this fragment */
                sum += net_checksum_add(xmit_pos - csum_offset,
                                        xmit_buffer + csum_offset);
                trace_sunhme_tx_xsum_add(csum_offset, xmit_pos - csum_offset);
            } else {
                /* Fragment lies entirely after csum_offset */
                sum += net_checksum_add(len, xmit_buffer + xmit_pos - len);
                trace_sunhme_tx_xsum_add(xmit_pos - len, len);
            }
        }

        /* Detect end of packet for TX checksum */
        if (status & HME_XD_EOP) {
            /* Stuff the checksum if required */
            if (status & HME_XD_TXCKSUM) {
                csum = net_checksum_finish(sum);
                stw_be_p(xmit_buffer + csum_stuff_offset, csum);
                trace_sunhme_tx_xsum_stuff(csum, csum_stuff_offset);
            }

            /* Only actually send while the MAC TX path is enabled */
            if (s->macregs[HME_MACI_TXCFG >> 2] & HME_MAC_TXCFG_ENABLE) {
                sunhme_transmit_frame(s, xmit_buffer, xmit_pos);
                trace_sunhme_tx_done(xmit_pos);
            }
        }

        /* Update status: hand the descriptor back to the host */
        status &= ~HME_XD_OWN;
        pci_dma_write(d, tb + cr * HME_DESC_SIZE, &status, 4);

        /* Move onto next descriptor, wrapping at the ring end */
        cr++;
        if (cr >= nr) {
            cr = 0;
        }

        sunhme_set_tx_ring_nr(s, cr);

        pci_dma_read(d, tb + cr * HME_DESC_SIZE, &status, 4);
        pci_dma_read(d, tb + cr * HME_DESC_SIZE + 4, &buffer, 4);

        /* Indicate TX complete */
        intstatus = s->sebregs[HME_SEBI_STAT >> 2];
        intstatus |= HME_SEB_STAT_HOSTTOTX;
        s->sebregs[HME_SEBI_STAT >> 2] = intstatus;

        /* Autoclear TX pending */
        s->etxregs[HME_ETXI_PENDING >> 2] = 0;

        sunhme_update_irq(s);
    }

    /* TX FIFO now clear */
    intstatus = s->sebregs[HME_SEBI_STAT >> 2];
    intstatus |= HME_SEB_STAT_TXALL;
    s->sebregs[HME_SEBI_STAT >> 2] = intstatus;
    sunhme_update_irq(s);
}
  531. static bool sunhme_can_receive(NetClientState *nc)
  532. {
  533. SunHMEState *s = qemu_get_nic_opaque(nc);
  534. return !!(s->macregs[HME_MACI_RXCFG >> 2] & HME_MAC_RXCFG_ENABLE);
  535. }
  536. static void sunhme_link_status_changed(NetClientState *nc)
  537. {
  538. SunHMEState *s = qemu_get_nic_opaque(nc);
  539. if (nc->link_down) {
  540. s->miiregs[MII_ANLPAR] &= ~MII_ANLPAR_TXFD;
  541. s->miiregs[MII_BMSR] &= ~MII_BMSR_LINK_ST;
  542. } else {
  543. s->miiregs[MII_ANLPAR] |= MII_ANLPAR_TXFD;
  544. s->miiregs[MII_BMSR] |= MII_BMSR_LINK_ST;
  545. }
  546. /* Exact bits unknown */
  547. s->mifregs[HME_MIFI_STAT >> 2] = 0xffff;
  548. sunhme_update_irq(s);
  549. }
  550. static inline int sunhme_get_rx_ring_count(SunHMEState *s)
  551. {
  552. uint32_t rings = (s->erxregs[HME_ERXI_CFG >> 2] & HME_ERX_CFG_RINGSIZE)
  553. >> HME_ERX_CFG_RINGSIZE_SHIFT;
  554. switch (rings) {
  555. case 0:
  556. return 32;
  557. case 1:
  558. return 64;
  559. case 2:
  560. return 128;
  561. case 3:
  562. return 256;
  563. }
  564. return 0;
  565. }
  566. static inline int sunhme_get_rx_ring_nr(SunHMEState *s)
  567. {
  568. return s->erxregs[HME_ERXI_RING >> 2] & HME_ERXI_RING_OFFSET;
  569. }
  570. static inline void sunhme_set_rx_ring_nr(SunHMEState *s, int i)
  571. {
  572. uint32_t ring = s->erxregs[HME_ERXI_RING >> 2] & ~HME_ERXI_RING_OFFSET;
  573. ring |= i & HME_ERXI_RING_OFFSET;
  574. s->erxregs[HME_ERXI_RING >> 2] = ring;
  575. }
/* Frames shorter than this are zero-padded to the Ethernet minimum */
#define MIN_BUF_SIZE 60

/*
 * Receive one frame from the network backend: filter on destination
 * MAC (local/broadcast/hash/promiscuous), DMA the payload into the
 * current RX descriptor's buffer, store length and checksum back into
 * the descriptor, and raise the RXTOHOST interrupt.
 *
 * Returns bytes consumed, 0 if RX is disabled, or -1 when the frame
 * is rejected or no descriptor is available.
 */
static ssize_t sunhme_receive(NetClientState *nc, const uint8_t *buf,
                              size_t size)
{
    SunHMEState *s = qemu_get_nic_opaque(nc);
    PCIDevice *d = PCI_DEVICE(s);
    dma_addr_t rb, addr;
    uint32_t intstatus, status, buffer, buffersize, sum;
    uint16_t csum;
    uint8_t buf1[60];
    int nr, cr, len, rxoffset, csum_offset;

    trace_sunhme_rx_incoming(size);

    /* Do nothing if MAC RX disabled */
    if (!(s->macregs[HME_MACI_RXCFG >> 2] & HME_MAC_RXCFG_ENABLE)) {
        return 0;
    }

    trace_sunhme_rx_filter_destmac(buf[0], buf[1], buf[2],
                                   buf[3], buf[4], buf[5]);

    /* Check destination MAC address (skipped in promiscuous mode) */
    if (!(s->macregs[HME_MACI_RXCFG >> 2] & HME_MAC_RXCFG_PMISC)) {
        /* Try and match local MAC address (stored as three 16-bit words) */
        if (((s->macregs[HME_MACI_MACADDR0 >> 2] & 0xff00) >> 8) == buf[0] &&
            (s->macregs[HME_MACI_MACADDR0 >> 2] & 0xff) == buf[1] &&
            ((s->macregs[HME_MACI_MACADDR1 >> 2] & 0xff00) >> 8) == buf[2] &&
            (s->macregs[HME_MACI_MACADDR1 >> 2] & 0xff) == buf[3] &&
            ((s->macregs[HME_MACI_MACADDR2 >> 2] & 0xff00) >> 8) == buf[4] &&
            (s->macregs[HME_MACI_MACADDR2 >> 2] & 0xff) == buf[5]) {
            /* Matched local MAC address */
            trace_sunhme_rx_filter_local_match();
        } else if (buf[0] == 0xff && buf[1] == 0xff && buf[2] == 0xff &&
                   buf[3] == 0xff && buf[4] == 0xff && buf[5] == 0xff) {
            /* Matched broadcast address */
            trace_sunhme_rx_filter_bcast_match();
        } else if (s->macregs[HME_MACI_RXCFG >> 2] & HME_MAC_RXCFG_HENABLE) {
            /* Didn't match local address, check hash filter: top 6 bits
               of the little-endian CRC32 index a 64-bit table spread
               over four consecutive 16-bit registers */
            int mcast_idx = net_crc32_le(buf, ETH_ALEN) >> 26;
            if (!(s->macregs[(HME_MACI_HASHTAB0 >> 2) - (mcast_idx >> 4)] &
                  (1 << (mcast_idx & 0xf)))) {
                /* Didn't match hash filter */
                trace_sunhme_rx_filter_hash_nomatch();
                trace_sunhme_rx_filter_reject();
                return -1;
            } else {
                trace_sunhme_rx_filter_hash_match();
            }
        } else {
            /* Not for us */
            trace_sunhme_rx_filter_reject();
            return -1;
        }
    } else {
        trace_sunhme_rx_filter_promisc_match();
    }

    trace_sunhme_rx_filter_accept();

    /* If too small buffer, then expand it (zero-padded) */
    if (size < MIN_BUF_SIZE) {
        memcpy(buf1, buf, size);
        memset(buf1 + size, 0, MIN_BUF_SIZE - size);
        buf = buf1;
        size = MIN_BUF_SIZE;
    }

    /* Fetch the current RX descriptor (status word + buffer address) */
    rb = s->erxregs[HME_ERXI_RING >> 2] & HME_ERXI_RING_ADDR;
    nr = sunhme_get_rx_ring_count(s);
    cr = sunhme_get_rx_ring_nr(s);

    pci_dma_read(d, rb + cr * HME_DESC_SIZE, &status, 4);
    pci_dma_read(d, rb + cr * HME_DESC_SIZE + 4, &buffer, 4);

    /* If we don't own the current descriptor then indicate overflow error */
    if (!(status & HME_XD_OWN)) {
        s->sebregs[HME_SEBI_STAT >> 2] |= HME_SEB_STAT_NORXD;
        sunhme_update_irq(s);
        trace_sunhme_rx_norxd();
        return -1;
    }

    /* DMA the frame into the guest buffer, honouring the byte offset */
    rxoffset = (s->erxregs[HME_ERXI_CFG >> 2] & HME_ERX_CFG_BYTEOFFSET) >>
               HME_ERX_CFG_BYTEOFFSET_SHIFT;
    addr = buffer + rxoffset;
    buffersize = (status & HME_XD_RXLENMSK) >> HME_XD_RXLENSHIFT;

    /* Detect receive overflow */
    len = size;
    if (size > buffersize) {
        status |= HME_XD_OFL;
        len = buffersize;
    }

    pci_dma_write(d, addr, buf, len);

    trace_sunhme_rx_desc(buffer, rxoffset, status, len, cr, nr);

    /* Calculate the receive checksum (CSUMSTART is in half-words,
       hence the extra "<< 1" to convert to a byte offset) */
    csum_offset = (s->erxregs[HME_ERXI_CFG >> 2] & HME_ERX_CFG_CSUMSTART) >>
                  HME_ERX_CFG_CSUMSHIFT << 1;
    sum = 0;
    sum += net_checksum_add(len - csum_offset, (uint8_t *)buf + csum_offset);
    csum = net_checksum_finish(sum);

    trace_sunhme_rx_xsum_calc(csum);

    /* Update status: hand descriptor back with length and checksum */
    status &= ~HME_XD_OWN;
    status &= ~HME_XD_RXLENMSK;
    status |= len << HME_XD_RXLENSHIFT;
    status &= ~HME_XD_RXCKSUM;
    status |= csum;

    pci_dma_write(d, rb + cr * HME_DESC_SIZE, &status, 4);

    /* Advance the ring pointer, wrapping at the ring end */
    cr++;
    if (cr >= nr) {
        cr = 0;
    }

    sunhme_set_rx_ring_nr(s, cr);

    /* Indicate RX complete */
    intstatus = s->sebregs[HME_SEBI_STAT >> 2];
    intstatus |= HME_SEB_STAT_RXTOHOST;
    s->sebregs[HME_SEBI_STAT >> 2] = intstatus;

    sunhme_update_irq(s);

    return len;
}
/* Network backend callbacks for the emulated NIC */
static NetClientInfo net_sunhme_info = {
    .type = NET_CLIENT_DRIVER_NIC,
    .size = sizeof(NICState),
    .can_receive = sunhme_can_receive,
    .receive = sunhme_receive,
    .link_status_changed = sunhme_link_status_changed,
};
/*
 * PCI realize: publish the 32K register BAR containing the five
 * register sub-banks (SEB/ETX/ERX/MAC/MIF) and create the NIC backend.
 */
static void sunhme_realize(PCIDevice *pci_dev, Error **errp)
{
    SunHMEState *s = SUNHME(pci_dev);
    DeviceState *d = DEVICE(pci_dev);
    uint8_t *pci_conf;

    pci_conf = pci_dev->config;
    pci_conf[PCI_INTERRUPT_PIN] = 1;    /* interrupt pin A */

    /* BAR 0: container for all register sub-regions */
    memory_region_init(&s->hme, OBJECT(pci_dev), "sunhme", HME_REG_SIZE);
    pci_register_bar(pci_dev, 0, PCI_BASE_ADDRESS_SPACE_MEMORY, &s->hme);

    memory_region_init_io(&s->sebreg, OBJECT(pci_dev), &sunhme_seb_ops, s,
                          "sunhme.seb", HME_SEB_REG_SIZE);
    memory_region_add_subregion(&s->hme, 0, &s->sebreg);

    memory_region_init_io(&s->etxreg, OBJECT(pci_dev), &sunhme_etx_ops, s,
                          "sunhme.etx", HME_ETX_REG_SIZE);
    memory_region_add_subregion(&s->hme, 0x2000, &s->etxreg);

    memory_region_init_io(&s->erxreg, OBJECT(pci_dev), &sunhme_erx_ops, s,
                          "sunhme.erx", HME_ERX_REG_SIZE);
    memory_region_add_subregion(&s->hme, 0x4000, &s->erxreg);

    memory_region_init_io(&s->macreg, OBJECT(pci_dev), &sunhme_mac_ops, s,
                          "sunhme.mac", HME_MAC_REG_SIZE);
    memory_region_add_subregion(&s->hme, 0x6000, &s->macreg);

    memory_region_init_io(&s->mifreg, OBJECT(pci_dev), &sunhme_mif_ops, s,
                          "sunhme.mif", HME_MIF_REG_SIZE);
    memory_region_add_subregion(&s->hme, 0x7000, &s->mifreg);

    /* Create the NIC backend with the configured (or default) MAC */
    qemu_macaddr_default_if_unset(&s->conf.macaddr);
    s->nic = qemu_new_nic(&net_sunhme_info, &s->conf,
                          object_get_typename(OBJECT(d)), d->id, s);
    qemu_format_nic_info_str(qemu_get_queue(s->nic), s->conf.macaddr.a);
}
/* Instance init: expose the standard "bootindex" property for boot order */
static void sunhme_instance_init(Object *obj)
{
    SunHMEState *s = SUNHME(obj);

    device_add_bootindex_property(obj, &s->conf.bootindex,
                                  "bootindex", "/ethernet-phy@0",
                                  DEVICE(obj));
}
/*
 * Device reset: put the internal PHY/MII registers into a sane
 * post-reset state and program the default interrupt masks.
 */
static void sunhme_reset(DeviceState *ds)
{
    SunHMEState *s = SUNHME(ds);

    /* Configure internal transceiver */
    s->mifregs[HME_MIFI_CFG >> 2] |= HME_MIF_CFG_MDI0;

    /* Advertise auto, 100Mbps FD */
    s->miiregs[MII_ANAR] = MII_ANAR_TXFD;
    s->miiregs[MII_BMSR] = MII_BMSR_AUTONEG | MII_BMSR_100TX_FD |
                           MII_BMSR_AN_COMP;

    /* Only report link up if the backend link is up */
    if (!qemu_get_queue(s->nic)->link_down) {
        s->miiregs[MII_ANLPAR] |= MII_ANLPAR_TXFD;
        s->miiregs[MII_BMSR] |= MII_BMSR_LINK_ST;
    }

    /* Set manufacturer (DP83840 PHY ID) */
    s->miiregs[MII_PHYID1] = DP83840_PHYID1;
    s->miiregs[MII_PHYID2] = DP83840_PHYID2;

    /* Configure default interrupt mask */
    s->mifregs[HME_MIFI_IMASK >> 2] = 0xffff;
    s->sebregs[HME_SEBI_IMASK >> 2] = 0xff7fffff;
}
/* Migration state: PCI state, MAC address and all register banks */
static const VMStateDescription vmstate_hme = {
    .name = "sunhme",
    .version_id = 0,
    .minimum_version_id = 0,
    .fields = (VMStateField[]) {
        VMSTATE_PCI_DEVICE(parent_obj, SunHMEState),
        VMSTATE_MACADDR(conf.macaddr, SunHMEState),
        VMSTATE_UINT32_ARRAY(sebregs, SunHMEState, (HME_SEB_REG_SIZE >> 2)),
        VMSTATE_UINT32_ARRAY(etxregs, SunHMEState, (HME_ETX_REG_SIZE >> 2)),
        VMSTATE_UINT32_ARRAY(erxregs, SunHMEState, (HME_ERX_REG_SIZE >> 2)),
        VMSTATE_UINT32_ARRAY(macregs, SunHMEState, (HME_MAC_REG_SIZE >> 2)),
        VMSTATE_UINT32_ARRAY(mifregs, SunHMEState, (HME_MIF_REG_SIZE >> 2)),
        VMSTATE_UINT16_ARRAY(miiregs, SunHMEState, HME_MII_REGS_SIZE),
        VMSTATE_END_OF_LIST()
    }
};
  766. static void sunhme_class_init(ObjectClass *klass, void *data)
  767. {
  768. DeviceClass *dc = DEVICE_CLASS(klass);
  769. PCIDeviceClass *k = PCI_DEVICE_CLASS(klass);
  770. k->realize = sunhme_realize;
  771. k->vendor_id = PCI_VENDOR_ID_SUN;
  772. k->device_id = PCI_DEVICE_ID_SUN_HME;
  773. k->class_id = PCI_CLASS_NETWORK_ETHERNET;
  774. dc->vmsd = &vmstate_hme;
  775. dc->reset = sunhme_reset;
  776. device_class_set_props(dc, sunhme_properties);
  777. set_bit(DEVICE_CATEGORY_NETWORK, dc->categories);
  778. }
/* QOM type registration data for the sunhme PCI device */
static const TypeInfo sunhme_info = {
    .name = TYPE_SUNHME,
    .parent = TYPE_PCI_DEVICE,
    .class_init = sunhme_class_init,
    .instance_size = sizeof(SunHMEState),
    .instance_init = sunhme_instance_init,
    .interfaces = (InterfaceInfo[]) {
        { INTERFACE_CONVENTIONAL_PCI_DEVICE },
        { }
    }
};
/* Register the device type with QOM at module init time */
static void sunhme_register_types(void)
{
    type_register_static(&sunhme_info);
}

type_init(sunhme_register_types)