sunhme.c 28 KB

12345678910111213141516171819202122232425262728293031323334353637383940414243444546474849505152535455565758596061626364656667686970717273747576777879808182838485868788899091929394959697989910010110210310410510610710810911011111211311411511611711811912012112212312412512612712812913013113213313413513613713813914014114214314414514614714814915015115215315415515615715815916016116216316416516616716816917017117217317417517617717817918018118218318418518618718818919019119219319419519619719819920020120220320420520620720820921021121221321421521621721821922022122222322422522622722822923023123223323423523623723823924024124224324424524624724824925025125225325425525625725825926026126226326426526626726826927027127227327427527627727827928028128228328428528628728828929029129229329429529629729829930030130230330430530630730830931031131231331431531631731831932032132232332432532632732832933033133233333433533633733833934034134234334434534634734834935035135235335435535635735835936036136236336436536636736836937037137237337437537637737837938038138238338438538638738838939039139239339439539639739839940040140240340440540640740840941041141241341441541641741841942042142242342442542642742842943043143243343443543643743843944044144244344444544644744844945045145245345445545645745845946046146246346446546646746846947047147247347447547647747847948048148248348448548648748848949049149249349449549649749849950050150250350450550650750850951051151251351451551651751851952052152252352452552652752852953053153253353453553653753853954054154254354454554654754854955055155255355455555655755855956056156256356456556656756856957057157257357457557657757857958058158258358458558658758858959059159259359459559659759859960060160260360460560660760860961061161261361461561661761861962062162262362462562662762862963063163263363463563663763863964064164264364464564664764864965065165265365465565665765865966066166266366466566666766866967067167267367467567667767867968068168268368468568668768868969069169269369469569669769869970070170270
3704705706707708709710711712713714715716717718719720721722723724725726727728729730731732733734735736737738739740741742743744745746747748749750751752753754755756757758759760761762763764765766767768769770771772773774775776777778779780781782783784785786787788789790791792793794795796797798799800801802803804805806807808809810811812813814815816817818819820821822823824825826827828829830831832833834835836837838839840841842843844845846847848849850851852853854855856857858859860861862863864865866867868869870871872873874875876877878879880881882883884885886887888889890891892893894895896897898899900901902903904905906907908909910911912913914915916917918919920921922923924925926927928929930931932933934935936937938939940941942943944945946947948949950951952953954955956957958959960961962963964965966967968969970971972973974975976977978979980981982
  1. /*
  2. * QEMU Sun Happy Meal Ethernet emulation
  3. *
  4. * Copyright (c) 2017 Mark Cave-Ayland
  5. *
  6. * Permission is hereby granted, free of charge, to any person obtaining a copy
  7. * of this software and associated documentation files (the "Software"), to deal
  8. * in the Software without restriction, including without limitation the rights
  9. * to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
  10. * copies of the Software, and to permit persons to whom the Software is
  11. * furnished to do so, subject to the following conditions:
  12. *
  13. * The above copyright notice and this permission notice shall be included in
  14. * all copies or substantial portions of the Software.
  15. *
  16. * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
  17. * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
  18. * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
  19. * THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
  20. * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
  21. * OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN
  22. * THE SOFTWARE.
  23. */
  24. #include "qemu/osdep.h"
  25. #include "hw/pci/pci.h"
  26. #include "hw/qdev-properties.h"
  27. #include "migration/vmstate.h"
  28. #include "hw/net/mii.h"
  29. #include "net/net.h"
  30. #include "qemu/module.h"
  31. #include "net/checksum.h"
  32. #include "net/eth.h"
  33. #include "sysemu/sysemu.h"
  34. #include "trace.h"
/* Overall size of the register bank exposed through PCI BAR 0 */
#define HME_REG_SIZE 0x8000

/* SEB (global) block: reset control, interrupt status and interrupt mask */
#define HME_SEB_REG_SIZE 0x2000
#define HME_SEBI_RESET 0x0
#define HME_SEB_RESET_ETX 0x1
#define HME_SEB_RESET_ERX 0x2
#define HME_SEBI_STAT 0x100
#define HME_SEBI_STAT_LINUXBUG 0x108    /* bad offset used by Linux < 4.13 */
#define HME_SEB_STAT_RXTOHOST 0x10000
#define HME_SEB_STAT_NORXD 0x20000
#define HME_SEB_STAT_MIFIRQ 0x800000
#define HME_SEB_STAT_HOSTTOTX 0x1000000
#define HME_SEB_STAT_TXALL 0x2000000
#define HME_SEBI_IMASK 0x104
#define HME_SEBI_IMASK_LINUXBUG 0x10c   /* bad offset used by Linux < 4.13 */

/* ETX block: transmit DMA engine */
#define HME_ETX_REG_SIZE 0x2000
#define HME_ETXI_PENDING 0x0
#define HME_ETXI_RING 0x8
#define HME_ETXI_RING_ADDR 0xffffff00
#define HME_ETXI_RING_OFFSET 0xff
#define HME_ETXI_RSIZE 0x2c

/* ERX block: receive DMA engine */
#define HME_ERX_REG_SIZE 0x2000
#define HME_ERXI_CFG 0x0
#define HME_ERX_CFG_RINGSIZE 0x600
#define HME_ERX_CFG_RINGSIZE_SHIFT 9
#define HME_ERX_CFG_BYTEOFFSET 0x38
#define HME_ERX_CFG_BYTEOFFSET_SHIFT 3
#define HME_ERX_CFG_CSUMSTART 0x7f0000  /* RX checksum start, in 16-bit words */
#define HME_ERX_CFG_CSUMSHIFT 16

#define HME_ERXI_RING 0x4
#define HME_ERXI_RING_ADDR 0xffffff00
#define HME_ERXI_RING_OFFSET 0xff

/* MAC block: station address, RX/TX config, multicast hash table */
#define HME_MAC_REG_SIZE 0x1000
#define HME_MACI_TXCFG 0x20c
#define HME_MAC_TXCFG_ENABLE 0x1
#define HME_MACI_RXCFG 0x30c
#define HME_MAC_RXCFG_ENABLE 0x1
#define HME_MAC_RXCFG_PMISC 0x40        /* promiscuous mode */
#define HME_MAC_RXCFG_HENABLE 0x800     /* hash filter enable */
#define HME_MACI_MACADDR2 0x318
#define HME_MACI_MACADDR1 0x31c
#define HME_MACI_MACADDR0 0x320
#define HME_MACI_HASHTAB3 0x340
#define HME_MACI_HASHTAB2 0x344
#define HME_MACI_HASHTAB1 0x348
#define HME_MACI_HASHTAB0 0x34c

/* MIF block: MII management interface to the PHY */
#define HME_MIF_REG_SIZE 0x20
#define HME_MIFI_FO 0xc                 /* frame output: bit-packed MII frame */
#define HME_MIF_FO_ST 0xc0000000
#define HME_MIF_FO_ST_SHIFT 30
#define HME_MIF_FO_OPC 0x30000000
#define HME_MIF_FO_OPC_SHIFT 28
#define HME_MIF_FO_PHYAD 0x0f800000
#define HME_MIF_FO_PHYAD_SHIFT 23
#define HME_MIF_FO_REGAD 0x007c0000
#define HME_MIF_FO_REGAD_SHIFT 18
#define HME_MIF_FO_TAMSB 0x20000
#define HME_MIF_FO_TALSB 0x10000        /* turnaround LSB: frame completed */
#define HME_MIF_FO_DATA 0xffff
#define HME_MIFI_CFG 0x10
#define HME_MIF_CFG_MDI0 0x100          /* read-only: MDIO 0 attached */
#define HME_MIF_CFG_MDI1 0x200          /* read-only: MDIO 1 attached */
#define HME_MIFI_IMASK 0x14
#define HME_MIFI_STAT 0x18

/* Wired HME PHY addresses */
#define HME_PHYAD_INTERNAL 1
#define HME_PHYAD_EXTERNAL 0

/* MII frame field values */
#define MII_COMMAND_START 0x1
#define MII_COMMAND_READ 0x2
#define MII_COMMAND_WRITE 0x1

#define TYPE_SUNHME "sunhme"
#define SUNHME(obj) OBJECT_CHECK(SunHMEState, (obj), TYPE_SUNHME)

/* Maximum size of buffer */
#define HME_FIFO_SIZE 0x800

/* Size of TX/RX descriptor */
#define HME_DESC_SIZE 0x8

/* Descriptor status word bits (TX and RX layouts overlap) */
#define HME_XD_OWN 0x80000000           /* descriptor owned by device */
#define HME_XD_OFL 0x40000000           /* RX: overflow */
#define HME_XD_SOP 0x40000000           /* TX: start of packet */
#define HME_XD_EOP 0x20000000           /* TX: end of packet */
#define HME_XD_RXLENMSK 0x3fff0000
#define HME_XD_RXLENSHIFT 16
#define HME_XD_RXCKSUM 0xffff
#define HME_XD_TXLENMSK 0x00001fff
#define HME_XD_TXCKSUM 0x10000000
#define HME_XD_TXCSSTUFF 0xff00000
#define HME_XD_TXCSSTUFFSHIFT 20
#define HME_XD_TXCSSTART 0xfc000
#define HME_XD_TXCSSTARTSHIFT 14

#define HME_MII_REGS_SIZE 0x20
/* Device state for the emulated Sun Happy Meal Ethernet PCI NIC */
typedef struct SunHMEState {
    /*< private >*/
    PCIDevice parent_obj;

    NICState *nic;          /* network backend peer */
    NICConf conf;           /* MAC address / netdev / bootindex properties */

    MemoryRegion hme;       /* BAR 0 container for the sub-blocks below */
    MemoryRegion sebreg;    /* global registers */
    MemoryRegion etxreg;    /* TX DMA registers */
    MemoryRegion erxreg;    /* RX DMA registers */
    MemoryRegion macreg;    /* MAC registers */
    MemoryRegion mifreg;    /* MII management registers */

    /* Register file backing storage, one uint32_t per 4-byte register */
    uint32_t sebregs[HME_SEB_REG_SIZE >> 2];
    uint32_t etxregs[HME_ETX_REG_SIZE >> 2];
    uint32_t erxregs[HME_ERX_REG_SIZE >> 2];
    uint32_t macregs[HME_MAC_REG_SIZE >> 2];
    uint32_t mifregs[HME_MIF_REG_SIZE >> 2];

    /* Internal PHY MII register file */
    uint16_t miiregs[HME_MII_REGS_SIZE];
} SunHMEState;
/* qdev properties: just the standard NIC set (mac, netdev, ...) */
static Property sunhme_properties[] = {
    DEFINE_NIC_PROPERTIES(SunHMEState, conf),
    DEFINE_PROP_END_OF_LIST(),
};
  146. static void sunhme_reset_tx(SunHMEState *s)
  147. {
  148. /* Indicate TX reset complete */
  149. s->sebregs[HME_SEBI_RESET] &= ~HME_SEB_RESET_ETX;
  150. }
  151. static void sunhme_reset_rx(SunHMEState *s)
  152. {
  153. /* Indicate RX reset complete */
  154. s->sebregs[HME_SEBI_RESET] &= ~HME_SEB_RESET_ERX;
  155. }
  156. static void sunhme_update_irq(SunHMEState *s)
  157. {
  158. PCIDevice *d = PCI_DEVICE(s);
  159. int level;
  160. /* MIF interrupt mask (16-bit) */
  161. uint32_t mifmask = ~(s->mifregs[HME_MIFI_IMASK >> 2]) & 0xffff;
  162. uint32_t mif = s->mifregs[HME_MIFI_STAT >> 2] & mifmask;
  163. /* Main SEB interrupt mask (include MIF status from above) */
  164. uint32_t sebmask = ~(s->sebregs[HME_SEBI_IMASK >> 2]) &
  165. ~HME_SEB_STAT_MIFIRQ;
  166. uint32_t seb = s->sebregs[HME_SEBI_STAT >> 2] & sebmask;
  167. if (mif) {
  168. seb |= HME_SEB_STAT_MIFIRQ;
  169. }
  170. level = (seb ? 1 : 0);
  171. trace_sunhme_update_irq(mifmask, mif, sebmask, seb, level);
  172. pci_set_irq(d, level);
  173. }
  174. static void sunhme_seb_write(void *opaque, hwaddr addr,
  175. uint64_t val, unsigned size)
  176. {
  177. SunHMEState *s = SUNHME(opaque);
  178. trace_sunhme_seb_write(addr, val);
  179. /* Handly buggy Linux drivers before 4.13 which have
  180. the wrong offsets for HME_SEBI_STAT and HME_SEBI_IMASK */
  181. switch (addr) {
  182. case HME_SEBI_STAT_LINUXBUG:
  183. addr = HME_SEBI_STAT;
  184. break;
  185. case HME_SEBI_IMASK_LINUXBUG:
  186. addr = HME_SEBI_IMASK;
  187. break;
  188. default:
  189. break;
  190. }
  191. switch (addr) {
  192. case HME_SEBI_RESET:
  193. if (val & HME_SEB_RESET_ETX) {
  194. sunhme_reset_tx(s);
  195. }
  196. if (val & HME_SEB_RESET_ERX) {
  197. sunhme_reset_rx(s);
  198. }
  199. val = s->sebregs[HME_SEBI_RESET >> 2];
  200. break;
  201. }
  202. s->sebregs[addr >> 2] = val;
  203. }
  204. static uint64_t sunhme_seb_read(void *opaque, hwaddr addr,
  205. unsigned size)
  206. {
  207. SunHMEState *s = SUNHME(opaque);
  208. uint64_t val;
  209. /* Handly buggy Linux drivers before 4.13 which have
  210. the wrong offsets for HME_SEBI_STAT and HME_SEBI_IMASK */
  211. switch (addr) {
  212. case HME_SEBI_STAT_LINUXBUG:
  213. addr = HME_SEBI_STAT;
  214. break;
  215. case HME_SEBI_IMASK_LINUXBUG:
  216. addr = HME_SEBI_IMASK;
  217. break;
  218. default:
  219. break;
  220. }
  221. val = s->sebregs[addr >> 2];
  222. switch (addr) {
  223. case HME_SEBI_STAT:
  224. /* Autoclear status (except MIF) */
  225. s->sebregs[HME_SEBI_STAT >> 2] &= HME_SEB_STAT_MIFIRQ;
  226. sunhme_update_irq(s);
  227. break;
  228. }
  229. trace_sunhme_seb_read(addr, val);
  230. return val;
  231. }
/* SEB register region: 32-bit little-endian accesses only */
static const MemoryRegionOps sunhme_seb_ops = {
    .read = sunhme_seb_read,
    .write = sunhme_seb_write,
    .endianness = DEVICE_LITTLE_ENDIAN,
    .valid = {
        .min_access_size = 4,
        .max_access_size = 4,
    },
};
  241. static void sunhme_transmit(SunHMEState *s);
  242. static void sunhme_etx_write(void *opaque, hwaddr addr,
  243. uint64_t val, unsigned size)
  244. {
  245. SunHMEState *s = SUNHME(opaque);
  246. trace_sunhme_etx_write(addr, val);
  247. switch (addr) {
  248. case HME_ETXI_PENDING:
  249. if (val) {
  250. sunhme_transmit(s);
  251. }
  252. break;
  253. }
  254. s->etxregs[addr >> 2] = val;
  255. }
  256. static uint64_t sunhme_etx_read(void *opaque, hwaddr addr,
  257. unsigned size)
  258. {
  259. SunHMEState *s = SUNHME(opaque);
  260. uint64_t val;
  261. val = s->etxregs[addr >> 2];
  262. trace_sunhme_etx_read(addr, val);
  263. return val;
  264. }
/* ETX register region: 32-bit little-endian accesses only */
static const MemoryRegionOps sunhme_etx_ops = {
    .read = sunhme_etx_read,
    .write = sunhme_etx_write,
    .endianness = DEVICE_LITTLE_ENDIAN,
    .valid = {
        .min_access_size = 4,
        .max_access_size = 4,
    },
};
  274. static void sunhme_erx_write(void *opaque, hwaddr addr,
  275. uint64_t val, unsigned size)
  276. {
  277. SunHMEState *s = SUNHME(opaque);
  278. trace_sunhme_erx_write(addr, val);
  279. s->erxregs[addr >> 2] = val;
  280. }
  281. static uint64_t sunhme_erx_read(void *opaque, hwaddr addr,
  282. unsigned size)
  283. {
  284. SunHMEState *s = SUNHME(opaque);
  285. uint64_t val;
  286. val = s->erxregs[addr >> 2];
  287. trace_sunhme_erx_read(addr, val);
  288. return val;
  289. }
/* ERX register region: 32-bit little-endian accesses only */
static const MemoryRegionOps sunhme_erx_ops = {
    .read = sunhme_erx_read,
    .write = sunhme_erx_write,
    .endianness = DEVICE_LITTLE_ENDIAN,
    .valid = {
        .min_access_size = 4,
        .max_access_size = 4,
    },
};
  299. static void sunhme_mac_write(void *opaque, hwaddr addr,
  300. uint64_t val, unsigned size)
  301. {
  302. SunHMEState *s = SUNHME(opaque);
  303. uint64_t oldval = s->macregs[addr >> 2];
  304. trace_sunhme_mac_write(addr, val);
  305. s->macregs[addr >> 2] = val;
  306. switch (addr) {
  307. case HME_MACI_RXCFG:
  308. if (!(oldval & HME_MAC_RXCFG_ENABLE) &&
  309. (val & HME_MAC_RXCFG_ENABLE)) {
  310. qemu_flush_queued_packets(qemu_get_queue(s->nic));
  311. }
  312. break;
  313. }
  314. }
  315. static uint64_t sunhme_mac_read(void *opaque, hwaddr addr,
  316. unsigned size)
  317. {
  318. SunHMEState *s = SUNHME(opaque);
  319. uint64_t val;
  320. val = s->macregs[addr >> 2];
  321. trace_sunhme_mac_read(addr, val);
  322. return val;
  323. }
/* MAC register region: 32-bit little-endian accesses only */
static const MemoryRegionOps sunhme_mac_ops = {
    .read = sunhme_mac_read,
    .write = sunhme_mac_write,
    .endianness = DEVICE_LITTLE_ENDIAN,
    .valid = {
        .min_access_size = 4,
        .max_access_size = 4,
    },
};
  333. static void sunhme_mii_write(SunHMEState *s, uint8_t reg, uint16_t data)
  334. {
  335. trace_sunhme_mii_write(reg, data);
  336. switch (reg) {
  337. case MII_BMCR:
  338. if (data & MII_BMCR_RESET) {
  339. /* Autoclear reset bit, enable auto negotiation */
  340. data &= ~MII_BMCR_RESET;
  341. data |= MII_BMCR_AUTOEN;
  342. }
  343. if (data & MII_BMCR_ANRESTART) {
  344. /* Autoclear auto negotiation restart */
  345. data &= ~MII_BMCR_ANRESTART;
  346. /* Indicate negotiation complete */
  347. s->miiregs[MII_BMSR] |= MII_BMSR_AN_COMP;
  348. if (!qemu_get_queue(s->nic)->link_down) {
  349. s->miiregs[MII_ANLPAR] |= MII_ANLPAR_TXFD;
  350. s->miiregs[MII_BMSR] |= MII_BMSR_LINK_ST;
  351. }
  352. }
  353. break;
  354. }
  355. s->miiregs[reg] = data;
  356. }
  357. static uint16_t sunhme_mii_read(SunHMEState *s, uint8_t reg)
  358. {
  359. uint16_t data = s->miiregs[reg];
  360. trace_sunhme_mii_read(reg, data);
  361. return data;
  362. }
  363. static void sunhme_mif_write(void *opaque, hwaddr addr,
  364. uint64_t val, unsigned size)
  365. {
  366. SunHMEState *s = SUNHME(opaque);
  367. uint8_t cmd, reg;
  368. uint16_t data;
  369. trace_sunhme_mif_write(addr, val);
  370. switch (addr) {
  371. case HME_MIFI_CFG:
  372. /* Mask the read-only bits */
  373. val &= ~(HME_MIF_CFG_MDI0 | HME_MIF_CFG_MDI1);
  374. val |= s->mifregs[HME_MIFI_CFG >> 2] &
  375. (HME_MIF_CFG_MDI0 | HME_MIF_CFG_MDI1);
  376. break;
  377. case HME_MIFI_FO:
  378. /* Detect start of MII command */
  379. if ((val & HME_MIF_FO_ST) >> HME_MIF_FO_ST_SHIFT
  380. != MII_COMMAND_START) {
  381. val |= HME_MIF_FO_TALSB;
  382. break;
  383. }
  384. /* Internal phy only */
  385. if ((val & HME_MIF_FO_PHYAD) >> HME_MIF_FO_PHYAD_SHIFT
  386. != HME_PHYAD_INTERNAL) {
  387. val |= HME_MIF_FO_TALSB;
  388. break;
  389. }
  390. cmd = (val & HME_MIF_FO_OPC) >> HME_MIF_FO_OPC_SHIFT;
  391. reg = (val & HME_MIF_FO_REGAD) >> HME_MIF_FO_REGAD_SHIFT;
  392. data = (val & HME_MIF_FO_DATA);
  393. switch (cmd) {
  394. case MII_COMMAND_WRITE:
  395. sunhme_mii_write(s, reg, data);
  396. break;
  397. case MII_COMMAND_READ:
  398. val &= ~HME_MIF_FO_DATA;
  399. val |= sunhme_mii_read(s, reg);
  400. break;
  401. }
  402. val |= HME_MIF_FO_TALSB;
  403. break;
  404. }
  405. s->mifregs[addr >> 2] = val;
  406. }
  407. static uint64_t sunhme_mif_read(void *opaque, hwaddr addr,
  408. unsigned size)
  409. {
  410. SunHMEState *s = SUNHME(opaque);
  411. uint64_t val;
  412. val = s->mifregs[addr >> 2];
  413. switch (addr) {
  414. case HME_MIFI_STAT:
  415. /* Autoclear MIF interrupt status */
  416. s->mifregs[HME_MIFI_STAT >> 2] = 0;
  417. sunhme_update_irq(s);
  418. break;
  419. }
  420. trace_sunhme_mif_read(addr, val);
  421. return val;
  422. }
/* MIF register region: 32-bit little-endian accesses only */
static const MemoryRegionOps sunhme_mif_ops = {
    .read = sunhme_mif_read,
    .write = sunhme_mif_write,
    .endianness = DEVICE_LITTLE_ENDIAN,
    .valid = {
        .min_access_size = 4,
        .max_access_size = 4,
    },
};
/* Hand a fully assembled frame of 'size' bytes to the network backend */
static void sunhme_transmit_frame(SunHMEState *s, uint8_t *buf, int size)
{
    qemu_send_packet(qemu_get_queue(s->nic), buf, size);
}
  436. static inline int sunhme_get_tx_ring_count(SunHMEState *s)
  437. {
  438. return (s->etxregs[HME_ETXI_RSIZE >> 2] + 1) << 4;
  439. }
  440. static inline int sunhme_get_tx_ring_nr(SunHMEState *s)
  441. {
  442. return s->etxregs[HME_ETXI_RING >> 2] & HME_ETXI_RING_OFFSET;
  443. }
  444. static inline void sunhme_set_tx_ring_nr(SunHMEState *s, int i)
  445. {
  446. uint32_t ring = s->etxregs[HME_ETXI_RING >> 2] & ~HME_ETXI_RING_OFFSET;
  447. ring |= i & HME_ETXI_RING_OFFSET;
  448. s->etxregs[HME_ETXI_RING >> 2] = ring;
  449. }
/*
 * Walk the TX descriptor ring, consuming every descriptor the device
 * owns. Fragments are accumulated into a local FIFO buffer between SOP
 * and EOP, a TX checksum is optionally computed and stuffed, and the
 * completed frame is handed to the backend when the MAC TX is enabled.
 */
static void sunhme_transmit(SunHMEState *s)
{
    PCIDevice *d = PCI_DEVICE(s);
    dma_addr_t tb, addr;
    uint32_t intstatus, status, buffer, sum = 0;
    int cr, nr, len, xmit_pos, csum_offset = 0, csum_stuff_offset = 0;
    uint16_t csum = 0;
    uint8_t xmit_buffer[HME_FIFO_SIZE];

    /* Ring base address (upper bits of the ring register) and geometry */
    tb = s->etxregs[HME_ETXI_RING >> 2] & HME_ETXI_RING_ADDR;
    nr = sunhme_get_tx_ring_count(s);
    cr = sunhme_get_tx_ring_nr(s);

    /* Each descriptor is two 32-bit words: flags/length, buffer address */
    pci_dma_read(d, tb + cr * HME_DESC_SIZE, &status, 4);
    pci_dma_read(d, tb + cr * HME_DESC_SIZE + 4, &buffer, 4);

    xmit_pos = 0;
    while (status & HME_XD_OWN) {
        trace_sunhme_tx_desc(buffer, status, cr, nr);

        /* Copy data into transmit buffer, clamping at the FIFO size */
        addr = buffer;
        len = status & HME_XD_TXLENMSK;
        if (xmit_pos + len > HME_FIFO_SIZE) {
            len = HME_FIFO_SIZE - xmit_pos;
        }
        pci_dma_read(d, addr, &xmit_buffer[xmit_pos], len);
        xmit_pos += len;

        /* Detect start of packet for TX checksum */
        if (status & HME_XD_SOP) {
            sum = 0;
            csum_offset = (status & HME_XD_TXCSSTART) >> HME_XD_TXCSSTARTSHIFT;
            csum_stuff_offset = (status & HME_XD_TXCSSTUFF) >>
                                HME_XD_TXCSSTUFFSHIFT;
        }

        if (status & HME_XD_TXCKSUM) {
            /* Only start calculation from csum_offset */
            if (xmit_pos - len <= csum_offset && xmit_pos > csum_offset) {
                /* First fragment that reaches the checksummed region */
                sum += net_checksum_add(xmit_pos - csum_offset,
                                        xmit_buffer + csum_offset);
                trace_sunhme_tx_xsum_add(csum_offset, xmit_pos - csum_offset);
            } else {
                /* Subsequent fragment: add the whole fragment */
                sum += net_checksum_add(len, xmit_buffer + xmit_pos - len);
                trace_sunhme_tx_xsum_add(xmit_pos - len, len);
            }
        }

        /* Detect end of packet for TX checksum */
        if (status & HME_XD_EOP) {
            /* Stuff the checksum if required */
            if (status & HME_XD_TXCKSUM) {
                csum = net_checksum_finish(sum);
                stw_be_p(xmit_buffer + csum_stuff_offset, csum);
                trace_sunhme_tx_xsum_stuff(csum, csum_stuff_offset);
            }

            /* Only transmit if the MAC TX path is enabled */
            if (s->macregs[HME_MACI_TXCFG >> 2] & HME_MAC_TXCFG_ENABLE) {
                sunhme_transmit_frame(s, xmit_buffer, xmit_pos);
                trace_sunhme_tx_done(xmit_pos);
            }
        }

        /* Update status: hand the descriptor back to the guest */
        status &= ~HME_XD_OWN;
        pci_dma_write(d, tb + cr * HME_DESC_SIZE, &status, 4);

        /* Move onto next descriptor (ring wraps at nr) */
        cr++;
        if (cr >= nr) {
            cr = 0;
        }
        sunhme_set_tx_ring_nr(s, cr);

        pci_dma_read(d, tb + cr * HME_DESC_SIZE, &status, 4);
        pci_dma_read(d, tb + cr * HME_DESC_SIZE + 4, &buffer, 4);

        /* Indicate TX complete */
        intstatus = s->sebregs[HME_SEBI_STAT >> 2];
        intstatus |= HME_SEB_STAT_HOSTTOTX;
        s->sebregs[HME_SEBI_STAT >> 2] = intstatus;

        /* Autoclear TX pending */
        s->etxregs[HME_ETXI_PENDING >> 2] = 0;

        sunhme_update_irq(s);
    }

    /* TX FIFO now clear */
    intstatus = s->sebregs[HME_SEBI_STAT >> 2];
    intstatus |= HME_SEB_STAT_TXALL;
    s->sebregs[HME_SEBI_STAT >> 2] = intstatus;
    sunhme_update_irq(s);
}
  530. static int sunhme_can_receive(NetClientState *nc)
  531. {
  532. SunHMEState *s = qemu_get_nic_opaque(nc);
  533. return s->macregs[HME_MACI_RXCFG >> 2] & HME_MAC_RXCFG_ENABLE;
  534. }
  535. static void sunhme_link_status_changed(NetClientState *nc)
  536. {
  537. SunHMEState *s = qemu_get_nic_opaque(nc);
  538. if (nc->link_down) {
  539. s->miiregs[MII_ANLPAR] &= ~MII_ANLPAR_TXFD;
  540. s->miiregs[MII_BMSR] &= ~MII_BMSR_LINK_ST;
  541. } else {
  542. s->miiregs[MII_ANLPAR] |= MII_ANLPAR_TXFD;
  543. s->miiregs[MII_BMSR] |= MII_BMSR_LINK_ST;
  544. }
  545. /* Exact bits unknown */
  546. s->mifregs[HME_MIFI_STAT >> 2] = 0xffff;
  547. sunhme_update_irq(s);
  548. }
  549. static inline int sunhme_get_rx_ring_count(SunHMEState *s)
  550. {
  551. uint32_t rings = (s->erxregs[HME_ERXI_CFG >> 2] & HME_ERX_CFG_RINGSIZE)
  552. >> HME_ERX_CFG_RINGSIZE_SHIFT;
  553. switch (rings) {
  554. case 0:
  555. return 32;
  556. case 1:
  557. return 64;
  558. case 2:
  559. return 128;
  560. case 3:
  561. return 256;
  562. }
  563. return 0;
  564. }
  565. static inline int sunhme_get_rx_ring_nr(SunHMEState *s)
  566. {
  567. return s->erxregs[HME_ERXI_RING >> 2] & HME_ERXI_RING_OFFSET;
  568. }
  569. static inline void sunhme_set_rx_ring_nr(SunHMEState *s, int i)
  570. {
  571. uint32_t ring = s->erxregs[HME_ERXI_RING >> 2] & ~HME_ERXI_RING_OFFSET;
  572. ring |= i & HME_ERXI_RING_OFFSET;
  573. s->erxregs[HME_ERXI_RING >> 2] = ring;
  574. }
/* Minimum Ethernet frame length; shorter packets are zero-padded */
#define MIN_BUF_SIZE 60

/*
 * Receive one packet from the backend: apply the MAC address / broadcast
 * / hash filters, DMA the payload into the current RX descriptor's
 * buffer, compute the RX checksum, and hand the descriptor back to the
 * guest with length and checksum filled in. Returns the consumed length,
 * 0 when RX is disabled, or -1 when the packet is filtered or no
 * descriptor is available.
 */
static ssize_t sunhme_receive(NetClientState *nc, const uint8_t *buf,
                              size_t size)
{
    SunHMEState *s = qemu_get_nic_opaque(nc);
    PCIDevice *d = PCI_DEVICE(s);
    dma_addr_t rb, addr;
    uint32_t intstatus, status, buffer, buffersize, sum;
    uint16_t csum;
    uint8_t buf1[60];
    int nr, cr, len, rxoffset, csum_offset;

    trace_sunhme_rx_incoming(size);

    /* Do nothing if MAC RX disabled */
    if (!(s->macregs[HME_MACI_RXCFG >> 2] & HME_MAC_RXCFG_ENABLE)) {
        return 0;
    }

    trace_sunhme_rx_filter_destmac(buf[0], buf[1], buf[2],
                                   buf[3], buf[4], buf[5]);

    /* Check destination MAC address (skipped in promiscuous mode) */
    if (!(s->macregs[HME_MACI_RXCFG >> 2] & HME_MAC_RXCFG_PMISC)) {
        /* Try and match local MAC address (stored 16 bits per register) */
        if (((s->macregs[HME_MACI_MACADDR0 >> 2] & 0xff00) >> 8) == buf[0] &&
            (s->macregs[HME_MACI_MACADDR0 >> 2] & 0xff) == buf[1] &&
            ((s->macregs[HME_MACI_MACADDR1 >> 2] & 0xff00) >> 8) == buf[2] &&
            (s->macregs[HME_MACI_MACADDR1 >> 2] & 0xff) == buf[3] &&
            ((s->macregs[HME_MACI_MACADDR2 >> 2] & 0xff00) >> 8) == buf[4] &&
            (s->macregs[HME_MACI_MACADDR2 >> 2] & 0xff) == buf[5]) {
            /* Matched local MAC address */
            trace_sunhme_rx_filter_local_match();
        } else if (buf[0] == 0xff && buf[1] == 0xff && buf[2] == 0xff &&
                   buf[3] == 0xff && buf[4] == 0xff && buf[5] == 0xff) {
            /* Matched broadcast address */
            trace_sunhme_rx_filter_bcast_match();
        } else if (s->macregs[HME_MACI_RXCFG >> 2] & HME_MAC_RXCFG_HENABLE) {
            /* Didn't match local address, check hash filter */
            int mcast_idx = net_crc32_le(buf, ETH_ALEN) >> 26;
            if (!(s->macregs[(HME_MACI_HASHTAB0 >> 2) - (mcast_idx >> 4)] &
                  (1 << (mcast_idx & 0xf)))) {
                /* Didn't match hash filter */
                trace_sunhme_rx_filter_hash_nomatch();
                trace_sunhme_rx_filter_reject();
                return -1;
            } else {
                trace_sunhme_rx_filter_hash_match();
            }
        } else {
            /* Not for us */
            trace_sunhme_rx_filter_reject();
            return -1;
        }
    } else {
        trace_sunhme_rx_filter_promisc_match();
    }

    trace_sunhme_rx_filter_accept();

    /* If too small buffer, then expand it */
    if (size < MIN_BUF_SIZE) {
        memcpy(buf1, buf, size);
        memset(buf1 + size, 0, MIN_BUF_SIZE - size);
        buf = buf1;
        size = MIN_BUF_SIZE;
    }

    /* Locate the current RX descriptor (two 32-bit words) */
    rb = s->erxregs[HME_ERXI_RING >> 2] & HME_ERXI_RING_ADDR;
    nr = sunhme_get_rx_ring_count(s);
    cr = sunhme_get_rx_ring_nr(s);

    pci_dma_read(d, rb + cr * HME_DESC_SIZE, &status, 4);
    pci_dma_read(d, rb + cr * HME_DESC_SIZE + 4, &buffer, 4);

    /* If we don't own the current descriptor then indicate overflow error */
    if (!(status & HME_XD_OWN)) {
        s->sebregs[HME_SEBI_STAT >> 2] |= HME_SEB_STAT_NORXD;
        sunhme_update_irq(s);
        trace_sunhme_rx_norxd();
        return -1;
    }

    rxoffset = (s->erxregs[HME_ERXI_CFG >> 2] & HME_ERX_CFG_BYTEOFFSET) >>
               HME_ERX_CFG_BYTEOFFSET_SHIFT;
    addr = buffer + rxoffset;
    buffersize = (status & HME_XD_RXLENMSK) >> HME_XD_RXLENSHIFT;

    /* Detect receive overflow */
    len = size;
    if (size > buffersize) {
        status |= HME_XD_OFL;
        len = buffersize;
    }

    pci_dma_write(d, addr, buf, len);

    trace_sunhme_rx_desc(buffer, rxoffset, status, len, cr, nr);

    /* Calculate the receive checksum (CSUMSTART is in 16-bit words) */
    csum_offset = (s->erxregs[HME_ERXI_CFG >> 2] & HME_ERX_CFG_CSUMSTART) >>
                  HME_ERX_CFG_CSUMSHIFT << 1;
    sum = 0;
    sum += net_checksum_add(len - csum_offset, (uint8_t *)buf + csum_offset);
    csum = net_checksum_finish(sum);

    trace_sunhme_rx_xsum_calc(csum);

    /* Update status: return descriptor with actual length and checksum */
    status &= ~HME_XD_OWN;
    status &= ~HME_XD_RXLENMSK;
    status |= len << HME_XD_RXLENSHIFT;
    status &= ~HME_XD_RXCKSUM;
    status |= csum;

    pci_dma_write(d, rb + cr * HME_DESC_SIZE, &status, 4);

    cr++;
    if (cr >= nr) {
        cr = 0;
    }

    sunhme_set_rx_ring_nr(s, cr);

    /* Indicate RX complete */
    intstatus = s->sebregs[HME_SEBI_STAT >> 2];
    intstatus |= HME_SEB_STAT_RXTOHOST;
    s->sebregs[HME_SEBI_STAT >> 2] = intstatus;

    sunhme_update_irq(s);

    return len;
}
/* Backend callbacks for the emulated NIC */
static NetClientInfo net_sunhme_info = {
    .type = NET_CLIENT_DRIVER_NIC,
    .size = sizeof(NICState),
    .can_receive = sunhme_can_receive,
    .receive = sunhme_receive,
    .link_status_changed = sunhme_link_status_changed,
};
/*
 * Realize: set up the PCI BAR, map the five register sub-blocks into it,
 * and create the NIC backend connection.
 */
static void sunhme_realize(PCIDevice *pci_dev, Error **errp)
{
    SunHMEState *s = SUNHME(pci_dev);
    DeviceState *d = DEVICE(pci_dev);
    uint8_t *pci_conf;

    pci_conf = pci_dev->config;
    pci_conf[PCI_INTERRUPT_PIN] = 1; /* interrupt pin A */

    /* BAR 0: container covering the whole register bank */
    memory_region_init(&s->hme, OBJECT(pci_dev), "sunhme", HME_REG_SIZE);
    pci_register_bar(pci_dev, 0, PCI_BASE_ADDRESS_SPACE_MEMORY, &s->hme);

    /* Global (SEB) registers at offset 0x0 */
    memory_region_init_io(&s->sebreg, OBJECT(pci_dev), &sunhme_seb_ops, s,
                          "sunhme.seb", HME_SEB_REG_SIZE);
    memory_region_add_subregion(&s->hme, 0, &s->sebreg);

    /* TX DMA registers at offset 0x2000 */
    memory_region_init_io(&s->etxreg, OBJECT(pci_dev), &sunhme_etx_ops, s,
                          "sunhme.etx", HME_ETX_REG_SIZE);
    memory_region_add_subregion(&s->hme, 0x2000, &s->etxreg);

    /* RX DMA registers at offset 0x4000 */
    memory_region_init_io(&s->erxreg, OBJECT(pci_dev), &sunhme_erx_ops, s,
                          "sunhme.erx", HME_ERX_REG_SIZE);
    memory_region_add_subregion(&s->hme, 0x4000, &s->erxreg);

    /* MAC registers at offset 0x6000 */
    memory_region_init_io(&s->macreg, OBJECT(pci_dev), &sunhme_mac_ops, s,
                          "sunhme.mac", HME_MAC_REG_SIZE);
    memory_region_add_subregion(&s->hme, 0x6000, &s->macreg);

    /* MII management registers at offset 0x7000 */
    memory_region_init_io(&s->mifreg, OBJECT(pci_dev), &sunhme_mif_ops, s,
                          "sunhme.mif", HME_MIF_REG_SIZE);
    memory_region_add_subregion(&s->hme, 0x7000, &s->mifreg);

    qemu_macaddr_default_if_unset(&s->conf.macaddr);
    s->nic = qemu_new_nic(&net_sunhme_info, &s->conf,
                          object_get_typename(OBJECT(d)), d->id, s);
    qemu_format_nic_info_str(qemu_get_queue(s->nic), s->conf.macaddr.a);
}
/* Instance init: expose the standard bootindex property */
static void sunhme_instance_init(Object *obj)
{
    SunHMEState *s = SUNHME(obj);

    device_add_bootindex_property(obj, &s->conf.bootindex,
                                  "bootindex", "/ethernet-phy@0",
                                  DEVICE(obj), NULL);
}
/* Device reset: bring the PHY and interrupt masks to their power-on state */
static void sunhme_reset(DeviceState *ds)
{
    SunHMEState *s = SUNHME(ds);

    /* Configure internal transceiver */
    s->mifregs[HME_MIFI_CFG >> 2] |= HME_MIF_CFG_MDI0;

    /* Advertise auto, 100Mbps FD */
    s->miiregs[MII_ANAR] = MII_ANAR_TXFD;
    s->miiregs[MII_BMSR] = MII_BMSR_AUTONEG | MII_BMSR_100TX_FD |
                           MII_BMSR_AN_COMP;

    if (!qemu_get_queue(s->nic)->link_down) {
        s->miiregs[MII_ANLPAR] |= MII_ANLPAR_TXFD;
        s->miiregs[MII_BMSR] |= MII_BMSR_LINK_ST;
    }

    /* Set manufacturer */
    s->miiregs[MII_PHYID1] = DP83840_PHYID1;
    s->miiregs[MII_PHYID2] = DP83840_PHYID2;

    /* Configure default interrupt mask */
    s->mifregs[HME_MIFI_IMASK >> 2] = 0xffff;
    s->sebregs[HME_SEBI_IMASK >> 2] = 0xff7fffff;
}
/* Migration state: all register files plus the PCI device state */
static const VMStateDescription vmstate_hme = {
    .name = "sunhme",
    .version_id = 0,
    .minimum_version_id = 0,
    .fields = (VMStateField[]) {
        VMSTATE_PCI_DEVICE(parent_obj, SunHMEState),
        VMSTATE_MACADDR(conf.macaddr, SunHMEState),
        VMSTATE_UINT32_ARRAY(sebregs, SunHMEState, (HME_SEB_REG_SIZE >> 2)),
        VMSTATE_UINT32_ARRAY(etxregs, SunHMEState, (HME_ETX_REG_SIZE >> 2)),
        VMSTATE_UINT32_ARRAY(erxregs, SunHMEState, (HME_ERX_REG_SIZE >> 2)),
        VMSTATE_UINT32_ARRAY(macregs, SunHMEState, (HME_MAC_REG_SIZE >> 2)),
        VMSTATE_UINT32_ARRAY(mifregs, SunHMEState, (HME_MIF_REG_SIZE >> 2)),
        VMSTATE_UINT16_ARRAY(miiregs, SunHMEState, HME_MII_REGS_SIZE),
        VMSTATE_END_OF_LIST()
    }
};
/* Class init: hook up realize/reset/vmstate and PCI identification */
static void sunhme_class_init(ObjectClass *klass, void *data)
{
    DeviceClass *dc = DEVICE_CLASS(klass);
    PCIDeviceClass *k = PCI_DEVICE_CLASS(klass);

    k->realize = sunhme_realize;
    k->vendor_id = PCI_VENDOR_ID_SUN;
    k->device_id = PCI_DEVICE_ID_SUN_HME;
    k->class_id = PCI_CLASS_NETWORK_ETHERNET;
    dc->vmsd = &vmstate_hme;
    dc->reset = sunhme_reset;
    dc->props = sunhme_properties;
    set_bit(DEVICE_CATEGORY_NETWORK, dc->categories);
}
/* QOM type registration data for the sunhme device */
static const TypeInfo sunhme_info = {
    .name = TYPE_SUNHME,
    .parent = TYPE_PCI_DEVICE,
    .class_init = sunhme_class_init,
    .instance_size = sizeof(SunHMEState),
    .instance_init = sunhme_instance_init,
    .interfaces = (InterfaceInfo[]) {
        { INTERFACE_CONVENTIONAL_PCI_DEVICE },
        { }
    }
};
/* Register the sunhme QOM type at module load */
static void sunhme_register_types(void)
{
    type_register_static(&sunhme_info);
}

type_init(sunhme_register_types)