/* e1000.c — QEMU e1000 NIC emulation (full file is ~41 KB, 1312 lines). */
/*
 * QEMU e1000 emulation
 *
 * Software developer's manual:
 * http://download.intel.com/design/network/manuals/8254x_GBe_SDM.pdf
 *
 * Nir Peleg, Tutis Systems Ltd. for Qumranet Inc.
 * Copyright (c) 2008 Qumranet
 * Based on work done by:
 * Copyright (c) 2007 Dan Aloni
 * Copyright (c) 2004 Antony T Curtis
 *
 * This library is free software; you can redistribute it and/or
 * modify it under the terms of the GNU Lesser General Public
 * License as published by the Free Software Foundation; either
 * version 2 of the License, or (at your option) any later version.
 *
 * This library is distributed in the hope that it will be useful,
 * but WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the GNU
 * Lesser General Public License for more details.
 *
 * You should have received a copy of the GNU Lesser General Public
 * License along with this library; if not, see <http://www.gnu.org/licenses/>.
 */
#include "hw.h"
#include "pci.h"
#include "net.h"
#include "net/checksum.h"
#include "loader.h"
#include "sysemu.h"
#include "dma.h"
#include "e1000_hw.h"
#define E1000_DEBUG

#ifdef E1000_DEBUG
/* Debug categories; each value is a bit position in debugflags. */
enum {
    DEBUG_GENERAL, DEBUG_IO, DEBUG_MMIO, DEBUG_INTERRUPT,
    DEBUG_RX, DEBUG_TX, DEBUG_MDIC, DEBUG_EEPROM,
    DEBUG_UNKNOWN, DEBUG_TXSUM, DEBUG_TXERR, DEBUG_RXERR,
    DEBUG_RXFILTER, DEBUG_PHY, DEBUG_NOTYET,
};
#define DBGBIT(x) (1<<DEBUG_##x)
/* TX errors and general messages are logged by default. */
static int debugflags = DBGBIT(TXERR) | DBGBIT(GENERAL);

/* Emit a debug message to stderr when its category bit is enabled. */
#define DBGOUT(what, fmt, ...) do { \
    if (debugflags & DBGBIT(what)) \
        fprintf(stderr, "e1000: " fmt, ## __VA_ARGS__); \
    } while (0)
#else
#define DBGOUT(what, fmt, ...) do {} while (0)
#endif
#define IOPORT_SIZE   0x40      /* size of the PCI I/O BAR */
#define PNPMMIO_SIZE  0x20000   /* size of the PCI MMIO BAR */
#define MIN_BUF_SIZE  60 /* Min. octets in an ethernet frame sans FCS */

/* this is the size past which hardware will drop packets when setting LPE=0 */
#define MAXIMUM_ETHERNET_VLAN_SIZE 1522

/*
 * HW models:
 *  E1000_DEV_ID_82540EM works with Windows and Linux
 *  E1000_DEV_ID_82573L OK with windoze and Linux 2.6.22,
 *      appears to perform better than 82540EM, but breaks with Linux 2.6.18
 *  E1000_DEV_ID_82544GC_COPPER appears to work; not well tested
 *  Others never tested
 */
enum { E1000_DEVID = E1000_DEV_ID_82540EM };

/*
 * May need to specify additional MAC-to-PHY entries --
 * Intel's Windows driver refuses to initialize unless they match
 */
enum {
    PHY_ID2_INIT = E1000_DEVID == E1000_DEV_ID_82573L ? 0xcc2 :
                   E1000_DEVID == E1000_DEV_ID_82544GC_COPPER ? 0xc30 :
                   /* default to E1000_DEV_ID_82540EM */ 0xc20
};
/* Per-device emulation state for one e1000 NIC. */
typedef struct E1000State_st {
    PCIDevice dev;              /* must stay first: PCI core upcasts to it */
    NICState *nic;
    NICConf conf;
    MemoryRegion mmio;
    MemoryRegion io;

    uint32_t mac_reg[0x8000];   /* MAC registers, indexed by byte offset >> 2 */
    uint16_t phy_reg[0x20];     /* MII PHY registers */
    uint16_t eeprom_data[64];   /* EEPROM contents as 16-bit words */

    uint32_t rxbuf_size;        /* per-descriptor RX buffer size from RCTL */
    uint32_t rxbuf_min_shift;   /* RDMTS low-threshold shift from RCTL */
    struct e1000_tx {
        unsigned char header[256];    /* saved TSO header, re-used per segment */
        unsigned char vlan_header[4]; /* 802.1Q tag inserted on transmit */
        /* Fields vlan and data must not be reordered or separated. */
        unsigned char vlan[4];
        unsigned char data[0x10000];
        uint16_t size;                /* bytes accumulated in data[] so far */
        unsigned char sum_needed;     /* POPTS (TXSM/IXSM) from data descriptor */
        unsigned char vlan_needed;    /* insert vlan_header before sending */
        /* Checksum/TSO parameters latched from the last context descriptor: */
        uint8_t ipcss;                /* IP checksum start */
        uint8_t ipcso;                /* IP checksum offset */
        uint16_t ipcse;               /* IP checksum end (inclusive; 0 = to end) */
        uint8_t tucss;                /* TCP/UDP checksum start */
        uint8_t tucso;                /* TCP/UDP checksum offset */
        uint16_t tucse;               /* TCP/UDP checksum end */
        uint8_t hdr_len;              /* TSO header length */
        uint16_t mss;                 /* TSO maximum segment size */
        uint32_t paylen;              /* TSO total payload length */
        uint16_t tso_frames;          /* segments already emitted for this TSO */
        char tse;                     /* TSE bit from context descriptor */
        int8_t ip;                    /* nonzero = IPv4, zero = IPv6 */
        int8_t tcp;                   /* nonzero = TCP, zero = UDP */
        char cptse; // current packet tse bit
    } tx;

    /* Microwire EEPROM bit-bang state, driven via the EECD register. */
    struct {
        uint32_t val_in; // shifted in from guest driver
        uint16_t bitnum_in;           /* bits clocked in so far */
        uint16_t bitnum_out;          /* next output bit position */
        uint16_t reading;             /* nonzero while a READ is in progress */
        uint32_t old_eecd;            /* last latched control-line state */
    } eecd_state;
    QEMUTimer *autoneg_timer;         /* completes emulated auto-negotiation */
} E1000State;
/* defreg(x) maps the byte offset E1000_x to its index in mac_reg[]. */
#define defreg(x)   x = (E1000_##x>>2)
enum {
    defreg(CTRL),   defreg(EECD),   defreg(EERD),   defreg(GPRC),
    defreg(GPTC),   defreg(ICR),    defreg(ICS),    defreg(IMC),
    defreg(IMS),    defreg(LEDCTL), defreg(MANC),   defreg(MDIC),
    defreg(MPC),    defreg(PBA),    defreg(RCTL),   defreg(RDBAH),
    defreg(RDBAL),  defreg(RDH),    defreg(RDLEN),  defreg(RDT),
    defreg(STATUS), defreg(SWSM),   defreg(TCTL),   defreg(TDBAH),
    defreg(TDBAL),  defreg(TDH),    defreg(TDLEN),  defreg(TDT),
    defreg(TORH),   defreg(TORL),   defreg(TOTH),   defreg(TOTL),
    defreg(TPR),    defreg(TPT),    defreg(TXDCTL), defreg(WUFC),
    defreg(RA),     defreg(MTA),    defreg(CRCERRS),defreg(VFTA),
    defreg(VET),
};
  132. static void
  133. e1000_link_down(E1000State *s)
  134. {
  135. s->mac_reg[STATUS] &= ~E1000_STATUS_LU;
  136. s->phy_reg[PHY_STATUS] &= ~MII_SR_LINK_STATUS;
  137. }
  138. static void
  139. e1000_link_up(E1000State *s)
  140. {
  141. s->mac_reg[STATUS] |= E1000_STATUS_LU;
  142. s->phy_reg[PHY_STATUS] |= MII_SR_LINK_STATUS;
  143. }
  144. static void
  145. set_phy_ctrl(E1000State *s, int index, uint16_t val)
  146. {
  147. if ((val & MII_CR_AUTO_NEG_EN) && (val & MII_CR_RESTART_AUTO_NEG)) {
  148. s->nic->nc.link_down = true;
  149. e1000_link_down(s);
  150. s->phy_reg[PHY_STATUS] &= ~MII_SR_AUTONEG_COMPLETE;
  151. DBGOUT(PHY, "Start link auto negotiation\n");
  152. qemu_mod_timer(s->autoneg_timer, qemu_get_clock_ms(vm_clock) + 500);
  153. }
  154. }
  155. static void
  156. e1000_autoneg_timer(void *opaque)
  157. {
  158. E1000State *s = opaque;
  159. s->nic->nc.link_down = false;
  160. e1000_link_up(s);
  161. s->phy_reg[PHY_STATUS] |= MII_SR_AUTONEG_COMPLETE;
  162. DBGOUT(PHY, "Auto negotiation is completed\n");
  163. }
/* Optional per-register side-effect hooks for guest PHY writes,
 * indexed by PHY register number. */
static void (*phyreg_writeops[])(E1000State *, int, uint16_t) = {
    [PHY_CTRL] = set_phy_ctrl,
};
enum { NPHYWRITEOPS = ARRAY_SIZE(phyreg_writeops) };

/* Guest access permissions for PHY registers (used by phy_regcap[]). */
enum { PHY_R = 1, PHY_W = 2, PHY_RW = PHY_R | PHY_W };
/* Read/write capability of each implemented PHY register; registers not
 * listed here are inaccessible and MDIC accesses to them set ERROR. */
static const char phy_regcap[0x20] = {
    [PHY_STATUS] = PHY_R,       [M88E1000_EXT_PHY_SPEC_CTRL] = PHY_RW,
    [PHY_ID1] = PHY_R,          [M88E1000_PHY_SPEC_CTRL] = PHY_RW,
    [PHY_CTRL] = PHY_RW,        [PHY_1000T_CTRL] = PHY_RW,
    [PHY_LP_ABILITY] = PHY_R,   [PHY_1000T_STATUS] = PHY_R,
    [PHY_AUTONEG_ADV] = PHY_RW, [M88E1000_RX_ERR_CNTR] = PHY_R,
    [PHY_ID2] = PHY_R,          [M88E1000_PHY_SPEC_STATUS] = PHY_R
};

/* PHY register reset values (M88E1000-style PHY, per register names). */
static const uint16_t phy_reg_init[] = {
    [PHY_CTRL] = 0x1140,
    [PHY_STATUS] = 0x794d, /* link initially up with not completed autoneg */
    [PHY_ID1] = 0x141,  [PHY_ID2] = PHY_ID2_INIT,
    [PHY_1000T_CTRL] = 0x0e00,  [M88E1000_PHY_SPEC_CTRL] = 0x360,
    [M88E1000_EXT_PHY_SPEC_CTRL] = 0x0d60,  [PHY_AUTONEG_ADV] = 0xde1,
    [PHY_LP_ABILITY] = 0x1e0,   [PHY_1000T_STATUS] = 0x3c00,
    [M88E1000_PHY_SPEC_STATUS] = 0xac00,
};
/* MAC register reset values; registers not listed reset to zero
 * (e1000_reset() zeroes mac_reg[] before copying this table in). */
static const uint32_t mac_reg_init[] = {
    [PBA] =     0x00100030,
    [LEDCTL] =  0x602,
    [CTRL] =    E1000_CTRL_SWDPIN2 | E1000_CTRL_SWDPIN0 |
                E1000_CTRL_SPD_1000 | E1000_CTRL_SLU,
    [STATUS] =  0x80000000 | E1000_STATUS_GIO_MASTER_ENABLE |
                E1000_STATUS_ASDV | E1000_STATUS_MTXCKOK |
                E1000_STATUS_SPEED_1000 | E1000_STATUS_FD |
                E1000_STATUS_LU,
    [MANC] =    E1000_MANC_EN_MNG2HOST | E1000_MANC_RCV_TCO_EN |
                E1000_MANC_ARP_EN | E1000_MANC_0298_EN |
                E1000_MANC_RMCP_EN,
};
  199. static void
  200. set_interrupt_cause(E1000State *s, int index, uint32_t val)
  201. {
  202. if (val && (E1000_DEVID >= E1000_DEV_ID_82547EI_MOBILE)) {
  203. /* Only for 8257x */
  204. val |= E1000_ICR_INT_ASSERTED;
  205. }
  206. s->mac_reg[ICR] = val;
  207. s->mac_reg[ICS] = val;
  208. qemu_set_irq(s->dev.irq[0], (s->mac_reg[IMS] & s->mac_reg[ICR]) != 0);
  209. }
  210. static void
  211. set_ics(E1000State *s, int index, uint32_t val)
  212. {
  213. DBGOUT(INTERRUPT, "set_ics %x, ICR %x, IMR %x\n", val, s->mac_reg[ICR],
  214. s->mac_reg[IMS]);
  215. set_interrupt_cause(s, 0, val | s->mac_reg[ICR]);
  216. }
  217. static int
  218. rxbufsize(uint32_t v)
  219. {
  220. v &= E1000_RCTL_BSEX | E1000_RCTL_SZ_16384 | E1000_RCTL_SZ_8192 |
  221. E1000_RCTL_SZ_4096 | E1000_RCTL_SZ_2048 | E1000_RCTL_SZ_1024 |
  222. E1000_RCTL_SZ_512 | E1000_RCTL_SZ_256;
  223. switch (v) {
  224. case E1000_RCTL_BSEX | E1000_RCTL_SZ_16384:
  225. return 16384;
  226. case E1000_RCTL_BSEX | E1000_RCTL_SZ_8192:
  227. return 8192;
  228. case E1000_RCTL_BSEX | E1000_RCTL_SZ_4096:
  229. return 4096;
  230. case E1000_RCTL_SZ_1024:
  231. return 1024;
  232. case E1000_RCTL_SZ_512:
  233. return 512;
  234. case E1000_RCTL_SZ_256:
  235. return 256;
  236. }
  237. return 2048;
  238. }
  239. static void e1000_reset(void *opaque)
  240. {
  241. E1000State *d = opaque;
  242. qemu_del_timer(d->autoneg_timer);
  243. memset(d->phy_reg, 0, sizeof d->phy_reg);
  244. memmove(d->phy_reg, phy_reg_init, sizeof phy_reg_init);
  245. memset(d->mac_reg, 0, sizeof d->mac_reg);
  246. memmove(d->mac_reg, mac_reg_init, sizeof mac_reg_init);
  247. d->rxbuf_min_shift = 1;
  248. memset(&d->tx, 0, sizeof d->tx);
  249. if (d->nic->nc.link_down) {
  250. e1000_link_down(d);
  251. }
  252. }
  253. static void
  254. set_ctrl(E1000State *s, int index, uint32_t val)
  255. {
  256. /* RST is self clearing */
  257. s->mac_reg[CTRL] = val & ~E1000_CTRL_RST;
  258. }
  259. static void
  260. set_rx_control(E1000State *s, int index, uint32_t val)
  261. {
  262. s->mac_reg[RCTL] = val;
  263. s->rxbuf_size = rxbufsize(val);
  264. s->rxbuf_min_shift = ((val / E1000_RCTL_RDMTS_QUAT) & 3) + 1;
  265. DBGOUT(RX, "RCTL: %d, mac_reg[RCTL] = 0x%x\n", s->mac_reg[RDT],
  266. s->mac_reg[RCTL]);
  267. qemu_flush_queued_packets(&s->nic->nc);
  268. }
/*
 * Guest write to the MDI Control register.  Decodes the MDIO operation
 * it encodes (PHY number, register, read/write, data), performs it
 * against phy_reg[], and latches the result -- with READY set and
 * possibly ERROR -- back into MDIC.  Optionally raises the MDAC
 * interrupt when the guest asked for completion notification.
 */
static void
set_mdic(E1000State *s, int index, uint32_t val)
{
    uint32_t data = val & E1000_MDIC_DATA_MASK;
    uint32_t addr = ((val & E1000_MDIC_REG_MASK) >> E1000_MDIC_REG_SHIFT);

    /* Only PHY address 1 is implemented; anything else errors out. */
    if ((val & E1000_MDIC_PHY_MASK) >> E1000_MDIC_PHY_SHIFT != 1) // phy #
        val = s->mac_reg[MDIC] | E1000_MDIC_ERROR;
    else if (val & E1000_MDIC_OP_READ) {
        DBGOUT(MDIC, "MDIC read reg 0x%x\n", addr);
        if (!(phy_regcap[addr] & PHY_R)) {
            DBGOUT(MDIC, "MDIC read reg %x unhandled\n", addr);
            val |= E1000_MDIC_ERROR;
        } else
            /* Clear the data field (val ^ data) and merge in the
             * current PHY register value. */
            val = (val ^ data) | s->phy_reg[addr];
    } else if (val & E1000_MDIC_OP_WRITE) {
        DBGOUT(MDIC, "MDIC write reg 0x%x, value 0x%x\n", addr, data);
        if (!(phy_regcap[addr] & PHY_W)) {
            DBGOUT(MDIC, "MDIC write reg %x unhandled\n", addr);
            val |= E1000_MDIC_ERROR;
        } else {
            /* Run the register's side-effect hook first (e.g. PHY_CTRL
             * triggering auto-negotiation), then store the value. */
            if (addr < NPHYWRITEOPS && phyreg_writeops[addr]) {
                phyreg_writeops[addr](s, index, data);
            }
            s->phy_reg[addr] = data;
        }
    }
    s->mac_reg[MDIC] = val | E1000_MDIC_READY;

    if (val & E1000_MDIC_INT_EN) {
        set_ics(s, 0, E1000_ICR_MDAC);
    }
}
/*
 * Guest read of the EEPROM/Flash Control register: reflect the latched
 * control bits plus PRES/GNT, and drive the DO (data out) line with the
 * current EEPROM output bit.
 */
static uint32_t
get_eecd(E1000State *s, int index)
{
    uint32_t ret = E1000_EECD_PRES|E1000_EECD_GNT | s->eecd_state.old_eecd;

    DBGOUT(EEPROM, "reading eeprom bit %d (reading %d)\n",
           s->eecd_state.bitnum_out, s->eecd_state.reading);
    /* DO idles high; during a read it carries the selected 16-bit word
     * MSB-first (the ^ 0xf flips the bit index so bit 15 comes out first). */
    if (!s->eecd_state.reading ||
        ((s->eeprom_data[(s->eecd_state.bitnum_out >> 4) & 0x3f] >>
          ((s->eecd_state.bitnum_out & 0xf) ^ 0xf))) & 1)
        ret |= E1000_EECD_DO;
    return ret;
}
/*
 * Guest write to the EEPROM/Flash Control register: emulates bit-banged
 * access to the Microwire EEPROM over the SK/CS/DI lines.  After nine
 * bits are shifted in (bits 6-8 = opcode, bits 0-5 = word address), a
 * READ opcode positions bitnum_out so get_eecd() clocks the word out.
 */
static void
set_eecd(E1000State *s, int index, uint32_t val)
{
    uint32_t oldval = s->eecd_state.old_eecd;

    /* Latch the guest-controlled lines for the next edge comparison. */
    s->eecd_state.old_eecd = val & (E1000_EECD_SK | E1000_EECD_CS |
            E1000_EECD_DI|E1000_EECD_FWE_MASK|E1000_EECD_REQ);
    if (!(E1000_EECD_CS & val))			// CS inactive; nothing to do
	return;
    if (E1000_EECD_CS & (val ^ oldval)) {	// CS rise edge; reset state
	s->eecd_state.val_in = 0;
	s->eecd_state.bitnum_in = 0;
	s->eecd_state.bitnum_out = 0;
	s->eecd_state.reading = 0;
    }
    if (!(E1000_EECD_SK & (val ^ oldval)))	// no clock edge
        return;
    if (!(E1000_EECD_SK & val)) {		// falling edge
        s->eecd_state.bitnum_out++;             // advance output bit position
        return;
    }
    /* Rising clock edge: shift the DI line into the command word. */
    s->eecd_state.val_in <<= 1;
    if (val & E1000_EECD_DI)
	s->eecd_state.val_in |= 1;
    if (++s->eecd_state.bitnum_in == 9 && !s->eecd_state.reading) {
        /* Command complete: compute first output bit position and check
         * whether this is a Microwire READ. */
	s->eecd_state.bitnum_out = ((s->eecd_state.val_in & 0x3f)<<4)-1;
	s->eecd_state.reading = (((s->eecd_state.val_in >> 6) & 7) ==
	    EEPROM_READ_OPCODE_MICROWIRE);
    }
    DBGOUT(EEPROM, "eeprom bitnum in %d out %d, reading %d\n",
	   s->eecd_state.bitnum_in, s->eecd_state.bitnum_out,
           s->eecd_state.reading);
}
  344. static uint32_t
  345. flash_eerd_read(E1000State *s, int x)
  346. {
  347. unsigned int index, r = s->mac_reg[EERD] & ~E1000_EEPROM_RW_REG_START;
  348. if ((s->mac_reg[EERD] & E1000_EEPROM_RW_REG_START) == 0)
  349. return (s->mac_reg[EERD]);
  350. if ((index = r >> E1000_EEPROM_RW_ADDR_SHIFT) > EEPROM_CHECKSUM_REG)
  351. return (E1000_EEPROM_RW_REG_DONE | r);
  352. return ((s->eeprom_data[index] << E1000_EEPROM_RW_REG_DATA) |
  353. E1000_EEPROM_RW_REG_DONE | r);
  354. }
/*
 * Compute an Internet checksum over data[css .. n-1] (clipped to cse
 * when cse is nonzero, which marks the inclusive end offset) and store
 * it big-endian at offset sloc.
 */
static void
putsum(uint8_t *data, uint32_t n, uint32_t sloc, uint32_t css, uint32_t cse)
{
    uint32_t sum;

    if (cse && cse < n)
        n = cse + 1;
    /* NOTE(review): css and sloc come from guest-controlled descriptor
     * fields; if css > n the unsigned n-css below wraps to a huge length
     * -- confirm callers bound these before trusting them. */
    if (sloc < n-1) {
        sum = net_checksum_add(n-css, data+css);
        cpu_to_be16wu((uint16_t *)(data + sloc),
                      net_checksum_finish(sum));
    }
}
  367. static inline int
  368. vlan_enabled(E1000State *s)
  369. {
  370. return ((s->mac_reg[CTRL] & E1000_CTRL_VME) != 0);
  371. }
  372. static inline int
  373. vlan_rx_filter_enabled(E1000State *s)
  374. {
  375. return ((s->mac_reg[RCTL] & E1000_RCTL_VFE) != 0);
  376. }
  377. static inline int
  378. is_vlan_packet(E1000State *s, const uint8_t *buf)
  379. {
  380. return (be16_to_cpup((uint16_t *)(buf + 12)) ==
  381. le16_to_cpup((uint16_t *)(s->mac_reg + VET)));
  382. }
  383. static inline int
  384. is_vlan_txd(uint32_t txd_lower)
  385. {
  386. return ((txd_lower & E1000_TXD_CMD_VLE) != 0);
  387. }
  388. /* FCS aka Ethernet CRC-32. We don't get it from backends and can't
  389. * fill it in, just pad descriptor length by 4 bytes unless guest
  390. * told us to strip it off the packet. */
  391. static inline int
  392. fcs_len(E1000State *s)
  393. {
  394. return (s->mac_reg[RCTL] & E1000_RCTL_SECRC) ? 0 : 4;
  395. }
  396. static void
  397. e1000_send_packet(E1000State *s, const uint8_t *buf, int size)
  398. {
  399. if (s->phy_reg[PHY_CTRL] & MII_CR_LOOPBACK) {
  400. s->nic->nc.info->receive(&s->nic->nc, buf, size);
  401. } else {
  402. qemu_send_packet(&s->nic->nc, buf, size);
  403. }
  404. }
/*
 * Emit one segment (or a complete non-TSO frame) from tx.data.
 * For a TSO segment this first patches the packet headers in place:
 * IP total length (v4) or payload length (v6), IP identification
 * (incremented by the number of segments already sent), TCP sequence
 * number, and the TCP/UDP pseudo-header checksum.  It then applies any
 * requested IP / TCP / UDP checksums, inserts the VLAN tag if needed,
 * sends the frame, and updates the TX statistics counters.
 */
static void
xmit_seg(E1000State *s)
{
    uint16_t len, *sp;
    unsigned int frames = s->tx.tso_frames, css, sofar, n;
    struct e1000_tx *tp = &s->tx;

    if (tp->tse && tp->cptse) {
        css = tp->ipcss;
        DBGOUT(TXSUM, "frames %d size %d ipcss %d\n",
               frames, tp->size, css);
        if (tp->ip) {		// IPv4
            /* Patch total length, then bump the IP ID by the segment count. */
            cpu_to_be16wu((uint16_t *)(tp->data+css+2),
                          tp->size - css);
            cpu_to_be16wu((uint16_t *)(tp->data+css+4),
                          be16_to_cpup((uint16_t *)(tp->data+css+4))+frames);
        } else			// IPv6
            cpu_to_be16wu((uint16_t *)(tp->data+css+4),
                          tp->size - css);
        css = tp->tucss;
        len = tp->size - css;
        DBGOUT(TXSUM, "tcp %d tucss %d len %d\n", tp->tcp, css, len);
        if (tp->tcp) {
            /* Advance the sequence number past what earlier segments sent. */
            sofar = frames * tp->mss;
            cpu_to_be32wu((uint32_t *)(tp->data+css+4),	// seq
                be32_to_cpupu((uint32_t *)(tp->data+css+4))+sofar);
            /* Clear PSH and FIN on every segment but the last. */
            if (tp->paylen - sofar > tp->mss)
                tp->data[css + 13] &= ~9;		// PSH, FIN
        } else	// UDP
            cpu_to_be16wu((uint16_t *)(tp->data+css+4), len);
        if (tp->sum_needed & E1000_TXD_POPTS_TXSM) {
            unsigned int phsum;
            // add pseudo-header length before checksum calculation
            sp = (uint16_t *)(tp->data + tp->tucso);
            phsum = be16_to_cpup(sp) + len;
            phsum = (phsum >> 16) + (phsum & 0xffff);
            cpu_to_be16wu(sp, phsum);
        }
        tp->tso_frames++;
    }

    if (tp->sum_needed & E1000_TXD_POPTS_TXSM)
        putsum(tp->data, tp->size, tp->tucso, tp->tucss, tp->tucse);
    if (tp->sum_needed & E1000_TXD_POPTS_IXSM)
        putsum(tp->data, tp->size, tp->ipcso, tp->ipcss, tp->ipcse);
    if (tp->vlan_needed) {
        /* Insert the 4-byte 802.1Q tag after the two MAC addresses;
         * vlan[] immediately precedes data[] in the struct, so the
         * tagged frame is contiguous starting at tp->vlan. */
        memmove(tp->vlan, tp->data, 4);
        memmove(tp->data, tp->data + 4, 8);
        memcpy(tp->data + 8, tp->vlan_header, 4);
        e1000_send_packet(s, tp->vlan, tp->size + 4);
    } else
        e1000_send_packet(s, tp->data, tp->size);
    /* Statistics: TPT/GPTC packet counts; TOTL/TOTH form a 64-bit
     * octet counter, carried manually on 32-bit overflow. */
    s->mac_reg[TPT]++;
    s->mac_reg[GPTC]++;
    n = s->mac_reg[TOTL];
    if ((s->mac_reg[TOTL] += s->tx.size) < n)
        s->mac_reg[TOTH]++;
}
  461. static void
  462. process_tx_desc(E1000State *s, struct e1000_tx_desc *dp)
  463. {
  464. uint32_t txd_lower = le32_to_cpu(dp->lower.data);
  465. uint32_t dtype = txd_lower & (E1000_TXD_CMD_DEXT | E1000_TXD_DTYP_D);
  466. unsigned int split_size = txd_lower & 0xffff, bytes, sz, op;
  467. unsigned int msh = 0xfffff, hdr = 0;
  468. uint64_t addr;
  469. struct e1000_context_desc *xp = (struct e1000_context_desc *)dp;
  470. struct e1000_tx *tp = &s->tx;
  471. if (dtype == E1000_TXD_CMD_DEXT) { // context descriptor
  472. op = le32_to_cpu(xp->cmd_and_length);
  473. tp->ipcss = xp->lower_setup.ip_fields.ipcss;
  474. tp->ipcso = xp->lower_setup.ip_fields.ipcso;
  475. tp->ipcse = le16_to_cpu(xp->lower_setup.ip_fields.ipcse);
  476. tp->tucss = xp->upper_setup.tcp_fields.tucss;
  477. tp->tucso = xp->upper_setup.tcp_fields.tucso;
  478. tp->tucse = le16_to_cpu(xp->upper_setup.tcp_fields.tucse);
  479. tp->paylen = op & 0xfffff;
  480. tp->hdr_len = xp->tcp_seg_setup.fields.hdr_len;
  481. tp->mss = le16_to_cpu(xp->tcp_seg_setup.fields.mss);
  482. tp->ip = (op & E1000_TXD_CMD_IP) ? 1 : 0;
  483. tp->tcp = (op & E1000_TXD_CMD_TCP) ? 1 : 0;
  484. tp->tse = (op & E1000_TXD_CMD_TSE) ? 1 : 0;
  485. tp->tso_frames = 0;
  486. if (tp->tucso == 0) { // this is probably wrong
  487. DBGOUT(TXSUM, "TCP/UDP: cso 0!\n");
  488. tp->tucso = tp->tucss + (tp->tcp ? 16 : 6);
  489. }
  490. return;
  491. } else if (dtype == (E1000_TXD_CMD_DEXT | E1000_TXD_DTYP_D)) {
  492. // data descriptor
  493. if (tp->size == 0) {
  494. tp->sum_needed = le32_to_cpu(dp->upper.data) >> 8;
  495. }
  496. tp->cptse = ( txd_lower & E1000_TXD_CMD_TSE ) ? 1 : 0;
  497. } else {
  498. // legacy descriptor
  499. tp->cptse = 0;
  500. }
  501. if (vlan_enabled(s) && is_vlan_txd(txd_lower) &&
  502. (tp->cptse || txd_lower & E1000_TXD_CMD_EOP)) {
  503. tp->vlan_needed = 1;
  504. cpu_to_be16wu((uint16_t *)(tp->vlan_header),
  505. le16_to_cpup((uint16_t *)(s->mac_reg + VET)));
  506. cpu_to_be16wu((uint16_t *)(tp->vlan_header + 2),
  507. le16_to_cpu(dp->upper.fields.special));
  508. }
  509. addr = le64_to_cpu(dp->buffer_addr);
  510. if (tp->tse && tp->cptse) {
  511. hdr = tp->hdr_len;
  512. msh = hdr + tp->mss;
  513. do {
  514. bytes = split_size;
  515. if (tp->size + bytes > msh)
  516. bytes = msh - tp->size;
  517. bytes = MIN(sizeof(tp->data) - tp->size, bytes);
  518. pci_dma_read(&s->dev, addr, tp->data + tp->size, bytes);
  519. if ((sz = tp->size + bytes) >= hdr && tp->size < hdr)
  520. memmove(tp->header, tp->data, hdr);
  521. tp->size = sz;
  522. addr += bytes;
  523. if (sz == msh) {
  524. xmit_seg(s);
  525. memmove(tp->data, tp->header, hdr);
  526. tp->size = hdr;
  527. }
  528. } while (split_size -= bytes);
  529. } else if (!tp->tse && tp->cptse) {
  530. // context descriptor TSE is not set, while data descriptor TSE is set
  531. DBGOUT(TXERR, "TCP segmentation error\n");
  532. } else {
  533. split_size = MIN(sizeof(tp->data) - tp->size, split_size);
  534. pci_dma_read(&s->dev, addr, tp->data + tp->size, split_size);
  535. tp->size += split_size;
  536. }
  537. if (!(txd_lower & E1000_TXD_CMD_EOP))
  538. return;
  539. if (!(tp->tse && tp->cptse && tp->size < hdr))
  540. xmit_seg(s);
  541. tp->tso_frames = 0;
  542. tp->sum_needed = 0;
  543. tp->vlan_needed = 0;
  544. tp->size = 0;
  545. tp->cptse = 0;
  546. }
  547. static uint32_t
  548. txdesc_writeback(E1000State *s, dma_addr_t base, struct e1000_tx_desc *dp)
  549. {
  550. uint32_t txd_upper, txd_lower = le32_to_cpu(dp->lower.data);
  551. if (!(txd_lower & (E1000_TXD_CMD_RS|E1000_TXD_CMD_RPS)))
  552. return 0;
  553. txd_upper = (le32_to_cpu(dp->upper.data) | E1000_TXD_STAT_DD) &
  554. ~(E1000_TXD_STAT_EC | E1000_TXD_STAT_LC | E1000_TXD_STAT_TU);
  555. dp->upper.data = cpu_to_le32(txd_upper);
  556. pci_dma_write(&s->dev, base + ((char *)&dp->upper - (char *)dp),
  557. &dp->upper, sizeof(dp->upper));
  558. return E1000_ICR_TXDW;
  559. }
  560. static uint64_t tx_desc_base(E1000State *s)
  561. {
  562. uint64_t bah = s->mac_reg[TDBAH];
  563. uint64_t bal = s->mac_reg[TDBAL] & ~0xf;
  564. return (bah << 32) + bal;
  565. }
/*
 * Drain the transmit ring: walk descriptors from TDH to TDT, process
 * and write back each one, then raise the accumulated interrupt
 * causes.  Bails out with a diagnostic if TDH wraps all the way around
 * without reaching TDT (bogus guest-programmed TDT/TDLEN).
 */
static void
start_xmit(E1000State *s)
{
    dma_addr_t base;
    struct e1000_tx_desc desc;
    uint32_t tdh_start = s->mac_reg[TDH], cause = E1000_ICS_TXQE;

    if (!(s->mac_reg[TCTL] & E1000_TCTL_EN)) {
        DBGOUT(TX, "tx disabled\n");
        return;
    }

    while (s->mac_reg[TDH] != s->mac_reg[TDT]) {
        base = tx_desc_base(s) +
               sizeof(struct e1000_tx_desc) * s->mac_reg[TDH];
        pci_dma_read(&s->dev, base, &desc, sizeof(desc));

        DBGOUT(TX, "index %d: %p : %x %x\n", s->mac_reg[TDH],
               (void *)(intptr_t)desc.buffer_addr, desc.lower.data,
               desc.upper.data);

        process_tx_desc(s, &desc);
        cause |= txdesc_writeback(s, base, &desc);

        /* Advance TDH, wrapping at the end of the ring. */
        if (++s->mac_reg[TDH] * sizeof(desc) >= s->mac_reg[TDLEN])
            s->mac_reg[TDH] = 0;
        /*
         * the following could happen only if guest sw assigns
         * bogus values to TDT/TDLEN.
         * there's nothing too intelligent we could do about this.
         */
        if (s->mac_reg[TDH] == tdh_start) {
            DBGOUT(TXERR, "TDH wraparound @%x, TDT %x, TDLEN %x\n",
                   tdh_start, s->mac_reg[TDT], s->mac_reg[TDLEN]);
            break;
        }
    }
    set_ics(s, 0, cause);
}
/*
 * Decide whether an incoming frame passes the receive filters.
 * Checks, in order: the VLAN filter table (when VLAN filtering is
 * enabled), unicast/multicast promiscuous bits, broadcast accept,
 * exact match against the receive-address (RAL/RAH) register pairs,
 * and finally the 4096-bit multicast hash table (MTA).
 * Returns nonzero to accept the frame, 0 to drop it.
 */
static int
receive_filter(E1000State *s, const uint8_t *buf, int size)
{
    static const uint8_t bcast[] = {0xff, 0xff, 0xff, 0xff, 0xff, 0xff};
    static const int mta_shift[] = {4, 3, 2, 0};
    uint32_t f, rctl = s->mac_reg[RCTL], ra[2], *rp;

    if (is_vlan_packet(s, buf) && vlan_rx_filter_enabled(s)) {
        /* VLAN ID selects one bit of the 4096-bit VFTA table. */
        uint16_t vid = be16_to_cpup((uint16_t *)(buf + 14));
        uint32_t vfta = le32_to_cpup((uint32_t *)(s->mac_reg + VFTA) +
                                     ((vid >> 5) & 0x7f));
        if ((vfta & (1 << (vid & 0x1f))) == 0)
            return 0;
    }

    if (rctl & E1000_RCTL_UPE)			// promiscuous
        return 1;

    if ((buf[0] & 1) && (rctl & E1000_RCTL_MPE))	// promiscuous mcast
        return 1;

    if ((rctl & E1000_RCTL_BAM) && !memcmp(buf, bcast, sizeof bcast))
        return 1;

    /* Exact unicast match against the 16 RAL/RAH address pairs; AV in
     * the high word marks an entry as valid. */
    for (rp = s->mac_reg + RA; rp < s->mac_reg + RA + 32; rp += 2) {
        if (!(rp[1] & E1000_RAH_AV))
            continue;
        ra[0] = cpu_to_le32(rp[0]);
        ra[1] = cpu_to_le32(rp[1]);
        if (!memcmp(buf, (uint8_t *)ra, 6)) {
            DBGOUT(RXFILTER,
                   "unicast match[%d]: %02x:%02x:%02x:%02x:%02x:%02x\n",
                   (int)(rp - s->mac_reg - RA)/2,
                   buf[0], buf[1], buf[2], buf[3], buf[4], buf[5]);
            return 1;
        }
    }
    DBGOUT(RXFILTER, "unicast mismatch: %02x:%02x:%02x:%02x:%02x:%02x\n",
           buf[0], buf[1], buf[2], buf[3], buf[4], buf[5]);

    /* Multicast hash: RCTL.MO selects which 12 bits of the destination
     * address index into the MTA bit array. */
    f = mta_shift[(rctl >> E1000_RCTL_MO_SHIFT) & 3];
    f = (((buf[5] << 8) | buf[4]) >> f) & 0xfff;
    if (s->mac_reg[MTA + (f >> 5)] & (1 << (f & 0x1f)))
        return 1;
    DBGOUT(RXFILTER,
           "dropping, inexact filter mismatch: %02x:%02x:%02x:%02x:%02x:%02x MO %d MTA[%d] %x\n",
           buf[0], buf[1], buf[2], buf[3], buf[4], buf[5],
           (rctl >> E1000_RCTL_MO_SHIFT) & 3, f >> 5,
           s->mac_reg[MTA + (f >> 5)]);
    return 0;
}
  645. static void
  646. e1000_set_link_status(NetClientState *nc)
  647. {
  648. E1000State *s = DO_UPCAST(NICState, nc, nc)->opaque;
  649. uint32_t old_status = s->mac_reg[STATUS];
  650. if (nc->link_down) {
  651. e1000_link_down(s);
  652. } else {
  653. e1000_link_up(s);
  654. }
  655. if (s->mac_reg[STATUS] != old_status)
  656. set_ics(s, 0, E1000_ICR_LSC);
  657. }
  658. static bool e1000_has_rxbufs(E1000State *s, size_t total_size)
  659. {
  660. int bufs;
  661. /* Fast-path short packets */
  662. if (total_size <= s->rxbuf_size) {
  663. return s->mac_reg[RDH] != s->mac_reg[RDT];
  664. }
  665. if (s->mac_reg[RDH] < s->mac_reg[RDT]) {
  666. bufs = s->mac_reg[RDT] - s->mac_reg[RDH];
  667. } else if (s->mac_reg[RDH] > s->mac_reg[RDT]) {
  668. bufs = s->mac_reg[RDLEN] / sizeof(struct e1000_rx_desc) +
  669. s->mac_reg[RDT] - s->mac_reg[RDH];
  670. } else {
  671. return false;
  672. }
  673. return total_size <= bufs * s->rxbuf_size;
  674. }
  675. static int
  676. e1000_can_receive(NetClientState *nc)
  677. {
  678. E1000State *s = DO_UPCAST(NICState, nc, nc)->opaque;
  679. return (s->mac_reg[RCTL] & E1000_RCTL_EN) && e1000_has_rxbufs(s, 1);
  680. }
  681. static uint64_t rx_desc_base(E1000State *s)
  682. {
  683. uint64_t bah = s->mac_reg[RDBAH];
  684. uint64_t bal = s->mac_reg[RDBAL] & ~0xf;
  685. return (bah << 32) + bal;
  686. }
/*
 * Receive one frame from the network backend into the guest's RX ring.
 * Pads runts to the 60-byte minimum, drops oversized frames unless
 * LPE/SBP allow them, applies the receive filters, optionally strips
 * the 802.1Q tag, then DMA-copies the payload across as many RX
 * descriptors as needed and raises RXT0 (plus RXDMT0 when free
 * descriptors fall below the threshold).  Returns the number of bytes
 * consumed, or -1 to make the backend queue the packet for retry.
 */
static ssize_t
e1000_receive(NetClientState *nc, const uint8_t *buf, size_t size)
{
    E1000State *s = DO_UPCAST(NICState, nc, nc)->opaque;
    struct e1000_rx_desc desc;
    dma_addr_t base;
    unsigned int n, rdt;
    uint32_t rdh_start;
    uint16_t vlan_special = 0;
    uint8_t vlan_status = 0, vlan_offset = 0;
    uint8_t min_buf[MIN_BUF_SIZE];
    size_t desc_offset;
    size_t desc_size;
    size_t total_size;

    if (!(s->mac_reg[RCTL] & E1000_RCTL_EN))
        return -1;

    /* Pad to minimum Ethernet frame length */
    if (size < sizeof(min_buf)) {
        memcpy(min_buf, buf, size);
        memset(&min_buf[size], 0, sizeof(min_buf) - size);
        buf = min_buf;
        size = sizeof(min_buf);
    }

    /* Discard oversized packets if !LPE and !SBP. */
    if (size > MAXIMUM_ETHERNET_VLAN_SIZE
        && !(s->mac_reg[RCTL] & E1000_RCTL_LPE)
        && !(s->mac_reg[RCTL] & E1000_RCTL_SBP)) {
        return size;
    }

    /* Filtered-out frames are reported as consumed, not retried. */
    if (!receive_filter(s, buf, size))
        return size;

    if (vlan_enabled(s) && is_vlan_packet(s, buf)) {
        /* Strip the tag: save it for the descriptor's special field and
         * shift the 12 MAC-address bytes up over the 4 tag bytes. */
        vlan_special = cpu_to_le16(be16_to_cpup((uint16_t *)(buf + 14)));
        memmove((uint8_t *)buf + 4, buf, 12);
        vlan_status = E1000_RXD_STAT_VP;
        vlan_offset = 4;
        size -= 4;
    }

    rdh_start = s->mac_reg[RDH];
    desc_offset = 0;
    total_size = size + fcs_len(s);
    if (!e1000_has_rxbufs(s, total_size)) {
            /* Out of descriptors: signal overrun and let the backend retry. */
            set_ics(s, 0, E1000_ICS_RXO);
            return -1;
    }
    do {
        desc_size = total_size - desc_offset;
        if (desc_size > s->rxbuf_size) {
            desc_size = s->rxbuf_size;
        }
        base = rx_desc_base(s) + sizeof(desc) * s->mac_reg[RDH];
        pci_dma_read(&s->dev, base, &desc, sizeof(desc));
        desc.special = vlan_special;
        desc.status |= (vlan_status | E1000_RXD_STAT_DD);
        if (desc.buffer_addr) {
            if (desc_offset < size) {
                size_t copy_size = size - desc_offset;
                if (copy_size > s->rxbuf_size) {
                    copy_size = s->rxbuf_size;
                }
                pci_dma_write(&s->dev, le64_to_cpu(desc.buffer_addr),
                              buf + desc_offset + vlan_offset, copy_size);
            }
            desc_offset += desc_size;
            desc.length = cpu_to_le16(desc_size);
            if (desc_offset >= total_size) {
                desc.status |= E1000_RXD_STAT_EOP | E1000_RXD_STAT_IXSM;
            } else {
                /* Guest zeroing out status is not a hardware requirement.
                   Clear EOP in case guest didn't do it. */
                desc.status &= ~E1000_RXD_STAT_EOP;
            }
        } else { // as per intel docs; skip descriptors with null buf addr
            DBGOUT(RX, "Null RX descriptor!!\n");
        }
        pci_dma_write(&s->dev, base, &desc, sizeof(desc));

        /* Advance RDH, wrapping at the end of the ring. */
        if (++s->mac_reg[RDH] * sizeof(desc) >= s->mac_reg[RDLEN])
            s->mac_reg[RDH] = 0;
        /* see comment in start_xmit; same here */
        if (s->mac_reg[RDH] == rdh_start) {
            DBGOUT(RXERR, "RDH wraparound @%x, RDT %x, RDLEN %x\n",
                   rdh_start, s->mac_reg[RDT], s->mac_reg[RDLEN]);
            set_ics(s, 0, E1000_ICS_RXO);
            return -1;
        }
    } while (desc_offset < total_size);

    /* Statistics: packet counters and the 64-bit TORL/TORH octet count. */
    s->mac_reg[GPRC]++;
    s->mac_reg[TPR]++;
    /* TOR - Total Octets Received:
     * This register includes bytes received in a packet from the <Destination
     * Address> field through the <CRC> field, inclusively.
     */
    n = s->mac_reg[TORL] + size + /* Always include FCS length. */ 4;
    if (n < s->mac_reg[TORL])
        s->mac_reg[TORH]++;
    s->mac_reg[TORL] = n;

    /* Raise RXT0, plus RXDMT0 when free descriptors dropped below the
     * RDMTS threshold (RDLEN >> rxbuf_min_shift). */
    n = E1000_ICS_RXT0;
    if ((rdt = s->mac_reg[RDT]) < s->mac_reg[RDH])
        rdt += s->mac_reg[RDLEN] / sizeof(desc);
    if (((rdt - s->mac_reg[RDH]) * sizeof(desc)) <= s->mac_reg[RDLEN] >>
        s->rxbuf_min_shift)
        n |= E1000_ICS_RXDMT0;

    set_ics(s, 0, n);

    return size;
}
/* Default MMIO register read: return the raw value, no side effects. */
static uint32_t
mac_readreg(E1000State *s, int index)
{
    return s->mac_reg[index];
}
  797. static uint32_t
  798. mac_icr_read(E1000State *s, int index)
  799. {
  800. uint32_t ret = s->mac_reg[ICR];
  801. DBGOUT(INTERRUPT, "ICR read: %x\n", ret);
  802. set_interrupt_cause(s, 0, 0);
  803. return ret;
  804. }
  805. static uint32_t
  806. mac_read_clr4(E1000State *s, int index)
  807. {
  808. uint32_t ret = s->mac_reg[index];
  809. s->mac_reg[index] = 0;
  810. return ret;
  811. }
  812. static uint32_t
  813. mac_read_clr8(E1000State *s, int index)
  814. {
  815. uint32_t ret = s->mac_reg[index];
  816. s->mac_reg[index] = 0;
  817. s->mac_reg[index-1] = 0;
  818. return ret;
  819. }
/* Default MMIO register write: store the raw value, no side effects. */
static void
mac_writereg(E1000State *s, int index, uint32_t val)
{
    s->mac_reg[index] = val;
}
  825. static void
  826. set_rdt(E1000State *s, int index, uint32_t val)
  827. {
  828. s->mac_reg[index] = val & 0xffff;
  829. if (e1000_has_rxbufs(s, 1)) {
  830. qemu_flush_queued_packets(&s->nic->nc);
  831. }
  832. }
/* Generic write for registers with only 16 implemented bits
 * (used for TDH/RDH in the dispatch table). */
static void
set_16bit(E1000State *s, int index, uint32_t val)
{
    s->mac_reg[index] = val & 0xffff;
}
/* TDLEN/RDLEN write: the mask keeps bits 19:7 only, forcing the ring
 * length to a 128-byte-aligned value below 1MB. */
static void
set_dlen(E1000State *s, int index, uint32_t val)
{
    s->mac_reg[index] = val & 0xfff80;
}
/* Write handler shared by TCTL and TDT (see macreg_writeops): store
 * the value, clamp TDT to its implemented 16 bits, and kick the
 * transmit path to process any pending descriptors. */
static void
set_tctl(E1000State *s, int index, uint32_t val)
{
    s->mac_reg[index] = val;
    s->mac_reg[TDT] &= 0xffff;
    start_xmit(s);
}
/* ICR write: writing a 1 to a bit clears that interrupt cause. */
static void
set_icr(E1000State *s, int index, uint32_t val)
{
    DBGOUT(INTERRUPT, "set_icr %x\n", val);
    set_interrupt_cause(s, 0, s->mac_reg[ICR] & ~val);
}
/* IMC write: clear the given bits in the interrupt mask, then
 * re-evaluate the interrupt line with the new mask. */
static void
set_imc(E1000State *s, int index, uint32_t val)
{
    s->mac_reg[IMS] &= ~val;
    set_ics(s, 0, 0);
}
/* IMS write: set the given bits in the interrupt mask, then
 * re-evaluate the interrupt line (may assert it immediately). */
static void
set_ims(E1000State *s, int index, uint32_t val)
{
    s->mac_reg[IMS] |= val;
    set_ics(s, 0, 0);
}
#define getreg(x) [x] = mac_readreg

/* MMIO read dispatch table, indexed by register offset >> 2.
 * getreg() entries are plain reads; the named entries below have
 * read side effects (read-to-clear counters, ICR clearing, EEPROM).
 * NULL slots are unimplemented registers (see e1000_mmio_read). */
static uint32_t (*macreg_readops[])(E1000State *, int) = {
    getreg(PBA), getreg(RCTL), getreg(TDH), getreg(TXDCTL),
    getreg(WUFC), getreg(TDT), getreg(CTRL), getreg(LEDCTL),
    getreg(MANC), getreg(MDIC), getreg(SWSM), getreg(STATUS),
    getreg(TORL), getreg(TOTL), getreg(IMS), getreg(TCTL),
    getreg(RDH), getreg(RDT), getreg(VET), getreg(ICS),
    getreg(TDBAL), getreg(TDBAH), getreg(RDBAH), getreg(RDBAL),
    getreg(TDLEN), getreg(RDLEN),

    [TOTH] = mac_read_clr8, [TORH] = mac_read_clr8, [GPRC] = mac_read_clr4,
    [GPTC] = mac_read_clr4, [TPR] = mac_read_clr4, [TPT] = mac_read_clr4,
    [ICR] = mac_icr_read, [EECD] = get_eecd, [EERD] = flash_eerd_read,
    /* Register ranges: statistics block, receive addresses, multicast
     * table array, VLAN filter table array. */
    [CRCERRS ... MPC] = &mac_readreg,
    [RA ... RA+31] = &mac_readreg,
    [MTA ... MTA+127] = &mac_readreg,
    [VFTA ... VFTA+127] = &mac_readreg,
};
enum { NREADOPS = ARRAY_SIZE(macreg_readops) };
#define putreg(x) [x] = mac_writereg

/* MMIO write dispatch table, indexed by register offset >> 2.
 * putreg() entries are plain stores; the named entries have write
 * side effects.  NULL slots are read-only or unimplemented registers
 * (see e1000_mmio_write). */
static void (*macreg_writeops[])(E1000State *, int, uint32_t) = {
    putreg(PBA), putreg(EERD), putreg(SWSM), putreg(WUFC),
    putreg(TDBAL), putreg(TDBAH), putreg(TXDCTL), putreg(RDBAH),
    putreg(RDBAL), putreg(LEDCTL), putreg(VET),

    [TDLEN] = set_dlen, [RDLEN] = set_dlen, [TCTL] = set_tctl,
    [TDT] = set_tctl, [MDIC] = set_mdic, [ICS] = set_ics,
    [TDH] = set_16bit, [RDH] = set_16bit, [RDT] = set_rdt,
    [IMC] = set_imc, [IMS] = set_ims, [ICR] = set_icr,
    [EECD] = set_eecd, [RCTL] = set_rx_control, [CTRL] = set_ctrl,
    /* Register ranges: receive addresses, multicast table array,
     * VLAN filter table array. */
    [RA ... RA+31] = &mac_writereg,
    [MTA ... MTA+127] = &mac_writereg,
    [VFTA ... VFTA+127] = &mac_writereg,
};
enum { NWRITEOPS = ARRAY_SIZE(macreg_writeops) };
  901. static void
  902. e1000_mmio_write(void *opaque, target_phys_addr_t addr, uint64_t val,
  903. unsigned size)
  904. {
  905. E1000State *s = opaque;
  906. unsigned int index = (addr & 0x1ffff) >> 2;
  907. if (index < NWRITEOPS && macreg_writeops[index]) {
  908. macreg_writeops[index](s, index, val);
  909. } else if (index < NREADOPS && macreg_readops[index]) {
  910. DBGOUT(MMIO, "e1000_mmio_writel RO %x: 0x%04"PRIx64"\n", index<<2, val);
  911. } else {
  912. DBGOUT(UNKNOWN, "MMIO unknown write addr=0x%08x,val=0x%08"PRIx64"\n",
  913. index<<2, val);
  914. }
  915. }
  916. static uint64_t
  917. e1000_mmio_read(void *opaque, target_phys_addr_t addr, unsigned size)
  918. {
  919. E1000State *s = opaque;
  920. unsigned int index = (addr & 0x1ffff) >> 2;
  921. if (index < NREADOPS && macreg_readops[index])
  922. {
  923. return macreg_readops[index](s, index);
  924. }
  925. DBGOUT(UNKNOWN, "MMIO unknown read addr=0x%08x\n", index<<2);
  926. return 0;
  927. }
/* MMIO BAR ops: .impl forces every guest access to be performed as an
 * aligned 32-bit read or write, matching the register layout. */
static const MemoryRegionOps e1000_mmio_ops = {
    .read = e1000_mmio_read,
    .write = e1000_mmio_write,
    .endianness = DEVICE_LITTLE_ENDIAN,
    .impl = {
        .min_access_size = 4,
        .max_access_size = 4,
    },
};
  937. static uint64_t e1000_io_read(void *opaque, target_phys_addr_t addr,
  938. unsigned size)
  939. {
  940. E1000State *s = opaque;
  941. (void)s;
  942. return 0;
  943. }
  944. static void e1000_io_write(void *opaque, target_phys_addr_t addr,
  945. uint64_t val, unsigned size)
  946. {
  947. E1000State *s = opaque;
  948. (void)s;
  949. }
/* I/O port BAR ops: accesses are accepted but have no effect
 * (see e1000_io_read / e1000_io_write). */
static const MemoryRegionOps e1000_io_ops = {
    .read = e1000_io_read,
    .write = e1000_io_write,
    .endianness = DEVICE_LITTLE_ENDIAN,
};
  955. static bool is_version_1(void *opaque, int version_id)
  956. {
  957. return version_id == 1;
  958. }
/*
 * Migration state for the device.  Version 2 is current; version 1
 * streams carry an extra 4-byte field (the old instance id) that is
 * skipped via is_version_1.  Field order must not change, or saved
 * streams from older QEMUs will fail to load.
 */
static const VMStateDescription vmstate_e1000 = {
    .name = "e1000",
    .version_id = 2,
    .minimum_version_id = 1,
    .minimum_version_id_old = 1,
    .fields = (VMStateField []) {
        VMSTATE_PCI_DEVICE(dev, E1000State),
        VMSTATE_UNUSED_TEST(is_version_1, 4), /* was instance id */
        VMSTATE_UNUSED(4), /* Was mmio_base. */
        /* Receive buffer sizing state */
        VMSTATE_UINT32(rxbuf_size, E1000State),
        VMSTATE_UINT32(rxbuf_min_shift, E1000State),
        /* EEPROM bit-bang interface state */
        VMSTATE_UINT32(eecd_state.val_in, E1000State),
        VMSTATE_UINT16(eecd_state.bitnum_in, E1000State),
        VMSTATE_UINT16(eecd_state.bitnum_out, E1000State),
        VMSTATE_UINT16(eecd_state.reading, E1000State),
        VMSTATE_UINT32(eecd_state.old_eecd, E1000State),
        /* In-flight transmit context (checksum offload / TSO) */
        VMSTATE_UINT8(tx.ipcss, E1000State),
        VMSTATE_UINT8(tx.ipcso, E1000State),
        VMSTATE_UINT16(tx.ipcse, E1000State),
        VMSTATE_UINT8(tx.tucss, E1000State),
        VMSTATE_UINT8(tx.tucso, E1000State),
        VMSTATE_UINT16(tx.tucse, E1000State),
        VMSTATE_UINT32(tx.paylen, E1000State),
        VMSTATE_UINT8(tx.hdr_len, E1000State),
        VMSTATE_UINT16(tx.mss, E1000State),
        VMSTATE_UINT16(tx.size, E1000State),
        VMSTATE_UINT16(tx.tso_frames, E1000State),
        VMSTATE_UINT8(tx.sum_needed, E1000State),
        VMSTATE_INT8(tx.ip, E1000State),
        VMSTATE_INT8(tx.tcp, E1000State),
        VMSTATE_BUFFER(tx.header, E1000State),
        VMSTATE_BUFFER(tx.data, E1000State),
        /* EEPROM contents and PHY registers */
        VMSTATE_UINT16_ARRAY(eeprom_data, E1000State, 64),
        VMSTATE_UINT16_ARRAY(phy_reg, E1000State, 0x20),
        /* Individually migrated MAC registers */
        VMSTATE_UINT32(mac_reg[CTRL], E1000State),
        VMSTATE_UINT32(mac_reg[EECD], E1000State),
        VMSTATE_UINT32(mac_reg[EERD], E1000State),
        VMSTATE_UINT32(mac_reg[GPRC], E1000State),
        VMSTATE_UINT32(mac_reg[GPTC], E1000State),
        VMSTATE_UINT32(mac_reg[ICR], E1000State),
        VMSTATE_UINT32(mac_reg[ICS], E1000State),
        VMSTATE_UINT32(mac_reg[IMC], E1000State),
        VMSTATE_UINT32(mac_reg[IMS], E1000State),
        VMSTATE_UINT32(mac_reg[LEDCTL], E1000State),
        VMSTATE_UINT32(mac_reg[MANC], E1000State),
        VMSTATE_UINT32(mac_reg[MDIC], E1000State),
        VMSTATE_UINT32(mac_reg[MPC], E1000State),
        VMSTATE_UINT32(mac_reg[PBA], E1000State),
        VMSTATE_UINT32(mac_reg[RCTL], E1000State),
        VMSTATE_UINT32(mac_reg[RDBAH], E1000State),
        VMSTATE_UINT32(mac_reg[RDBAL], E1000State),
        VMSTATE_UINT32(mac_reg[RDH], E1000State),
        VMSTATE_UINT32(mac_reg[RDLEN], E1000State),
        VMSTATE_UINT32(mac_reg[RDT], E1000State),
        VMSTATE_UINT32(mac_reg[STATUS], E1000State),
        VMSTATE_UINT32(mac_reg[SWSM], E1000State),
        VMSTATE_UINT32(mac_reg[TCTL], E1000State),
        VMSTATE_UINT32(mac_reg[TDBAH], E1000State),
        VMSTATE_UINT32(mac_reg[TDBAL], E1000State),
        VMSTATE_UINT32(mac_reg[TDH], E1000State),
        VMSTATE_UINT32(mac_reg[TDLEN], E1000State),
        VMSTATE_UINT32(mac_reg[TDT], E1000State),
        VMSTATE_UINT32(mac_reg[TORH], E1000State),
        VMSTATE_UINT32(mac_reg[TORL], E1000State),
        VMSTATE_UINT32(mac_reg[TOTH], E1000State),
        VMSTATE_UINT32(mac_reg[TOTL], E1000State),
        VMSTATE_UINT32(mac_reg[TPR], E1000State),
        VMSTATE_UINT32(mac_reg[TPT], E1000State),
        VMSTATE_UINT32(mac_reg[TXDCTL], E1000State),
        VMSTATE_UINT32(mac_reg[WUFC], E1000State),
        VMSTATE_UINT32(mac_reg[VET], E1000State),
        /* Receive-address, multicast-table and VLAN-filter ranges */
        VMSTATE_UINT32_SUB_ARRAY(mac_reg, E1000State, RA, 32),
        VMSTATE_UINT32_SUB_ARRAY(mac_reg, E1000State, MTA, 128),
        VMSTATE_UINT32_SUB_ARRAY(mac_reg, E1000State, VFTA, 128),
        VMSTATE_END_OF_LIST()
    }
};
/*
 * Default EEPROM image (64 16-bit words).  Words 0-2 are overwritten
 * with the configured MAC address and the EEPROM_CHECKSUM_REG word is
 * recomputed at init time (see pci_e1000_init).  Words 11-14 carry the
 * device/vendor IDs (E1000_DEVID / 0x8086); the remaining values are
 * presumably copied from a real 8254x part -- treat as opaque.
 */
static const uint16_t e1000_eeprom_template[64] = {
    0x0000, 0x0000, 0x0000, 0x0000, 0xffff, 0x0000, 0x0000, 0x0000,
    0x3000, 0x1000, 0x6403, E1000_DEVID, 0x8086, E1000_DEVID, 0x8086, 0x3040,
    0x0008, 0x2000, 0x7e14, 0x0048, 0x1000, 0x00d8, 0x0000, 0x2700,
    0x6cc9, 0x3150, 0x0722, 0x040b, 0x0984, 0x0000, 0xc000, 0x0706,
    0x1008, 0x0000, 0x0f04, 0x7fff, 0x4d01, 0xffff, 0xffff, 0xffff,
    0xffff, 0xffff, 0xffff, 0xffff, 0xffff, 0xffff, 0xffff, 0xffff,
    0x0100, 0x4000, 0x121c, 0xffff, 0xffff, 0xffff, 0xffff, 0xffff,
    0xffff, 0xffff, 0xffff, 0xffff, 0xffff, 0xffff, 0xffff, 0x0000,
};
  1046. /* PCI interface */
/*
 * Create the MMIO and I/O port memory regions.
 *
 * MMIO writes are coalesced for throughput, except around registers
 * whose writes have immediate side effects (MDIC, interrupt registers,
 * TCTL, TDT): every gap between consecutive excluded_regs entries is
 * marked coalescable, while the 4-byte excluded registers themselves
 * are not.  The trailing PNPMMIO_SIZE entry is a sentinel terminating
 * the list; it must stay last and the list must stay sorted for the
 * gap arithmetic below to be correct.
 */
static void
e1000_mmio_setup(E1000State *d)
{
    int i;
    const uint32_t excluded_regs[] = {
        E1000_MDIC, E1000_ICR, E1000_ICS, E1000_IMS,
        E1000_IMC, E1000_TCTL, E1000_TDT, PNPMMIO_SIZE
    };

    memory_region_init_io(&d->mmio, &e1000_mmio_ops, d, "e1000-mmio",
                          PNPMMIO_SIZE);
    /* Coalesce everything below the first excluded register... */
    memory_region_add_coalescing(&d->mmio, 0, excluded_regs[0]);
    /* ...then each gap between consecutive excluded registers. */
    for (i = 0; excluded_regs[i] != PNPMMIO_SIZE; i++)
        memory_region_add_coalescing(&d->mmio, excluded_regs[i] + 4,
                                     excluded_regs[i+1] - excluded_regs[i] - 4);
    memory_region_init_io(&d->io, &e1000_io_ops, d, "e1000-io", IOPORT_SIZE);
}
  1063. static void
  1064. e1000_cleanup(NetClientState *nc)
  1065. {
  1066. E1000State *s = DO_UPCAST(NICState, nc, nc)->opaque;
  1067. s->nic = NULL;
  1068. }
  1069. static void
  1070. pci_e1000_uninit(PCIDevice *dev)
  1071. {
  1072. E1000State *d = DO_UPCAST(E1000State, dev, dev);
  1073. qemu_del_timer(d->autoneg_timer);
  1074. qemu_free_timer(d->autoneg_timer);
  1075. memory_region_destroy(&d->mmio);
  1076. memory_region_destroy(&d->io);
  1077. qemu_del_net_client(&d->nic->nc);
  1078. }
/* Callbacks through which the net layer drives this NIC model. */
static NetClientInfo net_e1000_info = {
    .type = NET_CLIENT_OPTIONS_KIND_NIC,
    .size = sizeof(NICState),
    .can_receive = e1000_can_receive,
    .receive = e1000_receive,
    .cleanup = e1000_cleanup,
    .link_status_changed = e1000_set_link_status,
};
  1087. static int pci_e1000_init(PCIDevice *pci_dev)
  1088. {
  1089. E1000State *d = DO_UPCAST(E1000State, dev, pci_dev);
  1090. uint8_t *pci_conf;
  1091. uint16_t checksum = 0;
  1092. int i;
  1093. uint8_t *macaddr;
  1094. pci_conf = d->dev.config;
  1095. /* TODO: RST# value should be 0, PCI spec 6.2.4 */
  1096. pci_conf[PCI_CACHE_LINE_SIZE] = 0x10;
  1097. pci_conf[PCI_INTERRUPT_PIN] = 1; /* interrupt pin A */
  1098. e1000_mmio_setup(d);
  1099. pci_register_bar(&d->dev, 0, PCI_BASE_ADDRESS_SPACE_MEMORY, &d->mmio);
  1100. pci_register_bar(&d->dev, 1, PCI_BASE_ADDRESS_SPACE_IO, &d->io);
  1101. memmove(d->eeprom_data, e1000_eeprom_template,
  1102. sizeof e1000_eeprom_template);
  1103. qemu_macaddr_default_if_unset(&d->conf.macaddr);
  1104. macaddr = d->conf.macaddr.a;
  1105. for (i = 0; i < 3; i++)
  1106. d->eeprom_data[i] = (macaddr[2*i+1]<<8) | macaddr[2*i];
  1107. for (i = 0; i < EEPROM_CHECKSUM_REG; i++)
  1108. checksum += d->eeprom_data[i];
  1109. checksum = (uint16_t) EEPROM_SUM - checksum;
  1110. d->eeprom_data[EEPROM_CHECKSUM_REG] = checksum;
  1111. d->nic = qemu_new_nic(&net_e1000_info, &d->conf,
  1112. object_get_typename(OBJECT(d)), d->dev.qdev.id, d);
  1113. qemu_format_nic_info_str(&d->nic->nc, macaddr);
  1114. add_boot_device_path(d->conf.bootindex, &pci_dev->qdev, "/ethernet-phy@0");
  1115. d->autoneg_timer = qemu_new_timer_ms(vm_clock, e1000_autoneg_timer, d);
  1116. return 0;
  1117. }
  1118. static void qdev_e1000_reset(DeviceState *dev)
  1119. {
  1120. E1000State *d = DO_UPCAST(E1000State, dev.qdev, dev);
  1121. e1000_reset(d);
  1122. }
/* qdev properties: just the standard NIC properties from 'conf'. */
static Property e1000_properties[] = {
    DEFINE_NIC_PROPERTIES(E1000State, conf),
    DEFINE_PROP_END_OF_LIST(),
};
  1127. static void e1000_class_init(ObjectClass *klass, void *data)
  1128. {
  1129. DeviceClass *dc = DEVICE_CLASS(klass);
  1130. PCIDeviceClass *k = PCI_DEVICE_CLASS(klass);
  1131. k->init = pci_e1000_init;
  1132. k->exit = pci_e1000_uninit;
  1133. k->romfile = "pxe-e1000.rom";
  1134. k->vendor_id = PCI_VENDOR_ID_INTEL;
  1135. k->device_id = E1000_DEVID;
  1136. k->revision = 0x03;
  1137. k->class_id = PCI_CLASS_NETWORK_ETHERNET;
  1138. dc->desc = "Intel Gigabit Ethernet";
  1139. dc->reset = qdev_e1000_reset;
  1140. dc->vmsd = &vmstate_e1000;
  1141. dc->props = e1000_properties;
  1142. }
/* QOM type description for the "e1000" PCI device. */
static TypeInfo e1000_info = {
    .name = "e1000",
    .parent = TYPE_PCI_DEVICE,
    .instance_size = sizeof(E1000State),
    .class_init = e1000_class_init,
};
/* Register the e1000 type with QOM at module-init time. */
static void e1000_register_types(void)
{
    type_register_static(&e1000_info);
}

type_init(e1000_register_types)