e1000.c

/*
 * QEMU e1000 emulation
 *
 * Nir Peleg, Tutis Systems Ltd. for Qumranet Inc.
 * Copyright (c) 2008 Qumranet
 * Based on work done by:
 * Copyright (c) 2007 Dan Aloni
 * Copyright (c) 2004 Antony T Curtis
 *
 * This library is free software; you can redistribute it and/or
 * modify it under the terms of the GNU Lesser General Public
 * License as published by the Free Software Foundation; either
 * version 2 of the License, or (at your option) any later version.
 *
 * This library is distributed in the hope that it will be useful,
 * but WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the GNU
 * Lesser General Public License for more details.
 *
 * You should have received a copy of the GNU Lesser General Public
 * License along with this library; if not, write to the Free Software
 * Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA
 */
#include "hw.h"
#include "pci.h"
#include "net.h"
#include "e1000_hw.h"

#define DEBUG
#ifdef DEBUG
enum {
    DEBUG_GENERAL, DEBUG_IO, DEBUG_MMIO, DEBUG_INTERRUPT,
    DEBUG_RX, DEBUG_TX, DEBUG_MDIC, DEBUG_EEPROM,
    DEBUG_UNKNOWN, DEBUG_TXSUM, DEBUG_TXERR, DEBUG_RXERR,
    DEBUG_RXFILTER, DEBUG_NOTYET,
};
#define DBGBIT(x) (1<<DEBUG_##x)
static int debugflags = DBGBIT(TXERR) | DBGBIT(GENERAL);

#define DBGOUT(what, fmt, params...) do { \
    if (debugflags & DBGBIT(what)) \
        fprintf(stderr, "e1000: " fmt, ##params); \
} while (0)
#else
#define DBGOUT(what, fmt, params...) do {} while (0)
#endif
#define IOPORT_SIZE       0x40
#define PNPMMIO_SIZE      0x20000

/*
 * HW models:
 *  E1000_DEV_ID_82540EM works with Windows and Linux
 *  E1000_DEV_ID_82573L OK with windoze and Linux 2.6.22,
 *      appears to perform better than 82540EM, but breaks with Linux 2.6.18
 *  E1000_DEV_ID_82544GC_COPPER appears to work; not well tested
 *  Others never tested
 */
enum { E1000_DEVID = E1000_DEV_ID_82540EM };

/*
 * May need to specify additional MAC-to-PHY entries --
 * Intel's Windows driver refuses to initialize unless they match
 */
enum {
    PHY_ID2_INIT = E1000_DEVID == E1000_DEV_ID_82573L ?        0xcc2 :
                   E1000_DEVID == E1000_DEV_ID_82544GC_COPPER ? 0xc30 :
                   /* default to E1000_DEV_ID_82540EM */        0xc20
};
typedef struct E1000State_st {
    PCIDevice dev;
    VLANClientState *vc;
    int mmio_index;

    uint32_t mac_reg[0x8000];
    uint16_t phy_reg[0x20];
    uint16_t eeprom_data[64];

    uint32_t rxbuf_size;
    uint32_t rxbuf_min_shift;
    int check_rxov;
    struct e1000_tx {
        unsigned char header[256];
        unsigned char vlan_header[4];
        unsigned char vlan[4];
        unsigned char data[0x10000];
        uint16_t size;
        unsigned char sum_needed;
        unsigned char vlan_needed;
        uint8_t ipcss;
        uint8_t ipcso;
        uint16_t ipcse;
        uint8_t tucss;
        uint8_t tucso;
        uint16_t tucse;
        uint8_t hdr_len;
        uint16_t mss;
        uint32_t paylen;
        uint16_t tso_frames;
        char tse;
        int8_t ip;
        int8_t tcp;
        char cptse;     // current packet tse bit
    } tx;

    struct {
        uint32_t val_in;    // shifted in from guest driver
        uint16_t bitnum_in;
        uint16_t bitnum_out;
        uint16_t reading;
        uint32_t old_eecd;
    } eecd_state;
} E1000State;
#define defreg(x)   x = (E1000_##x>>2)
enum {
    defreg(CTRL),   defreg(EECD),   defreg(EERD),   defreg(GPRC),
    defreg(GPTC),   defreg(ICR),    defreg(ICS),    defreg(IMC),
    defreg(IMS),    defreg(LEDCTL), defreg(MANC),   defreg(MDIC),
    defreg(MPC),    defreg(PBA),    defreg(RCTL),   defreg(RDBAH),
    defreg(RDBAL),  defreg(RDH),    defreg(RDLEN),  defreg(RDT),
    defreg(STATUS), defreg(SWSM),   defreg(TCTL),   defreg(TDBAH),
    defreg(TDBAL),  defreg(TDH),    defreg(TDLEN),  defreg(TDT),
    defreg(TORH),   defreg(TORL),   defreg(TOTH),   defreg(TOTL),
    defreg(TPR),    defreg(TPT),    defreg(TXDCTL), defreg(WUFC),
    defreg(RA),     defreg(MTA),    defreg(CRCERRS), defreg(VFTA),
    defreg(VET),
};

enum { PHY_R = 1, PHY_W = 2, PHY_RW = PHY_R | PHY_W };
static const char phy_regcap[0x20] = {
    [PHY_STATUS] = PHY_R,       [M88E1000_EXT_PHY_SPEC_CTRL] = PHY_RW,
    [PHY_ID1] = PHY_R,          [M88E1000_PHY_SPEC_CTRL] = PHY_RW,
    [PHY_CTRL] = PHY_RW,        [PHY_1000T_CTRL] = PHY_RW,
    [PHY_LP_ABILITY] = PHY_R,   [PHY_1000T_STATUS] = PHY_R,
    [PHY_AUTONEG_ADV] = PHY_RW, [M88E1000_RX_ERR_CNTR] = PHY_R,
    [PHY_ID2] = PHY_R,          [M88E1000_PHY_SPEC_STATUS] = PHY_R
};
static void
ioport_map(PCIDevice *pci_dev, int region_num, uint32_t addr,
           uint32_t size, int type)
{
    DBGOUT(IO, "e1000_ioport_map addr=0x%04x size=0x%08x\n", addr, size);
}
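
/*
 * Interrupt handling: set_interrupt_cause() latches the pending causes
 * into ICR/ICS and (de)asserts the PCI interrupt line depending on
 * whether any cause is unmasked in IMS; set_ics() is the usual entry
 * point and ORs new cause bits into whatever is already pending.
 */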
static void
set_interrupt_cause(E1000State *s, int index, uint32_t val)
{
    if (val)
        val |= E1000_ICR_INT_ASSERTED;
    s->mac_reg[ICR] = val;
    s->mac_reg[ICS] = val;
    qemu_set_irq(s->dev.irq[0], (s->mac_reg[IMS] & s->mac_reg[ICR]) != 0);
}
static void
set_ics(E1000State *s, int index, uint32_t val)
{
    DBGOUT(INTERRUPT, "set_ics %x, ICR %x, IMR %x\n", val, s->mac_reg[ICR],
           s->mac_reg[IMS]);
    set_interrupt_cause(s, 0, val | s->mac_reg[ICR]);
}
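
/* Decode the RCTL buffer-size bits (BSEX + SZ_*) into a byte count. */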
static int
rxbufsize(uint32_t v)
{
    v &= E1000_RCTL_BSEX | E1000_RCTL_SZ_16384 | E1000_RCTL_SZ_8192 |
         E1000_RCTL_SZ_4096 | E1000_RCTL_SZ_2048 | E1000_RCTL_SZ_1024 |
         E1000_RCTL_SZ_512 | E1000_RCTL_SZ_256;
    switch (v) {
    case E1000_RCTL_BSEX | E1000_RCTL_SZ_16384:
        return 16384;
    case E1000_RCTL_BSEX | E1000_RCTL_SZ_8192:
        return 8192;
    case E1000_RCTL_BSEX | E1000_RCTL_SZ_4096:
        return 4096;
    case E1000_RCTL_SZ_1024:
        return 1024;
    case E1000_RCTL_SZ_512:
        return 512;
    case E1000_RCTL_SZ_256:
        return 256;
    }
    return 2048;
}
static void
set_ctrl(E1000State *s, int index, uint32_t val)
{
    /* RST is self clearing */
    s->mac_reg[CTRL] = val & ~E1000_CTRL_RST;
}

static void
set_rx_control(E1000State *s, int index, uint32_t val)
{
    s->mac_reg[RCTL] = val;
    s->rxbuf_size = rxbufsize(val);
    s->rxbuf_min_shift = ((val / E1000_RCTL_RDMTS_QUAT) & 3) + 1;
    DBGOUT(RX, "RCTL: %d, mac_reg[RCTL] = 0x%x\n", s->mac_reg[RDT],
           s->mac_reg[RCTL]);
}
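
/*
 * MDIC: the guest driver accesses the PHY through this register.
 * Only PHY address 1 is emulated; reads and writes to PHY registers
 * not listed in phy_regcap[] are flagged with E1000_MDIC_ERROR.
 */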
static void
set_mdic(E1000State *s, int index, uint32_t val)
{
    uint32_t data = val & E1000_MDIC_DATA_MASK;
    uint32_t addr = ((val & E1000_MDIC_REG_MASK) >> E1000_MDIC_REG_SHIFT);

    if ((val & E1000_MDIC_PHY_MASK) >> E1000_MDIC_PHY_SHIFT != 1) // phy #
        val = s->mac_reg[MDIC] | E1000_MDIC_ERROR;
    else if (val & E1000_MDIC_OP_READ) {
        DBGOUT(MDIC, "MDIC read reg 0x%x\n", addr);
        if (!(phy_regcap[addr] & PHY_R)) {
            DBGOUT(MDIC, "MDIC read reg %x unhandled\n", addr);
            val |= E1000_MDIC_ERROR;
        } else
            val = (val ^ data) | s->phy_reg[addr];
    } else if (val & E1000_MDIC_OP_WRITE) {
        DBGOUT(MDIC, "MDIC write reg 0x%x, value 0x%x\n", addr, data);
        if (!(phy_regcap[addr] & PHY_W)) {
            DBGOUT(MDIC, "MDIC write reg %x unhandled\n", addr);
            val |= E1000_MDIC_ERROR;
        } else
            s->phy_reg[addr] = data;
    }
    s->mac_reg[MDIC] = val | E1000_MDIC_READY;
    set_ics(s, 0, E1000_ICR_MDAC);
}
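
/*
 * EECD: bit-banged Microwire EEPROM interface.  get_eecd() returns the
 * current data-out bit from eeprom_data[]; set_eecd() tracks SK clock
 * edges, shifts the opcode/address in from DI, and resets the state
 * machine when CS is dropped.
 */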
static uint32_t
get_eecd(E1000State *s, int index)
{
    uint32_t ret = E1000_EECD_PRES|E1000_EECD_GNT | s->eecd_state.old_eecd;

    DBGOUT(EEPROM, "reading eeprom bit %d (reading %d)\n",
           s->eecd_state.bitnum_out, s->eecd_state.reading);
    if (!s->eecd_state.reading ||
        ((s->eeprom_data[(s->eecd_state.bitnum_out >> 4) & 0x3f] >>
          ((s->eecd_state.bitnum_out & 0xf) ^ 0xf))) & 1)
        ret |= E1000_EECD_DO;
    return ret;
}

static void
set_eecd(E1000State *s, int index, uint32_t val)
{
    uint32_t oldval = s->eecd_state.old_eecd;

    s->eecd_state.old_eecd = val & (E1000_EECD_SK | E1000_EECD_CS |
            E1000_EECD_DI|E1000_EECD_FWE_MASK|E1000_EECD_REQ);
    if (!(E1000_EECD_SK & (val ^ oldval)))      // no clock edge
        return;
    if (!(E1000_EECD_SK & val)) {               // falling edge
        s->eecd_state.bitnum_out++;
        return;
    }
    if (!(val & E1000_EECD_CS)) {               // rising, no CS (EEPROM reset)
        memset(&s->eecd_state, 0, sizeof s->eecd_state);
        /*
         * restore old_eecd's E1000_EECD_SK (known to be on)
         * to avoid false detection of a clock edge
         */
        s->eecd_state.old_eecd = E1000_EECD_SK;
        return;
    }
    s->eecd_state.val_in <<= 1;
    if (val & E1000_EECD_DI)
        s->eecd_state.val_in |= 1;
    if (++s->eecd_state.bitnum_in == 9 && !s->eecd_state.reading) {
        s->eecd_state.bitnum_out = ((s->eecd_state.val_in & 0x3f)<<4)-1;
        s->eecd_state.reading = (((s->eecd_state.val_in >> 6) & 7) ==
            EEPROM_READ_OPCODE_MICROWIRE);
    }
    DBGOUT(EEPROM, "eeprom bitnum in %d out %d, reading %d\n",
           s->eecd_state.bitnum_in, s->eecd_state.bitnum_out,
           s->eecd_state.reading);
}
static uint32_t
flash_eerd_read(E1000State *s, int x)
{
    unsigned int index, r = s->mac_reg[EERD] & ~E1000_EEPROM_RW_REG_START;

    if ((s->mac_reg[EERD] & E1000_EEPROM_RW_REG_START) == 0)
        return (s->mac_reg[EERD]);

    if ((index = r >> E1000_EEPROM_RW_ADDR_SHIFT) > EEPROM_CHECKSUM_REG)
        return (E1000_EEPROM_RW_REG_DONE | r);

    return ((s->eeprom_data[index] << E1000_EEPROM_RW_REG_DATA) |
            E1000_EEPROM_RW_REG_DONE | r);
}
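
/*
 * Compute an internet checksum over data[css..cse] (or to the end of
 * the buffer when cse is 0) and store it, big-endian, at offset sloc.
 */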
static void
putsum(uint8_t *data, uint32_t n, uint32_t sloc, uint32_t css, uint32_t cse)
{
    uint32_t sum;

    if (cse && cse < n)
        n = cse + 1;
    if (sloc < n-1) {
        sum = net_checksum_add(n-css, data+css);
        cpu_to_be16wu((uint16_t *)(data + sloc),
                      net_checksum_finish(sum));
    }
}
static inline int
vlan_enabled(E1000State *s)
{
    return ((s->mac_reg[CTRL] & E1000_CTRL_VME) != 0);
}

static inline int
vlan_rx_filter_enabled(E1000State *s)
{
    return ((s->mac_reg[RCTL] & E1000_RCTL_VFE) != 0);
}

static inline int
is_vlan_packet(E1000State *s, const uint8_t *buf)
{
    return (be16_to_cpup((uint16_t *)(buf + 12)) ==
            le16_to_cpup((uint16_t *)(s->mac_reg + VET)));
}

static inline int
is_vlan_txd(uint32_t txd_lower)
{
    return ((txd_lower & E1000_TXD_CMD_VLE) != 0);
}
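
/*
 * Transmit one segment from tx.data: for TSO frames, patch the IP total
 * length/ID (or IPv6 payload length) and the TCP sequence number first,
 * then insert the requested IP/TCP checksums and hand the packet (with
 * an optional VLAN tag) to the VLAN client.
 */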
static void
xmit_seg(E1000State *s)
{
    uint16_t len, *sp;
    unsigned int frames = s->tx.tso_frames, css, sofar, n;
    struct e1000_tx *tp = &s->tx;

    if (tp->tse && tp->cptse) {
        css = tp->ipcss;
        DBGOUT(TXSUM, "frames %d size %d ipcss %d\n",
               frames, tp->size, css);
        if (tp->ip) {           // IPv4
            cpu_to_be16wu((uint16_t *)(tp->data+css+2),
                          tp->size - css);
            cpu_to_be16wu((uint16_t *)(tp->data+css+4),
                          be16_to_cpup((uint16_t *)(tp->data+css+4))+frames);
        } else                  // IPv6
            cpu_to_be16wu((uint16_t *)(tp->data+css+4),
                          tp->size - css);
        css = tp->tucss;
        len = tp->size - css;
        DBGOUT(TXSUM, "tcp %d tucss %d len %d\n", tp->tcp, css, len);
        if (tp->tcp) {
            sofar = frames * tp->mss;
            cpu_to_be32wu((uint32_t *)(tp->data+css+4),     // seq
                be32_to_cpupu((uint32_t *)(tp->data+css+4))+sofar);
            if (tp->paylen - sofar > tp->mss)
                tp->data[css + 13] &= ~9;                   // PSH, FIN
        } else  // UDP
            cpu_to_be16wu((uint16_t *)(tp->data+css+4), len);
        if (tp->sum_needed & E1000_TXD_POPTS_TXSM) {
            // add pseudo-header length before checksum calculation
            sp = (uint16_t *)(tp->data + tp->tucso);
            cpu_to_be16wu(sp, be16_to_cpup(sp) + len);
        }
        tp->tso_frames++;
    }

    if (tp->sum_needed & E1000_TXD_POPTS_TXSM)
        putsum(tp->data, tp->size, tp->tucso, tp->tucss, tp->tucse);
    if (tp->sum_needed & E1000_TXD_POPTS_IXSM)
        putsum(tp->data, tp->size, tp->ipcso, tp->ipcss, tp->ipcse);
    if (tp->vlan_needed) {
        memmove(tp->vlan, tp->data, 12);
        memcpy(tp->data + 8, tp->vlan_header, 4);
        qemu_send_packet(s->vc, tp->vlan, tp->size + 4);
    } else
        qemu_send_packet(s->vc, tp->data, tp->size);
    s->mac_reg[TPT]++;
    s->mac_reg[GPTC]++;
    n = s->mac_reg[TOTL];
    if ((s->mac_reg[TOTL] += s->tx.size) < n)
        s->mac_reg[TOTH]++;
}
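
/*
 * Handle one transmit descriptor.  Context descriptors only latch the
 * offload parameters into s->tx; data and legacy descriptors DMA the
 * payload into tx.data, segmenting into MSS-sized frames when TSE is
 * set, and transmit once the EOP bit is seen.
 */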
static void
process_tx_desc(E1000State *s, struct e1000_tx_desc *dp)
{
    uint32_t txd_lower = le32_to_cpu(dp->lower.data);
    uint32_t dtype = txd_lower & (E1000_TXD_CMD_DEXT | E1000_TXD_DTYP_D);
    unsigned int split_size = txd_lower & 0xffff, bytes, sz, op;
    unsigned int msh = 0xfffff, hdr = 0;
    uint64_t addr;
    struct e1000_context_desc *xp = (struct e1000_context_desc *)dp;
    struct e1000_tx *tp = &s->tx;

    if (dtype == E1000_TXD_CMD_DEXT) {          // context descriptor
        op = le32_to_cpu(xp->cmd_and_length);
        tp->ipcss = xp->lower_setup.ip_fields.ipcss;
        tp->ipcso = xp->lower_setup.ip_fields.ipcso;
        tp->ipcse = le16_to_cpu(xp->lower_setup.ip_fields.ipcse);
        tp->tucss = xp->upper_setup.tcp_fields.tucss;
        tp->tucso = xp->upper_setup.tcp_fields.tucso;
        tp->tucse = le16_to_cpu(xp->upper_setup.tcp_fields.tucse);
        tp->paylen = op & 0xfffff;
        tp->hdr_len = xp->tcp_seg_setup.fields.hdr_len;
        tp->mss = le16_to_cpu(xp->tcp_seg_setup.fields.mss);
        tp->ip = (op & E1000_TXD_CMD_IP) ? 1 : 0;
        tp->tcp = (op & E1000_TXD_CMD_TCP) ? 1 : 0;
        tp->tse = (op & E1000_TXD_CMD_TSE) ? 1 : 0;
        tp->tso_frames = 0;
        if (tp->tucso == 0) {                   // this is probably wrong
            DBGOUT(TXSUM, "TCP/UDP: cso 0!\n");
            tp->tucso = tp->tucss + (tp->tcp ? 16 : 6);
        }
        return;
    } else if (dtype == (E1000_TXD_CMD_DEXT | E1000_TXD_DTYP_D)) {
        // data descriptor
        tp->sum_needed = le32_to_cpu(dp->upper.data) >> 8;
        tp->cptse = (txd_lower & E1000_TXD_CMD_TSE) ? 1 : 0;
    } else
        // legacy descriptor
        tp->cptse = 0;

    if (vlan_enabled(s) && is_vlan_txd(txd_lower) &&
        (tp->cptse || txd_lower & E1000_TXD_CMD_EOP)) {
        tp->vlan_needed = 1;
        cpu_to_be16wu((uint16_t *)(tp->vlan_header),
                      le16_to_cpup((uint16_t *)(s->mac_reg + VET)));
        cpu_to_be16wu((uint16_t *)(tp->vlan_header + 2),
                      le16_to_cpu(dp->upper.fields.special));
    }

    addr = le64_to_cpu(dp->buffer_addr);
    if (tp->tse && tp->cptse) {
        hdr = tp->hdr_len;
        msh = hdr + tp->mss;
        do {
            bytes = split_size;
            if (tp->size + bytes > msh)
                bytes = msh - tp->size;
            cpu_physical_memory_read(addr, tp->data + tp->size, bytes);
            if ((sz = tp->size + bytes) >= hdr && tp->size < hdr)
                memmove(tp->header, tp->data, hdr);
            tp->size = sz;
            addr += bytes;
            if (sz == msh) {
                xmit_seg(s);
                memmove(tp->data, tp->header, hdr);
                tp->size = hdr;
            }
        } while (split_size -= bytes);
    } else if (!tp->tse && tp->cptse) {
        // context descriptor TSE is not set, while data descriptor TSE is set
        DBGOUT(TXERR, "TCP segmentation error\n");
    } else {
        cpu_physical_memory_read(addr, tp->data + tp->size, split_size);
        tp->size += split_size;
    }

    if (!(txd_lower & E1000_TXD_CMD_EOP))
        return;
    if (!(tp->tse && tp->cptse && tp->size < hdr))
        xmit_seg(s);
    tp->tso_frames = 0;
    tp->sum_needed = 0;
    tp->vlan_needed = 0;
    tp->size = 0;
    tp->cptse = 0;
}
static uint32_t
txdesc_writeback(target_phys_addr_t base, struct e1000_tx_desc *dp)
{
    uint32_t txd_upper, txd_lower = le32_to_cpu(dp->lower.data);

    if (!(txd_lower & (E1000_TXD_CMD_RS|E1000_TXD_CMD_RPS)))
        return 0;
    txd_upper = (le32_to_cpu(dp->upper.data) | E1000_TXD_STAT_DD) &
                ~(E1000_TXD_STAT_EC | E1000_TXD_STAT_LC | E1000_TXD_STAT_TU);
    dp->upper.data = cpu_to_le32(txd_upper);
    cpu_physical_memory_write(base + ((char *)&dp->upper - (char *)dp),
                              (void *)&dp->upper, sizeof(dp->upper));
    return E1000_ICR_TXDW;
}
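
/*
 * Walk the transmit ring from TDH to TDT, processing and writing back
 * each descriptor; bail out if TDH wraps all the way around, which can
 * only happen with bogus guest-programmed TDT/TDLEN values.
 */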
static void
start_xmit(E1000State *s)
{
    target_phys_addr_t base;
    struct e1000_tx_desc desc;
    uint32_t tdh_start = s->mac_reg[TDH], cause = E1000_ICS_TXQE;

    if (!(s->mac_reg[TCTL] & E1000_TCTL_EN)) {
        DBGOUT(TX, "tx disabled\n");
        return;
    }

    while (s->mac_reg[TDH] != s->mac_reg[TDT]) {
        base = ((uint64_t)s->mac_reg[TDBAH] << 32) + s->mac_reg[TDBAL] +
               sizeof(struct e1000_tx_desc) * s->mac_reg[TDH];
        cpu_physical_memory_read(base, (void *)&desc, sizeof(desc));

        DBGOUT(TX, "index %d: %p : %x %x\n", s->mac_reg[TDH],
               (void *)(intptr_t)desc.buffer_addr, desc.lower.data,
               desc.upper.data);

        process_tx_desc(s, &desc);
        cause |= txdesc_writeback(base, &desc);

        if (++s->mac_reg[TDH] * sizeof(desc) >= s->mac_reg[TDLEN])
            s->mac_reg[TDH] = 0;
        /*
         * the following could happen only if guest sw assigns
         * bogus values to TDT/TDLEN.
         * there's nothing too intelligent we could do about this.
         */
        if (s->mac_reg[TDH] == tdh_start) {
            DBGOUT(TXERR, "TDH wraparound @%x, TDT %x, TDLEN %x\n",
                   tdh_start, s->mac_reg[TDT], s->mac_reg[TDLEN]);
            break;
        }
    }
    set_ics(s, 0, cause);
}
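
/*
 * Receive filtering: VLAN filter table (when enabled), then promiscuous
 * unicast/multicast bits, broadcast, the exact RA address registers and
 * finally the MTA multicast hash table.  Returns nonzero to accept.
 */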
static int
receive_filter(E1000State *s, const uint8_t *buf, int size)
{
    static uint8_t bcast[] = {0xff, 0xff, 0xff, 0xff, 0xff, 0xff};
    static int mta_shift[] = {4, 3, 2, 0};
    uint32_t f, rctl = s->mac_reg[RCTL], ra[2], *rp;

    if (is_vlan_packet(s, buf) && vlan_rx_filter_enabled(s)) {
        uint16_t vid = be16_to_cpup((uint16_t *)(buf + 14));
        uint32_t vfta = le32_to_cpup((uint32_t *)(s->mac_reg + VFTA) +
                                     ((vid >> 5) & 0x7f));
        if ((vfta & (1 << (vid & 0x1f))) == 0)
            return 0;
    }

    if (rctl & E1000_RCTL_UPE)                          // promiscuous
        return 1;

    if ((buf[0] & 1) && (rctl & E1000_RCTL_MPE))        // promiscuous mcast
        return 1;

    if ((rctl & E1000_RCTL_BAM) && !memcmp(buf, bcast, sizeof bcast))
        return 1;

    for (rp = s->mac_reg + RA; rp < s->mac_reg + RA + 32; rp += 2) {
        if (!(rp[1] & E1000_RAH_AV))
            continue;
        ra[0] = cpu_to_le32(rp[0]);
        ra[1] = cpu_to_le32(rp[1]);
        if (!memcmp(buf, (uint8_t *)ra, 6)) {
            DBGOUT(RXFILTER,
                   "unicast match[%d]: %02x:%02x:%02x:%02x:%02x:%02x\n",
                   (int)(rp - s->mac_reg - RA)/2,
                   buf[0], buf[1], buf[2], buf[3], buf[4], buf[5]);
            return 1;
        }
    }
    DBGOUT(RXFILTER, "unicast mismatch: %02x:%02x:%02x:%02x:%02x:%02x\n",
           buf[0], buf[1], buf[2], buf[3], buf[4], buf[5]);

    f = mta_shift[(rctl >> E1000_RCTL_MO_SHIFT) & 3];
    f = (((buf[5] << 8) | buf[4]) >> f) & 0xfff;
    if (s->mac_reg[MTA + (f >> 5)] & (1 << (f & 0x1f)))
        return 1;
    DBGOUT(RXFILTER,
           "dropping, inexact filter mismatch: %02x:%02x:%02x:%02x:%02x:%02x MO %d MTA[%d] %x\n",
           buf[0], buf[1], buf[2], buf[3], buf[4], buf[5],
           (rctl >> E1000_RCTL_MO_SHIFT) & 3, f >> 5,
           s->mac_reg[MTA + (f >> 5)]);

    return 0;
}
static void
e1000_set_link_status(VLANClientState *vc)
{
    E1000State *s = vc->opaque;
    uint32_t old_status = s->mac_reg[STATUS];

    if (vc->link_down)
        s->mac_reg[STATUS] &= ~E1000_STATUS_LU;
    else
        s->mac_reg[STATUS] |= E1000_STATUS_LU;

    if (s->mac_reg[STATUS] != old_status)
        set_ics(s, 0, E1000_ICR_LSC);
}

static int
e1000_can_receive(void *opaque)
{
    E1000State *s = opaque;

    return (s->mac_reg[RCTL] & E1000_RCTL_EN);
}
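
/*
 * Receive path: apply the filters, strip the VLAN tag when VLAN handling
 * is enabled, copy the packet into the buffer of the descriptor at RDH
 * (skipping null-buffer descriptors), write the descriptor back and
 * raise RXT0, plus RXDMT0 when the ring runs low or RXO on overrun.
 */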
static void
e1000_receive(void *opaque, const uint8_t *buf, int size)
{
    E1000State *s = opaque;
    struct e1000_rx_desc desc;
    target_phys_addr_t base;
    unsigned int n, rdt;
    uint32_t rdh_start;
    uint16_t vlan_special = 0;
    uint8_t vlan_status = 0, vlan_offset = 0;

    if (!(s->mac_reg[RCTL] & E1000_RCTL_EN))
        return;

    if (size > s->rxbuf_size) {
        DBGOUT(RX, "packet too large for buffers (%d > %d)\n", size,
               s->rxbuf_size);
        return;
    }

    if (!receive_filter(s, buf, size))
        return;

    if (vlan_enabled(s) && is_vlan_packet(s, buf)) {
        vlan_special = cpu_to_le16(be16_to_cpup((uint16_t *)(buf + 14)));
        memmove((void *)(buf + 4), buf, 12);
        vlan_status = E1000_RXD_STAT_VP;
        vlan_offset = 4;
        size -= 4;
    }

    rdh_start = s->mac_reg[RDH];
    size += 4; // for the header
    do {
        if (s->mac_reg[RDH] == s->mac_reg[RDT] && s->check_rxov) {
            set_ics(s, 0, E1000_ICS_RXO);
            return;
        }
        base = ((uint64_t)s->mac_reg[RDBAH] << 32) + s->mac_reg[RDBAL] +
               sizeof(desc) * s->mac_reg[RDH];
        cpu_physical_memory_read(base, (void *)&desc, sizeof(desc));
        desc.special = vlan_special;
        desc.status |= (vlan_status | E1000_RXD_STAT_DD);
        if (desc.buffer_addr) {
            cpu_physical_memory_write(le64_to_cpu(desc.buffer_addr),
                                      (void *)(buf + vlan_offset), size);
            desc.length = cpu_to_le16(size);
            desc.status |= E1000_RXD_STAT_EOP|E1000_RXD_STAT_IXSM;
        } else // as per intel docs; skip descriptors with null buf addr
            DBGOUT(RX, "Null RX descriptor!!\n");
        cpu_physical_memory_write(base, (void *)&desc, sizeof(desc));

        if (++s->mac_reg[RDH] * sizeof(desc) >= s->mac_reg[RDLEN])
            s->mac_reg[RDH] = 0;
        s->check_rxov = 1;
        /* see comment in start_xmit; same here */
        if (s->mac_reg[RDH] == rdh_start) {
            DBGOUT(RXERR, "RDH wraparound @%x, RDT %x, RDLEN %x\n",
                   rdh_start, s->mac_reg[RDT], s->mac_reg[RDLEN]);
            set_ics(s, 0, E1000_ICS_RXO);
            return;
        }
    } while (desc.buffer_addr == 0);

    s->mac_reg[GPRC]++;
    s->mac_reg[TPR]++;
    n = s->mac_reg[TORL];
    if ((s->mac_reg[TORL] += size) < n)
        s->mac_reg[TORH]++;

    n = E1000_ICS_RXT0;
    if ((rdt = s->mac_reg[RDT]) < s->mac_reg[RDH])
        rdt += s->mac_reg[RDLEN] / sizeof(desc);
    if (((rdt - s->mac_reg[RDH]) * sizeof(desc)) <= s->mac_reg[RDLEN] >>
        s->rxbuf_min_shift)
        n |= E1000_ICS_RXDMT0;

    set_ics(s, 0, n);
}
static uint32_t
mac_readreg(E1000State *s, int index)
{
    return s->mac_reg[index];
}

static uint32_t
mac_icr_read(E1000State *s, int index)
{
    uint32_t ret = s->mac_reg[ICR];

    DBGOUT(INTERRUPT, "ICR read: %x\n", ret);
    set_interrupt_cause(s, 0, 0);
    return ret;
}

static uint32_t
mac_read_clr4(E1000State *s, int index)
{
    uint32_t ret = s->mac_reg[index];

    s->mac_reg[index] = 0;
    return ret;
}

static uint32_t
mac_read_clr8(E1000State *s, int index)
{
    uint32_t ret = s->mac_reg[index];

    s->mac_reg[index] = 0;
    s->mac_reg[index-1] = 0;
    return ret;
}

static void
mac_writereg(E1000State *s, int index, uint32_t val)
{
    s->mac_reg[index] = val;
}

static void
set_rdt(E1000State *s, int index, uint32_t val)
{
    s->check_rxov = 0;
    s->mac_reg[index] = val & 0xffff;
}

static void
set_16bit(E1000State *s, int index, uint32_t val)
{
    s->mac_reg[index] = val & 0xffff;
}

static void
set_dlen(E1000State *s, int index, uint32_t val)
{
    s->mac_reg[index] = val & 0xfff80;
}

static void
set_tctl(E1000State *s, int index, uint32_t val)
{
    s->mac_reg[index] = val;
    s->mac_reg[TDT] &= 0xffff;
    start_xmit(s);
}

static void
set_icr(E1000State *s, int index, uint32_t val)
{
    DBGOUT(INTERRUPT, "set_icr %x\n", val);
    set_interrupt_cause(s, 0, s->mac_reg[ICR] & ~val);
}

static void
set_imc(E1000State *s, int index, uint32_t val)
{
    s->mac_reg[IMS] &= ~val;
    set_ics(s, 0, 0);
}

static void
set_ims(E1000State *s, int index, uint32_t val)
{
    s->mac_reg[IMS] |= val;
    set_ics(s, 0, 0);
}
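
/*
 * Register dispatch: MMIO accesses are routed through these tables,
 * indexed by register offset >> 2.  Offsets without an entry read as 0
 * and have their writes ignored (but logged).
 */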
#define getreg(x)   [x] = mac_readreg
static uint32_t (*macreg_readops[])(E1000State *, int) = {
    getreg(PBA),    getreg(RCTL),   getreg(TDH),    getreg(TXDCTL),
    getreg(WUFC),   getreg(TDT),    getreg(CTRL),   getreg(LEDCTL),
    getreg(MANC),   getreg(MDIC),   getreg(SWSM),   getreg(STATUS),
    getreg(TORL),   getreg(TOTL),   getreg(IMS),    getreg(TCTL),
    getreg(RDH),    getreg(RDT),    getreg(VET),    getreg(ICS),

    [TOTH] = mac_read_clr8,  [TORH] = mac_read_clr8,  [GPRC] = mac_read_clr4,
    [GPTC] = mac_read_clr4,  [TPR] = mac_read_clr4,   [TPT] = mac_read_clr4,
    [ICR] = mac_icr_read,    [EECD] = get_eecd,       [EERD] = flash_eerd_read,
    [CRCERRS ... MPC] = &mac_readreg,
    [RA ... RA+31] = &mac_readreg,
    [MTA ... MTA+127] = &mac_readreg,
    [VFTA ... VFTA+127] = &mac_readreg,
};
enum { NREADOPS = ARRAY_SIZE(macreg_readops) };

#define putreg(x)   [x] = mac_writereg
static void (*macreg_writeops[])(E1000State *, int, uint32_t) = {
    putreg(PBA),    putreg(EERD),   putreg(SWSM),   putreg(WUFC),
    putreg(TDBAL),  putreg(TDBAH),  putreg(TXDCTL), putreg(RDBAH),
    putreg(RDBAL),  putreg(LEDCTL), putreg(VET),
    [TDLEN] = set_dlen,  [RDLEN] = set_dlen,        [TCTL] = set_tctl,
    [TDT] = set_tctl,    [MDIC] = set_mdic,         [ICS] = set_ics,
    [TDH] = set_16bit,   [RDH] = set_16bit,         [RDT] = set_rdt,
    [IMC] = set_imc,     [IMS] = set_ims,           [ICR] = set_icr,
    [EECD] = set_eecd,   [RCTL] = set_rx_control,   [CTRL] = set_ctrl,
    [RA ... RA+31] = &mac_writereg,
    [MTA ... MTA+127] = &mac_writereg,
    [VFTA ... VFTA+127] = &mac_writereg,
};
enum { NWRITEOPS = ARRAY_SIZE(macreg_writeops) };
static void
e1000_mmio_writel(void *opaque, target_phys_addr_t addr, uint32_t val)
{
    E1000State *s = opaque;
    unsigned int index = (addr & 0x1ffff) >> 2;

#ifdef TARGET_WORDS_BIGENDIAN
    val = bswap32(val);
#endif
    if (index < NWRITEOPS && macreg_writeops[index])
        macreg_writeops[index](s, index, val);
    else if (index < NREADOPS && macreg_readops[index])
        DBGOUT(MMIO, "e1000_mmio_writel RO %x: 0x%04x\n", index<<2, val);
    else
        DBGOUT(UNKNOWN, "MMIO unknown write addr=0x%08x,val=0x%08x\n",
               index<<2, val);
}

static void
e1000_mmio_writew(void *opaque, target_phys_addr_t addr, uint32_t val)
{
    // emulate hw without byte enables: no RMW
    e1000_mmio_writel(opaque, addr & ~3,
                      (val & 0xffff) << (8*(addr & 3)));
}

static void
e1000_mmio_writeb(void *opaque, target_phys_addr_t addr, uint32_t val)
{
    // emulate hw without byte enables: no RMW
    e1000_mmio_writel(opaque, addr & ~3,
                      (val & 0xff) << (8*(addr & 3)));
}

static uint32_t
e1000_mmio_readl(void *opaque, target_phys_addr_t addr)
{
    E1000State *s = opaque;
    unsigned int index = (addr & 0x1ffff) >> 2;

    if (index < NREADOPS && macreg_readops[index])
    {
        uint32_t val = macreg_readops[index](s, index);
#ifdef TARGET_WORDS_BIGENDIAN
        val = bswap32(val);
#endif
        return val;
    }
    DBGOUT(UNKNOWN, "MMIO unknown read addr=0x%08x\n", index<<2);
    return 0;
}

static uint32_t
e1000_mmio_readb(void *opaque, target_phys_addr_t addr)
{
    return ((e1000_mmio_readl(opaque, addr & ~3)) >>
            (8 * (addr & 3))) & 0xff;
}

static uint32_t
e1000_mmio_readw(void *opaque, target_phys_addr_t addr)
{
    return ((e1000_mmio_readl(opaque, addr & ~3)) >>
            (8 * (addr & 3))) & 0xffff;
}
static const int mac_regtosave[] = {
    CTRL,   EECD,   EERD,   GPRC,   GPTC,   ICR,    ICS,    IMC,    IMS,
    LEDCTL, MANC,   MDIC,   MPC,    PBA,    RCTL,   RDBAH,  RDBAL,  RDH,
    RDLEN,  RDT,    STATUS, SWSM,   TCTL,   TDBAH,  TDBAL,  TDH,    TDLEN,
    TDT,    TORH,   TORL,   TOTH,   TOTL,   TPR,    TPT,    TXDCTL, WUFC,
    VET,
};
enum { MAC_NSAVE = ARRAY_SIZE(mac_regtosave) };

static const struct {
    int size;
    int array0;
} mac_regarraystosave[] = { {32, RA}, {128, MTA}, {128, VFTA} };
enum { MAC_NARRAYS = ARRAY_SIZE(mac_regarraystosave) };
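
/*
 * savevm support: the PCI state, EEPROM-access and TSO context, EEPROM
 * contents, PHY registers and the MAC registers listed above are
 * serialized in a fixed order; nic_load() must read them back the same
 * way.
 */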
static void
nic_save(QEMUFile *f, void *opaque)
{
    E1000State *s = (E1000State *)opaque;
    int i, j;

    pci_device_save(&s->dev, f);
    qemu_put_be32(f, 0);
    qemu_put_be32s(f, &s->rxbuf_size);
    qemu_put_be32s(f, &s->rxbuf_min_shift);
    qemu_put_be32s(f, &s->eecd_state.val_in);
    qemu_put_be16s(f, &s->eecd_state.bitnum_in);
    qemu_put_be16s(f, &s->eecd_state.bitnum_out);
    qemu_put_be16s(f, &s->eecd_state.reading);
    qemu_put_be32s(f, &s->eecd_state.old_eecd);
    qemu_put_8s(f, &s->tx.ipcss);
    qemu_put_8s(f, &s->tx.ipcso);
    qemu_put_be16s(f, &s->tx.ipcse);
    qemu_put_8s(f, &s->tx.tucss);
    qemu_put_8s(f, &s->tx.tucso);
    qemu_put_be16s(f, &s->tx.tucse);
    qemu_put_be32s(f, &s->tx.paylen);
    qemu_put_8s(f, &s->tx.hdr_len);
    qemu_put_be16s(f, &s->tx.mss);
    qemu_put_be16s(f, &s->tx.size);
    qemu_put_be16s(f, &s->tx.tso_frames);
    qemu_put_8s(f, &s->tx.sum_needed);
    qemu_put_s8s(f, &s->tx.ip);
    qemu_put_s8s(f, &s->tx.tcp);
    qemu_put_buffer(f, s->tx.header, sizeof s->tx.header);
    qemu_put_buffer(f, s->tx.data, sizeof s->tx.data);
    for (i = 0; i < 64; i++)
        qemu_put_be16s(f, s->eeprom_data + i);
    for (i = 0; i < 0x20; i++)
        qemu_put_be16s(f, s->phy_reg + i);
    for (i = 0; i < MAC_NSAVE; i++)
        qemu_put_be32s(f, s->mac_reg + mac_regtosave[i]);
    for (i = 0; i < MAC_NARRAYS; i++)
        for (j = 0; j < mac_regarraystosave[i].size; j++)
            qemu_put_be32s(f,
                           s->mac_reg + mac_regarraystosave[i].array0 + j);
}

static int
nic_load(QEMUFile *f, void *opaque, int version_id)
{
    E1000State *s = (E1000State *)opaque;
    int i, j, ret;

    if ((ret = pci_device_load(&s->dev, f)) < 0)
        return ret;
    if (version_id == 1)
        qemu_get_sbe32s(f, &i); /* once some unused instance id */
    qemu_get_be32(f); /* Ignored.  Was mmio_base.  */
    qemu_get_be32s(f, &s->rxbuf_size);
    qemu_get_be32s(f, &s->rxbuf_min_shift);
    qemu_get_be32s(f, &s->eecd_state.val_in);
    qemu_get_be16s(f, &s->eecd_state.bitnum_in);
    qemu_get_be16s(f, &s->eecd_state.bitnum_out);
    qemu_get_be16s(f, &s->eecd_state.reading);
    qemu_get_be32s(f, &s->eecd_state.old_eecd);
    qemu_get_8s(f, &s->tx.ipcss);
    qemu_get_8s(f, &s->tx.ipcso);
    qemu_get_be16s(f, &s->tx.ipcse);
    qemu_get_8s(f, &s->tx.tucss);
    qemu_get_8s(f, &s->tx.tucso);
    qemu_get_be16s(f, &s->tx.tucse);
    qemu_get_be32s(f, &s->tx.paylen);
    qemu_get_8s(f, &s->tx.hdr_len);
    qemu_get_be16s(f, &s->tx.mss);
    qemu_get_be16s(f, &s->tx.size);
    qemu_get_be16s(f, &s->tx.tso_frames);
    qemu_get_8s(f, &s->tx.sum_needed);
    qemu_get_s8s(f, &s->tx.ip);
    qemu_get_s8s(f, &s->tx.tcp);
    qemu_get_buffer(f, s->tx.header, sizeof s->tx.header);
    qemu_get_buffer(f, s->tx.data, sizeof s->tx.data);
    for (i = 0; i < 64; i++)
        qemu_get_be16s(f, s->eeprom_data + i);
    for (i = 0; i < 0x20; i++)
        qemu_get_be16s(f, s->phy_reg + i);
    for (i = 0; i < MAC_NSAVE; i++)
        qemu_get_be32s(f, s->mac_reg + mac_regtosave[i]);
    for (i = 0; i < MAC_NARRAYS; i++)
        for (j = 0; j < mac_regarraystosave[i].size; j++)
            qemu_get_be32s(f,
                           s->mac_reg + mac_regarraystosave[i].array0 + j);
    return 0;
}
static const uint16_t e1000_eeprom_template[64] = {
    0x0000, 0x0000, 0x0000, 0x0000,      0xffff, 0x0000,      0x0000, 0x0000,
    0x3000, 0x1000, 0x6403, E1000_DEVID, 0x8086, E1000_DEVID, 0x8086, 0x3040,
    0x0008, 0x2000, 0x7e14, 0x0048,      0x1000, 0x00d8,      0x0000, 0x2700,
    0x6cc9, 0x3150, 0x0722, 0x040b,      0x0984, 0x0000,      0xc000, 0x0706,
    0x1008, 0x0000, 0x0f04, 0x7fff,      0x4d01, 0xffff,      0xffff, 0xffff,
    0xffff, 0xffff, 0xffff, 0xffff,      0xffff, 0xffff,      0xffff, 0xffff,
    0x0100, 0x4000, 0x121c, 0xffff,      0xffff, 0xffff,      0xffff, 0xffff,
    0xffff, 0xffff, 0xffff, 0xffff,      0xffff, 0xffff,      0xffff, 0x0000,
};

static const uint16_t phy_reg_init[] = {
    [PHY_CTRL] = 0x1140,                   [PHY_STATUS] = 0x796d, // link initially up
    [PHY_ID1] = 0x141,                     [PHY_ID2] = PHY_ID2_INIT,
    [PHY_1000T_CTRL] = 0x0e00,             [M88E1000_PHY_SPEC_CTRL] = 0x360,
    [M88E1000_EXT_PHY_SPEC_CTRL] = 0x0d60, [PHY_AUTONEG_ADV] = 0xde1,
    [PHY_LP_ABILITY] = 0x1e0,              [PHY_1000T_STATUS] = 0x3c00,
    [M88E1000_PHY_SPEC_STATUS] = 0xac00,
};

static const uint32_t mac_reg_init[] = {
    [PBA] =     0x00100030,
    [LEDCTL] =  0x602,
    [CTRL] =    E1000_CTRL_SWDPIN2 | E1000_CTRL_SWDPIN0 |
                E1000_CTRL_SPD_1000 | E1000_CTRL_SLU,
    [STATUS] =  0x80000000 | E1000_STATUS_GIO_MASTER_ENABLE |
                E1000_STATUS_ASDV | E1000_STATUS_MTXCKOK |
                E1000_STATUS_SPEED_1000 | E1000_STATUS_FD |
                E1000_STATUS_LU,
    [MANC] =    E1000_MANC_EN_MNG2HOST | E1000_MANC_RCV_TCO_EN |
                E1000_MANC_ARP_EN | E1000_MANC_0298_EN |
                E1000_MANC_RMCP_EN,
};
/* PCI interface */

static CPUWriteMemoryFunc *e1000_mmio_write[] = {
    e1000_mmio_writeb, e1000_mmio_writew, e1000_mmio_writel
};

static CPUReadMemoryFunc *e1000_mmio_read[] = {
    e1000_mmio_readb, e1000_mmio_readw, e1000_mmio_readl
};
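
/*
 * Map the MMIO BAR and register coalesced-MMIO ranges for everything
 * except the registers in excluded_regs[], which need to take effect
 * immediately (interrupt and transmit-kick registers).
 */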
static void
e1000_mmio_map(PCIDevice *pci_dev, int region_num,
               uint32_t addr, uint32_t size, int type)
{
    E1000State *d = (E1000State *)pci_dev;
    int i;
    const uint32_t excluded_regs[] = {
        E1000_MDIC, E1000_ICR, E1000_ICS, E1000_IMS,
        E1000_IMC, E1000_TCTL, E1000_TDT, PNPMMIO_SIZE
    };

    DBGOUT(MMIO, "e1000_mmio_map addr=0x%08x 0x%08x\n", addr, size);

    cpu_register_physical_memory(addr, PNPMMIO_SIZE, d->mmio_index);
    qemu_register_coalesced_mmio(addr, excluded_regs[0]);

    for (i = 0; excluded_regs[i] != PNPMMIO_SIZE; i++)
        qemu_register_coalesced_mmio(addr + excluded_regs[i] + 4,
                                     excluded_regs[i + 1] -
                                     excluded_regs[i] - 4);
}
static void
e1000_cleanup(VLANClientState *vc)
{
    E1000State *d = vc->opaque;

    unregister_savevm("e1000", d);
}

static int
pci_e1000_uninit(PCIDevice *dev)
{
    E1000State *d = (E1000State *) dev;

    cpu_unregister_io_memory(d->mmio_index);
    return 0;
}
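
/*
 * Device creation: set up PCI config space, the MMIO/IO BARs, the EEPROM
 * contents (MAC address plus checksum), the PHY and MAC reset values,
 * and register the VLAN client and savevm handlers.
 */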
PCIDevice *
pci_e1000_init(PCIBus *bus, NICInfo *nd, int devfn)
{
    E1000State *d;
    uint8_t *pci_conf;
    uint16_t checksum = 0;
    static const char info_str[] = "e1000";
    int i;

    d = (E1000State *)pci_register_device(bus, "e1000",
                sizeof(E1000State), devfn, NULL, NULL);

    if (!d)
        return NULL;

    pci_conf = d->dev.config;

    pci_config_set_vendor_id(pci_conf, PCI_VENDOR_ID_INTEL);
    pci_config_set_device_id(pci_conf, E1000_DEVID);
    *(uint16_t *)(pci_conf+0x04) = cpu_to_le16(0x0407);
    *(uint16_t *)(pci_conf+0x06) = cpu_to_le16(0x0010);
    pci_conf[0x08] = 0x03;
    pci_config_set_class(pci_conf, PCI_CLASS_NETWORK_ETHERNET);
    pci_conf[0x0c] = 0x10;

    pci_conf[0x3d] = 1; // interrupt pin 0

    d->mmio_index = cpu_register_io_memory(0, e1000_mmio_read,
            e1000_mmio_write, d);

    pci_register_io_region((PCIDevice *)d, 0, PNPMMIO_SIZE,
                           PCI_ADDRESS_SPACE_MEM, e1000_mmio_map);

    pci_register_io_region((PCIDevice *)d, 1, IOPORT_SIZE,
                           PCI_ADDRESS_SPACE_IO, ioport_map);

    memmove(d->eeprom_data, e1000_eeprom_template,
            sizeof e1000_eeprom_template);
    for (i = 0; i < 3; i++)
        d->eeprom_data[i] = (nd->macaddr[2*i+1]<<8) | nd->macaddr[2*i];
    for (i = 0; i < EEPROM_CHECKSUM_REG; i++)
        checksum += d->eeprom_data[i];
    checksum = (uint16_t) EEPROM_SUM - checksum;
    d->eeprom_data[EEPROM_CHECKSUM_REG] = checksum;

    memset(d->phy_reg, 0, sizeof d->phy_reg);
    memmove(d->phy_reg, phy_reg_init, sizeof phy_reg_init);
    memset(d->mac_reg, 0, sizeof d->mac_reg);
    memmove(d->mac_reg, mac_reg_init, sizeof mac_reg_init);
    d->rxbuf_min_shift = 1;
    memset(&d->tx, 0, sizeof d->tx);

    d->vc = qemu_new_vlan_client(nd->vlan, nd->model, nd->name,
                                 e1000_receive, e1000_can_receive,
                                 e1000_cleanup, d);
    d->vc->link_status_changed = e1000_set_link_status;

    qemu_format_nic_info_str(d->vc, nd->macaddr);

    register_savevm(info_str, -1, 2, nic_save, nic_load, d);
    d->dev.unregister = pci_e1000_uninit;

    return (PCIDevice *)d;
}