/*
 * QEMU e1000 emulation
 *
 * Software developer's manual:
 * http://download.intel.com/design/network/manuals/8254x_GBe_SDM.pdf
 *
 * Nir Peleg, Tutis Systems Ltd. for Qumranet Inc.
 * Copyright (c) 2008 Qumranet
 * Based on work done by:
 * Copyright (c) 2007 Dan Aloni
 * Copyright (c) 2004 Antony T Curtis
 *
 * This library is free software; you can redistribute it and/or
 * modify it under the terms of the GNU Lesser General Public
 * License as published by the Free Software Foundation; either
 * version 2 of the License, or (at your option) any later version.
 *
 * This library is distributed in the hope that it will be useful,
 * but WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
 * Lesser General Public License for more details.
 *
 * You should have received a copy of the GNU Lesser General Public
 * License along with this library; if not, see <http://www.gnu.org/licenses/>.
 */

#include "hw.h"
#include "pci.h"
#include "net.h"
#include "net/checksum.h"
#include "loader.h"
#include "sysemu.h"
#include "e1000_hw.h"

#define E1000_DEBUG

#ifdef E1000_DEBUG
enum {
    DEBUG_GENERAL, DEBUG_IO, DEBUG_MMIO, DEBUG_INTERRUPT,
    DEBUG_RX, DEBUG_TX, DEBUG_MDIC, DEBUG_EEPROM,
    DEBUG_UNKNOWN, DEBUG_TXSUM, DEBUG_TXERR, DEBUG_RXERR,
    DEBUG_RXFILTER, DEBUG_NOTYET,
};
#define DBGBIT(x) (1<<DEBUG_##x)
static int debugflags = DBGBIT(TXERR) | DBGBIT(GENERAL);

#define DBGOUT(what, fmt, ...) do { \
    if (debugflags & DBGBIT(what)) \
        fprintf(stderr, "e1000: " fmt, ## __VA_ARGS__); \
    } while (0)
#else
#define DBGOUT(what, fmt, ...) do {} while (0)
#endif

#define IOPORT_SIZE   0x40
#define PNPMMIO_SIZE  0x20000
#define MIN_BUF_SIZE  60 /* Min. octets in an ethernet frame sans FCS */

/*
 * HW models:
 *  E1000_DEV_ID_82540EM works with Windows and Linux
 *  E1000_DEV_ID_82573L OK with windoze and Linux 2.6.22,
 *      appears to perform better than 82540EM, but breaks with Linux 2.6.18
 *  E1000_DEV_ID_82544GC_COPPER appears to work; not well tested
 *  Others never tested
 */
enum { E1000_DEVID = E1000_DEV_ID_82540EM };

/*
 * May need to specify additional MAC-to-PHY entries --
 * Intel's Windows driver refuses to initialize unless they match
 */
enum {
    PHY_ID2_INIT = E1000_DEVID == E1000_DEV_ID_82573L ? 0xcc2 :
                   E1000_DEVID == E1000_DEV_ID_82544GC_COPPER ? 0xc30 :
                   /* default to E1000_DEV_ID_82540EM */ 0xc20
};

typedef struct E1000State_st {
    PCIDevice dev;
    NICState *nic;
    NICConf conf;
    int mmio_index;

    uint32_t mac_reg[0x8000];
    uint16_t phy_reg[0x20];
    uint16_t eeprom_data[64];

    uint32_t rxbuf_size;
    uint32_t rxbuf_min_shift;
    int check_rxov;
    struct e1000_tx {
        unsigned char header[256];
        unsigned char vlan_header[4];
        /* Fields vlan and data must not be reordered or separated. */
        unsigned char vlan[4];
        unsigned char data[0x10000];
        uint16_t size;
        unsigned char sum_needed;
        unsigned char vlan_needed;
        uint8_t ipcss;
        uint8_t ipcso;
        uint16_t ipcse;
        uint8_t tucss;
        uint8_t tucso;
        uint16_t tucse;
        uint8_t hdr_len;
        uint16_t mss;
        uint32_t paylen;
        uint16_t tso_frames;
        char tse;
        int8_t ip;
        int8_t tcp;
        char cptse; // current packet tse bit
    } tx;

    struct {
        uint32_t val_in; // shifted in from guest driver
        uint16_t bitnum_in;
        uint16_t bitnum_out;
        uint16_t reading;
        uint32_t old_eecd;
    } eecd_state;
} E1000State;

#define defreg(x) x = (E1000_##x>>2)
enum {
    defreg(CTRL), defreg(EECD), defreg(EERD), defreg(GPRC),
    defreg(GPTC), defreg(ICR), defreg(ICS), defreg(IMC),
    defreg(IMS), defreg(LEDCTL), defreg(MANC), defreg(MDIC),
    defreg(MPC), defreg(PBA), defreg(RCTL), defreg(RDBAH),
    defreg(RDBAL), defreg(RDH), defreg(RDLEN), defreg(RDT),
    defreg(STATUS), defreg(SWSM), defreg(TCTL), defreg(TDBAH),
    defreg(TDBAL), defreg(TDH), defreg(TDLEN), defreg(TDT),
    defreg(TORH), defreg(TORL), defreg(TOTH), defreg(TOTL),
    defreg(TPR), defreg(TPT), defreg(TXDCTL), defreg(WUFC),
    defreg(RA), defreg(MTA), defreg(CRCERRS), defreg(VFTA),
    defreg(VET),
};

enum { PHY_R = 1, PHY_W = 2, PHY_RW = PHY_R | PHY_W };
static const char phy_regcap[0x20] = {
    [PHY_STATUS] = PHY_R,       [M88E1000_EXT_PHY_SPEC_CTRL] = PHY_RW,
    [PHY_ID1] = PHY_R,          [M88E1000_PHY_SPEC_CTRL] = PHY_RW,
    [PHY_CTRL] = PHY_RW,        [PHY_1000T_CTRL] = PHY_RW,
    [PHY_LP_ABILITY] = PHY_R,   [PHY_1000T_STATUS] = PHY_R,
    [PHY_AUTONEG_ADV] = PHY_RW, [M88E1000_RX_ERR_CNTR] = PHY_R,
    [PHY_ID2] = PHY_R,          [M88E1000_PHY_SPEC_STATUS] = PHY_R
};

static void
ioport_map(PCIDevice *pci_dev, int region_num, pcibus_t addr,
           pcibus_t size, int type)
{
    DBGOUT(IO, "e1000_ioport_map addr=0x%04"FMT_PCIBUS
           " size=0x%08"FMT_PCIBUS"\n", addr, size);
}
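
/*
 * Interrupt handling: set_interrupt_cause() latches the pending cause bits
 * into ICR/ICS and drives the PCI interrupt line whenever any pending cause
 * is unmasked in IMS; set_ics() ORs new causes into what is already pending.
 */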
static void
set_interrupt_cause(E1000State *s, int index, uint32_t val)
{
    if (val)
        val |= E1000_ICR_INT_ASSERTED;
    s->mac_reg[ICR] = val;
    s->mac_reg[ICS] = val;
    qemu_set_irq(s->dev.irq[0], (s->mac_reg[IMS] & s->mac_reg[ICR]) != 0);
}

static void
set_ics(E1000State *s, int index, uint32_t val)
{
    DBGOUT(INTERRUPT, "set_ics %x, ICR %x, IMR %x\n", val, s->mac_reg[ICR],
           s->mac_reg[IMS]);
    set_interrupt_cause(s, 0, val | s->mac_reg[ICR]);
}
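
/*
 * Decode the RCTL buffer-size fields (BSIZE plus the BSEX extension bit)
 * into a receive buffer size in bytes; unrecognized combinations fall back
 * to the 2048-byte default.
 */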
static int
rxbufsize(uint32_t v)
{
    v &= E1000_RCTL_BSEX | E1000_RCTL_SZ_16384 | E1000_RCTL_SZ_8192 |
         E1000_RCTL_SZ_4096 | E1000_RCTL_SZ_2048 | E1000_RCTL_SZ_1024 |
         E1000_RCTL_SZ_512 | E1000_RCTL_SZ_256;
    switch (v) {
    case E1000_RCTL_BSEX | E1000_RCTL_SZ_16384:
        return 16384;
    case E1000_RCTL_BSEX | E1000_RCTL_SZ_8192:
        return 8192;
    case E1000_RCTL_BSEX | E1000_RCTL_SZ_4096:
        return 4096;
    case E1000_RCTL_SZ_1024:
        return 1024;
    case E1000_RCTL_SZ_512:
        return 512;
    case E1000_RCTL_SZ_256:
        return 256;
    }
    return 2048;
}

static void
set_ctrl(E1000State *s, int index, uint32_t val)
{
    /* RST is self clearing */
    s->mac_reg[CTRL] = val & ~E1000_CTRL_RST;
}

static void
set_rx_control(E1000State *s, int index, uint32_t val)
{
    s->mac_reg[RCTL] = val;
    s->rxbuf_size = rxbufsize(val);
    s->rxbuf_min_shift = ((val / E1000_RCTL_RDMTS_QUAT) & 3) + 1;
    DBGOUT(RX, "RCTL: %d, mac_reg[RCTL] = 0x%x\n", s->mac_reg[RDT],
           s->mac_reg[RCTL]);
}
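
/*
 * MDIC is the MDIO control register: the guest driver uses it to read and
 * write PHY registers. Accesses to PHY addresses other than 1, or to
 * registers not listed in phy_regcap, are flagged with E1000_MDIC_ERROR;
 * completion is signalled via the MDAC interrupt cause.
 */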
static void
set_mdic(E1000State *s, int index, uint32_t val)
{
    uint32_t data = val & E1000_MDIC_DATA_MASK;
    uint32_t addr = ((val & E1000_MDIC_REG_MASK) >> E1000_MDIC_REG_SHIFT);

    if ((val & E1000_MDIC_PHY_MASK) >> E1000_MDIC_PHY_SHIFT != 1) // phy #
        val = s->mac_reg[MDIC] | E1000_MDIC_ERROR;
    else if (val & E1000_MDIC_OP_READ) {
        DBGOUT(MDIC, "MDIC read reg 0x%x\n", addr);
        if (!(phy_regcap[addr] & PHY_R)) {
            DBGOUT(MDIC, "MDIC read reg %x unhandled\n", addr);
            val |= E1000_MDIC_ERROR;
        } else
            val = (val ^ data) | s->phy_reg[addr];
    } else if (val & E1000_MDIC_OP_WRITE) {
        DBGOUT(MDIC, "MDIC write reg 0x%x, value 0x%x\n", addr, data);
        if (!(phy_regcap[addr] & PHY_W)) {
            DBGOUT(MDIC, "MDIC write reg %x unhandled\n", addr);
            val |= E1000_MDIC_ERROR;
        } else
            s->phy_reg[addr] = data;
    }
    s->mac_reg[MDIC] = val | E1000_MDIC_READY;
    set_ics(s, 0, E1000_ICR_MDAC);
}
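
/*
 * EEPROM access is bit-banged through EECD the way a real Microwire part is
 * driven: the guest toggles SK/CS/DI, set_eecd() shifts the opcode and
 * address in, and get_eecd() shifts the addressed eeprom_data word back out
 * on DO, one bit per clock.
 */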
static uint32_t
get_eecd(E1000State *s, int index)
{
    uint32_t ret = E1000_EECD_PRES|E1000_EECD_GNT | s->eecd_state.old_eecd;

    DBGOUT(EEPROM, "reading eeprom bit %d (reading %d)\n",
           s->eecd_state.bitnum_out, s->eecd_state.reading);
    if (!s->eecd_state.reading ||
        ((s->eeprom_data[(s->eecd_state.bitnum_out >> 4) & 0x3f] >>
          ((s->eecd_state.bitnum_out & 0xf) ^ 0xf))) & 1)
        ret |= E1000_EECD_DO;
    return ret;
}

static void
set_eecd(E1000State *s, int index, uint32_t val)
{
    uint32_t oldval = s->eecd_state.old_eecd;

    s->eecd_state.old_eecd = val & (E1000_EECD_SK | E1000_EECD_CS |
            E1000_EECD_DI|E1000_EECD_FWE_MASK|E1000_EECD_REQ);
    if (!(E1000_EECD_CS & val))             // CS inactive; nothing to do
        return;
    if (E1000_EECD_CS & (val ^ oldval)) {   // CS rise edge; reset state
        s->eecd_state.val_in = 0;
        s->eecd_state.bitnum_in = 0;
        s->eecd_state.bitnum_out = 0;
        s->eecd_state.reading = 0;
    }
    if (!(E1000_EECD_SK & (val ^ oldval)))  // no clock edge
        return;
    if (!(E1000_EECD_SK & val)) {           // falling edge
        s->eecd_state.bitnum_out++;
        return;
    }
    s->eecd_state.val_in <<= 1;
    if (val & E1000_EECD_DI)
        s->eecd_state.val_in |= 1;
    if (++s->eecd_state.bitnum_in == 9 && !s->eecd_state.reading) {
        s->eecd_state.bitnum_out = ((s->eecd_state.val_in & 0x3f)<<4)-1;
        s->eecd_state.reading = (((s->eecd_state.val_in >> 6) & 7) ==
            EEPROM_READ_OPCODE_MICROWIRE);
    }
    DBGOUT(EEPROM, "eeprom bitnum in %d out %d, reading %d\n",
           s->eecd_state.bitnum_in, s->eecd_state.bitnum_out,
           s->eecd_state.reading);
}

static uint32_t
flash_eerd_read(E1000State *s, int x)
{
    unsigned int index, r = s->mac_reg[EERD] & ~E1000_EEPROM_RW_REG_START;

    if ((s->mac_reg[EERD] & E1000_EEPROM_RW_REG_START) == 0)
        return (s->mac_reg[EERD]);

    if ((index = r >> E1000_EEPROM_RW_ADDR_SHIFT) > EEPROM_CHECKSUM_REG)
        return (E1000_EEPROM_RW_REG_DONE | r);

    return ((s->eeprom_data[index] << E1000_EEPROM_RW_REG_DATA) |
            E1000_EEPROM_RW_REG_DONE | r);
}
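
/*
 * Insert a 16-bit Internet checksum at offset 'sloc', computed over
 * data[css..n), optionally bounded by the checksum end offset 'cse'.
 */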
static void
putsum(uint8_t *data, uint32_t n, uint32_t sloc, uint32_t css, uint32_t cse)
{
    uint32_t sum;

    if (cse && cse < n)
        n = cse + 1;
    if (sloc < n-1) {
        sum = net_checksum_add(n-css, data+css);
        cpu_to_be16wu((uint16_t *)(data + sloc),
                      net_checksum_finish(sum));
    }
}

static inline int
vlan_enabled(E1000State *s)
{
    return ((s->mac_reg[CTRL] & E1000_CTRL_VME) != 0);
}

static inline int
vlan_rx_filter_enabled(E1000State *s)
{
    return ((s->mac_reg[RCTL] & E1000_RCTL_VFE) != 0);
}

static inline int
is_vlan_packet(E1000State *s, const uint8_t *buf)
{
    return (be16_to_cpup((uint16_t *)(buf + 12)) ==
            le16_to_cpup((uint16_t *)(s->mac_reg + VET)));
}

static inline int
is_vlan_txd(uint32_t txd_lower)
{
    return ((txd_lower & E1000_TXD_CMD_VLE) != 0);
}

/* FCS aka Ethernet CRC-32. We don't get it from backends and can't
 * fill it in, just pad descriptor length by 4 bytes unless guest
 * told us to strip it off the packet. */
static inline int
fcs_len(E1000State *s)
{
    return (s->mac_reg[RCTL] & E1000_RCTL_SECRC) ? 0 : 4;
}
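
/*
 * Emit one segment of the current transmit packet. For TSO segments this
 * patches the IP total length and identification (or the IPv6 payload
 * length), the TCP sequence number and PSH/FIN flags (or the UDP length),
 * and the pseudo-header checksum before the requested IP/TCP checksums are
 * inserted and the frame is handed to the network backend.
 */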
static void
xmit_seg(E1000State *s)
{
    uint16_t len, *sp;
    unsigned int frames = s->tx.tso_frames, css, sofar, n;
    struct e1000_tx *tp = &s->tx;

    if (tp->tse && tp->cptse) {
        css = tp->ipcss;
        DBGOUT(TXSUM, "frames %d size %d ipcss %d\n",
               frames, tp->size, css);
        if (tp->ip) {   // IPv4
            cpu_to_be16wu((uint16_t *)(tp->data+css+2),
                          tp->size - css);
            cpu_to_be16wu((uint16_t *)(tp->data+css+4),
                          be16_to_cpup((uint16_t *)(tp->data+css+4))+frames);
        } else          // IPv6
            cpu_to_be16wu((uint16_t *)(tp->data+css+4),
                          tp->size - css);
        css = tp->tucss;
        len = tp->size - css;
        DBGOUT(TXSUM, "tcp %d tucss %d len %d\n", tp->tcp, css, len);
        if (tp->tcp) {
            sofar = frames * tp->mss;
            cpu_to_be32wu((uint32_t *)(tp->data+css+4),   // seq
                be32_to_cpupu((uint32_t *)(tp->data+css+4))+sofar);
            if (tp->paylen - sofar > tp->mss)
                tp->data[css + 13] &= ~9;   // PSH, FIN
        } else          // UDP
            cpu_to_be16wu((uint16_t *)(tp->data+css+4), len);
        if (tp->sum_needed & E1000_TXD_POPTS_TXSM) {
            unsigned int phsum;
            // add pseudo-header length before checksum calculation
            sp = (uint16_t *)(tp->data + tp->tucso);
            phsum = be16_to_cpup(sp) + len;
            phsum = (phsum >> 16) + (phsum & 0xffff);
            cpu_to_be16wu(sp, phsum);
        }
        tp->tso_frames++;
    }

    if (tp->sum_needed & E1000_TXD_POPTS_TXSM)
        putsum(tp->data, tp->size, tp->tucso, tp->tucss, tp->tucse);
    if (tp->sum_needed & E1000_TXD_POPTS_IXSM)
        putsum(tp->data, tp->size, tp->ipcso, tp->ipcss, tp->ipcse);
    if (tp->vlan_needed) {
        memmove(tp->vlan, tp->data, 4);
        memmove(tp->data, tp->data + 4, 8);
        memcpy(tp->data + 8, tp->vlan_header, 4);
        qemu_send_packet(&s->nic->nc, tp->vlan, tp->size + 4);
    } else
        qemu_send_packet(&s->nic->nc, tp->data, tp->size);
    s->mac_reg[TPT]++;
    s->mac_reg[GPTC]++;
    n = s->mac_reg[TOTL];
    if ((s->mac_reg[TOTL] += s->tx.size) < n)
        s->mac_reg[TOTH]++;
}
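
/*
 * Handle one transmit descriptor: context descriptors update the offload
 * state in s->tx, while data and legacy descriptors have their payload
 * copied from guest memory and are sent (segmenting on the fly when TSE is
 * in effect) once the EOP bit is seen.
 */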
static void
process_tx_desc(E1000State *s, struct e1000_tx_desc *dp)
{
    uint32_t txd_lower = le32_to_cpu(dp->lower.data);
    uint32_t dtype = txd_lower & (E1000_TXD_CMD_DEXT | E1000_TXD_DTYP_D);
    unsigned int split_size = txd_lower & 0xffff, bytes, sz, op;
    unsigned int msh = 0xfffff, hdr = 0;
    uint64_t addr;
    struct e1000_context_desc *xp = (struct e1000_context_desc *)dp;
    struct e1000_tx *tp = &s->tx;

    if (dtype == E1000_TXD_CMD_DEXT) {  // context descriptor
        op = le32_to_cpu(xp->cmd_and_length);
        tp->ipcss = xp->lower_setup.ip_fields.ipcss;
        tp->ipcso = xp->lower_setup.ip_fields.ipcso;
        tp->ipcse = le16_to_cpu(xp->lower_setup.ip_fields.ipcse);
        tp->tucss = xp->upper_setup.tcp_fields.tucss;
        tp->tucso = xp->upper_setup.tcp_fields.tucso;
        tp->tucse = le16_to_cpu(xp->upper_setup.tcp_fields.tucse);
        tp->paylen = op & 0xfffff;
        tp->hdr_len = xp->tcp_seg_setup.fields.hdr_len;
        tp->mss = le16_to_cpu(xp->tcp_seg_setup.fields.mss);
        tp->ip = (op & E1000_TXD_CMD_IP) ? 1 : 0;
        tp->tcp = (op & E1000_TXD_CMD_TCP) ? 1 : 0;
        tp->tse = (op & E1000_TXD_CMD_TSE) ? 1 : 0;
        tp->tso_frames = 0;
        if (tp->tucso == 0) {   // this is probably wrong
            DBGOUT(TXSUM, "TCP/UDP: cso 0!\n");
            tp->tucso = tp->tucss + (tp->tcp ? 16 : 6);
        }
        return;
    } else if (dtype == (E1000_TXD_CMD_DEXT | E1000_TXD_DTYP_D)) {
        // data descriptor
        if (tp->size == 0) {
            tp->sum_needed = le32_to_cpu(dp->upper.data) >> 8;
        }
        tp->cptse = ( txd_lower & E1000_TXD_CMD_TSE ) ? 1 : 0;
    } else {
        // legacy descriptor
        tp->cptse = 0;
    }

    if (vlan_enabled(s) && is_vlan_txd(txd_lower) &&
        (tp->cptse || txd_lower & E1000_TXD_CMD_EOP)) {
        tp->vlan_needed = 1;
        cpu_to_be16wu((uint16_t *)(tp->vlan_header),
                      le16_to_cpup((uint16_t *)(s->mac_reg + VET)));
        cpu_to_be16wu((uint16_t *)(tp->vlan_header + 2),
                      le16_to_cpu(dp->upper.fields.special));
    }

    addr = le64_to_cpu(dp->buffer_addr);
    if (tp->tse && tp->cptse) {
        hdr = tp->hdr_len;
        msh = hdr + tp->mss;
        do {
            bytes = split_size;
            if (tp->size + bytes > msh)
                bytes = msh - tp->size;
            cpu_physical_memory_read(addr, tp->data + tp->size, bytes);
            if ((sz = tp->size + bytes) >= hdr && tp->size < hdr)
                memmove(tp->header, tp->data, hdr);
            tp->size = sz;
            addr += bytes;
            if (sz == msh) {
                xmit_seg(s);
                memmove(tp->data, tp->header, hdr);
                tp->size = hdr;
            }
        } while (split_size -= bytes);
    } else if (!tp->tse && tp->cptse) {
        // context descriptor TSE is not set, while data descriptor TSE is set
        DBGOUT(TXERR, "TCP segmentation error\n");
    } else {
        cpu_physical_memory_read(addr, tp->data + tp->size, split_size);
        tp->size += split_size;
    }

    if (!(txd_lower & E1000_TXD_CMD_EOP))
        return;
    if (!(tp->tse && tp->cptse && tp->size < hdr))
        xmit_seg(s);
    tp->tso_frames = 0;
    tp->sum_needed = 0;
    tp->vlan_needed = 0;
    tp->size = 0;
    tp->cptse = 0;
}

static uint32_t
txdesc_writeback(target_phys_addr_t base, struct e1000_tx_desc *dp)
{
    uint32_t txd_upper, txd_lower = le32_to_cpu(dp->lower.data);

    if (!(txd_lower & (E1000_TXD_CMD_RS|E1000_TXD_CMD_RPS)))
        return 0;
    txd_upper = (le32_to_cpu(dp->upper.data) | E1000_TXD_STAT_DD) &
                ~(E1000_TXD_STAT_EC | E1000_TXD_STAT_LC | E1000_TXD_STAT_TU);
    dp->upper.data = cpu_to_le32(txd_upper);
    cpu_physical_memory_write(base + ((char *)&dp->upper - (char *)dp),
                              (void *)&dp->upper, sizeof(dp->upper));
    return E1000_ICR_TXDW;
}

static uint64_t tx_desc_base(E1000State *s)
{
    uint64_t bah = s->mac_reg[TDBAH];
    uint64_t bal = s->mac_reg[TDBAL] & ~0xf;

    return (bah << 32) + bal;
}

static void
start_xmit(E1000State *s)
{
    target_phys_addr_t base;
    struct e1000_tx_desc desc;
    uint32_t tdh_start = s->mac_reg[TDH], cause = E1000_ICS_TXQE;

    if (!(s->mac_reg[TCTL] & E1000_TCTL_EN)) {
        DBGOUT(TX, "tx disabled\n");
        return;
    }

    while (s->mac_reg[TDH] != s->mac_reg[TDT]) {
        base = tx_desc_base(s) +
               sizeof(struct e1000_tx_desc) * s->mac_reg[TDH];
        cpu_physical_memory_read(base, (void *)&desc, sizeof(desc));

        DBGOUT(TX, "index %d: %p : %x %x\n", s->mac_reg[TDH],
               (void *)(intptr_t)desc.buffer_addr, desc.lower.data,
               desc.upper.data);

        process_tx_desc(s, &desc);
        cause |= txdesc_writeback(base, &desc);

        if (++s->mac_reg[TDH] * sizeof(desc) >= s->mac_reg[TDLEN])
            s->mac_reg[TDH] = 0;
        /*
         * the following could happen only if guest sw assigns
         * bogus values to TDT/TDLEN.
         * there's nothing too intelligent we could do about this.
         */
        if (s->mac_reg[TDH] == tdh_start) {
            DBGOUT(TXERR, "TDH wraparound @%x, TDT %x, TDLEN %x\n",
                   tdh_start, s->mac_reg[TDT], s->mac_reg[TDLEN]);
            break;
        }
    }
    set_ics(s, 0, cause);
}
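
/*
 * Receive filtering: accept the frame if we are in promiscuous (unicast or
 * multicast) mode, it is a broadcast and BAM is set, it matches a valid RA
 * unicast entry, or its hash hits the MTA multicast table; VLAN-tagged
 * frames are additionally checked against VFTA when the VLAN receive filter
 * is enabled.
 */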
static int
receive_filter(E1000State *s, const uint8_t *buf, int size)
{
    static const uint8_t bcast[] = {0xff, 0xff, 0xff, 0xff, 0xff, 0xff};
    static const int mta_shift[] = {4, 3, 2, 0};
    uint32_t f, rctl = s->mac_reg[RCTL], ra[2], *rp;

    if (is_vlan_packet(s, buf) && vlan_rx_filter_enabled(s)) {
        uint16_t vid = be16_to_cpup((uint16_t *)(buf + 14));
        uint32_t vfta = le32_to_cpup((uint32_t *)(s->mac_reg + VFTA) +
                                     ((vid >> 5) & 0x7f));
        if ((vfta & (1 << (vid & 0x1f))) == 0)
            return 0;
    }

    if (rctl & E1000_RCTL_UPE)  // promiscuous
        return 1;

    if ((buf[0] & 1) && (rctl & E1000_RCTL_MPE))    // promiscuous mcast
        return 1;

    if ((rctl & E1000_RCTL_BAM) && !memcmp(buf, bcast, sizeof bcast))
        return 1;

    for (rp = s->mac_reg + RA; rp < s->mac_reg + RA + 32; rp += 2) {
        if (!(rp[1] & E1000_RAH_AV))
            continue;
        ra[0] = cpu_to_le32(rp[0]);
        ra[1] = cpu_to_le32(rp[1]);
        if (!memcmp(buf, (uint8_t *)ra, 6)) {
            DBGOUT(RXFILTER,
                   "unicast match[%d]: %02x:%02x:%02x:%02x:%02x:%02x\n",
                   (int)(rp - s->mac_reg - RA)/2,
                   buf[0], buf[1], buf[2], buf[3], buf[4], buf[5]);
            return 1;
        }
    }
    DBGOUT(RXFILTER, "unicast mismatch: %02x:%02x:%02x:%02x:%02x:%02x\n",
           buf[0], buf[1], buf[2], buf[3], buf[4], buf[5]);

    f = mta_shift[(rctl >> E1000_RCTL_MO_SHIFT) & 3];
    f = (((buf[5] << 8) | buf[4]) >> f) & 0xfff;
    if (s->mac_reg[MTA + (f >> 5)] & (1 << (f & 0x1f)))
        return 1;
    DBGOUT(RXFILTER,
           "dropping, inexact filter mismatch: %02x:%02x:%02x:%02x:%02x:%02x MO %d MTA[%d] %x\n",
           buf[0], buf[1], buf[2], buf[3], buf[4], buf[5],
           (rctl >> E1000_RCTL_MO_SHIFT) & 3, f >> 5,
           s->mac_reg[MTA + (f >> 5)]);

    return 0;
}

static void
e1000_set_link_status(VLANClientState *nc)
{
    E1000State *s = DO_UPCAST(NICState, nc, nc)->opaque;
    uint32_t old_status = s->mac_reg[STATUS];

    if (nc->link_down)
        s->mac_reg[STATUS] &= ~E1000_STATUS_LU;
    else
        s->mac_reg[STATUS] |= E1000_STATUS_LU;

    if (s->mac_reg[STATUS] != old_status)
        set_ics(s, 0, E1000_ICR_LSC);
}
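
/*
 * Check whether enough receive descriptors are available for total_size
 * bytes. check_rxov distinguishes an empty ring from a full one when
 * RDH == RDT: it is cleared when the guest moves RDT and set again once we
 * consume a descriptor.
 */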
static bool e1000_has_rxbufs(E1000State *s, size_t total_size)
{
    int bufs;
    /* Fast-path short packets */
    if (total_size <= s->rxbuf_size) {
        return s->mac_reg[RDH] != s->mac_reg[RDT] || !s->check_rxov;
    }
    if (s->mac_reg[RDH] < s->mac_reg[RDT]) {
        bufs = s->mac_reg[RDT] - s->mac_reg[RDH];
    } else if (s->mac_reg[RDH] > s->mac_reg[RDT] || !s->check_rxov) {
        bufs = s->mac_reg[RDLEN] / sizeof(struct e1000_rx_desc) +
               s->mac_reg[RDT] - s->mac_reg[RDH];
    } else {
        return false;
    }
    return total_size <= bufs * s->rxbuf_size;
}

static int
e1000_can_receive(VLANClientState *nc)
{
    E1000State *s = DO_UPCAST(NICState, nc, nc)->opaque;

    return (s->mac_reg[RCTL] & E1000_RCTL_EN) && e1000_has_rxbufs(s, 1);
}

static uint64_t rx_desc_base(E1000State *s)
{
    uint64_t bah = s->mac_reg[RDBAH];
    uint64_t bal = s->mac_reg[RDBAL] & ~0xf;

    return (bah << 32) + bal;
}
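
/*
 * Receive path: pad runt frames, apply the receive filter, strip the VLAN
 * tag into the descriptor 'special' field if VLAN handling is enabled, then
 * scatter the frame across as many receive descriptors as needed and raise
 * RXT0 (and RXDMT0 when the ring is running low) via set_ics().
 */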
static ssize_t
e1000_receive(VLANClientState *nc, const uint8_t *buf, size_t size)
{
    E1000State *s = DO_UPCAST(NICState, nc, nc)->opaque;
    struct e1000_rx_desc desc;
    target_phys_addr_t base;
    unsigned int n, rdt;
    uint32_t rdh_start;
    uint16_t vlan_special = 0;
    uint8_t vlan_status = 0, vlan_offset = 0;
    uint8_t min_buf[MIN_BUF_SIZE];
    size_t desc_offset;
    size_t desc_size;
    size_t total_size;

    if (!(s->mac_reg[RCTL] & E1000_RCTL_EN))
        return -1;

    /* Pad to minimum Ethernet frame length */
    if (size < sizeof(min_buf)) {
        memcpy(min_buf, buf, size);
        memset(&min_buf[size], 0, sizeof(min_buf) - size);
        buf = min_buf;
        size = sizeof(min_buf);
    }

    if (!receive_filter(s, buf, size))
        return size;

    if (vlan_enabled(s) && is_vlan_packet(s, buf)) {
        vlan_special = cpu_to_le16(be16_to_cpup((uint16_t *)(buf + 14)));
        memmove((uint8_t *)buf + 4, buf, 12);
        vlan_status = E1000_RXD_STAT_VP;
        vlan_offset = 4;
        size -= 4;
    }

    rdh_start = s->mac_reg[RDH];
    desc_offset = 0;
    total_size = size + fcs_len(s);
    if (!e1000_has_rxbufs(s, total_size)) {
        set_ics(s, 0, E1000_ICS_RXO);
        return -1;
    }
    do {
        desc_size = total_size - desc_offset;
        if (desc_size > s->rxbuf_size) {
            desc_size = s->rxbuf_size;
        }
        base = rx_desc_base(s) + sizeof(desc) * s->mac_reg[RDH];
        cpu_physical_memory_read(base, (void *)&desc, sizeof(desc));
        desc.special = vlan_special;
        desc.status |= (vlan_status | E1000_RXD_STAT_DD);
        if (desc.buffer_addr) {
            if (desc_offset < size) {
                size_t copy_size = size - desc_offset;
                if (copy_size > s->rxbuf_size) {
                    copy_size = s->rxbuf_size;
                }
                cpu_physical_memory_write(le64_to_cpu(desc.buffer_addr),
                                          (void *)(buf + desc_offset + vlan_offset),
                                          copy_size);
            }
            desc_offset += desc_size;
            desc.length = cpu_to_le16(desc_size);
            if (desc_offset >= total_size) {
                desc.status |= E1000_RXD_STAT_EOP | E1000_RXD_STAT_IXSM;
            } else {
                /* Guest zeroing out status is not a hardware requirement.
                   Clear EOP in case guest didn't do it. */
                desc.status &= ~E1000_RXD_STAT_EOP;
            }
        } else { // as per intel docs; skip descriptors with null buf addr
            DBGOUT(RX, "Null RX descriptor!!\n");
        }
        cpu_physical_memory_write(base, (void *)&desc, sizeof(desc));

        if (++s->mac_reg[RDH] * sizeof(desc) >= s->mac_reg[RDLEN])
            s->mac_reg[RDH] = 0;
        s->check_rxov = 1;
        /* see comment in start_xmit; same here */
        if (s->mac_reg[RDH] == rdh_start) {
            DBGOUT(RXERR, "RDH wraparound @%x, RDT %x, RDLEN %x\n",
                   rdh_start, s->mac_reg[RDT], s->mac_reg[RDLEN]);
            set_ics(s, 0, E1000_ICS_RXO);
            return -1;
        }
    } while (desc_offset < total_size);

    s->mac_reg[GPRC]++;
    s->mac_reg[TPR]++;
    /* TOR - Total Octets Received:
     * This register includes bytes received in a packet from the <Destination
     * Address> field through the <CRC> field, inclusively.
     */
    n = s->mac_reg[TORL] + size + /* Always include FCS length. */ 4;
    if (n < s->mac_reg[TORL])
        s->mac_reg[TORH]++;
    s->mac_reg[TORL] = n;

    n = E1000_ICS_RXT0;
    if ((rdt = s->mac_reg[RDT]) < s->mac_reg[RDH])
        rdt += s->mac_reg[RDLEN] / sizeof(desc);
    if (((rdt - s->mac_reg[RDH]) * sizeof(desc)) <= s->mac_reg[RDLEN] >>
        s->rxbuf_min_shift)
        n |= E1000_ICS_RXDMT0;

    set_ics(s, 0, n);

    return size;
}

static uint32_t
mac_readreg(E1000State *s, int index)
{
    return s->mac_reg[index];
}

static uint32_t
mac_icr_read(E1000State *s, int index)
{
    uint32_t ret = s->mac_reg[ICR];

    DBGOUT(INTERRUPT, "ICR read: %x\n", ret);
    set_interrupt_cause(s, 0, 0);
    return ret;
}

static uint32_t
mac_read_clr4(E1000State *s, int index)
{
    uint32_t ret = s->mac_reg[index];

    s->mac_reg[index] = 0;
    return ret;
}

static uint32_t
mac_read_clr8(E1000State *s, int index)
{
    uint32_t ret = s->mac_reg[index];

    s->mac_reg[index] = 0;
    s->mac_reg[index-1] = 0;
    return ret;
}

static void
mac_writereg(E1000State *s, int index, uint32_t val)
{
    s->mac_reg[index] = val;
}

static void
set_rdt(E1000State *s, int index, uint32_t val)
{
    s->check_rxov = 0;
    s->mac_reg[index] = val & 0xffff;
}

static void
set_16bit(E1000State *s, int index, uint32_t val)
{
    s->mac_reg[index] = val & 0xffff;
}

static void
set_dlen(E1000State *s, int index, uint32_t val)
{
    s->mac_reg[index] = val & 0xfff80;
}

static void
set_tctl(E1000State *s, int index, uint32_t val)
{
    s->mac_reg[index] = val;
    s->mac_reg[TDT] &= 0xffff;
    start_xmit(s);
}

static void
set_icr(E1000State *s, int index, uint32_t val)
{
    DBGOUT(INTERRUPT, "set_icr %x\n", val);
    set_interrupt_cause(s, 0, s->mac_reg[ICR] & ~val);
}

static void
set_imc(E1000State *s, int index, uint32_t val)
{
    s->mac_reg[IMS] &= ~val;
    set_ics(s, 0, 0);
}

static void
set_ims(E1000State *s, int index, uint32_t val)
{
    s->mac_reg[IMS] |= val;
    set_ics(s, 0, 0);
}

#define getreg(x) [x] = mac_readreg
static uint32_t (*macreg_readops[])(E1000State *, int) = {
    getreg(PBA), getreg(RCTL), getreg(TDH), getreg(TXDCTL),
    getreg(WUFC), getreg(TDT), getreg(CTRL), getreg(LEDCTL),
    getreg(MANC), getreg(MDIC), getreg(SWSM), getreg(STATUS),
    getreg(TORL), getreg(TOTL), getreg(IMS), getreg(TCTL),
    getreg(RDH), getreg(RDT), getreg(VET), getreg(ICS),
    getreg(TDBAL), getreg(TDBAH), getreg(RDBAH), getreg(RDBAL),
    getreg(TDLEN), getreg(RDLEN),

    [TOTH] = mac_read_clr8, [TORH] = mac_read_clr8, [GPRC] = mac_read_clr4,
    [GPTC] = mac_read_clr4, [TPR] = mac_read_clr4, [TPT] = mac_read_clr4,
    [ICR] = mac_icr_read, [EECD] = get_eecd, [EERD] = flash_eerd_read,
    [CRCERRS ... MPC] = &mac_readreg,
    [RA ... RA+31] = &mac_readreg,
    [MTA ... MTA+127] = &mac_readreg,
    [VFTA ... VFTA+127] = &mac_readreg,
};
enum { NREADOPS = ARRAY_SIZE(macreg_readops) };

#define putreg(x) [x] = mac_writereg
static void (*macreg_writeops[])(E1000State *, int, uint32_t) = {
    putreg(PBA), putreg(EERD), putreg(SWSM), putreg(WUFC),
    putreg(TDBAL), putreg(TDBAH), putreg(TXDCTL), putreg(RDBAH),
    putreg(RDBAL), putreg(LEDCTL), putreg(VET),
    [TDLEN] = set_dlen, [RDLEN] = set_dlen, [TCTL] = set_tctl,
    [TDT] = set_tctl, [MDIC] = set_mdic, [ICS] = set_ics,
    [TDH] = set_16bit, [RDH] = set_16bit, [RDT] = set_rdt,
    [IMC] = set_imc, [IMS] = set_ims, [ICR] = set_icr,
    [EECD] = set_eecd, [RCTL] = set_rx_control, [CTRL] = set_ctrl,
    [RA ... RA+31] = &mac_writereg,
    [MTA ... MTA+127] = &mac_writereg,
    [VFTA ... VFTA+127] = &mac_writereg,
};
enum { NWRITEOPS = ARRAY_SIZE(macreg_writeops) };

static void
e1000_mmio_writel(void *opaque, target_phys_addr_t addr, uint32_t val)
{
    E1000State *s = opaque;
    unsigned int index = (addr & 0x1ffff) >> 2;

    if (index < NWRITEOPS && macreg_writeops[index]) {
        macreg_writeops[index](s, index, val);
    } else if (index < NREADOPS && macreg_readops[index]) {
        DBGOUT(MMIO, "e1000_mmio_writel RO %x: 0x%04x\n", index<<2, val);
    } else {
        DBGOUT(UNKNOWN, "MMIO unknown write addr=0x%08x,val=0x%08x\n",
               index<<2, val);
    }
}

static void
e1000_mmio_writew(void *opaque, target_phys_addr_t addr, uint32_t val)
{
    // emulate hw without byte enables: no RMW
    e1000_mmio_writel(opaque, addr & ~3,
                      (val & 0xffff) << (8*(addr & 3)));
}

static void
e1000_mmio_writeb(void *opaque, target_phys_addr_t addr, uint32_t val)
{
    // emulate hw without byte enables: no RMW
    e1000_mmio_writel(opaque, addr & ~3,
                      (val & 0xff) << (8*(addr & 3)));
}

static uint32_t
e1000_mmio_readl(void *opaque, target_phys_addr_t addr)
{
    E1000State *s = opaque;
    unsigned int index = (addr & 0x1ffff) >> 2;

    if (index < NREADOPS && macreg_readops[index])
    {
        return macreg_readops[index](s, index);
    }
    DBGOUT(UNKNOWN, "MMIO unknown read addr=0x%08x\n", index<<2);
    return 0;
}

static uint32_t
e1000_mmio_readb(void *opaque, target_phys_addr_t addr)
{
    return ((e1000_mmio_readl(opaque, addr & ~3)) >>
            (8 * (addr & 3))) & 0xff;
}

static uint32_t
e1000_mmio_readw(void *opaque, target_phys_addr_t addr)
{
    return ((e1000_mmio_readl(opaque, addr & ~3)) >>
            (8 * (addr & 3))) & 0xffff;
}

static bool is_version_1(void *opaque, int version_id)
{
    return version_id == 1;
}

static const VMStateDescription vmstate_e1000 = {
    .name = "e1000",
    .version_id = 2,
    .minimum_version_id = 1,
    .minimum_version_id_old = 1,
    .fields = (VMStateField []) {
        VMSTATE_PCI_DEVICE(dev, E1000State),
        VMSTATE_UNUSED_TEST(is_version_1, 4), /* was instance id */
        VMSTATE_UNUSED(4), /* Was mmio_base. */
        VMSTATE_UINT32(rxbuf_size, E1000State),
        VMSTATE_UINT32(rxbuf_min_shift, E1000State),
        VMSTATE_UINT32(eecd_state.val_in, E1000State),
        VMSTATE_UINT16(eecd_state.bitnum_in, E1000State),
        VMSTATE_UINT16(eecd_state.bitnum_out, E1000State),
        VMSTATE_UINT16(eecd_state.reading, E1000State),
        VMSTATE_UINT32(eecd_state.old_eecd, E1000State),
        VMSTATE_UINT8(tx.ipcss, E1000State),
        VMSTATE_UINT8(tx.ipcso, E1000State),
        VMSTATE_UINT16(tx.ipcse, E1000State),
        VMSTATE_UINT8(tx.tucss, E1000State),
        VMSTATE_UINT8(tx.tucso, E1000State),
        VMSTATE_UINT16(tx.tucse, E1000State),
        VMSTATE_UINT32(tx.paylen, E1000State),
        VMSTATE_UINT8(tx.hdr_len, E1000State),
        VMSTATE_UINT16(tx.mss, E1000State),
        VMSTATE_UINT16(tx.size, E1000State),
        VMSTATE_UINT16(tx.tso_frames, E1000State),
        VMSTATE_UINT8(tx.sum_needed, E1000State),
        VMSTATE_INT8(tx.ip, E1000State),
        VMSTATE_INT8(tx.tcp, E1000State),
        VMSTATE_BUFFER(tx.header, E1000State),
        VMSTATE_BUFFER(tx.data, E1000State),
        VMSTATE_UINT16_ARRAY(eeprom_data, E1000State, 64),
        VMSTATE_UINT16_ARRAY(phy_reg, E1000State, 0x20),
        VMSTATE_UINT32(mac_reg[CTRL], E1000State),
        VMSTATE_UINT32(mac_reg[EECD], E1000State),
        VMSTATE_UINT32(mac_reg[EERD], E1000State),
        VMSTATE_UINT32(mac_reg[GPRC], E1000State),
        VMSTATE_UINT32(mac_reg[GPTC], E1000State),
        VMSTATE_UINT32(mac_reg[ICR], E1000State),
        VMSTATE_UINT32(mac_reg[ICS], E1000State),
        VMSTATE_UINT32(mac_reg[IMC], E1000State),
        VMSTATE_UINT32(mac_reg[IMS], E1000State),
        VMSTATE_UINT32(mac_reg[LEDCTL], E1000State),
        VMSTATE_UINT32(mac_reg[MANC], E1000State),
        VMSTATE_UINT32(mac_reg[MDIC], E1000State),
        VMSTATE_UINT32(mac_reg[MPC], E1000State),
        VMSTATE_UINT32(mac_reg[PBA], E1000State),
        VMSTATE_UINT32(mac_reg[RCTL], E1000State),
        VMSTATE_UINT32(mac_reg[RDBAH], E1000State),
        VMSTATE_UINT32(mac_reg[RDBAL], E1000State),
        VMSTATE_UINT32(mac_reg[RDH], E1000State),
        VMSTATE_UINT32(mac_reg[RDLEN], E1000State),
        VMSTATE_UINT32(mac_reg[RDT], E1000State),
        VMSTATE_UINT32(mac_reg[STATUS], E1000State),
        VMSTATE_UINT32(mac_reg[SWSM], E1000State),
        VMSTATE_UINT32(mac_reg[TCTL], E1000State),
        VMSTATE_UINT32(mac_reg[TDBAH], E1000State),
        VMSTATE_UINT32(mac_reg[TDBAL], E1000State),
        VMSTATE_UINT32(mac_reg[TDH], E1000State),
        VMSTATE_UINT32(mac_reg[TDLEN], E1000State),
        VMSTATE_UINT32(mac_reg[TDT], E1000State),
        VMSTATE_UINT32(mac_reg[TORH], E1000State),
        VMSTATE_UINT32(mac_reg[TORL], E1000State),
        VMSTATE_UINT32(mac_reg[TOTH], E1000State),
        VMSTATE_UINT32(mac_reg[TOTL], E1000State),
        VMSTATE_UINT32(mac_reg[TPR], E1000State),
        VMSTATE_UINT32(mac_reg[TPT], E1000State),
        VMSTATE_UINT32(mac_reg[TXDCTL], E1000State),
        VMSTATE_UINT32(mac_reg[WUFC], E1000State),
        VMSTATE_UINT32(mac_reg[VET], E1000State),
        VMSTATE_UINT32_SUB_ARRAY(mac_reg, E1000State, RA, 32),
        VMSTATE_UINT32_SUB_ARRAY(mac_reg, E1000State, MTA, 128),
        VMSTATE_UINT32_SUB_ARRAY(mac_reg, E1000State, VFTA, 128),
        VMSTATE_END_OF_LIST()
    }
};

static const uint16_t e1000_eeprom_template[64] = {
    0x0000, 0x0000, 0x0000, 0x0000, 0xffff, 0x0000, 0x0000, 0x0000,
    0x3000, 0x1000, 0x6403, E1000_DEVID, 0x8086, E1000_DEVID, 0x8086, 0x3040,
    0x0008, 0x2000, 0x7e14, 0x0048, 0x1000, 0x00d8, 0x0000, 0x2700,
    0x6cc9, 0x3150, 0x0722, 0x040b, 0x0984, 0x0000, 0xc000, 0x0706,
    0x1008, 0x0000, 0x0f04, 0x7fff, 0x4d01, 0xffff, 0xffff, 0xffff,
    0xffff, 0xffff, 0xffff, 0xffff, 0xffff, 0xffff, 0xffff, 0xffff,
    0x0100, 0x4000, 0x121c, 0xffff, 0xffff, 0xffff, 0xffff, 0xffff,
    0xffff, 0xffff, 0xffff, 0xffff, 0xffff, 0xffff, 0xffff, 0x0000,
};

static const uint16_t phy_reg_init[] = {
    [PHY_CTRL] = 0x1140, [PHY_STATUS] = 0x796d, // link initially up
    [PHY_ID1] = 0x141, [PHY_ID2] = PHY_ID2_INIT,
    [PHY_1000T_CTRL] = 0x0e00, [M88E1000_PHY_SPEC_CTRL] = 0x360,
    [M88E1000_EXT_PHY_SPEC_CTRL] = 0x0d60, [PHY_AUTONEG_ADV] = 0xde1,
    [PHY_LP_ABILITY] = 0x1e0, [PHY_1000T_STATUS] = 0x3c00,
    [M88E1000_PHY_SPEC_STATUS] = 0xac00,
};

static const uint32_t mac_reg_init[] = {
    [PBA] = 0x00100030,
    [LEDCTL] = 0x602,
    [CTRL] = E1000_CTRL_SWDPIN2 | E1000_CTRL_SWDPIN0 |
             E1000_CTRL_SPD_1000 | E1000_CTRL_SLU,
    [STATUS] = 0x80000000 | E1000_STATUS_GIO_MASTER_ENABLE |
               E1000_STATUS_ASDV | E1000_STATUS_MTXCKOK |
               E1000_STATUS_SPEED_1000 | E1000_STATUS_FD |
               E1000_STATUS_LU,
    [MANC] = E1000_MANC_EN_MNG2HOST | E1000_MANC_RCV_TCO_EN |
             E1000_MANC_ARP_EN | E1000_MANC_0298_EN |
             E1000_MANC_RMCP_EN,
};

/* PCI interface */

static CPUWriteMemoryFunc * const e1000_mmio_write[] = {
    e1000_mmio_writeb, e1000_mmio_writew, e1000_mmio_writel
};

static CPUReadMemoryFunc * const e1000_mmio_read[] = {
    e1000_mmio_readb, e1000_mmio_readw, e1000_mmio_readl
};
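
/*
 * Map the register BAR. Most of the MMIO window is registered as coalesced
 * MMIO so that batched register writes stay cheap; the registers listed in
 * excluded_regs (interrupt registers, MDIC, TCTL and TDT) need their side
 * effects immediately and are left out of the coalesced ranges.
 */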
static void
e1000_mmio_map(PCIDevice *pci_dev, int region_num,
               pcibus_t addr, pcibus_t size, int type)
{
    E1000State *d = DO_UPCAST(E1000State, dev, pci_dev);
    int i;
    const uint32_t excluded_regs[] = {
        E1000_MDIC, E1000_ICR, E1000_ICS, E1000_IMS,
        E1000_IMC, E1000_TCTL, E1000_TDT, PNPMMIO_SIZE
    };

    DBGOUT(MMIO, "e1000_mmio_map addr=0x%08"FMT_PCIBUS" 0x%08"FMT_PCIBUS"\n",
           addr, size);

    cpu_register_physical_memory(addr, PNPMMIO_SIZE, d->mmio_index);
    qemu_register_coalesced_mmio(addr, excluded_regs[0]);

    for (i = 0; excluded_regs[i] != PNPMMIO_SIZE; i++)
        qemu_register_coalesced_mmio(addr + excluded_regs[i] + 4,
                                     excluded_regs[i + 1] -
                                     excluded_regs[i] - 4);
}

static void
e1000_cleanup(VLANClientState *nc)
{
    E1000State *s = DO_UPCAST(NICState, nc, nc)->opaque;

    s->nic = NULL;
}

static int
pci_e1000_uninit(PCIDevice *dev)
{
    E1000State *d = DO_UPCAST(E1000State, dev, dev);

    cpu_unregister_io_memory(d->mmio_index);
    qemu_del_vlan_client(&d->nic->nc);
    return 0;
}

static void e1000_reset(void *opaque)
{
    E1000State *d = opaque;

    memset(d->phy_reg, 0, sizeof d->phy_reg);
    memmove(d->phy_reg, phy_reg_init, sizeof phy_reg_init);
    memset(d->mac_reg, 0, sizeof d->mac_reg);
    memmove(d->mac_reg, mac_reg_init, sizeof mac_reg_init);
    d->rxbuf_min_shift = 1;
    memset(&d->tx, 0, sizeof d->tx);
}

static NetClientInfo net_e1000_info = {
    .type = NET_CLIENT_TYPE_NIC,
    .size = sizeof(NICState),
    .can_receive = e1000_can_receive,
    .receive = e1000_receive,
    .cleanup = e1000_cleanup,
    .link_status_changed = e1000_set_link_status,
};
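
/*
 * Device init: set up PCI config space, register the MMIO and I/O BARs,
 * seed eeprom_data from the template, patch in the configured MAC address
 * (words 0-2) and recompute the EEPROM checksum word so the standard words
 * sum to EEPROM_SUM, then create the NIC backend.
 */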
static int pci_e1000_init(PCIDevice *pci_dev)
{
    E1000State *d = DO_UPCAST(E1000State, dev, pci_dev);
    uint8_t *pci_conf;
    uint16_t checksum = 0;
    int i;
    uint8_t *macaddr;

    pci_conf = d->dev.config;

    /* TODO: we have no capabilities, so why is this bit set? */
    pci_set_word(pci_conf + PCI_STATUS, PCI_STATUS_CAP_LIST);
    /* TODO: RST# value should be 0, PCI spec 6.2.4 */
    pci_conf[PCI_CACHE_LINE_SIZE] = 0x10;
    /* TODO: RST# value should be 0 if programmable, PCI spec 6.2.4 */
    pci_conf[PCI_INTERRUPT_PIN] = 1; // interrupt pin A

    d->mmio_index = cpu_register_io_memory(e1000_mmio_read,
            e1000_mmio_write, d, DEVICE_LITTLE_ENDIAN);

    pci_register_bar(&d->dev, 0, PNPMMIO_SIZE,
                     PCI_BASE_ADDRESS_SPACE_MEMORY, e1000_mmio_map);

    pci_register_bar(&d->dev, 1, IOPORT_SIZE,
                     PCI_BASE_ADDRESS_SPACE_IO, ioport_map);

    memmove(d->eeprom_data, e1000_eeprom_template,
            sizeof e1000_eeprom_template);
    qemu_macaddr_default_if_unset(&d->conf.macaddr);
    macaddr = d->conf.macaddr.a;
    for (i = 0; i < 3; i++)
        d->eeprom_data[i] = (macaddr[2*i+1]<<8) | macaddr[2*i];
    for (i = 0; i < EEPROM_CHECKSUM_REG; i++)
        checksum += d->eeprom_data[i];
    checksum = (uint16_t) EEPROM_SUM - checksum;
    d->eeprom_data[EEPROM_CHECKSUM_REG] = checksum;

    d->nic = qemu_new_nic(&net_e1000_info, &d->conf,
                          d->dev.qdev.info->name, d->dev.qdev.id, d);

    qemu_format_nic_info_str(&d->nic->nc, macaddr);

    add_boot_device_path(d->conf.bootindex, &pci_dev->qdev, "/ethernet-phy@0");

    return 0;
}

static void qdev_e1000_reset(DeviceState *dev)
{
    E1000State *d = DO_UPCAST(E1000State, dev.qdev, dev);
    e1000_reset(d);
}

static PCIDeviceInfo e1000_info = {
    .qdev.name = "e1000",
    .qdev.desc = "Intel Gigabit Ethernet",
    .qdev.size = sizeof(E1000State),
    .qdev.reset = qdev_e1000_reset,
    .qdev.vmsd = &vmstate_e1000,
    .init = pci_e1000_init,
    .exit = pci_e1000_uninit,
    .romfile = "pxe-e1000.rom",
    .vendor_id = PCI_VENDOR_ID_INTEL,
    .device_id = E1000_DEVID,
    .revision = 0x03,
    .class_id = PCI_CLASS_NETWORK_ETHERNET,
    .qdev.props = (Property[]) {
        DEFINE_NIC_PROPERTIES(E1000State, conf),
        DEFINE_PROP_END_OF_LIST(),
    }
};

static void e1000_register_devices(void)
{
    pci_qdev_register(&e1000_info);
}

device_init(e1000_register_devices)