imx_fec.c

/*
 * i.MX Fast Ethernet Controller emulation.
 *
 * Copyright (c) 2013 Jean-Christophe Dubois. <jcd@tribudubois.net>
 *
 * Based on Coldfire Fast Ethernet Controller emulation.
 *
 * Copyright (c) 2007 CodeSourcery.
 *
 * This program is free software; you can redistribute it and/or modify it
 * under the terms of the GNU General Public License as published by the
 * Free Software Foundation; either version 2 of the License, or
 * (at your option) any later version.
 *
 * This program is distributed in the hope that it will be useful, but WITHOUT
 * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
 * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License
 * for more details.
 *
 * You should have received a copy of the GNU General Public License along
 * with this program; if not, see <http://www.gnu.org/licenses/>.
 */

#include "qemu/osdep.h"
#include "hw/irq.h"
#include "hw/net/imx_fec.h"
#include "hw/qdev-properties.h"
#include "migration/vmstate.h"
#include "sysemu/dma.h"
#include "qemu/log.h"
#include "qemu/module.h"
#include "net/checksum.h"
#include "net/eth.h"

/* For crc32 */
#include <zlib.h>

#ifndef DEBUG_IMX_FEC
#define DEBUG_IMX_FEC 0
#endif

#define FEC_PRINTF(fmt, args...) \
    do { \
        if (DEBUG_IMX_FEC) { \
            fprintf(stderr, "[%s]%s: " fmt , TYPE_IMX_FEC, \
                    __func__, ##args); \
        } \
    } while (0)

#ifndef DEBUG_IMX_PHY
#define DEBUG_IMX_PHY 0
#endif

#define PHY_PRINTF(fmt, args...) \
    do { \
        if (DEBUG_IMX_PHY) { \
            fprintf(stderr, "[%s.phy]%s: " fmt , TYPE_IMX_FEC, \
                    __func__, ##args); \
        } \
    } while (0)
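/*
 * Upper bound on the number of buffer descriptors walked in a single TX or
 * RX pass, so that a mis-programmed (e.g. circular) descriptor ring cannot
 * make the device model loop forever.
 */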
#define IMX_MAX_DESC 1024

static const char *imx_default_reg_name(IMXFECState *s, uint32_t index)
{
    static char tmp[20];

    sprintf(tmp, "index %d", index);
    return tmp;
}

static const char *imx_fec_reg_name(IMXFECState *s, uint32_t index)
{
    switch (index) {
    case ENET_FRBR:
        return "FRBR";
    case ENET_FRSR:
        return "FRSR";
    case ENET_MIIGSK_CFGR:
        return "MIIGSK_CFGR";
    case ENET_MIIGSK_ENR:
        return "MIIGSK_ENR";
    default:
        return imx_default_reg_name(s, index);
    }
}

static const char *imx_enet_reg_name(IMXFECState *s, uint32_t index)
{
    switch (index) {
    case ENET_RSFL:
        return "RSFL";
    case ENET_RSEM:
        return "RSEM";
    case ENET_RAEM:
        return "RAEM";
    case ENET_RAFL:
        return "RAFL";
    case ENET_TSEM:
        return "TSEM";
    case ENET_TAEM:
        return "TAEM";
    case ENET_TAFL:
        return "TAFL";
    case ENET_TIPG:
        return "TIPG";
    case ENET_FTRL:
        return "FTRL";
    case ENET_TACC:
        return "TACC";
    case ENET_RACC:
        return "RACC";
    case ENET_ATCR:
        return "ATCR";
    case ENET_ATVR:
        return "ATVR";
    case ENET_ATOFF:
        return "ATOFF";
    case ENET_ATPER:
        return "ATPER";
    case ENET_ATCOR:
        return "ATCOR";
    case ENET_ATINC:
        return "ATINC";
    case ENET_ATSTMP:
        return "ATSTMP";
    case ENET_TGSR:
        return "TGSR";
    case ENET_TCSR0:
        return "TCSR0";
    case ENET_TCCR0:
        return "TCCR0";
    case ENET_TCSR1:
        return "TCSR1";
    case ENET_TCCR1:
        return "TCCR1";
    case ENET_TCSR2:
        return "TCSR2";
    case ENET_TCCR2:
        return "TCCR2";
    case ENET_TCSR3:
        return "TCSR3";
    case ENET_TCCR3:
        return "TCCR3";
    default:
        return imx_default_reg_name(s, index);
    }
}

static const char *imx_eth_reg_name(IMXFECState *s, uint32_t index)
{
    switch (index) {
    case ENET_EIR:
        return "EIR";
    case ENET_EIMR:
        return "EIMR";
    case ENET_RDAR:
        return "RDAR";
    case ENET_TDAR:
        return "TDAR";
    case ENET_ECR:
        return "ECR";
    case ENET_MMFR:
        return "MMFR";
    case ENET_MSCR:
        return "MSCR";
    case ENET_MIBC:
        return "MIBC";
    case ENET_RCR:
        return "RCR";
    case ENET_TCR:
        return "TCR";
    case ENET_PALR:
        return "PALR";
    case ENET_PAUR:
        return "PAUR";
    case ENET_OPD:
        return "OPD";
    case ENET_IAUR:
        return "IAUR";
    case ENET_IALR:
        return "IALR";
    case ENET_GAUR:
        return "GAUR";
    case ENET_GALR:
        return "GALR";
    case ENET_TFWR:
        return "TFWR";
    case ENET_RDSR:
        return "RDSR";
    case ENET_TDSR:
        return "TDSR";
    case ENET_MRBR:
        return "MRBR";
    default:
        if (s->is_fec) {
            return imx_fec_reg_name(s, index);
        } else {
            return imx_enet_reg_name(s, index);
        }
    }
}

/*
 * Versions of this device with more than one TX descriptor save the
 * 2nd and 3rd descriptors in a subsection, to maintain migration
 * compatibility with previous versions of the device that only
 * supported a single descriptor.
 */
static bool imx_eth_is_multi_tx_ring(void *opaque)
{
    IMXFECState *s = IMX_FEC(opaque);

    return s->tx_ring_num > 1;
}

static const VMStateDescription vmstate_imx_eth_txdescs = {
    .name = "imx.fec/txdescs",
    .version_id = 1,
    .minimum_version_id = 1,
    .needed = imx_eth_is_multi_tx_ring,
    .fields = (VMStateField[]) {
        VMSTATE_UINT32(tx_descriptor[1], IMXFECState),
        VMSTATE_UINT32(tx_descriptor[2], IMXFECState),
        VMSTATE_END_OF_LIST()
    }
};

static const VMStateDescription vmstate_imx_eth = {
    .name = TYPE_IMX_FEC,
    .version_id = 2,
    .minimum_version_id = 2,
    .fields = (VMStateField[]) {
        VMSTATE_UINT32_ARRAY(regs, IMXFECState, ENET_MAX),
        VMSTATE_UINT32(rx_descriptor, IMXFECState),
        VMSTATE_UINT32(tx_descriptor[0], IMXFECState),
        VMSTATE_UINT32(phy_status, IMXFECState),
        VMSTATE_UINT32(phy_control, IMXFECState),
        VMSTATE_UINT32(phy_advertise, IMXFECState),
        VMSTATE_UINT32(phy_int, IMXFECState),
        VMSTATE_UINT32(phy_int_mask, IMXFECState),
        VMSTATE_END_OF_LIST()
    },
    .subsections = (const VMStateDescription * []) {
        &vmstate_imx_eth_txdescs,
        NULL
    },
};

#define PHY_INT_ENERGYON         (1 << 7)
#define PHY_INT_AUTONEG_COMPLETE (1 << 6)
#define PHY_INT_FAULT            (1 << 5)
#define PHY_INT_DOWN             (1 << 4)
#define PHY_INT_AUTONEG_LP       (1 << 3)
#define PHY_INT_PARFAULT         (1 << 2)
#define PHY_INT_AUTONEG_PAGE     (1 << 1)
static void imx_eth_update(IMXFECState *s);

/*
 * The MII phy could raise a GPIO to the processor which in turn
 * could be handled as an interrupt by the OS.
 * For now we don't handle any GPIO/interrupt line, so the OS will
 * have to poll for the PHY status.
 */
static void phy_update_irq(IMXFECState *s)
{
    imx_eth_update(s);
}
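
/*
 * The 0x0024 mask below covers the standard MII BMSR "link status" (bit 2)
 * and "auto-negotiation complete" (bit 5) bits, which this model sets and
 * clears together with the QEMU-level link state.
 */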
static void phy_update_link(IMXFECState *s)
{
    /* Autonegotiation status mirrors link status. */
    if (qemu_get_queue(s->nic)->link_down) {
        PHY_PRINTF("link is down\n");
        s->phy_status &= ~0x0024;
        s->phy_int |= PHY_INT_DOWN;
    } else {
        PHY_PRINTF("link is up\n");
        s->phy_status |= 0x0024;
        s->phy_int |= PHY_INT_ENERGYON;
        s->phy_int |= PHY_INT_AUTONEG_COMPLETE;
    }
    phy_update_irq(s);
}

static void imx_eth_set_link(NetClientState *nc)
{
    phy_update_link(IMX_FEC(qemu_get_nic_opaque(nc)));
}
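
/*
 * Reset values in terms of the standard MII register layout:
 * control (BMCR) 0x3000 selects 100 Mb/s with auto-negotiation enabled,
 * status (BMSR) 0x7809 reports 10/100 half/full duplex ability,
 * auto-negotiation capability and extended register support, and the
 * advertisement word 0x01e1 offers 10/100 half/full with the IEEE 802.3
 * selector field.
 */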
static void phy_reset(IMXFECState *s)
{
    s->phy_status = 0x7809;
    s->phy_control = 0x3000;
    s->phy_advertise = 0x01e1;
    s->phy_int_mask = 0;
    s->phy_int = 0;

    phy_update_link(s);
}

static uint32_t do_phy_read(IMXFECState *s, int reg)
{
    uint32_t val;

    if (reg > 31) {
        /* we only advertise one phy */
        return 0;
    }

    switch (reg) {
    case 0:     /* Basic Control */
        val = s->phy_control;
        break;
    case 1:     /* Basic Status */
        val = s->phy_status;
        break;
    case 2:     /* ID1 */
        val = 0x0007;
        break;
    case 3:     /* ID2 */
        val = 0xc0d1;
        break;
    case 4:     /* Auto-neg advertisement */
        val = s->phy_advertise;
        break;
    case 5:     /* Auto-neg Link Partner Ability */
        val = 0x0f71;
        break;
    case 6:     /* Auto-neg Expansion */
        val = 1;
        break;
    case 29:    /* Interrupt source. */
        val = s->phy_int;
        s->phy_int = 0;
        phy_update_irq(s);
        break;
    case 30:    /* Interrupt mask */
        val = s->phy_int_mask;
        break;
    case 17:
    case 18:
    case 27:
    case 31:
        qemu_log_mask(LOG_UNIMP, "[%s.phy]%s: reg %d not implemented\n",
                      TYPE_IMX_FEC, __func__, reg);
        val = 0;
        break;
    default:
        qemu_log_mask(LOG_GUEST_ERROR, "[%s.phy]%s: Bad address at offset %d\n",
                      TYPE_IMX_FEC, __func__, reg);
        val = 0;
        break;
    }

    PHY_PRINTF("read 0x%04x @ %d\n", val, reg);

    return val;
}

static void do_phy_write(IMXFECState *s, int reg, uint32_t val)
{
    PHY_PRINTF("write 0x%04x @ %d\n", val, reg);

    if (reg > 31) {
        /* we only advertise one phy */
        return;
    }

    switch (reg) {
    case 0:     /* Basic Control */
        if (val & 0x8000) {
            phy_reset(s);
        } else {
            s->phy_control = val & 0x7980;
            /* Complete autonegotiation immediately. */
            if (val & 0x1000) {
                s->phy_status |= 0x0020;
            }
        }
        break;
    case 4:     /* Auto-neg advertisement */
        s->phy_advertise = (val & 0x2d7f) | 0x80;
        break;
    case 30:    /* Interrupt mask */
        s->phy_int_mask = val & 0xff;
        phy_update_irq(s);
        break;
    case 17:
    case 18:
    case 27:
    case 31:
        qemu_log_mask(LOG_UNIMP, "[%s.phy]%s: reg %d not implemented\n",
                      TYPE_IMX_FEC, __func__, reg);
        break;
    default:
        qemu_log_mask(LOG_GUEST_ERROR, "[%s.phy]%s: Bad address at offset %d\n",
                      TYPE_IMX_FEC, __func__, reg);
        break;
    }
}
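
/*
 * Buffer descriptors live in guest memory and are accessed through the
 * system address space. The legacy FEC descriptor (IMXFECBufDesc) only
 * carries flags, length and data pointer, while the ENET descriptor
 * (IMXENETBufDesc) adds further control words (the option, status and
 * last_buffer fields used below), hence the two sets of helpers.
 */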
static void imx_fec_read_bd(IMXFECBufDesc *bd, dma_addr_t addr)
{
    dma_memory_read(&address_space_memory, addr, bd, sizeof(*bd));
}

static void imx_fec_write_bd(IMXFECBufDesc *bd, dma_addr_t addr)
{
    dma_memory_write(&address_space_memory, addr, bd, sizeof(*bd));
}

static void imx_enet_read_bd(IMXENETBufDesc *bd, dma_addr_t addr)
{
    dma_memory_read(&address_space_memory, addr, bd, sizeof(*bd));
}

static void imx_enet_write_bd(IMXENETBufDesc *bd, dma_addr_t addr)
{
    dma_memory_write(&address_space_memory, addr, bd, sizeof(*bd));
}

static void imx_eth_update(IMXFECState *s)
{
    /*
     * Previous versions of qemu had the ENET_INT_MAC and ENET_INT_TS_TIMER
     * interrupts swapped. This worked with older versions of Linux (4.14
     * and older) since Linux associated both interrupt lines with Ethernet
     * MAC interrupts. Specifically,
     * - Linux 4.15 and later have separate interrupt handlers for the MAC and
     *   timer interrupts. Those versions of Linux fail with versions of QEMU
     *   with swapped interrupt assignments.
     * - In Linux 4.14, both interrupt lines were registered with the Ethernet
     *   MAC interrupt handler. As a result, all versions of qemu happen to
     *   work, though that is accidental.
     * - In Linux 4.9 and older, the timer interrupt was registered directly
     *   with the Ethernet MAC interrupt handler. The MAC interrupt was
     *   redirected to a GPIO interrupt to work around erratum ERR006687.
     *   This was implemented using the SOC's IOMUX block. In qemu, this GPIO
     *   interrupt never fired since IOMUX is currently not supported in qemu.
     *   Linux instead received MAC interrupts on the timer interrupt.
     *   As a result, qemu versions with the swapped interrupt assignment work,
     *   albeit accidentally, but qemu versions with the correct interrupt
     *   assignment fail.
     *
     * To ensure that all versions of Linux work, generate ENET_INT_MAC
     * interrupts on both interrupt lines. This should be changed if and when
     * qemu supports IOMUX.
     */
    if (s->regs[ENET_EIR] & s->regs[ENET_EIMR] &
        (ENET_INT_MAC | ENET_INT_TS_TIMER)) {
        qemu_set_irq(s->irq[1], 1);
    } else {
        qemu_set_irq(s->irq[1], 0);
    }

    if (s->regs[ENET_EIR] & s->regs[ENET_EIMR] & ENET_INT_MAC) {
        qemu_set_irq(s->irq[0], 1);
    } else {
        qemu_set_irq(s->irq[0], 0);
    }
}
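
/*
 * Transmit path for the legacy FEC descriptor format: gather buffers from
 * the ring until a descriptor with ENET_BD_L (last in frame) is seen, send
 * the assembled frame, clear ENET_BD_R on each descriptor as it is
 * consumed, and wrap back to ENET_TDSR when a descriptor has ENET_BD_W set.
 */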
static void imx_fec_do_tx(IMXFECState *s)
{
    int frame_size = 0, descnt = 0;
    uint8_t *ptr = s->frame;
    uint32_t addr = s->tx_descriptor[0];

    while (descnt++ < IMX_MAX_DESC) {
        IMXFECBufDesc bd;
        int len;

        imx_fec_read_bd(&bd, addr);
        FEC_PRINTF("tx_bd %x flags %04x len %d data %08x\n",
                   addr, bd.flags, bd.length, bd.data);
        if ((bd.flags & ENET_BD_R) == 0) {
            /* Run out of descriptors to transmit. */
            FEC_PRINTF("tx_bd ran out of descriptors to transmit\n");
            break;
        }
        len = bd.length;
        if (frame_size + len > ENET_MAX_FRAME_SIZE) {
            len = ENET_MAX_FRAME_SIZE - frame_size;
            s->regs[ENET_EIR] |= ENET_INT_BABT;
        }
        dma_memory_read(&address_space_memory, bd.data, ptr, len);
        ptr += len;
        frame_size += len;
        if (bd.flags & ENET_BD_L) {
            /* Last buffer in frame. */
            qemu_send_packet(qemu_get_queue(s->nic), s->frame, frame_size);
            ptr = s->frame;
            frame_size = 0;
            s->regs[ENET_EIR] |= ENET_INT_TXF;
        }
        s->regs[ENET_EIR] |= ENET_INT_TXB;
        bd.flags &= ~ENET_BD_R;
        /* Write back the modified descriptor. */
        imx_fec_write_bd(&bd, addr);
        /* Advance to the next descriptor. */
        if ((bd.flags & ENET_BD_W) != 0) {
            addr = s->regs[ENET_TDSR];
        } else {
            addr += sizeof(bd);
        }
    }

    s->tx_descriptor[0] = addr;

    imx_eth_update(s);
}
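
/*
 * Transmit path for the enhanced ENET descriptor format. On top of the FEC
 * behaviour above, this variant supports three TX rings (TDAR, TDAR1,
 * TDAR2), optional insertion of IP header and protocol checksums
 * (ENET_BD_IINS / ENET_BD_PINS), and per-descriptor interrupt control
 * (ENET_BD_TX_INT).
 */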
static void imx_enet_do_tx(IMXFECState *s, uint32_t index)
{
    int frame_size = 0, descnt = 0;
    uint8_t *ptr = s->frame;
    uint32_t addr, int_txb, int_txf, tdsr;
    size_t ring;

    switch (index) {
    case ENET_TDAR:
        ring = 0;
        int_txb = ENET_INT_TXB;
        int_txf = ENET_INT_TXF;
        tdsr = ENET_TDSR;
        break;
    case ENET_TDAR1:
        ring = 1;
        int_txb = ENET_INT_TXB1;
        int_txf = ENET_INT_TXF1;
        tdsr = ENET_TDSR1;
        break;
    case ENET_TDAR2:
        ring = 2;
        int_txb = ENET_INT_TXB2;
        int_txf = ENET_INT_TXF2;
        tdsr = ENET_TDSR2;
        break;
    default:
        qemu_log_mask(LOG_GUEST_ERROR,
                      "%s: bogus value for index %x\n",
                      __func__, index);
        abort();
        break;
    }

    addr = s->tx_descriptor[ring];

    while (descnt++ < IMX_MAX_DESC) {
        IMXENETBufDesc bd;
        int len;

        imx_enet_read_bd(&bd, addr);
        FEC_PRINTF("tx_bd %x flags %04x len %d data %08x option %04x "
                   "status %04x\n", addr, bd.flags, bd.length, bd.data,
                   bd.option, bd.status);
        if ((bd.flags & ENET_BD_R) == 0) {
            /* Run out of descriptors to transmit. */
            break;
        }
        len = bd.length;
        if (frame_size + len > ENET_MAX_FRAME_SIZE) {
            len = ENET_MAX_FRAME_SIZE - frame_size;
            s->regs[ENET_EIR] |= ENET_INT_BABT;
        }
        dma_memory_read(&address_space_memory, bd.data, ptr, len);
        ptr += len;
        frame_size += len;
        if (bd.flags & ENET_BD_L) {
            if (bd.option & ENET_BD_PINS) {
                struct ip_header *ip_hd = PKT_GET_IP_HDR(s->frame);
                if (IP_HEADER_VERSION(ip_hd) == 4) {
                    net_checksum_calculate(s->frame, frame_size);
                }
            }
            if (bd.option & ENET_BD_IINS) {
                struct ip_header *ip_hd = PKT_GET_IP_HDR(s->frame);
                /* We compute checksum only for IPv4 frames */
                if (IP_HEADER_VERSION(ip_hd) == 4) {
                    uint16_t csum;
                    ip_hd->ip_sum = 0;
                    csum = net_raw_checksum((uint8_t *)ip_hd, sizeof(*ip_hd));
                    ip_hd->ip_sum = cpu_to_be16(csum);
                }
            }
            /* Last buffer in frame. */
            qemu_send_packet(qemu_get_queue(s->nic), s->frame, frame_size);
            ptr = s->frame;
            frame_size = 0;
            if (bd.option & ENET_BD_TX_INT) {
                s->regs[ENET_EIR] |= int_txf;
            }
            /* Indicate that we've updated the last buffer descriptor. */
            bd.last_buffer = ENET_BD_BDU;
        }
        if (bd.option & ENET_BD_TX_INT) {
            s->regs[ENET_EIR] |= int_txb;
        }
        bd.flags &= ~ENET_BD_R;
        /* Write back the modified descriptor. */
        imx_enet_write_bd(&bd, addr);
        /* Advance to the next descriptor. */
        if ((bd.flags & ENET_BD_W) != 0) {
            addr = s->regs[tdsr];
        } else {
            addr += sizeof(bd);
        }
    }

    s->tx_descriptor[ring] = addr;

    imx_eth_update(s);
}

static void imx_eth_do_tx(IMXFECState *s, uint32_t index)
{
    if (!s->is_fec && (s->regs[ENET_ECR] & ENET_ECR_EN1588)) {
        imx_enet_do_tx(s, index);
    } else {
        imx_fec_do_tx(s);
    }
}
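
/*
 * RDAR mirrors whether the descriptor currently pointed at by rx_descriptor
 * is empty (ENET_BD_E): if it is, receive is (re)enabled and any packets
 * queued by the net layer are flushed; otherwise the ring is considered
 * full and RDAR is cleared so imx_eth_can_receive() backs off.
 */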
static void imx_eth_enable_rx(IMXFECState *s, bool flush)
{
    IMXFECBufDesc bd;

    imx_fec_read_bd(&bd, s->rx_descriptor);

    s->regs[ENET_RDAR] = (bd.flags & ENET_BD_E) ? ENET_RDAR_RDAR : 0;

    if (!s->regs[ENET_RDAR]) {
        FEC_PRINTF("RX buffer full\n");
    } else if (flush) {
        qemu_flush_queued_packets(qemu_get_queue(s->nic));
    }
}

static void imx_eth_reset(DeviceState *d)
{
    IMXFECState *s = IMX_FEC(d);

    /* Reset the Device */
    memset(s->regs, 0, sizeof(s->regs));
    s->regs[ENET_ECR] = 0xf0000000;
    s->regs[ENET_MIBC] = 0xc0000000;
    s->regs[ENET_RCR] = 0x05ee0001;
    s->regs[ENET_OPD] = 0x00010000;

    s->regs[ENET_PALR] = (s->conf.macaddr.a[0] << 24)
                         | (s->conf.macaddr.a[1] << 16)
                         | (s->conf.macaddr.a[2] << 8)
                         | s->conf.macaddr.a[3];
    s->regs[ENET_PAUR] = (s->conf.macaddr.a[4] << 24)
                         | (s->conf.macaddr.a[5] << 16)
                         | 0x8808;

    if (s->is_fec) {
        s->regs[ENET_FRBR] = 0x00000600;
        s->regs[ENET_FRSR] = 0x00000500;
        s->regs[ENET_MIIGSK_ENR] = 0x00000006;
    } else {
        s->regs[ENET_RAEM] = 0x00000004;
        s->regs[ENET_RAFL] = 0x00000004;
        s->regs[ENET_TAEM] = 0x00000004;
        s->regs[ENET_TAFL] = 0x00000008;
        s->regs[ENET_TIPG] = 0x0000000c;
        s->regs[ENET_FTRL] = 0x000007ff;
        s->regs[ENET_ATPER] = 0x3b9aca00;
    }

    s->rx_descriptor = 0;
    memset(s->tx_descriptor, 0, sizeof(s->tx_descriptor));

    /* We also reset the PHY */
    phy_reset(s);
}

static uint32_t imx_default_read(IMXFECState *s, uint32_t index)
{
    qemu_log_mask(LOG_GUEST_ERROR, "[%s]%s: Bad register at offset 0x%"
                  PRIx32 "\n", TYPE_IMX_FEC, __func__, index * 4);
    return 0;
}

static uint32_t imx_fec_read(IMXFECState *s, uint32_t index)
{
    switch (index) {
    case ENET_FRBR:
    case ENET_FRSR:
    case ENET_MIIGSK_CFGR:
    case ENET_MIIGSK_ENR:
        return s->regs[index];
    default:
        return imx_default_read(s, index);
    }
}

static uint32_t imx_enet_read(IMXFECState *s, uint32_t index)
{
    switch (index) {
    case ENET_RSFL:
    case ENET_RSEM:
    case ENET_RAEM:
    case ENET_RAFL:
    case ENET_TSEM:
    case ENET_TAEM:
    case ENET_TAFL:
    case ENET_TIPG:
    case ENET_FTRL:
    case ENET_TACC:
    case ENET_RACC:
    case ENET_ATCR:
    case ENET_ATVR:
    case ENET_ATOFF:
    case ENET_ATPER:
    case ENET_ATCOR:
    case ENET_ATINC:
    case ENET_ATSTMP:
    case ENET_TGSR:
    case ENET_TCSR0:
    case ENET_TCCR0:
    case ENET_TCSR1:
    case ENET_TCCR1:
    case ENET_TCSR2:
    case ENET_TCCR2:
    case ENET_TCSR3:
    case ENET_TCCR3:
        return s->regs[index];
    default:
        return imx_default_read(s, index);
    }
}

static uint64_t imx_eth_read(void *opaque, hwaddr offset, unsigned size)
{
    uint32_t value = 0;
    IMXFECState *s = IMX_FEC(opaque);
    uint32_t index = offset >> 2;

    switch (index) {
    case ENET_EIR:
    case ENET_EIMR:
    case ENET_RDAR:
    case ENET_TDAR:
    case ENET_ECR:
    case ENET_MMFR:
    case ENET_MSCR:
    case ENET_MIBC:
    case ENET_RCR:
    case ENET_TCR:
    case ENET_PALR:
    case ENET_PAUR:
    case ENET_OPD:
    case ENET_IAUR:
    case ENET_IALR:
    case ENET_GAUR:
    case ENET_GALR:
    case ENET_TFWR:
    case ENET_RDSR:
    case ENET_TDSR:
    case ENET_MRBR:
        value = s->regs[index];
        break;
    default:
        if (s->is_fec) {
            value = imx_fec_read(s, index);
        } else {
            value = imx_enet_read(s, index);
        }
        break;
    }

    FEC_PRINTF("reg[%s] => 0x%" PRIx32 "\n", imx_eth_reg_name(s, index),
               value);

    return value;
}

static void imx_default_write(IMXFECState *s, uint32_t index, uint32_t value)
{
    qemu_log_mask(LOG_GUEST_ERROR, "[%s]%s: Bad address at offset 0x%"
                  PRIx32 "\n", TYPE_IMX_FEC, __func__, index * 4);
    return;
}

static void imx_fec_write(IMXFECState *s, uint32_t index, uint32_t value)
{
    switch (index) {
    case ENET_FRBR:
        /* FRBR is read only */
        qemu_log_mask(LOG_GUEST_ERROR, "[%s]%s: Register FRBR is read only\n",
                      TYPE_IMX_FEC, __func__);
        break;
    case ENET_FRSR:
        s->regs[index] = (value & 0x000003fc) | 0x00000400;
        break;
    case ENET_MIIGSK_CFGR:
        s->regs[index] = value & 0x00000053;
        break;
    case ENET_MIIGSK_ENR:
        s->regs[index] = (value & 0x00000002) ? 0x00000006 : 0;
        break;
    default:
        imx_default_write(s, index, value);
        break;
    }
}

static void imx_enet_write(IMXFECState *s, uint32_t index, uint32_t value)
{
    switch (index) {
    case ENET_RSFL:
    case ENET_RSEM:
    case ENET_RAEM:
    case ENET_RAFL:
    case ENET_TSEM:
    case ENET_TAEM:
    case ENET_TAFL:
        s->regs[index] = value & 0x000001ff;
        break;
    case ENET_TIPG:
        s->regs[index] = value & 0x0000001f;
        break;
    case ENET_FTRL:
        s->regs[index] = value & 0x00003fff;
        break;
    case ENET_TACC:
        s->regs[index] = value & 0x00000019;
        break;
    case ENET_RACC:
        s->regs[index] = value & 0x000000C7;
        break;
    case ENET_ATCR:
        s->regs[index] = value & 0x00002a9d;
        break;
    case ENET_ATVR:
    case ENET_ATOFF:
    case ENET_ATPER:
        s->regs[index] = value;
        break;
    case ENET_ATSTMP:
        /* ATSTMP is read only */
        qemu_log_mask(LOG_GUEST_ERROR, "[%s]%s: Register ATSTMP is read only\n",
                      TYPE_IMX_FEC, __func__);
        break;
    case ENET_ATCOR:
        s->regs[index] = value & 0x7fffffff;
        break;
    case ENET_ATINC:
        s->regs[index] = value & 0x00007f7f;
        break;
    case ENET_TGSR:
        /* implement clear timer flag */
        value = value & 0x0000000f;
        break;
    case ENET_TCSR0:
    case ENET_TCSR1:
    case ENET_TCSR2:
    case ENET_TCSR3:
        value = value & 0x000000fd;
        break;
    case ENET_TCCR0:
    case ENET_TCCR1:
    case ENET_TCCR2:
    case ENET_TCCR3:
        s->regs[index] = value;
        break;
    default:
        imx_default_write(s, index, value);
        break;
    }
}

static void imx_eth_write(void *opaque, hwaddr offset, uint64_t value,
                          unsigned size)
{
    IMXFECState *s = IMX_FEC(opaque);
    const bool single_tx_ring = !imx_eth_is_multi_tx_ring(s);
    uint32_t index = offset >> 2;

    FEC_PRINTF("reg[%s] <= 0x%" PRIx32 "\n", imx_eth_reg_name(s, index),
               (uint32_t)value);

    switch (index) {
    case ENET_EIR:
        s->regs[index] &= ~value;
        break;
    case ENET_EIMR:
        s->regs[index] = value;
        break;
    case ENET_RDAR:
        if (s->regs[ENET_ECR] & ENET_ECR_ETHEREN) {
            if (!s->regs[index]) {
                imx_eth_enable_rx(s, true);
            }
        } else {
            s->regs[index] = 0;
        }
        break;
    case ENET_TDAR1:    /* FALLTHROUGH */
    case ENET_TDAR2:    /* FALLTHROUGH */
        if (unlikely(single_tx_ring)) {
            qemu_log_mask(LOG_GUEST_ERROR,
                          "[%s]%s: trying to access TDAR2 or TDAR1\n",
                          TYPE_IMX_FEC, __func__);
            return;
        }
    case ENET_TDAR:     /* FALLTHROUGH */
        if (s->regs[ENET_ECR] & ENET_ECR_ETHEREN) {
            s->regs[index] = ENET_TDAR_TDAR;
            imx_eth_do_tx(s, index);
        }
        s->regs[index] = 0;
        break;
    case ENET_ECR:
        if (value & ENET_ECR_RESET) {
            return imx_eth_reset(DEVICE(s));
        }
        s->regs[index] = value;
        if ((s->regs[index] & ENET_ECR_ETHEREN) == 0) {
            s->regs[ENET_RDAR] = 0;
            s->rx_descriptor = s->regs[ENET_RDSR];
            s->regs[ENET_TDAR] = 0;
            s->regs[ENET_TDAR1] = 0;
            s->regs[ENET_TDAR2] = 0;
            s->tx_descriptor[0] = s->regs[ENET_TDSR];
            s->tx_descriptor[1] = s->regs[ENET_TDSR1];
            s->tx_descriptor[2] = s->regs[ENET_TDSR2];
        }
        break;
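    /*
     * MMFR carries a standard MII management frame: bit 29 of the opcode
     * field distinguishes reads from writes, the combined PHY/register
     * address occupies bits 18-27, and the 16-bit data field sits in the
     * low half. do_phy_read()/do_phy_write() treat anything above register
     * 31 as an access to a PHY address other than 0, which this model
     * ignores.
     */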
    case ENET_MMFR:
        s->regs[index] = value;
        if (extract32(value, 29, 1)) {
            /* This is a read operation */
            s->regs[ENET_MMFR] = deposit32(s->regs[ENET_MMFR], 0, 16,
                                           do_phy_read(s,
                                                       extract32(value,
                                                                 18, 10)));
        } else {
            /* This is a write operation */
            do_phy_write(s, extract32(value, 18, 10), extract32(value, 0, 16));
        }
        /* raise the interrupt as the PHY operation is done */
        s->regs[ENET_EIR] |= ENET_INT_MII;
        break;
    case ENET_MSCR:
        s->regs[index] = value & 0xfe;
        break;
    case ENET_MIBC:
        /* TODO: Implement MIB. */
        s->regs[index] = (value & 0x80000000) ? 0xc0000000 : 0;
        break;
    case ENET_RCR:
        s->regs[index] = value & 0x07ff003f;
        /* TODO: Implement LOOP mode. */
        break;
    case ENET_TCR:
        /* We transmit immediately, so raise GRA immediately. */
        s->regs[index] = value;
        if (value & 1) {
            s->regs[ENET_EIR] |= ENET_INT_GRA;
        }
        break;
    case ENET_PALR:
        s->regs[index] = value;
        s->conf.macaddr.a[0] = value >> 24;
        s->conf.macaddr.a[1] = value >> 16;
        s->conf.macaddr.a[2] = value >> 8;
        s->conf.macaddr.a[3] = value;
        break;
    case ENET_PAUR:
        s->regs[index] = (value | 0x0000ffff) & 0xffff8808;
        s->conf.macaddr.a[4] = value >> 24;
        s->conf.macaddr.a[5] = value >> 16;
        break;
    case ENET_OPD:
        s->regs[index] = (value & 0x0000ffff) | 0x00010000;
        break;
    case ENET_IAUR:
    case ENET_IALR:
    case ENET_GAUR:
    case ENET_GALR:
        /* TODO: implement MAC hash filtering. */
        break;
    case ENET_TFWR:
        if (s->is_fec) {
            s->regs[index] = value & 0x3;
        } else {
            s->regs[index] = value & 0x13f;
        }
        break;
    case ENET_RDSR:
        if (s->is_fec) {
            s->regs[index] = value & ~3;
        } else {
            s->regs[index] = value & ~7;
        }
        s->rx_descriptor = s->regs[index];
        break;
    case ENET_TDSR:
        if (s->is_fec) {
            s->regs[index] = value & ~3;
        } else {
            s->regs[index] = value & ~7;
        }
        s->tx_descriptor[0] = s->regs[index];
        break;
    case ENET_TDSR1:
        if (unlikely(single_tx_ring)) {
            qemu_log_mask(LOG_GUEST_ERROR,
                          "[%s]%s: trying to access TDSR1\n",
                          TYPE_IMX_FEC, __func__);
            return;
        }
        s->regs[index] = value & ~7;
        s->tx_descriptor[1] = s->regs[index];
        break;
    case ENET_TDSR2:
        if (unlikely(single_tx_ring)) {
            qemu_log_mask(LOG_GUEST_ERROR,
                          "[%s]%s: trying to access TDSR2\n",
                          TYPE_IMX_FEC, __func__);
            return;
        }
        s->regs[index] = value & ~7;
        s->tx_descriptor[2] = s->regs[index];
        break;
    case ENET_MRBR:
        s->regs[index] = value & 0x00003ff0;
        break;
    default:
        if (s->is_fec) {
            imx_fec_write(s, index, value);
        } else {
            imx_enet_write(s, index, value);
        }
        return;
    }

    imx_eth_update(s);
}

static int imx_eth_can_receive(NetClientState *nc)
{
    IMXFECState *s = IMX_FEC(qemu_get_nic_opaque(nc));

    FEC_PRINTF("\n");

    return !!s->regs[ENET_RDAR];
}
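
/*
 * Receive path for the legacy FEC descriptor format. The frame is copied
 * into successive RX buffers of up to ENET_MRBR bytes each, a 4-byte FCS
 * computed with crc32() is appended, truncation/length error flags are set
 * as needed, and the descriptor carrying the end of the frame gets
 * ENET_BD_L plus those flags. The ring wraps when ENET_BD_W is seen.
 */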
static ssize_t imx_fec_receive(NetClientState *nc, const uint8_t *buf,
                               size_t len)
{
    IMXFECState *s = IMX_FEC(qemu_get_nic_opaque(nc));
    IMXFECBufDesc bd;
    uint32_t flags = 0;
    uint32_t addr;
    uint32_t crc;
    uint32_t buf_addr;
    uint8_t *crc_ptr;
    unsigned int buf_len;
    size_t size = len;

    FEC_PRINTF("len %d\n", (int)size);

    if (!s->regs[ENET_RDAR]) {
        qemu_log_mask(LOG_GUEST_ERROR, "[%s]%s: Unexpected packet\n",
                      TYPE_IMX_FEC, __func__);
        return 0;
    }

    /* 4 bytes for the CRC. */
    size += 4;
    crc = cpu_to_be32(crc32(~0, buf, size));
    crc_ptr = (uint8_t *) &crc;

    /* Huge frames are truncated. */
    if (size > ENET_MAX_FRAME_SIZE) {
        size = ENET_MAX_FRAME_SIZE;
        flags |= ENET_BD_TR | ENET_BD_LG;
    }

    /* Frames larger than the user limit just set error flags. */
    if (size > (s->regs[ENET_RCR] >> 16)) {
        flags |= ENET_BD_LG;
    }

    addr = s->rx_descriptor;
    while (size > 0) {
        imx_fec_read_bd(&bd, addr);
        if ((bd.flags & ENET_BD_E) == 0) {
            /* No descriptors available. Bail out. */
            /*
             * FIXME: This is wrong. We should probably either
             * save the remainder for when more RX buffers are
             * available, or flag an error.
             */
            qemu_log_mask(LOG_GUEST_ERROR, "[%s]%s: Lost end of frame\n",
                          TYPE_IMX_FEC, __func__);
            break;
        }
        buf_len = (size <= s->regs[ENET_MRBR]) ? size : s->regs[ENET_MRBR];
        bd.length = buf_len;
        size -= buf_len;
        FEC_PRINTF("rx_bd 0x%x length %d\n", addr, bd.length);
        /* The last 4 bytes are the CRC. */
        if (size < 4) {
            buf_len += size - 4;
        }
        buf_addr = bd.data;
        dma_memory_write(&address_space_memory, buf_addr, buf, buf_len);
        buf += buf_len;
        if (size < 4) {
            dma_memory_write(&address_space_memory, buf_addr + buf_len,
                             crc_ptr, 4 - size);
            crc_ptr += 4 - size;
        }
        bd.flags &= ~ENET_BD_E;
        if (size == 0) {
            /* Last buffer in frame. */
            bd.flags |= flags | ENET_BD_L;
            FEC_PRINTF("rx frame flags %04x\n", bd.flags);
            s->regs[ENET_EIR] |= ENET_INT_RXF;
        } else {
            s->regs[ENET_EIR] |= ENET_INT_RXB;
        }
        imx_fec_write_bd(&bd, addr);
        /* Advance to the next descriptor. */
        if ((bd.flags & ENET_BD_W) != 0) {
            addr = s->regs[ENET_RDSR];
        } else {
            addr += sizeof(bd);
        }
    }
    s->rx_descriptor = addr;
    imx_eth_enable_rx(s, false);
    imx_eth_update(s);
    return len;
}
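
/*
 * Receive path for the enhanced ENET descriptor format. It follows the FEC
 * path above, with two additions: the truncation limit comes from ENET_FTRL
 * rather than the fixed maximum frame size, and when RACC[SHIFT16] is set
 * two zero bytes are inserted ahead of the frame so the payload ends up
 * 4-byte aligned in guest memory. RX interrupts are also gated per
 * descriptor by ENET_BD_RX_INT.
 */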
static ssize_t imx_enet_receive(NetClientState *nc, const uint8_t *buf,
                                size_t len)
{
    IMXFECState *s = IMX_FEC(qemu_get_nic_opaque(nc));
    IMXENETBufDesc bd;
    uint32_t flags = 0;
    uint32_t addr;
    uint32_t crc;
    uint32_t buf_addr;
    uint8_t *crc_ptr;
    unsigned int buf_len;
    size_t size = len;
    bool shift16 = s->regs[ENET_RACC] & ENET_RACC_SHIFT16;

    FEC_PRINTF("len %d\n", (int)size);

    if (!s->regs[ENET_RDAR]) {
        qemu_log_mask(LOG_GUEST_ERROR, "[%s]%s: Unexpected packet\n",
                      TYPE_IMX_FEC, __func__);
        return 0;
    }

    /* 4 bytes for the CRC. */
    size += 4;
    crc = cpu_to_be32(crc32(~0, buf, size));
    crc_ptr = (uint8_t *) &crc;

    if (shift16) {
        size += 2;
    }

    /* Huge frames are truncated. */
    if (size > s->regs[ENET_FTRL]) {
        size = s->regs[ENET_FTRL];
        flags |= ENET_BD_TR | ENET_BD_LG;
    }

    /* Frames larger than the user limit just set error flags. */
    if (size > (s->regs[ENET_RCR] >> 16)) {
        flags |= ENET_BD_LG;
    }

    addr = s->rx_descriptor;
    while (size > 0) {
        imx_enet_read_bd(&bd, addr);
        if ((bd.flags & ENET_BD_E) == 0) {
            /* No descriptors available. Bail out. */
            /*
             * FIXME: This is wrong. We should probably either
             * save the remainder for when more RX buffers are
             * available, or flag an error.
             */
            qemu_log_mask(LOG_GUEST_ERROR, "[%s]%s: Lost end of frame\n",
                          TYPE_IMX_FEC, __func__);
            break;
        }
        buf_len = MIN(size, s->regs[ENET_MRBR]);
        bd.length = buf_len;
        size -= buf_len;
        FEC_PRINTF("rx_bd 0x%x length %d\n", addr, bd.length);
        /* The last 4 bytes are the CRC. */
        if (size < 4) {
            buf_len += size - 4;
        }
        buf_addr = bd.data;
        if (shift16) {
            /*
             * If SHIFT16 bit of ENETx_RACC register is set we need to
             * align the payload to 4-byte boundary.
             */
            const uint8_t zeros[2] = { 0 };

            dma_memory_write(&address_space_memory, buf_addr,
                             zeros, sizeof(zeros));

            buf_addr += sizeof(zeros);
            buf_len -= sizeof(zeros);

            /* We only do this once per Ethernet frame */
            shift16 = false;
        }
        dma_memory_write(&address_space_memory, buf_addr, buf, buf_len);
        buf += buf_len;
        if (size < 4) {
            dma_memory_write(&address_space_memory, buf_addr + buf_len,
                             crc_ptr, 4 - size);
            crc_ptr += 4 - size;
        }
        bd.flags &= ~ENET_BD_E;
        if (size == 0) {
            /* Last buffer in frame. */
            bd.flags |= flags | ENET_BD_L;
            FEC_PRINTF("rx frame flags %04x\n", bd.flags);
            /* Indicate that we've updated the last buffer descriptor. */
            bd.last_buffer = ENET_BD_BDU;
            if (bd.option & ENET_BD_RX_INT) {
                s->regs[ENET_EIR] |= ENET_INT_RXF;
            }
        } else {
            if (bd.option & ENET_BD_RX_INT) {
                s->regs[ENET_EIR] |= ENET_INT_RXB;
            }
        }
        imx_enet_write_bd(&bd, addr);
        /* Advance to the next descriptor. */
        if ((bd.flags & ENET_BD_W) != 0) {
            addr = s->regs[ENET_RDSR];
        } else {
            addr += sizeof(bd);
        }
    }
    s->rx_descriptor = addr;
    imx_eth_enable_rx(s, false);
    imx_eth_update(s);
    return len;
}

static ssize_t imx_eth_receive(NetClientState *nc, const uint8_t *buf,
                               size_t len)
{
    IMXFECState *s = IMX_FEC(qemu_get_nic_opaque(nc));

    if (!s->is_fec && (s->regs[ENET_ECR] & ENET_ECR_EN1588)) {
        return imx_enet_receive(nc, buf, len);
    } else {
        return imx_fec_receive(nc, buf, len);
    }
}

static const MemoryRegionOps imx_eth_ops = {
    .read = imx_eth_read,
    .write = imx_eth_write,
    .valid.min_access_size = 4,
    .valid.max_access_size = 4,
    .endianness = DEVICE_NATIVE_ENDIAN,
};

static void imx_eth_cleanup(NetClientState *nc)
{
    IMXFECState *s = IMX_FEC(qemu_get_nic_opaque(nc));

    s->nic = NULL;
}

static NetClientInfo imx_eth_net_info = {
    .type = NET_CLIENT_DRIVER_NIC,
    .size = sizeof(NICState),
    .can_receive = imx_eth_can_receive,
    .receive = imx_eth_receive,
    .cleanup = imx_eth_cleanup,
    .link_status_changed = imx_eth_set_link,
};

static void imx_eth_realize(DeviceState *dev, Error **errp)
{
    IMXFECState *s = IMX_FEC(dev);
    SysBusDevice *sbd = SYS_BUS_DEVICE(dev);

    memory_region_init_io(&s->iomem, OBJECT(dev), &imx_eth_ops, s,
                          TYPE_IMX_FEC, FSL_IMX25_FEC_SIZE);
    sysbus_init_mmio(sbd, &s->iomem);
    sysbus_init_irq(sbd, &s->irq[0]);
    sysbus_init_irq(sbd, &s->irq[1]);

    qemu_macaddr_default_if_unset(&s->conf.macaddr);

    s->nic = qemu_new_nic(&imx_eth_net_info, &s->conf,
                          object_get_typename(OBJECT(dev)),
                          DEVICE(dev)->id, s);

    qemu_format_nic_info_str(qemu_get_queue(s->nic), s->conf.macaddr.a);
}

static Property imx_eth_properties[] = {
    DEFINE_NIC_PROPERTIES(IMXFECState, conf),
    DEFINE_PROP_UINT32("tx-ring-num", IMXFECState, tx_ring_num, 1),
    DEFINE_PROP_END_OF_LIST(),
};
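
/*
 * "tx-ring-num" is intended to be set by the SoC model that instantiates
 * the device; a value greater than 1 enables the TDAR1/TDAR2 rings above
 * and causes the extra TX descriptors to be migrated via the
 * imx.fec/txdescs subsection.
 */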
static void imx_eth_class_init(ObjectClass *klass, void *data)
{
    DeviceClass *dc = DEVICE_CLASS(klass);

    dc->vmsd = &vmstate_imx_eth;
    dc->reset = imx_eth_reset;
    dc->props = imx_eth_properties;
    dc->realize = imx_eth_realize;
    dc->desc = "i.MX FEC/ENET Ethernet Controller";
}

static void imx_fec_init(Object *obj)
{
    IMXFECState *s = IMX_FEC(obj);

    s->is_fec = true;
}

static void imx_enet_init(Object *obj)
{
    IMXFECState *s = IMX_FEC(obj);

    s->is_fec = false;
}

static const TypeInfo imx_fec_info = {
    .name = TYPE_IMX_FEC,
    .parent = TYPE_SYS_BUS_DEVICE,
    .instance_size = sizeof(IMXFECState),
    .instance_init = imx_fec_init,
    .class_init = imx_eth_class_init,
};

static const TypeInfo imx_enet_info = {
    .name = TYPE_IMX_ENET,
    .parent = TYPE_IMX_FEC,
    .instance_init = imx_enet_init,
};

static void imx_eth_register_types(void)
{
    type_register_static(&imx_fec_info);
    type_register_static(&imx_enet_info);
}

type_init(imx_eth_register_types)