imx_fec.c 38 KB

12345678910111213141516171819202122232425262728293031323334353637383940414243444546474849505152535455565758596061626364656667686970717273747576777879808182838485868788899091929394959697989910010110210310410510610710810911011111211311411511611711811912012112212312412512612712812913013113213313413513613713813914014114214314414514614714814915015115215315415515615715815916016116216316416516616716816917017117217317417517617717817918018118218318418518618718818919019119219319419519619719819920020120220320420520620720820921021121221321421521621721821922022122222322422522622722822923023123223323423523623723823924024124224324424524624724824925025125225325425525625725825926026126226326426526626726826927027127227327427527627727827928028128228328428528628728828929029129229329429529629729829930030130230330430530630730830931031131231331431531631731831932032132232332432532632732832933033133233333433533633733833934034134234334434534634734834935035135235335435535635735835936036136236336436536636736836937037137237337437537637737837938038138238338438538638738838939039139239339439539639739839940040140240340440540640740840941041141241341441541641741841942042142242342442542642742842943043143243343443543643743843944044144244344444544644744844945045145245345445545645745845946046146246346446546646746846947047147247347447547647747847948048148248348448548648748848949049149249349449549649749849950050150250350450550650750850951051151251351451551651751851952052152252352452552652752852953053153253353453553653753853954054154254354454554654754854955055155255355455555655755855956056156256356456556656756856957057157257357457557657757857958058158258358458558658758858959059159259359459559659759859960060160260360460560660760860961061161261361461561661761861962062162262362462562662762862963063163263363463563663763863964064164264364464564664764864965065165265365465565665765865966066166266366466566666766866967067167267367467567667767867968068168268368468568668768868969069169269369469569669769869970070170270
37047057067077087097107117127137147157167177187197207217227237247257267277287297307317327337347357367377387397407417427437447457467477487497507517527537547557567577587597607617627637647657667677687697707717727737747757767777787797807817827837847857867877887897907917927937947957967977987998008018028038048058068078088098108118128138148158168178188198208218228238248258268278288298308318328338348358368378388398408418428438448458468478488498508518528538548558568578588598608618628638648658668678688698708718728738748758768778788798808818828838848858868878888898908918928938948958968978988999009019029039049059069079089099109119129139149159169179189199209219229239249259269279289299309319329339349359369379389399409419429439449459469479489499509519529539549559569579589599609619629639649659669679689699709719729739749759769779789799809819829839849859869879889899909919929939949959969979989991000100110021003100410051006100710081009101010111012101310141015101610171018101910201021102210231024102510261027102810291030103110321033103410351036103710381039104010411042104310441045104610471048104910501051105210531054105510561057105810591060106110621063106410651066106710681069107010711072107310741075107610771078107910801081108210831084108510861087108810891090109110921093109410951096109710981099110011011102110311041105110611071108110911101111111211131114111511161117111811191120112111221123112411251126112711281129113011311132113311341135113611371138113911401141114211431144114511461147114811491150115111521153115411551156115711581159116011611162116311641165116611671168116911701171117211731174117511761177117811791180118111821183118411851186118711881189119011911192119311941195119611971198119912001201120212031204120512061207120812091210121112121213121412151216121712181219122012211222122312241225122612271228122912301231123212331234123512361237123812391240124112421243124412451246124712481249125012511252125312541255125612571258125912601261126212631264126512661267126812691270127112721273127412751276127
7127812791280128112821283128412851286128712881289129012911292129312941295129612971298129913001301130213031304130513061307130813091310131113121313131413151316131713181319132013211322132313241325132613271328132913301331133213331334133513361337133813391340134113421343134413451346134713481349135013511352135313541355135613571358135913601361136213631364136513661367136813691370137113721373137413751376137713781379138013811382138313841385138613871388138913901391139213931394139513961397
  1. /*
  2. * i.MX Fast Ethernet Controller emulation.
  3. *
  4. * Copyright (c) 2013 Jean-Christophe Dubois. <jcd@tribudubois.net>
  5. *
  6. * Based on Coldfire Fast Ethernet Controller emulation.
  7. *
  8. * Copyright (c) 2007 CodeSourcery.
  9. *
  10. * This program is free software; you can redistribute it and/or modify it
  11. * under the terms of the GNU General Public License as published by the
  12. * Free Software Foundation; either version 2 of the License, or
  13. * (at your option) any later version.
  14. *
  15. * This program is distributed in the hope that it will be useful, but WITHOUT
  16. * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
  17. * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License
  18. * for more details.
  19. *
  20. * You should have received a copy of the GNU General Public License along
  21. * with this program; if not, see <http://www.gnu.org/licenses/>.
  22. */
  23. #include "qemu/osdep.h"
  24. #include "hw/irq.h"
  25. #include "hw/net/imx_fec.h"
  26. #include "hw/qdev-properties.h"
  27. #include "migration/vmstate.h"
  28. #include "sysemu/dma.h"
  29. #include "qemu/log.h"
  30. #include "qemu/module.h"
  31. #include "net/checksum.h"
  32. #include "net/eth.h"
  33. #include "trace.h"
  34. /* For crc32 */
  35. #include <zlib.h>
  36. #define IMX_MAX_DESC 1024
  37. static const char *imx_default_reg_name(IMXFECState *s, uint32_t index)
  38. {
  39. static char tmp[20];
  40. sprintf(tmp, "index %d", index);
  41. return tmp;
  42. }
  43. static const char *imx_fec_reg_name(IMXFECState *s, uint32_t index)
  44. {
  45. switch (index) {
  46. case ENET_FRBR:
  47. return "FRBR";
  48. case ENET_FRSR:
  49. return "FRSR";
  50. case ENET_MIIGSK_CFGR:
  51. return "MIIGSK_CFGR";
  52. case ENET_MIIGSK_ENR:
  53. return "MIIGSK_ENR";
  54. default:
  55. return imx_default_reg_name(s, index);
  56. }
  57. }
  58. static const char *imx_enet_reg_name(IMXFECState *s, uint32_t index)
  59. {
  60. switch (index) {
  61. case ENET_RSFL:
  62. return "RSFL";
  63. case ENET_RSEM:
  64. return "RSEM";
  65. case ENET_RAEM:
  66. return "RAEM";
  67. case ENET_RAFL:
  68. return "RAFL";
  69. case ENET_TSEM:
  70. return "TSEM";
  71. case ENET_TAEM:
  72. return "TAEM";
  73. case ENET_TAFL:
  74. return "TAFL";
  75. case ENET_TIPG:
  76. return "TIPG";
  77. case ENET_FTRL:
  78. return "FTRL";
  79. case ENET_TACC:
  80. return "TACC";
  81. case ENET_RACC:
  82. return "RACC";
  83. case ENET_ATCR:
  84. return "ATCR";
  85. case ENET_ATVR:
  86. return "ATVR";
  87. case ENET_ATOFF:
  88. return "ATOFF";
  89. case ENET_ATPER:
  90. return "ATPER";
  91. case ENET_ATCOR:
  92. return "ATCOR";
  93. case ENET_ATINC:
  94. return "ATINC";
  95. case ENET_ATSTMP:
  96. return "ATSTMP";
  97. case ENET_TGSR:
  98. return "TGSR";
  99. case ENET_TCSR0:
  100. return "TCSR0";
  101. case ENET_TCCR0:
  102. return "TCCR0";
  103. case ENET_TCSR1:
  104. return "TCSR1";
  105. case ENET_TCCR1:
  106. return "TCCR1";
  107. case ENET_TCSR2:
  108. return "TCSR2";
  109. case ENET_TCCR2:
  110. return "TCCR2";
  111. case ENET_TCSR3:
  112. return "TCSR3";
  113. case ENET_TCCR3:
  114. return "TCCR3";
  115. default:
  116. return imx_default_reg_name(s, index);
  117. }
  118. }
  119. static const char *imx_eth_reg_name(IMXFECState *s, uint32_t index)
  120. {
  121. switch (index) {
  122. case ENET_EIR:
  123. return "EIR";
  124. case ENET_EIMR:
  125. return "EIMR";
  126. case ENET_RDAR:
  127. return "RDAR";
  128. case ENET_TDAR:
  129. return "TDAR";
  130. case ENET_ECR:
  131. return "ECR";
  132. case ENET_MMFR:
  133. return "MMFR";
  134. case ENET_MSCR:
  135. return "MSCR";
  136. case ENET_MIBC:
  137. return "MIBC";
  138. case ENET_RCR:
  139. return "RCR";
  140. case ENET_TCR:
  141. return "TCR";
  142. case ENET_PALR:
  143. return "PALR";
  144. case ENET_PAUR:
  145. return "PAUR";
  146. case ENET_OPD:
  147. return "OPD";
  148. case ENET_IAUR:
  149. return "IAUR";
  150. case ENET_IALR:
  151. return "IALR";
  152. case ENET_GAUR:
  153. return "GAUR";
  154. case ENET_GALR:
  155. return "GALR";
  156. case ENET_TFWR:
  157. return "TFWR";
  158. case ENET_RDSR:
  159. return "RDSR";
  160. case ENET_TDSR:
  161. return "TDSR";
  162. case ENET_MRBR:
  163. return "MRBR";
  164. default:
  165. if (s->is_fec) {
  166. return imx_fec_reg_name(s, index);
  167. } else {
  168. return imx_enet_reg_name(s, index);
  169. }
  170. }
  171. }
  172. /*
  173. * Versions of this device with more than one TX descriptor save the
  174. * 2nd and 3rd descriptors in a subsection, to maintain migration
  175. * compatibility with previous versions of the device that only
  176. * supported a single descriptor.
  177. */
  178. static bool imx_eth_is_multi_tx_ring(void *opaque)
  179. {
  180. IMXFECState *s = IMX_FEC(opaque);
  181. return s->tx_ring_num > 1;
  182. }
/*
 * Migration subsection for the 2nd and 3rd TX descriptor pointers.
 * Only sent when the device has more than one TX ring (see .needed),
 * which keeps the stream compatible with older single-ring versions.
 */
static const VMStateDescription vmstate_imx_eth_txdescs = {
    .name = "imx.fec/txdescs",
    .version_id = 1,
    .minimum_version_id = 1,
    .needed = imx_eth_is_multi_tx_ring,
    .fields = (const VMStateField[]) {
        VMSTATE_UINT32(tx_descriptor[1], IMXFECState),
        VMSTATE_UINT32(tx_descriptor[2], IMXFECState),
        VMSTATE_END_OF_LIST()
    }
};
/*
 * Main migration description: the register file, the RX/TX(0) descriptor
 * pointers and the internal PHY state. Extra TX rings travel in the
 * optional subsection above.
 */
static const VMStateDescription vmstate_imx_eth = {
    .name = TYPE_IMX_FEC,
    .version_id = 2,
    .minimum_version_id = 2,
    .fields = (const VMStateField[]) {
        VMSTATE_UINT32_ARRAY(regs, IMXFECState, ENET_MAX),
        VMSTATE_UINT32(rx_descriptor, IMXFECState),
        VMSTATE_UINT32(tx_descriptor[0], IMXFECState),
        VMSTATE_UINT32(phy_status, IMXFECState),
        VMSTATE_UINT32(phy_control, IMXFECState),
        VMSTATE_UINT32(phy_advertise, IMXFECState),
        VMSTATE_UINT32(phy_int, IMXFECState),
        VMSTATE_UINT32(phy_int_mask, IMXFECState),
        VMSTATE_END_OF_LIST()
    },
    .subsections = (const VMStateDescription * const []) {
        &vmstate_imx_eth_txdescs,
        NULL
    },
};
  214. #define PHY_INT_ENERGYON (1 << 7)
  215. #define PHY_INT_AUTONEG_COMPLETE (1 << 6)
  216. #define PHY_INT_FAULT (1 << 5)
  217. #define PHY_INT_DOWN (1 << 4)
  218. #define PHY_INT_AUTONEG_LP (1 << 3)
  219. #define PHY_INT_PARFAULT (1 << 2)
  220. #define PHY_INT_AUTONEG_PAGE (1 << 1)
  221. static void imx_eth_update(IMXFECState *s);
  222. /*
  223. * The MII phy could raise a GPIO to the processor which in turn
  224. * could be handled as an interrpt by the OS.
  225. * For now we don't handle any GPIO/interrupt line, so the OS will
  226. * have to poll for the PHY status.
  227. */
/*
 * No dedicated PHY interrupt line is modelled (see the comment above),
 * so a PHY event only recomputes the MAC-level interrupt state.
 */
static void imx_phy_update_irq(IMXFECState *s)
{
    imx_eth_update(s);
}
/*
 * Mirror the host network link state into the emulated PHY status and
 * interrupt-source registers, then re-evaluate interrupts.
 * 0x0024 covers BMSR bit 2 (link status) and bit 5 (autoneg complete).
 */
static void imx_phy_update_link(IMXFECState *s)
{
    /* Autonegotiation status mirrors link status. */
    if (qemu_get_queue(s->nic)->link_down) {
        trace_imx_phy_update_link("down");
        s->phy_status &= ~0x0024;
        s->phy_int |= PHY_INT_DOWN;
    } else {
        trace_imx_phy_update_link("up");
        s->phy_status |= 0x0024;
        s->phy_int |= PHY_INT_ENERGYON;
        s->phy_int |= PHY_INT_AUTONEG_COMPLETE;
    }
    imx_phy_update_irq(s);
}
  247. static void imx_eth_set_link(NetClientState *nc)
  248. {
  249. imx_phy_update_link(IMX_FEC(qemu_get_nic_opaque(nc)));
  250. }
/*
 * Reset the internal PHY model to its power-on register values and
 * resynchronise with the current link state.
 * NOTE(review): 0x7809/0x3000/0x01e1 look like standard BMSR/BMCR/ANAR
 * reset values of the emulated PHY — confirm against its datasheet.
 */
static void imx_phy_reset(IMXFECState *s)
{
    trace_imx_phy_reset();

    s->phy_status = 0x7809;
    s->phy_control = 0x3000;
    s->phy_advertise = 0x01e1;
    s->phy_int_mask = 0;
    s->phy_int = 0;
    imx_phy_update_link(s);
}
/*
 * MDIO read of one PHY management register.
 *
 * @reg encodes both the PHY address (reg / 32) and the register number
 * (reg % 32). Reads from a disconnected PHY, or from a PHY address that
 * matches neither this device nor its consumer, return 0xffff (the bus
 * reads as all-ones).
 */
static uint32_t imx_phy_read(IMXFECState *s, int reg)
{
    uint32_t val;
    uint32_t phy = reg / 32;

    if (!s->phy_connected) {
        return 0xffff;
    }

    /* Possibly redirect to the device that shares/consumes our MDIO bus. */
    if (phy != s->phy_num) {
        if (s->phy_consumer && phy == s->phy_consumer->phy_num) {
            s = s->phy_consumer;
        } else {
            trace_imx_phy_read_num(phy, s->phy_num);
            return 0xffff;
        }
    }

    reg %= 32;

    switch (reg) {
    case 0: /* Basic Control */
        val = s->phy_control;
        break;
    case 1: /* Basic Status */
        val = s->phy_status;
        break;
    case 2: /* ID1 */
        val = 0x0007;
        break;
    case 3: /* ID2 */
        val = 0xc0d1;
        break;
    case 4: /* Auto-neg advertisement */
        val = s->phy_advertise;
        break;
    case 5: /* Auto-neg Link Partner Ability */
        val = 0x0f71;
        break;
    case 6: /* Auto-neg Expansion */
        val = 1;
        break;
    case 29: /* Interrupt source. */
        /* Read-to-clear: reading drops all pending PHY interrupt bits. */
        val = s->phy_int;
        s->phy_int = 0;
        imx_phy_update_irq(s);
        break;
    case 30: /* Interrupt mask */
        val = s->phy_int_mask;
        break;
    case 17:
    case 18:
    case 27:
    case 31:
        qemu_log_mask(LOG_UNIMP, "[%s.phy]%s: reg %d not implemented\n",
                      TYPE_IMX_FEC, __func__, reg);
        val = 0;
        break;
    default:
        qemu_log_mask(LOG_GUEST_ERROR, "[%s.phy]%s: Bad address at offset %d\n",
                      TYPE_IMX_FEC, __func__, reg);
        val = 0;
        break;
    }

    trace_imx_phy_read(val, phy, reg);

    return val;
}
  324. static void imx_phy_write(IMXFECState *s, int reg, uint32_t val)
  325. {
  326. uint32_t phy = reg / 32;
  327. if (!s->phy_connected) {
  328. return;
  329. }
  330. if (phy != s->phy_num) {
  331. if (s->phy_consumer && phy == s->phy_consumer->phy_num) {
  332. s = s->phy_consumer;
  333. } else {
  334. trace_imx_phy_write_num(phy, s->phy_num);
  335. return;
  336. }
  337. }
  338. reg %= 32;
  339. trace_imx_phy_write(val, phy, reg);
  340. switch (reg) {
  341. case 0: /* Basic Control */
  342. if (val & 0x8000) {
  343. imx_phy_reset(s);
  344. } else {
  345. s->phy_control = val & 0x7980;
  346. /* Complete autonegotiation immediately. */
  347. if (val & 0x1000) {
  348. s->phy_status |= 0x0020;
  349. }
  350. }
  351. break;
  352. case 4: /* Auto-neg advertisement */
  353. s->phy_advertise = (val & 0x2d7f) | 0x80;
  354. break;
  355. case 30: /* Interrupt mask */
  356. s->phy_int_mask = val & 0xff;
  357. imx_phy_update_irq(s);
  358. break;
  359. case 17:
  360. case 18:
  361. case 27:
  362. case 31:
  363. qemu_log_mask(LOG_UNIMP, "[%s.phy)%s: reg %d not implemented\n",
  364. TYPE_IMX_FEC, __func__, reg);
  365. break;
  366. default:
  367. qemu_log_mask(LOG_GUEST_ERROR, "[%s.phy]%s: Bad address at offset %d\n",
  368. TYPE_IMX_FEC, __func__, reg);
  369. break;
  370. }
  371. }
/* DMA-read one legacy FEC buffer descriptor from guest memory into *bd. */
static void imx_fec_read_bd(IMXFECBufDesc *bd, dma_addr_t addr)
{
    dma_memory_read(&address_space_memory, addr, bd, sizeof(*bd),
                    MEMTXATTRS_UNSPECIFIED);

    trace_imx_fec_read_bd(addr, bd->flags, bd->length, bd->data);
}
/* DMA-write one legacy FEC buffer descriptor back to guest memory. */
static void imx_fec_write_bd(IMXFECBufDesc *bd, dma_addr_t addr)
{
    dma_memory_write(&address_space_memory, addr, bd, sizeof(*bd),
                     MEMTXATTRS_UNSPECIFIED);
}
/* DMA-read one enhanced (ENET) buffer descriptor from guest memory. */
static void imx_enet_read_bd(IMXENETBufDesc *bd, dma_addr_t addr)
{
    dma_memory_read(&address_space_memory, addr, bd, sizeof(*bd),
                    MEMTXATTRS_UNSPECIFIED);

    trace_imx_enet_read_bd(addr, bd->flags, bd->length, bd->data,
                           bd->option, bd->status);
}
/* DMA-write one enhanced (ENET) buffer descriptor back to guest memory. */
static void imx_enet_write_bd(IMXENETBufDesc *bd, dma_addr_t addr)
{
    dma_memory_write(&address_space_memory, addr, bd, sizeof(*bd),
                     MEMTXATTRS_UNSPECIFIED);
}
  395. static void imx_eth_update(IMXFECState *s)
  396. {
  397. /*
  398. * Previous versions of qemu had the ENET_INT_MAC and ENET_INT_TS_TIMER
  399. * interrupts swapped. This worked with older versions of Linux (4.14
  400. * and older) since Linux associated both interrupt lines with Ethernet
  401. * MAC interrupts. Specifically,
  402. * - Linux 4.15 and later have separate interrupt handlers for the MAC and
  403. * timer interrupts. Those versions of Linux fail with versions of QEMU
  404. * with swapped interrupt assignments.
  405. * - In linux 4.14, both interrupt lines were registered with the Ethernet
  406. * MAC interrupt handler. As a result, all versions of qemu happen to
  407. * work, though that is accidental.
  408. * - In Linux 4.9 and older, the timer interrupt was registered directly
  409. * with the Ethernet MAC interrupt handler. The MAC interrupt was
  410. * redirected to a GPIO interrupt to work around erratum ERR006687.
  411. * This was implemented using the SOC's IOMUX block. In qemu, this GPIO
  412. * interrupt never fired since IOMUX is currently not supported in qemu.
  413. * Linux instead received MAC interrupts on the timer interrupt.
  414. * As a result, qemu versions with the swapped interrupt assignment work,
  415. * albeit accidentally, but qemu versions with the correct interrupt
  416. * assignment fail.
  417. *
  418. * To ensure that all versions of Linux work, generate ENET_INT_MAC
  419. * interrupts on both interrupt lines. This should be changed if and when
  420. * qemu supports IOMUX.
  421. */
  422. if (s->regs[ENET_EIR] & s->regs[ENET_EIMR] &
  423. (ENET_INT_MAC | ENET_INT_TS_TIMER)) {
  424. qemu_set_irq(s->irq[1], 1);
  425. } else {
  426. qemu_set_irq(s->irq[1], 0);
  427. }
  428. if (s->regs[ENET_EIR] & s->regs[ENET_EIMR] & ENET_INT_MAC) {
  429. qemu_set_irq(s->irq[0], 1);
  430. } else {
  431. qemu_set_irq(s->irq[0], 0);
  432. }
  433. }
/*
 * Transmit using the legacy FEC descriptor ring (single TX ring).
 *
 * Walks the ring from s->tx_descriptor[0], gathering buffers into
 * s->frame until a descriptor with ENET_BD_L (last-in-frame) is seen,
 * then sends the frame. Stops at the first descriptor not marked ready
 * (ENET_BD_R) or after IMX_MAX_DESC descriptors (runaway guard).
 */
static void imx_fec_do_tx(IMXFECState *s)
{
    int frame_size = 0, descnt = 0;
    uint8_t *ptr = s->frame;
    uint32_t addr = s->tx_descriptor[0];

    while (descnt++ < IMX_MAX_DESC) {
        IMXFECBufDesc bd;
        int len;

        imx_fec_read_bd(&bd, addr);
        if ((bd.flags & ENET_BD_R) == 0) {
            /* Run out of descriptors to transmit. */
            trace_imx_eth_tx_bd_busy();
            break;
        }
        len = bd.length;
        if (frame_size + len > ENET_MAX_FRAME_SIZE) {
            /* Truncate to the frame buffer and flag a babbling transmit. */
            len = ENET_MAX_FRAME_SIZE - frame_size;
            s->regs[ENET_EIR] |= ENET_INT_BABT;
        }
        dma_memory_read(&address_space_memory, bd.data, ptr, len,
                        MEMTXATTRS_UNSPECIFIED);
        ptr += len;
        frame_size += len;
        if (bd.flags & ENET_BD_L) {
            /* Last buffer in frame. */
            qemu_send_packet(qemu_get_queue(s->nic), s->frame, frame_size);
            ptr = s->frame;
            frame_size = 0;
            s->regs[ENET_EIR] |= ENET_INT_TXF;
        }
        s->regs[ENET_EIR] |= ENET_INT_TXB;
        bd.flags &= ~ENET_BD_R;
        /* Write back the modified descriptor. */
        imx_fec_write_bd(&bd, addr);
        /* Advance to the next descriptor. */
        if ((bd.flags & ENET_BD_W) != 0) {
            /* Wrap bit: restart at the ring base. */
            addr = s->regs[ENET_TDSR];
        } else {
            addr += sizeof(bd);
        }
    }

    s->tx_descriptor[0] = addr;

    imx_eth_update(s);
}
/*
 * Transmit using the enhanced (ENET) descriptor format.
 *
 * @index selects which TDAR register was written and thereby which of
 * the (up to three) TX rings to service; a bogus index aborts, since
 * the caller controls this value. Same gather loop as imx_fec_do_tx(),
 * plus optional IP/TCP/UDP checksum insertion and per-descriptor
 * interrupt gating via ENET_BD_TX_INT.
 */
static void imx_enet_do_tx(IMXFECState *s, uint32_t index)
{
    int frame_size = 0, descnt = 0;

    uint8_t *ptr = s->frame;
    uint32_t addr, int_txb, int_txf, tdsr;
    size_t ring;

    /* Resolve ring number and the ring-specific interrupt/base registers. */
    switch (index) {
    case ENET_TDAR:
        ring = 0;
        int_txb = ENET_INT_TXB;
        int_txf = ENET_INT_TXF;
        tdsr = ENET_TDSR;
        break;
    case ENET_TDAR1:
        ring = 1;
        int_txb = ENET_INT_TXB1;
        int_txf = ENET_INT_TXF1;
        tdsr = ENET_TDSR1;
        break;
    case ENET_TDAR2:
        ring = 2;
        int_txb = ENET_INT_TXB2;
        int_txf = ENET_INT_TXF2;
        tdsr = ENET_TDSR2;
        break;
    default:
        qemu_log_mask(LOG_GUEST_ERROR,
                      "%s: bogus value for index %x\n",
                      __func__, index);
        abort();
        break;
    }

    addr = s->tx_descriptor[ring];

    while (descnt++ < IMX_MAX_DESC) {
        IMXENETBufDesc bd;
        int len;

        imx_enet_read_bd(&bd, addr);
        if ((bd.flags & ENET_BD_R) == 0) {
            /* Run out of descriptors to transmit. */
            trace_imx_eth_tx_bd_busy();
            break;
        }
        len = bd.length;
        if (frame_size + len > ENET_MAX_FRAME_SIZE) {
            /* Truncate to the frame buffer and flag a babbling transmit. */
            len = ENET_MAX_FRAME_SIZE - frame_size;
            s->regs[ENET_EIR] |= ENET_INT_BABT;
        }
        dma_memory_read(&address_space_memory, bd.data, ptr, len,
                        MEMTXATTRS_UNSPECIFIED);
        ptr += len;
        frame_size += len;
        if (bd.flags & ENET_BD_L) {
            /* Insert checksums requested by the descriptor options. */
            int csum = 0;
            if (bd.option & ENET_BD_PINS) {
                csum |= (CSUM_TCP | CSUM_UDP);
            }
            if (bd.option & ENET_BD_IINS) {
                csum |= CSUM_IP;
            }
            if (csum) {
                net_checksum_calculate(s->frame, frame_size, csum);
            }

            /* Last buffer in frame. */
            qemu_send_packet(qemu_get_queue(s->nic), s->frame, frame_size);
            ptr = s->frame;

            frame_size = 0;
            if (bd.option & ENET_BD_TX_INT) {
                s->regs[ENET_EIR] |= int_txf;
            }
            /* Indicate that we've updated the last buffer descriptor. */
            bd.last_buffer = ENET_BD_BDU;
        }
        if (bd.option & ENET_BD_TX_INT) {
            s->regs[ENET_EIR] |= int_txb;
        }
        bd.flags &= ~ENET_BD_R;
        /* Write back the modified descriptor. */
        imx_enet_write_bd(&bd, addr);
        /* Advance to the next descriptor. */
        if ((bd.flags & ENET_BD_W) != 0) {
            /* Wrap bit: restart at this ring's base. */
            addr = s->regs[tdsr];
        } else {
            addr += sizeof(bd);
        }
    }

    s->tx_descriptor[ring] = addr;

    imx_eth_update(s);
}
  566. static void imx_eth_do_tx(IMXFECState *s, uint32_t index)
  567. {
  568. if (!s->is_fec && (s->regs[ENET_ECR] & ENET_ECR_EN1588)) {
  569. imx_enet_do_tx(s, index);
  570. } else {
  571. imx_fec_do_tx(s);
  572. }
  573. }
/*
 * Re-evaluate RX availability: RDAR is set while the current RX
 * descriptor is empty (ENET_BD_E). When receive becomes possible and
 * @flush is set, drain any packets queued while RX was stalled.
 */
static void imx_eth_enable_rx(IMXFECState *s, bool flush)
{
    IMXFECBufDesc bd;

    imx_fec_read_bd(&bd, s->rx_descriptor);

    s->regs[ENET_RDAR] = (bd.flags & ENET_BD_E) ? ENET_RDAR_RDAR : 0;

    if (!s->regs[ENET_RDAR]) {
        trace_imx_eth_rx_bd_full();
    } else if (flush) {
        qemu_flush_queued_packets(qemu_get_queue(s->nic));
    }
}
/*
 * Device reset: restore register reset values (variant-specific for
 * FEC vs ENET), reload the MAC address registers from the QOM MAC
 * property, clear the descriptor pointers and reset the PHY.
 * NOTE(review): the hard-coded values appear to be the documented reset
 * values from the i.MX reference manual — confirm when touching them.
 */
static void imx_eth_reset(DeviceState *d)
{
    IMXFECState *s = IMX_FEC(d);

    /* Reset the Device */
    memset(s->regs, 0, sizeof(s->regs));
    s->regs[ENET_ECR] = 0xf0000000;
    s->regs[ENET_MIBC] = 0xc0000000;
    s->regs[ENET_RCR] = 0x05ee0001;
    s->regs[ENET_OPD] = 0x00010000;

    /* PALR/PAUR hold the station MAC address (PAUR low half is 0x8808). */
    s->regs[ENET_PALR] = (s->conf.macaddr.a[0] << 24)
                         | (s->conf.macaddr.a[1] << 16)
                         | (s->conf.macaddr.a[2] << 8)
                         | s->conf.macaddr.a[3];
    s->regs[ENET_PAUR] = (s->conf.macaddr.a[4] << 24)
                         | (s->conf.macaddr.a[5] << 16)
                         | 0x8808;

    if (s->is_fec) {
        s->regs[ENET_FRBR] = 0x00000600;
        s->regs[ENET_FRSR] = 0x00000500;
        s->regs[ENET_MIIGSK_ENR] = 0x00000006;
    } else {
        s->regs[ENET_RAEM] = 0x00000004;
        s->regs[ENET_RAFL] = 0x00000004;
        s->regs[ENET_TAEM] = 0x00000004;
        s->regs[ENET_TAFL] = 0x00000008;
        s->regs[ENET_TIPG] = 0x0000000c;
        s->regs[ENET_FTRL] = 0x000007ff;
        s->regs[ENET_ATPER] = 0x3b9aca00;   /* 10^9: one second in ns */
    }

    s->rx_descriptor = 0;
    memset(s->tx_descriptor, 0, sizeof(s->tx_descriptor));

    /* We also reset the PHY */
    imx_phy_reset(s);
}
/* Fallback read handler: log a guest error and read as zero. */
static uint32_t imx_default_read(IMXFECState *s, uint32_t index)
{
    qemu_log_mask(LOG_GUEST_ERROR, "[%s]%s: Bad register at offset 0x%"
                  PRIx32 "\n", TYPE_IMX_FEC, __func__, index * 4);
    return 0;
}
  625. static uint32_t imx_fec_read(IMXFECState *s, uint32_t index)
  626. {
  627. switch (index) {
  628. case ENET_FRBR:
  629. case ENET_FRSR:
  630. case ENET_MIIGSK_CFGR:
  631. case ENET_MIIGSK_ENR:
  632. return s->regs[index];
  633. default:
  634. return imx_default_read(s, index);
  635. }
  636. }
/* Read handler for ENET-only registers; anything else is a guest error. */
static uint32_t imx_enet_read(IMXFECState *s, uint32_t index)
{
    switch (index) {
    case ENET_RSFL:
    case ENET_RSEM:
    case ENET_RAEM:
    case ENET_RAFL:
    case ENET_TSEM:
    case ENET_TAEM:
    case ENET_TAFL:
    case ENET_TIPG:
    case ENET_FTRL:
    case ENET_TACC:
    case ENET_RACC:
    case ENET_ATCR:
    case ENET_ATVR:
    case ENET_ATOFF:
    case ENET_ATPER:
    case ENET_ATCOR:
    case ENET_ATINC:
    case ENET_ATSTMP:
    case ENET_TGSR:
    case ENET_TCSR0:
    case ENET_TCCR0:
    case ENET_TCSR1:
    case ENET_TCCR1:
    case ENET_TCSR2:
    case ENET_TCCR2:
    case ENET_TCSR3:
    case ENET_TCCR3:
        return s->regs[index];
    default:
        return imx_default_read(s, index);
    }
}
/*
 * MMIO read dispatcher. Registers common to both variants are read
 * directly; anything else is routed to the variant-specific handler.
 * The register index is the word offset (offset / 4).
 */
static uint64_t imx_eth_read(void *opaque, hwaddr offset, unsigned size)
{
    uint32_t value = 0;
    IMXFECState *s = IMX_FEC(opaque);
    uint32_t index = offset >> 2;

    switch (index) {
    case ENET_EIR:
    case ENET_EIMR:
    case ENET_RDAR:
    case ENET_TDAR:
    case ENET_ECR:
    case ENET_MMFR:
    case ENET_MSCR:
    case ENET_MIBC:
    case ENET_RCR:
    case ENET_TCR:
    case ENET_PALR:
    case ENET_PAUR:
    case ENET_OPD:
    case ENET_IAUR:
    case ENET_IALR:
    case ENET_GAUR:
    case ENET_GALR:
    case ENET_TFWR:
    case ENET_RDSR:
    case ENET_TDSR:
    case ENET_MRBR:
        value = s->regs[index];
        break;
    default:
        if (s->is_fec) {
            value = imx_fec_read(s, index);
        } else {
            value = imx_enet_read(s, index);
        }
        break;
    }

    trace_imx_eth_read(index, imx_eth_reg_name(s, index), value);

    return value;
}
  712. static void imx_default_write(IMXFECState *s, uint32_t index, uint32_t value)
  713. {
  714. qemu_log_mask(LOG_GUEST_ERROR, "[%s]%s: Bad address at offset 0x%"
  715. PRIx32 "\n", TYPE_IMX_FEC, __func__, index * 4);
  716. return;
  717. }
/*
 * Write handler for FEC-only registers, applying per-register
 * writable-bit masks; anything else is a guest error.
 */
static void imx_fec_write(IMXFECState *s, uint32_t index, uint32_t value)
{
    switch (index) {
    case ENET_FRBR:
        /* FRBR is read only */
        qemu_log_mask(LOG_GUEST_ERROR, "[%s]%s: Register FRBR is read only\n",
                      TYPE_IMX_FEC, __func__);
        break;
    case ENET_FRSR:
        /* Low bits masked; bit 10 always reads back as set. */
        s->regs[index] = (value & 0x000003fc) | 0x00000400;
        break;
    case ENET_MIIGSK_CFGR:
        s->regs[index] = value & 0x00000053;
        break;
    case ENET_MIIGSK_ENR:
        /* Writing the enable bit also latches the "ready" status bit. */
        s->regs[index] = (value & 0x00000002) ? 0x00000006 : 0;
        break;
    default:
        imx_default_write(s, index, value);
        break;
    }
}
/*
 * Write handler for ENET-only registers, applying per-register
 * writable-bit masks and write-1-to-clear semantics where the
 * hardware defines them; anything else is a guest error.
 */
static void imx_enet_write(IMXFECState *s, uint32_t index, uint32_t value)
{
    switch (index) {
    case ENET_RSFL:
    case ENET_RSEM:
    case ENET_RAEM:
    case ENET_RAFL:
    case ENET_TSEM:
    case ENET_TAEM:
    case ENET_TAFL:
        /* FIFO threshold registers: 9 writable bits. */
        s->regs[index] = value & 0x000001ff;
        break;
    case ENET_TIPG:
        s->regs[index] = value & 0x0000001f;
        break;
    case ENET_FTRL:
        s->regs[index] = value & 0x00003fff;
        break;
    case ENET_TACC:
        s->regs[index] = value & 0x00000019;
        break;
    case ENET_RACC:
        s->regs[index] = value & 0x000000C7;
        break;
    case ENET_ATCR:
        s->regs[index] = value & 0x00002a9d;
        break;
    case ENET_ATVR:
    case ENET_ATOFF:
    case ENET_ATPER:
        s->regs[index] = value;
        break;
    case ENET_ATSTMP:
        /* ATSTMP is read only */
        qemu_log_mask(LOG_GUEST_ERROR, "[%s]%s: Register ATSTMP is read only\n",
                      TYPE_IMX_FEC, __func__);
        break;
    case ENET_ATCOR:
        s->regs[index] = value & 0x7fffffff;
        break;
    case ENET_ATINC:
        s->regs[index] = value & 0x00007f7f;
        break;
    case ENET_TGSR:
        /* implement clear timer flag */
        s->regs[index] &= ~(value & 0x0000000f); /* all bits W1C */
        break;
    case ENET_TCSR0:
    case ENET_TCSR1:
    case ENET_TCSR2:
    case ENET_TCSR3:
        s->regs[index] &= ~(value & 0x00000080); /* W1C bits */
        s->regs[index] &= ~0x0000007d; /* writable fields */
        s->regs[index] |= (value & 0x0000007d);
        break;
    case ENET_TCCR0:
    case ENET_TCCR1:
    case ENET_TCCR2:
    case ENET_TCCR3:
        s->regs[index] = value;
        break;
    default:
        imx_default_write(s, index, value);
        break;
    }
}
/*
 * MMIO write dispatcher for registers common to both variants; others
 * are routed to the variant-specific handler (which returns without
 * the trailing imx_eth_update()). Side effects include kicking TX/RX,
 * running MDIO transactions (MMFR) and full device reset (ECR).
 */
static void imx_eth_write(void *opaque, hwaddr offset, uint64_t value,
                          unsigned size)
{
    IMXFECState *s = IMX_FEC(opaque);
    const bool single_tx_ring = !imx_eth_is_multi_tx_ring(s);
    uint32_t index = offset >> 2;

    trace_imx_eth_write(index, imx_eth_reg_name(s, index), value);

    switch (index) {
    case ENET_EIR:
        /* Interrupt events are write-1-to-clear. */
        s->regs[index] &= ~value;
        break;
    case ENET_EIMR:
        s->regs[index] = value;
        break;
    case ENET_RDAR:
        /* Any write acts as an RX kick while the MAC is enabled. */
        if (s->regs[ENET_ECR] & ENET_ECR_ETHEREN) {
            if (!s->regs[index]) {
                imx_eth_enable_rx(s, true);
            }
        } else {
            s->regs[index] = 0;
        }
        break;
    case ENET_TDAR1:
    case ENET_TDAR2:
        /* Extra TX rings only exist on multi-ring variants. */
        if (unlikely(single_tx_ring)) {
            qemu_log_mask(LOG_GUEST_ERROR,
                          "[%s]%s: trying to access TDAR2 or TDAR1\n",
                          TYPE_IMX_FEC, __func__);
            return;
        }
        /* fall through */
    case ENET_TDAR:
        /* TX kick: transmit synchronously, then clear the "active" bit. */
        if (s->regs[ENET_ECR] & ENET_ECR_ETHEREN) {
            s->regs[index] = ENET_TDAR_TDAR;
            imx_eth_do_tx(s, index);
        }
        s->regs[index] = 0;
        break;
    case ENET_ECR:
        if (value & ENET_ECR_RESET) {
            return imx_eth_reset(DEVICE(s));
        }
        s->regs[index] = value;
        if ((s->regs[index] & ENET_ECR_ETHEREN) == 0) {
            /* Disabling the MAC rewinds all rings to their base addresses. */
            s->regs[ENET_RDAR] = 0;
            s->rx_descriptor = s->regs[ENET_RDSR];
            s->regs[ENET_TDAR] = 0;
            s->regs[ENET_TDAR1] = 0;
            s->regs[ENET_TDAR2] = 0;
            s->tx_descriptor[0] = s->regs[ENET_TDSR];
            s->tx_descriptor[1] = s->regs[ENET_TDSR1];
            s->tx_descriptor[2] = s->regs[ENET_TDSR2];
        }
        break;
    case ENET_MMFR:
        /* MDIO frame: bit 29 distinguishes read from write; bits 18..27
         * carry PHY address + register, bits 0..15 the data. */
        s->regs[index] = value;
        if (extract32(value, 29, 1)) {
            /* This is a read operation */
            s->regs[ENET_MMFR] = deposit32(s->regs[ENET_MMFR], 0, 16,
                                           imx_phy_read(s,
                                                        extract32(value,
                                                                  18, 10)));
        } else {
            /* This is a write operation */
            imx_phy_write(s, extract32(value, 18, 10), extract32(value, 0, 16));
        }
        /* raise the interrupt as the PHY operation is done */
        s->regs[ENET_EIR] |= ENET_INT_MII;
        break;
    case ENET_MSCR:
        s->regs[index] = value & 0xfe;
        break;
    case ENET_MIBC:
        /* TODO: Implement MIB.  */
        s->regs[index] = (value & 0x80000000) ? 0xc0000000 : 0;
        break;
    case ENET_RCR:
        s->regs[index] = value & 0x07ff003f;
        /* TODO: Implement LOOP mode.  */
        break;
    case ENET_TCR:
        /* We transmit immediately, so raise GRA immediately.  */
        s->regs[index] = value;
        if (value & 1) {
            s->regs[ENET_EIR] |= ENET_INT_GRA;
        }
        break;
    case ENET_PALR:
        /* Writing PALR/PAUR also updates the NIC's MAC address. */
        s->regs[index] = value;
        s->conf.macaddr.a[0] = value >> 24;
        s->conf.macaddr.a[1] = value >> 16;
        s->conf.macaddr.a[2] = value >> 8;
        s->conf.macaddr.a[3] = value;
        break;
    case ENET_PAUR:
        s->regs[index] = (value | 0x0000ffff) & 0xffff8808;
        s->conf.macaddr.a[4] = value >> 24;
        s->conf.macaddr.a[5] = value >> 16;
        break;
    case ENET_OPD:
        s->regs[index] = (value & 0x0000ffff) | 0x00010000;
        break;
    case ENET_IAUR:
    case ENET_IALR:
    case ENET_GAUR:
    case ENET_GALR:
        /* TODO: implement MAC hash filtering.  */
        break;
    case ENET_TFWR:
        if (s->is_fec) {
            s->regs[index] = value & 0x3;
        } else {
            s->regs[index] = value & 0x13f;
        }
        break;
    case ENET_RDSR:
        /* Ring base addresses: FEC requires 4-byte, ENET 8-byte alignment. */
        if (s->is_fec) {
            s->regs[index] = value & ~3;
        } else {
            s->regs[index] = value & ~7;
        }
        s->rx_descriptor = s->regs[index];
        break;
    case ENET_TDSR:
        if (s->is_fec) {
            s->regs[index] = value & ~3;
        } else {
            s->regs[index] = value & ~7;
        }
        s->tx_descriptor[0] = s->regs[index];
        break;
    case ENET_TDSR1:
        if (unlikely(single_tx_ring)) {
            qemu_log_mask(LOG_GUEST_ERROR,
                          "[%s]%s: trying to access TDSR1\n",
                          TYPE_IMX_FEC, __func__);
            return;
        }
        s->regs[index] = value & ~7;
        s->tx_descriptor[1] = s->regs[index];
        break;
    case ENET_TDSR2:
        if (unlikely(single_tx_ring)) {
            qemu_log_mask(LOG_GUEST_ERROR,
                          "[%s]%s: trying to access TDSR2\n",
                          TYPE_IMX_FEC, __func__);
            return;
        }
        s->regs[index] = value & ~7;
        s->tx_descriptor[2] = s->regs[index];
        break;
    case ENET_MRBR:
        s->regs[index] = value & 0x00003ff0;
        break;
    default:
        if (s->is_fec) {
            imx_fec_write(s, index, value);
        } else {
            imx_enet_write(s, index, value);
        }
        return;
    }

    imx_eth_update(s);
}
  971. static bool imx_eth_can_receive(NetClientState *nc)
  972. {
  973. IMXFECState *s = IMX_FEC(qemu_get_nic_opaque(nc));
  974. return !!s->regs[ENET_RDAR];
  975. }
/*
 * Receive path for the legacy FEC controller (short buffer descriptors).
 *
 * Appends a freshly computed FCS to the frame and DMAs it into the guest's
 * RX descriptor ring, splitting across descriptors of at most ENET_MRBR
 * bytes each.  Oversized frames get LG/TR error flags; intermediate buffers
 * raise RXB, the final buffer raises RXF.  Updates s->rx_descriptor and the
 * interrupt lines before returning.
 *
 * Returns 'len' (whole packet considered consumed, even when truncated or
 * when the ring runs out of descriptors), or 0 if RX is not armed.
 */
static ssize_t imx_fec_receive(NetClientState *nc, const uint8_t *buf,
                               size_t len)
{
    IMXFECState *s = IMX_FEC(qemu_get_nic_opaque(nc));
    IMXFECBufDesc bd;
    uint32_t flags = 0;            /* error bits OR-ed into the last BD */
    uint32_t addr;                 /* guest address of the current BD */
    uint32_t crc;
    uint32_t buf_addr;
    uint8_t *crc_ptr;
    unsigned int buf_len;
    size_t size = len;

    trace_imx_fec_receive(size);

    if (!s->regs[ENET_RDAR]) {
        /* Guest has not armed RX: drop the frame. */
        qemu_log_mask(LOG_GUEST_ERROR, "[%s]%s: Unexpected packet\n",
                      TYPE_IMX_FEC, __func__);
        return 0;
    }

    /* Hardware appends the Ethernet FCS; emulate it over the payload. */
    crc = cpu_to_be32(crc32(~0, buf, size));
    /* Increase size by 4, loop below reads the last 4 bytes from crc_ptr. */
    size += 4;
    crc_ptr = (uint8_t *) &crc;

    /* Huge frames are truncated. */
    if (size > ENET_MAX_FRAME_SIZE) {
        size = ENET_MAX_FRAME_SIZE;
        flags |= ENET_BD_TR | ENET_BD_LG;
    }

    /* Frames larger than the user limit just set error flags. */
    /* RCR[31:16] holds MAX_FL, the guest-configured maximum frame length. */
    if (size > (s->regs[ENET_RCR] >> 16)) {
        flags |= ENET_BD_LG;
    }

    addr = s->rx_descriptor;
    while (size > 0) {
        imx_fec_read_bd(&bd, addr);
        if ((bd.flags & ENET_BD_E) == 0) {
            /* No descriptors available. Bail out. */
            /*
             * FIXME: This is wrong. We should probably either
             * save the remainder for when more RX buffers are
             * available, or flag an error.
             */
            qemu_log_mask(LOG_GUEST_ERROR, "[%s]%s: Lost end of frame\n",
                          TYPE_IMX_FEC, __func__);
            break;
        }
        /* Each descriptor's buffer holds at most ENET_MRBR bytes. */
        buf_len = (size <= s->regs[ENET_MRBR]) ? size : s->regs[ENET_MRBR];
        bd.length = buf_len;
        size -= buf_len;

        trace_imx_fec_receive_len(addr, bd.length);

        /* The last 4 bytes are the CRC. */
        if (size < 4) {
            /* Trim payload copy so the FCS bytes come from crc_ptr below. */
            buf_len += size - 4;
        }
        buf_addr = bd.data;
        dma_memory_write(&address_space_memory, buf_addr, buf, buf_len,
                         MEMTXATTRS_UNSPECIFIED);
        buf += buf_len;
        if (size < 4) {
            /* Append the (remaining) FCS bytes straight after the payload. */
            dma_memory_write(&address_space_memory, buf_addr + buf_len,
                             crc_ptr, 4 - size, MEMTXATTRS_UNSPECIFIED);
            crc_ptr += 4 - size;
        }
        /* Hand the descriptor back to the guest (clear Empty). */
        bd.flags &= ~ENET_BD_E;
        if (size == 0) {
            /* Last buffer in frame. */
            bd.flags |= flags | ENET_BD_L;
            trace_imx_fec_receive_last(bd.flags);
            s->regs[ENET_EIR] |= ENET_INT_RXF;
        } else {
            s->regs[ENET_EIR] |= ENET_INT_RXB;
        }
        imx_fec_write_bd(&bd, addr);
        /* Advance to the next descriptor. */
        if ((bd.flags & ENET_BD_W) != 0) {
            /* Wrap bit: return to the start of the ring. */
            addr = s->regs[ENET_RDSR];
        } else {
            addr += sizeof(bd);
        }
    }
    s->rx_descriptor = addr;
    imx_eth_enable_rx(s, false);
    imx_eth_update(s);
    return len;
}
/*
 * Receive path for the ENET controller (enhanced buffer descriptors).
 *
 * Same ring-walking scheme as imx_fec_receive(), with two additions:
 * the truncation limit comes from ENET_FTRL instead of a fixed constant,
 * and when RACC[SHIFT16] is set two zero bytes are prepended to the first
 * buffer so the IP payload lands on a 4-byte boundary.  RXB/RXF interrupts
 * are raised only if the descriptor's ENET_BD_RX_INT option bit is set.
 *
 * Returns 'len' (whole packet considered consumed), or 0 if RX is not armed.
 */
static ssize_t imx_enet_receive(NetClientState *nc, const uint8_t *buf,
                                size_t len)
{
    IMXFECState *s = IMX_FEC(qemu_get_nic_opaque(nc));
    IMXENETBufDesc bd;
    uint32_t flags = 0;            /* error bits OR-ed into the last BD */
    uint32_t addr;                 /* guest address of the current BD */
    uint32_t crc;
    uint32_t buf_addr;
    uint8_t *crc_ptr;
    unsigned int buf_len;
    size_t size = len;
    bool shift16 = s->regs[ENET_RACC] & ENET_RACC_SHIFT16;

    trace_imx_enet_receive(size);

    if (!s->regs[ENET_RDAR]) {
        /* Guest has not armed RX: drop the frame. */
        qemu_log_mask(LOG_GUEST_ERROR, "[%s]%s: Unexpected packet\n",
                      TYPE_IMX_FEC, __func__);
        return 0;
    }

    /* Hardware appends the Ethernet FCS; emulate it over the payload. */
    crc = cpu_to_be32(crc32(~0, buf, size));
    /* Increase size by 4, loop below reads the last 4 bytes from crc_ptr. */
    size += 4;
    crc_ptr = (uint8_t *) &crc;
    if (shift16) {
        /* Account for the two alignment bytes inserted below. */
        size += 2;
    }

    /* Huge frames are truncated. */
    if (size > s->regs[ENET_FTRL]) {
        size = s->regs[ENET_FTRL];
        flags |= ENET_BD_TR | ENET_BD_LG;
    }

    /* Frames larger than the user limit just set error flags. */
    /* RCR[31:16] holds MAX_FL, the guest-configured maximum frame length. */
    if (size > (s->regs[ENET_RCR] >> 16)) {
        flags |= ENET_BD_LG;
    }

    addr = s->rx_descriptor;
    while (size > 0) {
        imx_enet_read_bd(&bd, addr);
        if ((bd.flags & ENET_BD_E) == 0) {
            /* No descriptors available. Bail out. */
            /*
             * FIXME: This is wrong. We should probably either
             * save the remainder for when more RX buffers are
             * available, or flag an error.
             */
            qemu_log_mask(LOG_GUEST_ERROR, "[%s]%s: Lost end of frame\n",
                          TYPE_IMX_FEC, __func__);
            break;
        }
        /* Each descriptor's buffer holds at most ENET_MRBR bytes. */
        buf_len = MIN(size, s->regs[ENET_MRBR]);
        bd.length = buf_len;
        size -= buf_len;

        trace_imx_enet_receive_len(addr, bd.length);

        /* The last 4 bytes are the CRC. */
        if (size < 4) {
            /* Trim payload copy so the FCS bytes come from crc_ptr below. */
            buf_len += size - 4;
        }
        buf_addr = bd.data;
        if (shift16) {
            /*
             * If SHIFT16 bit of ENETx_RACC register is set we need to
             * align the payload to 4-byte boundary.
             */
            const uint8_t zeros[2] = { 0 };

            dma_memory_write(&address_space_memory, buf_addr, zeros,
                             sizeof(zeros), MEMTXATTRS_UNSPECIFIED);

            buf_addr += sizeof(zeros);
            buf_len -= sizeof(zeros);

            /* We only do this once per Ethernet frame */
            shift16 = false;
        }
        dma_memory_write(&address_space_memory, buf_addr, buf, buf_len,
                         MEMTXATTRS_UNSPECIFIED);
        buf += buf_len;
        if (size < 4) {
            /* Append the (remaining) FCS bytes straight after the payload. */
            dma_memory_write(&address_space_memory, buf_addr + buf_len,
                             crc_ptr, 4 - size, MEMTXATTRS_UNSPECIFIED);
            crc_ptr += 4 - size;
        }
        /* Hand the descriptor back to the guest (clear Empty). */
        bd.flags &= ~ENET_BD_E;
        if (size == 0) {
            /* Last buffer in frame. */
            bd.flags |= flags | ENET_BD_L;
            trace_imx_enet_receive_last(bd.flags);
            /* Indicate that we've updated the last buffer descriptor. */
            bd.last_buffer = ENET_BD_BDU;
            if (bd.option & ENET_BD_RX_INT) {
                s->regs[ENET_EIR] |= ENET_INT_RXF;
            }
        } else {
            if (bd.option & ENET_BD_RX_INT) {
                s->regs[ENET_EIR] |= ENET_INT_RXB;
            }
        }
        imx_enet_write_bd(&bd, addr);
        /* Advance to the next descriptor. */
        if ((bd.flags & ENET_BD_W) != 0) {
            /* Wrap bit: return to the start of the ring. */
            addr = s->regs[ENET_RDSR];
        } else {
            addr += sizeof(bd);
        }
    }
    s->rx_descriptor = addr;
    imx_eth_enable_rx(s, false);
    imx_eth_update(s);
    return len;
}
  1167. static ssize_t imx_eth_receive(NetClientState *nc, const uint8_t *buf,
  1168. size_t len)
  1169. {
  1170. IMXFECState *s = IMX_FEC(qemu_get_nic_opaque(nc));
  1171. if (!s->is_fec && (s->regs[ENET_ECR] & ENET_ECR_EN1588)) {
  1172. return imx_enet_receive(nc, buf, len);
  1173. } else {
  1174. return imx_fec_receive(nc, buf, len);
  1175. }
  1176. }
/* MMIO ops for the register window: 32-bit accesses only. */
static const MemoryRegionOps imx_eth_ops = {
    .read = imx_eth_read,
    .write = imx_eth_write,
    .valid.min_access_size = 4,
    .valid.max_access_size = 4,
    .endianness = DEVICE_NATIVE_ENDIAN,
};
  1184. static void imx_eth_cleanup(NetClientState *nc)
  1185. {
  1186. IMXFECState *s = IMX_FEC(qemu_get_nic_opaque(nc));
  1187. s->nic = NULL;
  1188. }
/* Callbacks wiring this device into QEMU's network core. */
static NetClientInfo imx_eth_net_info = {
    .type = NET_CLIENT_DRIVER_NIC,
    .size = sizeof(NICState),
    .can_receive = imx_eth_can_receive,
    .receive = imx_eth_receive,
    .cleanup = imx_eth_cleanup,
    .link_status_changed = imx_eth_set_link,
};
/*
 * Realize the device: map the register window, export the two IRQ lines,
 * and create the NIC backend with the configured (or default) MAC address.
 */
static void imx_eth_realize(DeviceState *dev, Error **errp)
{
    IMXFECState *s = IMX_FEC(dev);
    SysBusDevice *sbd = SYS_BUS_DEVICE(dev);

    memory_region_init_io(&s->iomem, OBJECT(dev), &imx_eth_ops, s,
                          TYPE_IMX_FEC, FSL_IMX25_FEC_SIZE);
    sysbus_init_mmio(sbd, &s->iomem);
    /* Two interrupt outputs; which events drive each is decided elsewhere. */
    sysbus_init_irq(sbd, &s->irq[0]);
    sysbus_init_irq(sbd, &s->irq[1]);

    qemu_macaddr_default_if_unset(&s->conf.macaddr);

    s->nic = qemu_new_nic(&imx_eth_net_info, &s->conf,
                          object_get_typename(OBJECT(dev)),
                          dev->id, &dev->mem_reentrancy_guard, s);

    qemu_format_nic_info_str(qemu_get_queue(s->nic), s->conf.macaddr.a);
}
/* User-configurable properties (set by the board/SoC model). */
static Property imx_eth_properties[] = {
    DEFINE_NIC_PROPERTIES(IMXFECState, conf),
    /* Number of TX descriptor rings (ENET variants support up to 3). */
    DEFINE_PROP_UINT32("tx-ring-num", IMXFECState, tx_ring_num, 1),
    /* MDIO address of the attached PHY. */
    DEFINE_PROP_UINT32("phy-num", IMXFECState, phy_num, 0),
    DEFINE_PROP_BOOL("phy-connected", IMXFECState, phy_connected, true),
    /* Optional other FEC instance whose PHY is reached through our MDIO. */
    DEFINE_PROP_LINK("phy-consumer", IMXFECState, phy_consumer, TYPE_IMX_FEC,
                     IMXFECState *),
    DEFINE_PROP_END_OF_LIST(),
};
  1221. static void imx_eth_class_init(ObjectClass *klass, void *data)
  1222. {
  1223. DeviceClass *dc = DEVICE_CLASS(klass);
  1224. dc->vmsd = &vmstate_imx_eth;
  1225. dc->reset = imx_eth_reset;
  1226. device_class_set_props(dc, imx_eth_properties);
  1227. dc->realize = imx_eth_realize;
  1228. dc->desc = "i.MX FEC/ENET Ethernet Controller";
  1229. }
  1230. static void imx_fec_init(Object *obj)
  1231. {
  1232. IMXFECState *s = IMX_FEC(obj);
  1233. s->is_fec = true;
  1234. }
  1235. static void imx_enet_init(Object *obj)
  1236. {
  1237. IMXFECState *s = IMX_FEC(obj);
  1238. s->is_fec = false;
  1239. }
/* Base QOM type: legacy FEC controller. */
static const TypeInfo imx_fec_info = {
    .name = TYPE_IMX_FEC,
    .parent = TYPE_SYS_BUS_DEVICE,
    .instance_size = sizeof(IMXFECState),
    .instance_init = imx_fec_init,
    .class_init = imx_eth_class_init,
};
/* ENET variant: inherits everything from the FEC type except is_fec. */
static const TypeInfo imx_enet_info = {
    .name = TYPE_IMX_ENET,
    .parent = TYPE_IMX_FEC,
    .instance_init = imx_enet_init,
};
/* Register both QOM types with the type system at module-init time. */
static void imx_eth_register_types(void)
{
    type_register_static(&imx_fec_info);
    type_register_static(&imx_enet_info);
}

type_init(imx_eth_register_types)