  1. /*
  2. * Core code for QEMU igb emulation
  3. *
  4. * Datasheet:
  5. * https://www.intel.com/content/dam/www/public/us/en/documents/datasheets/82576eg-gbe-datasheet.pdf
  6. *
  7. * Copyright (c) 2020-2023 Red Hat, Inc.
  8. * Copyright (c) 2015 Ravello Systems LTD (http://ravellosystems.com)
  9. * Developed by Daynix Computing LTD (http://www.daynix.com)
  10. *
  11. * Authors:
  12. * Akihiko Odaki <akihiko.odaki@daynix.com>
  13. * Gal Hammer <gal.hammer@sap.com>
  14. * Marcel Apfelbaum <marcel.apfelbaum@gmail.com>
  15. * Dmitry Fleytman <dmitry@daynix.com>
  16. * Leonid Bloch <leonid@daynix.com>
  17. * Yan Vugenfirer <yan@daynix.com>
  18. *
  19. * Based on work done by:
  20. * Nir Peleg, Tutis Systems Ltd. for Qumranet Inc.
  21. * Copyright (c) 2008 Qumranet
  22. * Based on work done by:
  23. * Copyright (c) 2007 Dan Aloni
  24. * Copyright (c) 2004 Antony T Curtis
  25. *
  26. * This library is free software; you can redistribute it and/or
  27. * modify it under the terms of the GNU Lesser General Public
  28. * License as published by the Free Software Foundation; either
  29. * version 2.1 of the License, or (at your option) any later version.
  30. *
  31. * This library is distributed in the hope that it will be useful,
  32. * but WITHOUT ANY WARRANTY; without even the implied warranty of
  33. * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
  34. * Lesser General Public License for more details.
  35. *
  36. * You should have received a copy of the GNU Lesser General Public
  37. * License along with this library; if not, see <http://www.gnu.org/licenses/>.
  38. */
  39. #include "qemu/osdep.h"
  40. #include "qemu/log.h"
  41. #include "net/net.h"
  42. #include "net/tap.h"
  43. #include "hw/net/mii.h"
  44. #include "hw/pci/msi.h"
  45. #include "hw/pci/msix.h"
  46. #include "system/runstate.h"
  47. #include "net_tx_pkt.h"
  48. #include "net_rx_pkt.h"
  49. #include "igb_common.h"
  50. #include "e1000x_common.h"
  51. #include "igb_core.h"
  52. #include "trace.h"
  53. #define E1000E_MAX_TX_FRAGS (64)
/*
 * One Rx descriptor slot as laid out in guest memory: either the
 * legacy e1000 format or the 82576 "advanced" format, selected per
 * queue via SRRCTL.DESCTYPE.
 */
union e1000_rx_desc_union {
    struct e1000_rx_desc legacy;
    union e1000_adv_rx_desc adv;
};
/*
 * Context passed to the Tx-packet send callback when VMDq Tx switching
 * loops a transmitted frame back into the Rx path.
 */
typedef struct IGBTxPktVmdqCallbackContext {
    IGBCore *core;       /* device core performing the switch */
    NetClientState *nc;  /* backend queue for external transmission */
} IGBTxPktVmdqCallbackContext;
/* Ethernet header followed by up to two stacked (QinQ) VLAN tags. */
typedef struct L2Header {
    struct eth_header eth;
    struct vlan_header vlan[2];
} L2Header;
/*
 * On-the-wire layout of a PTPv2 (IEEE 1588) message header; field names
 * follow the PTPv2 header layout.  Used to recognize packets subject to
 * hardware timestamping.
 */
typedef struct PTP2 {
    uint8_t message_id_transport_specific;
    uint8_t version_ptp;
    uint16_t message_length;
    uint8_t subdomain_number;
    uint8_t reserved0;
    uint16_t flags;
    uint64_t correction;
    uint8_t reserved1[5];
    uint8_t source_communication_technology;
    uint32_t source_uuid_lo;
    uint16_t source_uuid_hi;
    uint16_t source_port_id;
    uint16_t sequence_id;
    uint8_t control;
    uint8_t log_message_period;
} PTP2;
/* Forward declarations for routines defined later in this file. */
static ssize_t
igb_receive_internal(IGBCore *core, const struct iovec *iov, int iovcnt,
                     bool has_vnet, bool *external_tx);
static void igb_raise_interrupts(IGBCore *core, size_t index, uint32_t causes);
static void igb_reset(IGBCore *core, bool sw);
/*
 * Assert the legacy INTx interrupt line and count the assertion in the
 * Interrupt Assertion Count (IAC) statistics register.
 */
static inline void
igb_raise_legacy_irq(IGBCore *core)
{
    trace_e1000e_irq_legacy_notify(true);
    e1000x_inc_reg_if_not_full(core->mac, IAC);
    pci_set_irq(core->owner, 1);
}
/* Deassert the legacy INTx interrupt line. */
static inline void
igb_lower_legacy_irq(IGBCore *core)
{
    trace_e1000e_irq_legacy_notify(false);
    pci_set_irq(core->owner, 0);
}
  101. static void igb_msix_notify(IGBCore *core, unsigned int cause)
  102. {
  103. PCIDevice *dev = core->owner;
  104. uint16_t vfn;
  105. uint32_t effective_eiac;
  106. unsigned int vector;
  107. vfn = 8 - (cause + 2) / IGBVF_MSIX_VEC_NUM;
  108. if (vfn < pcie_sriov_num_vfs(core->owner)) {
  109. dev = pcie_sriov_get_vf_at_index(core->owner, vfn);
  110. assert(dev);
  111. vector = (cause + 2) % IGBVF_MSIX_VEC_NUM;
  112. } else if (cause >= IGB_MSIX_VEC_NUM) {
  113. qemu_log_mask(LOG_GUEST_ERROR,
  114. "igb: Tried to use vector unavailable for PF");
  115. return;
  116. } else {
  117. vector = cause;
  118. }
  119. msix_notify(dev, vector);
  120. trace_e1000e_irq_icr_clear_eiac(core->mac[EICR], core->mac[EIAC]);
  121. effective_eiac = core->mac[EIAC] & BIT(cause);
  122. core->mac[EICR] &= ~effective_eiac;
  123. }
/*
 * Arm (or re-arm) an interrupt moderation timer.  The delay is the
 * guest-programmed value of the timer's EITR register scaled by the
 * register's resolution in nanoseconds.
 */
static inline void
igb_intrmgr_rearm_timer(IGBIntrDelayTimer *timer)
{
    int64_t delay_ns = (int64_t) timer->core->mac[timer->delay_reg] *
                       timer->delay_resolution_ns;

    /* delay_reg is a mac[] index; << 2 turns it into the byte offset
     * the trace point expects. */
    trace_e1000e_irq_rearm_timer(timer->delay_reg << 2, delay_ns);

    timer_mod(timer->timer, qemu_clock_get_ns(QEMU_CLOCK_VIRTUAL) + delay_ns);

    timer->running = true;
}
/*
 * Re-arm a moderation timer after VM resume, but only if it was running
 * when the VM was stopped.  ("intmgr" in the name is a historical typo
 * for "intrmgr"; kept for interface stability.)
 */
static void
igb_intmgr_timer_resume(IGBIntrDelayTimer *timer)
{
    if (timer->running) {
        igb_intrmgr_rearm_timer(timer);
    }
}
/*
 * EITR throttling timer callback: the moderation interval has elapsed,
 * so deliver the MSI-X vector that was postponed for this timer.
 */
static void
igb_intrmgr_on_msix_throttling_timer(void *opaque)
{
    IGBIntrDelayTimer *timer = opaque;
    /* Recover the vector index from the timer's position in eitr[]. */
    int idx = timer - &timer->core->eitr[0];

    timer->running = false;
    trace_e1000e_irq_msix_notify_postponed_vec(idx);
    igb_msix_notify(timer->core, idx);
}
  149. static void
  150. igb_intrmgr_initialize_all_timers(IGBCore *core, bool create)
  151. {
  152. int i;
  153. for (i = 0; i < IGB_INTR_NUM; i++) {
  154. core->eitr[i].core = core;
  155. core->eitr[i].delay_reg = EITR0 + i;
  156. core->eitr[i].delay_resolution_ns = E1000_INTR_DELAY_NS_RES;
  157. }
  158. if (!create) {
  159. return;
  160. }
  161. for (i = 0; i < IGB_INTR_NUM; i++) {
  162. core->eitr[i].timer = timer_new_ns(QEMU_CLOCK_VIRTUAL,
  163. igb_intrmgr_on_msix_throttling_timer,
  164. &core->eitr[i]);
  165. }
  166. }
/* Re-arm every moderation timer that was running before the VM stopped. */
static void
igb_intrmgr_resume(IGBCore *core)
{
    int i;

    for (i = 0; i < IGB_INTR_NUM; i++) {
        igb_intmgr_timer_resume(&core->eitr[i]);
    }
}
/*
 * On reset, cancel every pending moderation timer and invoke its
 * callback immediately so no postponed interrupt is silently lost.
 */
static void
igb_intrmgr_reset(IGBCore *core)
{
    int i;

    for (i = 0; i < IGB_INTR_NUM; i++) {
        if (core->eitr[i].running) {
            timer_del(core->eitr[i].timer);
            igb_intrmgr_on_msix_throttling_timer(&core->eitr[i]);
        }
    }
}
/*
 * Free all moderation timers on device unrealize.  ("unint" in the
 * name is a historical typo for "uninit"; kept for interface
 * stability.)
 */
static void
igb_intrmgr_pci_unint(IGBCore *core)
{
    int i;

    for (i = 0; i < IGB_INTR_NUM; i++) {
        timer_free(core->eitr[i].timer);
    }
}
/* Allocate and initialize the moderation timers on device realize. */
static void
igb_intrmgr_pci_realize(IGBCore *core)
{
    igb_intrmgr_initialize_all_timers(core, true);
}
  199. static inline bool
  200. igb_rx_csum_enabled(IGBCore *core)
  201. {
  202. return (core->mac[RXCSUM] & E1000_RXCSUM_PCSD) ? false : true;
  203. }
/*
 * Whether the device should use legacy-format Rx descriptors.
 * Currently hard-wired to advanced descriptors only.
 */
static inline bool
igb_rx_use_legacy_descriptor(IGBCore *core)
{
    /*
     * TODO: If SRRCTL[n],DESCTYPE = 000b, the 82576 uses the legacy Rx
     * descriptor.
     */
    return false;
}
/*
 * mac[] register indices describing one descriptor ring, plus the
 * ring's queue index.
 */
typedef struct E1000ERingInfo {
    int dbah; /* descriptor base address, high dword */
    int dbal; /* descriptor base address, low dword */
    int dlen; /* descriptor ring length */
    int dh;   /* descriptor head */
    int dt;   /* descriptor tail */
    int idx;  /* queue index */
} E1000ERingInfo;
/* Read the DESCTYPE field of the queue's SRRCTL register. */
static uint32_t
igb_rx_queue_desctyp_get(IGBCore *core, const E1000ERingInfo *r)
{
    /* E1000_SRRCTL() yields a byte offset; >> 2 converts it to a
     * mac[] register index. */
    return core->mac[E1000_SRRCTL(r->idx) >> 2] & E1000_SRRCTL_DESCTYPE_MASK;
}
  226. static bool
  227. igb_rx_use_ps_descriptor(IGBCore *core, const E1000ERingInfo *r)
  228. {
  229. uint32_t desctyp = igb_rx_queue_desctyp_get(core, r);
  230. return desctyp == E1000_SRRCTL_DESCTYPE_HDR_SPLIT ||
  231. desctyp == E1000_SRRCTL_DESCTYPE_HDR_SPLIT_ALWAYS;
  232. }
/*
 * RSS is active when MRQC's low bits select the RSS multi-queue mode,
 * RXCSUM.PCSD is set (i.e. Rx checksum offload is disabled, freeing
 * the descriptor field for the hash) and advanced descriptors are in
 * use.
 */
static inline bool
igb_rss_enabled(IGBCore *core)
{
    return (core->mac[MRQC] & 3) == E1000_MRQC_ENABLE_RSS_MQ &&
           !igb_rx_csum_enabled(core) &&
           !igb_rx_use_legacy_descriptor(core);
}
/* Result of RSS classification for one received packet. */
typedef struct E1000E_RSSInfo_st {
    bool enabled;   /* RSS was applied to this packet */
    uint32_t hash;  /* computed RSS hash value */
    uint32_t queue; /* Rx queue selected via the RETA table */
    uint32_t type;  /* E1000_MRQ_RSS_TYPE_* classification */
} E1000E_RSSInfo;
/*
 * Classify a received packet into one of the E1000_MRQ_RSS_TYPE_*
 * hash types, honoring the per-type enable bits in MRQC and, for IPv6,
 * the extension-header filter controls in RFCTL.  More specific types
 * (TCP/UDP) take priority over plain IP types.  Returns
 * E1000_MRQ_RSS_TYPE_NONE when no enabled type matches.
 */
static uint32_t
igb_rss_get_hash_type(IGBCore *core, struct NetRxPkt *pkt)
{
    bool hasip4, hasip6;
    EthL4HdrProto l4hdr_proto;

    assert(igb_rss_enabled(core));

    net_rx_pkt_get_protocols(pkt, &hasip4, &hasip6, &l4hdr_proto);

    if (hasip4) {
        trace_e1000e_rx_rss_ip4(l4hdr_proto, core->mac[MRQC],
                                E1000_MRQC_EN_TCPIPV4(core->mac[MRQC]),
                                E1000_MRQC_EN_IPV4(core->mac[MRQC]));

        if (l4hdr_proto == ETH_L4_HDR_PROTO_TCP &&
            E1000_MRQC_EN_TCPIPV4(core->mac[MRQC])) {
            return E1000_MRQ_RSS_TYPE_IPV4TCP;
        }

        if (l4hdr_proto == ETH_L4_HDR_PROTO_UDP &&
            (core->mac[MRQC] & E1000_MRQC_RSS_FIELD_IPV4_UDP)) {
            return E1000_MRQ_RSS_TYPE_IPV4UDP;
        }

        if (E1000_MRQC_EN_IPV4(core->mac[MRQC])) {
            return E1000_MRQ_RSS_TYPE_IPV4;
        }
    } else if (hasip6) {
        eth_ip6_hdr_info *ip6info = net_rx_pkt_get_ip6_info(pkt);

        bool ex_dis = core->mac[RFCTL] & E1000_RFCTL_IPV6_EX_DIS;
        bool new_ex_dis = core->mac[RFCTL] & E1000_RFCTL_NEW_IPV6_EXT_DIS;

        /*
         * Following two traces must not be combined because resulting
         * event will have 11 arguments totally and some trace backends
         * (at least "ust") have limitation of maximum 10 arguments per
         * event. Events with more arguments fail to compile for
         * backends like these.
         */
        trace_e1000e_rx_rss_ip6_rfctl(core->mac[RFCTL]);
        trace_e1000e_rx_rss_ip6(ex_dis, new_ex_dis, l4hdr_proto,
                                ip6info->has_ext_hdrs,
                                ip6info->rss_ex_dst_valid,
                                ip6info->rss_ex_src_valid,
                                core->mac[MRQC],
                                E1000_MRQC_EN_TCPIPV6EX(core->mac[MRQC]),
                                E1000_MRQC_EN_IPV6EX(core->mac[MRQC]),
                                E1000_MRQC_EN_IPV6(core->mac[MRQC]));

        /* The extended (EX) types are only usable when RFCTL does not
         * disable hashing over extension headers / EX addresses. */
        if ((!ex_dis || !ip6info->has_ext_hdrs) &&
            (!new_ex_dis || !(ip6info->rss_ex_dst_valid ||
                              ip6info->rss_ex_src_valid))) {

            if (l4hdr_proto == ETH_L4_HDR_PROTO_TCP &&
                E1000_MRQC_EN_TCPIPV6EX(core->mac[MRQC])) {
                return E1000_MRQ_RSS_TYPE_IPV6TCPEX;
            }

            if (l4hdr_proto == ETH_L4_HDR_PROTO_UDP &&
                (core->mac[MRQC] & E1000_MRQC_RSS_FIELD_IPV6_UDP)) {
                return E1000_MRQ_RSS_TYPE_IPV6UDP;
            }

            if (E1000_MRQC_EN_IPV6EX(core->mac[MRQC])) {
                return E1000_MRQ_RSS_TYPE_IPV6EX;
            }
        }

        if (E1000_MRQC_EN_IPV6(core->mac[MRQC])) {
            return E1000_MRQ_RSS_TYPE_IPV6;
        }
    }

    return E1000_MRQ_RSS_TYPE_NONE;
}
  309. static uint32_t
  310. igb_rss_calc_hash(IGBCore *core, struct NetRxPkt *pkt, E1000E_RSSInfo *info)
  311. {
  312. NetRxPktRssType type;
  313. assert(igb_rss_enabled(core));
  314. switch (info->type) {
  315. case E1000_MRQ_RSS_TYPE_IPV4:
  316. type = NetPktRssIpV4;
  317. break;
  318. case E1000_MRQ_RSS_TYPE_IPV4TCP:
  319. type = NetPktRssIpV4Tcp;
  320. break;
  321. case E1000_MRQ_RSS_TYPE_IPV6TCPEX:
  322. type = NetPktRssIpV6TcpEx;
  323. break;
  324. case E1000_MRQ_RSS_TYPE_IPV6:
  325. type = NetPktRssIpV6;
  326. break;
  327. case E1000_MRQ_RSS_TYPE_IPV6EX:
  328. type = NetPktRssIpV6Ex;
  329. break;
  330. case E1000_MRQ_RSS_TYPE_IPV4UDP:
  331. type = NetPktRssIpV4Udp;
  332. break;
  333. case E1000_MRQ_RSS_TYPE_IPV6UDP:
  334. type = NetPktRssIpV6Udp;
  335. break;
  336. default:
  337. g_assert_not_reached();
  338. }
  339. return net_rx_pkt_calc_rss_hash(pkt, type, (uint8_t *) &core->mac[RSSRK]);
  340. }
/*
 * Classify 'pkt' for RSS and fill 'info' with the hash type, hash
 * value and target queue (looked up through the RETA table).  For
 * Tx-originated packets (tx == true) or when RSS is globally disabled,
 * RSS is reported as off and queue 0 is used.
 */
static void
igb_rss_parse_packet(IGBCore *core, struct NetRxPkt *pkt, bool tx,
                     E1000E_RSSInfo *info)
{
    trace_e1000e_rx_rss_started();

    if (tx || !igb_rss_enabled(core)) {
        info->enabled = false;
        info->hash = 0;
        info->queue = 0;
        info->type = 0;
        trace_e1000e_rx_rss_disabled();
        return;
    }

    info->enabled = true;

    info->type = igb_rss_get_hash_type(core, pkt);

    trace_e1000e_rx_rss_type(info->type);

    if (info->type == E1000_MRQ_RSS_TYPE_NONE) {
        info->hash = 0;
        info->queue = 0;
        return;
    }

    info->hash = igb_rss_calc_hash(core, pkt, info);
    info->queue = E1000_RSS_QUEUE(&core->mac[RETA], info->hash);
}
/*
 * Decide whether to insert a VLAN tag into the outgoing packet and do
 * so if needed.  In VMDq mode (MRQC bit 0 set), the pool's VMVIR
 * register can force a default VLAN or forbid insertion entirely,
 * overriding the per-descriptor request in 'insert_vlan'/'vlan'.
 */
static void
igb_tx_insert_vlan(IGBCore *core, uint16_t qn, struct igb_tx *tx,
                   uint16_t vlan, bool insert_vlan)
{
    if (core->mac[MRQC] & 1) {
        uint16_t pool = qn % IGB_NUM_VM_POOLS;

        if (core->mac[VMVIR0 + pool] & E1000_VMVIR_VLANA_DEFAULT) {
            /* always insert default VLAN */
            insert_vlan = true;
            vlan = core->mac[VMVIR0 + pool] & 0xffff;
        } else if (core->mac[VMVIR0 + pool] & E1000_VMVIR_VLANA_NEVER) {
            insert_vlan = false;
        }
    }

    if (insert_vlan) {
        /* VET holds the guest-programmed VLAN ethertype. */
        net_tx_pkt_setup_vlan_header_ex(tx->tx_pkt, vlan,
                                        core->mac[VET] & 0xffff);
    }
}
/*
 * Apply the offloads requested by the advanced Tx descriptors to the
 * packet about to be sent: TSO (with IP checksum refresh), L4 checksum
 * (SCTP via direct checksum update, TCP/UDP via the virtio header) and
 * IPv4 header checksum.  Returns false if building the virtio header
 * or updating a checksum fails.
 */
static bool
igb_setup_tx_offloads(IGBCore *core, struct igb_tx *tx)
{
    /* The IDX bit of olinfo_status selects which of the two cached
     * context descriptors applies to this packet. */
    uint32_t idx = (tx->first_olinfo_status >> 4) & 1;

    if (tx->first_cmd_type_len & E1000_ADVTXD_DCMD_TSE) {
        uint32_t mss = tx->ctx[idx].mss_l4len_idx >> E1000_ADVTXD_MSS_SHIFT;
        if (!net_tx_pkt_build_vheader(tx->tx_pkt, true, true, mss)) {
            return false;
        }

        net_tx_pkt_update_ip_checksums(tx->tx_pkt);
        /* Count the segmentation context in the TSO statistics. */
        e1000x_inc_reg_if_not_full(core->mac, TSCTC);
        return true;
    }

    if ((tx->first_olinfo_status & E1000_ADVTXD_POTS_TXSM) &&
        !((tx->ctx[idx].type_tucmd_mlhl & E1000_ADVTXD_TUCMD_L4T_SCTP) ?
          net_tx_pkt_update_sctp_checksum(tx->tx_pkt) :
          net_tx_pkt_build_vheader(tx->tx_pkt, false, true, 0))) {
        return false;
    }

    if (tx->first_olinfo_status & E1000_ADVTXD_POTS_IXSM) {
        net_tx_pkt_update_ip_hdr_checksum(tx->tx_pkt);
    }

    return true;
}
/*
 * Send-callback used for MAC loopback: feed the transmitted frame
 * (the variant carrying the virtio-net header) straight back into the
 * device's own Rx path.
 */
static void igb_tx_pkt_mac_callback(void *core,
                                    const struct iovec *iov,
                                    int iovcnt,
                                    const struct iovec *virt_iov,
                                    int virt_iovcnt)
{
    igb_receive_internal(core, virt_iov, virt_iovcnt, true, NULL);
}
/*
 * Send-callback used for VMDq Tx switching: first offer the frame to
 * the internal Rx path (VM-to-VM switching); if the switching logic
 * also marks it for external transmission, forward it to the backend —
 * with the virtio-net header when the backend supports it, raw
 * otherwise.
 */
static void igb_tx_pkt_vmdq_callback(void *opaque,
                                     const struct iovec *iov,
                                     int iovcnt,
                                     const struct iovec *virt_iov,
                                     int virt_iovcnt)
{
    IGBTxPktVmdqCallbackContext *context = opaque;
    bool external_tx;

    /* igb_receive_internal() sets external_tx when given a non-NULL
     * pointer. */
    igb_receive_internal(context->core, virt_iov, virt_iovcnt, true,
                         &external_tx);

    if (external_tx) {
        if (context->core->has_vnet) {
            qemu_sendv_packet(context->nc, virt_iov, virt_iovcnt);
        } else {
            qemu_sendv_packet(context->nc, iov, iovcnt);
        }
    }
}
/* TX Packets Switching (7.10.3.6) */
/*
 * Transmit 'tx' either through the VMDq switching path (when VMDq mode
 * and DTXSWC loopback are both enabled) or directly to the backend.
 * Returns the send result.
 */
static bool igb_tx_pkt_switch(IGBCore *core, struct igb_tx *tx,
                              NetClientState *nc)
{
    IGBTxPktVmdqCallbackContext context;

    /* TX switching is only used to serve VM to VM traffic. */
    if (!(core->mac[MRQC] & 1)) {
        goto send_out;
    }

    /* TX switching requires DTXSWC.Loopback_en bit enabled. */
    if (!(core->mac[DTXSWC] & E1000_DTXSWC_VMDQ_LOOPBACK_EN)) {
        goto send_out;
    }

    context.core = core;
    context.nc = nc;

    return net_tx_pkt_send_custom(tx->tx_pkt, false,
                                  igb_tx_pkt_vmdq_callback, &context);

send_out:
    return net_tx_pkt_send(tx->tx_pkt, nc);
}
  454. static bool
  455. igb_tx_pkt_send(IGBCore *core, struct igb_tx *tx, int queue_index)
  456. {
  457. int target_queue = MIN(core->max_queue_num, queue_index);
  458. NetClientState *queue = qemu_get_subqueue(core->owner_nic, target_queue);
  459. if (!igb_setup_tx_offloads(core, tx)) {
  460. return false;
  461. }
  462. net_tx_pkt_dump(tx->tx_pkt);
  463. if ((core->phy[MII_BMCR] & MII_BMCR_LOOPBACK) ||
  464. ((core->mac[RCTL] & E1000_RCTL_LBM_MAC) == E1000_RCTL_LBM_MAC)) {
  465. return net_tx_pkt_send_custom(tx->tx_pkt, false,
  466. igb_tx_pkt_mac_callback, core);
  467. } else {
  468. return igb_tx_pkt_switch(core, tx, queue);
  469. }
  470. }
  471. static void
  472. igb_on_tx_done_update_stats(IGBCore *core, struct NetTxPkt *tx_pkt, int qn)
  473. {
  474. static const int PTCregs[6] = { PTC64, PTC127, PTC255, PTC511,
  475. PTC1023, PTC1522 };
  476. size_t tot_len = net_tx_pkt_get_total_len(tx_pkt) + 4;
  477. e1000x_increase_size_stats(core->mac, PTCregs, tot_len);
  478. e1000x_inc_reg_if_not_full(core->mac, TPT);
  479. e1000x_grow_8reg_if_not_full(core->mac, TOTL, tot_len);
  480. switch (net_tx_pkt_get_packet_type(tx_pkt)) {
  481. case ETH_PKT_BCAST:
  482. e1000x_inc_reg_if_not_full(core->mac, BPTC);
  483. break;
  484. case ETH_PKT_MCAST:
  485. e1000x_inc_reg_if_not_full(core->mac, MPTC);
  486. break;
  487. case ETH_PKT_UCAST:
  488. break;
  489. default:
  490. g_assert_not_reached();
  491. }
  492. e1000x_inc_reg_if_not_full(core->mac, GPTC);
  493. e1000x_grow_8reg_if_not_full(core->mac, GOTCL, tot_len);
  494. if (core->mac[MRQC] & 1) {
  495. uint16_t pool = qn % IGB_NUM_VM_POOLS;
  496. core->mac[PVFGOTC0 + (pool * 64)] += tot_len;
  497. core->mac[PVFGPTC0 + (pool * 64)]++;
  498. }
  499. }
/*
 * Process one TX descriptor: cache context descriptors, gather data
 * descriptor fragments, and on EOP parse and transmit the assembled
 * packet, updating statistics and (optionally) the TX timestamp
 * registers.
 */
static void
igb_process_tx_desc(IGBCore *core,
                    PCIDevice *dev,
                    struct igb_tx *tx,
                    union e1000_adv_tx_desc *tx_desc,
                    int queue_index)
{
    struct e1000_adv_tx_context_desc *tx_ctx_desc;
    uint32_t cmd_type_len;
    uint32_t idx;
    uint64_t buffer_addr;
    uint16_t length;

    cmd_type_len = le32_to_cpu(tx_desc->read.cmd_type_len);

    if (cmd_type_len & E1000_ADVTXD_DCMD_DEXT) {
        if ((cmd_type_len & E1000_ADVTXD_DTYP_DATA) ==
            E1000_ADVTXD_DTYP_DATA) {
            /* advanced transmit data descriptor */
            if (tx->first) {
                /* Latch the first descriptor's offload requests for the
                 * whole packet. */
                tx->first_cmd_type_len = cmd_type_len;
                tx->first_olinfo_status = le32_to_cpu(tx_desc->read.olinfo_status);
                tx->first = false;
            }
        } else if ((cmd_type_len & E1000_ADVTXD_DTYP_CTXT) ==
                   E1000_ADVTXD_DTYP_CTXT) {
            /* advanced transmit context descriptor */
            tx_ctx_desc = (struct e1000_adv_tx_context_desc *)tx_desc;
            /* Bit 4 of mss_l4len_idx selects which context slot to fill. */
            idx = (le32_to_cpu(tx_ctx_desc->mss_l4len_idx) >> 4) & 1;
            tx->ctx[idx].vlan_macip_lens = le32_to_cpu(tx_ctx_desc->vlan_macip_lens);
            tx->ctx[idx].seqnum_seed = le32_to_cpu(tx_ctx_desc->seqnum_seed);
            tx->ctx[idx].type_tucmd_mlhl = le32_to_cpu(tx_ctx_desc->type_tucmd_mlhl);
            tx->ctx[idx].mss_l4len_idx = le32_to_cpu(tx_ctx_desc->mss_l4len_idx);
            return;
        } else {
            /* unknown descriptor type */
            return;
        }
    } else {
        /* legacy descriptor */
        /* TODO: Implement a support for legacy descriptors (7.2.2.1). */
    }

    buffer_addr = le64_to_cpu(tx_desc->read.buffer_addr);
    length = cmd_type_len & 0xFFFF;

    if (!tx->skip_cp) {
        /* Map the fragment; on failure drop the rest of this packet. */
        if (!net_tx_pkt_add_raw_fragment_pci(tx->tx_pkt, dev,
                                             buffer_addr, length)) {
            tx->skip_cp = true;
        }
    }

    if (cmd_type_len & E1000_TXD_CMD_EOP) {
        if (!tx->skip_cp && net_tx_pkt_parse(tx->tx_pkt)) {
            idx = (tx->first_olinfo_status >> 4) & 1;
            igb_tx_insert_vlan(core, queue_index, tx,
                tx->ctx[idx].vlan_macip_lens >> IGB_TX_FLAGS_VLAN_SHIFT,
                !!(tx->first_cmd_type_len & E1000_TXD_CMD_VLE));

            /* Latch a TX timestamp once; it stays latched until the
             * guest consumes it (TSYNCTXCTL.VALID). */
            if ((tx->first_cmd_type_len & E1000_ADVTXD_MAC_TSTAMP) &&
                (core->mac[TSYNCTXCTL] & E1000_TSYNCTXCTL_ENABLED) &&
                !(core->mac[TSYNCTXCTL] & E1000_TSYNCTXCTL_VALID)) {
                core->mac[TSYNCTXCTL] |= E1000_TSYNCTXCTL_VALID;
                e1000x_timestamp(core->mac, core->timadj, TXSTMPL, TXSTMPH);
            }

            if (igb_tx_pkt_send(core, tx, queue_index)) {
                igb_on_tx_done_update_stats(core, tx->tx_pkt, queue_index);
            }
        }
        /* Reset packet-assembly state for the next packet. */
        tx->first = true;
        tx->skip_cp = false;
        net_tx_pkt_reset(tx->tx_pkt, net_tx_pkt_unmap_frag_pci, dev);
    }
}
  569. static uint32_t igb_tx_wb_eic(IGBCore *core, int queue_idx)
  570. {
  571. uint32_t n, ent = 0;
  572. n = igb_ivar_entry_tx(queue_idx);
  573. ent = (core->mac[IVAR0 + n / 4] >> (8 * (n % 4))) & 0xff;
  574. return (ent & E1000_IVAR_VALID) ? BIT(ent & 0x1f) : 0;
  575. }
  576. static uint32_t igb_rx_wb_eic(IGBCore *core, int queue_idx)
  577. {
  578. uint32_t n, ent = 0;
  579. n = igb_ivar_entry_rx(queue_idx);
  580. ent = (core->mac[IVAR0 + n / 4] >> (8 * (n % 4))) & 0xff;
  581. return (ent & E1000_IVAR_VALID) ? BIT(ent & 0x1f) : 0;
  582. }
  583. static inline bool
  584. igb_ring_empty(IGBCore *core, const E1000ERingInfo *r)
  585. {
  586. return core->mac[r->dh] == core->mac[r->dt] ||
  587. core->mac[r->dt] >= core->mac[r->dlen] / E1000_RING_DESC_LEN;
  588. }
  589. static inline uint64_t
  590. igb_ring_base(IGBCore *core, const E1000ERingInfo *r)
  591. {
  592. uint64_t bah = core->mac[r->dbah];
  593. uint64_t bal = core->mac[r->dbal];
  594. return (bah << 32) + bal;
  595. }
  596. static inline uint64_t
  597. igb_ring_head_descr(IGBCore *core, const E1000ERingInfo *r)
  598. {
  599. return igb_ring_base(core, r) + E1000_RING_DESC_LEN * core->mac[r->dh];
  600. }
  601. static inline void
  602. igb_ring_advance(IGBCore *core, const E1000ERingInfo *r, uint32_t count)
  603. {
  604. core->mac[r->dh] += count;
  605. if (core->mac[r->dh] * E1000_RING_DESC_LEN >= core->mac[r->dlen]) {
  606. core->mac[r->dh] = 0;
  607. }
  608. }
  609. static inline uint32_t
  610. igb_ring_free_descr_num(IGBCore *core, const E1000ERingInfo *r)
  611. {
  612. trace_e1000e_ring_free_space(r->idx, core->mac[r->dlen],
  613. core->mac[r->dh], core->mac[r->dt]);
  614. if (core->mac[r->dh] <= core->mac[r->dt]) {
  615. return core->mac[r->dt] - core->mac[r->dh];
  616. }
  617. if (core->mac[r->dh] > core->mac[r->dt]) {
  618. return core->mac[r->dlen] / E1000_RING_DESC_LEN +
  619. core->mac[r->dt] - core->mac[r->dh];
  620. }
  621. g_assert_not_reached();
  622. }
/* A ring counts as enabled once the guest programmed a non-zero length. */
static inline bool
igb_ring_enabled(IGBCore *core, const E1000ERingInfo *r)
{
    return core->mac[r->dlen] > 0;
}
/* Pairing of a TX ring's register-layout entry with its per-queue TX state. */
typedef struct IGB_TxRing_st {
    const E1000ERingInfo *i;    /* register indices describing the ring */
    struct igb_tx *tx;          /* packet assembly state for this queue */
} IGB_TxRing;
/*
 * Translate a per-queue register index into a queue number; queue
 * register blocks are spaced 16 32-bit registers apart.
 */
static inline int
igb_mq_queue_idx(int base_reg_idx, int reg_idx)
{
    return (reg_idx - base_reg_idx) / 16;
}
/*
 * Bind 'txr' to TX queue 'idx': point it at the static table entry
 * describing the queue's ring registers and at the per-queue TX state.
 */
static inline void
igb_tx_ring_init(IGBCore *core, IGB_TxRing *txr, int idx)
{
    /* One entry per TX queue: BAH, BAL, LEN, head, tail, queue index. */
    static const E1000ERingInfo i[IGB_NUM_QUEUES] = {
        { TDBAH0, TDBAL0, TDLEN0, TDH0, TDT0, 0 },
        { TDBAH1, TDBAL1, TDLEN1, TDH1, TDT1, 1 },
        { TDBAH2, TDBAL2, TDLEN2, TDH2, TDT2, 2 },
        { TDBAH3, TDBAL3, TDLEN3, TDH3, TDT3, 3 },
        { TDBAH4, TDBAL4, TDLEN4, TDH4, TDT4, 4 },
        { TDBAH5, TDBAL5, TDLEN5, TDH5, TDT5, 5 },
        { TDBAH6, TDBAL6, TDLEN6, TDH6, TDT6, 6 },
        { TDBAH7, TDBAL7, TDLEN7, TDH7, TDT7, 7 },
        { TDBAH8, TDBAL8, TDLEN8, TDH8, TDT8, 8 },
        { TDBAH9, TDBAL9, TDLEN9, TDH9, TDT9, 9 },
        { TDBAH10, TDBAL10, TDLEN10, TDH10, TDT10, 10 },
        { TDBAH11, TDBAL11, TDLEN11, TDH11, TDT11, 11 },
        { TDBAH12, TDBAL12, TDLEN12, TDH12, TDT12, 12 },
        { TDBAH13, TDBAL13, TDLEN13, TDH13, TDT13, 13 },
        { TDBAH14, TDBAL14, TDLEN14, TDH14, TDT14, 14 },
        { TDBAH15, TDBAL15, TDLEN15, TDH15, TDT15, 15 }
    };

    assert(idx < ARRAY_SIZE(i));

    txr->i = &i[idx];
    txr->tx = &core->tx[idx];
}
/* An RX ring is fully described by its register-layout table entry. */
typedef struct E1000E_RxRing_st {
    const E1000ERingInfo *i;
} E1000E_RxRing;
/*
 * Bind 'rxr' to RX queue 'idx' by pointing it at the static table
 * entry describing the queue's ring registers.
 */
static inline void
igb_rx_ring_init(IGBCore *core, E1000E_RxRing *rxr, int idx)
{
    /* One entry per RX queue: BAH, BAL, LEN, head, tail, queue index. */
    static const E1000ERingInfo i[IGB_NUM_QUEUES] = {
        { RDBAH0, RDBAL0, RDLEN0, RDH0, RDT0, 0 },
        { RDBAH1, RDBAL1, RDLEN1, RDH1, RDT1, 1 },
        { RDBAH2, RDBAL2, RDLEN2, RDH2, RDT2, 2 },
        { RDBAH3, RDBAL3, RDLEN3, RDH3, RDT3, 3 },
        { RDBAH4, RDBAL4, RDLEN4, RDH4, RDT4, 4 },
        { RDBAH5, RDBAL5, RDLEN5, RDH5, RDT5, 5 },
        { RDBAH6, RDBAL6, RDLEN6, RDH6, RDT6, 6 },
        { RDBAH7, RDBAL7, RDLEN7, RDH7, RDT7, 7 },
        { RDBAH8, RDBAL8, RDLEN8, RDH8, RDT8, 8 },
        { RDBAH9, RDBAL9, RDLEN9, RDH9, RDT9, 9 },
        { RDBAH10, RDBAL10, RDLEN10, RDH10, RDT10, 10 },
        { RDBAH11, RDBAL11, RDLEN11, RDH11, RDT11, 11 },
        { RDBAH12, RDBAL12, RDLEN12, RDH12, RDT12, 12 },
        { RDBAH13, RDBAL13, RDLEN13, RDH13, RDT13, 13 },
        { RDBAH14, RDBAL14, RDLEN14, RDH14, RDT14, 14 },
        { RDBAH15, RDBAL15, RDLEN15, RDH15, RDT15, 15 }
    };

    assert(idx < ARRAY_SIZE(i));

    rxr->i = &i[idx];
}
/*
 * Perform descriptor write-back for a processed TX descriptor and
 * return the EICR bit(s) to raise, or 0 when no status report was
 * requested.
 */
static uint32_t
igb_txdesc_writeback(IGBCore *core, dma_addr_t base,
                     union e1000_adv_tx_desc *tx_desc,
                     const E1000ERingInfo *txi)
{
    PCIDevice *d;
    uint32_t cmd_type_len = le32_to_cpu(tx_desc->read.cmd_type_len);
    uint64_t tdwba;

    tdwba = core->mac[E1000_TDWBAL(txi->idx) >> 2];
    tdwba |= (uint64_t)core->mac[E1000_TDWBAH(txi->idx) >> 2] << 32;

    /* Only descriptors with the Report Status bit set are written back. */
    if (!(cmd_type_len & E1000_TXD_CMD_RS)) {
        return 0;
    }

    /* DMA on behalf of the owning VF when one exists, else the PF. */
    d = pcie_sriov_get_vf_at_index(core->owner, txi->idx % 8);
    if (!d) {
        d = core->owner;
    }

    if (tdwba & 1) {
        /* Head write-back mode: store the head pointer at TDWBA. */
        uint32_t buffer = cpu_to_le32(core->mac[txi->dh]);
        pci_dma_write(d, tdwba & ~3, &buffer, sizeof(buffer));
    } else {
        /* Default mode: set DD in the descriptor's status field. */
        uint32_t status = le32_to_cpu(tx_desc->wb.status) | E1000_TXD_STAT_DD;

        tx_desc->wb.status = cpu_to_le32(status);
        pci_dma_write(d, base + offsetof(union e1000_adv_tx_desc, wb),
                      &tx_desc->wb, sizeof(tx_desc->wb));
    }

    return igb_tx_wb_eic(core, txi->idx);
}
  717. static inline bool
  718. igb_tx_enabled(IGBCore *core, const E1000ERingInfo *txi)
  719. {
  720. bool vmdq = core->mac[MRQC] & 1;
  721. uint16_t qn = txi->idx;
  722. uint16_t pool = qn % IGB_NUM_VM_POOLS;
  723. return (core->mac[TCTL] & E1000_TCTL_EN) &&
  724. (!vmdq || core->mac[VFTE] & BIT(pool)) &&
  725. (core->mac[TXDCTL0 + (qn * 16)] & E1000_TXDCTL_QUEUE_ENABLE);
  726. }
/*
 * Drain a TX ring: read each descriptor via DMA, process it, advance
 * the head, and accumulate write-back interrupt causes that are raised
 * once the ring has been emptied.
 */
static void
igb_start_xmit(IGBCore *core, const IGB_TxRing *txr)
{
    PCIDevice *d;
    dma_addr_t base;
    union e1000_adv_tx_desc desc;
    const E1000ERingInfo *txi = txr->i;
    uint32_t eic = 0;

    if (!igb_tx_enabled(core, txi)) {
        trace_e1000e_tx_disabled();
        return;
    }

    /* DMA on behalf of the owning VF when one exists, else the PF. */
    d = pcie_sriov_get_vf_at_index(core->owner, txi->idx % 8);
    if (!d) {
        d = core->owner;
    }

    while (!igb_ring_empty(core, txi)) {
        base = igb_ring_head_descr(core, txi);

        pci_dma_read(d, base, &desc, sizeof(desc));

        trace_e1000e_tx_descr((void *)(intptr_t)desc.read.buffer_addr,
                              desc.read.cmd_type_len, desc.wb.status);

        igb_process_tx_desc(core, d, txr->tx, &desc, txi->idx);
        igb_ring_advance(core, txi, 1);
        eic |= igb_txdesc_writeback(core, base, &desc, txi);
    }

    if (eic) {
        igb_raise_interrupts(core, EICR, eic);
        igb_raise_interrupts(core, ICR, E1000_ICR_TXDW);
    }

    net_tx_pkt_reset(txr->tx->tx_pkt, net_tx_pkt_unmap_frag_pci, d);
}
  758. static uint32_t
  759. igb_rxbufsize(IGBCore *core, const E1000ERingInfo *r)
  760. {
  761. uint32_t srrctl = core->mac[E1000_SRRCTL(r->idx) >> 2];
  762. uint32_t bsizepkt = srrctl & E1000_SRRCTL_BSIZEPKT_MASK;
  763. if (bsizepkt) {
  764. return bsizepkt << E1000_SRRCTL_BSIZEPKT_SHIFT;
  765. }
  766. return e1000x_rxbufsize(core->mac[RCTL]);
  767. }
  768. static bool
  769. igb_has_rxbufs(IGBCore *core, const E1000ERingInfo *r, size_t total_size)
  770. {
  771. uint32_t bufs = igb_ring_free_descr_num(core, r);
  772. uint32_t bufsize = igb_rxbufsize(core, r);
  773. trace_e1000e_rx_has_buffers(r->idx, bufs, total_size, bufsize);
  774. return total_size <= bufs / (core->rx_desc_len / E1000_MIN_RX_DESC_LEN) *
  775. bufsize;
  776. }
  777. static uint32_t
  778. igb_rxhdrbufsize(IGBCore *core, const E1000ERingInfo *r)
  779. {
  780. uint32_t srrctl = core->mac[E1000_SRRCTL(r->idx) >> 2];
  781. return (srrctl & E1000_SRRCTL_BSIZEHDRSIZE_MASK) >>
  782. E1000_SRRCTL_BSIZEHDRSIZE_SHIFT;
  783. }
  784. void
  785. igb_start_recv(IGBCore *core)
  786. {
  787. int i;
  788. trace_e1000e_rx_start_recv();
  789. for (i = 0; i <= core->max_queue_num; i++) {
  790. qemu_flush_queued_packets(qemu_get_subqueue(core->owner_nic, i));
  791. }
  792. }
  793. bool
  794. igb_can_receive(IGBCore *core)
  795. {
  796. int i;
  797. if (!e1000x_rx_ready(core->owner, core->mac)) {
  798. return false;
  799. }
  800. for (i = 0; i < IGB_NUM_QUEUES; i++) {
  801. E1000E_RxRing rxr;
  802. if (!(core->mac[RXDCTL0 + (i * 16)] & E1000_RXDCTL_QUEUE_ENABLE)) {
  803. continue;
  804. }
  805. igb_rx_ring_init(core, &rxr, i);
  806. if (igb_ring_enabled(core, rxr.i) && igb_has_rxbufs(core, rxr.i, 1)) {
  807. trace_e1000e_rx_can_recv();
  808. return true;
  809. }
  810. }
  811. trace_e1000e_rx_can_recv_rings_full();
  812. return false;
  813. }
  814. ssize_t
  815. igb_receive(IGBCore *core, const uint8_t *buf, size_t size)
  816. {
  817. const struct iovec iov = {
  818. .iov_base = (uint8_t *)buf,
  819. .iov_len = size
  820. };
  821. return igb_receive_iov(core, &iov, 1);
  822. }
/* Is RX IPv4 checksum offload enabled (RXCSUM.IPOFLD)? */
static inline bool
igb_rx_l3_cso_enabled(IGBCore *core)
{
    return !!(core->mac[RXCSUM] & E1000_RXCSUM_IPOFLD);
}
/* Is RX TCP/UDP checksum offload enabled (RXCSUM.TUOFLD)? */
static inline bool
igb_rx_l4_cso_enabled(IGBCore *core)
{
    return !!(core->mac[RXCSUM] & E1000_RXCSUM_TUOFLD);
}
/*
 * Check RX frame length limits. With long-packet-enable the limit is
 * 'rlpml', compared against the frame plus the 4-byte FCS; otherwise
 * the frame may not exceed the standard MTU plus the L2 headers,
 * accounting for any VLAN tags present.
 */
static bool igb_rx_is_oversized(IGBCore *core, const struct eth_header *ehdr,
                                size_t size, size_t vlan_num,
                                bool lpe, uint16_t rlpml)
{
    size_t vlan_header_size = sizeof(struct vlan_header) * vlan_num;
    size_t header_size = sizeof(struct eth_header) + vlan_header_size;

    return lpe ? size + ETH_FCS_LEN > rlpml : size > header_size + ETH_MTU;
}
/*
 * Decide which RX queues should receive the packet and return them as
 * a bitmap of queue indices. Applies EtherType filters, VLAN
 * filtering, MAC address matching (including per-pool assignment in
 * VMDq mode), per-pool oversize checks and RSS. May also latch a PTP
 * RX timestamp. '*external_tx' is cleared when a matched unicast is
 * consumed locally and must not also go out on the wire.
 */
static uint16_t igb_receive_assign(IGBCore *core, const struct iovec *iov,
                                   size_t iovcnt, size_t iov_ofs,
                                   const L2Header *l2_header, size_t size,
                                   E1000E_RSSInfo *rss_info,
                                   uint16_t *etqf, bool *ts, bool *external_tx)
{
    static const int ta_shift[] = { 4, 3, 2, 0 };
    const struct eth_header *ehdr = &l2_header->eth;
    uint32_t f, ra[2], *macp, rctl = core->mac[RCTL];
    uint16_t queues = 0;
    uint16_t oversized = 0;
    size_t vlan_num = 0;
    PTP2 ptp2;
    bool lpe;
    uint16_t rlpml;
    int i;

    memset(rss_info, 0, sizeof(E1000E_RSSInfo));
    *ts = false;

    if (external_tx) {
        *external_tx = true;
    }

    /* Count VLAN tags; CTRL_EXT bit 26 enables double-VLAN matching
     * (outer tag against VET high half, inner against VET low half). */
    if (core->mac[CTRL_EXT] & BIT(26)) {
        if (be16_to_cpu(ehdr->h_proto) == core->mac[VET] >> 16 &&
            be16_to_cpu(l2_header->vlan[0].h_proto) == (core->mac[VET] & 0xffff)) {
            vlan_num = 2;
        }
    } else {
        if (be16_to_cpu(ehdr->h_proto) == (core->mac[VET] & 0xffff)) {
            vlan_num = 1;
        }
    }

    /* Global oversize check, skipped when store-bad-packets is set. */
    lpe = !!(core->mac[RCTL] & E1000_RCTL_LPE);
    rlpml = core->mac[RLPML];
    if (!(core->mac[RCTL] & E1000_RCTL_SBP) &&
        igb_rx_is_oversized(core, ehdr, size, vlan_num, lpe, rlpml)) {
        trace_e1000x_rx_oversized(size);
        return queues;
    }

    /* EtherType filters; a matching 1588 filter may additionally latch
     * the PTP RX timestamp registers (until the guest consumes them). */
    for (*etqf = 0; *etqf < 8; (*etqf)++) {
        if ((core->mac[ETQF0 + *etqf] & E1000_ETQF_FILTER_ENABLE) &&
            be16_to_cpu(ehdr->h_proto) == (core->mac[ETQF0 + *etqf] & E1000_ETQF_ETYPE_MASK)) {
            if ((core->mac[ETQF0 + *etqf] & E1000_ETQF_1588) &&
                (core->mac[TSYNCRXCTL] & E1000_TSYNCRXCTL_ENABLED) &&
                !(core->mac[TSYNCRXCTL] & E1000_TSYNCRXCTL_VALID) &&
                iov_to_buf(iov, iovcnt, iov_ofs + ETH_HLEN, &ptp2, sizeof(ptp2)) >= sizeof(ptp2) &&
                (ptp2.version_ptp & 15) == 2 &&
                ptp2.message_id_transport_specific == ((core->mac[TSYNCRXCFG] >> 8) & 255)) {
                e1000x_timestamp(core->mac, core->timadj, RXSTMPL, RXSTMPH);
                *ts = true;
                core->mac[TSYNCRXCTL] |= E1000_TSYNCRXCTL_VALID;
                core->mac[RXSATRL] = le32_to_cpu(ptp2.source_uuid_lo);
                core->mac[RXSATRH] = le16_to_cpu(ptp2.source_uuid_hi) |
                                     (le16_to_cpu(ptp2.sequence_id) << 16);
            }
            break;
        }
    }

    /* The VLAN filter applies to the innermost tag. */
    if (vlan_num &&
        !e1000x_rx_vlan_filter(core->mac, l2_header->vlan + vlan_num - 1)) {
        return queues;
    }

    if (core->mac[MRQC] & 1) {
        /* VMDq enabled: assign the packet to VM pools. */
        if (is_broadcast_ether_addr(ehdr->h_dest)) {
            /* Broadcast goes to every pool that accepts it (BAM). */
            for (i = 0; i < IGB_NUM_VM_POOLS; i++) {
                if (core->mac[VMOLR0 + i] & E1000_VMOLR_BAM) {
                    queues |= BIT(i);
                }
            }
        } else {
            /* Exact match against both receive-address banks; the RAH
             * pool field selects the destination pools. */
            for (macp = core->mac + RA; macp < core->mac + RA + 32; macp += 2) {
                if (!(macp[1] & E1000_RAH_AV)) {
                    continue;
                }
                ra[0] = cpu_to_le32(macp[0]);
                ra[1] = cpu_to_le32(macp[1]);
                if (!memcmp(ehdr->h_dest, (uint8_t *)ra, ETH_ALEN)) {
                    queues |= (macp[1] & E1000_RAH_POOL_MASK) / E1000_RAH_POOL_1;
                }
            }

            for (macp = core->mac + RA2; macp < core->mac + RA2 + 16; macp += 2) {
                if (!(macp[1] & E1000_RAH_AV)) {
                    continue;
                }
                ra[0] = cpu_to_le32(macp[0]);
                ra[1] = cpu_to_le32(macp[1]);
                if (!memcmp(ehdr->h_dest, (uint8_t *)ra, ETH_ALEN)) {
                    queues |= (macp[1] & E1000_RAH_POOL_MASK) / E1000_RAH_POOL_1;
                }
            }

            if (!queues) {
                /* No exact match: consult the MTA/UTA hash table and
                 * deliver to pools accepting hash-matched frames. */
                macp = core->mac + (is_multicast_ether_addr(ehdr->h_dest) ? MTA : UTA);

                f = ta_shift[(rctl >> E1000_RCTL_MO_SHIFT) & 3];
                f = (((ehdr->h_dest[5] << 8) | ehdr->h_dest[4]) >> f) & 0xfff;
                if (macp[f >> 5] & (1 << (f & 0x1f))) {
                    for (i = 0; i < IGB_NUM_VM_POOLS; i++) {
                        if (core->mac[VMOLR0 + i] & E1000_VMOLR_ROMPE) {
                            queues |= BIT(i);
                        }
                    }
                }
            } else if (is_unicast_ether_addr(ehdr->h_dest) && external_tx) {
                /* Matched unicast stays internal; no external TX copy. */
                *external_tx = false;
            }
        }

        /* Per-pool VLAN membership (VLVF) or untagged acceptance (AUPE). */
        if (e1000x_vlan_rx_filter_enabled(core->mac)) {
            uint16_t mask = 0;

            if (vlan_num) {
                uint16_t vid = be16_to_cpu(l2_header->vlan[vlan_num - 1].h_tci) & VLAN_VID_MASK;

                for (i = 0; i < E1000_VLVF_ARRAY_SIZE; i++) {
                    if ((core->mac[VLVF0 + i] & E1000_VLVF_VLANID_MASK) == vid &&
                        (core->mac[VLVF0 + i] & E1000_VLVF_VLANID_ENABLE)) {
                        uint32_t poolsel = core->mac[VLVF0 + i] & E1000_VLVF_POOLSEL_MASK;
                        mask |= poolsel >> E1000_VLVF_POOLSEL_SHIFT;
                    }
                }
            } else {
                for (i = 0; i < IGB_NUM_VM_POOLS; i++) {
                    if (core->mac[VMOLR0 + i] & E1000_VMOLR_AUPE) {
                        mask |= BIT(i);
                    }
                }
            }

            queues &= mask;
        }

        /* Unmatched unicast may fall back to the default pool. */
        if (is_unicast_ether_addr(ehdr->h_dest) && !queues && !external_tx &&
            !(core->mac[VT_CTL] & E1000_VT_CTL_DISABLE_DEF_POOL)) {
            uint32_t def_pl = core->mac[VT_CTL] & E1000_VT_CTL_DEFAULT_POOL_MASK;
            queues = BIT(def_pl >> E1000_VT_CTL_DEFAULT_POOL_SHIFT);
        }

        queues &= core->mac[VFRE];
        if (queues) {
            /* Per-pool oversize check using each pool's LPE/RLPML. */
            for (i = 0; i < IGB_NUM_VM_POOLS; i++) {
                lpe = !!(core->mac[VMOLR0 + i] & E1000_VMOLR_LPE);
                rlpml = core->mac[VMOLR0 + i] & E1000_VMOLR_RLPML_MASK;
                if ((queues & BIT(i)) &&
                    igb_rx_is_oversized(core, ehdr, size, vlan_num,
                                        lpe, rlpml)) {
                    oversized |= BIT(i);
                }
            }
            /* 8.19.37 increment ROC if packet is oversized for all queues */
            if (oversized == queues) {
                trace_e1000x_rx_oversized(size);
                e1000x_inc_reg_if_not_full(core->mac, ROC);
            }
            queues &= ~oversized;
        }

        if (queues) {
            igb_rss_parse_packet(core, core->rx_pkt,
                                 external_tx != NULL, rss_info);
            /* Sec 8.26.1: PQn = VFn + VQn*8 */
            if (rss_info->queue & 1) {
                for (i = 0; i < IGB_NUM_VM_POOLS; i++) {
                    if ((queues & BIT(i)) &&
                        (core->mac[VMOLR0 + i] & E1000_VMOLR_RSSE)) {
                        queues |= BIT(i + IGB_NUM_VM_POOLS);
                        queues &= ~BIT(i);
                    }
                }
            }
        }
    } else {
        /* Non-VMDq: standard group filtering plus the second receive
         * address bank, then RSS selects the destination queue. */
        bool accepted = e1000x_rx_group_filter(core->mac, ehdr);
        if (!accepted) {
            for (macp = core->mac + RA2; macp < core->mac + RA2 + 16; macp += 2) {
                if (!(macp[1] & E1000_RAH_AV)) {
                    continue;
                }
                ra[0] = cpu_to_le32(macp[0]);
                ra[1] = cpu_to_le32(macp[1]);
                if (!memcmp(ehdr->h_dest, (uint8_t *)ra, ETH_ALEN)) {
                    trace_e1000x_rx_flt_ucast_match((int)(macp - core->mac - RA2) / 2,
                                                    MAC_ARG(ehdr->h_dest));
                    accepted = true;
                    break;
                }
            }
        }

        if (accepted) {
            igb_rss_parse_packet(core, core->rx_pkt, false, rss_info);
            queues = BIT(rss_info->queue);
        }
    }

    return queues;
}
/* Legacy RX descriptor: read the single packet buffer address. */
static inline void
igb_read_lgcy_rx_descr(IGBCore *core, struct e1000_rx_desc *desc,
                       hwaddr *buff_addr)
{
    *buff_addr = le64_to_cpu(desc->buffer_addr);
}
/* Advanced one-buffer RX descriptor: read the packet buffer address. */
static inline void
igb_read_adv_rx_single_buf_descr(IGBCore *core, union e1000_adv_rx_desc *desc,
                                 hwaddr *buff_addr)
{
    *buff_addr = le64_to_cpu(desc->read.pkt_addr);
}
/*
 * Advanced header-split RX descriptor: read the header buffer address
 * into slot 0 and the packet buffer address into slot 1.
 */
static inline void
igb_read_adv_rx_split_buf_descr(IGBCore *core, union e1000_adv_rx_desc *desc,
                                hwaddr *buff_addr)
{
    buff_addr[0] = le64_to_cpu(desc->read.hdr_addr);
    buff_addr[1] = le64_to_cpu(desc->read.pkt_addr);
}
/* Write progress for the buffers of the RX descriptor being filled. */
typedef struct IGBBAState {
    uint16_t written[IGB_MAX_PS_BUFFERS]; /* bytes written per buffer */
    uint8_t cur_idx;                      /* buffer currently being filled */
} IGBBAState;
/* Header-split results reported in the advanced descriptor write-back. */
typedef struct IGBSplitDescriptorData {
    bool sph;          /* SPH: header was split into its own buffer */
    bool hbo;          /* HBO: header buffer overflow indication */
    size_t hdr_len;    /* length of the split-off header */
} IGBSplitDescriptorData;
/*
 * Transient state used while DMA-ing one received packet into guest
 * descriptor buffers.
 */
typedef struct IGBPacketRxDMAState {
    size_t size;
    size_t total_size;
    size_t ps_hdr_len;                /* header length for header split */
    size_t desc_size;
    size_t desc_offset;
    uint32_t rx_desc_packet_buf_size; /* from SRRCTL.BSIZEPKT / RCTL */
    uint32_t rx_desc_header_buf_size; /* from SRRCTL.BSIZEHDRSIZE */
    struct iovec *iov;                /* current source iovec */
    size_t iov_ofs;                   /* offset within current iovec */
    bool do_ps;                       /* header split in effect */
    bool is_first;                    /* first descriptor of the packet */
    IGBBAState bastate;               /* per-buffer write progress */
    hwaddr ba[IGB_MAX_PS_BUFFERS];    /* guest buffer addresses */
    IGBSplitDescriptorData ps_desc_data; /* split results for write-back */
} IGBPacketRxDMAState;
  1070. static inline void
  1071. igb_read_rx_descr(IGBCore *core,
  1072. union e1000_rx_desc_union *desc,
  1073. IGBPacketRxDMAState *pdma_st,
  1074. const E1000ERingInfo *r)
  1075. {
  1076. uint32_t desc_type;
  1077. if (igb_rx_use_legacy_descriptor(core)) {
  1078. igb_read_lgcy_rx_descr(core, &desc->legacy, &pdma_st->ba[1]);
  1079. pdma_st->ba[0] = 0;
  1080. return;
  1081. }
  1082. /* advanced header split descriptor */
  1083. if (igb_rx_use_ps_descriptor(core, r)) {
  1084. igb_read_adv_rx_split_buf_descr(core, &desc->adv, &pdma_st->ba[0]);
  1085. return;
  1086. }
  1087. /* descriptor replication modes not supported */
  1088. desc_type = igb_rx_queue_desctyp_get(core, r);
  1089. if (desc_type != E1000_SRRCTL_DESCTYPE_ADV_ONEBUF) {
  1090. trace_igb_wrn_rx_desc_modes_not_supp(desc_type);
  1091. }
  1092. /* advanced single buffer descriptor */
  1093. igb_read_adv_rx_single_buf_descr(core, &desc->adv, &pdma_st->ba[1]);
  1094. pdma_st->ba[0] = 0;
  1095. }
  1096. static void
  1097. igb_verify_csum_in_sw(IGBCore *core,
  1098. struct NetRxPkt *pkt,
  1099. uint32_t *status_flags,
  1100. EthL4HdrProto l4hdr_proto)
  1101. {
  1102. bool csum_valid;
  1103. uint32_t csum_error;
  1104. if (igb_rx_l3_cso_enabled(core)) {
  1105. if (!net_rx_pkt_validate_l3_csum(pkt, &csum_valid)) {
  1106. trace_e1000e_rx_metadata_l3_csum_validation_failed();
  1107. } else {
  1108. csum_error = csum_valid ? 0 : E1000_RXDEXT_STATERR_IPE;
  1109. *status_flags |= E1000_RXD_STAT_IPCS | csum_error;
  1110. }
  1111. } else {
  1112. trace_e1000e_rx_metadata_l3_cso_disabled();
  1113. }
  1114. if (!igb_rx_l4_cso_enabled(core)) {
  1115. trace_e1000e_rx_metadata_l4_cso_disabled();
  1116. return;
  1117. }
  1118. if (!net_rx_pkt_validate_l4_csum(pkt, &csum_valid)) {
  1119. trace_e1000e_rx_metadata_l4_csum_validation_failed();
  1120. return;
  1121. }
  1122. csum_error = csum_valid ? 0 : E1000_RXDEXT_STATERR_TCPE;
  1123. *status_flags |= E1000_RXD_STAT_TCPCS | csum_error;
  1124. if (l4hdr_proto == ETH_L4_HDR_PROTO_UDP) {
  1125. *status_flags |= E1000_RXD_STAT_UDPCS;
  1126. }
  1127. }
/*
 * Fill the status/error flags and VLAN tag common to legacy and
 * advanced RX descriptors. Checksum status is taken from the virtio
 * header when present, otherwise validated in software. The resulting
 * '*status_flags' is stored in little-endian byte order.
 */
static void
igb_build_rx_metadata_common(IGBCore *core,
                             struct NetRxPkt *pkt,
                             bool is_eop,
                             uint32_t *status_flags,
                             uint16_t *vlan_tag)
{
    struct virtio_net_hdr *vhdr;
    bool hasip4, hasip6, csum_valid;
    EthL4HdrProto l4hdr_proto;

    *status_flags = E1000_RXD_STAT_DD;

    /* No additional metadata needed for non-EOP descriptors */
    if (!is_eop) {
        goto func_exit;
    }

    *status_flags |= E1000_RXD_STAT_EOP;

    net_rx_pkt_get_protocols(pkt, &hasip4, &hasip6, &l4hdr_proto);
    trace_e1000e_rx_metadata_protocols(hasip4, hasip6, l4hdr_proto);

    /* VLAN state */
    if (net_rx_pkt_is_vlan_stripped(pkt)) {
        *status_flags |= E1000_RXD_STAT_VP;
        *vlan_tag = cpu_to_le16(net_rx_pkt_get_vlan_tag(pkt));
        trace_e1000e_rx_metadata_vlan(*vlan_tag);
    }

    /* RX CSO information */
    if (hasip6 && (core->mac[RFCTL] & E1000_RFCTL_IPV6_XSUM_DIS)) {
        trace_e1000e_rx_metadata_ipv6_sum_disabled();
        goto func_exit;
    }

    vhdr = net_rx_pkt_get_vhdr(pkt);

    /* Without checksum info in the vheader, fall back to software
     * validation of the packet's checksums. */
    if (!(vhdr->flags & VIRTIO_NET_HDR_F_DATA_VALID) &&
        !(vhdr->flags & VIRTIO_NET_HDR_F_NEEDS_CSUM)) {
        trace_e1000e_rx_metadata_virthdr_no_csum_info();
        igb_verify_csum_in_sw(core, pkt, status_flags, l4hdr_proto);
        goto func_exit;
    }

    if (igb_rx_l3_cso_enabled(core)) {
        *status_flags |= hasip4 ? E1000_RXD_STAT_IPCS : 0;
    } else {
        trace_e1000e_rx_metadata_l3_cso_disabled();
    }

    if (igb_rx_l4_cso_enabled(core)) {
        switch (l4hdr_proto) {
        case ETH_L4_HDR_PROTO_SCTP:
            /* The vheader does not cover the SCTP CRC; verify here. */
            if (!net_rx_pkt_validate_l4_csum(pkt, &csum_valid)) {
                trace_e1000e_rx_metadata_l4_csum_validation_failed();
                goto func_exit;
            }
            if (!csum_valid) {
                *status_flags |= E1000_RXDEXT_STATERR_TCPE;
            }
            /* fall through */
        case ETH_L4_HDR_PROTO_TCP:
            *status_flags |= E1000_RXD_STAT_TCPCS;
            break;

        case ETH_L4_HDR_PROTO_UDP:
            *status_flags |= E1000_RXD_STAT_TCPCS | E1000_RXD_STAT_UDPCS;
            break;

        default:
            break;
        }
    } else {
        trace_e1000e_rx_metadata_l4_cso_disabled();
    }

func_exit:
    trace_e1000e_rx_metadata_status_flags(*status_flags);
    *status_flags = cpu_to_le32(*status_flags);
}
  1196. static inline void
  1197. igb_write_lgcy_rx_descr(IGBCore *core, struct e1000_rx_desc *desc,
  1198. struct NetRxPkt *pkt,
  1199. const E1000E_RSSInfo *rss_info,
  1200. uint16_t length)
  1201. {
  1202. uint32_t status_flags;
  1203. assert(!rss_info->enabled);
  1204. memset(desc, 0, sizeof(*desc));
  1205. desc->length = cpu_to_le16(length);
  1206. igb_build_rx_metadata_common(core, pkt, pkt != NULL,
  1207. &status_flags,
  1208. &desc->special);
  1209. desc->errors = (uint8_t) (le32_to_cpu(status_flags) >> 24);
  1210. desc->status = (uint8_t) le32_to_cpu(status_flags);
  1211. }
  1212. static bool
  1213. igb_rx_ps_descriptor_split_always(IGBCore *core, const E1000ERingInfo *r)
  1214. {
  1215. uint32_t desctyp = igb_rx_queue_desctyp_get(core, r);
  1216. return desctyp == E1000_SRRCTL_DESCTYPE_HDR_SPLIT_ALWAYS;
  1217. }
  1218. static uint16_t
  1219. igb_rx_desc_get_packet_type(IGBCore *core, struct NetRxPkt *pkt, uint16_t etqf)
  1220. {
  1221. uint16_t pkt_type;
  1222. bool hasip4, hasip6;
  1223. EthL4HdrProto l4hdr_proto;
  1224. if (etqf < 8) {
  1225. pkt_type = BIT(11) | etqf;
  1226. return pkt_type;
  1227. }
  1228. net_rx_pkt_get_protocols(pkt, &hasip4, &hasip6, &l4hdr_proto);
  1229. if (hasip6 && !(core->mac[RFCTL] & E1000_RFCTL_IPV6_DIS)) {
  1230. eth_ip6_hdr_info *ip6hdr_info = net_rx_pkt_get_ip6_info(pkt);
  1231. pkt_type = ip6hdr_info->has_ext_hdrs ? E1000_ADVRXD_PKT_IP6E :
  1232. E1000_ADVRXD_PKT_IP6;
  1233. } else if (hasip4) {
  1234. pkt_type = E1000_ADVRXD_PKT_IP4;
  1235. } else {
  1236. pkt_type = 0;
  1237. }
  1238. switch (l4hdr_proto) {
  1239. case ETH_L4_HDR_PROTO_TCP:
  1240. pkt_type |= E1000_ADVRXD_PKT_TCP;
  1241. break;
  1242. case ETH_L4_HDR_PROTO_UDP:
  1243. pkt_type |= E1000_ADVRXD_PKT_UDP;
  1244. break;
  1245. case ETH_L4_HDR_PROTO_SCTP:
  1246. pkt_type |= E1000_ADVRXD_PKT_SCTP;
  1247. break;
  1248. default:
  1249. break;
  1250. }
  1251. return pkt_type;
  1252. }
/*
 * Fill the write-back half of an advanced RX descriptor: length,
 * status/error flags, VLAN tag, RSS hash or IPv4 identification,
 * timestamp indication and packet-type information.
 */
static inline void
igb_write_adv_rx_descr(IGBCore *core, union e1000_adv_rx_desc *desc,
                       struct NetRxPkt *pkt,
                       const E1000E_RSSInfo *rss_info, uint16_t etqf, bool ts,
                       uint16_t length)
{
    bool hasip4, hasip6;
    EthL4HdrProto l4hdr_proto;
    uint16_t rss_type = 0, pkt_type;
    bool eop = (pkt != NULL);
    uint32_t adv_desc_status_error = 0;

    memset(&desc->wb, 0, sizeof(desc->wb));

    desc->wb.upper.length = cpu_to_le16(length);
    igb_build_rx_metadata_common(core, pkt, eop,
                                 &desc->wb.upper.status_error,
                                 &desc->wb.upper.vlan);

    /* Non-EOP descriptors carry no further metadata. */
    if (!eop) {
        return;
    }

    net_rx_pkt_get_protocols(pkt, &hasip4, &hasip6, &l4hdr_proto);

    /* hi_dword reports either the RSS hash or the IPv4 identification,
     * depending on RXCSUM.PCSD. */
    if ((core->mac[RXCSUM] & E1000_RXCSUM_PCSD) != 0) {
        if (rss_info->enabled) {
            desc->wb.lower.hi_dword.rss = cpu_to_le32(rss_info->hash);
            rss_type = rss_info->type;
            trace_igb_rx_metadata_rss(desc->wb.lower.hi_dword.rss, rss_type);
        }
    } else if (hasip4) {
        adv_desc_status_error |= E1000_RXD_STAT_IPIDV;
        desc->wb.lower.hi_dword.csum_ip.ip_id =
            cpu_to_le16(net_rx_pkt_get_ip_id(pkt));
        trace_e1000e_rx_metadata_ip_id(
            desc->wb.lower.hi_dword.csum_ip.ip_id);
    }

    /* Bit 16 of the status flags indicates a latched timestamp. */
    if (ts) {
        adv_desc_status_error |= BIT(16);
    }

    pkt_type = igb_rx_desc_get_packet_type(core, pkt, etqf);
    trace_e1000e_rx_metadata_pkt_type(pkt_type);
    desc->wb.lower.lo_dword.pkt_info = cpu_to_le16(rss_type | (pkt_type << 4));
    desc->wb.upper.status_error |= cpu_to_le32(adv_desc_status_error);
}
/*
 * Write back an advanced descriptor for a packet received on a
 * header-split queue: the reported packet length excludes the header
 * buffer when a split actually happened, and the header length / SPH /
 * HBO results are encoded into the descriptor.
 */
static inline void
igb_write_adv_ps_rx_descr(IGBCore *core,
                          union e1000_adv_rx_desc *desc,
                          struct NetRxPkt *pkt,
                          const E1000E_RSSInfo *rss_info,
                          const E1000ERingInfo *r,
                          uint16_t etqf,
                          bool ts,
                          IGBPacketRxDMAState *pdma_st)
{
    size_t pkt_len;
    uint16_t hdr_info = 0;

    if (pdma_st->do_ps) {
        /* Header went to its own buffer; count packet bytes only. */
        pkt_len = pdma_st->bastate.written[1];
    } else {
        pkt_len = pdma_st->bastate.written[0] + pdma_st->bastate.written[1];
    }

    igb_write_adv_rx_descr(core, desc, pkt, rss_info, etqf, ts, pkt_len);

    hdr_info = (pdma_st->ps_desc_data.hdr_len << E1000_ADVRXD_HDR_LEN_OFFSET) &
               E1000_ADVRXD_ADV_HDR_LEN_MASK;
    hdr_info |= pdma_st->ps_desc_data.sph ? E1000_ADVRXD_HDR_SPH : 0;
    desc->wb.lower.lo_dword.hdr_info = cpu_to_le16(hdr_info);

    desc->wb.upper.status_error |= cpu_to_le32(
        pdma_st->ps_desc_data.hbo ? E1000_ADVRXD_ST_ERR_HBO_OFFSET : 0);
}
  1319. static inline void
  1320. igb_write_rx_descr(IGBCore *core,
  1321. union e1000_rx_desc_union *desc,
  1322. struct NetRxPkt *pkt,
  1323. const E1000E_RSSInfo *rss_info,
  1324. uint16_t etqf,
  1325. bool ts,
  1326. IGBPacketRxDMAState *pdma_st,
  1327. const E1000ERingInfo *r)
  1328. {
  1329. if (igb_rx_use_legacy_descriptor(core)) {
  1330. igb_write_lgcy_rx_descr(core, &desc->legacy, pkt, rss_info,
  1331. pdma_st->bastate.written[1]);
  1332. } else if (igb_rx_use_ps_descriptor(core, r)) {
  1333. igb_write_adv_ps_rx_descr(core, &desc->adv, pkt, rss_info, r, etqf, ts,
  1334. pdma_st);
  1335. } else {
  1336. igb_write_adv_rx_descr(core, &desc->adv, pkt, rss_info,
  1337. etqf, ts, pdma_st->bastate.written[1]);
  1338. }
  1339. }
/*
 * DMA the written-back descriptor to guest memory while guaranteeing that
 * the DD (Descriptor Done) bit becomes visible to the guest only after
 * the rest of the descriptor: the whole descriptor is first written with
 * DD cleared, then the status field alone is rewritten with DD set.
 */
static inline void
igb_pci_dma_write_rx_desc(IGBCore *core, PCIDevice *dev, dma_addr_t addr,
                          union e1000_rx_desc_union *desc, dma_addr_t len)
{
    if (igb_rx_use_legacy_descriptor(core)) {
        struct e1000_rx_desc *d = &desc->legacy;
        size_t offset = offsetof(struct e1000_rx_desc, status);
        uint8_t status = d->status;

        /* First pass: full descriptor with DD masked out. */
        d->status &= ~E1000_RXD_STAT_DD;
        pci_dma_write(dev, addr, desc, len);

        if (status & E1000_RXD_STAT_DD) {
            /* Second pass: publish the final status byte (with DD). */
            d->status = status;
            pci_dma_write(dev, addr + offset, &status, sizeof(status));
        }
    } else {
        union e1000_adv_rx_desc *d = &desc->adv;
        size_t offset =
            offsetof(union e1000_adv_rx_desc, wb.upper.status_error);
        uint32_t status = d->wb.upper.status_error;

        /* Same two-step DD publication for the advanced format. */
        d->wb.upper.status_error &= ~E1000_RXD_STAT_DD;
        pci_dma_write(dev, addr, desc, len);

        if (status & E1000_RXD_STAT_DD) {
            d->wb.upper.status_error = status;
            pci_dma_write(dev, addr + offset, &status, sizeof(status));
        }
    }
}
  1367. static void
  1368. igb_update_rx_stats(IGBCore *core, const E1000ERingInfo *rxi,
  1369. size_t pkt_size, size_t pkt_fcs_size)
  1370. {
  1371. eth_pkt_types_e pkt_type = net_rx_pkt_get_packet_type(core->rx_pkt);
  1372. e1000x_update_rx_total_stats(core->mac, pkt_type, pkt_size, pkt_fcs_size);
  1373. if (core->mac[MRQC] & 1) {
  1374. uint16_t pool = rxi->idx % IGB_NUM_VM_POOLS;
  1375. core->mac[PVFGORC0 + (pool * 64)] += pkt_size + 4;
  1376. core->mac[PVFGPRC0 + (pool * 64)]++;
  1377. if (pkt_type == ETH_PKT_MCAST) {
  1378. core->mac[PVFMPRC0 + (pool * 64)]++;
  1379. }
  1380. }
  1381. }
  1382. static inline bool
  1383. igb_rx_descr_threshold_hit(IGBCore *core, const E1000ERingInfo *rxi)
  1384. {
  1385. return igb_ring_free_descr_num(core, rxi) ==
  1386. ((core->mac[E1000_SRRCTL(rxi->idx) >> 2] >> 20) & 31) * 16;
  1387. }
/*
 * Decide whether this packet should be received with header splitting
 * ("packet split") and compute the header length (pdma_st->ps_hdr_len)
 * to place in buffer 0.
 *
 * Returns true when a split is performed.  When the protocol header
 * cannot be split cleanly ("header_not_handled" paths), a split still
 * happens if the ring is configured to always split — the header buffer
 * then receives the first bheader_size bytes of the packet.
 */
static bool
igb_do_ps(IGBCore *core,
          const E1000ERingInfo *r,
          struct NetRxPkt *pkt,
          IGBPacketRxDMAState *pdma_st)
{
    bool hasip4, hasip6;
    EthL4HdrProto l4hdr_proto;
    bool fragment;
    bool split_always;
    size_t bheader_size;
    size_t total_pkt_len;

    if (!igb_rx_use_ps_descriptor(core, r)) {
        return false;
    }

    total_pkt_len = net_rx_pkt_get_total_len(pkt);
    bheader_size = igb_rxhdrbufsize(core, r);
    split_always = igb_rx_ps_descriptor_split_always(core, r);

    /* Tiny packet in always-split mode: the whole packet is the "header". */
    if (split_always && total_pkt_len <= bheader_size) {
        pdma_st->ps_hdr_len = total_pkt_len;
        pdma_st->ps_desc_data.hdr_len = total_pkt_len;
        return true;
    }

    net_rx_pkt_get_protocols(pkt, &hasip4, &hasip6, &l4hdr_proto);

    if (hasip4) {
        fragment = net_rx_pkt_get_ip4_info(pkt)->fragment;
    } else if (hasip6) {
        fragment = net_rx_pkt_get_ip6_info(pkt)->fragment;
    } else {
        /* Non-IP: no protocol header boundary to split at. */
        pdma_st->ps_desc_data.hdr_len = bheader_size;
        goto header_not_handled;
    }

    /* RFCTL.IPFRSP_DIS: don't split IP fragments. */
    if (fragment && (core->mac[RFCTL] & E1000_RFCTL_IPFRSP_DIS)) {
        pdma_st->ps_desc_data.hdr_len = bheader_size;
        goto header_not_handled;
    }

    /* no header splitting for SCTP */
    if (!fragment && (l4hdr_proto == ETH_L4_HDR_PROTO_UDP ||
                      l4hdr_proto == ETH_L4_HDR_PROTO_TCP)) {
        /* Split after the L4 header for TCP/UDP. */
        pdma_st->ps_hdr_len = net_rx_pkt_get_l5_hdr_offset(pkt);
    } else {
        /* Otherwise split after the L3 header. */
        pdma_st->ps_hdr_len = net_rx_pkt_get_l4_hdr_offset(pkt);
    }

    pdma_st->ps_desc_data.sph = true;
    pdma_st->ps_desc_data.hdr_len = pdma_st->ps_hdr_len;

    /* Header larger than the header buffer: flag HBO, fall back. */
    if (pdma_st->ps_hdr_len > bheader_size) {
        pdma_st->ps_desc_data.hbo = true;
        goto header_not_handled;
    }

    return true;

header_not_handled:
    if (split_always) {
        pdma_st->ps_hdr_len = bheader_size;
        return true;
    }

    return false;
}
  1445. static void
  1446. igb_truncate_to_descriptor_size(IGBPacketRxDMAState *pdma_st, size_t *size)
  1447. {
  1448. if (pdma_st->do_ps && pdma_st->is_first) {
  1449. if (*size > pdma_st->rx_desc_packet_buf_size + pdma_st->ps_hdr_len) {
  1450. *size = pdma_st->rx_desc_packet_buf_size + pdma_st->ps_hdr_len;
  1451. }
  1452. } else {
  1453. if (*size > pdma_st->rx_desc_packet_buf_size) {
  1454. *size = pdma_st->rx_desc_packet_buf_size;
  1455. }
  1456. }
  1457. }
/*
 * DMA one fragment of the split-out packet header into buffer 0 of the
 * current descriptor and advance the buffer-0 write position.  Payload
 * writes must go to buffer 1 onwards, so cur_idx is moved past buffer 0.
 */
static inline void
igb_write_hdr_frag_to_rx_buffers(IGBCore *core,
                                 PCIDevice *d,
                                 IGBPacketRxDMAState *pdma_st,
                                 const char *data,
                                 dma_addr_t data_len)
{
    /* Callers must never overflow the guest's header buffer. */
    assert(data_len <= pdma_st->rx_desc_header_buf_size -
                       pdma_st->bastate.written[0]);

    pci_dma_write(d,
                  pdma_st->ba[0] + pdma_st->bastate.written[0],
                  data, data_len);
    pdma_st->bastate.written[0] += data_len;
    /* Subsequent payload data goes into buffer 1. */
    pdma_st->bastate.cur_idx = 1;
}
  1473. static void
  1474. igb_write_header_to_rx_buffers(IGBCore *core,
  1475. struct NetRxPkt *pkt,
  1476. PCIDevice *d,
  1477. IGBPacketRxDMAState *pdma_st,
  1478. size_t *copy_size)
  1479. {
  1480. size_t iov_copy;
  1481. size_t ps_hdr_copied = 0;
  1482. if (!pdma_st->is_first) {
  1483. /* Leave buffer 0 of each descriptor except first */
  1484. /* empty */
  1485. pdma_st->bastate.cur_idx = 1;
  1486. return;
  1487. }
  1488. do {
  1489. iov_copy = MIN(pdma_st->ps_hdr_len - ps_hdr_copied,
  1490. pdma_st->iov->iov_len - pdma_st->iov_ofs);
  1491. igb_write_hdr_frag_to_rx_buffers(core, d, pdma_st,
  1492. pdma_st->iov->iov_base,
  1493. iov_copy);
  1494. *copy_size -= iov_copy;
  1495. ps_hdr_copied += iov_copy;
  1496. pdma_st->iov_ofs += iov_copy;
  1497. if (pdma_st->iov_ofs == pdma_st->iov->iov_len) {
  1498. pdma_st->iov++;
  1499. pdma_st->iov_ofs = 0;
  1500. }
  1501. } while (ps_hdr_copied < pdma_st->ps_hdr_len);
  1502. pdma_st->is_first = false;
  1503. }
/*
 * DMA one fragment of packet payload into the descriptor's data buffers,
 * filling the current buffer and spilling into the next one(s) as they
 * fill up.  Callers guarantee enough buffer space for data_len bytes
 * (the assert guards the buffer index).
 */
static void
igb_write_payload_frag_to_rx_buffers(IGBCore *core,
                                     PCIDevice *d,
                                     IGBPacketRxDMAState *pdma_st,
                                     const char *data,
                                     dma_addr_t data_len)
{
    while (data_len > 0) {
        assert(pdma_st->bastate.cur_idx < IGB_MAX_PS_BUFFERS);

        /* Room left in the buffer currently being filled. */
        uint32_t cur_buf_bytes_left =
            pdma_st->rx_desc_packet_buf_size -
            pdma_st->bastate.written[pdma_st->bastate.cur_idx];
        uint32_t bytes_to_write = MIN(data_len, cur_buf_bytes_left);

        trace_igb_rx_desc_buff_write(
            pdma_st->bastate.cur_idx,
            pdma_st->ba[pdma_st->bastate.cur_idx],
            pdma_st->bastate.written[pdma_st->bastate.cur_idx],
            data,
            bytes_to_write);

        pci_dma_write(d,
                      pdma_st->ba[pdma_st->bastate.cur_idx] +
                      pdma_st->bastate.written[pdma_st->bastate.cur_idx],
                      data, bytes_to_write);

        pdma_st->bastate.written[pdma_st->bastate.cur_idx] += bytes_to_write;
        data += bytes_to_write;
        data_len -= bytes_to_write;

        /* Current buffer full: continue in the next one. */
        if (pdma_st->bastate.written[pdma_st->bastate.cur_idx] ==
            pdma_st->rx_desc_packet_buf_size) {
            pdma_st->bastate.cur_idx++;
        }
    }
}
/*
 * Copy *copy_size bytes of packet payload from the iovec into the
 * descriptor's buffers, then — if this descriptor holds the end of the
 * packet — append e1000x_fcs_len() bytes of zeros in place of the FCS.
 */
static void
igb_write_payload_to_rx_buffers(IGBCore *core,
                                struct NetRxPkt *pkt,
                                PCIDevice *d,
                                IGBPacketRxDMAState *pdma_st,
                                size_t *copy_size)
{
    /* Zero-filled stand-in for the frame check sequence. */
    static const uint32_t fcs_pad;
    size_t iov_copy;

    /* Copy packet payload */
    while (*copy_size) {
        iov_copy = MIN(*copy_size, pdma_st->iov->iov_len - pdma_st->iov_ofs);
        igb_write_payload_frag_to_rx_buffers(core, d,
                                             pdma_st,
                                             pdma_st->iov->iov_base +
                                             pdma_st->iov_ofs,
                                             iov_copy);

        *copy_size -= iov_copy;
        pdma_st->iov_ofs += iov_copy;
        if (pdma_st->iov_ofs == pdma_st->iov->iov_len) {
            /* Advance to the next iovec element. */
            pdma_st->iov++;
            pdma_st->iov_ofs = 0;
        }
    }

    if (pdma_st->desc_offset + pdma_st->desc_size >= pdma_st->total_size) {
        /* Simulate FCS checksum presence in the last descriptor */
        igb_write_payload_frag_to_rx_buffers(core, d,
                                             pdma_st,
                                             (const char *) &fcs_pad,
                                             e1000x_fcs_len(core->mac));
    }
}
/*
 * Fill one RX descriptor's buffers with the next chunk of the packet:
 * clamp the chunk to the descriptor's capacity, write the split header
 * (PS mode, first descriptor only), then the payload.  desc_size is left
 * holding the amount of packet consumed by this descriptor.
 */
static void
igb_write_to_rx_buffers(IGBCore *core,
                        struct NetRxPkt *pkt,
                        PCIDevice *d,
                        IGBPacketRxDMAState *pdma_st)
{
    size_t copy_size;

    if (!(pdma_st->ba)[1] || (pdma_st->do_ps && !(pdma_st->ba[0]))) {
        /* as per intel docs; skip descriptors with null buf addr */
        trace_e1000e_rx_null_descriptor();
        return;
    }

    /* Only FCS padding remains past this point; no payload to copy. */
    if (pdma_st->desc_offset >= pdma_st->size) {
        return;
    }

    /* desc_size: packet + FCS bytes this descriptor will account for. */
    pdma_st->desc_size = pdma_st->total_size - pdma_st->desc_offset;
    igb_truncate_to_descriptor_size(pdma_st, &pdma_st->desc_size);
    /* copy_size: actual payload bytes (without FCS) to DMA now. */
    copy_size = pdma_st->size - pdma_st->desc_offset;
    igb_truncate_to_descriptor_size(pdma_st, &copy_size);

    /* For PS mode copy the packet header first */
    if (pdma_st->do_ps) {
        igb_write_header_to_rx_buffers(core, pkt, d, pdma_st, &copy_size);
    } else {
        /* Buffer 0 is reserved for headers; payload starts at buffer 1. */
        pdma_st->bastate.cur_idx = 1;
    }

    igb_write_payload_to_rx_buffers(core, pkt, d, pdma_st, &copy_size);
}
/*
 * DMA a received packet into the guest's RX ring: walk descriptors until
 * the whole packet (plus simulated FCS) has been written, writing back
 * each descriptor's status as it is consumed, then update statistics.
 * DMA is issued on behalf of the owning VF when the queue belongs to one.
 */
static void
igb_write_packet_to_guest(IGBCore *core, struct NetRxPkt *pkt,
                          const E1000E_RxRing *rxr,
                          const E1000E_RSSInfo *rss_info,
                          uint16_t etqf, bool ts)
{
    PCIDevice *d;
    dma_addr_t base;
    union e1000_rx_desc_union desc;
    const E1000ERingInfo *rxi;
    size_t rx_desc_len;

    IGBPacketRxDMAState pdma_st = {0};
    pdma_st.is_first = true;
    pdma_st.size = net_rx_pkt_get_total_len(pkt);
    /* total_size additionally covers the (simulated) FCS. */
    pdma_st.total_size = pdma_st.size + e1000x_fcs_len(core->mac);

    rxi = rxr->i;
    rx_desc_len = core->rx_desc_len;
    pdma_st.rx_desc_packet_buf_size = igb_rxbufsize(core, rxi);
    pdma_st.rx_desc_header_buf_size = igb_rxhdrbufsize(core, rxi);
    pdma_st.iov = net_rx_pkt_get_iovec(pkt);

    /* Queues map onto VFs; fall back to the PF when no VF owns this one. */
    d = pcie_sriov_get_vf_at_index(core->owner, rxi->idx % 8);
    if (!d) {
        d = core->owner;
    }

    pdma_st.do_ps = igb_do_ps(core, rxi, pkt, &pdma_st);

    do {
        /* Per-descriptor buffer bookkeeping starts fresh each iteration. */
        memset(&pdma_st.bastate, 0, sizeof(IGBBAState));
        bool is_last = false;

        if (igb_ring_empty(core, rxi)) {
            /* Out of descriptors mid-packet: stop without write-back. */
            return;
        }

        base = igb_ring_head_descr(core, rxi);
        pci_dma_read(d, base, &desc, rx_desc_len);
        trace_e1000e_rx_descr(rxi->idx, base, rx_desc_len);

        igb_read_rx_descr(core, &desc, &pdma_st, rxi);

        igb_write_to_rx_buffers(core, pkt, d, &pdma_st);
        pdma_st.desc_offset += pdma_st.desc_size;
        if (pdma_st.desc_offset >= pdma_st.total_size) {
            is_last = true;
        }

        /* Only the last descriptor (EOP) gets the full packet metadata. */
        igb_write_rx_descr(core, &desc,
                           is_last ? pkt : NULL,
                           rss_info,
                           etqf, ts,
                           &pdma_st,
                           rxi);
        igb_pci_dma_write_rx_desc(core, d, base, &desc, rx_desc_len);
        igb_ring_advance(core, rxi, rx_desc_len / E1000_MIN_RX_DESC_LEN);
    } while (pdma_st.desc_offset < pdma_st.total_size);

    igb_update_rx_stats(core, rxi, pdma_st.size, pdma_st.total_size);
}
  1646. static bool
  1647. igb_rx_strip_vlan(IGBCore *core, const E1000ERingInfo *rxi)
  1648. {
  1649. if (core->mac[MRQC] & 1) {
  1650. uint16_t pool = rxi->idx % IGB_NUM_VM_POOLS;
  1651. /* Sec 7.10.3.8: CTRL.VME is ignored, only VMOLR/RPLOLR is used */
  1652. return (net_rx_pkt_get_packet_type(core->rx_pkt) == ETH_PKT_MCAST) ?
  1653. core->mac[RPLOLR] & E1000_RPLOLR_STRVLAN :
  1654. core->mac[VMOLR0 + pool] & E1000_VMOLR_STRVLAN;
  1655. }
  1656. return e1000x_vlan_enabled(core->mac);
  1657. }
  1658. static inline void
  1659. igb_rx_fix_l4_csum(IGBCore *core, struct NetRxPkt *pkt)
  1660. {
  1661. struct virtio_net_hdr *vhdr = net_rx_pkt_get_vhdr(pkt);
  1662. if (vhdr->flags & VIRTIO_NET_HDR_F_NEEDS_CSUM) {
  1663. net_rx_pkt_fix_l4_csum(pkt);
  1664. }
  1665. }
/*
 * Public RX entry point: forward the frame to the internal receive path,
 * honouring the device's virtio-net-header capability.
 */
ssize_t
igb_receive_iov(IGBCore *core, const struct iovec *iov, int iovcnt)
{
    return igb_receive_internal(core, iov, iovcnt, core->has_vnet, NULL);
}
/*
 * Core RX path: strip the optional virtio-net header, pad runt frames,
 * classify the packet onto one or more RX queues, DMA it to the guest
 * for every enabled matching queue, and finally raise the accumulated
 * legacy (ICR) and extended (EICR) interrupt causes.
 *
 * Returns the number of bytes consumed (the whole frame, even when it is
 * dropped by filtering) or -1 when hardware RX is disabled.
 */
static ssize_t
igb_receive_internal(IGBCore *core, const struct iovec *iov, int iovcnt,
                     bool has_vnet, bool *external_tx)
{
    uint16_t queues = 0;
    uint32_t causes = 0;
    uint32_t ecauses = 0;
    union {
        L2Header l2_header;
        uint8_t octets[ETH_ZLEN];
    } buf;
    struct iovec min_iov;
    size_t size, orig_size;
    size_t iov_ofs = 0;
    E1000E_RxRing rxr;
    E1000E_RSSInfo rss_info;
    uint16_t etqf;
    bool ts;
    size_t total_size;
    int strip_vlan_index;
    int i;

    trace_e1000e_rx_receive_iov(iovcnt);

    if (external_tx) {
        *external_tx = true;
    }

    if (!e1000x_hw_rx_enabled(core->mac)) {
        return -1;
    }

    /* Pull virtio header in */
    if (has_vnet) {
        net_rx_pkt_set_vhdr_iovec(core->rx_pkt, iov, iovcnt);
        iov_ofs = sizeof(struct virtio_net_hdr);
    } else {
        net_rx_pkt_unset_vhdr(core->rx_pkt);
    }

    orig_size = iov_size(iov, iovcnt);
    size = orig_size - iov_ofs;

    /* Pad to minimum Ethernet frame length */
    if (size < sizeof(buf)) {
        iov_to_buf(iov, iovcnt, iov_ofs, &buf, size);
        memset(&buf.octets[size], 0, sizeof(buf) - size);
        /* RUC: undersize ("runt") frame counter. */
        e1000x_inc_reg_if_not_full(core->mac, RUC);
        min_iov.iov_base = &buf;
        min_iov.iov_len = size = sizeof(buf);
        iovcnt = 1;
        iov = &min_iov;
        iov_ofs = 0;
    } else {
        /* Only the L2 header is needed for classification. */
        iov_to_buf(iov, iovcnt, iov_ofs, &buf, sizeof(buf.l2_header));
    }

    net_rx_pkt_set_packet_type(core->rx_pkt,
                               get_eth_packet_type(&buf.l2_header.eth));
    net_rx_pkt_set_protocols(core->rx_pkt, iov, iovcnt, iov_ofs);

    /* Bitmask of candidate RX queues for this frame. */
    queues = igb_receive_assign(core, iov, iovcnt, iov_ofs,
                                &buf.l2_header, size,
                                &rss_info, &etqf, &ts, external_tx);
    if (!queues) {
        trace_e1000e_rx_flt_dropped();
        return orig_size;
    }

    for (i = 0; i < IGB_NUM_QUEUES; i++) {
        if (!(queues & BIT(i)) ||
            !(core->mac[RXDCTL0 + (i * 16)] & E1000_RXDCTL_QUEUE_ENABLE)) {
            continue;
        }

        igb_rx_ring_init(core, &rxr, i);

        /*
         * -1 keeps the tag; 0/1 select which tag to strip — presumably
         * CTRL_EXT bit 26 enables extended (double) VLAN so the outer
         * tag (index 1) is stripped then.  NOTE(review): confirm against
         * the CTRL_EXT register definition.
         */
        if (!igb_rx_strip_vlan(core, rxr.i)) {
            strip_vlan_index = -1;
        } else if (core->mac[CTRL_EXT] & BIT(26)) {
            strip_vlan_index = 1;
        } else {
            strip_vlan_index = 0;
        }

        net_rx_pkt_attach_iovec_ex(core->rx_pkt, iov, iovcnt, iov_ofs,
                                   strip_vlan_index,
                                   core->mac[VET] & 0xffff,
                                   core->mac[VET] >> 16);

        total_size = net_rx_pkt_get_total_len(core->rx_pkt) +
            e1000x_fcs_len(core->mac);

        if (!igb_has_rxbufs(core, rxr.i, total_size)) {
            /* Ring overflow: skip this queue only, flag RXO. */
            causes |= E1000_ICS_RXO;
            trace_e1000e_rx_not_written_to_guest(rxr.i->idx);
            continue;
        }

        causes |= E1000_ICR_RXDW;

        igb_rx_fix_l4_csum(core, core->rx_pkt);
        igb_write_packet_to_guest(core, core->rx_pkt, &rxr, &rss_info, etqf, ts);

        /* Check if receive descriptor minimum threshold hit */
        if (igb_rx_descr_threshold_hit(core, rxr.i)) {
            causes |= E1000_ICS_RXDMT0;
        }

        ecauses |= igb_rx_wb_eic(core, rxr.i->idx);

        trace_e1000e_rx_written_to_guest(rxr.i->idx);
    }

    trace_e1000e_rx_interrupt_set(causes);
    igb_raise_interrupts(core, EICR, ecauses);
    igb_raise_interrupts(core, ICR, causes);

    return orig_size;
}
  1770. static inline bool
  1771. igb_have_autoneg(IGBCore *core)
  1772. {
  1773. return core->phy[MII_BMCR] & MII_BMCR_AUTOEN;
  1774. }
  1775. static void igb_update_flowctl_status(IGBCore *core)
  1776. {
  1777. if (igb_have_autoneg(core) && core->phy[MII_BMSR] & MII_BMSR_AN_COMP) {
  1778. trace_e1000e_link_autoneg_flowctl(true);
  1779. core->mac[CTRL] |= E1000_CTRL_TFCE | E1000_CTRL_RFCE;
  1780. } else {
  1781. trace_e1000e_link_autoneg_flowctl(false);
  1782. }
  1783. }
/*
 * Take the emulated link down: update MAC/PHY registers accordingly and
 * re-derive the flow-control state from the (now stale) negotiation bits.
 */
static inline void
igb_link_down(IGBCore *core)
{
    e1000x_update_regs_on_link_down(core->mac, core->phy);
    igb_update_flowctl_status(core);
}
  1790. static inline void
  1791. igb_set_phy_ctrl(IGBCore *core, uint16_t val)
  1792. {
  1793. /* bits 0-5 reserved; MII_BMCR_[ANRESTART,RESET] are self clearing */
  1794. core->phy[MII_BMCR] = val & ~(0x3f | MII_BMCR_RESET | MII_BMCR_ANRESTART);
  1795. if ((val & MII_BMCR_ANRESTART) && igb_have_autoneg(core)) {
  1796. e1000x_restart_autoneg(core->mac, core->phy, core->autoneg_timer);
  1797. }
  1798. }
/*
 * React to a link state change of the backing netdev: update MAC/PHY
 * registers (restarting auto-negotiation if it hasn't completed), and
 * raise LSC only when the STATUS register actually changed.
 */
void igb_core_set_link_status(IGBCore *core)
{
    NetClientState *nc = qemu_get_queue(core->owner_nic);
    /* Snapshot STATUS before mutation so we can detect a real change. */
    uint32_t old_status = core->mac[STATUS];

    trace_e1000e_link_status_changed(nc->link_down ? false : true);

    if (nc->link_down) {
        e1000x_update_regs_on_link_down(core->mac, core->phy);
    } else {
        if (igb_have_autoneg(core) &&
            !(core->phy[MII_BMSR] & MII_BMSR_AN_COMP)) {
            /* Negotiation pending: (re)run it rather than forcing link-up. */
            e1000x_restart_autoneg(core->mac, core->phy,
                                   core->autoneg_timer);
        } else {
            e1000x_update_regs_on_link_up(core->mac, core->phy);
            igb_start_recv(core);
        }
    }

    if (core->mac[STATUS] != old_status) {
        igb_raise_interrupts(core, ICR, E1000_ICR_LSC);
    }
}
/*
 * Write handler for CTRL (mirrored in CTRL_DUP).  A software reset (RST)
 * resets the whole core; a PHY reset request only latches STATUS.PHYRA.
 */
static void
igb_set_ctrl(IGBCore *core, int index, uint32_t val)
{
    trace_e1000e_core_ctrl_write(index, val);

    /* RST is self clearing */
    core->mac[CTRL] = val & ~E1000_CTRL_RST;
    core->mac[CTRL_DUP] = core->mac[CTRL];

    trace_e1000e_link_set_params(
        !!(val & E1000_CTRL_ASDE),
        (val & E1000_CTRL_SPD_SEL) >> E1000_CTRL_SPD_SHIFT,
        !!(val & E1000_CTRL_FRCSPD),
        !!(val & E1000_CTRL_FRCDPX),
        !!(val & E1000_CTRL_RFCE),
        !!(val & E1000_CTRL_TFCE));

    if (val & E1000_CTRL_RST) {
        trace_e1000e_core_ctrl_sw_reset();
        igb_reset(core, true);
    }

    if (val & E1000_CTRL_PHY_RST) {
        trace_e1000e_core_ctrl_phy_reset();
        /* PHYRA: "PHY reset asserted" indication for the guest driver. */
        core->mac[STATUS] |= E1000_STATUS_PHYRA;
    }
}
/*
 * Write handler for RFCTL.  The model does not implement iSCSI/NFS
 * filtering, so leaving those filters enabled (their DIS bit clear)
 * only produces a warning trace; the value is stored verbatim.
 */
static void
igb_set_rfctl(IGBCore *core, int index, uint32_t val)
{
    trace_e1000e_rx_set_rfctl(val);

    if (!(val & E1000_RFCTL_ISCSI_DIS)) {
        trace_e1000e_wrn_iscsi_filtering_not_supported();
    }

    if (!(val & E1000_RFCTL_NFSW_DIS)) {
        trace_e1000e_wrn_nfsw_filtering_not_supported();
    }

    if (!(val & E1000_RFCTL_NFSR_DIS)) {
        trace_e1000e_wrn_nfsr_filtering_not_supported();
    }

    core->mac[RFCTL] = val;
}
  1858. static void
  1859. igb_calc_rxdesclen(IGBCore *core)
  1860. {
  1861. if (igb_rx_use_legacy_descriptor(core)) {
  1862. core->rx_desc_len = sizeof(struct e1000_rx_desc);
  1863. } else {
  1864. core->rx_desc_len = sizeof(union e1000_adv_rx_desc);
  1865. }
  1866. trace_e1000e_rx_desc_len(core->rx_desc_len);
  1867. }
/*
 * Write handler for RCTL.  Enabling RX recomputes the cached descriptor
 * length and kicks reception; a non-zero DTYP is reported as a guest
 * error (must be zero on this device for compatibility).
 */
static void
igb_set_rx_control(IGBCore *core, int index, uint32_t val)
{
    core->mac[RCTL] = val;
    trace_e1000e_rx_set_rctl(core->mac[RCTL]);

    if (val & E1000_RCTL_DTYP_MASK) {
        qemu_log_mask(LOG_GUEST_ERROR,
                      "igb: RCTL.DTYP must be zero for compatibility");
    }

    if (val & E1000_RCTL_EN) {
        igb_calc_rxdesclen(core);
        igb_start_recv(core);
    }
}
  1882. static inline bool
  1883. igb_postpone_interrupt(IGBIntrDelayTimer *timer)
  1884. {
  1885. if (timer->running) {
  1886. trace_e1000e_irq_postponed_by_xitr(timer->delay_reg << 2);
  1887. return true;
  1888. }
  1889. if (timer->core->mac[timer->delay_reg] != 0) {
  1890. igb_intrmgr_rearm_timer(timer);
  1891. }
  1892. return false;
  1893. }
/* Throttling check for extended (per-vector) interrupts via EITR[idx]. */
static inline bool
igb_eitr_should_postpone(IGBCore *core, int idx)
{
    return igb_postpone_interrupt(&core->eitr[idx]);
}
  1899. static void igb_send_msix(IGBCore *core, uint32_t causes)
  1900. {
  1901. int vector;
  1902. for (vector = 0; vector < IGB_INTR_NUM; ++vector) {
  1903. if ((causes & BIT(vector)) && !igb_eitr_should_postpone(core, vector)) {
  1904. trace_e1000e_irq_msix_notify_vec(vector);
  1905. igb_msix_notify(core, vector);
  1906. }
  1907. }
  1908. }
  1909. static inline void
  1910. igb_fix_icr_asserted(IGBCore *core)
  1911. {
  1912. core->mac[ICR] &= ~E1000_ICR_ASSERTED;
  1913. if (core->mac[ICR]) {
  1914. core->mac[ICR] |= E1000_ICR_ASSERTED;
  1915. }
  1916. trace_e1000e_irq_fix_icr_asserted(core->mac[ICR]);
  1917. }
/*
 * Set @causes in the cause register @index (ICR or EICR) and deliver any
 * newly-raised, unmasked interrupts.  In MSI-X mode, legacy ICR causes
 * are first routed through IVAR_MISC into EICR bits, and each new EICR
 * bit fires its MSI-X vector.  Otherwise a single MSI-X vector 0, MSI,
 * or the legacy INTx line is used.
 */
static void igb_raise_interrupts(IGBCore *core, size_t index, uint32_t causes)
{
    /* Snapshot pending-and-enabled sets to detect what is newly raised. */
    uint32_t old_causes = core->mac[ICR] & core->mac[IMS];
    uint32_t old_ecauses = core->mac[EICR] & core->mac[EIMS];
    uint32_t raised_causes;
    uint32_t raised_ecauses;
    uint32_t int_alloc;

    trace_e1000e_irq_set(index << 2,
                         core->mac[index], core->mac[index] | causes);

    core->mac[index] |= causes;

    if (core->mac[GPIE] & E1000_GPIE_MSIX_MODE) {
        raised_causes = core->mac[ICR] & core->mac[IMS] & ~old_causes;

        /* DRSTA is routed via the low byte of IVAR_MISC. */
        if (raised_causes & E1000_ICR_DRSTA) {
            int_alloc = core->mac[IVAR_MISC] & 0xff;
            if (int_alloc & E1000_IVAR_VALID) {
                core->mac[EICR] |= BIT(int_alloc & 0x1f);
            }
        }
        /* Check if other bits (excluding the TCP Timer) are enabled. */
        if (raised_causes & ~E1000_ICR_DRSTA) {
            int_alloc = (core->mac[IVAR_MISC] >> 8) & 0xff;
            if (int_alloc & E1000_IVAR_VALID) {
                core->mac[EICR] |= BIT(int_alloc & 0x1f);
            }
        }

        raised_ecauses = core->mac[EICR] & core->mac[EIMS] & ~old_ecauses;
        if (!raised_ecauses) {
            return;
        }

        igb_send_msix(core, raised_ecauses);
    } else {
        igb_fix_icr_asserted(core);

        raised_causes = core->mac[ICR] & core->mac[IMS] & ~old_causes;
        if (!raised_causes) {
            return;
        }

        /* Reflect legacy delivery in EICR (OTHER plus DRSTA if raised). */
        core->mac[EICR] |= (raised_causes & E1000_ICR_DRSTA) | E1000_EICR_OTHER;

        if (msix_enabled(core->owner)) {
            trace_e1000e_irq_msix_notify_vec(0);
            msix_notify(core->owner, 0);
        } else if (msi_enabled(core->owner)) {
            trace_e1000e_irq_msi_notify(raised_causes);
            msi_notify(core->owner, 0);
        } else {
            igb_raise_legacy_irq(core);
        }
    }
}
/*
 * Clear @causes in the cause register @index and, when no enabled legacy
 * cause remains outside MSI-X mode, clear the OTHER reflection in EICR
 * and deassert the legacy INTx line.
 */
static void igb_lower_interrupts(IGBCore *core, size_t index, uint32_t causes)
{
    trace_e1000e_irq_clear(index << 2,
                           core->mac[index], core->mac[index] & ~causes);

    core->mac[index] &= ~causes;

    trace_e1000e_irq_pending_interrupts(core->mac[ICR] & core->mac[IMS],
                                        core->mac[ICR], core->mac[IMS]);

    if (!(core->mac[ICR] & core->mac[IMS]) &&
        !(core->mac[GPIE] & E1000_GPIE_MSIX_MODE)) {
        core->mac[EICR] &= ~E1000_EICR_OTHER;

        if (!msix_enabled(core->owner) && !msi_enabled(core->owner)) {
            igb_lower_legacy_irq(core);
        }
    }
}
  1981. static void igb_set_eics(IGBCore *core, int index, uint32_t val)
  1982. {
  1983. bool msix = !!(core->mac[GPIE] & E1000_GPIE_MSIX_MODE);
  1984. uint32_t mask = msix ? E1000_EICR_MSIX_MASK : E1000_EICR_LEGACY_MASK;
  1985. trace_igb_irq_write_eics(val, msix);
  1986. igb_raise_interrupts(core, EICR, val & mask);
  1987. }
  1988. static void igb_set_eims(IGBCore *core, int index, uint32_t val)
  1989. {
  1990. bool msix = !!(core->mac[GPIE] & E1000_GPIE_MSIX_MODE);
  1991. uint32_t mask = msix ? E1000_EICR_MSIX_MASK : E1000_EICR_LEGACY_MASK;
  1992. trace_igb_irq_write_eims(val, msix);
  1993. igb_raise_interrupts(core, EIMS, val & mask);
  1994. }
/*
 * Raise the mailbox interrupt towards VF @vfn: if its VTIVAR_MISC entry
 * is valid, set the mapped EICR bit.  The shift
 * (22 - vfn * IGBVF_MSIX_VEC_NUM) maps the VF-local vector (0..3) into
 * that VF's slice of the global EICR — NOTE(review): same mapping as
 * igb_set_vteics() et al.; confirm against the EICR bit layout.
 */
static void mailbox_interrupt_to_vf(IGBCore *core, uint16_t vfn)
{
    uint32_t ent = core->mac[VTIVAR_MISC + vfn];
    uint32_t causes;

    if ((ent & E1000_IVAR_VALID)) {
        causes = (ent & 0x3) << (22 - vfn * IGBVF_MSIX_VEC_NUM);
        igb_raise_interrupts(core, EICR, causes);
    }
}
/* Notify the PF of mailbox activity via the VMMB cause in ICR. */
static void mailbox_interrupt_to_pf(IGBCore *core)
{
    igb_raise_interrupts(core, ICR, E1000_ICR_VMMB);
}
/*
 * PF-side mailbox write handler (P2VMAILBOX[vfn]): forward status/ack
 * indications to the VF, arbitrate the shared-buffer ownership flags
 * (PFU/VFU), and handle the PF's explicit release of a VF's claim (RVFU).
 */
static void igb_set_pfmailbox(IGBCore *core, int index, uint32_t val)
{
    uint16_t vfn = index - P2VMAILBOX0;

    trace_igb_set_pfmailbox(vfn, val);

    if (val & E1000_P2VMAILBOX_STS) {
        /* PF has a message for the VF. */
        core->mac[V2PMAILBOX0 + vfn] |= E1000_V2PMAILBOX_PFSTS;
        mailbox_interrupt_to_vf(core, vfn);
    }

    if (val & E1000_P2VMAILBOX_ACK) {
        /* PF acknowledges a VF message. */
        core->mac[V2PMAILBOX0 + vfn] |= E1000_V2PMAILBOX_PFACK;
        mailbox_interrupt_to_vf(core, vfn);
    }

    /* Buffer Taken by PF (can be set only if the VFU is cleared). */
    if (val & E1000_P2VMAILBOX_PFU) {
        if (!(core->mac[index] & E1000_P2VMAILBOX_VFU)) {
            core->mac[index] |= E1000_P2VMAILBOX_PFU;
            core->mac[V2PMAILBOX0 + vfn] |= E1000_V2PMAILBOX_PFU;
        }
    } else {
        core->mac[index] &= ~E1000_P2VMAILBOX_PFU;
        core->mac[V2PMAILBOX0 + vfn] &= ~E1000_V2PMAILBOX_PFU;
    }

    if (val & E1000_P2VMAILBOX_RVFU) {
        /* PF forcibly releases the VF's hold and its pending req/ack. */
        core->mac[V2PMAILBOX0 + vfn] &= ~E1000_V2PMAILBOX_VFU;
        core->mac[MBVFICR] &= ~((E1000_MBVFICR_VFACK_VF1 << vfn) |
                                (E1000_MBVFICR_VFREQ_VF1 << vfn));
    }
}
/*
 * VF-side mailbox write handler (V2PMAILBOX[vfn]): record request/ack
 * indications in MBVFICR and interrupt the PF; arbitrate the VF's side
 * of the shared-buffer ownership (VFU vs. PFU).
 */
static void igb_set_vfmailbox(IGBCore *core, int index, uint32_t val)
{
    uint16_t vfn = index - V2PMAILBOX0;

    trace_igb_set_vfmailbox(vfn, val);

    if (val & E1000_V2PMAILBOX_REQ) {
        /* VF requests the PF's attention. */
        core->mac[MBVFICR] |= E1000_MBVFICR_VFREQ_VF1 << vfn;
        mailbox_interrupt_to_pf(core);
    }

    if (val & E1000_V2PMAILBOX_ACK) {
        /* VF acknowledges a PF message. */
        core->mac[MBVFICR] |= E1000_MBVFICR_VFACK_VF1 << vfn;
        mailbox_interrupt_to_pf(core);
    }

    /* Buffer Taken by VF (can be set only if the PFU is cleared). */
    if (val & E1000_V2PMAILBOX_VFU) {
        if (!(core->mac[index] & E1000_V2PMAILBOX_PFU)) {
            core->mac[index] |= E1000_V2PMAILBOX_VFU;
            core->mac[P2VMAILBOX0 + vfn] |= E1000_P2VMAILBOX_VFU;
        }
    } else {
        core->mac[index] &= ~E1000_V2PMAILBOX_VFU;
        core->mac[P2VMAILBOX0 + vfn] &= ~E1000_P2VMAILBOX_VFU;
    }
}
/*
 * Function-level reset of VF @vfn: disable both of its queue pairs
 * (queues vfn and vfn + IGB_NUM_VM_POOLS), revoke its RX/TX enables,
 * and signal the reset to the PF via VFLRE and the mailbox interrupt.
 */
void igb_core_vf_reset(IGBCore *core, uint16_t vfn)
{
    uint16_t qn0 = vfn;
    uint16_t qn1 = vfn + IGB_NUM_VM_POOLS;

    trace_igb_core_vf_reset(vfn);

    /* disable Rx and Tx for the VF*/
    core->mac[RXDCTL0 + (qn0 * 16)] &= ~E1000_RXDCTL_QUEUE_ENABLE;
    core->mac[RXDCTL0 + (qn1 * 16)] &= ~E1000_RXDCTL_QUEUE_ENABLE;
    core->mac[TXDCTL0 + (qn0 * 16)] &= ~E1000_TXDCTL_QUEUE_ENABLE;
    core->mac[TXDCTL0 + (qn1 * 16)] &= ~E1000_TXDCTL_QUEUE_ENABLE;
    core->mac[VFRE] &= ~BIT(vfn);
    core->mac[VFTE] &= ~BIT(vfn);

    /* indicate VF reset to PF */
    core->mac[VFLRE] |= BIT(vfn);
    /* VFLRE and mailbox use the same interrupt cause */
    mailbox_interrupt_to_pf(core);
}
  2076. static void igb_w1c(IGBCore *core, int index, uint32_t val)
  2077. {
  2078. core->mac[index] &= ~val;
  2079. }
  2080. static void igb_set_eimc(IGBCore *core, int index, uint32_t val)
  2081. {
  2082. bool msix = !!(core->mac[GPIE] & E1000_GPIE_MSIX_MODE);
  2083. uint32_t mask = msix ? E1000_EICR_MSIX_MASK : E1000_EICR_LEGACY_MASK;
  2084. trace_igb_irq_write_eimc(val, msix);
  2085. /* Interrupts are disabled via a write to EIMC and reflected in EIMS. */
  2086. igb_lower_interrupts(core, EIMS, val & mask);
  2087. }
  2088. static void igb_set_eiac(IGBCore *core, int index, uint32_t val)
  2089. {
  2090. bool msix = !!(core->mac[GPIE] & E1000_GPIE_MSIX_MODE);
  2091. if (msix) {
  2092. trace_igb_irq_write_eiac(val);
  2093. /*
  2094. * TODO: When using IOV, the bits that correspond to MSI-X vectors
  2095. * that are assigned to a VF are read-only.
  2096. */
  2097. core->mac[EIAC] |= (val & E1000_EICR_MSIX_MASK);
  2098. }
  2099. }
  2100. static void igb_set_eiam(IGBCore *core, int index, uint32_t val)
  2101. {
  2102. bool msix = !!(core->mac[GPIE] & E1000_GPIE_MSIX_MODE);
  2103. /*
  2104. * TODO: When using IOV, the bits that correspond to MSI-X vectors that
  2105. * are assigned to a VF are read-only.
  2106. */
  2107. core->mac[EIAM] |=
  2108. ~(val & (msix ? E1000_EICR_MSIX_MASK : E1000_EICR_LEGACY_MASK));
  2109. trace_igb_irq_write_eiam(val, msix);
  2110. }
  2111. static void igb_set_eicr(IGBCore *core, int index, uint32_t val)
  2112. {
  2113. bool msix = !!(core->mac[GPIE] & E1000_GPIE_MSIX_MODE);
  2114. /*
  2115. * TODO: In IOV mode, only bit zero of this vector is available for the PF
  2116. * function.
  2117. */
  2118. uint32_t mask = msix ? E1000_EICR_MSIX_MASK : E1000_EICR_LEGACY_MASK;
  2119. trace_igb_irq_write_eicr(val, msix);
  2120. igb_lower_interrupts(core, EICR, val & mask);
  2121. }
  2122. static void igb_set_vtctrl(IGBCore *core, int index, uint32_t val)
  2123. {
  2124. uint16_t vfn;
  2125. if (val & E1000_CTRL_RST) {
  2126. vfn = (index - PVTCTRL0) / 0x40;
  2127. igb_core_vf_reset(core, vfn);
  2128. }
  2129. }
  2130. static void igb_set_vteics(IGBCore *core, int index, uint32_t val)
  2131. {
  2132. uint16_t vfn = (index - PVTEICS0) / 0x40;
  2133. core->mac[index] = val;
  2134. igb_set_eics(core, EICS, (val & 0x7) << (22 - vfn * IGBVF_MSIX_VEC_NUM));
  2135. }
  2136. static void igb_set_vteims(IGBCore *core, int index, uint32_t val)
  2137. {
  2138. uint16_t vfn = (index - PVTEIMS0) / 0x40;
  2139. core->mac[index] = val;
  2140. igb_set_eims(core, EIMS, (val & 0x7) << (22 - vfn * IGBVF_MSIX_VEC_NUM));
  2141. }
  2142. static void igb_set_vteimc(IGBCore *core, int index, uint32_t val)
  2143. {
  2144. uint16_t vfn = (index - PVTEIMC0) / 0x40;
  2145. core->mac[index] = val;
  2146. igb_set_eimc(core, EIMC, (val & 0x7) << (22 - vfn * IGBVF_MSIX_VEC_NUM));
  2147. }
  2148. static void igb_set_vteiac(IGBCore *core, int index, uint32_t val)
  2149. {
  2150. uint16_t vfn = (index - PVTEIAC0) / 0x40;
  2151. core->mac[index] = val;
  2152. igb_set_eiac(core, EIAC, (val & 0x7) << (22 - vfn * IGBVF_MSIX_VEC_NUM));
  2153. }
  2154. static void igb_set_vteiam(IGBCore *core, int index, uint32_t val)
  2155. {
  2156. uint16_t vfn = (index - PVTEIAM0) / 0x40;
  2157. core->mac[index] = val;
  2158. igb_set_eiam(core, EIAM, (val & 0x7) << (22 - vfn * IGBVF_MSIX_VEC_NUM));
  2159. }
  2160. static void igb_set_vteicr(IGBCore *core, int index, uint32_t val)
  2161. {
  2162. uint16_t vfn = (index - PVTEICR0) / 0x40;
  2163. core->mac[index] = val;
  2164. igb_set_eicr(core, EICR, (val & 0x7) << (22 - vfn * IGBVF_MSIX_VEC_NUM));
  2165. }
/*
 * Write handler for a VF's VTIVAR register: records the VF-local vector
 * assignment and mirrors it into the PF-wide IVAR table, translating the
 * VF-relative MSI-X vector number into its absolute vector slot.
 */
static void igb_set_vtivar(IGBCore *core, int index, uint32_t val)
{
    uint16_t vfn = (index - VTIVAR);
    uint16_t qn = vfn;
    uint8_t ent;
    int n;

    core->mac[index] = val;

    /* Get assigned vector associated with queue Rx#0. */
    if ((val & E1000_IVAR_VALID)) {
        n = igb_ivar_entry_rx(qn);
        /* Translate the VF-relative vector number (val & 0x7) to its
         * absolute position in the PF vector space. */
        ent = E1000_IVAR_VALID | (24 - vfn * IGBVF_MSIX_VEC_NUM - (2 - (val & 0x7)));
        core->mac[IVAR0 + n / 4] |= ent << 8 * (n % 4);
    }

    /* Get assigned vector associated with queue Tx#0 */
    ent = val >> 8;
    if ((ent & E1000_IVAR_VALID)) {
        n = igb_ivar_entry_tx(qn);
        ent = E1000_IVAR_VALID | (24 - vfn * IGBVF_MSIX_VEC_NUM - (2 - (ent & 0x7)));
        core->mac[IVAR0 + n / 4] |= ent << 8 * (n % 4);
    }

    /*
     * Ignoring assigned vectors associated with queues Rx#1 and Tx#1 for now.
     */
}
  2190. static inline void
  2191. igb_autoneg_timer(void *opaque)
  2192. {
  2193. IGBCore *core = opaque;
  2194. if (!qemu_get_queue(core->owner_nic)->link_down) {
  2195. e1000x_update_regs_on_autoneg_done(core->mac, core->phy);
  2196. igb_start_recv(core);
  2197. igb_update_flowctl_status(core);
  2198. /* signal link status change to the guest */
  2199. igb_raise_interrupts(core, ICR, E1000_ICR_LSC);
  2200. }
  2201. }
  2202. static inline uint16_t
  2203. igb_get_reg_index_with_offset(const uint16_t *mac_reg_access, hwaddr addr)
  2204. {
  2205. uint16_t index = (addr & 0x1ffff) >> 2;
  2206. return index + (mac_reg_access[index] & 0xfffe);
  2207. }
/*
 * Access capability for each emulated PHY register, indexed by MDIO
 * register address: PHY_R readable, PHY_W writable, PHY_RW both.
 * Addresses not listed are zero (inaccessible) and MDIC accesses to
 * them report an error.
 */
static const char igb_phy_regcap[MAX_PHY_REG_ADDRESS + 1] = {
    [MII_BMCR]                   = PHY_RW,
    [MII_BMSR]                   = PHY_R,
    [MII_PHYID1]                 = PHY_R,
    [MII_PHYID2]                 = PHY_R,
    [MII_ANAR]                   = PHY_RW,
    [MII_ANLPAR]                 = PHY_R,
    [MII_ANER]                   = PHY_R,
    [MII_ANNP]                   = PHY_RW,
    [MII_ANLPRNP]                = PHY_R,
    [MII_CTRL1000]               = PHY_RW,
    [MII_STAT1000]               = PHY_R,
    [MII_EXTSTAT]                = PHY_R,
    [IGP01E1000_PHY_PORT_CONFIG] = PHY_RW,
    [IGP01E1000_PHY_PORT_STATUS] = PHY_R,
    [IGP01E1000_PHY_PORT_CTRL]   = PHY_RW,
    [IGP01E1000_PHY_LINK_HEALTH] = PHY_R,
    [IGP02E1000_PHY_POWER_MGMT]  = PHY_RW,
    [IGP01E1000_PHY_PAGE_SELECT] = PHY_W
};
  2228. static void
  2229. igb_phy_reg_write(IGBCore *core, uint32_t addr, uint16_t data)
  2230. {
  2231. assert(addr <= MAX_PHY_REG_ADDRESS);
  2232. if (addr == MII_BMCR) {
  2233. igb_set_phy_ctrl(core, data);
  2234. } else {
  2235. core->phy[addr] = data;
  2236. }
  2237. }
/*
 * MDIC write handler: performs the requested MDIO read or write against
 * the emulated PHY and completes synchronously (READY is set on return).
 * Only PHY address 1 exists; other addresses, and reads/writes of
 * unsupported registers, report E1000_MDIC_ERROR.
 */
static void
igb_set_mdic(IGBCore *core, int index, uint32_t val)
{
    uint32_t data = val & E1000_MDIC_DATA_MASK;
    uint32_t addr = ((val & E1000_MDIC_REG_MASK) >> E1000_MDIC_REG_SHIFT);

    if ((val & E1000_MDIC_PHY_MASK) >> E1000_MDIC_PHY_SHIFT != 1) { /* phy # */
        val = core->mac[MDIC] | E1000_MDIC_ERROR;
    } else if (val & E1000_MDIC_OP_READ) {
        if (!(igb_phy_regcap[addr] & PHY_R)) {
            trace_igb_core_mdic_read_unhandled(addr);
            val |= E1000_MDIC_ERROR;
        } else {
            /* Clear the data field of the request and merge in the
             * current PHY register value. */
            val = (val ^ data) | core->phy[addr];
            trace_igb_core_mdic_read(addr, val);
        }
    } else if (val & E1000_MDIC_OP_WRITE) {
        if (!(igb_phy_regcap[addr] & PHY_W)) {
            trace_igb_core_mdic_write_unhandled(addr);
            val |= E1000_MDIC_ERROR;
        } else {
            trace_igb_core_mdic_write(addr, data);
            igb_phy_reg_write(core, addr, data);
        }
    }
    core->mac[MDIC] = val | E1000_MDIC_READY;

    /* Optionally raise the "MDIO access complete" interrupt. */
    if (val & E1000_MDIC_INT_EN) {
        igb_raise_interrupts(core, ICR, E1000_ICR_MDAC);
    }
}
  2267. static void
  2268. igb_set_rdt(IGBCore *core, int index, uint32_t val)
  2269. {
  2270. core->mac[index] = val & 0xffff;
  2271. trace_e1000e_rx_set_rdt(igb_mq_queue_idx(RDT0, index), val);
  2272. igb_start_recv(core);
  2273. }
  2274. static void
  2275. igb_set_status(IGBCore *core, int index, uint32_t val)
  2276. {
  2277. if ((val & E1000_STATUS_PHYRA) == 0) {
  2278. core->mac[index] &= ~E1000_STATUS_PHYRA;
  2279. }
  2280. }
  2281. static void
  2282. igb_set_ctrlext(IGBCore *core, int index, uint32_t val)
  2283. {
  2284. trace_igb_link_set_ext_params(!!(val & E1000_CTRL_EXT_ASDCHK),
  2285. !!(val & E1000_CTRL_EXT_SPD_BYPS),
  2286. !!(val & E1000_CTRL_EXT_PFRSTD));
  2287. /* Zero self-clearing bits */
  2288. val &= ~(E1000_CTRL_EXT_ASDCHK | E1000_CTRL_EXT_EE_RST);
  2289. core->mac[CTRL_EXT] = val;
  2290. if (core->mac[CTRL_EXT] & E1000_CTRL_EXT_PFRSTD) {
  2291. for (int vfn = 0; vfn < IGB_MAX_VF_FUNCTIONS; vfn++) {
  2292. core->mac[V2PMAILBOX0 + vfn] &= ~E1000_V2PMAILBOX_RSTI;
  2293. core->mac[V2PMAILBOX0 + vfn] |= E1000_V2PMAILBOX_RSTD;
  2294. }
  2295. }
  2296. }
  2297. static void
  2298. igb_set_pbaclr(IGBCore *core, int index, uint32_t val)
  2299. {
  2300. int i;
  2301. core->mac[PBACLR] = val & E1000_PBACLR_VALID_MASK;
  2302. if (!msix_enabled(core->owner)) {
  2303. return;
  2304. }
  2305. for (i = 0; i < IGB_INTR_NUM; i++) {
  2306. if (core->mac[PBACLR] & BIT(i)) {
  2307. msix_clr_pending(core->owner, i);
  2308. }
  2309. }
  2310. }
  2311. static void
  2312. igb_set_fcrth(IGBCore *core, int index, uint32_t val)
  2313. {
  2314. core->mac[FCRTH] = val & 0xFFF8;
  2315. }
  2316. static void
  2317. igb_set_fcrtl(IGBCore *core, int index, uint32_t val)
  2318. {
  2319. core->mac[FCRTL] = val & 0x8000FFF8;
  2320. }
/*
 * Generate a write handler that keeps only the low `num` bits of the
 * written value, for registers whose upper bits are reserved.
 */
#define IGB_LOW_BITS_SET_FUNC(num)                             \
    static void                                                \
    igb_set_##num##bit(IGBCore *core, int index, uint32_t val) \
    {                                                          \
        core->mac[index] = val & (BIT(num) - 1);               \
    }

IGB_LOW_BITS_SET_FUNC(4)
IGB_LOW_BITS_SET_FUNC(13)
IGB_LOW_BITS_SET_FUNC(16)
  2330. static void
  2331. igb_set_dlen(IGBCore *core, int index, uint32_t val)
  2332. {
  2333. core->mac[index] = val & 0xffff0;
  2334. }
  2335. static void
  2336. igb_set_dbal(IGBCore *core, int index, uint32_t val)
  2337. {
  2338. core->mac[index] = val & E1000_XDBAL_MASK;
  2339. }
  2340. static void
  2341. igb_set_tdt(IGBCore *core, int index, uint32_t val)
  2342. {
  2343. IGB_TxRing txr;
  2344. int qn = igb_mq_queue_idx(TDT0, index);
  2345. core->mac[index] = val & 0xffff;
  2346. igb_tx_ring_init(core, &txr, qn);
  2347. igb_start_xmit(core, &txr);
  2348. }
  2349. static void
  2350. igb_set_ics(IGBCore *core, int index, uint32_t val)
  2351. {
  2352. trace_e1000e_irq_write_ics(val);
  2353. igb_raise_interrupts(core, ICR, val);
  2354. }
  2355. static void
  2356. igb_set_imc(IGBCore *core, int index, uint32_t val)
  2357. {
  2358. trace_e1000e_irq_ims_clear_set_imc(val);
  2359. igb_lower_interrupts(core, IMS, val);
  2360. }
  2361. static void
  2362. igb_set_ims(IGBCore *core, int index, uint32_t val)
  2363. {
  2364. igb_raise_interrupts(core, IMS, val & 0x77D4FBFD);
  2365. }
  2366. static void igb_nsicr(IGBCore *core)
  2367. {
  2368. /*
  2369. * If GPIE.NSICR = 0, then the clear of IMS will occur only if at
  2370. * least one bit is set in the IMS and there is a true interrupt as
  2371. * reflected in ICR.INTA.
  2372. */
  2373. if ((core->mac[GPIE] & E1000_GPIE_NSICR) ||
  2374. (core->mac[IMS] && (core->mac[ICR] & E1000_ICR_INT_ASSERTED))) {
  2375. igb_lower_interrupts(core, IMS, core->mac[IAM]);
  2376. }
  2377. }
  2378. static void igb_set_icr(IGBCore *core, int index, uint32_t val)
  2379. {
  2380. igb_nsicr(core);
  2381. igb_lower_interrupts(core, ICR, val);
  2382. }
  2383. static uint32_t
  2384. igb_mac_readreg(IGBCore *core, int index)
  2385. {
  2386. return core->mac[index];
  2387. }
  2388. static uint32_t
  2389. igb_mac_ics_read(IGBCore *core, int index)
  2390. {
  2391. trace_e1000e_irq_read_ics(core->mac[ICS]);
  2392. return core->mac[ICS];
  2393. }
  2394. static uint32_t
  2395. igb_mac_ims_read(IGBCore *core, int index)
  2396. {
  2397. trace_e1000e_irq_read_ims(core->mac[IMS]);
  2398. return core->mac[IMS];
  2399. }
  2400. static uint32_t
  2401. igb_mac_swsm_read(IGBCore *core, int index)
  2402. {
  2403. uint32_t val = core->mac[SWSM];
  2404. core->mac[SWSM] = val | E1000_SWSM_SMBI;
  2405. return val;
  2406. }
  2407. static uint32_t
  2408. igb_mac_eitr_read(IGBCore *core, int index)
  2409. {
  2410. return core->eitr_guest_value[index - EITR0];
  2411. }
  2412. static uint32_t igb_mac_vfmailbox_read(IGBCore *core, int index)
  2413. {
  2414. uint32_t val = core->mac[index];
  2415. core->mac[index] &= ~(E1000_V2PMAILBOX_PFSTS | E1000_V2PMAILBOX_PFACK |
  2416. E1000_V2PMAILBOX_RSTD);
  2417. return val;
  2418. }
  2419. static uint32_t
  2420. igb_mac_icr_read(IGBCore *core, int index)
  2421. {
  2422. uint32_t ret = core->mac[ICR];
  2423. if (core->mac[GPIE] & E1000_GPIE_NSICR) {
  2424. trace_igb_irq_icr_clear_gpie_nsicr();
  2425. igb_lower_interrupts(core, ICR, 0xffffffff);
  2426. } else if (core->mac[IMS] == 0) {
  2427. trace_e1000e_irq_icr_clear_zero_ims();
  2428. igb_lower_interrupts(core, ICR, 0xffffffff);
  2429. } else if (core->mac[ICR] & E1000_ICR_INT_ASSERTED) {
  2430. igb_lower_interrupts(core, ICR, 0xffffffff);
  2431. } else if (!msix_enabled(core->owner)) {
  2432. trace_e1000e_irq_icr_clear_nonmsix_icr_read();
  2433. igb_lower_interrupts(core, ICR, 0xffffffff);
  2434. }
  2435. igb_nsicr(core);
  2436. return ret;
  2437. }
  2438. static uint32_t
  2439. igb_mac_read_clr4(IGBCore *core, int index)
  2440. {
  2441. uint32_t ret = core->mac[index];
  2442. core->mac[index] = 0;
  2443. return ret;
  2444. }
  2445. static uint32_t
  2446. igb_mac_read_clr8(IGBCore *core, int index)
  2447. {
  2448. uint32_t ret = core->mac[index];
  2449. core->mac[index] = 0;
  2450. core->mac[index - 1] = 0;
  2451. return ret;
  2452. }
  2453. static uint32_t
  2454. igb_get_ctrl(IGBCore *core, int index)
  2455. {
  2456. uint32_t val = core->mac[CTRL];
  2457. trace_e1000e_link_read_params(
  2458. !!(val & E1000_CTRL_ASDE),
  2459. (val & E1000_CTRL_SPD_SEL) >> E1000_CTRL_SPD_SHIFT,
  2460. !!(val & E1000_CTRL_FRCSPD),
  2461. !!(val & E1000_CTRL_FRCDPX),
  2462. !!(val & E1000_CTRL_RFCE),
  2463. !!(val & E1000_CTRL_TFCE));
  2464. return val;
  2465. }
  2466. static uint32_t igb_get_status(IGBCore *core, int index)
  2467. {
  2468. uint32_t res = core->mac[STATUS];
  2469. uint16_t num_vfs = pcie_sriov_num_vfs(core->owner);
  2470. if (core->mac[CTRL] & E1000_CTRL_FRCDPX) {
  2471. res |= (core->mac[CTRL] & E1000_CTRL_FD) ? E1000_STATUS_FD : 0;
  2472. } else {
  2473. res |= E1000_STATUS_FD;
  2474. }
  2475. if ((core->mac[CTRL] & E1000_CTRL_FRCSPD) ||
  2476. (core->mac[CTRL_EXT] & E1000_CTRL_EXT_SPD_BYPS)) {
  2477. switch (core->mac[CTRL] & E1000_CTRL_SPD_SEL) {
  2478. case E1000_CTRL_SPD_10:
  2479. res |= E1000_STATUS_SPEED_10;
  2480. break;
  2481. case E1000_CTRL_SPD_100:
  2482. res |= E1000_STATUS_SPEED_100;
  2483. break;
  2484. case E1000_CTRL_SPD_1000:
  2485. default:
  2486. res |= E1000_STATUS_SPEED_1000;
  2487. break;
  2488. }
  2489. } else {
  2490. res |= E1000_STATUS_SPEED_1000;
  2491. }
  2492. if (num_vfs) {
  2493. res |= num_vfs << E1000_STATUS_NUM_VFS_SHIFT;
  2494. res |= E1000_STATUS_IOV_MODE;
  2495. }
  2496. if (!(core->mac[CTRL] & E1000_CTRL_GIO_MASTER_DISABLE)) {
  2497. res |= E1000_STATUS_GIO_MASTER_ENABLE;
  2498. }
  2499. return res;
  2500. }
  2501. static void
  2502. igb_mac_writereg(IGBCore *core, int index, uint32_t val)
  2503. {
  2504. core->mac[index] = val;
  2505. }
  2506. static void
  2507. igb_mac_setmacaddr(IGBCore *core, int index, uint32_t val)
  2508. {
  2509. uint32_t macaddr[2];
  2510. core->mac[index] = val;
  2511. macaddr[0] = cpu_to_le32(core->mac[RA]);
  2512. macaddr[1] = cpu_to_le32(core->mac[RA + 1]);
  2513. qemu_format_nic_info_str(qemu_get_queue(core->owner_nic),
  2514. (uint8_t *) macaddr);
  2515. trace_e1000e_mac_set_sw(MAC_ARG(macaddr));
  2516. }
  2517. static void
  2518. igb_set_eecd(IGBCore *core, int index, uint32_t val)
  2519. {
  2520. static const uint32_t ro_bits = E1000_EECD_PRES |
  2521. E1000_EECD_AUTO_RD |
  2522. E1000_EECD_SIZE_EX_MASK;
  2523. core->mac[EECD] = (core->mac[EECD] & ro_bits) | (val & ~ro_bits);
  2524. }
  2525. static void
  2526. igb_set_eerd(IGBCore *core, int index, uint32_t val)
  2527. {
  2528. uint32_t addr = (val >> E1000_EERW_ADDR_SHIFT) & E1000_EERW_ADDR_MASK;
  2529. uint32_t flags = 0;
  2530. uint32_t data = 0;
  2531. if ((addr < IGB_EEPROM_SIZE) && (val & E1000_EERW_START)) {
  2532. data = core->eeprom[addr];
  2533. flags = E1000_EERW_DONE;
  2534. }
  2535. core->mac[EERD] = flags |
  2536. (addr << E1000_EERW_ADDR_SHIFT) |
  2537. (data << E1000_EERW_DATA_SHIFT);
  2538. }
  2539. static void
  2540. igb_set_eitr(IGBCore *core, int index, uint32_t val)
  2541. {
  2542. uint32_t eitr_num = index - EITR0;
  2543. trace_igb_irq_eitr_set(eitr_num, val);
  2544. core->eitr_guest_value[eitr_num] = val & ~E1000_EITR_CNT_IGNR;
  2545. core->mac[index] = val & 0x7FFE;
  2546. }
  2547. static void
  2548. igb_update_rx_offloads(IGBCore *core)
  2549. {
  2550. int cso_state = igb_rx_l4_cso_enabled(core);
  2551. trace_e1000e_rx_set_cso(cso_state);
  2552. if (core->has_vnet) {
  2553. qemu_set_offload(qemu_get_queue(core->owner_nic)->peer,
  2554. cso_state, 0, 0, 0, 0, 0, 0);
  2555. }
  2556. }
  2557. static void
  2558. igb_set_rxcsum(IGBCore *core, int index, uint32_t val)
  2559. {
  2560. core->mac[RXCSUM] = val;
  2561. igb_update_rx_offloads(core);
  2562. }
  2563. static void
  2564. igb_set_gcr(IGBCore *core, int index, uint32_t val)
  2565. {
  2566. uint32_t ro_bits = core->mac[GCR] & E1000_GCR_RO_BITS;
  2567. core->mac[GCR] = (val & ~E1000_GCR_RO_BITS) | ro_bits;
  2568. }
  2569. static uint32_t igb_get_systiml(IGBCore *core, int index)
  2570. {
  2571. e1000x_timestamp(core->mac, core->timadj, SYSTIML, SYSTIMH);
  2572. return core->mac[SYSTIML];
  2573. }
  2574. static uint32_t igb_get_rxsatrh(IGBCore *core, int index)
  2575. {
  2576. core->mac[TSYNCRXCTL] &= ~E1000_TSYNCRXCTL_VALID;
  2577. return core->mac[RXSATRH];
  2578. }
  2579. static uint32_t igb_get_txstmph(IGBCore *core, int index)
  2580. {
  2581. core->mac[TSYNCTXCTL] &= ~E1000_TSYNCTXCTL_VALID;
  2582. return core->mac[TXSTMPH];
  2583. }
  2584. static void igb_set_timinca(IGBCore *core, int index, uint32_t val)
  2585. {
  2586. e1000x_set_timinca(core->mac, &core->timadj, val);
  2587. }
  2588. static void igb_set_timadjh(IGBCore *core, int index, uint32_t val)
  2589. {
  2590. core->mac[TIMADJH] = val;
  2591. core->timadj += core->mac[TIMADJL] | ((int64_t)core->mac[TIMADJH] << 32);
  2592. }
  2593. #define igb_getreg(x) [x] = igb_mac_readreg
  2594. typedef uint32_t (*readops)(IGBCore *, int);
  2595. static const readops igb_macreg_readops[] = {
  2596. igb_getreg(WUFC),
  2597. igb_getreg(MANC),
  2598. igb_getreg(TOTL),
  2599. igb_getreg(RDT0),
  2600. igb_getreg(RDT1),
  2601. igb_getreg(RDT2),
  2602. igb_getreg(RDT3),
  2603. igb_getreg(RDT4),
  2604. igb_getreg(RDT5),
  2605. igb_getreg(RDT6),
  2606. igb_getreg(RDT7),
  2607. igb_getreg(RDT8),
  2608. igb_getreg(RDT9),
  2609. igb_getreg(RDT10),
  2610. igb_getreg(RDT11),
  2611. igb_getreg(RDT12),
  2612. igb_getreg(RDT13),
  2613. igb_getreg(RDT14),
  2614. igb_getreg(RDT15),
  2615. igb_getreg(RDBAH0),
  2616. igb_getreg(RDBAH1),
  2617. igb_getreg(RDBAH2),
  2618. igb_getreg(RDBAH3),
  2619. igb_getreg(RDBAH4),
  2620. igb_getreg(RDBAH5),
  2621. igb_getreg(RDBAH6),
  2622. igb_getreg(RDBAH7),
  2623. igb_getreg(RDBAH8),
  2624. igb_getreg(RDBAH9),
  2625. igb_getreg(RDBAH10),
  2626. igb_getreg(RDBAH11),
  2627. igb_getreg(RDBAH12),
  2628. igb_getreg(RDBAH13),
  2629. igb_getreg(RDBAH14),
  2630. igb_getreg(RDBAH15),
  2631. igb_getreg(TDBAL0),
  2632. igb_getreg(TDBAL1),
  2633. igb_getreg(TDBAL2),
  2634. igb_getreg(TDBAL3),
  2635. igb_getreg(TDBAL4),
  2636. igb_getreg(TDBAL5),
  2637. igb_getreg(TDBAL6),
  2638. igb_getreg(TDBAL7),
  2639. igb_getreg(TDBAL8),
  2640. igb_getreg(TDBAL9),
  2641. igb_getreg(TDBAL10),
  2642. igb_getreg(TDBAL11),
  2643. igb_getreg(TDBAL12),
  2644. igb_getreg(TDBAL13),
  2645. igb_getreg(TDBAL14),
  2646. igb_getreg(TDBAL15),
  2647. igb_getreg(RDLEN0),
  2648. igb_getreg(RDLEN1),
  2649. igb_getreg(RDLEN2),
  2650. igb_getreg(RDLEN3),
  2651. igb_getreg(RDLEN4),
  2652. igb_getreg(RDLEN5),
  2653. igb_getreg(RDLEN6),
  2654. igb_getreg(RDLEN7),
  2655. igb_getreg(RDLEN8),
  2656. igb_getreg(RDLEN9),
  2657. igb_getreg(RDLEN10),
  2658. igb_getreg(RDLEN11),
  2659. igb_getreg(RDLEN12),
  2660. igb_getreg(RDLEN13),
  2661. igb_getreg(RDLEN14),
  2662. igb_getreg(RDLEN15),
  2663. igb_getreg(SRRCTL0),
  2664. igb_getreg(SRRCTL1),
  2665. igb_getreg(SRRCTL2),
  2666. igb_getreg(SRRCTL3),
  2667. igb_getreg(SRRCTL4),
  2668. igb_getreg(SRRCTL5),
  2669. igb_getreg(SRRCTL6),
  2670. igb_getreg(SRRCTL7),
  2671. igb_getreg(SRRCTL8),
  2672. igb_getreg(SRRCTL9),
  2673. igb_getreg(SRRCTL10),
  2674. igb_getreg(SRRCTL11),
  2675. igb_getreg(SRRCTL12),
  2676. igb_getreg(SRRCTL13),
  2677. igb_getreg(SRRCTL14),
  2678. igb_getreg(SRRCTL15),
  2679. igb_getreg(LATECOL),
  2680. igb_getreg(XONTXC),
  2681. igb_getreg(TDFH),
  2682. igb_getreg(TDFT),
  2683. igb_getreg(TDFHS),
  2684. igb_getreg(TDFTS),
  2685. igb_getreg(TDFPC),
  2686. igb_getreg(WUS),
  2687. igb_getreg(RDFH),
  2688. igb_getreg(RDFT),
  2689. igb_getreg(RDFHS),
  2690. igb_getreg(RDFTS),
  2691. igb_getreg(RDFPC),
  2692. igb_getreg(GORCL),
  2693. igb_getreg(MGTPRC),
  2694. igb_getreg(EERD),
  2695. igb_getreg(EIAC),
  2696. igb_getreg(MANC2H),
  2697. igb_getreg(RXCSUM),
  2698. igb_getreg(GSCL_3),
  2699. igb_getreg(GSCN_2),
  2700. igb_getreg(FCAH),
  2701. igb_getreg(FCRTH),
  2702. igb_getreg(FLOP),
  2703. igb_getreg(RXSTMPH),
  2704. igb_getreg(TXSTMPL),
  2705. igb_getreg(TIMADJL),
  2706. igb_getreg(RDH0),
  2707. igb_getreg(RDH1),
  2708. igb_getreg(RDH2),
  2709. igb_getreg(RDH3),
  2710. igb_getreg(RDH4),
  2711. igb_getreg(RDH5),
  2712. igb_getreg(RDH6),
  2713. igb_getreg(RDH7),
  2714. igb_getreg(RDH8),
  2715. igb_getreg(RDH9),
  2716. igb_getreg(RDH10),
  2717. igb_getreg(RDH11),
  2718. igb_getreg(RDH12),
  2719. igb_getreg(RDH13),
  2720. igb_getreg(RDH14),
  2721. igb_getreg(RDH15),
  2722. igb_getreg(TDT0),
  2723. igb_getreg(TDT1),
  2724. igb_getreg(TDT2),
  2725. igb_getreg(TDT3),
  2726. igb_getreg(TDT4),
  2727. igb_getreg(TDT5),
  2728. igb_getreg(TDT6),
  2729. igb_getreg(TDT7),
  2730. igb_getreg(TDT8),
  2731. igb_getreg(TDT9),
  2732. igb_getreg(TDT10),
  2733. igb_getreg(TDT11),
  2734. igb_getreg(TDT12),
  2735. igb_getreg(TDT13),
  2736. igb_getreg(TDT14),
  2737. igb_getreg(TDT15),
  2738. igb_getreg(TNCRS),
  2739. igb_getreg(RJC),
  2740. igb_getreg(IAM),
  2741. igb_getreg(GSCL_2),
  2742. igb_getreg(TIPG),
  2743. igb_getreg(FLMNGCTL),
  2744. igb_getreg(FLMNGCNT),
  2745. igb_getreg(TSYNCTXCTL),
  2746. igb_getreg(EEMNGDATA),
  2747. igb_getreg(CTRL_EXT),
  2748. igb_getreg(SYSTIMH),
  2749. igb_getreg(EEMNGCTL),
  2750. igb_getreg(FLMNGDATA),
  2751. igb_getreg(TSYNCRXCTL),
  2752. igb_getreg(LEDCTL),
  2753. igb_getreg(TCTL),
  2754. igb_getreg(TCTL_EXT),
  2755. igb_getreg(DTXCTL),
  2756. igb_getreg(RXPBS),
  2757. igb_getreg(TDH0),
  2758. igb_getreg(TDH1),
  2759. igb_getreg(TDH2),
  2760. igb_getreg(TDH3),
  2761. igb_getreg(TDH4),
  2762. igb_getreg(TDH5),
  2763. igb_getreg(TDH6),
  2764. igb_getreg(TDH7),
  2765. igb_getreg(TDH8),
  2766. igb_getreg(TDH9),
  2767. igb_getreg(TDH10),
  2768. igb_getreg(TDH11),
  2769. igb_getreg(TDH12),
  2770. igb_getreg(TDH13),
  2771. igb_getreg(TDH14),
  2772. igb_getreg(TDH15),
  2773. igb_getreg(ECOL),
  2774. igb_getreg(DC),
  2775. igb_getreg(RLEC),
  2776. igb_getreg(XOFFTXC),
  2777. igb_getreg(RFC),
  2778. igb_getreg(RNBC),
  2779. igb_getreg(MGTPTC),
  2780. igb_getreg(TIMINCA),
  2781. igb_getreg(FACTPS),
  2782. igb_getreg(GSCL_1),
  2783. igb_getreg(GSCN_0),
  2784. igb_getreg(PBACLR),
  2785. igb_getreg(FCTTV),
  2786. igb_getreg(RXSATRL),
  2787. igb_getreg(TORL),
  2788. igb_getreg(TDLEN0),
  2789. igb_getreg(TDLEN1),
  2790. igb_getreg(TDLEN2),
  2791. igb_getreg(TDLEN3),
  2792. igb_getreg(TDLEN4),
  2793. igb_getreg(TDLEN5),
  2794. igb_getreg(TDLEN6),
  2795. igb_getreg(TDLEN7),
  2796. igb_getreg(TDLEN8),
  2797. igb_getreg(TDLEN9),
  2798. igb_getreg(TDLEN10),
  2799. igb_getreg(TDLEN11),
  2800. igb_getreg(TDLEN12),
  2801. igb_getreg(TDLEN13),
  2802. igb_getreg(TDLEN14),
  2803. igb_getreg(TDLEN15),
  2804. igb_getreg(MCC),
  2805. igb_getreg(WUC),
  2806. igb_getreg(EECD),
  2807. igb_getreg(FCRTV),
  2808. igb_getreg(TXDCTL0),
  2809. igb_getreg(TXDCTL1),
  2810. igb_getreg(TXDCTL2),
  2811. igb_getreg(TXDCTL3),
  2812. igb_getreg(TXDCTL4),
  2813. igb_getreg(TXDCTL5),
  2814. igb_getreg(TXDCTL6),
  2815. igb_getreg(TXDCTL7),
  2816. igb_getreg(TXDCTL8),
  2817. igb_getreg(TXDCTL9),
  2818. igb_getreg(TXDCTL10),
  2819. igb_getreg(TXDCTL11),
  2820. igb_getreg(TXDCTL12),
  2821. igb_getreg(TXDCTL13),
  2822. igb_getreg(TXDCTL14),
  2823. igb_getreg(TXDCTL15),
  2824. igb_getreg(TXCTL0),
  2825. igb_getreg(TXCTL1),
  2826. igb_getreg(TXCTL2),
  2827. igb_getreg(TXCTL3),
  2828. igb_getreg(TXCTL4),
  2829. igb_getreg(TXCTL5),
  2830. igb_getreg(TXCTL6),
  2831. igb_getreg(TXCTL7),
  2832. igb_getreg(TXCTL8),
  2833. igb_getreg(TXCTL9),
  2834. igb_getreg(TXCTL10),
  2835. igb_getreg(TXCTL11),
  2836. igb_getreg(TXCTL12),
  2837. igb_getreg(TXCTL13),
  2838. igb_getreg(TXCTL14),
  2839. igb_getreg(TXCTL15),
  2840. igb_getreg(TDWBAL0),
  2841. igb_getreg(TDWBAL1),
  2842. igb_getreg(TDWBAL2),
  2843. igb_getreg(TDWBAL3),
  2844. igb_getreg(TDWBAL4),
  2845. igb_getreg(TDWBAL5),
  2846. igb_getreg(TDWBAL6),
  2847. igb_getreg(TDWBAL7),
  2848. igb_getreg(TDWBAL8),
  2849. igb_getreg(TDWBAL9),
  2850. igb_getreg(TDWBAL10),
  2851. igb_getreg(TDWBAL11),
  2852. igb_getreg(TDWBAL12),
  2853. igb_getreg(TDWBAL13),
  2854. igb_getreg(TDWBAL14),
  2855. igb_getreg(TDWBAL15),
  2856. igb_getreg(TDWBAH0),
  2857. igb_getreg(TDWBAH1),
  2858. igb_getreg(TDWBAH2),
  2859. igb_getreg(TDWBAH3),
  2860. igb_getreg(TDWBAH4),
  2861. igb_getreg(TDWBAH5),
  2862. igb_getreg(TDWBAH6),
  2863. igb_getreg(TDWBAH7),
  2864. igb_getreg(TDWBAH8),
  2865. igb_getreg(TDWBAH9),
  2866. igb_getreg(TDWBAH10),
  2867. igb_getreg(TDWBAH11),
  2868. igb_getreg(TDWBAH12),
  2869. igb_getreg(TDWBAH13),
  2870. igb_getreg(TDWBAH14),
  2871. igb_getreg(TDWBAH15),
  2872. igb_getreg(PVTCTRL0),
  2873. igb_getreg(PVTCTRL1),
  2874. igb_getreg(PVTCTRL2),
  2875. igb_getreg(PVTCTRL3),
  2876. igb_getreg(PVTCTRL4),
  2877. igb_getreg(PVTCTRL5),
  2878. igb_getreg(PVTCTRL6),
  2879. igb_getreg(PVTCTRL7),
  2880. igb_getreg(PVTEIMS0),
  2881. igb_getreg(PVTEIMS1),
  2882. igb_getreg(PVTEIMS2),
  2883. igb_getreg(PVTEIMS3),
  2884. igb_getreg(PVTEIMS4),
  2885. igb_getreg(PVTEIMS5),
  2886. igb_getreg(PVTEIMS6),
  2887. igb_getreg(PVTEIMS7),
  2888. igb_getreg(PVTEIAC0),
  2889. igb_getreg(PVTEIAC1),
  2890. igb_getreg(PVTEIAC2),
  2891. igb_getreg(PVTEIAC3),
  2892. igb_getreg(PVTEIAC4),
  2893. igb_getreg(PVTEIAC5),
  2894. igb_getreg(PVTEIAC6),
  2895. igb_getreg(PVTEIAC7),
  2896. igb_getreg(PVTEIAM0),
  2897. igb_getreg(PVTEIAM1),
  2898. igb_getreg(PVTEIAM2),
  2899. igb_getreg(PVTEIAM3),
  2900. igb_getreg(PVTEIAM4),
  2901. igb_getreg(PVTEIAM5),
  2902. igb_getreg(PVTEIAM6),
  2903. igb_getreg(PVTEIAM7),
  2904. igb_getreg(PVFGPRC0),
  2905. igb_getreg(PVFGPRC1),
  2906. igb_getreg(PVFGPRC2),
  2907. igb_getreg(PVFGPRC3),
  2908. igb_getreg(PVFGPRC4),
  2909. igb_getreg(PVFGPRC5),
  2910. igb_getreg(PVFGPRC6),
  2911. igb_getreg(PVFGPRC7),
  2912. igb_getreg(PVFGPTC0),
  2913. igb_getreg(PVFGPTC1),
  2914. igb_getreg(PVFGPTC2),
  2915. igb_getreg(PVFGPTC3),
  2916. igb_getreg(PVFGPTC4),
  2917. igb_getreg(PVFGPTC5),
  2918. igb_getreg(PVFGPTC6),
  2919. igb_getreg(PVFGPTC7),
  2920. igb_getreg(PVFGORC0),
  2921. igb_getreg(PVFGORC1),
  2922. igb_getreg(PVFGORC2),
  2923. igb_getreg(PVFGORC3),
  2924. igb_getreg(PVFGORC4),
  2925. igb_getreg(PVFGORC5),
  2926. igb_getreg(PVFGORC6),
  2927. igb_getreg(PVFGORC7),
  2928. igb_getreg(PVFGOTC0),
  2929. igb_getreg(PVFGOTC1),
  2930. igb_getreg(PVFGOTC2),
  2931. igb_getreg(PVFGOTC3),
  2932. igb_getreg(PVFGOTC4),
  2933. igb_getreg(PVFGOTC5),
  2934. igb_getreg(PVFGOTC6),
  2935. igb_getreg(PVFGOTC7),
  2936. igb_getreg(PVFMPRC0),
  2937. igb_getreg(PVFMPRC1),
  2938. igb_getreg(PVFMPRC2),
  2939. igb_getreg(PVFMPRC3),
  2940. igb_getreg(PVFMPRC4),
  2941. igb_getreg(PVFMPRC5),
  2942. igb_getreg(PVFMPRC6),
  2943. igb_getreg(PVFMPRC7),
  2944. igb_getreg(PVFGPRLBC0),
  2945. igb_getreg(PVFGPRLBC1),
  2946. igb_getreg(PVFGPRLBC2),
  2947. igb_getreg(PVFGPRLBC3),
  2948. igb_getreg(PVFGPRLBC4),
  2949. igb_getreg(PVFGPRLBC5),
  2950. igb_getreg(PVFGPRLBC6),
  2951. igb_getreg(PVFGPRLBC7),
  2952. igb_getreg(PVFGPTLBC0),
  2953. igb_getreg(PVFGPTLBC1),
  2954. igb_getreg(PVFGPTLBC2),
  2955. igb_getreg(PVFGPTLBC3),
  2956. igb_getreg(PVFGPTLBC4),
  2957. igb_getreg(PVFGPTLBC5),
  2958. igb_getreg(PVFGPTLBC6),
  2959. igb_getreg(PVFGPTLBC7),
  2960. igb_getreg(PVFGORLBC0),
  2961. igb_getreg(PVFGORLBC1),
  2962. igb_getreg(PVFGORLBC2),
  2963. igb_getreg(PVFGORLBC3),
  2964. igb_getreg(PVFGORLBC4),
  2965. igb_getreg(PVFGORLBC5),
  2966. igb_getreg(PVFGORLBC6),
  2967. igb_getreg(PVFGORLBC7),
  2968. igb_getreg(PVFGOTLBC0),
  2969. igb_getreg(PVFGOTLBC1),
  2970. igb_getreg(PVFGOTLBC2),
  2971. igb_getreg(PVFGOTLBC3),
  2972. igb_getreg(PVFGOTLBC4),
  2973. igb_getreg(PVFGOTLBC5),
  2974. igb_getreg(PVFGOTLBC6),
  2975. igb_getreg(PVFGOTLBC7),
  2976. igb_getreg(RCTL),
  2977. igb_getreg(MDIC),
  2978. igb_getreg(FCRUC),
  2979. igb_getreg(VET),
  2980. igb_getreg(RDBAL0),
  2981. igb_getreg(RDBAL1),
  2982. igb_getreg(RDBAL2),
  2983. igb_getreg(RDBAL3),
  2984. igb_getreg(RDBAL4),
  2985. igb_getreg(RDBAL5),
  2986. igb_getreg(RDBAL6),
  2987. igb_getreg(RDBAL7),
  2988. igb_getreg(RDBAL8),
  2989. igb_getreg(RDBAL9),
  2990. igb_getreg(RDBAL10),
  2991. igb_getreg(RDBAL11),
  2992. igb_getreg(RDBAL12),
  2993. igb_getreg(RDBAL13),
  2994. igb_getreg(RDBAL14),
  2995. igb_getreg(RDBAL15),
  2996. igb_getreg(TDBAH0),
  2997. igb_getreg(TDBAH1),
  2998. igb_getreg(TDBAH2),
  2999. igb_getreg(TDBAH3),
  3000. igb_getreg(TDBAH4),
  3001. igb_getreg(TDBAH5),
  3002. igb_getreg(TDBAH6),
  3003. igb_getreg(TDBAH7),
  3004. igb_getreg(TDBAH8),
  3005. igb_getreg(TDBAH9),
  3006. igb_getreg(TDBAH10),
  3007. igb_getreg(TDBAH11),
  3008. igb_getreg(TDBAH12),
  3009. igb_getreg(TDBAH13),
  3010. igb_getreg(TDBAH14),
  3011. igb_getreg(TDBAH15),
  3012. igb_getreg(SCC),
  3013. igb_getreg(COLC),
  3014. igb_getreg(XOFFRXC),
  3015. igb_getreg(IPAV),
  3016. igb_getreg(GOTCL),
  3017. igb_getreg(MGTPDC),
  3018. igb_getreg(GCR),
  3019. igb_getreg(MFVAL),
  3020. igb_getreg(FUNCTAG),
  3021. igb_getreg(GSCL_4),
  3022. igb_getreg(GSCN_3),
  3023. igb_getreg(MRQC),
  3024. igb_getreg(FCT),
  3025. igb_getreg(FLA),
  3026. igb_getreg(RXDCTL0),
  3027. igb_getreg(RXDCTL1),
  3028. igb_getreg(RXDCTL2),
  3029. igb_getreg(RXDCTL3),
  3030. igb_getreg(RXDCTL4),
  3031. igb_getreg(RXDCTL5),
  3032. igb_getreg(RXDCTL6),
  3033. igb_getreg(RXDCTL7),
  3034. igb_getreg(RXDCTL8),
  3035. igb_getreg(RXDCTL9),
  3036. igb_getreg(RXDCTL10),
  3037. igb_getreg(RXDCTL11),
  3038. igb_getreg(RXDCTL12),
  3039. igb_getreg(RXDCTL13),
  3040. igb_getreg(RXDCTL14),
  3041. igb_getreg(RXDCTL15),
  3042. igb_getreg(RXSTMPL),
  3043. igb_getreg(TIMADJH),
  3044. igb_getreg(FCRTL),
  3045. igb_getreg(XONRXC),
  3046. igb_getreg(RFCTL),
  3047. igb_getreg(GSCN_1),
  3048. igb_getreg(FCAL),
  3049. igb_getreg(GPIE),
  3050. igb_getreg(TXPBS),
  3051. igb_getreg(RLPML),
  3052. [TOTH] = igb_mac_read_clr8,
  3053. [GOTCH] = igb_mac_read_clr8,
  3054. [PRC64] = igb_mac_read_clr4,
  3055. [PRC255] = igb_mac_read_clr4,
  3056. [PRC1023] = igb_mac_read_clr4,
  3057. [PTC64] = igb_mac_read_clr4,
  3058. [PTC255] = igb_mac_read_clr4,
  3059. [PTC1023] = igb_mac_read_clr4,
  3060. [GPRC] = igb_mac_read_clr4,
  3061. [TPT] = igb_mac_read_clr4,
  3062. [RUC] = igb_mac_read_clr4,
  3063. [BPRC] = igb_mac_read_clr4,
  3064. [MPTC] = igb_mac_read_clr4,
  3065. [IAC] = igb_mac_read_clr4,
  3066. [ICR] = igb_mac_icr_read,
  3067. [STATUS] = igb_get_status,
  3068. [ICS] = igb_mac_ics_read,
  3069. /*
  3070. * 8.8.10: Reading the IMC register returns the value of the IMS register.
  3071. */
  3072. [IMC] = igb_mac_ims_read,
  3073. [TORH] = igb_mac_read_clr8,
  3074. [GORCH] = igb_mac_read_clr8,
  3075. [PRC127] = igb_mac_read_clr4,
  3076. [PRC511] = igb_mac_read_clr4,
  3077. [PRC1522] = igb_mac_read_clr4,
  3078. [PTC127] = igb_mac_read_clr4,
  3079. [PTC511] = igb_mac_read_clr4,
  3080. [PTC1522] = igb_mac_read_clr4,
  3081. [GPTC] = igb_mac_read_clr4,
  3082. [TPR] = igb_mac_read_clr4,
  3083. [ROC] = igb_mac_read_clr4,
  3084. [MPRC] = igb_mac_read_clr4,
  3085. [BPTC] = igb_mac_read_clr4,
  3086. [TSCTC] = igb_mac_read_clr4,
  3087. [CTRL] = igb_get_ctrl,
  3088. [SWSM] = igb_mac_swsm_read,
  3089. [IMS] = igb_mac_ims_read,
  3090. [SYSTIML] = igb_get_systiml,
  3091. [RXSATRH] = igb_get_rxsatrh,
  3092. [TXSTMPH] = igb_get_txstmph,
  3093. [CRCERRS ... MPC] = igb_mac_readreg,
  3094. [IP6AT ... IP6AT + 3] = igb_mac_readreg,
  3095. [IP4AT ... IP4AT + 6] = igb_mac_readreg,
  3096. [RA ... RA + 31] = igb_mac_readreg,
  3097. [RA2 ... RA2 + 31] = igb_mac_readreg,
  3098. [WUPM ... WUPM + 31] = igb_mac_readreg,
  3099. [MTA ... MTA + E1000_MC_TBL_SIZE - 1] = igb_mac_readreg,
  3100. [VFTA ... VFTA + E1000_VLAN_FILTER_TBL_SIZE - 1] = igb_mac_readreg,
  3101. [FFMT ... FFMT + 254] = igb_mac_readreg,
  3102. [MDEF ... MDEF + 7] = igb_mac_readreg,
  3103. [FTFT ... FTFT + 254] = igb_mac_readreg,
  3104. [RETA ... RETA + 31] = igb_mac_readreg,
  3105. [RSSRK ... RSSRK + 9] = igb_mac_readreg,
  3106. [MAVTV0 ... MAVTV3] = igb_mac_readreg,
  3107. [EITR0 ... EITR0 + IGB_INTR_NUM - 1] = igb_mac_eitr_read,
  3108. [PVTEICR0] = igb_mac_read_clr4,
  3109. [PVTEICR1] = igb_mac_read_clr4,
  3110. [PVTEICR2] = igb_mac_read_clr4,
  3111. [PVTEICR3] = igb_mac_read_clr4,
  3112. [PVTEICR4] = igb_mac_read_clr4,
  3113. [PVTEICR5] = igb_mac_read_clr4,
  3114. [PVTEICR6] = igb_mac_read_clr4,
  3115. [PVTEICR7] = igb_mac_read_clr4,
  3116. /* IGB specific: */
  3117. [FWSM] = igb_mac_readreg,
  3118. [SW_FW_SYNC] = igb_mac_readreg,
  3119. [HTCBDPC] = igb_mac_read_clr4,
  3120. [EICR] = igb_mac_read_clr4,
  3121. [EIMS] = igb_mac_readreg,
  3122. [EIAM] = igb_mac_readreg,
  3123. [IVAR0 ... IVAR0 + 7] = igb_mac_readreg,
  3124. igb_getreg(IVAR_MISC),
  3125. igb_getreg(TSYNCRXCFG),
  3126. [ETQF0 ... ETQF0 + 7] = igb_mac_readreg,
  3127. igb_getreg(VT_CTL),
  3128. [P2VMAILBOX0 ... P2VMAILBOX7] = igb_mac_readreg,
  3129. [V2PMAILBOX0 ... V2PMAILBOX7] = igb_mac_vfmailbox_read,
  3130. igb_getreg(MBVFICR),
  3131. [VMBMEM0 ... VMBMEM0 + 127] = igb_mac_readreg,
  3132. igb_getreg(MBVFIMR),
  3133. igb_getreg(VFLRE),
  3134. igb_getreg(VFRE),
  3135. igb_getreg(VFTE),
  3136. igb_getreg(QDE),
  3137. igb_getreg(DTXSWC),
  3138. igb_getreg(RPLOLR),
  3139. [VLVF0 ... VLVF0 + E1000_VLVF_ARRAY_SIZE - 1] = igb_mac_readreg,
  3140. [VMVIR0 ... VMVIR7] = igb_mac_readreg,
  3141. [VMOLR0 ... VMOLR7] = igb_mac_readreg,
  3142. [WVBR] = igb_mac_read_clr4,
  3143. [RQDPC0] = igb_mac_read_clr4,
  3144. [RQDPC1] = igb_mac_read_clr4,
  3145. [RQDPC2] = igb_mac_read_clr4,
  3146. [RQDPC3] = igb_mac_read_clr4,
  3147. [RQDPC4] = igb_mac_read_clr4,
  3148. [RQDPC5] = igb_mac_read_clr4,
  3149. [RQDPC6] = igb_mac_read_clr4,
  3150. [RQDPC7] = igb_mac_read_clr4,
  3151. [RQDPC8] = igb_mac_read_clr4,
  3152. [RQDPC9] = igb_mac_read_clr4,
  3153. [RQDPC10] = igb_mac_read_clr4,
  3154. [RQDPC11] = igb_mac_read_clr4,
  3155. [RQDPC12] = igb_mac_read_clr4,
  3156. [RQDPC13] = igb_mac_read_clr4,
  3157. [RQDPC14] = igb_mac_read_clr4,
  3158. [RQDPC15] = igb_mac_read_clr4,
  3159. [VTIVAR ... VTIVAR + 7] = igb_mac_readreg,
  3160. [VTIVAR_MISC ... VTIVAR_MISC + 7] = igb_mac_readreg,
  3161. };
/* Number of entries in the MAC register read dispatch table. */
enum { IGB_NREADOPS = ARRAY_SIZE(igb_macreg_readops) };
/* Shorthand for write-table entries handled by the generic store helper. */
#define igb_putreg(x) [x] = igb_mac_writereg
/* Signature of a per-register write handler: (core, register index, value). */
typedef void (*writeops)(IGBCore *, int, uint32_t);
/*
 * MAC register write dispatch table, indexed by register index
 * (byte offset >> 2).  Registers with no entry here are treated by
 * igb_core_write() as read-only (if they have a read handler) or
 * unknown; in both cases the write is ignored and traced.
 */
static const writeops igb_macreg_writeops[] = {
    igb_putreg(SWSM),
    igb_putreg(WUFC),
    /* Per-queue RX descriptor ring registers (16 queues). */
    igb_putreg(RDBAH0),
    igb_putreg(RDBAH1),
    igb_putreg(RDBAH2),
    igb_putreg(RDBAH3),
    igb_putreg(RDBAH4),
    igb_putreg(RDBAH5),
    igb_putreg(RDBAH6),
    igb_putreg(RDBAH7),
    igb_putreg(RDBAH8),
    igb_putreg(RDBAH9),
    igb_putreg(RDBAH10),
    igb_putreg(RDBAH11),
    igb_putreg(RDBAH12),
    igb_putreg(RDBAH13),
    igb_putreg(RDBAH14),
    igb_putreg(RDBAH15),
    igb_putreg(SRRCTL0),
    igb_putreg(SRRCTL1),
    igb_putreg(SRRCTL2),
    igb_putreg(SRRCTL3),
    igb_putreg(SRRCTL4),
    igb_putreg(SRRCTL5),
    igb_putreg(SRRCTL6),
    igb_putreg(SRRCTL7),
    igb_putreg(SRRCTL8),
    igb_putreg(SRRCTL9),
    igb_putreg(SRRCTL10),
    igb_putreg(SRRCTL11),
    igb_putreg(SRRCTL12),
    igb_putreg(SRRCTL13),
    igb_putreg(SRRCTL14),
    igb_putreg(SRRCTL15),
    igb_putreg(RXDCTL0),
    igb_putreg(RXDCTL1),
    igb_putreg(RXDCTL2),
    igb_putreg(RXDCTL3),
    igb_putreg(RXDCTL4),
    igb_putreg(RXDCTL5),
    igb_putreg(RXDCTL6),
    igb_putreg(RXDCTL7),
    igb_putreg(RXDCTL8),
    igb_putreg(RXDCTL9),
    igb_putreg(RXDCTL10),
    igb_putreg(RXDCTL11),
    igb_putreg(RXDCTL12),
    igb_putreg(RXDCTL13),
    igb_putreg(RXDCTL14),
    igb_putreg(RXDCTL15),
    igb_putreg(LEDCTL),
    igb_putreg(TCTL),
    igb_putreg(TCTL_EXT),
    igb_putreg(DTXCTL),
    igb_putreg(RXPBS),
    igb_putreg(RQDPC0),
    igb_putreg(FCAL),
    igb_putreg(FCRUC),
    igb_putreg(WUC),
    igb_putreg(WUS),
    igb_putreg(IPAV),
    /* Per-queue TX descriptor ring registers (16 queues). */
    igb_putreg(TDBAH0),
    igb_putreg(TDBAH1),
    igb_putreg(TDBAH2),
    igb_putreg(TDBAH3),
    igb_putreg(TDBAH4),
    igb_putreg(TDBAH5),
    igb_putreg(TDBAH6),
    igb_putreg(TDBAH7),
    igb_putreg(TDBAH8),
    igb_putreg(TDBAH9),
    igb_putreg(TDBAH10),
    igb_putreg(TDBAH11),
    igb_putreg(TDBAH12),
    igb_putreg(TDBAH13),
    igb_putreg(TDBAH14),
    igb_putreg(TDBAH15),
    igb_putreg(IAM),
    igb_putreg(MANC),
    igb_putreg(MANC2H),
    igb_putreg(MFVAL),
    igb_putreg(FACTPS),
    igb_putreg(FUNCTAG),
    igb_putreg(GSCL_1),
    igb_putreg(GSCL_2),
    igb_putreg(GSCL_3),
    igb_putreg(GSCL_4),
    igb_putreg(GSCN_0),
    igb_putreg(GSCN_1),
    igb_putreg(GSCN_2),
    igb_putreg(GSCN_3),
    igb_putreg(MRQC),
    igb_putreg(FLOP),
    igb_putreg(FLA),
    igb_putreg(TXDCTL0),
    igb_putreg(TXDCTL1),
    igb_putreg(TXDCTL2),
    igb_putreg(TXDCTL3),
    igb_putreg(TXDCTL4),
    igb_putreg(TXDCTL5),
    igb_putreg(TXDCTL6),
    igb_putreg(TXDCTL7),
    igb_putreg(TXDCTL8),
    igb_putreg(TXDCTL9),
    igb_putreg(TXDCTL10),
    igb_putreg(TXDCTL11),
    igb_putreg(TXDCTL12),
    igb_putreg(TXDCTL13),
    igb_putreg(TXDCTL14),
    igb_putreg(TXDCTL15),
    igb_putreg(TXCTL0),
    igb_putreg(TXCTL1),
    igb_putreg(TXCTL2),
    igb_putreg(TXCTL3),
    igb_putreg(TXCTL4),
    igb_putreg(TXCTL5),
    igb_putreg(TXCTL6),
    igb_putreg(TXCTL7),
    igb_putreg(TXCTL8),
    igb_putreg(TXCTL9),
    igb_putreg(TXCTL10),
    igb_putreg(TXCTL11),
    igb_putreg(TXCTL12),
    igb_putreg(TXCTL13),
    igb_putreg(TXCTL14),
    igb_putreg(TXCTL15),
    igb_putreg(TDWBAL0),
    igb_putreg(TDWBAL1),
    igb_putreg(TDWBAL2),
    igb_putreg(TDWBAL3),
    igb_putreg(TDWBAL4),
    igb_putreg(TDWBAL5),
    igb_putreg(TDWBAL6),
    igb_putreg(TDWBAL7),
    igb_putreg(TDWBAL8),
    igb_putreg(TDWBAL9),
    igb_putreg(TDWBAL10),
    igb_putreg(TDWBAL11),
    igb_putreg(TDWBAL12),
    igb_putreg(TDWBAL13),
    igb_putreg(TDWBAL14),
    igb_putreg(TDWBAL15),
    igb_putreg(TDWBAH0),
    igb_putreg(TDWBAH1),
    igb_putreg(TDWBAH2),
    igb_putreg(TDWBAH3),
    igb_putreg(TDWBAH4),
    igb_putreg(TDWBAH5),
    igb_putreg(TDWBAH6),
    igb_putreg(TDWBAH7),
    igb_putreg(TDWBAH8),
    igb_putreg(TDWBAH9),
    igb_putreg(TDWBAH10),
    igb_putreg(TDWBAH11),
    igb_putreg(TDWBAH12),
    igb_putreg(TDWBAH13),
    igb_putreg(TDWBAH14),
    igb_putreg(TDWBAH15),
    igb_putreg(TIPG),
    /* Timestamping / PTP related registers. */
    igb_putreg(RXSTMPH),
    igb_putreg(RXSTMPL),
    igb_putreg(RXSATRL),
    igb_putreg(RXSATRH),
    igb_putreg(TXSTMPL),
    igb_putreg(TXSTMPH),
    igb_putreg(SYSTIML),
    igb_putreg(SYSTIMH),
    igb_putreg(TIMADJL),
    igb_putreg(TSYNCRXCTL),
    igb_putreg(TSYNCTXCTL),
    igb_putreg(EEMNGCTL),
    igb_putreg(GPIE),
    igb_putreg(TXPBS),
    igb_putreg(RLPML),
    igb_putreg(VET),
    /* Registers with dedicated write handlers. */
    [TDH0]     = igb_set_16bit,
    [TDH1]     = igb_set_16bit,
    [TDH2]     = igb_set_16bit,
    [TDH3]     = igb_set_16bit,
    [TDH4]     = igb_set_16bit,
    [TDH5]     = igb_set_16bit,
    [TDH6]     = igb_set_16bit,
    [TDH7]     = igb_set_16bit,
    [TDH8]     = igb_set_16bit,
    [TDH9]     = igb_set_16bit,
    [TDH10]    = igb_set_16bit,
    [TDH11]    = igb_set_16bit,
    [TDH12]    = igb_set_16bit,
    [TDH13]    = igb_set_16bit,
    [TDH14]    = igb_set_16bit,
    [TDH15]    = igb_set_16bit,
    [TDT0]     = igb_set_tdt,
    [TDT1]     = igb_set_tdt,
    [TDT2]     = igb_set_tdt,
    [TDT3]     = igb_set_tdt,
    [TDT4]     = igb_set_tdt,
    [TDT5]     = igb_set_tdt,
    [TDT6]     = igb_set_tdt,
    [TDT7]     = igb_set_tdt,
    [TDT8]     = igb_set_tdt,
    [TDT9]     = igb_set_tdt,
    [TDT10]    = igb_set_tdt,
    [TDT11]    = igb_set_tdt,
    [TDT12]    = igb_set_tdt,
    [TDT13]    = igb_set_tdt,
    [TDT14]    = igb_set_tdt,
    [TDT15]    = igb_set_tdt,
    [MDIC]     = igb_set_mdic,
    [ICS]      = igb_set_ics,
    [RDH0]     = igb_set_16bit,
    [RDH1]     = igb_set_16bit,
    [RDH2]     = igb_set_16bit,
    [RDH3]     = igb_set_16bit,
    [RDH4]     = igb_set_16bit,
    [RDH5]     = igb_set_16bit,
    [RDH6]     = igb_set_16bit,
    [RDH7]     = igb_set_16bit,
    [RDH8]     = igb_set_16bit,
    [RDH9]     = igb_set_16bit,
    [RDH10]    = igb_set_16bit,
    [RDH11]    = igb_set_16bit,
    [RDH12]    = igb_set_16bit,
    [RDH13]    = igb_set_16bit,
    [RDH14]    = igb_set_16bit,
    [RDH15]    = igb_set_16bit,
    [RDT0]     = igb_set_rdt,
    [RDT1]     = igb_set_rdt,
    [RDT2]     = igb_set_rdt,
    [RDT3]     = igb_set_rdt,
    [RDT4]     = igb_set_rdt,
    [RDT5]     = igb_set_rdt,
    [RDT6]     = igb_set_rdt,
    [RDT7]     = igb_set_rdt,
    [RDT8]     = igb_set_rdt,
    [RDT9]     = igb_set_rdt,
    [RDT10]    = igb_set_rdt,
    [RDT11]    = igb_set_rdt,
    [RDT12]    = igb_set_rdt,
    [RDT13]    = igb_set_rdt,
    [RDT14]    = igb_set_rdt,
    [RDT15]    = igb_set_rdt,
    [IMC]      = igb_set_imc,
    [IMS]      = igb_set_ims,
    [ICR]      = igb_set_icr,
    [EECD]     = igb_set_eecd,
    [RCTL]     = igb_set_rx_control,
    [CTRL]     = igb_set_ctrl,
    [EERD]     = igb_set_eerd,
    [TDFH]     = igb_set_13bit,
    [TDFT]     = igb_set_13bit,
    [TDFHS]    = igb_set_13bit,
    [TDFTS]    = igb_set_13bit,
    [TDFPC]    = igb_set_13bit,
    [RDFH]     = igb_set_13bit,
    [RDFT]     = igb_set_13bit,
    [RDFHS]    = igb_set_13bit,
    [RDFTS]    = igb_set_13bit,
    [RDFPC]    = igb_set_13bit,
    [GCR]      = igb_set_gcr,
    [RXCSUM]   = igb_set_rxcsum,
    [TDLEN0]   = igb_set_dlen,
    [TDLEN1]   = igb_set_dlen,
    [TDLEN2]   = igb_set_dlen,
    [TDLEN3]   = igb_set_dlen,
    [TDLEN4]   = igb_set_dlen,
    [TDLEN5]   = igb_set_dlen,
    [TDLEN6]   = igb_set_dlen,
    [TDLEN7]   = igb_set_dlen,
    [TDLEN8]   = igb_set_dlen,
    [TDLEN9]   = igb_set_dlen,
    [TDLEN10]  = igb_set_dlen,
    [TDLEN11]  = igb_set_dlen,
    [TDLEN12]  = igb_set_dlen,
    [TDLEN13]  = igb_set_dlen,
    [TDLEN14]  = igb_set_dlen,
    [TDLEN15]  = igb_set_dlen,
    [RDLEN0]   = igb_set_dlen,
    [RDLEN1]   = igb_set_dlen,
    [RDLEN2]   = igb_set_dlen,
    [RDLEN3]   = igb_set_dlen,
    [RDLEN4]   = igb_set_dlen,
    [RDLEN5]   = igb_set_dlen,
    [RDLEN6]   = igb_set_dlen,
    [RDLEN7]   = igb_set_dlen,
    [RDLEN8]   = igb_set_dlen,
    [RDLEN9]   = igb_set_dlen,
    [RDLEN10]  = igb_set_dlen,
    [RDLEN11]  = igb_set_dlen,
    [RDLEN12]  = igb_set_dlen,
    [RDLEN13]  = igb_set_dlen,
    [RDLEN14]  = igb_set_dlen,
    [RDLEN15]  = igb_set_dlen,
    [TDBAL0]   = igb_set_dbal,
    [TDBAL1]   = igb_set_dbal,
    [TDBAL2]   = igb_set_dbal,
    [TDBAL3]   = igb_set_dbal,
    [TDBAL4]   = igb_set_dbal,
    [TDBAL5]   = igb_set_dbal,
    [TDBAL6]   = igb_set_dbal,
    [TDBAL7]   = igb_set_dbal,
    [TDBAL8]   = igb_set_dbal,
    [TDBAL9]   = igb_set_dbal,
    [TDBAL10]  = igb_set_dbal,
    [TDBAL11]  = igb_set_dbal,
    [TDBAL12]  = igb_set_dbal,
    [TDBAL13]  = igb_set_dbal,
    [TDBAL14]  = igb_set_dbal,
    [TDBAL15]  = igb_set_dbal,
    [RDBAL0]   = igb_set_dbal,
    [RDBAL1]   = igb_set_dbal,
    [RDBAL2]   = igb_set_dbal,
    [RDBAL3]   = igb_set_dbal,
    [RDBAL4]   = igb_set_dbal,
    [RDBAL5]   = igb_set_dbal,
    [RDBAL6]   = igb_set_dbal,
    [RDBAL7]   = igb_set_dbal,
    [RDBAL8]   = igb_set_dbal,
    [RDBAL9]   = igb_set_dbal,
    [RDBAL10]  = igb_set_dbal,
    [RDBAL11]  = igb_set_dbal,
    [RDBAL12]  = igb_set_dbal,
    [RDBAL13]  = igb_set_dbal,
    [RDBAL14]  = igb_set_dbal,
    [RDBAL15]  = igb_set_dbal,
    [STATUS]   = igb_set_status,
    [PBACLR]   = igb_set_pbaclr,
    [CTRL_EXT] = igb_set_ctrlext,
    [FCAH]     = igb_set_16bit,
    [FCT]      = igb_set_16bit,
    [FCTTV]    = igb_set_16bit,
    [FCRTV]    = igb_set_16bit,
    [FCRTH]    = igb_set_fcrth,
    [FCRTL]    = igb_set_fcrtl,
    [CTRL_DUP] = igb_set_ctrl,
    [RFCTL]    = igb_set_rfctl,
    [TIMINCA]  = igb_set_timinca,
    [TIMADJH]  = igb_set_timadjh,
    [IP6AT ... IP6AT + 3]    = igb_mac_writereg,
    [IP4AT ... IP4AT + 6]    = igb_mac_writereg,
    /* RA + 1 is RAH0; writing it updates the primary MAC address. */
    [RA]                     = igb_mac_writereg,
    [RA + 1]                 = igb_mac_setmacaddr,
    [RA + 2 ... RA + 31]     = igb_mac_writereg,
    [RA2 ... RA2 + 31]       = igb_mac_writereg,
    [WUPM ... WUPM + 31]     = igb_mac_writereg,
    [MTA ... MTA + E1000_MC_TBL_SIZE - 1] = igb_mac_writereg,
    [VFTA ... VFTA + E1000_VLAN_FILTER_TBL_SIZE - 1] = igb_mac_writereg,
    [FFMT ... FFMT + 254]    = igb_set_4bit,
    [MDEF ... MDEF + 7]      = igb_mac_writereg,
    [FTFT ... FTFT + 254]    = igb_mac_writereg,
    [RETA ... RETA + 31]     = igb_mac_writereg,
    [RSSRK ... RSSRK + 9]    = igb_mac_writereg,
    [MAVTV0 ... MAVTV3]      = igb_mac_writereg,
    [EITR0 ... EITR0 + IGB_INTR_NUM - 1] = igb_set_eitr,
    /* IGB specific: */
    [FWSM]       = igb_mac_writereg,
    [SW_FW_SYNC] = igb_mac_writereg,
    [EICR]       = igb_set_eicr,
    [EICS]       = igb_set_eics,
    [EIAC]       = igb_set_eiac,
    [EIAM]       = igb_set_eiam,
    [EIMC]       = igb_set_eimc,
    [EIMS]       = igb_set_eims,
    [IVAR0 ... IVAR0 + 7] = igb_mac_writereg,
    igb_putreg(IVAR_MISC),
    igb_putreg(TSYNCRXCFG),
    [ETQF0 ... ETQF0 + 7] = igb_mac_writereg,
    igb_putreg(VT_CTL),
    /* PF<->VF mailbox: MBVFICR and VFLRE are write-1-to-clear. */
    [P2VMAILBOX0 ... P2VMAILBOX7] = igb_set_pfmailbox,
    [V2PMAILBOX0 ... V2PMAILBOX7] = igb_set_vfmailbox,
    [MBVFICR] = igb_w1c,
    [VMBMEM0 ... VMBMEM0 + 127] = igb_mac_writereg,
    igb_putreg(MBVFIMR),
    [VFLRE] = igb_w1c,
    igb_putreg(VFRE),
    igb_putreg(VFTE),
    igb_putreg(QDE),
    igb_putreg(DTXSWC),
    igb_putreg(RPLOLR),
    [VLVF0 ... VLVF0 + E1000_VLVF_ARRAY_SIZE - 1] = igb_mac_writereg,
    [VMVIR0 ... VMVIR7] = igb_mac_writereg,
    [VMOLR0 ... VMOLR7] = igb_mac_writereg,
    [UTA ... UTA + E1000_MC_TBL_SIZE - 1] = igb_mac_writereg,
    /* Per-VF interrupt registers (8 VFs). */
    [PVTCTRL0] = igb_set_vtctrl,
    [PVTCTRL1] = igb_set_vtctrl,
    [PVTCTRL2] = igb_set_vtctrl,
    [PVTCTRL3] = igb_set_vtctrl,
    [PVTCTRL4] = igb_set_vtctrl,
    [PVTCTRL5] = igb_set_vtctrl,
    [PVTCTRL6] = igb_set_vtctrl,
    [PVTCTRL7] = igb_set_vtctrl,
    [PVTEICS0] = igb_set_vteics,
    [PVTEICS1] = igb_set_vteics,
    [PVTEICS2] = igb_set_vteics,
    [PVTEICS3] = igb_set_vteics,
    [PVTEICS4] = igb_set_vteics,
    [PVTEICS5] = igb_set_vteics,
    [PVTEICS6] = igb_set_vteics,
    [PVTEICS7] = igb_set_vteics,
    [PVTEIMS0] = igb_set_vteims,
    [PVTEIMS1] = igb_set_vteims,
    [PVTEIMS2] = igb_set_vteims,
    [PVTEIMS3] = igb_set_vteims,
    [PVTEIMS4] = igb_set_vteims,
    [PVTEIMS5] = igb_set_vteims,
    [PVTEIMS6] = igb_set_vteims,
    [PVTEIMS7] = igb_set_vteims,
    [PVTEIMC0] = igb_set_vteimc,
    [PVTEIMC1] = igb_set_vteimc,
    [PVTEIMC2] = igb_set_vteimc,
    [PVTEIMC3] = igb_set_vteimc,
    [PVTEIMC4] = igb_set_vteimc,
    [PVTEIMC5] = igb_set_vteimc,
    [PVTEIMC6] = igb_set_vteimc,
    [PVTEIMC7] = igb_set_vteimc,
    [PVTEIAC0] = igb_set_vteiac,
    [PVTEIAC1] = igb_set_vteiac,
    [PVTEIAC2] = igb_set_vteiac,
    [PVTEIAC3] = igb_set_vteiac,
    [PVTEIAC4] = igb_set_vteiac,
    [PVTEIAC5] = igb_set_vteiac,
    [PVTEIAC6] = igb_set_vteiac,
    [PVTEIAC7] = igb_set_vteiac,
    [PVTEIAM0] = igb_set_vteiam,
    [PVTEIAM1] = igb_set_vteiam,
    [PVTEIAM2] = igb_set_vteiam,
    [PVTEIAM3] = igb_set_vteiam,
    [PVTEIAM4] = igb_set_vteiam,
    [PVTEIAM5] = igb_set_vteiam,
    [PVTEIAM6] = igb_set_vteiam,
    [PVTEIAM7] = igb_set_vteiam,
    [PVTEICR0] = igb_set_vteicr,
    [PVTEICR1] = igb_set_vteicr,
    [PVTEICR2] = igb_set_vteicr,
    [PVTEICR3] = igb_set_vteicr,
    [PVTEICR4] = igb_set_vteicr,
    [PVTEICR5] = igb_set_vteicr,
    [PVTEICR6] = igb_set_vteicr,
    [PVTEICR7] = igb_set_vteicr,
    [VTIVAR ... VTIVAR + 7] = igb_set_vtivar,
    [VTIVAR_MISC ... VTIVAR_MISC + 7] = igb_mac_writereg
};
/* Number of entries in the MAC register write dispatch table. */
enum { IGB_NWRITEOPS = ARRAY_SIZE(igb_macreg_writeops) };
  3608. enum { MAC_ACCESS_PARTIAL = 1 };
  3609. /*
  3610. * The array below combines alias offsets of the index values for the
  3611. * MAC registers that have aliases, with the indication of not fully
  3612. * implemented registers (lowest bit). This combination is possible
  3613. * because all of the offsets are even.
  3614. */
  3615. static const uint16_t mac_reg_access[E1000E_MAC_SIZE] = {
  3616. /* Alias index offsets */
  3617. [FCRTL_A] = 0x07fe,
  3618. [RDFH_A] = 0xe904, [RDFT_A] = 0xe904,
  3619. [TDFH_A] = 0xed00, [TDFT_A] = 0xed00,
  3620. [RA_A ... RA_A + 31] = 0x14f0,
  3621. [VFTA_A ... VFTA_A + E1000_VLAN_FILTER_TBL_SIZE - 1] = 0x1400,
  3622. [RDBAL0_A] = 0x2600,
  3623. [RDBAH0_A] = 0x2600,
  3624. [RDLEN0_A] = 0x2600,
  3625. [SRRCTL0_A] = 0x2600,
  3626. [RDH0_A] = 0x2600,
  3627. [RDT0_A] = 0x2600,
  3628. [RXDCTL0_A] = 0x2600,
  3629. [RXCTL0_A] = 0x2600,
  3630. [RQDPC0_A] = 0x2600,
  3631. [RDBAL1_A] = 0x25D0,
  3632. [RDBAL2_A] = 0x25A0,
  3633. [RDBAL3_A] = 0x2570,
  3634. [RDBAH1_A] = 0x25D0,
  3635. [RDBAH2_A] = 0x25A0,
  3636. [RDBAH3_A] = 0x2570,
  3637. [RDLEN1_A] = 0x25D0,
  3638. [RDLEN2_A] = 0x25A0,
  3639. [RDLEN3_A] = 0x2570,
  3640. [SRRCTL1_A] = 0x25D0,
  3641. [SRRCTL2_A] = 0x25A0,
  3642. [SRRCTL3_A] = 0x2570,
  3643. [RDH1_A] = 0x25D0,
  3644. [RDH2_A] = 0x25A0,
  3645. [RDH3_A] = 0x2570,
  3646. [RDT1_A] = 0x25D0,
  3647. [RDT2_A] = 0x25A0,
  3648. [RDT3_A] = 0x2570,
  3649. [RXDCTL1_A] = 0x25D0,
  3650. [RXDCTL2_A] = 0x25A0,
  3651. [RXDCTL3_A] = 0x2570,
  3652. [RXCTL1_A] = 0x25D0,
  3653. [RXCTL2_A] = 0x25A0,
  3654. [RXCTL3_A] = 0x2570,
  3655. [RQDPC1_A] = 0x25D0,
  3656. [RQDPC2_A] = 0x25A0,
  3657. [RQDPC3_A] = 0x2570,
  3658. [TDBAL0_A] = 0x2A00,
  3659. [TDBAH0_A] = 0x2A00,
  3660. [TDLEN0_A] = 0x2A00,
  3661. [TDH0_A] = 0x2A00,
  3662. [TDT0_A] = 0x2A00,
  3663. [TXCTL0_A] = 0x2A00,
  3664. [TDWBAL0_A] = 0x2A00,
  3665. [TDWBAH0_A] = 0x2A00,
  3666. [TDBAL1_A] = 0x29D0,
  3667. [TDBAL2_A] = 0x29A0,
  3668. [TDBAL3_A] = 0x2970,
  3669. [TDBAH1_A] = 0x29D0,
  3670. [TDBAH2_A] = 0x29A0,
  3671. [TDBAH3_A] = 0x2970,
  3672. [TDLEN1_A] = 0x29D0,
  3673. [TDLEN2_A] = 0x29A0,
  3674. [TDLEN3_A] = 0x2970,
  3675. [TDH1_A] = 0x29D0,
  3676. [TDH2_A] = 0x29A0,
  3677. [TDH3_A] = 0x2970,
  3678. [TDT1_A] = 0x29D0,
  3679. [TDT2_A] = 0x29A0,
  3680. [TDT3_A] = 0x2970,
  3681. [TXDCTL0_A] = 0x2A00,
  3682. [TXDCTL1_A] = 0x29D0,
  3683. [TXDCTL2_A] = 0x29A0,
  3684. [TXDCTL3_A] = 0x2970,
  3685. [TXCTL1_A] = 0x29D0,
  3686. [TXCTL2_A] = 0x29A0,
  3687. [TXCTL3_A] = 0x29D0,
  3688. [TDWBAL1_A] = 0x29D0,
  3689. [TDWBAL2_A] = 0x29A0,
  3690. [TDWBAL3_A] = 0x2970,
  3691. [TDWBAH1_A] = 0x29D0,
  3692. [TDWBAH2_A] = 0x29A0,
  3693. [TDWBAH3_A] = 0x2970,
  3694. /* Access options */
  3695. [RDFH] = MAC_ACCESS_PARTIAL, [RDFT] = MAC_ACCESS_PARTIAL,
  3696. [RDFHS] = MAC_ACCESS_PARTIAL, [RDFTS] = MAC_ACCESS_PARTIAL,
  3697. [RDFPC] = MAC_ACCESS_PARTIAL,
  3698. [TDFH] = MAC_ACCESS_PARTIAL, [TDFT] = MAC_ACCESS_PARTIAL,
  3699. [TDFHS] = MAC_ACCESS_PARTIAL, [TDFTS] = MAC_ACCESS_PARTIAL,
  3700. [TDFPC] = MAC_ACCESS_PARTIAL, [EECD] = MAC_ACCESS_PARTIAL,
  3701. [FLA] = MAC_ACCESS_PARTIAL,
  3702. [FCAL] = MAC_ACCESS_PARTIAL, [FCAH] = MAC_ACCESS_PARTIAL,
  3703. [FCT] = MAC_ACCESS_PARTIAL, [FCTTV] = MAC_ACCESS_PARTIAL,
  3704. [FCRTV] = MAC_ACCESS_PARTIAL, [FCRTL] = MAC_ACCESS_PARTIAL,
  3705. [FCRTH] = MAC_ACCESS_PARTIAL,
  3706. [MAVTV0 ... MAVTV3] = MAC_ACCESS_PARTIAL
  3707. };
  3708. void
  3709. igb_core_write(IGBCore *core, hwaddr addr, uint64_t val, unsigned size)
  3710. {
  3711. uint16_t index = igb_get_reg_index_with_offset(mac_reg_access, addr);
  3712. if (index < IGB_NWRITEOPS && igb_macreg_writeops[index]) {
  3713. if (mac_reg_access[index] & MAC_ACCESS_PARTIAL) {
  3714. trace_e1000e_wrn_regs_write_trivial(index << 2);
  3715. }
  3716. trace_e1000e_core_write(index << 2, size, val);
  3717. igb_macreg_writeops[index](core, index, val);
  3718. } else if (index < IGB_NREADOPS && igb_macreg_readops[index]) {
  3719. trace_e1000e_wrn_regs_write_ro(index << 2, size, val);
  3720. } else {
  3721. trace_e1000e_wrn_regs_write_unknown(index << 2, size, val);
  3722. }
  3723. }
  3724. uint64_t
  3725. igb_core_read(IGBCore *core, hwaddr addr, unsigned size)
  3726. {
  3727. uint64_t val;
  3728. uint16_t index = igb_get_reg_index_with_offset(mac_reg_access, addr);
  3729. if (index < IGB_NREADOPS && igb_macreg_readops[index]) {
  3730. if (mac_reg_access[index] & MAC_ACCESS_PARTIAL) {
  3731. trace_e1000e_wrn_regs_read_trivial(index << 2);
  3732. }
  3733. val = igb_macreg_readops[index](core, index);
  3734. trace_e1000e_core_read(index << 2, size, val);
  3735. return val;
  3736. } else {
  3737. trace_e1000e_wrn_regs_read_unknown(index << 2, size);
  3738. }
  3739. return 0;
  3740. }
  3741. static void
  3742. igb_autoneg_resume(IGBCore *core)
  3743. {
  3744. if (igb_have_autoneg(core) &&
  3745. !(core->phy[MII_BMSR] & MII_BMSR_AN_COMP)) {
  3746. qemu_get_queue(core->owner_nic)->link_down = false;
  3747. timer_mod(core->autoneg_timer,
  3748. qemu_clock_get_ms(QEMU_CLOCK_VIRTUAL) + 500);
  3749. }
  3750. }
/*
 * One-time device realization: create the autonegotiation timer and
 * interrupt-manager state, initialize the per-queue TX and the RX
 * packet objects, populate the EEPROM from @eeprom_templ with the
 * device ID and @macaddr applied, and derive the RX offload settings
 * from the current register state.
 */
void
igb_core_pci_realize(IGBCore *core,
                     const uint16_t *eeprom_templ,
                     uint32_t eeprom_size,
                     const uint8_t *macaddr)
{
    int i;

    core->autoneg_timer = timer_new_ms(QEMU_CLOCK_VIRTUAL,
                                       igb_autoneg_timer, core);
    igb_intrmgr_pci_realize(core);

    for (i = 0; i < IGB_NUM_QUEUES; i++) {
        net_tx_pkt_init(&core->tx[i].tx_pkt, E1000E_MAX_TX_FRAGS);
    }

    net_rx_pkt_init(&core->rx_pkt);

    e1000x_core_prepare_eeprom(core->eeprom,
                               eeprom_templ,
                               eeprom_size,
                               PCI_DEVICE_GET_CLASS(core->owner)->device_id,
                               macaddr);
    igb_update_rx_offloads(core);
}
  3772. void
  3773. igb_core_pci_uninit(IGBCore *core)
  3774. {
  3775. int i;
  3776. timer_free(core->autoneg_timer);
  3777. igb_intrmgr_pci_unint(core);
  3778. for (i = 0; i < IGB_NUM_QUEUES; i++) {
  3779. net_tx_pkt_uninit(core->tx[i].tx_pkt);
  3780. }
  3781. net_rx_pkt_uninit(core->rx_pkt);
  3782. }
/*
 * PHY register reset values, copied over core->phy by igb_reset().
 * Advertises 10/100/1000 in both duplex modes with autonegotiation
 * enabled and the link reported as up.
 */
static const uint16_t
igb_phy_reg_init[] = {
    /* Basic control: 1000 Mb/s, full duplex, autoneg enabled. */
    [MII_BMCR] = MII_BMCR_SPEED1000 |
                 MII_BMCR_FD |
                 MII_BMCR_AUTOEN,

    /* Basic status: link up, all 10/100 abilities, extended status. */
    [MII_BMSR] = MII_BMSR_EXTCAP |
                 MII_BMSR_LINK_ST |
                 MII_BMSR_AUTONEG |
                 MII_BMSR_MFPS |
                 MII_BMSR_EXTSTAT |
                 MII_BMSR_10T_HD |
                 MII_BMSR_10T_FD |
                 MII_BMSR_100TX_HD |
                 MII_BMSR_100TX_FD,

    [MII_PHYID1]            = IGP03E1000_E_PHY_ID >> 16,
    [MII_PHYID2]            = (IGP03E1000_E_PHY_ID & 0xfff0) | 1,
    /* Autoneg advertisement: 10/100 both duplexes plus pause frames. */
    [MII_ANAR]              = MII_ANAR_CSMACD | MII_ANAR_10 |
                              MII_ANAR_10FD | MII_ANAR_TX |
                              MII_ANAR_TXFD | MII_ANAR_PAUSE |
                              MII_ANAR_PAUSE_ASYM,
    /* Link-partner ability reported to the guest. */
    [MII_ANLPAR]            = MII_ANLPAR_10 | MII_ANLPAR_10FD |
                              MII_ANLPAR_TX | MII_ANLPAR_TXFD |
                              MII_ANLPAR_T4 | MII_ANLPAR_PAUSE,
    [MII_ANER]              = MII_ANER_NP | MII_ANER_NWAY,
    [MII_ANNP]              = 0x1 | MII_ANNP_MP,
    [MII_CTRL1000]          = MII_CTRL1000_HALF | MII_CTRL1000_FULL |
                              MII_CTRL1000_PORT | MII_CTRL1000_MASTER,
    [MII_STAT1000]          = MII_STAT1000_HALF | MII_STAT1000_FULL |
                              MII_STAT1000_ROK | MII_STAT1000_LOK,
    [MII_EXTSTAT]           = MII_EXTSTAT_1000T_HD | MII_EXTSTAT_1000T_FD,

    [IGP01E1000_PHY_PORT_CONFIG] = BIT(5) | BIT(8),
    [IGP01E1000_PHY_PORT_STATUS] = IGP01E1000_PSSR_SPEED_1000MBPS,
    [IGP02E1000_PHY_POWER_MGMT]  = BIT(0) | BIT(3) | IGP02E1000_PM_D3_LPLU |
                                   IGP01E1000_PSCFR_SMART_SPEED
};
/*
 * MAC register reset values; registers without an entry reset to 0
 * (see the loop in igb_reset()).
 */
static const uint32_t igb_mac_reg_init[] = {
    [LEDCTL]        = 2 | (3 << 8) | BIT(15) | (6 << 16) | (7 << 24),
    [EEMNGCTL]      = BIT(31),
    /* Queue 0 is enabled out of reset; all RXDCTL keep bit 16 set. */
    [TXDCTL0]       = E1000_TXDCTL_QUEUE_ENABLE,
    [RXDCTL0]       = E1000_RXDCTL_QUEUE_ENABLE | (1 << 16),
    [RXDCTL1]       = 1 << 16,
    [RXDCTL2]       = 1 << 16,
    [RXDCTL3]       = 1 << 16,
    [RXDCTL4]       = 1 << 16,
    [RXDCTL5]       = 1 << 16,
    [RXDCTL6]       = 1 << 16,
    [RXDCTL7]       = 1 << 16,
    [RXDCTL8]       = 1 << 16,
    [RXDCTL9]       = 1 << 16,
    [RXDCTL10]      = 1 << 16,
    [RXDCTL11]      = 1 << 16,
    [RXDCTL12]      = 1 << 16,
    [RXDCTL13]      = 1 << 16,
    [RXDCTL14]      = 1 << 16,
    [RXDCTL15]      = 1 << 16,
    [TIPG]          = 0x08 | (0x04 << 10) | (0x06 << 20),
    [CTRL]          = E1000_CTRL_FD | E1000_CTRL_LRST | E1000_CTRL_SPD_1000 |
                      E1000_CTRL_ADVD3WUC,
    [STATUS]        = E1000_STATUS_PHYRA | BIT(31),
    [EECD]          = E1000_EECD_FWE_DIS | E1000_EECD_PRES |
                      (2 << E1000_EECD_SIZE_EX_SHIFT),
    [GCR]           = E1000_L0S_ADJUST |
                      E1000_GCR_CMPL_TMOUT_RESEND |
                      E1000_GCR_CAP_VER2 |
                      E1000_L1_ENTRY_LATENCY_MSB |
                      E1000_L1_ENTRY_LATENCY_LSB,
    /* RX checksum offload for IP and TCP/UDP enabled by default. */
    [RXCSUM]        = E1000_RXCSUM_IPOFLD | E1000_RXCSUM_TUOFLD,
    [TXPBS]         = 0x28,
    [RXPBS]         = 0x40,
    [TCTL]          = E1000_TCTL_PSP | (0xF << E1000_CT_SHIFT) |
                      (0x40 << E1000_COLD_SHIFT) | (0x1 << 26) | (0xA << 28),
    [TCTL_EXT]      = 0x40 | (0x42 << 10),
    [DTXCTL]        = E1000_DTXCTL_8023LL | E1000_DTXCTL_SPOOF_INT,
    [VET]           = ETH_P_VLAN | (ETH_P_VLAN << 16),

    /* Report "reset in progress" to all VF mailboxes out of reset. */
    [V2PMAILBOX0 ... V2PMAILBOX0 + IGB_MAX_VF_FUNCTIONS - 1] = E1000_V2PMAILBOX_RSTI,
    [MBVFIMR]       = 0xFF,
    [VFRE]          = 0xFF,
    [VFTE]          = 0xFF,
    /* 0x2600 is the default maximum packet size (matches RLPML). */
    [VMOLR0 ... VMOLR0 + 7] = 0x2600 | E1000_VMOLR_STRCRC,
    [RPLOLR]        = E1000_RPLOLR_STRCRC,
    [RLPML]         = 0x2600,
    /* DCA TX control: relaxed ordering enabled for all 16 queues. */
    [TXCTL0]        = E1000_DCA_TXCTRL_DATA_RRO_EN |
                      E1000_DCA_TXCTRL_TX_WB_RO_EN |
                      E1000_DCA_TXCTRL_DESC_RRO_EN,
    [TXCTL1]        = E1000_DCA_TXCTRL_DATA_RRO_EN |
                      E1000_DCA_TXCTRL_TX_WB_RO_EN |
                      E1000_DCA_TXCTRL_DESC_RRO_EN,
    [TXCTL2]        = E1000_DCA_TXCTRL_DATA_RRO_EN |
                      E1000_DCA_TXCTRL_TX_WB_RO_EN |
                      E1000_DCA_TXCTRL_DESC_RRO_EN,
    [TXCTL3]        = E1000_DCA_TXCTRL_DATA_RRO_EN |
                      E1000_DCA_TXCTRL_TX_WB_RO_EN |
                      E1000_DCA_TXCTRL_DESC_RRO_EN,
    [TXCTL4]        = E1000_DCA_TXCTRL_DATA_RRO_EN |
                      E1000_DCA_TXCTRL_TX_WB_RO_EN |
                      E1000_DCA_TXCTRL_DESC_RRO_EN,
    [TXCTL5]        = E1000_DCA_TXCTRL_DATA_RRO_EN |
                      E1000_DCA_TXCTRL_TX_WB_RO_EN |
                      E1000_DCA_TXCTRL_DESC_RRO_EN,
    [TXCTL6]        = E1000_DCA_TXCTRL_DATA_RRO_EN |
                      E1000_DCA_TXCTRL_TX_WB_RO_EN |
                      E1000_DCA_TXCTRL_DESC_RRO_EN,
    [TXCTL7]        = E1000_DCA_TXCTRL_DATA_RRO_EN |
                      E1000_DCA_TXCTRL_TX_WB_RO_EN |
                      E1000_DCA_TXCTRL_DESC_RRO_EN,
    [TXCTL8]        = E1000_DCA_TXCTRL_DATA_RRO_EN |
                      E1000_DCA_TXCTRL_TX_WB_RO_EN |
                      E1000_DCA_TXCTRL_DESC_RRO_EN,
    [TXCTL9]        = E1000_DCA_TXCTRL_DATA_RRO_EN |
                      E1000_DCA_TXCTRL_TX_WB_RO_EN |
                      E1000_DCA_TXCTRL_DESC_RRO_EN,
    [TXCTL10]       = E1000_DCA_TXCTRL_DATA_RRO_EN |
                      E1000_DCA_TXCTRL_TX_WB_RO_EN |
                      E1000_DCA_TXCTRL_DESC_RRO_EN,
    [TXCTL11]       = E1000_DCA_TXCTRL_DATA_RRO_EN |
                      E1000_DCA_TXCTRL_TX_WB_RO_EN |
                      E1000_DCA_TXCTRL_DESC_RRO_EN,
    [TXCTL12]       = E1000_DCA_TXCTRL_DATA_RRO_EN |
                      E1000_DCA_TXCTRL_TX_WB_RO_EN |
                      E1000_DCA_TXCTRL_DESC_RRO_EN,
    [TXCTL13]       = E1000_DCA_TXCTRL_DATA_RRO_EN |
                      E1000_DCA_TXCTRL_TX_WB_RO_EN |
                      E1000_DCA_TXCTRL_DESC_RRO_EN,
    [TXCTL14]       = E1000_DCA_TXCTRL_DATA_RRO_EN |
                      E1000_DCA_TXCTRL_TX_WB_RO_EN |
                      E1000_DCA_TXCTRL_DESC_RRO_EN,
    [TXCTL15]       = E1000_DCA_TXCTRL_DATA_RRO_EN |
                      E1000_DCA_TXCTRL_TX_WB_RO_EN |
                      E1000_DCA_TXCTRL_DESC_RRO_EN,
};
/*
 * Reset the device core.  @sw distinguishes a software-initiated reset
 * from a hard reset: a software reset preserves RXPBS, TXPBS and the
 * EITR interrupt throttling registers.
 */
static void igb_reset(IGBCore *core, bool sw)
{
    struct igb_tx *tx;
    int i;

    timer_del(core->autoneg_timer);

    igb_intrmgr_reset(core);

    /* Reload PHY and MAC registers from their reset-value tables. */
    memset(core->phy, 0, sizeof core->phy);
    memcpy(core->phy, igb_phy_reg_init, sizeof igb_phy_reg_init);

    for (i = 0; i < E1000E_MAC_SIZE; i++) {
        /* These registers survive a software reset. */
        if (sw &&
            (i == RXPBS || i == TXPBS ||
             (i >= EITR0 && i < EITR0 + IGB_INTR_NUM))) {
            continue;
        }

        /* Registers past the init table reset to zero. */
        core->mac[i] = i < ARRAY_SIZE(igb_mac_reg_init) ?
                       igb_mac_reg_init[i] : 0;
    }

    if (qemu_get_queue(core->owner_nic)->link_down) {
        igb_link_down(core);
    }

    e1000x_reset_mac_addr(core->owner_nic, core->mac, core->permanent_mac);

    for (int vfn = 0; vfn < IGB_MAX_VF_FUNCTIONS; vfn++) {
        /* Set RSTI, so VF can identify a PF reset is in progress */
        core->mac[V2PMAILBOX0 + vfn] |= E1000_V2PMAILBOX_RSTI;
    }

    /* Clear per-queue TX offload context and flags. */
    for (i = 0; i < ARRAY_SIZE(core->tx); i++) {
        tx = &core->tx[i];
        memset(tx->ctx, 0, sizeof(tx->ctx));
        tx->first = true;
        tx->skip_cp = false;
    }
}
/*
 * Hard (non-software) reset entry point: resets every register,
 * including the ones a software reset would preserve (see igb_reset()).
 */
void
igb_core_reset(IGBCore *core)
{
    igb_reset(core, false);
}
/*
 * Migration pre-save hook: finalize transient state that cannot be
 * migrated directly (pending autonegotiation and in-flight TX packets).
 */
void igb_core_pre_save(IGBCore *core)
{
    int i;
    NetClientState *nc = qemu_get_queue(core->owner_nic);

    /*
     * If link is down and auto-negotiation is supported and ongoing,
     * complete auto-negotiation immediately. This allows us to look
     * at MII_BMSR_AN_COMP to infer link status on load.
     */
    if (nc->link_down && igb_have_autoneg(core)) {
        core->phy[MII_BMSR] |= MII_BMSR_AN_COMP;
        igb_update_flowctl_status(core);
    }

    /* Packet fragments are not migrated; mark such packets to be
     * skipped when TX resumes after load. */
    for (i = 0; i < ARRAY_SIZE(core->tx); i++) {
        if (net_tx_pkt_has_fragments(core->tx[i].tx_pkt)) {
            core->tx[i].skip_cp = true;
        }
    }
}
/*
 * Migration post-load hook: rebuild state derived from migrated
 * registers (link status, interrupt-manager timers, pending autoneg).
 * Returns 0 (always succeeds).
 */
int
igb_core_post_load(IGBCore *core)
{
    NetClientState *nc = qemu_get_queue(core->owner_nic);

    /*
     * nc.link_down can't be migrated, so infer link_down according
     * to link status bit in core.mac[STATUS].
     */
    nc->link_down = (core->mac[STATUS] & E1000_STATUS_LU) == 0;

    /*
     * we need to restart intrmgr timers, as an older version of
     * QEMU can have stopped them before migration
     */
    igb_intrmgr_resume(core);
    igb_autoneg_resume(core);

    return 0;
}