syscall.c 440 KB

12345678910111213141516171819202122232425262728293031323334353637383940414243444546474849505152535455565758596061626364656667686970717273747576777879808182838485868788899091929394959697989910010110210310410510610710810911011111211311411511611711811912012112212312412512612712812913013113213313413513613713813914014114214314414514614714814915015115215315415515615715815916016116216316416516616716816917017117217317417517617717817918018118218318418518618718818919019119219319419519619719819920020120220320420520620720820921021121221321421521621721821922022122222322422522622722822923023123223323423523623723823924024124224324424524624724824925025125225325425525625725825926026126226326426526626726826927027127227327427527627727827928028128228328428528628728828929029129229329429529629729829930030130230330430530630730830931031131231331431531631731831932032132232332432532632732832933033133233333433533633733833934034134234334434534634734834935035135235335435535635735835936036136236336436536636736836937037137237337437537637737837938038138238338438538638738838939039139239339439539639739839940040140240340440540640740840941041141241341441541641741841942042142242342442542642742842943043143243343443543643743843944044144244344444544644744844945045145245345445545645745845946046146246346446546646746846947047147247347447547647747847948048148248348448548648748848949049149249349449549649749849950050150250350450550650750850951051151251351451551651751851952052152252352452552652752852953053153253353453553653753853954054154254354454554654754854955055155255355455555655755855956056156256356456556656756856957057157257357457557657757857958058158258358458558658758858959059159259359459559659759859960060160260360460560660760860961061161261361461561661761861962062162262362462562662762862963063163263363463563663763863964064164264364464564664764864965065165265365465565665765865966066166266366466566666766866967067167267367467567667767867968068168268368468568668768868969069169269369469569669769869970070170270
37047057067077087097107117127137147157167177187197207217227237247257267277287297307317327337347357367377387397407417427437447457467477487497507517527537547557567577587597607617627637647657667677687697707717727737747757767777787797807817827837847857867877887897907917927937947957967977987998008018028038048058068078088098108118128138148158168178188198208218228238248258268278288298308318328338348358368378388398408418428438448458468478488498508518528538548558568578588598608618628638648658668678688698708718728738748758768778788798808818828838848858868878888898908918928938948958968978988999009019029039049059069079089099109119129139149159169179189199209219229239249259269279289299309319329339349359369379389399409419429439449459469479489499509519529539549559569579589599609619629639649659669679689699709719729739749759769779789799809819829839849859869879889899909919929939949959969979989991000100110021003100410051006100710081009101010111012101310141015101610171018101910201021102210231024102510261027102810291030103110321033103410351036103710381039104010411042104310441045104610471048104910501051105210531054105510561057105810591060106110621063106410651066106710681069107010711072107310741075107610771078107910801081108210831084108510861087108810891090109110921093109410951096109710981099110011011102110311041105110611071108110911101111111211131114111511161117111811191120112111221123112411251126112711281129113011311132113311341135113611371138113911401141114211431144114511461147114811491150115111521153115411551156115711581159116011611162116311641165116611671168116911701171117211731174117511761177117811791180118111821183118411851186118711881189119011911192119311941195119611971198119912001201120212031204120512061207120812091210121112121213121412151216121712181219122012211222122312241225122612271228122912301231123212331234123512361237123812391240124112421243124412451246124712481249125012511252125312541255125612571258125912601261126212631264126512661267126812691270127112721273127412751276127
71278127912801281128212831284128512861287128812891290129112921293129412951296129712981299130013011302130313041305130613071308130913101311131213131314131513161317131813191320132113221323132413251326132713281329133013311332133313341335133613371338133913401341134213431344134513461347134813491350135113521353135413551356135713581359136013611362136313641365136613671368136913701371137213731374137513761377137813791380138113821383138413851386138713881389139013911392139313941395139613971398139914001401140214031404140514061407140814091410141114121413141414151416141714181419142014211422142314241425142614271428142914301431143214331434143514361437143814391440144114421443144414451446144714481449145014511452145314541455145614571458145914601461146214631464146514661467146814691470147114721473147414751476147714781479148014811482148314841485148614871488148914901491149214931494149514961497149814991500150115021503150415051506150715081509151015111512151315141515151615171518151915201521152215231524152515261527152815291530153115321533153415351536153715381539154015411542154315441545154615471548154915501551155215531554155515561557155815591560156115621563156415651566156715681569157015711572157315741575157615771578157915801581158215831584158515861587158815891590159115921593159415951596159715981599160016011602160316041605160616071608160916101611161216131614161516161617161816191620162116221623162416251626162716281629163016311632163316341635163616371638163916401641164216431644164516461647164816491650165116521653165416551656165716581659166016611662166316641665166616671668166916701671167216731674167516761677167816791680168116821683168416851686168716881689169016911692169316941695169616971698169917001701170217031704170517061707170817091710171117121713171417151716171717181719172017211722172317241725172617271728172917301731173217331734173517361737173817391740174117421743174417451746174717481749175017511752175317541755175617571758175917601761176217631764176517661767176817691770177117721773177417751776177
71778177917801781178217831784178517861787178817891790179117921793179417951796179717981799180018011802180318041805180618071808180918101811181218131814181518161817181818191820182118221823182418251826182718281829183018311832183318341835183618371838183918401841184218431844184518461847184818491850185118521853185418551856185718581859186018611862186318641865186618671868186918701871187218731874187518761877187818791880188118821883188418851886188718881889189018911892189318941895189618971898189919001901190219031904190519061907190819091910191119121913191419151916191719181919192019211922192319241925192619271928192919301931193219331934193519361937193819391940194119421943194419451946194719481949195019511952195319541955195619571958195919601961196219631964196519661967196819691970197119721973197419751976197719781979198019811982198319841985198619871988198919901991199219931994199519961997199819992000200120022003200420052006200720082009201020112012201320142015201620172018201920202021202220232024202520262027202820292030203120322033203420352036203720382039204020412042204320442045204620472048204920502051205220532054205520562057205820592060206120622063206420652066206720682069207020712072207320742075207620772078207920802081208220832084208520862087208820892090209120922093209420952096209720982099210021012102210321042105210621072108210921102111211221132114211521162117211821192120212121222123212421252126212721282129213021312132213321342135213621372138213921402141214221432144214521462147214821492150215121522153215421552156215721582159216021612162216321642165216621672168216921702171217221732174217521762177217821792180218121822183218421852186218721882189219021912192219321942195219621972198219922002201220222032204220522062207220822092210221122122213221422152216221722182219222022212222222322242225222622272228222922302231223222332234223522362237223822392240224122422243224422452246224722482249225022512252225322542255225622572258225922602261226222632264226522662267226822692270227122722273227422752276227
72278227922802281228222832284228522862287228822892290229122922293229422952296229722982299230023012302230323042305230623072308230923102311231223132314231523162317231823192320232123222323232423252326232723282329233023312332233323342335233623372338233923402341234223432344234523462347234823492350235123522353235423552356235723582359236023612362236323642365236623672368236923702371237223732374237523762377237823792380238123822383238423852386238723882389239023912392239323942395239623972398239924002401240224032404240524062407240824092410241124122413241424152416241724182419242024212422242324242425242624272428242924302431243224332434243524362437243824392440244124422443244424452446244724482449245024512452245324542455245624572458245924602461246224632464246524662467246824692470247124722473247424752476247724782479248024812482248324842485248624872488248924902491249224932494249524962497249824992500250125022503250425052506250725082509251025112512251325142515251625172518251925202521252225232524252525262527252825292530253125322533253425352536253725382539254025412542254325442545254625472548254925502551255225532554255525562557255825592560256125622563256425652566256725682569257025712572257325742575257625772578257925802581258225832584258525862587258825892590259125922593259425952596259725982599260026012602260326042605260626072608260926102611261226132614261526162617261826192620262126222623262426252626262726282629263026312632263326342635263626372638263926402641264226432644264526462647264826492650265126522653265426552656265726582659266026612662266326642665266626672668266926702671267226732674267526762677267826792680268126822683268426852686268726882689269026912692269326942695269626972698269927002701270227032704270527062707270827092710271127122713271427152716271727182719272027212722272327242725272627272728272927302731273227332734273527362737273827392740274127422743274427452746274727482749275027512752275327542755275627572758275927602761276227632764276527662767276827692770277127722773277427752776277
72778277927802781278227832784278527862787278827892790279127922793279427952796279727982799280028012802280328042805280628072808280928102811281228132814281528162817281828192820282128222823282428252826282728282829283028312832283328342835283628372838283928402841284228432844284528462847284828492850285128522853285428552856285728582859286028612862286328642865286628672868286928702871287228732874287528762877287828792880288128822883288428852886288728882889289028912892289328942895289628972898289929002901290229032904290529062907290829092910291129122913291429152916291729182919292029212922292329242925292629272928292929302931293229332934293529362937293829392940294129422943294429452946294729482949295029512952295329542955295629572958295929602961296229632964296529662967296829692970297129722973297429752976297729782979298029812982298329842985298629872988298929902991299229932994299529962997299829993000300130023003300430053006300730083009301030113012301330143015301630173018301930203021302230233024302530263027302830293030303130323033303430353036303730383039304030413042304330443045304630473048304930503051305230533054305530563057305830593060306130623063306430653066306730683069307030713072307330743075307630773078307930803081308230833084308530863087308830893090309130923093309430953096309730983099310031013102310331043105310631073108310931103111311231133114311531163117311831193120312131223123312431253126312731283129313031313132313331343135313631373138313931403141314231433144314531463147314831493150315131523153315431553156315731583159316031613162316331643165316631673168316931703171317231733174317531763177317831793180318131823183318431853186318731883189319031913192319331943195319631973198319932003201320232033204320532063207320832093210321132123213321432153216321732183219322032213222322332243225322632273228322932303231323232333234323532363237323832393240324132423243324432453246324732483249325032513252325332543255325632573258325932603261326232633264326532663267326832693270327132723273327432753276327
73278327932803281328232833284328532863287328832893290329132923293329432953296329732983299330033013302330333043305330633073308330933103311331233133314331533163317331833193320332133223323332433253326332733283329333033313332333333343335333633373338333933403341334233433344334533463347334833493350335133523353335433553356335733583359336033613362336333643365336633673368336933703371337233733374337533763377337833793380338133823383338433853386338733883389339033913392339333943395339633973398339934003401340234033404340534063407340834093410341134123413341434153416341734183419342034213422342334243425342634273428342934303431343234333434343534363437343834393440344134423443344434453446344734483449345034513452345334543455345634573458345934603461346234633464346534663467346834693470347134723473347434753476347734783479348034813482348334843485348634873488348934903491349234933494349534963497349834993500350135023503350435053506350735083509351035113512351335143515351635173518351935203521352235233524352535263527352835293530353135323533353435353536353735383539354035413542354335443545354635473548354935503551355235533554355535563557355835593560356135623563356435653566356735683569357035713572357335743575357635773578357935803581358235833584358535863587358835893590359135923593359435953596359735983599360036013602360336043605360636073608360936103611361236133614361536163617361836193620362136223623362436253626362736283629363036313632363336343635363636373638363936403641364236433644364536463647364836493650365136523653365436553656365736583659366036613662366336643665366636673668366936703671367236733674367536763677367836793680368136823683368436853686368736883689369036913692369336943695369636973698369937003701370237033704370537063707370837093710371137123713371437153716371737183719372037213722372337243725372637273728372937303731373237333734373537363737373837393740374137423743374437453746374737483749375037513752375337543755375637573758375937603761376237633764376537663767376837693770377137723773377437753776377
73778377937803781378237833784378537863787378837893790379137923793379437953796379737983799380038013802380338043805380638073808380938103811381238133814381538163817381838193820382138223823382438253826382738283829383038313832383338343835383638373838383938403841384238433844384538463847384838493850385138523853385438553856385738583859386038613862386338643865386638673868386938703871387238733874387538763877387838793880388138823883388438853886388738883889389038913892389338943895389638973898389939003901390239033904390539063907390839093910391139123913391439153916391739183919392039213922392339243925392639273928392939303931393239333934393539363937393839393940394139423943394439453946394739483949395039513952395339543955395639573958395939603961396239633964396539663967396839693970397139723973397439753976397739783979398039813982398339843985398639873988398939903991399239933994399539963997399839994000400140024003400440054006400740084009401040114012401340144015401640174018401940204021402240234024402540264027402840294030403140324033403440354036403740384039404040414042404340444045404640474048404940504051405240534054405540564057405840594060406140624063406440654066406740684069407040714072407340744075407640774078407940804081408240834084408540864087408840894090409140924093409440954096409740984099410041014102410341044105410641074108410941104111411241134114411541164117411841194120412141224123412441254126412741284129413041314132413341344135413641374138413941404141414241434144414541464147414841494150415141524153415441554156415741584159416041614162416341644165416641674168416941704171417241734174417541764177417841794180418141824183418441854186418741884189419041914192419341944195419641974198419942004201420242034204420542064207420842094210421142124213421442154216421742184219422042214222422342244225422642274228422942304231423242334234423542364237423842394240424142424243424442454246424742484249425042514252425342544255425642574258425942604261426242634264426542664267426842694270427142724273427442754276427
74278427942804281428242834284428542864287428842894290429142924293429442954296429742984299430043014302430343044305430643074308430943104311431243134314431543164317431843194320432143224323432443254326432743284329433043314332433343344335433643374338433943404341434243434344434543464347434843494350435143524353435443554356435743584359436043614362436343644365436643674368436943704371437243734374437543764377437843794380438143824383438443854386438743884389439043914392439343944395439643974398439944004401440244034404440544064407440844094410441144124413441444154416441744184419442044214422442344244425442644274428442944304431443244334434443544364437443844394440444144424443444444454446444744484449445044514452445344544455445644574458445944604461446244634464446544664467446844694470447144724473447444754476447744784479448044814482448344844485448644874488448944904491449244934494449544964497449844994500450145024503450445054506450745084509451045114512451345144515451645174518451945204521452245234524452545264527452845294530453145324533453445354536453745384539454045414542454345444545454645474548454945504551455245534554455545564557455845594560456145624563456445654566456745684569457045714572457345744575457645774578457945804581458245834584458545864587458845894590459145924593459445954596459745984599460046014602460346044605460646074608460946104611461246134614461546164617461846194620462146224623462446254626462746284629463046314632463346344635463646374638463946404641464246434644464546464647464846494650465146524653465446554656465746584659466046614662466346644665466646674668466946704671467246734674467546764677467846794680468146824683468446854686468746884689469046914692469346944695469646974698469947004701470247034704470547064707470847094710471147124713471447154716471747184719472047214722472347244725472647274728472947304731473247334734473547364737473847394740474147424743474447454746474747484749475047514752475347544755475647574758475947604761476247634764476547664767476847694770477147724773477447754776477
74778477947804781478247834784478547864787478847894790479147924793479447954796479747984799480048014802480348044805480648074808480948104811481248134814481548164817481848194820482148224823482448254826482748284829483048314832483348344835483648374838483948404841484248434844484548464847484848494850485148524853485448554856485748584859486048614862486348644865486648674868486948704871487248734874487548764877487848794880488148824883488448854886488748884889489048914892489348944895489648974898489949004901490249034904490549064907490849094910491149124913491449154916491749184919492049214922492349244925492649274928492949304931493249334934493549364937493849394940494149424943494449454946494749484949495049514952495349544955495649574958495949604961496249634964496549664967496849694970497149724973497449754976497749784979498049814982498349844985498649874988498949904991499249934994499549964997499849995000500150025003500450055006500750085009501050115012501350145015501650175018501950205021502250235024502550265027502850295030503150325033503450355036503750385039504050415042504350445045504650475048504950505051505250535054505550565057505850595060506150625063506450655066506750685069507050715072507350745075507650775078507950805081508250835084508550865087508850895090509150925093509450955096509750985099510051015102510351045105510651075108510951105111511251135114511551165117511851195120512151225123512451255126512751285129513051315132513351345135513651375138513951405141514251435144514551465147514851495150515151525153515451555156515751585159516051615162516351645165516651675168516951705171517251735174517551765177517851795180518151825183518451855186518751885189519051915192519351945195519651975198519952005201520252035204520552065207520852095210521152125213521452155216521752185219522052215222522352245225522652275228522952305231523252335234523552365237523852395240524152425243524452455246524752485249525052515252525352545255525652575258525952605261526252635264526552665267526852695270527152725273527452755276527
75278527952805281528252835284528552865287528852895290529152925293529452955296529752985299530053015302530353045305530653075308530953105311531253135314531553165317531853195320532153225323532453255326532753285329533053315332533353345335533653375338533953405341534253435344534553465347534853495350535153525353535453555356535753585359536053615362536353645365536653675368536953705371537253735374537553765377537853795380538153825383538453855386538753885389539053915392539353945395539653975398539954005401540254035404540554065407540854095410541154125413541454155416541754185419542054215422542354245425542654275428542954305431543254335434543554365437543854395440544154425443544454455446544754485449545054515452545354545455545654575458545954605461546254635464546554665467546854695470547154725473547454755476547754785479548054815482548354845485548654875488548954905491549254935494549554965497549854995500550155025503550455055506550755085509551055115512551355145515551655175518551955205521552255235524552555265527552855295530553155325533553455355536553755385539554055415542554355445545554655475548554955505551555255535554555555565557555855595560556155625563556455655566556755685569557055715572557355745575557655775578557955805581558255835584558555865587558855895590559155925593559455955596559755985599560056015602560356045605560656075608560956105611561256135614561556165617561856195620562156225623562456255626562756285629563056315632563356345635563656375638563956405641564256435644564556465647564856495650565156525653565456555656565756585659566056615662566356645665566656675668566956705671567256735674567556765677567856795680568156825683568456855686568756885689569056915692569356945695569656975698569957005701570257035704570557065707570857095710571157125713571457155716571757185719572057215722572357245725572657275728572957305731573257335734573557365737573857395740574157425743574457455746574757485749575057515752575357545755575657575758575957605761576257635764576557665767576857695770577157725773577457755776577
75778577957805781578257835784578557865787578857895790579157925793579457955796579757985799580058015802580358045805580658075808580958105811581258135814581558165817581858195820582158225823582458255826582758285829583058315832583358345835583658375838583958405841584258435844584558465847584858495850585158525853585458555856585758585859586058615862586358645865586658675868586958705871587258735874587558765877587858795880588158825883588458855886588758885889589058915892589358945895589658975898589959005901590259035904590559065907590859095910591159125913591459155916591759185919592059215922592359245925592659275928592959305931593259335934593559365937593859395940594159425943594459455946594759485949595059515952595359545955595659575958595959605961596259635964596559665967596859695970597159725973597459755976597759785979598059815982598359845985598659875988598959905991599259935994599559965997599859996000600160026003600460056006600760086009601060116012601360146015601660176018601960206021602260236024602560266027602860296030603160326033603460356036603760386039604060416042604360446045604660476048604960506051605260536054605560566057605860596060606160626063606460656066606760686069607060716072607360746075607660776078607960806081608260836084608560866087608860896090609160926093609460956096609760986099610061016102610361046105610661076108610961106111611261136114611561166117611861196120612161226123612461256126612761286129613061316132613361346135613661376138613961406141614261436144614561466147614861496150615161526153615461556156615761586159616061616162616361646165616661676168616961706171617261736174617561766177617861796180618161826183618461856186618761886189619061916192619361946195619661976198619962006201620262036204620562066207620862096210621162126213621462156216621762186219622062216222622362246225622662276228622962306231623262336234623562366237623862396240624162426243624462456246624762486249625062516252625362546255625662576258625962606261626262636264626562666267626862696270627162726273627462756276627
76278627962806281628262836284628562866287628862896290629162926293629462956296629762986299630063016302630363046305630663076308630963106311631263136314631563166317631863196320632163226323632463256326632763286329633063316332633363346335633663376338633963406341634263436344634563466347634863496350635163526353635463556356635763586359636063616362636363646365636663676368636963706371637263736374637563766377637863796380638163826383638463856386638763886389639063916392639363946395639663976398639964006401640264036404640564066407640864096410641164126413641464156416641764186419642064216422642364246425642664276428642964306431643264336434643564366437643864396440644164426443644464456446644764486449645064516452645364546455645664576458645964606461646264636464646564666467646864696470647164726473647464756476647764786479648064816482648364846485648664876488648964906491649264936494649564966497649864996500650165026503650465056506650765086509651065116512651365146515651665176518651965206521652265236524652565266527652865296530653165326533653465356536653765386539654065416542654365446545654665476548654965506551655265536554655565566557655865596560656165626563656465656566656765686569657065716572657365746575657665776578657965806581658265836584658565866587658865896590659165926593659465956596659765986599660066016602660366046605660666076608660966106611661266136614661566166617661866196620662166226623662466256626662766286629663066316632663366346635663666376638663966406641664266436644664566466647664866496650665166526653665466556656665766586659666066616662666366646665666666676668666966706671667266736674667566766677667866796680668166826683668466856686668766886689669066916692669366946695669666976698669967006701670267036704670567066707670867096710671167126713671467156716671767186719672067216722672367246725672667276728672967306731673267336734673567366737673867396740674167426743674467456746674767486749675067516752675367546755675667576758675967606761676267636764676567666767676867696770677167726773677467756776677
76778677967806781678267836784678567866787678867896790679167926793679467956796679767986799680068016802680368046805680668076808680968106811681268136814681568166817681868196820682168226823682468256826682768286829683068316832683368346835683668376838683968406841684268436844684568466847684868496850685168526853685468556856685768586859686068616862686368646865686668676868686968706871687268736874687568766877687868796880688168826883688468856886688768886889689068916892689368946895689668976898689969006901690269036904690569066907690869096910691169126913691469156916691769186919692069216922692369246925692669276928692969306931693269336934693569366937693869396940694169426943694469456946694769486949695069516952695369546955695669576958695969606961696269636964696569666967696869696970697169726973697469756976697769786979698069816982698369846985698669876988698969906991699269936994699569966997699869997000700170027003700470057006700770087009701070117012701370147015701670177018701970207021702270237024702570267027702870297030703170327033703470357036703770387039704070417042704370447045704670477048704970507051705270537054705570567057705870597060706170627063706470657066706770687069707070717072707370747075707670777078707970807081708270837084708570867087708870897090709170927093709470957096709770987099710071017102710371047105710671077108710971107111711271137114711571167117711871197120712171227123712471257126712771287129713071317132713371347135713671377138713971407141714271437144714571467147714871497150715171527153715471557156715771587159716071617162716371647165716671677168716971707171717271737174717571767177717871797180718171827183718471857186718771887189719071917192719371947195719671977198719972007201720272037204720572067207720872097210721172127213721472157216721772187219722072217222722372247225722672277228722972307231723272337234723572367237723872397240724172427243724472457246724772487249725072517252725372547255725672577258725972607261726272637264726572667267726872697270727172727273727472757276727
77278727972807281728272837284728572867287728872897290729172927293729472957296729772987299730073017302730373047305730673077308730973107311731273137314731573167317731873197320732173227323732473257326732773287329733073317332733373347335733673377338733973407341734273437344734573467347734873497350735173527353735473557356735773587359736073617362736373647365736673677368736973707371737273737374737573767377737873797380738173827383738473857386738773887389739073917392739373947395739673977398739974007401740274037404740574067407740874097410741174127413741474157416741774187419742074217422742374247425742674277428742974307431743274337434743574367437743874397440744174427443744474457446744774487449745074517452745374547455745674577458745974607461746274637464746574667467746874697470747174727473747474757476747774787479748074817482748374847485748674877488748974907491749274937494749574967497749874997500750175027503750475057506750775087509751075117512751375147515751675177518751975207521752275237524752575267527752875297530753175327533753475357536753775387539754075417542754375447545754675477548754975507551755275537554755575567557755875597560756175627563756475657566756775687569757075717572757375747575757675777578757975807581758275837584758575867587758875897590759175927593759475957596759775987599760076017602760376047605760676077608760976107611761276137614761576167617761876197620762176227623762476257626762776287629763076317632763376347635763676377638763976407641764276437644764576467647764876497650765176527653765476557656765776587659766076617662766376647665766676677668766976707671767276737674767576767677767876797680768176827683768476857686768776887689769076917692769376947695769676977698769977007701770277037704770577067707770877097710771177127713771477157716771777187719772077217722772377247725772677277728772977307731773277337734773577367737773877397740774177427743774477457746774777487749775077517752775377547755775677577758775977607761776277637764776577667767776877697770777177727773777477757776777
77778777977807781778277837784778577867787778877897790779177927793779477957796779777987799780078017802780378047805780678077808780978107811781278137814781578167817781878197820782178227823782478257826782778287829783078317832783378347835783678377838783978407841784278437844784578467847784878497850785178527853785478557856785778587859786078617862786378647865786678677868786978707871787278737874787578767877787878797880788178827883788478857886788778887889789078917892789378947895789678977898789979007901790279037904790579067907790879097910791179127913791479157916791779187919792079217922792379247925792679277928792979307931793279337934793579367937793879397940794179427943794479457946794779487949795079517952795379547955795679577958795979607961796279637964796579667967796879697970797179727973797479757976797779787979798079817982798379847985798679877988798979907991799279937994799579967997799879998000800180028003800480058006800780088009801080118012801380148015801680178018801980208021802280238024802580268027802880298030803180328033803480358036803780388039804080418042804380448045804680478048804980508051805280538054805580568057805880598060806180628063806480658066806780688069807080718072807380748075807680778078807980808081808280838084808580868087808880898090809180928093809480958096809780988099810081018102810381048105810681078108810981108111811281138114811581168117811881198120812181228123812481258126812781288129813081318132813381348135813681378138813981408141814281438144814581468147814881498150815181528153815481558156815781588159816081618162816381648165816681678168816981708171817281738174817581768177817881798180818181828183818481858186818781888189819081918192819381948195819681978198819982008201820282038204820582068207820882098210821182128213821482158216821782188219822082218222822382248225822682278228822982308231823282338234823582368237823882398240824182428243824482458246824782488249825082518252825382548255825682578258825982608261826282638264826582668267826882698270827182728273827482758276827
78278827982808281828282838284828582868287828882898290829182928293829482958296829782988299830083018302830383048305830683078308830983108311831283138314831583168317831883198320832183228323832483258326832783288329833083318332833383348335833683378338833983408341834283438344834583468347834883498350835183528353835483558356835783588359836083618362836383648365836683678368836983708371837283738374837583768377837883798380838183828383838483858386838783888389839083918392839383948395839683978398839984008401840284038404840584068407840884098410841184128413841484158416841784188419842084218422842384248425842684278428842984308431843284338434843584368437843884398440844184428443844484458446844784488449845084518452845384548455845684578458845984608461846284638464846584668467846884698470847184728473847484758476847784788479848084818482848384848485848684878488848984908491849284938494849584968497849884998500850185028503850485058506850785088509851085118512851385148515851685178518851985208521852285238524852585268527852885298530853185328533853485358536853785388539854085418542854385448545854685478548854985508551855285538554855585568557855885598560856185628563856485658566856785688569857085718572857385748575857685778578857985808581858285838584858585868587858885898590859185928593859485958596859785988599860086018602860386048605860686078608860986108611861286138614861586168617861886198620862186228623862486258626862786288629863086318632863386348635863686378638863986408641864286438644864586468647864886498650865186528653865486558656865786588659866086618662866386648665866686678668866986708671867286738674867586768677867886798680868186828683868486858686868786888689869086918692869386948695869686978698869987008701870287038704870587068707870887098710871187128713871487158716871787188719872087218722872387248725872687278728872987308731873287338734873587368737873887398740874187428743874487458746874787488749875087518752875387548755875687578758875987608761876287638764876587668767876887698770877187728773877487758776877
78778877987808781878287838784878587868787878887898790879187928793879487958796879787988799880088018802880388048805880688078808880988108811881288138814881588168817881888198820882188228823882488258826882788288829883088318832883388348835883688378838883988408841884288438844884588468847884888498850885188528853885488558856885788588859886088618862886388648865886688678868886988708871887288738874887588768877887888798880888188828883888488858886888788888889889088918892889388948895889688978898889989008901890289038904890589068907890889098910891189128913891489158916891789188919892089218922892389248925892689278928892989308931893289338934893589368937893889398940894189428943894489458946894789488949895089518952895389548955895689578958895989608961896289638964896589668967896889698970897189728973897489758976897789788979898089818982898389848985898689878988898989908991899289938994899589968997899889999000900190029003900490059006900790089009901090119012901390149015901690179018901990209021902290239024902590269027902890299030903190329033903490359036903790389039904090419042904390449045904690479048904990509051905290539054905590569057905890599060906190629063906490659066906790689069907090719072907390749075907690779078907990809081908290839084908590869087908890899090909190929093909490959096909790989099910091019102910391049105910691079108910991109111911291139114911591169117911891199120912191229123912491259126912791289129913091319132913391349135913691379138913991409141914291439144914591469147914891499150915191529153915491559156915791589159916091619162916391649165916691679168916991709171917291739174917591769177917891799180918191829183918491859186918791889189919091919192919391949195919691979198919992009201920292039204920592069207920892099210921192129213921492159216921792189219922092219222922392249225922692279228922992309231923292339234923592369237923892399240924192429243924492459246924792489249925092519252925392549255925692579258925992609261926292639264926592669267926892699270927192729273927492759276927
79278927992809281928292839284928592869287928892899290929192929293929492959296929792989299930093019302930393049305930693079308930993109311931293139314931593169317931893199320932193229323932493259326932793289329933093319332933393349335933693379338933993409341934293439344934593469347934893499350935193529353935493559356935793589359936093619362936393649365936693679368936993709371937293739374937593769377937893799380938193829383938493859386938793889389939093919392939393949395939693979398939994009401940294039404940594069407940894099410941194129413941494159416941794189419942094219422942394249425942694279428942994309431943294339434943594369437943894399440944194429443944494459446944794489449945094519452945394549455945694579458945994609461946294639464946594669467946894699470947194729473947494759476947794789479948094819482948394849485948694879488948994909491949294939494949594969497949894999500950195029503950495059506950795089509951095119512951395149515951695179518951995209521952295239524952595269527952895299530953195329533953495359536953795389539954095419542954395449545954695479548954995509551955295539554955595569557955895599560956195629563956495659566956795689569957095719572957395749575957695779578957995809581958295839584958595869587958895899590959195929593959495959596959795989599960096019602960396049605960696079608960996109611961296139614961596169617961896199620962196229623962496259626962796289629963096319632963396349635963696379638963996409641964296439644964596469647964896499650965196529653965496559656965796589659966096619662966396649665966696679668966996709671967296739674967596769677967896799680968196829683968496859686968796889689969096919692969396949695969696979698969997009701970297039704970597069707970897099710971197129713971497159716971797189719972097219722972397249725972697279728972997309731973297339734973597369737973897399740974197429743974497459746974797489749975097519752975397549755975697579758975997609761976297639764976597669767976897699770977197729773977497759776977
79778977997809781978297839784978597869787978897899790979197929793979497959796979797989799980098019802980398049805980698079808980998109811981298139814981598169817981898199820982198229823982498259826982798289829983098319832983398349835983698379838983998409841984298439844984598469847984898499850985198529853985498559856985798589859986098619862986398649865986698679868986998709871987298739874987598769877987898799880988198829883988498859886988798889889989098919892989398949895989698979898989999009901990299039904990599069907990899099910991199129913991499159916991799189919992099219922992399249925992699279928992999309931993299339934993599369937993899399940994199429943994499459946994799489949995099519952995399549955995699579958995999609961996299639964996599669967996899699970997199729973997499759976997799789979998099819982998399849985998699879988998999909991999299939994999599969997999899991000010001100021000310004100051000610007100081000910010100111001210013100141001510016100171001810019100201002110022100231002410025100261002710028100291003010031100321003310034100351003610037100381003910040100411004210043100441004510046100471004810049100501005110052100531005410055100561005710058100591006010061100621006310064100651006610067100681006910070100711007210073100741007510076100771007810079100801008110082100831008410085100861008710088100891009010091100921009310094100951009610097100981009910100101011010210103101041010510106101071010810109101101011110112101131011410115101161011710118101191012010121101221012310124101251012610127101281012910130101311013210133101341013510136101371013810139101401014110142101431014410145101461014710148101491015010151101521015310154101551015610157101581015910160101611016210163101641016510166101671016810169101701017110172101731017410175101761017710178101791018010181101821018310184101851018610187101881018910190101911019210193101941019510196101971019810199102001020110202102031020410205102061020710208102091021010211102121021310214102151021610217102181021910220102211
02221022310224102251022610227102281022910230102311023210233102341023510236102371023810239102401024110242102431024410245102461024710248102491025010251102521025310254102551025610257102581025910260102611026210263102641026510266102671026810269102701027110272102731027410275102761027710278102791028010281102821028310284102851028610287102881028910290102911029210293102941029510296102971029810299103001030110302103031030410305103061030710308103091031010311103121031310314103151031610317103181031910320103211032210323103241032510326103271032810329103301033110332103331033410335103361033710338103391034010341103421034310344103451034610347103481034910350103511035210353103541035510356103571035810359103601036110362103631036410365103661036710368103691037010371103721037310374103751037610377103781037910380103811038210383103841038510386103871038810389103901039110392103931039410395103961039710398103991040010401104021040310404104051040610407104081040910410104111041210413104141041510416104171041810419104201042110422104231042410425104261042710428104291043010431104321043310434104351043610437104381043910440104411044210443104441044510446104471044810449104501045110452104531045410455104561045710458104591046010461104621046310464104651046610467104681046910470104711047210473104741047510476104771047810479104801048110482104831048410485104861048710488104891049010491104921049310494104951049610497104981049910500105011050210503105041050510506105071050810509105101051110512105131051410515105161051710518105191052010521105221052310524105251052610527105281052910530105311053210533105341053510536105371053810539105401054110542105431054410545105461054710548105491055010551105521055310554105551055610557105581055910560105611056210563105641056510566105671056810569105701057110572105731057410575105761057710578105791058010581105821058310584105851058610587105881058910590105911059210593105941059510596105971059810599106001060110602106031060410605106061060710608106091061010611106121061310614106151061610617106181061910620106211
06221062310624106251062610627106281062910630106311063210633106341063510636106371063810639106401064110642106431064410645106461064710648106491065010651106521065310654106551065610657106581065910660106611066210663106641066510666106671066810669106701067110672106731067410675106761067710678106791068010681106821068310684106851068610687106881068910690106911069210693106941069510696106971069810699107001070110702107031070410705107061070710708107091071010711107121071310714107151071610717107181071910720107211072210723107241072510726107271072810729107301073110732107331073410735107361073710738107391074010741107421074310744107451074610747107481074910750107511075210753107541075510756107571075810759107601076110762107631076410765107661076710768107691077010771107721077310774107751077610777107781077910780107811078210783107841078510786107871078810789107901079110792107931079410795107961079710798107991080010801108021080310804108051080610807108081080910810108111081210813108141081510816108171081810819108201082110822108231082410825108261082710828108291083010831108321083310834108351083610837108381083910840108411084210843108441084510846108471084810849108501085110852108531085410855108561085710858108591086010861108621086310864108651086610867108681086910870108711087210873108741087510876108771087810879108801088110882108831088410885108861088710888108891089010891108921089310894108951089610897108981089910900109011090210903109041090510906109071090810909109101091110912109131091410915109161091710918109191092010921109221092310924109251092610927109281092910930109311093210933109341093510936109371093810939109401094110942109431094410945109461094710948109491095010951109521095310954109551095610957109581095910960109611096210963109641096510966109671096810969109701097110972109731097410975109761097710978109791098010981109821098310984109851098610987109881098910990109911099210993109941099510996109971099810999110001100111002110031100411005110061100711008110091101011011110121101311014110151101611017110181101911020110211
10221102311024110251102611027110281102911030110311103211033110341103511036110371103811039110401104111042110431104411045110461104711048110491105011051110521105311054110551105611057110581105911060110611106211063110641106511066110671106811069110701107111072110731107411075110761107711078110791108011081110821108311084110851108611087110881108911090110911109211093110941109511096110971109811099111001110111102111031110411105111061110711108111091111011111111121111311114111151111611117111181111911120111211112211123111241112511126111271112811129111301113111132111331113411135111361113711138111391114011141111421114311144111451114611147111481114911150111511115211153111541115511156111571115811159111601116111162111631116411165111661116711168111691117011171111721117311174111751117611177111781117911180111811118211183111841118511186111871118811189111901119111192111931119411195111961119711198111991120011201112021120311204112051120611207112081120911210112111121211213112141121511216112171121811219112201122111222112231122411225112261122711228112291123011231112321123311234112351123611237112381123911240112411124211243112441124511246112471124811249112501125111252112531125411255112561125711258112591126011261112621126311264112651126611267112681126911270112711127211273112741127511276112771127811279112801128111282112831128411285112861128711288112891129011291112921129311294112951129611297112981129911300113011130211303113041130511306113071130811309113101131111312113131131411315113161131711318113191132011321113221132311324113251132611327113281132911330113311133211333113341133511336113371133811339113401134111342113431134411345113461134711348113491135011351113521135311354113551135611357113581135911360113611136211363113641136511366113671136811369113701137111372113731137411375113761137711378113791138011381113821138311384113851138611387113881138911390113911139211393113941139511396113971139811399114001140111402114031140411405114061140711408114091141011411114121141311414114151141611417114181141911420114211
14221142311424114251142611427114281142911430114311143211433114341143511436114371143811439114401144111442114431144411445114461144711448114491145011451114521145311454114551145611457114581145911460114611146211463114641146511466114671146811469114701147111472114731147411475114761147711478114791148011481114821148311484114851148611487114881148911490114911149211493114941149511496114971149811499115001150111502115031150411505115061150711508115091151011511115121151311514115151151611517115181151911520115211152211523115241152511526115271152811529115301153111532115331153411535115361153711538115391154011541115421154311544115451154611547115481154911550115511155211553115541155511556115571155811559115601156111562115631156411565115661156711568115691157011571115721157311574115751157611577115781157911580115811158211583115841158511586115871158811589115901159111592115931159411595115961159711598115991160011601116021160311604116051160611607116081160911610116111161211613116141161511616116171161811619116201162111622116231162411625116261162711628116291163011631116321163311634116351163611637116381163911640116411164211643116441164511646116471164811649116501165111652116531165411655116561165711658116591166011661116621166311664116651166611667116681166911670116711167211673116741167511676116771167811679116801168111682116831168411685116861168711688116891169011691116921169311694116951169611697116981169911700117011170211703117041170511706117071170811709117101171111712117131171411715117161171711718117191172011721117221172311724117251172611727117281172911730117311173211733117341173511736117371173811739117401174111742117431174411745117461174711748117491175011751117521175311754117551175611757117581175911760117611176211763117641176511766117671176811769117701177111772117731177411775117761177711778117791178011781117821178311784117851178611787117881178911790117911179211793117941179511796117971179811799118001180111802118031180411805118061180711808118091181011811118121181311814118151181611817118181181911820118211
18221182311824118251182611827118281182911830118311183211833118341183511836118371183811839118401184111842118431184411845118461184711848118491185011851118521185311854118551185611857118581185911860118611186211863118641186511866118671186811869118701187111872118731187411875118761187711878118791188011881118821188311884118851188611887118881188911890118911189211893118941189511896118971189811899119001190111902119031190411905119061190711908119091191011911119121191311914119151191611917119181191911920119211192211923119241192511926119271192811929119301193111932119331193411935119361193711938119391194011941119421194311944119451194611947119481194911950119511195211953119541195511956119571195811959119601196111962119631196411965119661196711968119691197011971119721197311974119751197611977119781197911980119811198211983119841198511986119871198811989119901199111992119931199411995119961199711998119991200012001120021200312004120051200612007120081200912010120111201212013120141201512016120171201812019120201202112022120231202412025120261202712028120291203012031120321203312034120351203612037120381203912040120411204212043120441204512046120471204812049120501205112052120531205412055120561205712058120591206012061120621206312064120651206612067120681206912070120711207212073120741207512076120771207812079120801208112082120831208412085120861208712088120891209012091120921209312094120951209612097120981209912100121011210212103121041210512106121071210812109121101211112112121131211412115121161211712118121191212012121121221212312124121251212612127121281212912130121311213212133121341213512136121371213812139121401214112142121431214412145121461214712148121491215012151121521215312154121551215612157121581215912160121611216212163121641216512166121671216812169121701217112172121731217412175121761217712178121791218012181121821218312184121851218612187121881218912190121911219212193121941219512196121971219812199122001220112202122031220412205122061220712208122091221012211122121221312214122151221612217122181221912220122211
22221222312224122251222612227122281222912230122311223212233122341223512236122371223812239122401224112242122431224412245122461224712248122491225012251122521225312254122551225612257122581225912260122611226212263122641226512266122671226812269122701227112272122731227412275122761227712278122791228012281122821228312284122851228612287122881228912290122911229212293122941229512296122971229812299123001230112302123031230412305123061230712308123091231012311123121231312314123151231612317123181231912320123211232212323123241232512326123271232812329123301233112332123331233412335123361233712338123391234012341123421234312344123451234612347123481234912350123511235212353123541235512356123571235812359123601236112362123631236412365123661236712368123691237012371123721237312374123751237612377123781237912380123811238212383123841238512386123871238812389123901239112392123931239412395123961239712398123991240012401124021240312404124051240612407124081240912410124111241212413124141241512416124171241812419124201242112422124231242412425124261242712428124291243012431124321243312434124351243612437124381243912440124411244212443124441244512446124471244812449124501245112452124531245412455124561245712458124591246012461124621246312464124651246612467124681246912470124711247212473124741247512476124771247812479124801248112482124831248412485124861248712488124891249012491124921249312494124951249612497124981249912500125011250212503125041250512506125071250812509125101251112512125131251412515125161251712518125191252012521125221252312524125251252612527125281252912530125311253212533125341253512536125371253812539125401254112542125431254412545125461254712548125491255012551125521255312554125551255612557125581255912560125611256212563125641256512566125671256812569125701257112572125731257412575125761257712578125791258012581125821258312584125851258612587125881258912590125911259212593125941259512596125971259812599126001260112602126031260412605126061260712608126091261012611126121261312614126151261612617126181261912620126211
26221262312624126251262612627126281262912630126311263212633126341263512636126371263812639126401264112642126431264412645126461264712648126491265012651126521265312654126551265612657126581265912660126611266212663126641266512666126671266812669126701267112672126731267412675126761267712678126791268012681126821268312684126851268612687126881268912690126911269212693126941269512696126971269812699127001270112702127031270412705127061270712708127091271012711127121271312714127151271612717127181271912720127211272212723127241272512726127271272812729127301273112732127331273412735127361273712738127391274012741127421274312744127451274612747127481274912750127511275212753127541275512756127571275812759127601276112762127631276412765127661276712768127691277012771127721277312774127751277612777127781277912780127811278212783127841278512786127871278812789127901279112792127931279412795127961279712798127991280012801128021280312804128051280612807128081280912810128111281212813128141281512816128171281812819128201282112822128231282412825128261282712828128291283012831128321283312834128351283612837128381283912840128411284212843128441284512846128471284812849128501285112852128531285412855128561285712858128591286012861128621286312864128651286612867128681286912870128711287212873128741287512876128771287812879128801288112882128831288412885128861288712888128891289012891128921289312894128951289612897128981289912900129011290212903129041290512906129071290812909129101291112912129131291412915129161291712918129191292012921129221292312924129251292612927129281292912930129311293212933129341293512936129371293812939129401294112942129431294412945129461294712948129491295012951129521295312954129551295612957129581295912960129611296212963129641296512966129671296812969129701297112972129731297412975129761297712978129791298012981129821298312984129851298612987129881298912990129911299212993129941299512996129971299812999130001300113002130031300413005130061300713008130091301013011130121301313014130151301613017130181301913020130211
30221302313024130251302613027130281302913030130311303213033130341303513036130371303813039130401304113042130431304413045130461304713048130491305013051130521305313054130551305613057130581305913060130611306213063130641306513066130671306813069130701307113072130731307413075130761307713078130791308013081130821308313084130851308613087130881308913090130911309213093130941309513096130971309813099131001310113102131031310413105131061310713108131091311013111131121311313114131151311613117131181311913120131211312213123131241312513126131271312813129131301313113132131331313413135131361313713138131391314013141131421314313144131451314613147131481314913150131511315213153131541315513156131571315813159131601316113162131631316413165131661316713168131691317013171131721317313174131751317613177131781317913180131811318213183131841318513186131871318813189131901319113192131931319413195131961319713198131991320013201132021320313204132051320613207132081320913210132111321213213132141321513216132171321813219132201322113222132231322413225132261322713228132291323013231132321323313234132351323613237132381323913240132411324213243132441324513246132471324813249132501325113252132531325413255132561325713258132591326013261132621326313264132651326613267132681326913270132711327213273132741327513276132771327813279132801328113282132831328413285132861328713288132891329013291132921329313294132951329613297132981329913300133011330213303133041330513306133071330813309133101331113312133131331413315133161331713318133191332013321133221332313324133251332613327133281332913330133311333213333133341333513336133371333813339133401334113342133431334413345133461334713348133491335013351133521335313354133551335613357133581335913360133611336213363133641336513366133671336813369133701337113372133731337413375133761337713378133791338013381133821338313384133851338613387133881338913390133911339213393133941339513396133971339813399134001340113402134031340413405134061340713408134091341013411134121341313414134151341613417134181341913420134211
34221342313424134251342613427134281342913430134311343213433134341343513436134371343813439134401344113442134431344413445134461344713448134491345013451134521345313454134551345613457134581345913460134611346213463134641346513466134671346813469134701347113472134731347413475134761347713478134791348013481134821348313484134851348613487134881348913490134911349213493134941349513496134971349813499135001350113502135031350413505135061350713508135091351013511135121351313514135151351613517135181351913520135211352213523135241352513526135271352813529135301353113532135331353413535135361353713538135391354013541135421354313544135451354613547135481354913550135511355213553135541355513556135571355813559135601356113562135631356413565135661356713568135691357013571135721357313574135751357613577135781357913580135811358213583135841358513586135871358813589135901359113592135931359413595135961359713598135991360013601136021360313604136051360613607136081360913610136111361213613136141361513616136171361813619136201362113622136231362413625136261362713628136291363013631136321363313634136351363613637136381363913640136411364213643136441364513646136471364813649136501365113652136531365413655136561365713658136591366013661136621366313664136651366613667136681366913670136711367213673136741367513676136771367813679136801368113682136831368413685136861368713688136891369013691136921369313694136951369613697136981369913700137011370213703137041370513706137071370813709137101371113712137131371413715137161371713718137191372013721137221372313724137251372613727137281372913730137311373213733137341373513736137371373813739137401374113742137431374413745137461374713748137491375013751137521375313754137551375613757137581375913760137611376213763137641376513766137671376813769137701377113772137731377413775137761377713778137791378013781137821378313784137851378613787137881378913790137911379213793137941379513796137971379813799138001380113802138031380413805138061380713808138091381013811138121381313814138151381613817138181381913820138211
382213823138241382513826138271382813829138301383113832138331383413835138361383713838138391384013841138421384313844138451384613847138481384913850138511385213853138541385513856138571385813859138601386113862138631386413865138661386713868138691387013871138721387313874138751387613877138781387913880138811388213883138841388513886138871388813889138901389113892138931389413895138961389713898138991390013901139021390313904139051390613907139081390913910139111391213913139141391513916139171391813919
/*
 * Linux syscalls
 *
 * Copyright (c) 2003 Fabrice Bellard
 *
 * This program is free software; you can redistribute it and/or modify
 * it under the terms of the GNU General Public License as published by
 * the Free Software Foundation; either version 2 of the License, or
 * (at your option) any later version.
 *
 * This program is distributed in the hope that it will be useful,
 * but WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
 * GNU General Public License for more details.
 *
 * You should have received a copy of the GNU General Public License
 * along with this program; if not, see <http://www.gnu.org/licenses/>.
 */
  19. #define _ATFILE_SOURCE
  20. #include "qemu/osdep.h"
  21. #include "qemu/cutils.h"
  22. #include "qemu/path.h"
  23. #include "qemu/memfd.h"
  24. #include "qemu/queue.h"
  25. #include "qemu/plugin.h"
  26. #include "tcg/startup.h"
  27. #include "target_mman.h"
  28. #include "exec/page-protection.h"
  29. #include "exec/tb-flush.h"
  30. #include "exec/translation-block.h"
  31. #include <elf.h>
  32. #include <endian.h>
  33. #include <grp.h>
  34. #include <sys/ipc.h>
  35. #include <sys/msg.h>
  36. #include <sys/wait.h>
  37. #include <sys/mount.h>
  38. #include <sys/file.h>
  39. #include <sys/fsuid.h>
  40. #include <sys/personality.h>
  41. #include <sys/prctl.h>
  42. #include <sys/resource.h>
  43. #include <sys/swap.h>
  44. #include <linux/capability.h>
  45. #include <sched.h>
  46. #include <sys/timex.h>
  47. #include <sys/socket.h>
  48. #include <linux/sockios.h>
  49. #include <sys/un.h>
  50. #include <sys/uio.h>
  51. #include <poll.h>
  52. #include <sys/times.h>
  53. #include <sys/shm.h>
  54. #include <sys/sem.h>
  55. #include <sys/statfs.h>
  56. #include <utime.h>
  57. #include <sys/sysinfo.h>
  58. #include <sys/signalfd.h>
  59. #include <netinet/in.h>
  60. #include <netinet/ip.h>
  61. #include <netinet/tcp.h>
  62. #include <netinet/udp.h>
  63. #include <linux/wireless.h>
  64. #include <linux/icmp.h>
  65. #include <linux/icmpv6.h>
  66. #include <linux/if_tun.h>
  67. #include <linux/in6.h>
  68. #include <linux/errqueue.h>
  69. #include <linux/random.h>
  70. #ifdef CONFIG_TIMERFD
  71. #include <sys/timerfd.h>
  72. #endif
  73. #ifdef CONFIG_EVENTFD
  74. #include <sys/eventfd.h>
  75. #endif
  76. #ifdef CONFIG_EPOLL
  77. #include <sys/epoll.h>
  78. #endif
  79. #ifdef CONFIG_ATTR
  80. #include "qemu/xattr.h"
  81. #endif
  82. #ifdef CONFIG_SENDFILE
  83. #include <sys/sendfile.h>
  84. #endif
  85. #ifdef HAVE_SYS_KCOV_H
  86. #include <sys/kcov.h>
  87. #endif
  88. #define termios host_termios
  89. #define winsize host_winsize
  90. #define termio host_termio
  91. #define sgttyb host_sgttyb /* same as target */
  92. #define tchars host_tchars /* same as target */
  93. #define ltchars host_ltchars /* same as target */
  94. #include <linux/termios.h>
  95. #include <linux/unistd.h>
  96. #include <linux/cdrom.h>
  97. #include <linux/hdreg.h>
  98. #include <linux/soundcard.h>
  99. #include <linux/kd.h>
  100. #include <linux/mtio.h>
  101. #include <linux/fs.h>
  102. #include <linux/fd.h>
  103. #if defined(CONFIG_FIEMAP)
  104. #include <linux/fiemap.h>
  105. #endif
  106. #include <linux/fb.h>
  107. #if defined(CONFIG_USBFS)
  108. #include <linux/usbdevice_fs.h>
  109. #include <linux/usb/ch9.h>
  110. #endif
  111. #include <linux/vt.h>
  112. #include <linux/dm-ioctl.h>
  113. #include <linux/reboot.h>
  114. #include <linux/route.h>
  115. #include <linux/filter.h>
  116. #include <linux/blkpg.h>
  117. #include <netpacket/packet.h>
  118. #include <linux/netlink.h>
  119. #include <linux/if_alg.h>
  120. #include <linux/rtc.h>
  121. #include <sound/asound.h>
  122. #ifdef HAVE_BTRFS_H
  123. #include <linux/btrfs.h>
  124. #endif
  125. #ifdef HAVE_DRM_H
  126. #include <libdrm/drm.h>
  127. #include <libdrm/i915_drm.h>
  128. #endif
  129. #include "linux_loop.h"
  130. #include "uname.h"
  131. #include "qemu.h"
  132. #include "user-internals.h"
  133. #include "strace.h"
  134. #include "signal-common.h"
  135. #include "loader.h"
  136. #include "user-mmap.h"
  137. #include "user/page-protection.h"
  138. #include "user/safe-syscall.h"
  139. #include "user/signal.h"
  140. #include "qemu/guest-random.h"
  141. #include "qemu/selfmap.h"
  142. #include "user/syscall-trace.h"
  143. #include "special-errno.h"
  144. #include "qapi/error.h"
  145. #include "fd-trans.h"
  146. #include "user/cpu_loop.h"
  147. #ifndef CLONE_IO
  148. #define CLONE_IO 0x80000000 /* Clone io context */
  149. #endif
  150. /* We can't directly call the host clone syscall, because this will
  151. * badly confuse libc (breaking mutexes, for example). So we must
  152. * divide clone flags into:
  153. * * flag combinations that look like pthread_create()
  154. * * flag combinations that look like fork()
  155. * * flags we can implement within QEMU itself
  156. * * flags we can't support and will return an error for
  157. */
  158. /* For thread creation, all these flags must be present; for
  159. * fork, none must be present.
  160. */
  161. #define CLONE_THREAD_FLAGS \
  162. (CLONE_VM | CLONE_FS | CLONE_FILES | \
  163. CLONE_SIGHAND | CLONE_THREAD | CLONE_SYSVSEM)
  164. /* These flags are ignored:
  165. * CLONE_DETACHED is now ignored by the kernel;
  166. * CLONE_IO is just an optimisation hint to the I/O scheduler
  167. */
  168. #define CLONE_IGNORED_FLAGS \
  169. (CLONE_DETACHED | CLONE_IO)
  170. #ifndef CLONE_PIDFD
  171. # define CLONE_PIDFD 0x00001000
  172. #endif
  173. /* Flags for fork which we can implement within QEMU itself */
  174. #define CLONE_OPTIONAL_FORK_FLAGS \
  175. (CLONE_SETTLS | CLONE_PARENT_SETTID | CLONE_PIDFD | \
  176. CLONE_CHILD_CLEARTID | CLONE_CHILD_SETTID)
  177. /* Flags for thread creation which we can implement within QEMU itself */
  178. #define CLONE_OPTIONAL_THREAD_FLAGS \
  179. (CLONE_SETTLS | CLONE_PARENT_SETTID | \
  180. CLONE_CHILD_CLEARTID | CLONE_CHILD_SETTID | CLONE_PARENT)
  181. #define CLONE_INVALID_FORK_FLAGS \
  182. (~(CSIGNAL | CLONE_OPTIONAL_FORK_FLAGS | CLONE_IGNORED_FLAGS))
  183. #define CLONE_INVALID_THREAD_FLAGS \
  184. (~(CSIGNAL | CLONE_THREAD_FLAGS | CLONE_OPTIONAL_THREAD_FLAGS | \
  185. CLONE_IGNORED_FLAGS))
  186. /* CLONE_VFORK is special cased early in do_fork(). The other flag bits
  187. * have almost all been allocated. We cannot support any of
  188. * CLONE_NEWNS, CLONE_NEWCGROUP, CLONE_NEWUTS, CLONE_NEWIPC,
  189. * CLONE_NEWUSER, CLONE_NEWPID, CLONE_NEWNET, CLONE_PTRACE, CLONE_UNTRACED.
  190. * The checks against the invalid thread masks above will catch these.
  191. * (The one remaining unallocated bit is 0x1000 which used to be CLONE_PID.)
  192. */
  193. /* Define DEBUG_ERESTARTSYS to force every syscall to be restarted
  194. * once. This exercises the codepaths for restart.
  195. */
  196. //#define DEBUG_ERESTARTSYS
  197. //#include <linux/msdos_fs.h>
  198. #define VFAT_IOCTL_READDIR_BOTH \
  199. _IOC(_IOC_READ, 'r', 1, (sizeof(struct linux_dirent) + 256) * 2)
  200. #define VFAT_IOCTL_READDIR_SHORT \
  201. _IOC(_IOC_READ, 'r', 2, (sizeof(struct linux_dirent) + 256) * 2)
  202. #undef _syscall0
  203. #undef _syscall1
  204. #undef _syscall2
  205. #undef _syscall3
  206. #undef _syscall4
  207. #undef _syscall5
  208. #undef _syscall6
  209. #define _syscall0(type,name) \
  210. static type name (void) \
  211. { \
  212. return syscall(__NR_##name); \
  213. }
  214. #define _syscall1(type,name,type1,arg1) \
  215. static type name (type1 arg1) \
  216. { \
  217. return syscall(__NR_##name, arg1); \
  218. }
  219. #define _syscall2(type,name,type1,arg1,type2,arg2) \
  220. static type name (type1 arg1,type2 arg2) \
  221. { \
  222. return syscall(__NR_##name, arg1, arg2); \
  223. }
  224. #define _syscall3(type,name,type1,arg1,type2,arg2,type3,arg3) \
  225. static type name (type1 arg1,type2 arg2,type3 arg3) \
  226. { \
  227. return syscall(__NR_##name, arg1, arg2, arg3); \
  228. }
  229. #define _syscall4(type,name,type1,arg1,type2,arg2,type3,arg3,type4,arg4) \
  230. static type name (type1 arg1,type2 arg2,type3 arg3,type4 arg4) \
  231. { \
  232. return syscall(__NR_##name, arg1, arg2, arg3, arg4); \
  233. }
  234. #define _syscall5(type,name,type1,arg1,type2,arg2,type3,arg3,type4,arg4, \
  235. type5,arg5) \
  236. static type name (type1 arg1,type2 arg2,type3 arg3,type4 arg4,type5 arg5) \
  237. { \
  238. return syscall(__NR_##name, arg1, arg2, arg3, arg4, arg5); \
  239. }
  240. #define _syscall6(type,name,type1,arg1,type2,arg2,type3,arg3,type4,arg4, \
  241. type5,arg5,type6,arg6) \
  242. static type name (type1 arg1,type2 arg2,type3 arg3,type4 arg4,type5 arg5, \
  243. type6 arg6) \
  244. { \
  245. return syscall(__NR_##name, arg1, arg2, arg3, arg4, arg5, arg6); \
  246. }
  247. #define __NR_sys_uname __NR_uname
  248. #define __NR_sys_getcwd1 __NR_getcwd
  249. #define __NR_sys_getdents __NR_getdents
  250. #define __NR_sys_getdents64 __NR_getdents64
  251. #define __NR_sys_getpriority __NR_getpriority
  252. #define __NR_sys_rt_sigqueueinfo __NR_rt_sigqueueinfo
  253. #define __NR_sys_rt_tgsigqueueinfo __NR_rt_tgsigqueueinfo
  254. #define __NR_sys_syslog __NR_syslog
  255. #if defined(__NR_futex)
  256. # define __NR_sys_futex __NR_futex
  257. #endif
  258. #if defined(__NR_futex_time64)
  259. # define __NR_sys_futex_time64 __NR_futex_time64
  260. #endif
  261. #define __NR_sys_statx __NR_statx
  262. #if defined(__alpha__) || defined(__x86_64__) || defined(__s390x__)
  263. #define __NR__llseek __NR_lseek
  264. #endif
  265. /* Newer kernel ports have llseek() instead of _llseek() */
  266. #if defined(TARGET_NR_llseek) && !defined(TARGET_NR__llseek)
  267. #define TARGET_NR__llseek TARGET_NR_llseek
  268. #endif
  269. /* some platforms need to mask more bits than just TARGET_O_NONBLOCK */
  270. #ifndef TARGET_O_NONBLOCK_MASK
  271. #define TARGET_O_NONBLOCK_MASK TARGET_O_NONBLOCK
  272. #endif
  273. #define __NR_sys_gettid __NR_gettid
  274. _syscall0(int, sys_gettid)
  275. /* For the 64-bit guest on 32-bit host case we must emulate
  276. * getdents using getdents64, because otherwise the host
  277. * might hand us back more dirent records than we can fit
  278. * into the guest buffer after structure format conversion.
  279. * Otherwise we emulate getdents with getdents if the host has it.
  280. */
  281. #if defined(__NR_getdents) && HOST_LONG_BITS >= TARGET_ABI_BITS
  282. #define EMULATE_GETDENTS_WITH_GETDENTS
  283. #endif
  284. #if defined(TARGET_NR_getdents) && defined(EMULATE_GETDENTS_WITH_GETDENTS)
  285. _syscall3(int, sys_getdents, unsigned int, fd, struct linux_dirent *, dirp, unsigned int, count);
  286. #endif
  287. #if (defined(TARGET_NR_getdents) && \
  288. !defined(EMULATE_GETDENTS_WITH_GETDENTS)) || \
  289. (defined(TARGET_NR_getdents64) && defined(__NR_getdents64))
  290. _syscall3(int, sys_getdents64, unsigned int, fd, struct linux_dirent64 *, dirp, unsigned int, count);
  291. #endif
  292. #if defined(TARGET_NR__llseek) && defined(__NR_llseek)
  293. _syscall5(int, _llseek, unsigned int, fd, unsigned long, hi, unsigned long, lo,
  294. loff_t *, res, unsigned int, wh);
  295. #endif
  296. _syscall3(int, sys_rt_sigqueueinfo, pid_t, pid, int, sig, siginfo_t *, uinfo)
  297. _syscall4(int, sys_rt_tgsigqueueinfo, pid_t, pid, pid_t, tid, int, sig,
  298. siginfo_t *, uinfo)
  299. _syscall3(int,sys_syslog,int,type,char*,bufp,int,len)
  300. #ifdef __NR_exit_group
  301. _syscall1(int,exit_group,int,error_code)
  302. #endif
  303. #if defined(__NR_close_range) && defined(TARGET_NR_close_range)
  304. #define __NR_sys_close_range __NR_close_range
  305. _syscall3(int,sys_close_range,int,first,int,last,int,flags)
  306. #ifndef CLOSE_RANGE_CLOEXEC
  307. #define CLOSE_RANGE_CLOEXEC (1U << 2)
  308. #endif
  309. #endif
  310. #if defined(__NR_futex)
  311. _syscall6(int,sys_futex,int *,uaddr,int,op,int,val,
  312. const struct timespec *,timeout,int *,uaddr2,int,val3)
  313. #endif
  314. #if defined(__NR_futex_time64)
  315. _syscall6(int,sys_futex_time64,int *,uaddr,int,op,int,val,
  316. const struct timespec *,timeout,int *,uaddr2,int,val3)
  317. #endif
  318. #if defined(__NR_pidfd_open) && defined(TARGET_NR_pidfd_open)
  319. _syscall2(int, pidfd_open, pid_t, pid, unsigned int, flags);
  320. #endif
  321. #if defined(__NR_pidfd_send_signal) && defined(TARGET_NR_pidfd_send_signal)
  322. _syscall4(int, pidfd_send_signal, int, pidfd, int, sig, siginfo_t *, info,
  323. unsigned int, flags);
  324. #endif
  325. #if defined(__NR_pidfd_getfd) && defined(TARGET_NR_pidfd_getfd)
  326. _syscall3(int, pidfd_getfd, int, pidfd, int, targetfd, unsigned int, flags);
  327. #endif
  328. #define __NR_sys_sched_getaffinity __NR_sched_getaffinity
  329. _syscall3(int, sys_sched_getaffinity, pid_t, pid, unsigned int, len,
  330. unsigned long *, user_mask_ptr);
  331. #define __NR_sys_sched_setaffinity __NR_sched_setaffinity
  332. _syscall3(int, sys_sched_setaffinity, pid_t, pid, unsigned int, len,
  333. unsigned long *, user_mask_ptr);
/* sched_attr is not defined in glibc < 2.41 */
#ifndef SCHED_ATTR_SIZE_VER0
/*
 * Mirror of the kernel's struct sched_attr (see sched_setattr(2)).
 * This is passed straight to the raw sched_{get,set}attr syscalls,
 * so the layout must match the kernel ABI exactly.
 */
struct sched_attr {
    uint32_t size;           /* size of this structure, in bytes */
    uint32_t sched_policy;   /* scheduling policy (SCHED_*) */
    uint64_t sched_flags;    /* SCHED_FLAG_* bits */
    int32_t sched_nice;      /* nice value (SCHED_OTHER/SCHED_BATCH) */
    uint32_t sched_priority; /* static priority (SCHED_FIFO/SCHED_RR) */
    uint64_t sched_runtime;  /* SCHED_DEADLINE parameters, nanoseconds */
    uint64_t sched_deadline;
    uint64_t sched_period;
    uint32_t sched_util_min; /* utilization clamp hints */
    uint32_t sched_util_max;
};
#endif
  349. #define __NR_sys_sched_getattr __NR_sched_getattr
  350. _syscall4(int, sys_sched_getattr, pid_t, pid, struct sched_attr *, attr,
  351. unsigned int, size, unsigned int, flags);
  352. #define __NR_sys_sched_setattr __NR_sched_setattr
  353. _syscall3(int, sys_sched_setattr, pid_t, pid, struct sched_attr *, attr,
  354. unsigned int, flags);
  355. #define __NR_sys_sched_getscheduler __NR_sched_getscheduler
  356. _syscall1(int, sys_sched_getscheduler, pid_t, pid);
  357. #define __NR_sys_sched_setscheduler __NR_sched_setscheduler
  358. _syscall3(int, sys_sched_setscheduler, pid_t, pid, int, policy,
  359. const struct sched_param *, param);
  360. #define __NR_sys_sched_getparam __NR_sched_getparam
  361. _syscall2(int, sys_sched_getparam, pid_t, pid,
  362. struct sched_param *, param);
  363. #define __NR_sys_sched_setparam __NR_sched_setparam
  364. _syscall2(int, sys_sched_setparam, pid_t, pid,
  365. const struct sched_param *, param);
  366. #define __NR_sys_getcpu __NR_getcpu
  367. _syscall3(int, sys_getcpu, unsigned *, cpu, unsigned *, node, void *, tcache);
  368. _syscall4(int, reboot, int, magic1, int, magic2, unsigned int, cmd,
  369. void *, arg);
  370. _syscall2(int, capget, struct __user_cap_header_struct *, header,
  371. struct __user_cap_data_struct *, data);
  372. _syscall2(int, capset, struct __user_cap_header_struct *, header,
  373. struct __user_cap_data_struct *, data);
  374. #if defined(TARGET_NR_ioprio_get) && defined(__NR_ioprio_get)
  375. _syscall2(int, ioprio_get, int, which, int, who)
  376. #endif
  377. #if defined(TARGET_NR_ioprio_set) && defined(__NR_ioprio_set)
  378. _syscall3(int, ioprio_set, int, which, int, who, int, ioprio)
  379. #endif
  380. #if defined(TARGET_NR_getrandom) && defined(__NR_getrandom)
  381. _syscall3(int, getrandom, void *, buf, size_t, buflen, unsigned int, flags)
  382. #endif
  383. #if defined(TARGET_NR_kcmp) && defined(__NR_kcmp)
  384. _syscall5(int, kcmp, pid_t, pid1, pid_t, pid2, int, type,
  385. unsigned long, idx1, unsigned long, idx2)
  386. #endif
  387. /*
  388. * It is assumed that struct statx is architecture independent.
  389. */
  390. #if defined(TARGET_NR_statx) && defined(__NR_statx)
  391. _syscall5(int, sys_statx, int, dirfd, const char *, pathname, int, flags,
  392. unsigned int, mask, struct target_statx *, statxbuf)
  393. #endif
  394. #if defined(TARGET_NR_membarrier) && defined(__NR_membarrier)
  395. _syscall2(int, membarrier, int, cmd, int, flags)
  396. #endif
/*
 * Translation table for open(2)/fcntl(2) file status flags.
 * Each entry is { target_mask, target_bits, host_mask, host_bits }:
 * where (flags & target_mask) == target_bits on the guest side,
 * the host side gets host_bits within host_mask (and vice versa).
 */
static const bitmask_transtbl fcntl_flags_tbl[] = {
    { TARGET_O_ACCMODE,   TARGET_O_WRONLY,    O_ACCMODE,   O_WRONLY,    },
    { TARGET_O_ACCMODE,   TARGET_O_RDWR,      O_ACCMODE,   O_RDWR,      },
    { TARGET_O_CREAT,     TARGET_O_CREAT,     O_CREAT,     O_CREAT,     },
    { TARGET_O_EXCL,      TARGET_O_EXCL,      O_EXCL,      O_EXCL,      },
    { TARGET_O_NOCTTY,    TARGET_O_NOCTTY,    O_NOCTTY,    O_NOCTTY,    },
    { TARGET_O_TRUNC,     TARGET_O_TRUNC,     O_TRUNC,     O_TRUNC,     },
    { TARGET_O_APPEND,    TARGET_O_APPEND,    O_APPEND,    O_APPEND,    },
    { TARGET_O_NONBLOCK,  TARGET_O_NONBLOCK,  O_NONBLOCK,  O_NONBLOCK,  },
    { TARGET_O_SYNC,      TARGET_O_DSYNC,     O_SYNC,      O_DSYNC,     },
    { TARGET_O_SYNC,      TARGET_O_SYNC,      O_SYNC,      O_SYNC,      },
    { TARGET_FASYNC,      TARGET_FASYNC,      FASYNC,      FASYNC,      },
    { TARGET_O_DIRECTORY, TARGET_O_DIRECTORY, O_DIRECTORY, O_DIRECTORY, },
    { TARGET_O_NOFOLLOW,  TARGET_O_NOFOLLOW,  O_NOFOLLOW,  O_NOFOLLOW,  },
#if defined(O_DIRECT)
    { TARGET_O_DIRECT,    TARGET_O_DIRECT,    O_DIRECT,    O_DIRECT,    },
#endif
#if defined(O_NOATIME)
    { TARGET_O_NOATIME,   TARGET_O_NOATIME,   O_NOATIME,   O_NOATIME    },
#endif
#if defined(O_CLOEXEC)
    { TARGET_O_CLOEXEC,   TARGET_O_CLOEXEC,   O_CLOEXEC,   O_CLOEXEC    },
#endif
#if defined(O_PATH)
    { TARGET_O_PATH,      TARGET_O_PATH,      O_PATH,      O_PATH       },
#endif
#if defined(O_TMPFILE)
    { TARGET_O_TMPFILE,   TARGET_O_TMPFILE,   O_TMPFILE,   O_TMPFILE    },
#endif
    /* Don't terminate the list prematurely on 64-bit host+guest.  */
#if TARGET_O_LARGEFILE != 0 || O_LARGEFILE != 0
    { TARGET_O_LARGEFILE, TARGET_O_LARGEFILE, O_LARGEFILE, O_LARGEFILE, },
#endif
};
  431. _syscall2(int, sys_getcwd1, char *, buf, size_t, size)
  432. #if defined(TARGET_NR_utimensat) || defined(TARGET_NR_utimensat_time64)
  433. #if defined(__NR_utimensat)
  434. #define __NR_sys_utimensat __NR_utimensat
  435. _syscall4(int,sys_utimensat,int,dirfd,const char *,pathname,
  436. const struct timespec *,tsp,int,flags)
  437. #else
  438. static int sys_utimensat(int dirfd, const char *pathname,
  439. const struct timespec times[2], int flags)
  440. {
  441. errno = ENOSYS;
  442. return -1;
  443. }
  444. #endif
  445. #endif /* TARGET_NR_utimensat */
  446. #ifdef TARGET_NR_renameat2
  447. #if defined(__NR_renameat2)
  448. #define __NR_sys_renameat2 __NR_renameat2
  449. _syscall5(int, sys_renameat2, int, oldfd, const char *, old, int, newfd,
  450. const char *, new, unsigned int, flags)
  451. #else
  452. static int sys_renameat2(int oldfd, const char *old,
  453. int newfd, const char *new, int flags)
  454. {
  455. if (flags == 0) {
  456. return renameat(oldfd, old, newfd, new);
  457. }
  458. errno = ENOSYS;
  459. return -1;
  460. }
  461. #endif
  462. #endif /* TARGET_NR_renameat2 */
  463. #ifdef CONFIG_INOTIFY
  464. #include <sys/inotify.h>
  465. #else
  466. /* Userspace can usually survive runtime without inotify */
  467. #undef TARGET_NR_inotify_init
  468. #undef TARGET_NR_inotify_init1
  469. #undef TARGET_NR_inotify_add_watch
  470. #undef TARGET_NR_inotify_rm_watch
  471. #endif /* CONFIG_INOTIFY */
  472. #if defined(TARGET_NR_prlimit64)
  473. #ifndef __NR_prlimit64
  474. # define __NR_prlimit64 -1
  475. #endif
  476. #define __NR_sys_prlimit64 __NR_prlimit64
  477. /* The glibc rlimit structure may not be that used by the underlying syscall */
  478. struct host_rlimit64 {
  479. uint64_t rlim_cur;
  480. uint64_t rlim_max;
  481. };
  482. _syscall4(int, sys_prlimit64, pid_t, pid, int, resource,
  483. const struct host_rlimit64 *, new_limit,
  484. struct host_rlimit64 *, old_limit)
  485. #endif
  486. #if defined(TARGET_NR_timer_create)
  487. /* Maximum of 32 active POSIX timers allowed at any one time. */
  488. #define GUEST_TIMER_MAX 32
  489. static timer_t g_posix_timers[GUEST_TIMER_MAX];
  490. static int g_posix_timer_allocated[GUEST_TIMER_MAX];
  491. static inline int next_free_host_timer(void)
  492. {
  493. int k;
  494. for (k = 0; k < ARRAY_SIZE(g_posix_timer_allocated); k++) {
  495. if (qatomic_xchg(g_posix_timer_allocated + k, 1) == 0) {
  496. return k;
  497. }
  498. }
  499. return -1;
  500. }
/*
 * Release a slot previously claimed by next_free_host_timer().
 * The store-release orders any prior timer teardown before the slot
 * can be observed as free and re-claimed by another thread.
 */
static inline void free_host_timer_slot(int id)
{
    qatomic_store_release(g_posix_timer_allocated + id, 0);
}
  505. #endif
/*
 * Translate a host errno value into the target's numbering.
 * The case list is generated from errnos.c.inc; any errno not listed
 * there is assumed to have the same value on host and target.
 */
static inline int host_to_target_errno(int host_errno)
{
    switch (host_errno) {
#define E(X)  case X: return TARGET_##X;
#include "errnos.c.inc"
#undef E
    default:
        return host_errno;
    }
}
/*
 * Translate a target errno value into the host's numbering.
 * Inverse of host_to_target_errno(); generated from the same
 * errnos.c.inc list, with unlisted values passed through unchanged.
 */
static inline int target_to_host_errno(int target_errno)
{
    switch (target_errno) {
#define E(X)  case TARGET_##X: return X;
#include "errnos.c.inc"
#undef E
    default:
        return target_errno;
    }
}
  526. abi_long get_errno(abi_long ret)
  527. {
  528. if (ret == -1)
  529. return -host_to_target_errno(errno);
  530. else
  531. return ret;
  532. }
  533. const char *target_strerror(int err)
  534. {
  535. if (err == QEMU_ERESTARTSYS) {
  536. return "To be restarted";
  537. }
  538. if (err == QEMU_ESIGRETURN) {
  539. return "Successful exit from sigreturn";
  540. }
  541. return strerror(target_to_host_errno(err));
  542. }
  543. static int check_zeroed_user(abi_long addr, size_t ksize, size_t usize)
  544. {
  545. int i;
  546. uint8_t b;
  547. if (usize <= ksize) {
  548. return 1;
  549. }
  550. for (i = ksize; i < usize; i++) {
  551. if (get_user_u8(b, addr + i)) {
  552. return -TARGET_EFAULT;
  553. }
  554. if (b != 0) {
  555. return 0;
  556. }
  557. }
  558. return 1;
  559. }
/*
 * Copies a target struct to a host struct, in a way that guarantees
 * backwards-compatibility for struct syscall arguments.
 *
 * Similar to the kernel's uaccess.h:copy_struct_from_user().
 *
 * If the guest struct is smaller (usize < ksize), the missing host
 * tail is zero-filled.  If it is larger (usize > ksize), the extra
 * guest bytes must all be zero, otherwise -TARGET_E2BIG is returned.
 * Returns 0 on success, negative target errno on failure.
 */
int copy_struct_from_user(void *dst, size_t ksize, abi_ptr src, size_t usize)
{
    size_t size = MIN(ksize, usize);
    size_t rest = MAX(ksize, usize) - size;

    /* Deal with trailing bytes. */
    if (usize < ksize) {
        memset(dst + size, 0, rest);
    } else if (usize > ksize) {
        int ret = check_zeroed_user(src, ksize, usize);
        if (ret <= 0) {
            /* ret == 0: non-zero tail byte; ret < 0: EFAULT from read */
            return ret ?: -TARGET_E2BIG;
        }
    }
    /* Copy the interoperable parts of the struct. */
    if (copy_from_user(dst, src, size)) {
        return -TARGET_EFAULT;
    }
    return 0;
}
  585. #define safe_syscall0(type, name) \
  586. static type safe_##name(void) \
  587. { \
  588. return safe_syscall(__NR_##name); \
  589. }
  590. #define safe_syscall1(type, name, type1, arg1) \
  591. static type safe_##name(type1 arg1) \
  592. { \
  593. return safe_syscall(__NR_##name, arg1); \
  594. }
  595. #define safe_syscall2(type, name, type1, arg1, type2, arg2) \
  596. static type safe_##name(type1 arg1, type2 arg2) \
  597. { \
  598. return safe_syscall(__NR_##name, arg1, arg2); \
  599. }
  600. #define safe_syscall3(type, name, type1, arg1, type2, arg2, type3, arg3) \
  601. static type safe_##name(type1 arg1, type2 arg2, type3 arg3) \
  602. { \
  603. return safe_syscall(__NR_##name, arg1, arg2, arg3); \
  604. }
  605. #define safe_syscall4(type, name, type1, arg1, type2, arg2, type3, arg3, \
  606. type4, arg4) \
  607. static type safe_##name(type1 arg1, type2 arg2, type3 arg3, type4 arg4) \
  608. { \
  609. return safe_syscall(__NR_##name, arg1, arg2, arg3, arg4); \
  610. }
  611. #define safe_syscall5(type, name, type1, arg1, type2, arg2, type3, arg3, \
  612. type4, arg4, type5, arg5) \
  613. static type safe_##name(type1 arg1, type2 arg2, type3 arg3, type4 arg4, \
  614. type5 arg5) \
  615. { \
  616. return safe_syscall(__NR_##name, arg1, arg2, arg3, arg4, arg5); \
  617. }
  618. #define safe_syscall6(type, name, type1, arg1, type2, arg2, type3, arg3, \
  619. type4, arg4, type5, arg5, type6, arg6) \
  620. static type safe_##name(type1 arg1, type2 arg2, type3 arg3, type4 arg4, \
  621. type5 arg5, type6 arg6) \
  622. { \
  623. return safe_syscall(__NR_##name, arg1, arg2, arg3, arg4, arg5, arg6); \
  624. }
  625. safe_syscall3(ssize_t, read, int, fd, void *, buff, size_t, count)
  626. safe_syscall3(ssize_t, write, int, fd, const void *, buff, size_t, count)
  627. safe_syscall4(int, openat, int, dirfd, const char *, pathname, \
  628. int, flags, mode_t, mode)
  629. safe_syscall4(int, openat2, int, dirfd, const char *, pathname, \
  630. const struct open_how_ver0 *, how, size_t, size)
  631. #if defined(TARGET_NR_wait4) || defined(TARGET_NR_waitpid)
  632. safe_syscall4(pid_t, wait4, pid_t, pid, int *, status, int, options, \
  633. struct rusage *, rusage)
  634. #endif
  635. safe_syscall5(int, waitid, idtype_t, idtype, id_t, id, siginfo_t *, infop, \
  636. int, options, struct rusage *, rusage)
  637. safe_syscall3(int, execve, const char *, filename, char **, argv, char **, envp)
  638. safe_syscall5(int, execveat, int, dirfd, const char *, filename,
  639. char **, argv, char **, envp, int, flags)
  640. #if defined(TARGET_NR_select) || defined(TARGET_NR__newselect) || \
  641. defined(TARGET_NR_pselect6) || defined(TARGET_NR_pselect6_time64)
  642. safe_syscall6(int, pselect6, int, nfds, fd_set *, readfds, fd_set *, writefds, \
  643. fd_set *, exceptfds, struct timespec *, timeout, void *, sig)
  644. #endif
  645. #if defined(TARGET_NR_ppoll) || defined(TARGET_NR_ppoll_time64)
  646. safe_syscall5(int, ppoll, struct pollfd *, ufds, unsigned int, nfds,
  647. struct timespec *, tsp, const sigset_t *, sigmask,
  648. size_t, sigsetsize)
  649. #endif
  650. safe_syscall6(int, epoll_pwait, int, epfd, struct epoll_event *, events,
  651. int, maxevents, int, timeout, const sigset_t *, sigmask,
  652. size_t, sigsetsize)
  653. #if defined(__NR_futex)
  654. safe_syscall6(int,futex,int *,uaddr,int,op,int,val, \
  655. const struct timespec *,timeout,int *,uaddr2,int,val3)
  656. #endif
  657. #if defined(__NR_futex_time64)
  658. safe_syscall6(int,futex_time64,int *,uaddr,int,op,int,val, \
  659. const struct timespec *,timeout,int *,uaddr2,int,val3)
  660. #endif
  661. safe_syscall2(int, rt_sigsuspend, sigset_t *, newset, size_t, sigsetsize)
  662. safe_syscall2(int, kill, pid_t, pid, int, sig)
  663. safe_syscall2(int, tkill, int, tid, int, sig)
  664. safe_syscall3(int, tgkill, int, tgid, int, pid, int, sig)
  665. safe_syscall3(ssize_t, readv, int, fd, const struct iovec *, iov, int, iovcnt)
  666. safe_syscall3(ssize_t, writev, int, fd, const struct iovec *, iov, int, iovcnt)
  667. safe_syscall5(ssize_t, preadv, int, fd, const struct iovec *, iov, int, iovcnt,
  668. unsigned long, pos_l, unsigned long, pos_h)
  669. safe_syscall5(ssize_t, pwritev, int, fd, const struct iovec *, iov, int, iovcnt,
  670. unsigned long, pos_l, unsigned long, pos_h)
  671. safe_syscall3(int, connect, int, fd, const struct sockaddr *, addr,
  672. socklen_t, addrlen)
  673. safe_syscall6(ssize_t, sendto, int, fd, const void *, buf, size_t, len,
  674. int, flags, const struct sockaddr *, addr, socklen_t, addrlen)
  675. safe_syscall6(ssize_t, recvfrom, int, fd, void *, buf, size_t, len,
  676. int, flags, struct sockaddr *, addr, socklen_t *, addrlen)
  677. safe_syscall3(ssize_t, sendmsg, int, fd, const struct msghdr *, msg, int, flags)
  678. safe_syscall3(ssize_t, recvmsg, int, fd, struct msghdr *, msg, int, flags)
  679. safe_syscall2(int, flock, int, fd, int, operation)
  680. #if defined(TARGET_NR_rt_sigtimedwait) || defined(TARGET_NR_rt_sigtimedwait_time64)
  681. safe_syscall4(int, rt_sigtimedwait, const sigset_t *, these, siginfo_t *, uinfo,
  682. const struct timespec *, uts, size_t, sigsetsize)
  683. #endif
  684. safe_syscall4(int, accept4, int, fd, struct sockaddr *, addr, socklen_t *, len,
  685. int, flags)
  686. #if defined(TARGET_NR_nanosleep)
  687. safe_syscall2(int, nanosleep, const struct timespec *, req,
  688. struct timespec *, rem)
  689. #endif
  690. #if defined(TARGET_NR_clock_nanosleep) || \
  691. defined(TARGET_NR_clock_nanosleep_time64)
  692. safe_syscall4(int, clock_nanosleep, const clockid_t, clock, int, flags,
  693. const struct timespec *, req, struct timespec *, rem)
  694. #endif
  695. #ifdef __NR_ipc
  696. #ifdef __s390x__
  697. safe_syscall5(int, ipc, int, call, long, first, long, second, long, third,
  698. void *, ptr)
  699. #else
  700. safe_syscall6(int, ipc, int, call, long, first, long, second, long, third,
  701. void *, ptr, long, fifth)
  702. #endif
  703. #endif
  704. #ifdef __NR_msgsnd
  705. safe_syscall4(int, msgsnd, int, msgid, const void *, msgp, size_t, sz,
  706. int, flags)
  707. #endif
  708. #ifdef __NR_msgrcv
  709. safe_syscall5(int, msgrcv, int, msgid, void *, msgp, size_t, sz,
  710. long, msgtype, int, flags)
  711. #endif
  712. #ifdef __NR_semtimedop
  713. safe_syscall4(int, semtimedop, int, semid, struct sembuf *, tsops,
  714. unsigned, nsops, const struct timespec *, timeout)
  715. #endif
  716. #if defined(TARGET_NR_mq_timedsend) || \
  717. defined(TARGET_NR_mq_timedsend_time64)
  718. safe_syscall5(int, mq_timedsend, int, mqdes, const char *, msg_ptr,
  719. size_t, len, unsigned, prio, const struct timespec *, timeout)
  720. #endif
  721. #if defined(TARGET_NR_mq_timedreceive) || \
  722. defined(TARGET_NR_mq_timedreceive_time64)
  723. safe_syscall5(int, mq_timedreceive, int, mqdes, char *, msg_ptr,
  724. size_t, len, unsigned *, prio, const struct timespec *, timeout)
  725. #endif
  726. #if defined(TARGET_NR_copy_file_range) && defined(__NR_copy_file_range)
  727. safe_syscall6(ssize_t, copy_file_range, int, infd, loff_t *, pinoff,
  728. int, outfd, loff_t *, poutoff, size_t, length,
  729. unsigned int, flags)
  730. #endif
  731. /* We do ioctl like this rather than via safe_syscall3 to preserve the
  732. * "third argument might be integer or pointer or not present" behaviour of
  733. * the libc function.
  734. */
  735. #define safe_ioctl(...) safe_syscall(__NR_ioctl, __VA_ARGS__)
  736. /* Similarly for fcntl. Since we always build with LFS enabled,
  737. * we should be using the 64-bit structures automatically.
  738. */
  739. #ifdef __NR_fcntl64
  740. #define safe_fcntl(...) safe_syscall(__NR_fcntl64, __VA_ARGS__)
  741. #else
  742. #define safe_fcntl(...) safe_syscall(__NR_fcntl, __VA_ARGS__)
  743. #endif
  744. static inline int host_to_target_sock_type(int host_type)
  745. {
  746. int target_type;
  747. switch (host_type & 0xf /* SOCK_TYPE_MASK */) {
  748. case SOCK_DGRAM:
  749. target_type = TARGET_SOCK_DGRAM;
  750. break;
  751. case SOCK_STREAM:
  752. target_type = TARGET_SOCK_STREAM;
  753. break;
  754. default:
  755. target_type = host_type & 0xf /* SOCK_TYPE_MASK */;
  756. break;
  757. }
  758. #if defined(SOCK_CLOEXEC)
  759. if (host_type & SOCK_CLOEXEC) {
  760. target_type |= TARGET_SOCK_CLOEXEC;
  761. }
  762. #endif
  763. #if defined(SOCK_NONBLOCK)
  764. if (host_type & SOCK_NONBLOCK) {
  765. target_type |= TARGET_SOCK_NONBLOCK;
  766. }
  767. #endif
  768. return target_type;
  769. }
/* Current guest program break, and its value at process start. */
static abi_ulong target_brk, initial_target_brk;

/* Record the initial break (page-aligned) when the binary is loaded. */
void target_set_brk(abi_ulong new_brk)
{
    target_brk = TARGET_PAGE_ALIGN(new_brk);
    initial_target_brk = target_brk;
}
/* do_brk() must return target values and target errnos. */
abi_long do_brk(abi_ulong brk_val)
{
    abi_long mapped_addr;
    abi_ulong new_brk;
    abi_ulong old_brk;

    /* brk pointers are always untagged */

    /* do not allow to shrink below initial brk value */
    if (brk_val < initial_target_brk) {
        return target_brk;
    }

    new_brk = TARGET_PAGE_ALIGN(brk_val);
    old_brk = TARGET_PAGE_ALIGN(target_brk);

    /* new and old target_brk might be on the same page */
    if (new_brk == old_brk) {
        target_brk = brk_val;
        return target_brk;
    }

    /* Release heap if necessary */
    if (new_brk < old_brk) {
        target_munmap(new_brk, old_brk - new_brk);
        target_brk = brk_val;
        return target_brk;
    }

    /*
     * Grow the heap.  MAP_FIXED_NOREPLACE fails instead of clobbering
     * an existing mapping, so a foreign mapping in the way makes the
     * request fail cleanly rather than corrupt the address space.
     */
    mapped_addr = target_mmap(old_brk, new_brk - old_brk,
                              PROT_READ | PROT_WRITE,
                              MAP_FIXED_NOREPLACE | MAP_ANON | MAP_PRIVATE,
                              -1, 0);

    if (mapped_addr == old_brk) {
        target_brk = brk_val;
        return target_brk;
    }

#if defined(TARGET_ALPHA)
    /* We (partially) emulate OSF/1 on Alpha, which requires we
       return a proper errno, not an unchanged brk value. */
    return -TARGET_ENOMEM;
#endif
    /* For everything else, return the previous break. */
    return target_brk;
}
  816. #if defined(TARGET_NR_select) || defined(TARGET_NR__newselect) || \
  817. defined(TARGET_NR_pselect6) || defined(TARGET_NR_pselect6_time64)
/*
 * Read a guest fd_set (packed as an array of abi_ulong bit-words)
 * covering descriptors [0, n) from target_fds_addr and expand it
 * into the host fd_set *fds.
 * Returns 0 on success, -TARGET_EFAULT on a bad guest address.
 */
static inline abi_long copy_from_user_fdset(fd_set *fds,
                                            abi_ulong target_fds_addr,
                                            int n)
{
    int i, nw, j, k;
    abi_ulong b, *target_fds;

    /* Number of abi_ulong words needed to hold n bits. */
    nw = DIV_ROUND_UP(n, TARGET_ABI_BITS);
    if (!(target_fds = lock_user(VERIFY_READ,
                                 target_fds_addr,
                                 sizeof(abi_ulong) * nw,
                                 1)))
        return -TARGET_EFAULT;

    FD_ZERO(fds);
    k = 0;
    for (i = 0; i < nw; i++) {
        /* grab the abi_ulong */
        __get_user(b, &target_fds[i]);
        for (j = 0; j < TARGET_ABI_BITS; j++) {
            /* check the bit inside the abi_ulong */
            if ((b >> j) & 1)
                FD_SET(k, fds);
            k++;
        }
    }

    unlock_user(target_fds, target_fds_addr, 0);

    return 0;
}
  845. static inline abi_ulong copy_from_user_fdset_ptr(fd_set *fds, fd_set **fds_ptr,
  846. abi_ulong target_fds_addr,
  847. int n)
  848. {
  849. if (target_fds_addr) {
  850. if (copy_from_user_fdset(fds, target_fds_addr, n))
  851. return -TARGET_EFAULT;
  852. *fds_ptr = fds;
  853. } else {
  854. *fds_ptr = NULL;
  855. }
  856. return 0;
  857. }
/*
 * Pack the host fd_set *fds covering descriptors [0, n) into the
 * guest's abi_ulong bit-word representation at target_fds_addr.
 * Returns 0 on success, -TARGET_EFAULT on a bad guest address.
 */
static inline abi_long copy_to_user_fdset(abi_ulong target_fds_addr,
                                          const fd_set *fds,
                                          int n)
{
    int i, nw, j, k;
    abi_long v;
    abi_ulong *target_fds;

    /* Number of abi_ulong words needed to hold n bits. */
    nw = DIV_ROUND_UP(n, TARGET_ABI_BITS);
    if (!(target_fds = lock_user(VERIFY_WRITE,
                                 target_fds_addr,
                                 sizeof(abi_ulong) * nw,
                                 0)))
        return -TARGET_EFAULT;

    k = 0;
    for (i = 0; i < nw; i++) {
        v = 0;
        for (j = 0; j < TARGET_ABI_BITS; j++) {
            v |= ((abi_ulong)(FD_ISSET(k, fds) != 0) << j);
            k++;
        }
        __put_user(v, &target_fds[i]);
    }

    unlock_user(target_fds, target_fds_addr, sizeof(abi_ulong) * nw);

    return 0;
}
  883. #endif
/*
 * Host clock tick rate as reported in clock_t values: Alpha uses
 * 1024 ticks/sec, everything else 100 (presumably mirroring the
 * kernel's per-architecture USER_HZ — confirm if a new host is added).
 */
#if defined(__alpha__)
#define HOST_HZ 1024
#else
#define HOST_HZ 100
#endif

/* Rescale a host clock_t tick count to the target's tick rate. */
static inline abi_long host_to_target_clock_t(long ticks)
{
#if HOST_HZ == TARGET_HZ
    return ticks;
#else
    /* Widen to 64 bits so the multiply cannot overflow. */
    return ((int64_t)ticks * TARGET_HZ) / HOST_HZ;
#endif
}
/*
 * Copy a host struct rusage out to the guest struct target_rusage at
 * target_addr, converting every field to the guest's word size and
 * byte order with tswapal().
 * Returns 0 on success, -TARGET_EFAULT on a bad guest address.
 */
static inline abi_long host_to_target_rusage(abi_ulong target_addr,
                                             const struct rusage *rusage)
{
    struct target_rusage *target_rusage;

    if (!lock_user_struct(VERIFY_WRITE, target_rusage, target_addr, 0))
        return -TARGET_EFAULT;
    target_rusage->ru_utime.tv_sec = tswapal(rusage->ru_utime.tv_sec);
    target_rusage->ru_utime.tv_usec = tswapal(rusage->ru_utime.tv_usec);
    target_rusage->ru_stime.tv_sec = tswapal(rusage->ru_stime.tv_sec);
    target_rusage->ru_stime.tv_usec = tswapal(rusage->ru_stime.tv_usec);
    target_rusage->ru_maxrss = tswapal(rusage->ru_maxrss);
    target_rusage->ru_ixrss = tswapal(rusage->ru_ixrss);
    target_rusage->ru_idrss = tswapal(rusage->ru_idrss);
    target_rusage->ru_isrss = tswapal(rusage->ru_isrss);
    target_rusage->ru_minflt = tswapal(rusage->ru_minflt);
    target_rusage->ru_majflt = tswapal(rusage->ru_majflt);
    target_rusage->ru_nswap = tswapal(rusage->ru_nswap);
    target_rusage->ru_inblock = tswapal(rusage->ru_inblock);
    target_rusage->ru_oublock = tswapal(rusage->ru_oublock);
    target_rusage->ru_msgsnd = tswapal(rusage->ru_msgsnd);
    target_rusage->ru_msgrcv = tswapal(rusage->ru_msgrcv);
    target_rusage->ru_nsignals = tswapal(rusage->ru_nsignals);
    target_rusage->ru_nvcsw = tswapal(rusage->ru_nvcsw);
    target_rusage->ru_nivcsw = tswapal(rusage->ru_nivcsw);
    unlock_user_struct(target_rusage, target_addr, 1);

    return 0;
}
  924. #ifdef TARGET_NR_setrlimit
  925. static inline rlim_t target_to_host_rlim(abi_ulong target_rlim)
  926. {
  927. abi_ulong target_rlim_swap;
  928. rlim_t result;
  929. target_rlim_swap = tswapal(target_rlim);
  930. if (target_rlim_swap == TARGET_RLIM_INFINITY)
  931. return RLIM_INFINITY;
  932. result = target_rlim_swap;
  933. if (target_rlim_swap != (rlim_t)result)
  934. return RLIM_INFINITY;
  935. return result;
  936. }
  937. #endif
  938. #if defined(TARGET_NR_getrlimit) || defined(TARGET_NR_ugetrlimit)
/*
 * Convert a host rlim_t to a guest rlimit value (in guest byte order).
 * Host infinity, or any value that does not fit in the guest's
 * abi_long, becomes the guest's RLIM_INFINITY encoding.
 */
static inline abi_ulong host_to_target_rlim(rlim_t rlim)
{
    abi_ulong target_rlim_swap;
    abi_ulong result;

    /* The abi_long cast checks that the value is representable without
       looking negative to the guest; anything else saturates. */
    if (rlim == RLIM_INFINITY || rlim != (abi_long)rlim)
        target_rlim_swap = TARGET_RLIM_INFINITY;
    else
        target_rlim_swap = rlim;
    result = tswapal(target_rlim_swap);
    return result;
}
  950. #endif
  951. static inline int target_to_host_resource(int code)
  952. {
  953. switch (code) {
  954. case TARGET_RLIMIT_AS:
  955. return RLIMIT_AS;
  956. case TARGET_RLIMIT_CORE:
  957. return RLIMIT_CORE;
  958. case TARGET_RLIMIT_CPU:
  959. return RLIMIT_CPU;
  960. case TARGET_RLIMIT_DATA:
  961. return RLIMIT_DATA;
  962. case TARGET_RLIMIT_FSIZE:
  963. return RLIMIT_FSIZE;
  964. case TARGET_RLIMIT_LOCKS:
  965. return RLIMIT_LOCKS;
  966. case TARGET_RLIMIT_MEMLOCK:
  967. return RLIMIT_MEMLOCK;
  968. case TARGET_RLIMIT_MSGQUEUE:
  969. return RLIMIT_MSGQUEUE;
  970. case TARGET_RLIMIT_NICE:
  971. return RLIMIT_NICE;
  972. case TARGET_RLIMIT_NOFILE:
  973. return RLIMIT_NOFILE;
  974. case TARGET_RLIMIT_NPROC:
  975. return RLIMIT_NPROC;
  976. case TARGET_RLIMIT_RSS:
  977. return RLIMIT_RSS;
  978. case TARGET_RLIMIT_RTPRIO:
  979. return RLIMIT_RTPRIO;
  980. #ifdef RLIMIT_RTTIME
  981. case TARGET_RLIMIT_RTTIME:
  982. return RLIMIT_RTTIME;
  983. #endif
  984. case TARGET_RLIMIT_SIGPENDING:
  985. return RLIMIT_SIGPENDING;
  986. case TARGET_RLIMIT_STACK:
  987. return RLIMIT_STACK;
  988. default:
  989. return code;
  990. }
  991. }
  992. static inline abi_long copy_from_user_timeval(struct timeval *tv,
  993. abi_ulong target_tv_addr)
  994. {
  995. struct target_timeval *target_tv;
  996. if (!lock_user_struct(VERIFY_READ, target_tv, target_tv_addr, 1)) {
  997. return -TARGET_EFAULT;
  998. }
  999. __get_user(tv->tv_sec, &target_tv->tv_sec);
  1000. __get_user(tv->tv_usec, &target_tv->tv_usec);
  1001. unlock_user_struct(target_tv, target_tv_addr, 0);
  1002. return 0;
  1003. }
  1004. static inline abi_long copy_to_user_timeval(abi_ulong target_tv_addr,
  1005. const struct timeval *tv)
  1006. {
  1007. struct target_timeval *target_tv;
  1008. if (!lock_user_struct(VERIFY_WRITE, target_tv, target_tv_addr, 0)) {
  1009. return -TARGET_EFAULT;
  1010. }
  1011. __put_user(tv->tv_sec, &target_tv->tv_sec);
  1012. __put_user(tv->tv_usec, &target_tv->tv_usec);
  1013. unlock_user_struct(target_tv, target_tv_addr, 1);
  1014. return 0;
  1015. }
  1016. #if defined(TARGET_NR_clock_adjtime64) && defined(CONFIG_CLOCK_ADJTIME)
  1017. static inline abi_long copy_from_user_timeval64(struct timeval *tv,
  1018. abi_ulong target_tv_addr)
  1019. {
  1020. struct target__kernel_sock_timeval *target_tv;
  1021. if (!lock_user_struct(VERIFY_READ, target_tv, target_tv_addr, 1)) {
  1022. return -TARGET_EFAULT;
  1023. }
  1024. __get_user(tv->tv_sec, &target_tv->tv_sec);
  1025. __get_user(tv->tv_usec, &target_tv->tv_usec);
  1026. unlock_user_struct(target_tv, target_tv_addr, 0);
  1027. return 0;
  1028. }
  1029. #endif
  1030. static inline abi_long copy_to_user_timeval64(abi_ulong target_tv_addr,
  1031. const struct timeval *tv)
  1032. {
  1033. struct target__kernel_sock_timeval *target_tv;
  1034. if (!lock_user_struct(VERIFY_WRITE, target_tv, target_tv_addr, 0)) {
  1035. return -TARGET_EFAULT;
  1036. }
  1037. __put_user(tv->tv_sec, &target_tv->tv_sec);
  1038. __put_user(tv->tv_usec, &target_tv->tv_usec);
  1039. unlock_user_struct(target_tv, target_tv_addr, 1);
  1040. return 0;
  1041. }
  1042. #if defined(TARGET_NR_futex) || \
  1043. defined(TARGET_NR_rt_sigtimedwait) || \
  1044. defined(TARGET_NR_pselect6) || defined(TARGET_NR_pselect6) || \
  1045. defined(TARGET_NR_nanosleep) || defined(TARGET_NR_clock_settime) || \
  1046. defined(TARGET_NR_utimensat) || defined(TARGET_NR_mq_timedsend) || \
  1047. defined(TARGET_NR_mq_timedreceive) || defined(TARGET_NR_ipc) || \
  1048. defined(TARGET_NR_semop) || defined(TARGET_NR_semtimedop) || \
  1049. defined(TARGET_NR_timer_settime) || \
  1050. (defined(TARGET_NR_timerfd_settime) && defined(CONFIG_TIMERFD))
  1051. static inline abi_long target_to_host_timespec(struct timespec *host_ts,
  1052. abi_ulong target_addr)
  1053. {
  1054. struct target_timespec *target_ts;
  1055. if (!lock_user_struct(VERIFY_READ, target_ts, target_addr, 1)) {
  1056. return -TARGET_EFAULT;
  1057. }
  1058. __get_user(host_ts->tv_sec, &target_ts->tv_sec);
  1059. __get_user(host_ts->tv_nsec, &target_ts->tv_nsec);
  1060. unlock_user_struct(target_ts, target_addr, 0);
  1061. return 0;
  1062. }
  1063. #endif
#if defined(TARGET_NR_clock_settime64) || defined(TARGET_NR_futex_time64) || \
    defined(TARGET_NR_timer_settime64) || \
    defined(TARGET_NR_mq_timedsend_time64) || \
    defined(TARGET_NR_mq_timedreceive_time64) || \
    (defined(TARGET_NR_timerfd_settime64) && defined(CONFIG_TIMERFD)) || \
    defined(TARGET_NR_clock_nanosleep_time64) || \
    defined(TARGET_NR_rt_sigtimedwait_time64) || \
    defined(TARGET_NR_utimensat) || \
    defined(TARGET_NR_utimensat_time64) || \
    defined(TARGET_NR_semtimedop_time64) || \
    defined(TARGET_NR_pselect6_time64) || defined(TARGET_NR_ppoll_time64)
/*
 * Read a 64-bit (struct __kernel_timespec layout) timespec from guest
 * memory into *host_ts.
 * Returns 0 on success, -TARGET_EFAULT if the guest address cannot
 * be read.
 */
static inline abi_long target_to_host_timespec64(struct timespec *host_ts,
                                                 abi_ulong target_addr)
{
    struct target__kernel_timespec *target_ts;

    if (!lock_user_struct(VERIFY_READ, target_ts, target_addr, 1)) {
        return -TARGET_EFAULT;
    }
    __get_user(host_ts->tv_sec, &target_ts->tv_sec);
    __get_user(host_ts->tv_nsec, &target_ts->tv_nsec);
    /* in 32bit mode, this drops the padding */
    host_ts->tv_nsec = (long)(abi_long)host_ts->tv_nsec;
    unlock_user_struct(target_ts, target_addr, 0);
    return 0;
}
#endif
  1090. static inline abi_long host_to_target_timespec(abi_ulong target_addr,
  1091. struct timespec *host_ts)
  1092. {
  1093. struct target_timespec *target_ts;
  1094. if (!lock_user_struct(VERIFY_WRITE, target_ts, target_addr, 0)) {
  1095. return -TARGET_EFAULT;
  1096. }
  1097. __put_user(host_ts->tv_sec, &target_ts->tv_sec);
  1098. __put_user(host_ts->tv_nsec, &target_ts->tv_nsec);
  1099. unlock_user_struct(target_ts, target_addr, 1);
  1100. return 0;
  1101. }
  1102. static inline abi_long host_to_target_timespec64(abi_ulong target_addr,
  1103. struct timespec *host_ts)
  1104. {
  1105. struct target__kernel_timespec *target_ts;
  1106. if (!lock_user_struct(VERIFY_WRITE, target_ts, target_addr, 0)) {
  1107. return -TARGET_EFAULT;
  1108. }
  1109. __put_user(host_ts->tv_sec, &target_ts->tv_sec);
  1110. __put_user(host_ts->tv_nsec, &target_ts->tv_nsec);
  1111. unlock_user_struct(target_ts, target_addr, 1);
  1112. return 0;
  1113. }
  1114. #if defined(TARGET_NR_gettimeofday)
  1115. static inline abi_long copy_to_user_timezone(abi_ulong target_tz_addr,
  1116. struct timezone *tz)
  1117. {
  1118. struct target_timezone *target_tz;
  1119. if (!lock_user_struct(VERIFY_WRITE, target_tz, target_tz_addr, 1)) {
  1120. return -TARGET_EFAULT;
  1121. }
  1122. __put_user(tz->tz_minuteswest, &target_tz->tz_minuteswest);
  1123. __put_user(tz->tz_dsttime, &target_tz->tz_dsttime);
  1124. unlock_user_struct(target_tz, target_tz_addr, 1);
  1125. return 0;
  1126. }
  1127. #endif
  1128. #if defined(TARGET_NR_settimeofday)
  1129. static inline abi_long copy_from_user_timezone(struct timezone *tz,
  1130. abi_ulong target_tz_addr)
  1131. {
  1132. struct target_timezone *target_tz;
  1133. if (!lock_user_struct(VERIFY_READ, target_tz, target_tz_addr, 1)) {
  1134. return -TARGET_EFAULT;
  1135. }
  1136. __get_user(tz->tz_minuteswest, &target_tz->tz_minuteswest);
  1137. __get_user(tz->tz_dsttime, &target_tz->tz_dsttime);
  1138. unlock_user_struct(target_tz, target_tz_addr, 0);
  1139. return 0;
  1140. }
  1141. #endif
  1142. #if defined(TARGET_NR_mq_open) && defined(__NR_mq_open)
  1143. #include <mqueue.h>
  1144. static inline abi_long copy_from_user_mq_attr(struct mq_attr *attr,
  1145. abi_ulong target_mq_attr_addr)
  1146. {
  1147. struct target_mq_attr *target_mq_attr;
  1148. if (!lock_user_struct(VERIFY_READ, target_mq_attr,
  1149. target_mq_attr_addr, 1))
  1150. return -TARGET_EFAULT;
  1151. __get_user(attr->mq_flags, &target_mq_attr->mq_flags);
  1152. __get_user(attr->mq_maxmsg, &target_mq_attr->mq_maxmsg);
  1153. __get_user(attr->mq_msgsize, &target_mq_attr->mq_msgsize);
  1154. __get_user(attr->mq_curmsgs, &target_mq_attr->mq_curmsgs);
  1155. unlock_user_struct(target_mq_attr, target_mq_attr_addr, 0);
  1156. return 0;
  1157. }
  1158. static inline abi_long copy_to_user_mq_attr(abi_ulong target_mq_attr_addr,
  1159. const struct mq_attr *attr)
  1160. {
  1161. struct target_mq_attr *target_mq_attr;
  1162. if (!lock_user_struct(VERIFY_WRITE, target_mq_attr,
  1163. target_mq_attr_addr, 0))
  1164. return -TARGET_EFAULT;
  1165. __put_user(attr->mq_flags, &target_mq_attr->mq_flags);
  1166. __put_user(attr->mq_maxmsg, &target_mq_attr->mq_maxmsg);
  1167. __put_user(attr->mq_msgsize, &target_mq_attr->mq_msgsize);
  1168. __put_user(attr->mq_curmsgs, &target_mq_attr->mq_curmsgs);
  1169. unlock_user_struct(target_mq_attr, target_mq_attr_addr, 1);
  1170. return 0;
  1171. }
  1172. #endif
  1173. #if defined(TARGET_NR_select) || defined(TARGET_NR__newselect)
/*
 * Emulate select(2).  do_select() must return target values and
 * target errnos.
 *
 * n:              highest-numbered fd in any set, plus one
 * rfd_addr, wfd_addr, efd_addr: guest addresses of the read/write/
 *                 except fd_sets; 0 means "no set"
 * target_tv_addr: guest address of the struct timeval timeout, or 0
 *                 for no timeout
 *
 * The host call goes through safe_pselect6() (which takes a timespec),
 * so the timeout is converted timeval -> timespec on the way in and
 * back on the way out; on success the fd_sets and the remaining time
 * are copied back to the guest.
 */
static abi_long do_select(int n,
                          abi_ulong rfd_addr, abi_ulong wfd_addr,
                          abi_ulong efd_addr, abi_ulong target_tv_addr)
{
    fd_set rfds, wfds, efds;
    fd_set *rfds_ptr, *wfds_ptr, *efds_ptr;
    struct timeval tv;
    struct timespec ts, *ts_ptr;
    abi_long ret;

    /* Each helper leaves the *_ptr NULL when the guest address is 0. */
    ret = copy_from_user_fdset_ptr(&rfds, &rfds_ptr, rfd_addr, n);
    if (ret) {
        return ret;
    }
    ret = copy_from_user_fdset_ptr(&wfds, &wfds_ptr, wfd_addr, n);
    if (ret) {
        return ret;
    }
    ret = copy_from_user_fdset_ptr(&efds, &efds_ptr, efd_addr, n);
    if (ret) {
        return ret;
    }

    if (target_tv_addr) {
        if (copy_from_user_timeval(&tv, target_tv_addr))
            return -TARGET_EFAULT;
        /* pselect6 wants a timespec, so convert from the timeval */
        ts.tv_sec = tv.tv_sec;
        ts.tv_nsec = tv.tv_usec * 1000;
        ts_ptr = &ts;
    } else {
        ts_ptr = NULL;
    }

    ret = get_errno(safe_pselect6(n, rfds_ptr, wfds_ptr, efds_ptr,
                                  ts_ptr, NULL));

    if (!is_error(ret)) {
        if (rfd_addr && copy_to_user_fdset(rfd_addr, &rfds, n))
            return -TARGET_EFAULT;
        if (wfd_addr && copy_to_user_fdset(wfd_addr, &wfds, n))
            return -TARGET_EFAULT;
        if (efd_addr && copy_to_user_fdset(efd_addr, &efds, n))
            return -TARGET_EFAULT;

        if (target_tv_addr) {
            /* Write the remaining time back, as Linux select() does. */
            tv.tv_sec = ts.tv_sec;
            tv.tv_usec = ts.tv_nsec / 1000;
            if (copy_to_user_timeval(target_tv_addr, &tv)) {
                return -TARGET_EFAULT;
            }
        }
    }

    return ret;
}
  1224. #if defined(TARGET_WANT_OLD_SYS_SELECT)
  1225. static abi_long do_old_select(abi_ulong arg1)
  1226. {
  1227. struct target_sel_arg_struct *sel;
  1228. abi_ulong inp, outp, exp, tvp;
  1229. long nsel;
  1230. if (!lock_user_struct(VERIFY_READ, sel, arg1, 1)) {
  1231. return -TARGET_EFAULT;
  1232. }
  1233. nsel = tswapal(sel->n);
  1234. inp = tswapal(sel->inp);
  1235. outp = tswapal(sel->outp);
  1236. exp = tswapal(sel->exp);
  1237. tvp = tswapal(sel->tvp);
  1238. unlock_user_struct(sel, arg1, 0);
  1239. return do_select(nsel, inp, outp, exp, tvp);
  1240. }
  1241. #endif
  1242. #endif
  1243. #if defined(TARGET_NR_pselect6) || defined(TARGET_NR_pselect6_time64)
/*
 * Emulate pselect6()/pselect6_time64().  Returns target errnos.
 *
 * arg1..arg5: nfds, readfds, writefds, exceptfds and timeout, as guest
 *             addresses (any fd_set or the timeout may be 0/NULL).
 * arg6:       guest address of a pair { sigset address, sigset size } --
 *             the kernel ABI smashes these two into one pointer argument.
 * time64:     true for the time64 variant (64-bit timespec layout).
 */
static abi_long do_pselect6(abi_long arg1, abi_long arg2, abi_long arg3,
                            abi_long arg4, abi_long arg5, abi_long arg6,
                            bool time64)
{
    abi_long rfd_addr, wfd_addr, efd_addr, n, ts_addr;
    fd_set rfds, wfds, efds;
    fd_set *rfds_ptr, *wfds_ptr, *efds_ptr;
    struct timespec ts, *ts_ptr;
    abi_long ret;

    /*
     * The 6th arg is actually two args smashed together,
     * so we cannot use the C library.
     */
    struct {
        sigset_t *set;
        size_t size;
    } sig, *sig_ptr;

    abi_ulong arg_sigset, arg_sigsize, *arg7;

    n = arg1;
    rfd_addr = arg2;
    wfd_addr = arg3;
    efd_addr = arg4;
    ts_addr = arg5;

    /* Each helper leaves the *_ptr NULL when the guest address is 0. */
    ret = copy_from_user_fdset_ptr(&rfds, &rfds_ptr, rfd_addr, n);
    if (ret) {
        return ret;
    }
    ret = copy_from_user_fdset_ptr(&wfds, &wfds_ptr, wfd_addr, n);
    if (ret) {
        return ret;
    }
    ret = copy_from_user_fdset_ptr(&efds, &efds_ptr, efd_addr, n);
    if (ret) {
        return ret;
    }

    /*
     * This takes a timespec, and not a timeval, so we cannot
     * use the do_select() helper ...
     */
    if (ts_addr) {
        if (time64) {
            if (target_to_host_timespec64(&ts, ts_addr)) {
                return -TARGET_EFAULT;
            }
        } else {
            if (target_to_host_timespec(&ts, ts_addr)) {
                return -TARGET_EFAULT;
            }
        }
        ts_ptr = &ts;
    } else {
        ts_ptr = NULL;
    }

    /* Extract the two packed args for the sigset */
    sig_ptr = NULL;
    if (arg6) {
        arg7 = lock_user(VERIFY_READ, arg6, sizeof(*arg7) * 2, 1);
        if (!arg7) {
            return -TARGET_EFAULT;
        }
        arg_sigset = tswapal(arg7[0]);
        arg_sigsize = tswapal(arg7[1]);
        unlock_user(arg7, arg6, 0);

        if (arg_sigset) {
            /* Temporarily install the guest's signal mask. */
            ret = process_sigsuspend_mask(&sig.set, arg_sigset, arg_sigsize);
            if (ret != 0) {
                return ret;
            }
            sig_ptr = &sig;
            sig.size = SIGSET_T_SIZE;
        }
    }

    ret = get_errno(safe_pselect6(n, rfds_ptr, wfds_ptr, efds_ptr,
                                  ts_ptr, sig_ptr));

    if (sig_ptr) {
        /* Restore the original mask (deferred if a signal is pending). */
        finish_sigsuspend_mask(ret);
    }

    if (!is_error(ret)) {
        if (rfd_addr && copy_to_user_fdset(rfd_addr, &rfds, n)) {
            return -TARGET_EFAULT;
        }
        if (wfd_addr && copy_to_user_fdset(wfd_addr, &wfds, n)) {
            return -TARGET_EFAULT;
        }
        if (efd_addr && copy_to_user_fdset(efd_addr, &efds, n)) {
            return -TARGET_EFAULT;
        }
        /* Write the remaining time back in the caller's layout. */
        if (time64) {
            if (ts_addr && host_to_target_timespec64(ts_addr, &ts)) {
                return -TARGET_EFAULT;
            }
        } else {
            if (ts_addr && host_to_target_timespec(ts_addr, &ts)) {
                return -TARGET_EFAULT;
            }
        }
    }
    return ret;
}
  1343. #endif
  1344. #if defined(TARGET_NR_poll) || defined(TARGET_NR_ppoll) || \
  1345. defined(TARGET_NR_ppoll_time64)
/*
 * Emulate poll()/ppoll()/ppoll_time64().  Returns target errnos.
 *
 * arg1:   guest address of the pollfd array
 * arg2:   number of pollfds
 * arg3:   for ppoll, guest address of a timespec (0 = infinite);
 *         for poll, a timeout in milliseconds (negative = infinite)
 * arg4:   ppoll only - guest address of the signal mask, or 0
 * arg5:   ppoll only - size of the signal mask
 * ppoll:  select ppoll semantics rather than plain poll
 * time64: ppoll's timespec uses the 64-bit layout
 */
static abi_long do_ppoll(abi_long arg1, abi_long arg2, abi_long arg3,
                         abi_long arg4, abi_long arg5, bool ppoll, bool time64)
{
    struct target_pollfd *target_pfd;
    unsigned int nfds = arg2;
    struct pollfd *pfd;
    unsigned int i;
    abi_long ret;

    pfd = NULL;
    target_pfd = NULL;
    if (nfds) {
        /* Bound nfds so the lock_user length below cannot overflow. */
        if (nfds > (INT_MAX / sizeof(struct target_pollfd))) {
            return -TARGET_EINVAL;
        }
        target_pfd = lock_user(VERIFY_WRITE, arg1,
                               sizeof(struct target_pollfd) * nfds, 1);
        if (!target_pfd) {
            return -TARGET_EFAULT;
        }

        pfd = alloca(sizeof(struct pollfd) * nfds);
        for (i = 0; i < nfds; i++) {
            pfd[i].fd = tswap32(target_pfd[i].fd);
            pfd[i].events = tswap16(target_pfd[i].events);
        }
    }
    if (ppoll) {
        struct timespec _timeout_ts, *timeout_ts = &_timeout_ts;
        sigset_t *set = NULL;

        if (arg3) {
            if (time64) {
                if (target_to_host_timespec64(timeout_ts, arg3)) {
                    unlock_user(target_pfd, arg1, 0);
                    return -TARGET_EFAULT;
                }
            } else {
                if (target_to_host_timespec(timeout_ts, arg3)) {
                    unlock_user(target_pfd, arg1, 0);
                    return -TARGET_EFAULT;
                }
            }
        } else {
            timeout_ts = NULL;
        }

        if (arg4) {
            /* Temporarily install the guest's signal mask. */
            ret = process_sigsuspend_mask(&set, arg4, arg5);
            if (ret != 0) {
                unlock_user(target_pfd, arg1, 0);
                return ret;
            }
        }

        ret = get_errno(safe_ppoll(pfd, nfds, timeout_ts,
                                   set, SIGSET_T_SIZE));

        if (set) {
            /* Restore the original mask (deferred if a signal is pending). */
            finish_sigsuspend_mask(ret);
        }
        if (!is_error(ret) && arg3) {
            /* Write the remaining time back in the caller's layout. */
            if (time64) {
                if (host_to_target_timespec64(arg3, timeout_ts)) {
                    return -TARGET_EFAULT;
                }
            } else {
                if (host_to_target_timespec(arg3, timeout_ts)) {
                    return -TARGET_EFAULT;
                }
            }
        }
    } else {
        struct timespec ts, *pts;

        if (arg3 >= 0) {
            /* Convert ms to secs, ns */
            ts.tv_sec = arg3 / 1000;
            ts.tv_nsec = (arg3 % 1000) * 1000000LL;
            pts = &ts;
        } else {
            /* -ve poll() timeout means "infinite" */
            pts = NULL;
        }
        ret = get_errno(safe_ppoll(pfd, nfds, pts, NULL, 0));
    }

    if (!is_error(ret)) {
        /* Copy the revents results back to the guest pollfds. */
        for (i = 0; i < nfds; i++) {
            target_pfd[i].revents = tswap16(pfd[i].revents);
        }
    }
    unlock_user(target_pfd, arg1, sizeof(struct target_pollfd) * nfds);
    return ret;
}
  1433. #endif
/*
 * Emulate pipe()/pipe2().
 *
 * pipedes:  guest address where the two new fds are stored
 * flags:    pipe2() flags (0 for plain pipe())
 * is_pipe2: non-zero when emulating pipe2()
 *
 * For the original pipe syscall some targets return the second fd in
 * a register instead of writing it to the user buffer; see the
 * per-target cases below.
 */
static abi_long do_pipe(CPUArchState *cpu_env, abi_ulong pipedes,
                        int flags, int is_pipe2)
{
    int host_pipe[2];
    abi_long ret;
    ret = pipe2(host_pipe, flags);

    if (is_error(ret))
        return get_errno(ret);

    /* Several targets have special calling conventions for the original
       pipe syscall, but didn't replicate this into the pipe2 syscall. */
    if (!is_pipe2) {
#if defined(TARGET_ALPHA)
        cpu_env->ir[IR_A4] = host_pipe[1];
        return host_pipe[0];
#elif defined(TARGET_MIPS)
        cpu_env->active_tc.gpr[3] = host_pipe[1];
        return host_pipe[0];
#elif defined(TARGET_SH4)
        cpu_env->gregs[1] = host_pipe[1];
        return host_pipe[0];
#elif defined(TARGET_SPARC)
        cpu_env->regwptr[1] = host_pipe[1];
        return host_pipe[0];
#endif
    }

    if (put_user_s32(host_pipe[0], pipedes)
        || put_user_s32(host_pipe[1], pipedes + sizeof(abi_int)))
        return -TARGET_EFAULT;
    return get_errno(ret);
}
/*
 * Copy a struct sockaddr from guest memory into the host buffer *addr,
 * byte-swapping the fields that need it for the families handled here
 * (AF_NETLINK, AF_PACKET, AF_INET6) and fixing up short AF_UNIX paths.
 * If the fd has a registered address translator, that takes over
 * completely.
 *
 * Returns 0 on success, -TARGET_EFAULT if the guest buffer cannot be
 * read (or whatever the fd's translator returns).
 */
static inline abi_long target_to_host_sockaddr(int fd, struct sockaddr *addr,
                                               abi_ulong target_addr,
                                               socklen_t len)
{
    const socklen_t unix_maxlen = sizeof (struct sockaddr_un);
    sa_family_t sa_family;
    struct target_sockaddr *target_saddr;

    if (fd_trans_target_to_host_addr(fd)) {
        return fd_trans_target_to_host_addr(fd)(addr, target_addr, len);
    }

    target_saddr = lock_user(VERIFY_READ, target_addr, len, 1);
    if (!target_saddr)
        return -TARGET_EFAULT;

    sa_family = tswap16(target_saddr->sa_family);

    /* Oops. The caller might send a incomplete sun_path; sun_path
     * must be terminated by \0 (see the manual page), but
     * unfortunately it is quite common to specify sockaddr_un
     * length as "strlen(x->sun_path)" while it should be
     * "strlen(...) + 1". We'll fix that here if needed.
     * Linux kernel has a similar feature.
     */
    if (sa_family == AF_UNIX) {
        if (len < unix_maxlen && len > 0) {
            char *cp = (char*)target_saddr;

            /* Include the terminating NUL if the path ran right up to len. */
            if ( cp[len-1] && !cp[len] )
                len++;
        }
        if (len > unix_maxlen)
            len = unix_maxlen;
    }

    memcpy(addr, target_saddr, len);
    addr->sa_family = sa_family;
    if (sa_family == AF_NETLINK) {
        struct sockaddr_nl *nladdr;

        nladdr = (struct sockaddr_nl *)addr;
        nladdr->nl_pid = tswap32(nladdr->nl_pid);
        nladdr->nl_groups = tswap32(nladdr->nl_groups);
    } else if (sa_family == AF_PACKET) {
        struct target_sockaddr_ll *lladdr;

        lladdr = (struct target_sockaddr_ll *)addr;
        lladdr->sll_ifindex = tswap32(lladdr->sll_ifindex);
        lladdr->sll_hatype = tswap16(lladdr->sll_hatype);
    } else if (sa_family == AF_INET6) {
        struct sockaddr_in6 *in6addr;

        in6addr = (struct sockaddr_in6 *)addr;
        in6addr->sin6_scope_id = tswap32(in6addr->sin6_scope_id);
    }
    unlock_user(target_saddr, target_addr, 0);

    return 0;
}
  1514. static inline abi_long host_to_target_sockaddr(abi_ulong target_addr,
  1515. struct sockaddr *addr,
  1516. socklen_t len)
  1517. {
  1518. struct target_sockaddr *target_saddr;
  1519. if (len == 0) {
  1520. return 0;
  1521. }
  1522. assert(addr);
  1523. target_saddr = lock_user(VERIFY_WRITE, target_addr, len, 0);
  1524. if (!target_saddr)
  1525. return -TARGET_EFAULT;
  1526. memcpy(target_saddr, addr, len);
  1527. if (len >= offsetof(struct target_sockaddr, sa_family) +
  1528. sizeof(target_saddr->sa_family)) {
  1529. target_saddr->sa_family = tswap16(addr->sa_family);
  1530. }
  1531. if (addr->sa_family == AF_NETLINK &&
  1532. len >= sizeof(struct target_sockaddr_nl)) {
  1533. struct target_sockaddr_nl *target_nl =
  1534. (struct target_sockaddr_nl *)target_saddr;
  1535. target_nl->nl_pid = tswap32(target_nl->nl_pid);
  1536. target_nl->nl_groups = tswap32(target_nl->nl_groups);
  1537. } else if (addr->sa_family == AF_PACKET) {
  1538. struct sockaddr_ll *target_ll = (struct sockaddr_ll *)target_saddr;
  1539. target_ll->sll_ifindex = tswap32(target_ll->sll_ifindex);
  1540. target_ll->sll_hatype = tswap16(target_ll->sll_hatype);
  1541. } else if (addr->sa_family == AF_INET6 &&
  1542. len >= sizeof(struct target_sockaddr_in6)) {
  1543. struct target_sockaddr_in6 *target_in6 =
  1544. (struct target_sockaddr_in6 *)target_saddr;
  1545. target_in6->sin6_scope_id = tswap16(target_in6->sin6_scope_id);
  1546. }
  1547. unlock_user(target_saddr, target_addr, len);
  1548. return 0;
  1549. }
/*
 * Convert the ancillary data (control messages) of a guest msghdr into
 * the host layout pointed to by msgh->msg_control, byte-swapping the
 * payload types we understand (SCM_RIGHTS fd arrays, SCM_CREDENTIALS,
 * SOL_ALG).  Unknown payloads are copied verbatim and logged.  The
 * host control buffer must already have been allocated by the caller.
 *
 * Returns 0 on success, -TARGET_EFAULT if the guest control buffer
 * cannot be read.  On return msgh->msg_controllen holds the amount of
 * host control data actually produced.
 */
static inline abi_long target_to_host_cmsg(struct msghdr *msgh,
                                           struct target_msghdr *target_msgh)
{
    struct cmsghdr *cmsg = CMSG_FIRSTHDR(msgh);
    abi_long msg_controllen;
    abi_ulong target_cmsg_addr;
    struct target_cmsghdr *target_cmsg, *target_cmsg_start;
    socklen_t space = 0;

    msg_controllen = tswapal(target_msgh->msg_controllen);
    /* Nothing to convert if there is not even room for one header. */
    if (msg_controllen < sizeof (struct target_cmsghdr))
        goto the_end;
    target_cmsg_addr = tswapal(target_msgh->msg_control);
    target_cmsg = lock_user(VERIFY_READ, target_cmsg_addr, msg_controllen, 1);
    target_cmsg_start = target_cmsg;
    if (!target_cmsg)
        return -TARGET_EFAULT;

    while (cmsg && target_cmsg) {
        void *data = CMSG_DATA(cmsg);
        void *target_data = TARGET_CMSG_DATA(target_cmsg);

        int len = tswapal(target_cmsg->cmsg_len)
            - sizeof(struct target_cmsghdr);

        space += CMSG_SPACE(len);
        if (space > msgh->msg_controllen) {
            space -= CMSG_SPACE(len);
            /* This is a QEMU bug, since we allocated the payload
             * area ourselves (unlike overflow in host-to-target
             * conversion, which is just the guest giving us a buffer
             * that's too small). It can't happen for the payload types
             * we currently support; if it becomes an issue in future
             * we would need to improve our allocation strategy to
             * something more intelligent than "twice the size of the
             * target buffer we're reading from".
             */
            qemu_log_mask(LOG_UNIMP,
                          ("Unsupported ancillary data %d/%d: "
                           "unhandled msg size\n"),
                          tswap32(target_cmsg->cmsg_level),
                          tswap32(target_cmsg->cmsg_type));
            break;
        }

        if (tswap32(target_cmsg->cmsg_level) == TARGET_SOL_SOCKET) {
            cmsg->cmsg_level = SOL_SOCKET;
        } else {
            cmsg->cmsg_level = tswap32(target_cmsg->cmsg_level);
        }
        cmsg->cmsg_type = tswap32(target_cmsg->cmsg_type);
        cmsg->cmsg_len = CMSG_LEN(len);

        if (cmsg->cmsg_level == SOL_SOCKET && cmsg->cmsg_type == SCM_RIGHTS) {
            /* Passed file descriptors: swap each 32-bit fd in the array. */
            int *fd = (int *)data;
            int *target_fd = (int *)target_data;
            int i, numfds = len / sizeof(int);

            for (i = 0; i < numfds; i++) {
                __get_user(fd[i], target_fd + i);
            }
        } else if (cmsg->cmsg_level == SOL_SOCKET
                   && cmsg->cmsg_type == SCM_CREDENTIALS) {
            struct ucred *cred = (struct ucred *)data;
            struct target_ucred *target_cred =
                (struct target_ucred *)target_data;

            __get_user(cred->pid, &target_cred->pid);
            __get_user(cred->uid, &target_cred->uid);
            __get_user(cred->gid, &target_cred->gid);
        } else if (cmsg->cmsg_level == SOL_ALG) {
            uint32_t *dst = (uint32_t *)data;

            memcpy(dst, target_data, len);
            /* fix endianness of first 32-bit word */
            if (len >= sizeof(uint32_t)) {
                *dst = tswap32(*dst);
            }
        } else {
            qemu_log_mask(LOG_UNIMP, "Unsupported target ancillary data: %d/%d\n",
                          cmsg->cmsg_level, cmsg->cmsg_type);
            memcpy(data, target_data, len);
        }

        cmsg = CMSG_NXTHDR(msgh, cmsg);
        target_cmsg = TARGET_CMSG_NXTHDR(target_msgh, target_cmsg,
                                         target_cmsg_start);
    }
    unlock_user(target_cmsg, target_cmsg_addr, 0);
 the_end:
    msgh->msg_controllen = space;
    return 0;
}
/*
 * Convert the ancillary data (control messages) received from the host
 * in msgh into the guest's control buffer described by target_msgh,
 * byte-swapping and resizing the payload types we understand
 * (SCM_RIGHTS, SO_TIMESTAMP, SCM_CREDENTIALS, several SOL_IP and
 * SOL_IPV6 messages).  Unknown payloads are copied verbatim, zero
 * padded if needed, and logged.  If the guest buffer is too small the
 * data is truncated and MSG_CTRUNC is set in the guest msg_flags, as
 * the kernel does.
 *
 * Returns 0 on success, -TARGET_EFAULT if the guest control buffer
 * cannot be written.  On return target_msgh->msg_controllen holds the
 * amount of guest control data produced (byte-swapped).
 */
static inline abi_long host_to_target_cmsg(struct target_msghdr *target_msgh,
                                           struct msghdr *msgh)
{
    struct cmsghdr *cmsg = CMSG_FIRSTHDR(msgh);
    abi_long msg_controllen;
    abi_ulong target_cmsg_addr;
    struct target_cmsghdr *target_cmsg, *target_cmsg_start;
    socklen_t space = 0;

    msg_controllen = tswapal(target_msgh->msg_controllen);
    /* Nothing to deliver if there is not even room for one header. */
    if (msg_controllen < sizeof (struct target_cmsghdr))
        goto the_end;
    target_cmsg_addr = tswapal(target_msgh->msg_control);
    target_cmsg = lock_user(VERIFY_WRITE, target_cmsg_addr, msg_controllen, 0);
    target_cmsg_start = target_cmsg;
    if (!target_cmsg)
        return -TARGET_EFAULT;

    while (cmsg && target_cmsg) {
        void *data = CMSG_DATA(cmsg);
        void *target_data = TARGET_CMSG_DATA(target_cmsg);

        int len = cmsg->cmsg_len - sizeof(struct cmsghdr);
        int tgt_len, tgt_space;

        /* We never copy a half-header but may copy half-data;
         * this is Linux's behaviour in put_cmsg(). Note that
         * truncation here is a guest problem (which we report
         * to the guest via the CTRUNC bit), unlike truncation
         * in target_to_host_cmsg, which is a QEMU bug.
         */
        if (msg_controllen < sizeof(struct target_cmsghdr)) {
            target_msgh->msg_flags |= tswap32(MSG_CTRUNC);
            break;
        }

        if (cmsg->cmsg_level == SOL_SOCKET) {
            target_cmsg->cmsg_level = tswap32(TARGET_SOL_SOCKET);
        } else {
            target_cmsg->cmsg_level = tswap32(cmsg->cmsg_level);
        }
        target_cmsg->cmsg_type = tswap32(cmsg->cmsg_type);

        /* Payload types which need a different size of payload on
         * the target must adjust tgt_len here.
         */
        tgt_len = len;
        switch (cmsg->cmsg_level) {
        case SOL_SOCKET:
            switch (cmsg->cmsg_type) {
            case SO_TIMESTAMP:
                tgt_len = sizeof(struct target_timeval);
                break;
            default:
                break;
            }
            break;
        default:
            break;
        }

        if (msg_controllen < TARGET_CMSG_LEN(tgt_len)) {
            /* Not enough room left: deliver what fits and flag truncation. */
            target_msgh->msg_flags |= tswap32(MSG_CTRUNC);
            tgt_len = msg_controllen - sizeof(struct target_cmsghdr);
        }

        /* We must now copy-and-convert len bytes of payload
         * into tgt_len bytes of destination space. Bear in mind
         * that in both source and destination we may be dealing
         * with a truncated value!
         */
        switch (cmsg->cmsg_level) {
        case SOL_SOCKET:
            switch (cmsg->cmsg_type) {
            case SCM_RIGHTS:
            {
                /* Passed file descriptors: swap each 32-bit fd. */
                int *fd = (int *)data;
                int *target_fd = (int *)target_data;
                int i, numfds = tgt_len / sizeof(int);

                for (i = 0; i < numfds; i++) {
                    __put_user(fd[i], target_fd + i);
                }
                break;
            }
            case SO_TIMESTAMP:
            {
                struct timeval *tv = (struct timeval *)data;
                struct target_timeval *target_tv =
                    (struct target_timeval *)target_data;

                if (len != sizeof(struct timeval) ||
                    tgt_len != sizeof(struct target_timeval)) {
                    goto unimplemented;
                }

                /* copy struct timeval to target */
                __put_user(tv->tv_sec, &target_tv->tv_sec);
                __put_user(tv->tv_usec, &target_tv->tv_usec);
                break;
            }
            case SCM_CREDENTIALS:
            {
                struct ucred *cred = (struct ucred *)data;
                struct target_ucred *target_cred =
                    (struct target_ucred *)target_data;

                __put_user(cred->pid, &target_cred->pid);
                __put_user(cred->uid, &target_cred->uid);
                __put_user(cred->gid, &target_cred->gid);
                break;
            }
            default:
                goto unimplemented;
            }
            break;

        case SOL_IP:
            switch (cmsg->cmsg_type) {
            case IP_TTL:
            {
                uint32_t *v = (uint32_t *)data;
                uint32_t *t_int = (uint32_t *)target_data;

                if (len != sizeof(uint32_t) ||
                    tgt_len != sizeof(uint32_t)) {
                    goto unimplemented;
                }
                __put_user(*v, t_int);
                break;
            }
            case IP_RECVERR:
            {
                struct errhdr_t {
                   struct sock_extended_err ee;
                   struct sockaddr_in offender;
                };
                struct errhdr_t *errh = (struct errhdr_t *)data;
                struct errhdr_t *target_errh =
                    (struct errhdr_t *)target_data;

                if (len != sizeof(struct errhdr_t) ||
                    tgt_len != sizeof(struct errhdr_t)) {
                    goto unimplemented;
                }
                __put_user(errh->ee.ee_errno, &target_errh->ee.ee_errno);
                __put_user(errh->ee.ee_origin, &target_errh->ee.ee_origin);
                __put_user(errh->ee.ee_type,  &target_errh->ee.ee_type);
                __put_user(errh->ee.ee_code, &target_errh->ee.ee_code);
                __put_user(errh->ee.ee_pad, &target_errh->ee.ee_pad);
                __put_user(errh->ee.ee_info, &target_errh->ee.ee_info);
                __put_user(errh->ee.ee_data, &target_errh->ee.ee_data);
                host_to_target_sockaddr((unsigned long) &target_errh->offender,
                    (void *) &errh->offender, sizeof(errh->offender));
                break;
            }
            case IP_PKTINFO:
            {
                struct in_pktinfo *pkti = data;
                struct target_in_pktinfo *target_pi = target_data;

                __put_user(pkti->ipi_ifindex, &target_pi->ipi_ifindex);
                /* in_addr fields are already in network byte order */
                target_pi->ipi_spec_dst.s_addr = pkti->ipi_spec_dst.s_addr;
                target_pi->ipi_addr.s_addr = pkti->ipi_addr.s_addr;
                break;
            }
            default:
                goto unimplemented;
            }
            break;

        case SOL_IPV6:
            switch (cmsg->cmsg_type) {
            case IPV6_HOPLIMIT:
            {
                uint32_t *v = (uint32_t *)data;
                uint32_t *t_int = (uint32_t *)target_data;

                if (len != sizeof(uint32_t) ||
                    tgt_len != sizeof(uint32_t)) {
                    goto unimplemented;
                }
                __put_user(*v, t_int);
                break;
            }
            case IPV6_RECVERR:
            {
                struct errhdr6_t {
                   struct sock_extended_err ee;
                   struct sockaddr_in6 offender;
                };
                struct errhdr6_t *errh = (struct errhdr6_t *)data;
                struct errhdr6_t *target_errh =
                    (struct errhdr6_t *)target_data;

                if (len != sizeof(struct errhdr6_t) ||
                    tgt_len != sizeof(struct errhdr6_t)) {
                    goto unimplemented;
                }
                __put_user(errh->ee.ee_errno, &target_errh->ee.ee_errno);
                __put_user(errh->ee.ee_origin, &target_errh->ee.ee_origin);
                __put_user(errh->ee.ee_type,  &target_errh->ee.ee_type);
                __put_user(errh->ee.ee_code, &target_errh->ee.ee_code);
                __put_user(errh->ee.ee_pad, &target_errh->ee.ee_pad);
                __put_user(errh->ee.ee_info, &target_errh->ee.ee_info);
                __put_user(errh->ee.ee_data, &target_errh->ee.ee_data);
                host_to_target_sockaddr((unsigned long) &target_errh->offender,
                    (void *) &errh->offender, sizeof(errh->offender));
                break;
            }
            default:
                goto unimplemented;
            }
            break;

        default:
        unimplemented:
            qemu_log_mask(LOG_UNIMP, "Unsupported host ancillary data: %d/%d\n",
                          cmsg->cmsg_level, cmsg->cmsg_type);
            memcpy(target_data, data, MIN(len, tgt_len));
            if (tgt_len > len) {
                memset(target_data + len, 0, tgt_len - len);
            }
        }

        target_cmsg->cmsg_len = tswapal(TARGET_CMSG_LEN(tgt_len));
        tgt_space = TARGET_CMSG_SPACE(tgt_len);
        if (msg_controllen < tgt_space) {
            tgt_space = msg_controllen;
        }
        msg_controllen -= tgt_space;
        space += tgt_space;
        cmsg = CMSG_NXTHDR(msgh, cmsg);
        target_cmsg = TARGET_CMSG_NXTHDR(target_msgh, target_cmsg,
                                         target_cmsg_start);
    }
    unlock_user(target_cmsg, target_cmsg_addr, space);
 the_end:
    target_msgh->msg_controllen = tswapal(space);
    return 0;
}
/* do_setsockopt() Must return target values and target errnos.
 *
 * Emulates setsockopt(2) for the guest: reads the option value out of
 * guest memory at optval_addr (byte-swapping / re-marshalling where the
 * target and host layouts differ) and forwards it to the host socket.
 * Levels/options with no translation path fall through to "unimplemented"
 * and return -TARGET_ENOPROTOOPT.
 */
static abi_long do_setsockopt(int sockfd, int level, int optname,
                              abi_ulong optval_addr, socklen_t optlen)
{
    abi_long ret;
    int val;

    switch(level) {
    case SOL_TCP:
    case SOL_UDP:
        /* TCP and UDP options all take an 'int' value. */
        if (optlen < sizeof(uint32_t))
            return -TARGET_EINVAL;
        if (get_user_u32(val, optval_addr))
            return -TARGET_EFAULT;
        ret = get_errno(setsockopt(sockfd, level, optname, &val, sizeof(val)));
        break;
    case SOL_IP:
        switch(optname) {
        case IP_TOS:
        case IP_TTL:
        case IP_HDRINCL:
        case IP_ROUTER_ALERT:
        case IP_RECVOPTS:
        case IP_RETOPTS:
        case IP_PKTINFO:
        case IP_MTU_DISCOVER:
        case IP_RECVERR:
        case IP_RECVTTL:
        case IP_RECVTOS:
#ifdef IP_FREEBIND
        case IP_FREEBIND:
#endif
        case IP_MULTICAST_TTL:
        case IP_MULTICAST_LOOP:
            /* Like the kernel, accept either a full int or a single byte
             * for these options; anything shorter than 1 byte leaves
             * val at 0.
             */
            val = 0;
            if (optlen >= sizeof(uint32_t)) {
                if (get_user_u32(val, optval_addr))
                    return -TARGET_EFAULT;
            } else if (optlen >= 1) {
                if (get_user_u8(val, optval_addr))
                    return -TARGET_EFAULT;
            }
            ret = get_errno(setsockopt(sockfd, level, optname, &val, sizeof(val)));
            break;
        case IP_MULTICAST_IF:
        case IP_ADD_MEMBERSHIP:
        case IP_DROP_MEMBERSHIP:
        {
            /* The guest may pass an in_addr, an ip_mreq or an ip_mreqn;
             * copy out as much as optlen says is there and only swap the
             * ifindex field (the addresses stay in network byte order).
             */
            struct ip_mreqn ip_mreq;
            struct target_ip_mreqn *target_smreqn;
            int min_size;

            QEMU_BUILD_BUG_ON(sizeof(struct ip_mreq) !=
                              sizeof(struct target_ip_mreq));

            if (optname == IP_MULTICAST_IF) {
                min_size = sizeof(struct in_addr);
            } else {
                min_size = sizeof(struct target_ip_mreq);
            }
            if (optlen < min_size ||
                optlen > sizeof (struct target_ip_mreqn)) {
                return -TARGET_EINVAL;
            }

            target_smreqn = lock_user(VERIFY_READ, optval_addr, optlen, 1);
            if (!target_smreqn) {
                return -TARGET_EFAULT;
            }
            ip_mreq.imr_multiaddr.s_addr = target_smreqn->imr_multiaddr.s_addr;
            if (optlen >= sizeof(struct target_ip_mreq)) {
                ip_mreq.imr_address.s_addr = target_smreqn->imr_address.s_addr;
                if (optlen >= sizeof(struct target_ip_mreqn)) {
                    __put_user(target_smreqn->imr_ifindex, &ip_mreq.imr_ifindex);
                    optlen = sizeof(struct ip_mreqn);
                }
            }
            unlock_user(target_smreqn, optval_addr, 0);

            ret = get_errno(setsockopt(sockfd, level, optname, &ip_mreq, optlen));
            break;
        }
        case IP_BLOCK_SOURCE:
        case IP_UNBLOCK_SOURCE:
        case IP_ADD_SOURCE_MEMBERSHIP:
        case IP_DROP_SOURCE_MEMBERSHIP:
        {
            /* ip_mreq_source is layout-compatible between target and host
             * (all fields are network-byte-order addresses), so it can be
             * passed through untranslated.
             */
            struct ip_mreq_source *ip_mreq_source;

            if (optlen != sizeof (struct target_ip_mreq_source))
                return -TARGET_EINVAL;

            ip_mreq_source = lock_user(VERIFY_READ, optval_addr, optlen, 1);
            if (!ip_mreq_source) {
                return -TARGET_EFAULT;
            }
            ret = get_errno(setsockopt(sockfd, level, optname, ip_mreq_source, optlen));
            unlock_user (ip_mreq_source, optval_addr, 0);
            break;
        }
        default:
            goto unimplemented;
        }
        break;
    case SOL_IPV6:
        switch (optname) {
        /* All of these take a plain int value. */
        case IPV6_MTU_DISCOVER:
        case IPV6_MTU:
        case IPV6_V6ONLY:
        case IPV6_RECVPKTINFO:
        case IPV6_UNICAST_HOPS:
        case IPV6_MULTICAST_HOPS:
        case IPV6_MULTICAST_LOOP:
        case IPV6_RECVERR:
        case IPV6_RECVHOPLIMIT:
        case IPV6_2292HOPLIMIT:
        case IPV6_CHECKSUM:
        case IPV6_ADDRFORM:
        case IPV6_2292PKTINFO:
        case IPV6_RECVTCLASS:
        case IPV6_RECVRTHDR:
        case IPV6_2292RTHDR:
        case IPV6_RECVHOPOPTS:
        case IPV6_2292HOPOPTS:
        case IPV6_RECVDSTOPTS:
        case IPV6_2292DSTOPTS:
        case IPV6_TCLASS:
        case IPV6_ADDR_PREFERENCES:
#ifdef IPV6_RECVPATHMTU
        case IPV6_RECVPATHMTU:
#endif
#ifdef IPV6_TRANSPARENT
        case IPV6_TRANSPARENT:
#endif
#ifdef IPV6_FREEBIND
        case IPV6_FREEBIND:
#endif
#ifdef IPV6_RECVORIGDSTADDR
        case IPV6_RECVORIGDSTADDR:
#endif
            val = 0;
            if (optlen < sizeof(uint32_t)) {
                return -TARGET_EINVAL;
            }
            if (get_user_u32(val, optval_addr)) {
                return -TARGET_EFAULT;
            }
            ret = get_errno(setsockopt(sockfd, level, optname,
                                       &val, sizeof(val)));
            break;
        case IPV6_PKTINFO:
        {
            struct in6_pktinfo pki;

            if (optlen < sizeof(pki)) {
                return -TARGET_EINVAL;
            }

            if (copy_from_user(&pki, optval_addr, sizeof(pki))) {
                return -TARGET_EFAULT;
            }

            /* Only the interface index is host-endian; the address is
             * network byte order and needs no swap.
             */
            pki.ipi6_ifindex = tswap32(pki.ipi6_ifindex);

            ret = get_errno(setsockopt(sockfd, level, optname,
                                       &pki, sizeof(pki)));
            break;
        }
        case IPV6_ADD_MEMBERSHIP:
        case IPV6_DROP_MEMBERSHIP:
        {
            struct ipv6_mreq ipv6mreq;

            if (optlen < sizeof(ipv6mreq)) {
                return -TARGET_EINVAL;
            }

            if (copy_from_user(&ipv6mreq, optval_addr, sizeof(ipv6mreq))) {
                return -TARGET_EFAULT;
            }

            /* As above, only the ifindex is host-endian. */
            ipv6mreq.ipv6mr_interface = tswap32(ipv6mreq.ipv6mr_interface);

            ret = get_errno(setsockopt(sockfd, level, optname,
                                       &ipv6mreq, sizeof(ipv6mreq)));
            break;
        }
        default:
            goto unimplemented;
        }
        break;
    case SOL_ICMPV6:
        switch (optname) {
        case ICMPV6_FILTER:
        {
            /* icmp6_filter is an array of eight 32-bit words; swap each
             * word to host endianness before handing it to the kernel.
             */
            struct icmp6_filter icmp6f;

            if (optlen > sizeof(icmp6f)) {
                optlen = sizeof(icmp6f);
            }

            if (copy_from_user(&icmp6f, optval_addr, optlen)) {
                return -TARGET_EFAULT;
            }

            for (val = 0; val < 8; val++) {
                icmp6f.data[val] = tswap32(icmp6f.data[val]);
            }

            ret = get_errno(setsockopt(sockfd, level, optname,
                                       &icmp6f, optlen));
            break;
        }
        default:
            goto unimplemented;
        }
        break;
    case SOL_RAW:
        switch (optname) {
        case ICMP_FILTER:
        case IPV6_CHECKSUM:
            /* those take an u32 value */
            if (optlen < sizeof(uint32_t)) {
                return -TARGET_EINVAL;
            }
            if (get_user_u32(val, optval_addr)) {
                return -TARGET_EFAULT;
            }
            ret = get_errno(setsockopt(sockfd, level, optname,
                                       &val, sizeof(val)));
            break;
        default:
            goto unimplemented;
        }
        break;
#if defined(SOL_ALG) && defined(ALG_SET_KEY) && defined(ALG_SET_AEAD_AUTHSIZE)
    case SOL_ALG:
        switch (optname) {
        case ALG_SET_KEY:
        {
            /* Key bytes are opaque; pass them through unmodified. */
            char *alg_key = lock_user(VERIFY_READ, optval_addr, optlen, 1);
            if (!alg_key) {
                return -TARGET_EFAULT;
            }
            ret = get_errno(setsockopt(sockfd, level, optname,
                                       alg_key, optlen));
            unlock_user(alg_key, optval_addr, optlen);
            break;
        }
        case ALG_SET_AEAD_AUTHSIZE:
        {
            /* The auth size is carried in optlen itself; optval is NULL. */
            ret = get_errno(setsockopt(sockfd, level, optname,
                                       NULL, optlen));
            break;
        }
        default:
            goto unimplemented;
        }
        break;
#endif
    case TARGET_SOL_SOCKET:
        switch (optname) {
        case TARGET_SO_RCVTIMEO:
        case TARGET_SO_SNDTIMEO:
        {
            /* struct timeval differs between target and host ABIs, so it
             * must be converted rather than copied.
             */
            struct timeval tv;

            if (optlen != sizeof(struct target_timeval)) {
                return -TARGET_EINVAL;
            }

            if (copy_from_user_timeval(&tv, optval_addr)) {
                return -TARGET_EFAULT;
            }

            ret = get_errno(setsockopt(sockfd, SOL_SOCKET,
                            optname == TARGET_SO_RCVTIMEO ?
                                SO_RCVTIMEO : SO_SNDTIMEO,
                            &tv, sizeof(tv)));
            return ret;
        }
        case TARGET_SO_ATTACH_FILTER:
        {
            /* Deep-copy the BPF program, swapping each instruction's
             * 16/32-bit fields to host endianness.
             */
            struct target_sock_fprog *tfprog;
            struct target_sock_filter *tfilter;
            struct sock_fprog fprog;
            struct sock_filter *filter;
            int i;

            if (optlen != sizeof(*tfprog)) {
                return -TARGET_EINVAL;
            }
            if (!lock_user_struct(VERIFY_READ, tfprog, optval_addr, 0)) {
                return -TARGET_EFAULT;
            }
            if (!lock_user_struct(VERIFY_READ, tfilter,
                                  tswapal(tfprog->filter), 0)) {
                unlock_user_struct(tfprog, optval_addr, 1);
                return -TARGET_EFAULT;
            }

            fprog.len = tswap16(tfprog->len);
            filter = g_try_new(struct sock_filter, fprog.len);
            if (filter == NULL) {
                unlock_user_struct(tfilter, tfprog->filter, 1);
                unlock_user_struct(tfprog, optval_addr, 1);
                return -TARGET_ENOMEM;
            }
            for (i = 0; i < fprog.len; i++) {
                filter[i].code = tswap16(tfilter[i].code);
                filter[i].jt = tfilter[i].jt;
                filter[i].jf = tfilter[i].jf;
                filter[i].k = tswap32(tfilter[i].k);
            }
            fprog.filter = filter;

            ret = get_errno(setsockopt(sockfd, SOL_SOCKET,
                            SO_ATTACH_FILTER, &fprog, sizeof(fprog)));
            g_free(filter);

            /* NOTE(review): tfilter was locked at tswapal(tfprog->filter)
             * but is unlocked at the raw tfprog->filter value — confirm
             * this address mismatch is intentional/harmless.
             */
            unlock_user_struct(tfilter, tfprog->filter, 1);
            unlock_user_struct(tfprog, optval_addr, 1);
            return ret;
        }
        case TARGET_SO_BINDTODEVICE:
        {
            char *dev_ifname, *addr_ifname;

            /* Clamp to the interface-name limit; the copy below is then
             * NUL-terminated into a fixed IFNAMSIZ buffer.
             */
            if (optlen > IFNAMSIZ - 1) {
                optlen = IFNAMSIZ - 1;
            }
            dev_ifname = lock_user(VERIFY_READ, optval_addr, optlen, 1);
            if (!dev_ifname) {
                return -TARGET_EFAULT;
            }
            optname = SO_BINDTODEVICE;
            addr_ifname = alloca(IFNAMSIZ);
            memcpy(addr_ifname, dev_ifname, optlen);
            addr_ifname[optlen] = 0;
            ret = get_errno(setsockopt(sockfd, SOL_SOCKET, optname,
                                       addr_ifname, optlen));
            unlock_user (dev_ifname, optval_addr, 0);
            return ret;
        }
        case TARGET_SO_LINGER:
        {
            /* struct linger fields need per-field conversion. */
            struct linger lg;
            struct target_linger *tlg;

            if (optlen != sizeof(struct target_linger)) {
                return -TARGET_EINVAL;
            }
            if (!lock_user_struct(VERIFY_READ, tlg, optval_addr, 1)) {
                return -TARGET_EFAULT;
            }
            __get_user(lg.l_onoff, &tlg->l_onoff);
            __get_user(lg.l_linger, &tlg->l_linger);
            ret = get_errno(setsockopt(sockfd, SOL_SOCKET, SO_LINGER,
                            &lg, sizeof(lg)));
            unlock_user_struct(tlg, optval_addr, 0);
            return ret;
        }
        /* Options with 'int' argument.  The cases below only translate
         * the option name; the shared code after the switch reads the
         * int value and issues the host call.
         */
        case TARGET_SO_DEBUG:
            optname = SO_DEBUG;
            break;
        case TARGET_SO_REUSEADDR:
            optname = SO_REUSEADDR;
            break;
#ifdef SO_REUSEPORT
        case TARGET_SO_REUSEPORT:
            optname = SO_REUSEPORT;
            break;
#endif
        case TARGET_SO_TYPE:
            optname = SO_TYPE;
            break;
        case TARGET_SO_ERROR:
            optname = SO_ERROR;
            break;
        case TARGET_SO_DONTROUTE:
            optname = SO_DONTROUTE;
            break;
        case TARGET_SO_BROADCAST:
            optname = SO_BROADCAST;
            break;
        case TARGET_SO_SNDBUF:
            optname = SO_SNDBUF;
            break;
        case TARGET_SO_SNDBUFFORCE:
            optname = SO_SNDBUFFORCE;
            break;
        case TARGET_SO_RCVBUF:
            optname = SO_RCVBUF;
            break;
        case TARGET_SO_RCVBUFFORCE:
            optname = SO_RCVBUFFORCE;
            break;
        case TARGET_SO_KEEPALIVE:
            optname = SO_KEEPALIVE;
            break;
        case TARGET_SO_OOBINLINE:
            optname = SO_OOBINLINE;
            break;
        case TARGET_SO_NO_CHECK:
            optname = SO_NO_CHECK;
            break;
        case TARGET_SO_PRIORITY:
            optname = SO_PRIORITY;
            break;
#ifdef SO_BSDCOMPAT
        case TARGET_SO_BSDCOMPAT:
            optname = SO_BSDCOMPAT;
            break;
#endif
        case TARGET_SO_PASSCRED:
            optname = SO_PASSCRED;
            break;
        case TARGET_SO_PASSSEC:
            optname = SO_PASSSEC;
            break;
        case TARGET_SO_TIMESTAMP:
            optname = SO_TIMESTAMP;
            break;
        case TARGET_SO_RCVLOWAT:
            optname = SO_RCVLOWAT;
            break;
        default:
            goto unimplemented;
        }
        if (optlen < sizeof(uint32_t))
            return -TARGET_EINVAL;
        if (get_user_u32(val, optval_addr))
            return -TARGET_EFAULT;
        ret = get_errno(setsockopt(sockfd, SOL_SOCKET, optname, &val, sizeof(val)));
        break;
#ifdef SOL_NETLINK
    case SOL_NETLINK:
        switch (optname) {
        case NETLINK_PKTINFO:
        case NETLINK_ADD_MEMBERSHIP:
        case NETLINK_DROP_MEMBERSHIP:
        case NETLINK_BROADCAST_ERROR:
        case NETLINK_NO_ENOBUFS:
#if LINUX_VERSION_CODE >= KERNEL_VERSION(4, 2, 0)
        case NETLINK_LISTEN_ALL_NSID:
        case NETLINK_CAP_ACK:
#endif /* LINUX_VERSION_CODE >= KERNEL_VERSION(4, 2, 0) */
#if LINUX_VERSION_CODE >= KERNEL_VERSION(4, 12, 0)
        case NETLINK_EXT_ACK:
#endif /* LINUX_VERSION_CODE >= KERNEL_VERSION(4, 12, 0) */
#if LINUX_VERSION_CODE >= KERNEL_VERSION(4, 20, 0)
        case NETLINK_GET_STRICT_CHK:
#endif /* LINUX_VERSION_CODE >= KERNEL_VERSION(4, 20, 0) */
            break;
        default:
            goto unimplemented;
        }
        /* All supported netlink options take an int value. */
        val = 0;
        if (optlen < sizeof(uint32_t)) {
            return -TARGET_EINVAL;
        }
        if (get_user_u32(val, optval_addr)) {
            return -TARGET_EFAULT;
        }
        ret = get_errno(setsockopt(sockfd, SOL_NETLINK, optname, &val,
                                   sizeof(val)));
        break;
#endif /* SOL_NETLINK */
    default:
    unimplemented:
        qemu_log_mask(LOG_UNIMP, "Unsupported setsockopt level=%d optname=%d\n",
                      level, optname);
        ret = -TARGET_ENOPROTOOPT;
    }
    return ret;
}
/* do_getsockopt() Must return target values and target errnos.
 *
 * Emulates getsockopt(2) for the guest: performs the host call, then
 * converts the returned value/struct to the target ABI and writes it back
 * to guest memory at optval_addr, updating *optlen.  Unknown combinations
 * return -TARGET_EOPNOTSUPP.
 */
static abi_long do_getsockopt(int sockfd, int level, int optname,
                              abi_ulong optval_addr, abi_ulong optlen)
{
    abi_long ret;
    int len, val;
    socklen_t lv;

    switch(level) {
    case TARGET_SOL_SOCKET:
        level = SOL_SOCKET;
        switch (optname) {
        /* These don't just return a single integer */
        case TARGET_SO_PEERNAME:
            goto unimplemented;
        case TARGET_SO_RCVTIMEO: {
            struct timeval tv;
            socklen_t tvlen;

            optname = SO_RCVTIMEO;

            /* Shared tail for SO_RCVTIMEO / SO_SNDTIMEO: fetch the host
             * timeval and convert it to a target_timeval.
             */
get_timeout:
            if (get_user_u32(len, optlen)) {
                return -TARGET_EFAULT;
            }
            if (len < 0) {
                return -TARGET_EINVAL;
            }

            tvlen = sizeof(tv);
            ret = get_errno(getsockopt(sockfd, level, optname,
                                       &tv, &tvlen));
            if (ret < 0) {
                return ret;
            }
            if (len > sizeof(struct target_timeval)) {
                len = sizeof(struct target_timeval);
            }
            if (copy_to_user_timeval(optval_addr, &tv)) {
                return -TARGET_EFAULT;
            }
            if (put_user_u32(len, optlen)) {
                return -TARGET_EFAULT;
            }
            break;
        }
        case TARGET_SO_SNDTIMEO:
            optname = SO_SNDTIMEO;
            goto get_timeout;
        case TARGET_SO_PEERCRED: {
            /* struct ucred needs per-field conversion. */
            struct ucred cr;
            socklen_t crlen;
            struct target_ucred *tcr;

            if (get_user_u32(len, optlen)) {
                return -TARGET_EFAULT;
            }
            if (len < 0) {
                return -TARGET_EINVAL;
            }

            crlen = sizeof(cr);
            ret = get_errno(getsockopt(sockfd, level, SO_PEERCRED,
                                       &cr, &crlen));
            if (ret < 0) {
                return ret;
            }
            if (len > crlen) {
                len = crlen;
            }
            if (!lock_user_struct(VERIFY_WRITE, tcr, optval_addr, 0)) {
                return -TARGET_EFAULT;
            }
            __put_user(cr.pid, &tcr->pid);
            __put_user(cr.uid, &tcr->uid);
            __put_user(cr.gid, &tcr->gid);
            unlock_user_struct(tcr, optval_addr, 1);
            if (put_user_u32(len, optlen)) {
                return -TARGET_EFAULT;
            }
            break;
        }
        case TARGET_SO_PEERSEC: {
            /* The security label is an opaque string; copy it straight
             * into guest memory.
             */
            char *name;

            if (get_user_u32(len, optlen)) {
                return -TARGET_EFAULT;
            }
            if (len < 0) {
                return -TARGET_EINVAL;
            }
            name = lock_user(VERIFY_WRITE, optval_addr, len, 0);
            if (!name) {
                return -TARGET_EFAULT;
            }
            lv = len;
            ret = get_errno(getsockopt(sockfd, level, SO_PEERSEC,
                                       name, &lv));
            if (put_user_u32(lv, optlen)) {
                ret = -TARGET_EFAULT;
            }
            unlock_user(name, optval_addr, lv);
            break;
        }
        case TARGET_SO_LINGER:
        {
            struct linger lg;
            socklen_t lglen;
            struct target_linger *tlg;

            if (get_user_u32(len, optlen)) {
                return -TARGET_EFAULT;
            }
            if (len < 0) {
                return -TARGET_EINVAL;
            }

            lglen = sizeof(lg);
            ret = get_errno(getsockopt(sockfd, level, SO_LINGER,
                                       &lg, &lglen));
            if (ret < 0) {
                return ret;
            }
            if (len > lglen) {
                len = lglen;
            }
            if (!lock_user_struct(VERIFY_WRITE, tlg, optval_addr, 0)) {
                return -TARGET_EFAULT;
            }
            __put_user(lg.l_onoff, &tlg->l_onoff);
            __put_user(lg.l_linger, &tlg->l_linger);
            unlock_user_struct(tlg, optval_addr, 1);
            if (put_user_u32(len, optlen)) {
                return -TARGET_EFAULT;
            }
            break;
        }
        /* Options with 'int' argument.  Each case only maps the target
         * option name to the host one and jumps to the shared int path.
         */
        case TARGET_SO_DEBUG:
            optname = SO_DEBUG;
            goto int_case;
        case TARGET_SO_REUSEADDR:
            optname = SO_REUSEADDR;
            goto int_case;
#ifdef SO_REUSEPORT
        case TARGET_SO_REUSEPORT:
            optname = SO_REUSEPORT;
            goto int_case;
#endif
        case TARGET_SO_TYPE:
            optname = SO_TYPE;
            goto int_case;
        case TARGET_SO_ERROR:
            optname = SO_ERROR;
            goto int_case;
        case TARGET_SO_DONTROUTE:
            optname = SO_DONTROUTE;
            goto int_case;
        case TARGET_SO_BROADCAST:
            optname = SO_BROADCAST;
            goto int_case;
        case TARGET_SO_SNDBUF:
            optname = SO_SNDBUF;
            goto int_case;
        case TARGET_SO_RCVBUF:
            optname = SO_RCVBUF;
            goto int_case;
        case TARGET_SO_KEEPALIVE:
            optname = SO_KEEPALIVE;
            goto int_case;
        case TARGET_SO_OOBINLINE:
            optname = SO_OOBINLINE;
            goto int_case;
        case TARGET_SO_NO_CHECK:
            optname = SO_NO_CHECK;
            goto int_case;
        case TARGET_SO_PRIORITY:
            optname = SO_PRIORITY;
            goto int_case;
#ifdef SO_BSDCOMPAT
        case TARGET_SO_BSDCOMPAT:
            optname = SO_BSDCOMPAT;
            goto int_case;
#endif
        case TARGET_SO_PASSCRED:
            optname = SO_PASSCRED;
            goto int_case;
        case TARGET_SO_TIMESTAMP:
            optname = SO_TIMESTAMP;
            goto int_case;
        case TARGET_SO_RCVLOWAT:
            optname = SO_RCVLOWAT;
            goto int_case;
        case TARGET_SO_ACCEPTCONN:
            optname = SO_ACCEPTCONN;
            goto int_case;
        case TARGET_SO_PROTOCOL:
            optname = SO_PROTOCOL;
            goto int_case;
        case TARGET_SO_DOMAIN:
            optname = SO_DOMAIN;
            goto int_case;
        default:
            goto int_case;
        }
        break;
    case SOL_TCP:
    case SOL_UDP:
        /* TCP and UDP options all take an 'int' value. */
    int_case:
        if (get_user_u32(len, optlen))
            return -TARGET_EFAULT;
        if (len < 0)
            return -TARGET_EINVAL;
        /* sizeof(lv) == sizeof(int) here, so this is the size of val. */
        lv = sizeof(lv);
        ret = get_errno(getsockopt(sockfd, level, optname, &val, &lv));
        if (ret < 0)
            return ret;
        /* A few options return values that differ between host and
         * target and must be translated.
         */
        switch (optname) {
        case SO_TYPE:
            val = host_to_target_sock_type(val);
            break;
        case SO_ERROR:
            val = host_to_target_errno(val);
            break;
        }
        if (len > lv)
            len = lv;
        if (len == 4) {
            if (put_user_u32(val, optval_addr))
                return -TARGET_EFAULT;
        } else {
            if (put_user_u8(val, optval_addr))
                return -TARGET_EFAULT;
        }
        if (put_user_u32(len, optlen))
            return -TARGET_EFAULT;
        break;
    case SOL_IP:
        switch(optname) {
        case IP_TOS:
        case IP_TTL:
        case IP_HDRINCL:
        case IP_ROUTER_ALERT:
        case IP_RECVOPTS:
        case IP_RETOPTS:
        case IP_PKTINFO:
        case IP_MTU_DISCOVER:
        case IP_RECVERR:
        case IP_RECVTOS:
#ifdef IP_FREEBIND
        case IP_FREEBIND:
#endif
        case IP_MULTICAST_TTL:
        case IP_MULTICAST_LOOP:
            if (get_user_u32(len, optlen))
                return -TARGET_EFAULT;
            if (len < 0)
                return -TARGET_EINVAL;
            lv = sizeof(lv);
            ret = get_errno(getsockopt(sockfd, level, optname, &val, &lv));
            if (ret < 0)
                return ret;
            /* If the caller asked for less than an int and the value
             * fits in a byte, return a single byte (kernel behaviour
             * for these byte-sized IP options).
             */
            if (len < sizeof(int) && len > 0 && val >= 0 && val < 255) {
                len = 1;
                if (put_user_u32(len, optlen)
                    || put_user_u8(val, optval_addr))
                    return -TARGET_EFAULT;
            } else {
                if (len > sizeof(int))
                    len = sizeof(int);
                if (put_user_u32(len, optlen)
                    || put_user_u32(val, optval_addr))
                    return -TARGET_EFAULT;
            }
            break;
        default:
            ret = -TARGET_ENOPROTOOPT;
            break;
        }
        break;
    case SOL_IPV6:
        switch (optname) {
        case IPV6_MTU_DISCOVER:
        case IPV6_MTU:
        case IPV6_V6ONLY:
        case IPV6_RECVPKTINFO:
        case IPV6_UNICAST_HOPS:
        case IPV6_MULTICAST_HOPS:
        case IPV6_MULTICAST_LOOP:
        case IPV6_RECVERR:
        case IPV6_RECVHOPLIMIT:
        case IPV6_2292HOPLIMIT:
        case IPV6_CHECKSUM:
        case IPV6_ADDRFORM:
        case IPV6_2292PKTINFO:
        case IPV6_RECVTCLASS:
        case IPV6_RECVRTHDR:
        case IPV6_2292RTHDR:
        case IPV6_RECVHOPOPTS:
        case IPV6_2292HOPOPTS:
        case IPV6_RECVDSTOPTS:
        case IPV6_2292DSTOPTS:
        case IPV6_TCLASS:
        case IPV6_ADDR_PREFERENCES:
#ifdef IPV6_RECVPATHMTU
        case IPV6_RECVPATHMTU:
#endif
#ifdef IPV6_TRANSPARENT
        case IPV6_TRANSPARENT:
#endif
#ifdef IPV6_FREEBIND
        case IPV6_FREEBIND:
#endif
#ifdef IPV6_RECVORIGDSTADDR
        case IPV6_RECVORIGDSTADDR:
#endif
            if (get_user_u32(len, optlen))
                return -TARGET_EFAULT;
            if (len < 0)
                return -TARGET_EINVAL;
            lv = sizeof(lv);
            ret = get_errno(getsockopt(sockfd, level, optname, &val, &lv));
            if (ret < 0)
                return ret;
            /* Same byte-vs-int write-back rule as the SOL_IP path. */
            if (len < sizeof(int) && len > 0 && val >= 0 && val < 255) {
                len = 1;
                if (put_user_u32(len, optlen)
                    || put_user_u8(val, optval_addr))
                    return -TARGET_EFAULT;
            } else {
                if (len > sizeof(int))
                    len = sizeof(int);
                if (put_user_u32(len, optlen)
                    || put_user_u32(val, optval_addr))
                    return -TARGET_EFAULT;
            }
            break;
        default:
            ret = -TARGET_ENOPROTOOPT;
            break;
        }
        break;
#ifdef SOL_NETLINK
    case SOL_NETLINK:
        switch (optname) {
        case NETLINK_PKTINFO:
        case NETLINK_BROADCAST_ERROR:
        case NETLINK_NO_ENOBUFS:
#if LINUX_VERSION_CODE >= KERNEL_VERSION(4, 2, 0)
        case NETLINK_LISTEN_ALL_NSID:
        case NETLINK_CAP_ACK:
#endif /* LINUX_VERSION_CODE >= KERNEL_VERSION(4, 2, 0) */
#if LINUX_VERSION_CODE >= KERNEL_VERSION(4, 12, 0)
        case NETLINK_EXT_ACK:
#endif /* LINUX_VERSION_CODE >= KERNEL_VERSION(4, 12, 0) */
#if LINUX_VERSION_CODE >= KERNEL_VERSION(4, 20, 0)
        case NETLINK_GET_STRICT_CHK:
#endif /* LINUX_VERSION_CODE >= KERNEL_VERSION(4, 20, 0) */
            if (get_user_u32(len, optlen)) {
                return -TARGET_EFAULT;
            }
            if (len != sizeof(val)) {
                return -TARGET_EINVAL;
            }
            lv = len;
            ret = get_errno(getsockopt(sockfd, level, optname, &val, &lv));
            if (ret < 0) {
                return ret;
            }
            if (put_user_u32(lv, optlen)
                || put_user_u32(val, optval_addr)) {
                return -TARGET_EFAULT;
            }
            break;
#if LINUX_VERSION_CODE >= KERNEL_VERSION(4, 2, 0)
        case NETLINK_LIST_MEMBERSHIPS:
        {
            /* Returns a variable-length array of u32 group bitmasks;
             * each word is swapped in place before write-back.
             */
            uint32_t *results;
            int i;
            if (get_user_u32(len, optlen)) {
                return -TARGET_EFAULT;
            }
            if (len < 0) {
                return -TARGET_EINVAL;
            }
            results = lock_user(VERIFY_WRITE, optval_addr, len, 1);
            if (!results && len > 0) {
                return -TARGET_EFAULT;
            }
            lv = len;
            ret = get_errno(getsockopt(sockfd, level, optname, results, &lv));
            if (ret < 0) {
                unlock_user(results, optval_addr, 0);
                return ret;
            }
            /* swap host endianness to target endianness. */
            for (i = 0; i < (len / sizeof(uint32_t)); i++) {
                results[i] = tswap32(results[i]);
            }
            /* NOTE(review): returning here leaves 'results' locked —
             * the unlock_user below is skipped on this error path.
             */
            if (put_user_u32(lv, optlen)) {
                return -TARGET_EFAULT;
            }
            unlock_user(results, optval_addr, 0);
            break;
        }
#endif /* LINUX_VERSION_CODE >= KERNEL_VERSION(4, 2, 0) */
        default:
            goto unimplemented;
        }
        break;
#endif /* SOL_NETLINK */
    default:
    unimplemented:
        qemu_log_mask(LOG_UNIMP,
                      "getsockopt level=%d optname=%d not yet supported\n",
                      level, optname);
        ret = -TARGET_EOPNOTSUPP;
        break;
    }
    return ret;
}
  2716. /* Convert target low/high pair representing file offset into the host
  2717. * low/high pair. This function doesn't handle offsets bigger than 64 bits
  2718. * as the kernel doesn't handle them either.
  2719. */
  2720. static void target_to_host_low_high(abi_ulong tlow,
  2721. abi_ulong thigh,
  2722. unsigned long *hlow,
  2723. unsigned long *hhigh)
  2724. {
  2725. uint64_t off = tlow |
  2726. ((unsigned long long)thigh << TARGET_LONG_BITS / 2) <<
  2727. TARGET_LONG_BITS / 2;
  2728. *hlow = off;
  2729. *hhigh = (off >> HOST_LONG_BITS / 2) >> HOST_LONG_BITS / 2;
  2730. }
/*
 * Build a host iovec array from a guest iovec array at target_addr,
 * locking each guest buffer into host memory.
 *
 * On success returns the allocated struct iovec array (caller releases it
 * with unlock_iovec()).  On failure returns NULL with errno set; count == 0
 * also returns NULL but with errno == 0, so callers must check errno.
 *
 * After the first bad buffer address, the remaining entries get NULL/0 so
 * a write through the vector becomes a partial write, matching kernel
 * behaviour.  The total length is clamped to max_len.
 */
static struct iovec *lock_iovec(int type, abi_ulong target_addr,
                                abi_ulong count, int copy)
{
    struct target_iovec *target_vec;
    struct iovec *vec;
    abi_ulong total_len, max_len;
    int i;
    int err = 0;
    bool bad_address = false;

    if (count == 0) {
        errno = 0;
        return NULL;
    }
    if (count > IOV_MAX) {
        errno = EINVAL;
        return NULL;
    }

    vec = g_try_new0(struct iovec, count);
    if (vec == NULL) {
        errno = ENOMEM;
        return NULL;
    }

    target_vec = lock_user(VERIFY_READ, target_addr,
                           count * sizeof(struct target_iovec), 1);
    if (target_vec == NULL) {
        err = EFAULT;
        goto fail2;
    }

    /* ??? If host page size > target page size, this will result in a
       value larger than what we can actually support.  */
    max_len = 0x7fffffff & TARGET_PAGE_MASK;
    total_len = 0;

    for (i = 0; i < count; i++) {
        abi_ulong base = tswapal(target_vec[i].iov_base);
        abi_long len = tswapal(target_vec[i].iov_len);

        if (len < 0) {
            err = EINVAL;
            goto fail;
        } else if (len == 0) {
            /* Zero length pointer is ignored. */
            vec[i].iov_base = 0;
        } else {
            vec[i].iov_base = lock_user(type, base, len, copy);
            /* If the first buffer pointer is bad, this is a fault.  But
             * subsequent bad buffers will result in a partial write; this
             * is realized by filling the vector with null pointers and
             * zero lengths. */
            if (!vec[i].iov_base) {
                if (i == 0) {
                    err = EFAULT;
                    goto fail;
                } else {
                    bad_address = true;
                }
            }
            if (bad_address) {
                len = 0;
            }
            /* Clamp so total_len never exceeds max_len. */
            if (len > max_len - total_len) {
                len = max_len - total_len;
            }
        }
        vec[i].iov_len = len;
        total_len += len;
    }

    unlock_user(target_vec, target_addr, 0);
    return vec;

 fail:
    /* Unwind: unlock every buffer locked so far (entries with a zero
     * original length were never locked).
     */
    while (--i >= 0) {
        if (tswapal(target_vec[i].iov_len) > 0) {
            unlock_user(vec[i].iov_base, tswapal(target_vec[i].iov_base), 0);
        }
    }
    unlock_user(target_vec, target_addr, 0);
 fail2:
    g_free(vec);
    errno = err;
    return NULL;
}
  2810. static void unlock_iovec(struct iovec *vec, abi_ulong target_addr,
  2811. abi_ulong count, int copy)
  2812. {
  2813. struct target_iovec *target_vec;
  2814. int i;
  2815. target_vec = lock_user(VERIFY_READ, target_addr,
  2816. count * sizeof(struct target_iovec), 1);
  2817. if (target_vec) {
  2818. for (i = 0; i < count; i++) {
  2819. abi_ulong base = tswapal(target_vec[i].iov_base);
  2820. abi_long len = tswapal(target_vec[i].iov_len);
  2821. if (len < 0) {
  2822. break;
  2823. }
  2824. unlock_user(vec[i].iov_base, base, copy ? vec[i].iov_len : 0);
  2825. }
  2826. unlock_user(target_vec, target_addr, 0);
  2827. }
  2828. g_free(vec);
  2829. }
  2830. static inline int target_to_host_sock_type(int *type)
  2831. {
  2832. int host_type = 0;
  2833. int target_type = *type;
  2834. switch (target_type & TARGET_SOCK_TYPE_MASK) {
  2835. case TARGET_SOCK_DGRAM:
  2836. host_type = SOCK_DGRAM;
  2837. break;
  2838. case TARGET_SOCK_STREAM:
  2839. host_type = SOCK_STREAM;
  2840. break;
  2841. default:
  2842. host_type = target_type & TARGET_SOCK_TYPE_MASK;
  2843. break;
  2844. }
  2845. if (target_type & TARGET_SOCK_CLOEXEC) {
  2846. #if defined(SOCK_CLOEXEC)
  2847. host_type |= SOCK_CLOEXEC;
  2848. #else
  2849. return -TARGET_EINVAL;
  2850. #endif
  2851. }
  2852. if (target_type & TARGET_SOCK_NONBLOCK) {
  2853. #if defined(SOCK_NONBLOCK)
  2854. host_type |= SOCK_NONBLOCK;
  2855. #elif !defined(O_NONBLOCK)
  2856. return -TARGET_EINVAL;
  2857. #endif
  2858. }
  2859. *type = host_type;
  2860. return 0;
  2861. }
  2862. /* Try to emulate socket type flags after socket creation. */
  2863. static int sock_flags_fixup(int fd, int target_type)
  2864. {
  2865. #if !defined(SOCK_NONBLOCK) && defined(O_NONBLOCK)
  2866. if (target_type & TARGET_SOCK_NONBLOCK) {
  2867. int flags = fcntl(fd, F_GETFL);
  2868. if (fcntl(fd, F_SETFL, O_NONBLOCK | flags) == -1) {
  2869. close(fd);
  2870. return -TARGET_EINVAL;
  2871. }
  2872. }
  2873. #endif
  2874. return fd;
  2875. }
/* do_socket() Must return target values and target errnos. */
static abi_long do_socket(int domain, int type, int protocol)
{
    /* keep the original target type: sock_flags_fixup() needs the
     * TARGET_SOCK_NONBLOCK bit even after conversion */
    int target_type = type;
    int ret;

    ret = target_to_host_sock_type(&type);
    if (ret) {
        return ret;
    }

    /* only netlink protocols that have a registered data translator
     * below are supported; reject the rest up front */
    if (domain == PF_NETLINK && !(
#ifdef CONFIG_RTNETLINK
         protocol == NETLINK_ROUTE ||
#endif
         protocol == NETLINK_KOBJECT_UEVENT ||
         protocol == NETLINK_AUDIT)) {
        return -TARGET_EPROTONOSUPPORT;
    }

    if (domain == AF_PACKET ||
        (domain == AF_INET && type == SOCK_PACKET)) {
        /* NOTE(review): protocol is byte-swapped for packet sockets —
         * looks like the 16-bit ethertype is kept in network byte
         * order; confirm against the AF_PACKET ABI */
        protocol = tswap16(protocol);
    }

    ret = get_errno(socket(domain, type, protocol));
    if (ret >= 0) {
        ret = sock_flags_fixup(ret, target_type);
        if (type == SOCK_PACKET) {
            /* Manage an obsolete case :
             * if socket type is SOCK_PACKET, bind by name
             */
            fd_trans_register(ret, &target_packet_trans);
        } else if (domain == PF_NETLINK) {
            switch (protocol) {
#ifdef CONFIG_RTNETLINK
            case NETLINK_ROUTE:
                fd_trans_register(ret, &target_netlink_route_trans);
                break;
#endif
            case NETLINK_KOBJECT_UEVENT:
                /* nothing to do: messages are strings */
                break;
            case NETLINK_AUDIT:
                fd_trans_register(ret, &target_netlink_audit_trans);
                break;
            default:
                /* unreachable: filtered by the PF_NETLINK check above */
                g_assert_not_reached();
            }
        }
    }
    return ret;
}
  2925. /* do_bind() Must return target values and target errnos. */
  2926. static abi_long do_bind(int sockfd, abi_ulong target_addr,
  2927. socklen_t addrlen)
  2928. {
  2929. void *addr;
  2930. abi_long ret;
  2931. if ((int)addrlen < 0) {
  2932. return -TARGET_EINVAL;
  2933. }
  2934. addr = alloca(addrlen+1);
  2935. ret = target_to_host_sockaddr(sockfd, addr, target_addr, addrlen);
  2936. if (ret)
  2937. return ret;
  2938. return get_errno(bind(sockfd, addr, addrlen));
  2939. }
  2940. /* do_connect() Must return target values and target errnos. */
  2941. static abi_long do_connect(int sockfd, abi_ulong target_addr,
  2942. socklen_t addrlen)
  2943. {
  2944. void *addr;
  2945. abi_long ret;
  2946. if ((int)addrlen < 0) {
  2947. return -TARGET_EINVAL;
  2948. }
  2949. addr = alloca(addrlen+1);
  2950. ret = target_to_host_sockaddr(sockfd, addr, target_addr, addrlen);
  2951. if (ret)
  2952. return ret;
  2953. return get_errno(safe_connect(sockfd, addr, addrlen));
  2954. }
/* do_sendrecvmsg_locked() Must return target values and target errnos.
 *
 * Common worker for sendmsg/recvmsg: converts the already-locked guest
 * msghdr into a host struct msghdr, performs the host syscall, and (for
 * receive) writes results back into *msgp.
 */
static abi_long do_sendrecvmsg_locked(int fd, struct target_msghdr *msgp,
                                      int flags, int send)
{
    abi_long ret, len;
    struct msghdr msg;
    abi_ulong count;
    struct iovec *vec;
    abi_ulong target_vec;

    if (msgp->msg_name) {
        msg.msg_namelen = tswap32(msgp->msg_namelen);
        msg.msg_name = alloca(msg.msg_namelen+1);
        ret = target_to_host_sockaddr(fd, msg.msg_name,
                                      tswapal(msgp->msg_name),
                                      msg.msg_namelen);
        if (ret == -TARGET_EFAULT) {
            /* For connected sockets msg_name and msg_namelen must
             * be ignored, so returning EFAULT immediately is wrong.
             * Instead, pass a bad msg_name to the host kernel, and
             * let it decide whether to return EFAULT or not.
             */
            msg.msg_name = (void *)-1;
        } else if (ret) {
            goto out2;
        }
    } else {
        msg.msg_name = NULL;
        msg.msg_namelen = 0;
    }
    /* control buffer is sized at twice the guest's msg_controllen —
     * presumably because host cmsg layout can be larger than the
     * target's; TODO confirm against target_to_host_cmsg() */
    msg.msg_controllen = 2 * tswapal(msgp->msg_controllen);
    msg.msg_control = alloca(msg.msg_controllen);
    memset(msg.msg_control, 0, msg.msg_controllen);
    msg.msg_flags = tswap32(msgp->msg_flags);

    count = tswapal(msgp->msg_iovlen);
    target_vec = tswapal(msgp->msg_iov);
    if (count > IOV_MAX) {
        /* sendrcvmsg returns a different errno for this condition than
         * readv/writev, so we must catch it here before lock_iovec() does.
         */
        ret = -TARGET_EMSGSIZE;
        goto out2;
    }
    vec = lock_iovec(send ? VERIFY_READ : VERIFY_WRITE,
                     target_vec, count, send);
    if (vec == NULL) {
        ret = -host_to_target_errno(errno);
        /* allow sending packet without any iov, e.g. with MSG_MORE flag */
        if (!send || ret) {
            goto out2;
        }
    }
    msg.msg_iovlen = count;
    msg.msg_iov = vec;

    if (send) {
        if (fd_trans_target_to_host_data(fd)) {
            /* fd has a data translator (e.g. netlink): translate a
             * private copy of the first iovec before sending */
            void *host_msg;

            host_msg = g_malloc(msg.msg_iov->iov_len);
            memcpy(host_msg, msg.msg_iov->iov_base, msg.msg_iov->iov_len);
            ret = fd_trans_target_to_host_data(fd)(host_msg,
                                                   msg.msg_iov->iov_len);
            if (ret >= 0) {
                msg.msg_iov->iov_base = host_msg;
                ret = get_errno(safe_sendmsg(fd, &msg, flags));
            }
            g_free(host_msg);
        } else {
            ret = target_to_host_cmsg(&msg, msgp);
            if (ret == 0) {
                ret = get_errno(safe_sendmsg(fd, &msg, flags));
            }
        }
    } else {
        ret = get_errno(safe_recvmsg(fd, &msg, flags));
        if (!is_error(ret)) {
            len = ret;
            if (fd_trans_host_to_target_data(fd)) {
                ret = fd_trans_host_to_target_data(fd)(msg.msg_iov->iov_base,
                                                       MIN(msg.msg_iov->iov_len, len));
            }
            if (!is_error(ret)) {
                ret = host_to_target_cmsg(msgp, &msg);
            }
            if (!is_error(ret)) {
                msgp->msg_namelen = tswap32(msg.msg_namelen);
                msgp->msg_flags = tswap32(msg.msg_flags);
                /* skip the (void *)-1 sentinel set above */
                if (msg.msg_name != NULL && msg.msg_name != (void *)-1) {
                    ret = host_to_target_sockaddr(tswapal(msgp->msg_name),
                                                  msg.msg_name, msg.msg_namelen);
                    if (ret) {
                        goto out;
                    }
                }
                /* success: report number of bytes received */
                ret = len;
            }
        }
    }

out:
    if (vec) {
        unlock_iovec(vec, target_vec, count, !send);
    }
out2:
    return ret;
}
  3058. static abi_long do_sendrecvmsg(int fd, abi_ulong target_msg,
  3059. int flags, int send)
  3060. {
  3061. abi_long ret;
  3062. struct target_msghdr *msgp;
  3063. if (!lock_user_struct(send ? VERIFY_READ : VERIFY_WRITE,
  3064. msgp,
  3065. target_msg,
  3066. send ? 1 : 0)) {
  3067. return -TARGET_EFAULT;
  3068. }
  3069. ret = do_sendrecvmsg_locked(fd, msgp, flags, send);
  3070. unlock_user_struct(msgp, target_msg, send ? 0 : 1);
  3071. return ret;
  3072. }
/* We don't rely on the C library to have sendmmsg/recvmmsg support,
 * so it might not have this *mmsg-specific flag either.
 */
#ifndef MSG_WAITFORONE
#define MSG_WAITFORONE 0x10000
#endif

/*
 * Emulate sendmmsg/recvmmsg by looping over do_sendrecvmsg_locked()
 * for each element of the guest mmsghdr vector.
 */
static abi_long do_sendrecvmmsg(int fd, abi_ulong target_msgvec,
                                unsigned int vlen, unsigned int flags,
                                int send)
{
    struct target_mmsghdr *mmsgp;
    abi_long ret = 0;
    int i;

    /* clamp vector length like the kernel does */
    if (vlen > UIO_MAXIOV) {
        vlen = UIO_MAXIOV;
    }
    mmsgp = lock_user(VERIFY_WRITE, target_msgvec, sizeof(*mmsgp) * vlen, 1);
    if (!mmsgp) {
        return -TARGET_EFAULT;
    }
    for (i = 0; i < vlen; i++) {
        ret = do_sendrecvmsg_locked(fd, &mmsgp[i].msg_hdr, flags, send);
        if (is_error(ret)) {
            break;
        }
        /* record per-datagram byte count in guest byte order */
        mmsgp[i].msg_len = tswap32(ret);
        /* MSG_WAITFORONE turns on MSG_DONTWAIT after one packet */
        if (flags & MSG_WAITFORONE) {
            flags |= MSG_DONTWAIT;
        }
    }
    /* write back only the entries actually processed */
    unlock_user(mmsgp, target_msgvec, sizeof(*mmsgp) * i);
    /* Return number of datagrams sent if we sent any at all;
     * otherwise return the error.
     */
    if (i) {
        return i;
    }
    return ret;
}
/* do_accept4() Must return target values and target errnos. */
static abi_long do_accept4(int fd, abi_ulong target_addr,
                           abi_ulong target_addrlen_addr, int flags)
{
    socklen_t addrlen, ret_addrlen;
    void *addr;
    abi_long ret;
    int host_flags;

    /* only CLOEXEC and NONBLOCK are valid accept4 flags */
    if (flags & ~(TARGET_SOCK_CLOEXEC | TARGET_SOCK_NONBLOCK)) {
        return -TARGET_EINVAL;
    }
    host_flags = 0;
    if (flags & TARGET_SOCK_NONBLOCK) {
        host_flags |= SOCK_NONBLOCK;
    }
    if (flags & TARGET_SOCK_CLOEXEC) {
        host_flags |= SOCK_CLOEXEC;
    }
    /* guest passed no address buffer: nothing to copy back */
    if (target_addr == 0) {
        return get_errno(safe_accept4(fd, NULL, NULL, host_flags));
    }
    /* linux returns EFAULT if addrlen pointer is invalid */
    if (get_user_u32(addrlen, target_addrlen_addr))
        return -TARGET_EFAULT;
    if ((int)addrlen < 0) {
        return -TARGET_EINVAL;
    }
    /* verify writability up front so we can fail with EFAULT before
     * performing the accept */
    if (!access_ok(thread_cpu, VERIFY_WRITE, target_addr, addrlen)) {
        return -TARGET_EFAULT;
    }
    addr = alloca(addrlen);
    ret_addrlen = addrlen;
    ret = get_errno(safe_accept4(fd, addr, &ret_addrlen, host_flags));
    if (!is_error(ret)) {
        /* copy back at most the guest-supplied buffer size; the full
         * required length is still reported via ret_addrlen */
        host_to_target_sockaddr(target_addr, addr, MIN(addrlen, ret_addrlen));
        if (put_user_u32(ret_addrlen, target_addrlen_addr)) {
            ret = -TARGET_EFAULT;
        }
    }
    return ret;
}
  3154. /* do_getpeername() Must return target values and target errnos. */
  3155. static abi_long do_getpeername(int fd, abi_ulong target_addr,
  3156. abi_ulong target_addrlen_addr)
  3157. {
  3158. socklen_t addrlen, ret_addrlen;
  3159. void *addr;
  3160. abi_long ret;
  3161. if (get_user_u32(addrlen, target_addrlen_addr))
  3162. return -TARGET_EFAULT;
  3163. if ((int)addrlen < 0) {
  3164. return -TARGET_EINVAL;
  3165. }
  3166. if (!access_ok(thread_cpu, VERIFY_WRITE, target_addr, addrlen)) {
  3167. return -TARGET_EFAULT;
  3168. }
  3169. addr = alloca(addrlen);
  3170. ret_addrlen = addrlen;
  3171. ret = get_errno(getpeername(fd, addr, &ret_addrlen));
  3172. if (!is_error(ret)) {
  3173. host_to_target_sockaddr(target_addr, addr, MIN(addrlen, ret_addrlen));
  3174. if (put_user_u32(ret_addrlen, target_addrlen_addr)) {
  3175. ret = -TARGET_EFAULT;
  3176. }
  3177. }
  3178. return ret;
  3179. }
  3180. /* do_getsockname() Must return target values and target errnos. */
  3181. static abi_long do_getsockname(int fd, abi_ulong target_addr,
  3182. abi_ulong target_addrlen_addr)
  3183. {
  3184. socklen_t addrlen, ret_addrlen;
  3185. void *addr;
  3186. abi_long ret;
  3187. if (get_user_u32(addrlen, target_addrlen_addr))
  3188. return -TARGET_EFAULT;
  3189. if ((int)addrlen < 0) {
  3190. return -TARGET_EINVAL;
  3191. }
  3192. if (!access_ok(thread_cpu, VERIFY_WRITE, target_addr, addrlen)) {
  3193. return -TARGET_EFAULT;
  3194. }
  3195. addr = alloca(addrlen);
  3196. ret_addrlen = addrlen;
  3197. ret = get_errno(getsockname(fd, addr, &ret_addrlen));
  3198. if (!is_error(ret)) {
  3199. host_to_target_sockaddr(target_addr, addr, MIN(addrlen, ret_addrlen));
  3200. if (put_user_u32(ret_addrlen, target_addrlen_addr)) {
  3201. ret = -TARGET_EFAULT;
  3202. }
  3203. }
  3204. return ret;
  3205. }
  3206. /* do_socketpair() Must return target values and target errnos. */
  3207. static abi_long do_socketpair(int domain, int type, int protocol,
  3208. abi_ulong target_tab_addr)
  3209. {
  3210. int tab[2];
  3211. abi_long ret;
  3212. target_to_host_sock_type(&type);
  3213. ret = get_errno(socketpair(domain, type, protocol, tab));
  3214. if (!is_error(ret)) {
  3215. if (put_user_s32(tab[0], target_tab_addr)
  3216. || put_user_s32(tab[1], target_tab_addr + sizeof(tab[0])))
  3217. ret = -TARGET_EFAULT;
  3218. }
  3219. return ret;
  3220. }
/* do_sendto() Must return target values and target errnos. */
static abi_long do_sendto(int fd, abi_ulong msg, size_t len, int flags,
                          abi_ulong target_addr, socklen_t addrlen)
{
    void *addr;
    void *host_msg;
    /* non-NULL only while host_msg points at a translated g_malloc copy */
    void *copy_msg = NULL;
    abi_long ret;

    if ((int)addrlen < 0) {
        return -TARGET_EINVAL;
    }
    host_msg = lock_user(VERIFY_READ, msg, len, 1);
    if (!host_msg)
        return -TARGET_EFAULT;
    if (fd_trans_target_to_host_data(fd)) {
        /* translate a private copy; keep the locked original in
         * copy_msg so it can be restored and unlocked below */
        copy_msg = host_msg;
        host_msg = g_malloc(len);
        memcpy(host_msg, copy_msg, len);
        ret = fd_trans_target_to_host_data(fd)(host_msg, len);
        if (ret < 0) {
            goto fail;
        }
    }
    if (target_addr) {
        /* +1 spare byte — presumably for target_to_host_sockaddr()'s
         * sockaddr fixups; confirm against its contract */
        addr = alloca(addrlen+1);
        ret = target_to_host_sockaddr(fd, addr, target_addr, addrlen);
        if (ret) {
            goto fail;
        }
        ret = get_errno(safe_sendto(fd, host_msg, len, flags, addr, addrlen));
    } else {
        ret = get_errno(safe_sendto(fd, host_msg, len, flags, NULL, 0));
    }
fail:
    if (copy_msg) {
        /* free the translated copy and swap the locked buffer back */
        g_free(host_msg);
        host_msg = copy_msg;
    }
    unlock_user(host_msg, msg, 0);
    return ret;
}
/* do_recvfrom() Must return target values and target errnos. */
static abi_long do_recvfrom(int fd, abi_ulong msg, size_t len, int flags,
                            abi_ulong target_addr,
                            abi_ulong target_addrlen)
{
    socklen_t addrlen, ret_addrlen;
    void *addr;
    void *host_msg;
    abi_long ret;

    /* a NULL guest buffer is passed through to the host as-is */
    if (!msg) {
        host_msg = NULL;
    } else {
        host_msg = lock_user(VERIFY_WRITE, msg, len, 0);
        if (!host_msg) {
            return -TARGET_EFAULT;
        }
    }
    if (target_addr) {
        if (get_user_u32(addrlen, target_addrlen)) {
            ret = -TARGET_EFAULT;
            goto fail;
        }
        if ((int)addrlen < 0) {
            ret = -TARGET_EINVAL;
            goto fail;
        }
        addr = alloca(addrlen);
        ret_addrlen = addrlen;
        ret = get_errno(safe_recvfrom(fd, host_msg, len, flags,
                                      addr, &ret_addrlen));
    } else {
        addr = NULL; /* To keep compiler quiet.  */
        addrlen = 0; /* To keep compiler quiet.  */
        ret = get_errno(safe_recvfrom(fd, host_msg, len, flags, NULL, 0));
    }
    if (!is_error(ret)) {
        if (fd_trans_host_to_target_data(fd)) {
            /* translate received payload in place (e.g. netlink) */
            abi_long trans;
            trans = fd_trans_host_to_target_data(fd)(host_msg, MIN(ret, len));
            if (is_error(trans)) {
                ret = trans;
                goto fail;
            }
        }
        if (target_addr) {
            /* copy back at most the guest-supplied buffer size */
            host_to_target_sockaddr(target_addr, addr,
                                    MIN(addrlen, ret_addrlen));
            if (put_user_u32(ret_addrlen, target_addrlen)) {
                ret = -TARGET_EFAULT;
                goto fail;
            }
        }
        /* success: write the received bytes back to guest memory */
        unlock_user(host_msg, msg, len);
    } else {
fail:
        /* error: unlock without copying anything back */
        unlock_user(host_msg, msg, 0);
    }
    return ret;
}
#ifdef TARGET_NR_socketcall
/* do_socketcall() must return target values and target errnos.
 *
 * The socketcall syscall multiplexes all socket operations through one
 * entry point: 'num' selects the operation and 'vptr' points at an
 * array of its arguments in guest memory.
 */
static abi_long do_socketcall(int num, abi_ulong vptr)
{
    static const unsigned nargs[] = { /* number of arguments per operation */
        [TARGET_SYS_SOCKET] = 3,      /* domain, type, protocol */
        [TARGET_SYS_BIND] = 3,        /* fd, addr, addrlen */
        [TARGET_SYS_CONNECT] = 3,     /* fd, addr, addrlen */
        [TARGET_SYS_LISTEN] = 2,      /* fd, backlog */
        [TARGET_SYS_ACCEPT] = 3,      /* fd, addr, addrlen */
        [TARGET_SYS_GETSOCKNAME] = 3, /* fd, addr, addrlen */
        [TARGET_SYS_GETPEERNAME] = 3, /* fd, addr, addrlen */
        [TARGET_SYS_SOCKETPAIR] = 4,  /* domain, type, protocol, tab */
        [TARGET_SYS_SEND] = 4,        /* fd, msg, len, flags */
        [TARGET_SYS_RECV] = 4,        /* fd, msg, len, flags */
        [TARGET_SYS_SENDTO] = 6,      /* fd, msg, len, flags, addr, addrlen */
        [TARGET_SYS_RECVFROM] = 6,    /* fd, msg, len, flags, addr, addrlen */
        [TARGET_SYS_SHUTDOWN] = 2,    /* fd, how */
        [TARGET_SYS_SETSOCKOPT] = 5,  /* fd, level, optname, optval, optlen */
        [TARGET_SYS_GETSOCKOPT] = 5,  /* fd, level, optname, optval, optlen */
        [TARGET_SYS_SENDMSG] = 3,     /* fd, msg, flags */
        [TARGET_SYS_RECVMSG] = 3,     /* fd, msg, flags */
        [TARGET_SYS_ACCEPT4] = 4,     /* fd, addr, addrlen, flags */
        [TARGET_SYS_RECVMMSG] = 4,    /* fd, msgvec, vlen, flags */
        [TARGET_SYS_SENDMMSG] = 4,    /* fd, msgvec, vlen, flags */
    };
    abi_long a[6]; /* max 6 args */
    unsigned i;

    /* check the range of the first argument num */
    /* (TARGET_SYS_SENDMMSG is the highest among TARGET_SYS_xxx) */
    if (num < 1 || num > TARGET_SYS_SENDMMSG) {
        return -TARGET_EINVAL;
    }
    /* ensure we have space for args */
    if (nargs[num] > ARRAY_SIZE(a)) {
        return -TARGET_EINVAL;
    }
    /* collect the arguments in a[] according to nargs[] */
    for (i = 0; i < nargs[num]; ++i) {
        if (get_user_ual(a[i], vptr + i * sizeof(abi_long)) != 0) {
            return -TARGET_EFAULT;
        }
    }
    /* now when we have the args, invoke the appropriate underlying function */
    switch (num) {
    case TARGET_SYS_SOCKET: /* domain, type, protocol */
        return do_socket(a[0], a[1], a[2]);
    case TARGET_SYS_BIND: /* sockfd, addr, addrlen */
        return do_bind(a[0], a[1], a[2]);
    case TARGET_SYS_CONNECT: /* sockfd, addr, addrlen */
        return do_connect(a[0], a[1], a[2]);
    case TARGET_SYS_LISTEN: /* sockfd, backlog */
        return get_errno(listen(a[0], a[1]));
    case TARGET_SYS_ACCEPT: /* sockfd, addr, addrlen */
        return do_accept4(a[0], a[1], a[2], 0);
    case TARGET_SYS_GETSOCKNAME: /* sockfd, addr, addrlen */
        return do_getsockname(a[0], a[1], a[2]);
    case TARGET_SYS_GETPEERNAME: /* sockfd, addr, addrlen */
        return do_getpeername(a[0], a[1], a[2]);
    case TARGET_SYS_SOCKETPAIR: /* domain, type, protocol, tab */
        return do_socketpair(a[0], a[1], a[2], a[3]);
    case TARGET_SYS_SEND: /* sockfd, msg, len, flags */
        return do_sendto(a[0], a[1], a[2], a[3], 0, 0);
    case TARGET_SYS_RECV: /* sockfd, msg, len, flags */
        return do_recvfrom(a[0], a[1], a[2], a[3], 0, 0);
    case TARGET_SYS_SENDTO: /* sockfd, msg, len, flags, addr, addrlen */
        return do_sendto(a[0], a[1], a[2], a[3], a[4], a[5]);
    case TARGET_SYS_RECVFROM: /* sockfd, msg, len, flags, addr, addrlen */
        return do_recvfrom(a[0], a[1], a[2], a[3], a[4], a[5]);
    case TARGET_SYS_SHUTDOWN: /* sockfd, how */
        return get_errno(shutdown(a[0], a[1]));
    case TARGET_SYS_SETSOCKOPT: /* sockfd, level, optname, optval, optlen */
        return do_setsockopt(a[0], a[1], a[2], a[3], a[4]);
    case TARGET_SYS_GETSOCKOPT: /* sockfd, level, optname, optval, optlen */
        return do_getsockopt(a[0], a[1], a[2], a[3], a[4]);
    case TARGET_SYS_SENDMSG: /* sockfd, msg, flags */
        return do_sendrecvmsg(a[0], a[1], a[2], 1);
    case TARGET_SYS_RECVMSG: /* sockfd, msg, flags */
        return do_sendrecvmsg(a[0], a[1], a[2], 0);
    case TARGET_SYS_ACCEPT4: /* sockfd, addr, addrlen, flags */
        return do_accept4(a[0], a[1], a[2], a[3]);
    case TARGET_SYS_RECVMMSG: /* sockfd, msgvec, vlen, flags */
        return do_sendrecvmmsg(a[0], a[1], a[2], a[3], 0);
    case TARGET_SYS_SENDMMSG: /* sockfd, msgvec, vlen, flags */
        return do_sendrecvmmsg(a[0], a[1], a[2], a[3], 1);
    default:
        qemu_log_mask(LOG_UNIMP, "Unsupported socketcall: %d\n", num);
        return -TARGET_EINVAL;
    }
}
#endif
#ifndef TARGET_SEMID64_DS
/* asm-generic version of this struct: guest-side semid64_ds layout used
 * when the target does not provide its own definition. */
struct target_semid64_ds
{
    struct target_ipc_perm sem_perm;  /* ownership and permissions */
    abi_ulong sem_otime;              /* last semop time */
#if TARGET_ABI_BITS == 32
    abi_ulong __unused1;              /* presumably the high half of a
                                       * 64-bit time on 32-bit ABIs —
                                       * confirm vs asm-generic sembuf.h */
#endif
    abi_ulong sem_ctime;              /* last change time */
#if TARGET_ABI_BITS == 32
    abi_ulong __unused2;
#endif
    abi_ulong sem_nsems;              /* number of semaphores in the set */
    abi_ulong __unused3;
    abi_ulong __unused4;
};
#endif
/*
 * Copy the sem_perm member of a guest semid64_ds at target_addr into a
 * host struct ipc_perm.  Returns 0, or -TARGET_EFAULT if the guest
 * struct cannot be locked.
 */
static inline abi_long target_to_host_ipc_perm(struct ipc_perm *host_ip,
                                               abi_ulong target_addr)
{
    struct target_ipc_perm *target_ip;
    struct target_semid64_ds *target_sd;

    if (!lock_user_struct(VERIFY_READ, target_sd, target_addr, 1))
        return -TARGET_EFAULT;
    target_ip = &(target_sd->sem_perm);
    host_ip->__key = tswap32(target_ip->__key);
    host_ip->uid = tswap32(target_ip->uid);
    host_ip->gid = tswap32(target_ip->gid);
    host_ip->cuid = tswap32(target_ip->cuid);
    host_ip->cgid = tswap32(target_ip->cgid);
    /* mode and __seq are 16-bit on most targets, 32-bit on the ones
     * listed below */
#if defined(TARGET_ALPHA) || defined(TARGET_MIPS) || defined(TARGET_PPC)
    host_ip->mode = tswap32(target_ip->mode);
#else
    host_ip->mode = tswap16(target_ip->mode);
#endif
#if defined(TARGET_PPC)
    host_ip->__seq = tswap32(target_ip->__seq);
#else
    host_ip->__seq = tswap16(target_ip->__seq);
#endif
    unlock_user_struct(target_sd, target_addr, 0);
    return 0;
}
/*
 * Copy a host struct ipc_perm into the sem_perm member of the guest
 * semid64_ds at target_addr.  Returns 0, or -TARGET_EFAULT if the
 * guest struct cannot be locked.
 */
static inline abi_long host_to_target_ipc_perm(abi_ulong target_addr,
                                               struct ipc_perm *host_ip)
{
    struct target_ipc_perm *target_ip;
    struct target_semid64_ds *target_sd;

    if (!lock_user_struct(VERIFY_WRITE, target_sd, target_addr, 0))
        return -TARGET_EFAULT;
    target_ip = &(target_sd->sem_perm);
    target_ip->__key = tswap32(host_ip->__key);
    target_ip->uid = tswap32(host_ip->uid);
    target_ip->gid = tswap32(host_ip->gid);
    target_ip->cuid = tswap32(host_ip->cuid);
    target_ip->cgid = tswap32(host_ip->cgid);
    /* mode and __seq are 16-bit on most targets, 32-bit on the ones
     * listed below */
#if defined(TARGET_ALPHA) || defined(TARGET_MIPS) || defined(TARGET_PPC)
    target_ip->mode = tswap32(host_ip->mode);
#else
    target_ip->mode = tswap16(host_ip->mode);
#endif
#if defined(TARGET_PPC)
    target_ip->__seq = tswap32(host_ip->__seq);
#else
    target_ip->__seq = tswap16(host_ip->__seq);
#endif
    unlock_user_struct(target_sd, target_addr, 1);
    return 0;
}
/*
 * Convert a guest semid64_ds at target_addr into a host semid_ds.
 * Returns 0 or -TARGET_EFAULT.
 */
static inline abi_long target_to_host_semid_ds(struct semid_ds *host_sd,
                                               abi_ulong target_addr)
{
    struct target_semid64_ds *target_sd;

    if (!lock_user_struct(VERIFY_READ, target_sd, target_addr, 1))
        return -TARGET_EFAULT;
    /* NOTE(review): on this failure path target_sd stays locked —
     * looks like an unlock_user_struct() is missing; confirm */
    if (target_to_host_ipc_perm(&(host_sd->sem_perm),target_addr))
        return -TARGET_EFAULT;
    host_sd->sem_nsems = tswapal(target_sd->sem_nsems);
    host_sd->sem_otime = tswapal(target_sd->sem_otime);
    host_sd->sem_ctime = tswapal(target_sd->sem_ctime);
    unlock_user_struct(target_sd, target_addr, 0);
    return 0;
}
/*
 * Convert a host semid_ds into the guest semid64_ds at target_addr.
 * Returns 0 or -TARGET_EFAULT.
 */
static inline abi_long host_to_target_semid_ds(abi_ulong target_addr,
                                               struct semid_ds *host_sd)
{
    struct target_semid64_ds *target_sd;

    if (!lock_user_struct(VERIFY_WRITE, target_sd, target_addr, 0))
        return -TARGET_EFAULT;
    /* NOTE(review): on this failure path target_sd stays locked —
     * looks like an unlock_user_struct() is missing; confirm */
    if (host_to_target_ipc_perm(target_addr,&(host_sd->sem_perm)))
        return -TARGET_EFAULT;
    target_sd->sem_nsems = tswapal(host_sd->sem_nsems);
    target_sd->sem_otime = tswapal(host_sd->sem_otime);
    target_sd->sem_ctime = tswapal(host_sd->sem_ctime);
    unlock_user_struct(target_sd, target_addr, 1);
    return 0;
}
/* Guest-side layout of struct seminfo (returned by semctl IPC_INFO /
 * SEM_INFO); field meanings mirror the host <sys/sem.h> seminfo. */
struct target_seminfo {
    int semmap;
    int semmni;
    int semmns;
    int semmnu;
    int semmsl;
    int semopm;
    int semume;
    int semusz;
    int semvmx;
    int semaem;
};
  3522. static inline abi_long host_to_target_seminfo(abi_ulong target_addr,
  3523. struct seminfo *host_seminfo)
  3524. {
  3525. struct target_seminfo *target_seminfo;
  3526. if (!lock_user_struct(VERIFY_WRITE, target_seminfo, target_addr, 0))
  3527. return -TARGET_EFAULT;
  3528. __put_user(host_seminfo->semmap, &target_seminfo->semmap);
  3529. __put_user(host_seminfo->semmni, &target_seminfo->semmni);
  3530. __put_user(host_seminfo->semmns, &target_seminfo->semmns);
  3531. __put_user(host_seminfo->semmnu, &target_seminfo->semmnu);
  3532. __put_user(host_seminfo->semmsl, &target_seminfo->semmsl);
  3533. __put_user(host_seminfo->semopm, &target_seminfo->semopm);
  3534. __put_user(host_seminfo->semume, &target_seminfo->semume);
  3535. __put_user(host_seminfo->semusz, &target_seminfo->semusz);
  3536. __put_user(host_seminfo->semvmx, &target_seminfo->semvmx);
  3537. __put_user(host_seminfo->semaem, &target_seminfo->semaem);
  3538. unlock_user_struct(target_seminfo, target_addr, 1);
  3539. return 0;
  3540. }
/* Host-side argument union for semctl(2). */
union semun {
    int val;                 /* SETVAL value */
    struct semid_ds *buf;    /* IPC_STAT / IPC_SET buffer */
    unsigned short *array;   /* GETALL / SETALL array */
    struct seminfo *__buf;   /* IPC_INFO / SEM_INFO buffer */
};

/* Guest-side view of the same union: pointers are guest addresses. */
union target_semun {
    int val;
    abi_ulong buf;
    abi_ulong array;
    abi_ulong __buf;
};
  3553. static inline abi_long target_to_host_semarray(int semid, unsigned short **host_array,
  3554. abi_ulong target_addr)
  3555. {
  3556. int nsems;
  3557. unsigned short *array;
  3558. union semun semun;
  3559. struct semid_ds semid_ds;
  3560. int i, ret;
  3561. semun.buf = &semid_ds;
  3562. ret = semctl(semid, 0, IPC_STAT, semun);
  3563. if (ret == -1)
  3564. return get_errno(ret);
  3565. nsems = semid_ds.sem_nsems;
  3566. *host_array = g_try_new(unsigned short, nsems);
  3567. if (!*host_array) {
  3568. return -TARGET_ENOMEM;
  3569. }
  3570. array = lock_user(VERIFY_READ, target_addr,
  3571. nsems*sizeof(unsigned short), 1);
  3572. if (!array) {
  3573. g_free(*host_array);
  3574. return -TARGET_EFAULT;
  3575. }
  3576. for(i=0; i<nsems; i++) {
  3577. __get_user((*host_array)[i], &array[i]);
  3578. }
  3579. unlock_user(array, target_addr, 0);
  3580. return 0;
  3581. }
  3582. static inline abi_long host_to_target_semarray(int semid, abi_ulong target_addr,
  3583. unsigned short **host_array)
  3584. {
  3585. int nsems;
  3586. unsigned short *array;
  3587. union semun semun;
  3588. struct semid_ds semid_ds;
  3589. int i, ret;
  3590. semun.buf = &semid_ds;
  3591. ret = semctl(semid, 0, IPC_STAT, semun);
  3592. if (ret == -1)
  3593. return get_errno(ret);
  3594. nsems = semid_ds.sem_nsems;
  3595. array = lock_user(VERIFY_WRITE, target_addr,
  3596. nsems*sizeof(unsigned short), 0);
  3597. if (!array)
  3598. return -TARGET_EFAULT;
  3599. for(i=0; i<nsems; i++) {
  3600. __put_user((*host_array)[i], &array[i]);
  3601. }
  3602. g_free(*host_array);
  3603. unlock_user(array, target_addr, 1);
  3604. return 0;
  3605. }
/*
 * Emulate semctl(2): convert the guest argument union and buffers for
 * each command, run the host semctl, and convert results back.
 * Returns target values and target errnos.
 */
static inline abi_long do_semctl(int semid, int semnum, int cmd,
                                 abi_ulong target_arg)
{
    union target_semun target_su = { .buf = target_arg };
    union semun arg;
    struct semid_ds dsarg;
    unsigned short *array = NULL;
    struct seminfo seminfo;
    /* unknown commands fall through the switch and return EINVAL */
    abi_long ret = -TARGET_EINVAL;
    abi_long err;

    /* presumably strips a TARGET_IPC_64-style flag from the command —
     * TODO confirm against the target's ipc ABI */
    cmd &= 0xff;

    switch( cmd ) {
    case GETVAL:
    case SETVAL:
        /* In 64 bit cross-endian situations, we will erroneously pick up
         * the wrong half of the union for the "val" element.  To rectify
         * this, the entire 8-byte structure is byteswapped, followed by
         * a swap of the 4 byte val field. In other cases, the data is
         * already in proper host byte order. */
        if (sizeof(target_su.val) != (sizeof(target_su.buf))) {
            target_su.buf = tswapal(target_su.buf);
            arg.val = tswap32(target_su.val);
        } else {
            arg.val = target_su.val;
        }
        ret = get_errno(semctl(semid, semnum, cmd, arg));
        break;
    case GETALL:
    case SETALL:
        err = target_to_host_semarray(semid, &array, target_su.array);
        if (err)
            return err;
        arg.array = array;
        ret = get_errno(semctl(semid, semnum, cmd, arg));
        err = host_to_target_semarray(semid, target_su.array, &array);
        if (err)
            return err;
        break;
    case IPC_STAT:
    case IPC_SET:
    case SEM_STAT:
        err = target_to_host_semid_ds(&dsarg, target_su.buf);
        if (err)
            return err;
        arg.buf = &dsarg;
        ret = get_errno(semctl(semid, semnum, cmd, arg));
        err = host_to_target_semid_ds(target_su.buf, &dsarg);
        if (err)
            return err;
        break;
    case IPC_INFO:
    case SEM_INFO:
        arg.__buf = &seminfo;
        ret = get_errno(semctl(semid, semnum, cmd, arg));
        err = host_to_target_seminfo(target_su.__buf, &seminfo);
        if (err)
            return err;
        break;
    case IPC_RMID:
    case GETPID:
    case GETNCNT:
    case GETZCNT:
        /* these commands take no argument */
        ret = get_errno(semctl(semid, semnum, cmd, NULL));
        break;
    }
    return ret;
}
/* Guest-side layout of struct sembuf (one semop(2) operation). */
struct target_sembuf {
    unsigned short sem_num;  /* semaphore index in the set */
    short sem_op;            /* operation: add/subtract/wait-for-zero */
    short sem_flg;           /* IPC_NOWAIT / SEM_UNDO flags */
};
  3678. static inline abi_long target_to_host_sembuf(struct sembuf *host_sembuf,
  3679. abi_ulong target_addr,
  3680. unsigned nsops)
  3681. {
  3682. struct target_sembuf *target_sembuf;
  3683. int i;
  3684. target_sembuf = lock_user(VERIFY_READ, target_addr,
  3685. nsops*sizeof(struct target_sembuf), 1);
  3686. if (!target_sembuf)
  3687. return -TARGET_EFAULT;
  3688. for(i=0; i<nsops; i++) {
  3689. __get_user(host_sembuf[i].sem_num, &target_sembuf[i].sem_num);
  3690. __get_user(host_sembuf[i].sem_op, &target_sembuf[i].sem_op);
  3691. __get_user(host_sembuf[i].sem_flg, &target_sembuf[i].sem_flg);
  3692. }
  3693. unlock_user(target_sembuf, target_addr, 0);
  3694. return 0;
  3695. }
#if defined(TARGET_NR_ipc) || defined(TARGET_NR_semop) || \
    defined(TARGET_NR_semtimedop) || defined(TARGET_NR_semtimedop_time64)

/*
 * This macro is required to handle the s390 variants, which passes the
 * arguments in a different order than default.
 */
#ifdef __s390x__
#define SEMTIMEDOP_IPC_ARGS(__nsops, __sops, __timeout) \
  (__nsops), (__timeout), (__sops)
#else
#define SEMTIMEDOP_IPC_ARGS(__nsops, __sops, __timeout) \
  (__nsops), 0, (__sops), (__timeout)
#endif

/*
 * Emulate semop/semtimedop: convert the guest sembuf array and optional
 * timeout, then use whichever host syscall (semtimedop or the ipc
 * multiplexer) is available.  'time64' selects the 64-bit timespec
 * layout for the guest timeout.  Returns target values/errnos.
 */
static inline abi_long do_semtimedop(int semid,
                                     abi_long ptr,
                                     unsigned nsops,
                                     abi_long timeout, bool time64)
{
    struct sembuf *sops;
    struct timespec ts, *pts = NULL;
    abi_long ret;

    if (timeout) {
        pts = &ts;
        if (time64) {
            if (target_to_host_timespec64(pts, timeout)) {
                return -TARGET_EFAULT;
            }
        } else {
            if (target_to_host_timespec(pts, timeout)) {
                return -TARGET_EFAULT;
            }
        }
    }

    /* kernel rejects oversized op vectors with E2BIG */
    if (nsops > TARGET_SEMOPM) {
        return -TARGET_E2BIG;
    }

    sops = g_new(struct sembuf, nsops);

    if (target_to_host_sembuf(sops, ptr, nsops)) {
        g_free(sops);
        return -TARGET_EFAULT;
    }

    ret = -TARGET_ENOSYS;
#ifdef __NR_semtimedop
    ret = get_errno(safe_semtimedop(semid, sops, nsops, pts));
#endif
#ifdef __NR_ipc
    /* fall back to the ipc(2) multiplexer if semtimedop is absent */
    if (ret == -TARGET_ENOSYS) {
        ret = get_errno(safe_ipc(IPCOP_semtimedop, semid,
                                 SEMTIMEDOP_IPC_ARGS(nsops, sops,
                                                     (long)pts)));
    }
#endif
    g_free(sops);
    return ret;
}
#endif
/* Guest-side layout of struct msqid_ds (SysV message queue status). */
struct target_msqid_ds
{
    struct target_ipc_perm msg_perm;  /* ownership and permissions */
    abi_ulong msg_stime;              /* last msgsnd time */
#if TARGET_ABI_BITS == 32
    abi_ulong __unused1;              /* presumably the high half of a
                                       * 64-bit time on 32-bit ABIs —
                                       * confirm vs asm-generic msgbuf.h */
#endif
    abi_ulong msg_rtime;              /* last msgrcv time */
#if TARGET_ABI_BITS == 32
    abi_ulong __unused2;
#endif
    abi_ulong msg_ctime;              /* last change time */
#if TARGET_ABI_BITS == 32
    abi_ulong __unused3;
#endif
    abi_ulong __msg_cbytes;           /* current bytes in queue */
    abi_ulong msg_qnum;               /* messages currently queued */
    abi_ulong msg_qbytes;             /* max bytes allowed in queue */
    abi_ulong msg_lspid;              /* pid of last msgsnd */
    abi_ulong msg_lrpid;              /* pid of last msgrcv */
    abi_ulong __unused4;
    abi_ulong __unused5;
};
  3774. static inline abi_long target_to_host_msqid_ds(struct msqid_ds *host_md,
  3775. abi_ulong target_addr)
  3776. {
  3777. struct target_msqid_ds *target_md;
  3778. if (!lock_user_struct(VERIFY_READ, target_md, target_addr, 1))
  3779. return -TARGET_EFAULT;
  3780. if (target_to_host_ipc_perm(&(host_md->msg_perm),target_addr))
  3781. return -TARGET_EFAULT;
  3782. host_md->msg_stime = tswapal(target_md->msg_stime);
  3783. host_md->msg_rtime = tswapal(target_md->msg_rtime);
  3784. host_md->msg_ctime = tswapal(target_md->msg_ctime);
  3785. host_md->__msg_cbytes = tswapal(target_md->__msg_cbytes);
  3786. host_md->msg_qnum = tswapal(target_md->msg_qnum);
  3787. host_md->msg_qbytes = tswapal(target_md->msg_qbytes);
  3788. host_md->msg_lspid = tswapal(target_md->msg_lspid);
  3789. host_md->msg_lrpid = tswapal(target_md->msg_lrpid);
  3790. unlock_user_struct(target_md, target_addr, 0);
  3791. return 0;
  3792. }
  3793. static inline abi_long host_to_target_msqid_ds(abi_ulong target_addr,
  3794. struct msqid_ds *host_md)
  3795. {
  3796. struct target_msqid_ds *target_md;
  3797. if (!lock_user_struct(VERIFY_WRITE, target_md, target_addr, 0))
  3798. return -TARGET_EFAULT;
  3799. if (host_to_target_ipc_perm(target_addr,&(host_md->msg_perm)))
  3800. return -TARGET_EFAULT;
  3801. target_md->msg_stime = tswapal(host_md->msg_stime);
  3802. target_md->msg_rtime = tswapal(host_md->msg_rtime);
  3803. target_md->msg_ctime = tswapal(host_md->msg_ctime);
  3804. target_md->__msg_cbytes = tswapal(host_md->__msg_cbytes);
  3805. target_md->msg_qnum = tswapal(host_md->msg_qnum);
  3806. target_md->msg_qbytes = tswapal(host_md->msg_qbytes);
  3807. target_md->msg_lspid = tswapal(host_md->msg_lspid);
  3808. target_md->msg_lrpid = tswapal(host_md->msg_lrpid);
  3809. unlock_user_struct(target_md, target_addr, 1);
  3810. return 0;
  3811. }
/* Guest-ABI layout of struct msginfo (returned by IPC_INFO/MSG_INFO). */
struct target_msginfo {
    int msgpool;
    int msgmap;
    int msgmax;
    int msgmnb;
    int msgmni;
    int msgssz;
    int msgtql;
    unsigned short int msgseg;
};
  3822. static inline abi_long host_to_target_msginfo(abi_ulong target_addr,
  3823. struct msginfo *host_msginfo)
  3824. {
  3825. struct target_msginfo *target_msginfo;
  3826. if (!lock_user_struct(VERIFY_WRITE, target_msginfo, target_addr, 0))
  3827. return -TARGET_EFAULT;
  3828. __put_user(host_msginfo->msgpool, &target_msginfo->msgpool);
  3829. __put_user(host_msginfo->msgmap, &target_msginfo->msgmap);
  3830. __put_user(host_msginfo->msgmax, &target_msginfo->msgmax);
  3831. __put_user(host_msginfo->msgmnb, &target_msginfo->msgmnb);
  3832. __put_user(host_msginfo->msgmni, &target_msginfo->msgmni);
  3833. __put_user(host_msginfo->msgssz, &target_msginfo->msgssz);
  3834. __put_user(host_msginfo->msgtql, &target_msginfo->msgtql);
  3835. __put_user(host_msginfo->msgseg, &target_msginfo->msgseg);
  3836. unlock_user_struct(target_msginfo, target_addr, 1);
  3837. return 0;
  3838. }
/*
 * Emulate msgctl(2): convert the guest buffer at @ptr to/from host
 * representation around the host msgctl() call.  Unknown commands fall
 * through and return -TARGET_EINVAL.
 */
static inline abi_long do_msgctl(int msgid, int cmd, abi_long ptr)
{
    struct msqid_ds dsarg;
    struct msginfo msginfo;
    abi_long ret = -TARGET_EINVAL;

    cmd &= 0xff;   /* strip version/IPC_64 bits from the command */

    switch (cmd) {
    case IPC_STAT:
    case IPC_SET:
    case MSG_STAT:
        /* Round-trip the guest msqid_ds through the host call. */
        if (target_to_host_msqid_ds(&dsarg,ptr))
            return -TARGET_EFAULT;
        ret = get_errno(msgctl(msgid, cmd, &dsarg));
        if (host_to_target_msqid_ds(ptr,&dsarg))
            return -TARGET_EFAULT;
        break;
    case IPC_RMID:
        ret = get_errno(msgctl(msgid, cmd, NULL));
        break;
    case IPC_INFO:
    case MSG_INFO:
        /* These return a struct msginfo through the msqid_ds pointer. */
        ret = get_errno(msgctl(msgid, cmd, (struct msqid_ds *)&msginfo));
        if (host_to_target_msginfo(ptr, &msginfo))
            return -TARGET_EFAULT;
        break;
    }
    return ret;
}
/*
 * Guest-ABI msgbuf header: an abi_long mtype followed by the message
 * text (mtext[1] is the old-style variable-length trailing array).
 */
struct target_msgbuf {
    abi_long mtype;
    char mtext[1];
};
/*
 * Emulate msgsnd(2): copy the guest msgbuf at @msgp into a host bounce
 * buffer (byte-swapping mtype), then submit via the msgsnd syscall or,
 * failing that, the multiplexed ipc(2) entry point.
 */
static inline abi_long do_msgsnd(int msqid, abi_long msgp,
                                 ssize_t msgsz, int msgflg)
{
    struct target_msgbuf *target_mb;
    struct msgbuf *host_mb;
    abi_long ret = 0;

    if (msgsz < 0) {
        return -TARGET_EINVAL;
    }

    if (!lock_user_struct(VERIFY_READ, target_mb, msgp, 0))
        return -TARGET_EFAULT;
    /* Host buffer: long mtype header plus msgsz bytes of text. */
    host_mb = g_try_malloc(msgsz + sizeof(long));
    if (!host_mb) {
        unlock_user_struct(target_mb, msgp, 0);
        return -TARGET_ENOMEM;
    }
    host_mb->mtype = (abi_long) tswapal(target_mb->mtype);
    memcpy(host_mb->mtext, target_mb->mtext, msgsz);

    ret = -TARGET_ENOSYS;
#ifdef __NR_msgsnd
    ret = get_errno(safe_msgsnd(msqid, host_mb, msgsz, msgflg));
#endif
#ifdef __NR_ipc
    if (ret == -TARGET_ENOSYS) {
#ifdef __s390x__
        /* The s390 sys_ipc variant has only five parameters. */
        ret = get_errno(safe_ipc(IPCOP_msgsnd, msqid, msgsz, msgflg,
                                 host_mb));
#else
        ret = get_errno(safe_ipc(IPCOP_msgsnd, msqid, msgsz, msgflg,
                                 host_mb, 0));
#endif
    }
#endif
    g_free(host_mb);
    unlock_user_struct(target_mb, msgp, 0);

    return ret;
}
#ifdef __NR_ipc
#if defined(__sparc__)
/* SPARC for msgrcv it does not use the kludge on final 2 arguments. */
#define MSGRCV_ARGS(__msgp, __msgtyp) __msgp, __msgtyp
#elif defined(__s390x__)
/* The s390 sys_ipc variant has only five parameters. */
#define MSGRCV_ARGS(__msgp, __msgtyp) \
    ((long int[]){(long int)__msgp, __msgtyp})
#else
#define MSGRCV_ARGS(__msgp, __msgtyp) \
    ((long int[]){(long int)__msgp, __msgtyp}), 0
#endif
#endif

/*
 * Emulate msgrcv(2): receive into a host bounce buffer, then copy the
 * message text and byte-swapped mtype back to the guest msgbuf at
 * @msgp.  On success returns the number of mtext bytes received.
 */
static inline abi_long do_msgrcv(int msqid, abi_long msgp,
                                 ssize_t msgsz, abi_long msgtyp,
                                 int msgflg)
{
    struct target_msgbuf *target_mb;
    char *target_mtext;
    struct msgbuf *host_mb;
    abi_long ret = 0;

    if (msgsz < 0) {
        return -TARGET_EINVAL;
    }

    if (!lock_user_struct(VERIFY_WRITE, target_mb, msgp, 0))
        return -TARGET_EFAULT;

    host_mb = g_try_malloc(msgsz + sizeof(long));
    if (!host_mb) {
        ret = -TARGET_ENOMEM;
        goto end;
    }
    ret = -TARGET_ENOSYS;
#ifdef __NR_msgrcv
    ret = get_errno(safe_msgrcv(msqid, host_mb, msgsz, msgtyp, msgflg));
#endif
#ifdef __NR_ipc
    if (ret == -TARGET_ENOSYS) {
        ret = get_errno(safe_ipc(IPCOP_CALL(1, IPCOP_msgrcv), msqid, msgsz,
                        msgflg, MSGRCV_ARGS(host_mb, msgtyp)));
    }
#endif

    if (ret > 0) {
        /* ret is the byte count; mtext starts right after mtype. */
        abi_ulong target_mtext_addr = msgp + sizeof(abi_ulong);
        target_mtext = lock_user(VERIFY_WRITE, target_mtext_addr, ret, 0);
        if (!target_mtext) {
            ret = -TARGET_EFAULT;
            goto end;
        }
        memcpy(target_mb->mtext, host_mb->mtext, ret);
        unlock_user(target_mtext, target_mtext_addr, ret);
    }

    target_mb->mtype = tswapal(host_mb->mtype);

end:
    if (target_mb)
        unlock_user_struct(target_mb, msgp, 1);
    g_free(host_mb);
    return ret;
}
  3966. static inline abi_long target_to_host_shmid_ds(struct shmid_ds *host_sd,
  3967. abi_ulong target_addr)
  3968. {
  3969. struct target_shmid_ds *target_sd;
  3970. if (!lock_user_struct(VERIFY_READ, target_sd, target_addr, 1))
  3971. return -TARGET_EFAULT;
  3972. if (target_to_host_ipc_perm(&(host_sd->shm_perm), target_addr))
  3973. return -TARGET_EFAULT;
  3974. __get_user(host_sd->shm_segsz, &target_sd->shm_segsz);
  3975. __get_user(host_sd->shm_atime, &target_sd->shm_atime);
  3976. __get_user(host_sd->shm_dtime, &target_sd->shm_dtime);
  3977. __get_user(host_sd->shm_ctime, &target_sd->shm_ctime);
  3978. __get_user(host_sd->shm_cpid, &target_sd->shm_cpid);
  3979. __get_user(host_sd->shm_lpid, &target_sd->shm_lpid);
  3980. __get_user(host_sd->shm_nattch, &target_sd->shm_nattch);
  3981. unlock_user_struct(target_sd, target_addr, 0);
  3982. return 0;
  3983. }
  3984. static inline abi_long host_to_target_shmid_ds(abi_ulong target_addr,
  3985. struct shmid_ds *host_sd)
  3986. {
  3987. struct target_shmid_ds *target_sd;
  3988. if (!lock_user_struct(VERIFY_WRITE, target_sd, target_addr, 0))
  3989. return -TARGET_EFAULT;
  3990. if (host_to_target_ipc_perm(target_addr, &(host_sd->shm_perm)))
  3991. return -TARGET_EFAULT;
  3992. __put_user(host_sd->shm_segsz, &target_sd->shm_segsz);
  3993. __put_user(host_sd->shm_atime, &target_sd->shm_atime);
  3994. __put_user(host_sd->shm_dtime, &target_sd->shm_dtime);
  3995. __put_user(host_sd->shm_ctime, &target_sd->shm_ctime);
  3996. __put_user(host_sd->shm_cpid, &target_sd->shm_cpid);
  3997. __put_user(host_sd->shm_lpid, &target_sd->shm_lpid);
  3998. __put_user(host_sd->shm_nattch, &target_sd->shm_nattch);
  3999. unlock_user_struct(target_sd, target_addr, 1);
  4000. return 0;
  4001. }
/* Guest-ABI layout of struct shminfo (returned by IPC_INFO). */
struct target_shminfo {
    abi_ulong shmmax;
    abi_ulong shmmin;
    abi_ulong shmmni;
    abi_ulong shmseg;
    abi_ulong shmall;
};
  4009. static inline abi_long host_to_target_shminfo(abi_ulong target_addr,
  4010. struct shminfo *host_shminfo)
  4011. {
  4012. struct target_shminfo *target_shminfo;
  4013. if (!lock_user_struct(VERIFY_WRITE, target_shminfo, target_addr, 0))
  4014. return -TARGET_EFAULT;
  4015. __put_user(host_shminfo->shmmax, &target_shminfo->shmmax);
  4016. __put_user(host_shminfo->shmmin, &target_shminfo->shmmin);
  4017. __put_user(host_shminfo->shmmni, &target_shminfo->shmmni);
  4018. __put_user(host_shminfo->shmseg, &target_shminfo->shmseg);
  4019. __put_user(host_shminfo->shmall, &target_shminfo->shmall);
  4020. unlock_user_struct(target_shminfo, target_addr, 1);
  4021. return 0;
  4022. }
/* Guest-ABI layout of struct shm_info (returned by SHM_INFO). */
struct target_shm_info {
    int used_ids;
    abi_ulong shm_tot;
    abi_ulong shm_rss;
    abi_ulong shm_swp;
    abi_ulong swap_attempts;
    abi_ulong swap_successes;
};
  4031. static inline abi_long host_to_target_shm_info(abi_ulong target_addr,
  4032. struct shm_info *host_shm_info)
  4033. {
  4034. struct target_shm_info *target_shm_info;
  4035. if (!lock_user_struct(VERIFY_WRITE, target_shm_info, target_addr, 0))
  4036. return -TARGET_EFAULT;
  4037. __put_user(host_shm_info->used_ids, &target_shm_info->used_ids);
  4038. __put_user(host_shm_info->shm_tot, &target_shm_info->shm_tot);
  4039. __put_user(host_shm_info->shm_rss, &target_shm_info->shm_rss);
  4040. __put_user(host_shm_info->shm_swp, &target_shm_info->shm_swp);
  4041. __put_user(host_shm_info->swap_attempts, &target_shm_info->swap_attempts);
  4042. __put_user(host_shm_info->swap_successes, &target_shm_info->swap_successes);
  4043. unlock_user_struct(target_shm_info, target_addr, 1);
  4044. return 0;
  4045. }
/*
 * Emulate shmctl(2): convert the guest buffer at @buf to/from host
 * representation around the host shmctl() call.  Unknown commands fall
 * through and return -TARGET_EINVAL.
 */
static inline abi_long do_shmctl(int shmid, int cmd, abi_long buf)
{
    struct shmid_ds dsarg;
    struct shminfo shminfo;
    struct shm_info shm_info;
    abi_long ret = -TARGET_EINVAL;

    cmd &= 0xff;   /* strip version/IPC_64 bits from the command */

    switch(cmd) {
    case IPC_STAT:
    case IPC_SET:
    case SHM_STAT:
        /* Round-trip the guest shmid_ds through the host call. */
        if (target_to_host_shmid_ds(&dsarg, buf))
            return -TARGET_EFAULT;
        ret = get_errno(shmctl(shmid, cmd, &dsarg));
        if (host_to_target_shmid_ds(buf, &dsarg))
            return -TARGET_EFAULT;
        break;
    case IPC_INFO:
        /* Returns a struct shminfo through the shmid_ds pointer. */
        ret = get_errno(shmctl(shmid, cmd, (struct shmid_ds *)&shminfo));
        if (host_to_target_shminfo(buf, &shminfo))
            return -TARGET_EFAULT;
        break;
    case SHM_INFO:
        /* Returns a struct shm_info through the shmid_ds pointer. */
        ret = get_errno(shmctl(shmid, cmd, (struct shmid_ds *)&shm_info));
        if (host_to_target_shm_info(buf, &shm_info))
            return -TARGET_EFAULT;
        break;
    case IPC_RMID:
    case SHM_LOCK:
    case SHM_UNLOCK:
        ret = get_errno(shmctl(shmid, cmd, NULL));
        break;
    }

    return ret;
}
#ifdef TARGET_NR_ipc
/* ??? This only works with linear mappings. */
/* do_ipc() must return target values and target errnos. */
/*
 * Demultiplex the legacy ipc(2) syscall into the individual SysV IPC
 * operations.  @call encodes the operation in its low 16 bits and an
 * interface "version" in the upper bits, which selects old vs new
 * calling conventions for msgrcv and shmat.
 */
static abi_long do_ipc(CPUArchState *cpu_env,
                       unsigned int call, abi_long first,
                       abi_long second, abi_long third,
                       abi_long ptr, abi_long fifth)
{
    int version;
    abi_long ret = 0;

    version = call >> 16;
    call &= 0xffff;

    switch (call) {
    case IPCOP_semop:
        ret = do_semtimedop(first, ptr, second, 0, false);
        break;
    case IPCOP_semtimedop:
    /*
     * The s390 sys_ipc variant has only five parameters instead of six
     * (as for default variant) and the only difference is the handling of
     * SEMTIMEDOP where on s390 the third parameter is used as a pointer
     * to a struct timespec where the generic variant uses fifth parameter.
     */
#if defined(TARGET_S390X)
        ret = do_semtimedop(first, ptr, second, third, TARGET_ABI_BITS == 64);
#else
        ret = do_semtimedop(first, ptr, second, fifth, TARGET_ABI_BITS == 64);
#endif
        break;
    case IPCOP_semget:
        ret = get_errno(semget(first, second, third));
        break;
    case IPCOP_semctl: {
        /* The semun argument to semctl is passed by value, so dereference the
         * ptr argument. */
        abi_ulong atptr;
        get_user_ual(atptr, ptr);
        ret = do_semctl(first, second, third, atptr);
        break;
    }
    case IPCOP_msgget:
        ret = get_errno(msgget(first, second));
        break;
    case IPCOP_msgsnd:
        ret = do_msgsnd(first, ptr, second, third);
        break;
    case IPCOP_msgctl:
        ret = do_msgctl(first, second, ptr);
        break;
    case IPCOP_msgrcv:
        switch (version) {
        case 0:
            {
                /* Old-style interface: ptr points at a kludge struct
                 * carrying both the msgbuf pointer and the msgtyp. */
                struct target_ipc_kludge {
                    abi_long msgp;
                    abi_long msgtyp;
                } *tmp;

                if (!lock_user_struct(VERIFY_READ, tmp, ptr, 1)) {
                    ret = -TARGET_EFAULT;
                    break;
                }

                ret = do_msgrcv(first, tswapal(tmp->msgp), second, tswapal(tmp->msgtyp), third);

                unlock_user_struct(tmp, ptr, 0);
                break;
            }
        default:
            ret = do_msgrcv(first, ptr, second, fifth, third);
        }
        break;
    case IPCOP_shmat:
        switch (version) {
        default:
        {
            abi_ulong raddr;
            raddr = target_shmat(cpu_env, first, ptr, second);
            if (is_error(raddr))
                return get_errno(raddr);
            /* The attach address is returned indirectly via *third. */
            if (put_user_ual(raddr, third))
                return -TARGET_EFAULT;
            break;
        }
        case 1:
            ret = -TARGET_EINVAL;
            break;
        }
        break;
    case IPCOP_shmdt:
        ret = target_shmdt(ptr);
        break;
    case IPCOP_shmget:
        /* IPC_* flag values are the same on all linux platforms */
        ret = get_errno(shmget(first, second, third));
        break;
    /* IPC_* and SHM_* command values are the same on all linux platforms */
    case IPCOP_shmctl:
        ret = do_shmctl(first, second, ptr);
        break;
    default:
        qemu_log_mask(LOG_UNIMP, "Unsupported ipc call: %d (version %d)\n",
                      call, version);
        ret = -TARGET_ENOSYS;
        break;
    }
    return ret;
}
#endif
/* kernel structure types definitions */
/*
 * syscall_types.h is expanded twice: the first pass builds an enum of
 * STRUCT_* identifiers, the second emits an argtype layout table for
 * each ordinary struct (STRUCT_SPECIAL entries get hand-written tables
 * elsewhere).
 */
#define STRUCT(name, ...) STRUCT_ ## name,
#define STRUCT_SPECIAL(name) STRUCT_ ## name,
enum {
#include "syscall_types.h"
STRUCT_MAX
};
#undef STRUCT
#undef STRUCT_SPECIAL

/* Second pass: per-struct field descriptor arrays, TYPE_NULL terminated. */
#define STRUCT(name, ...) static const argtype struct_ ## name ## _def[] = { __VA_ARGS__, TYPE_NULL };
#define STRUCT_SPECIAL(name)
#include "syscall_types.h"
#undef STRUCT
#undef STRUCT_SPECIAL

#define MAX_STRUCT_SIZE 4096
#ifdef CONFIG_FIEMAP
/* So fiemap access checks don't overflow on 32 bit systems.
 * This is very slightly smaller than the limit imposed by
 * the underlying kernel.
 */
#define FIEMAP_MAX_EXTENTS ((UINT_MAX - sizeof(struct fiemap)) \
                            / sizeof(struct fiemap_extent))

static abi_long do_ioctl_fs_ioc_fiemap(const IOCTLEntry *ie, uint8_t *buf_temp,
                                       int fd, int cmd, abi_long arg)
{
    /* The parameter for this ioctl is a struct fiemap followed
     * by an array of struct fiemap_extent whose size is set
     * in fiemap->fm_extent_count. The array is filled in by the
     * ioctl.
     */
    int target_size_in, target_size_out;
    struct fiemap *fm;
    const argtype *arg_type = ie->arg_type;
    const argtype extent_arg_type[] = { MK_STRUCT(STRUCT_fiemap_extent) };
    void *argptr, *p;
    abi_long ret;
    int i, extent_size = thunk_type_size(extent_arg_type, 0);
    uint32_t outbufsz;
    int free_fm = 0;

    assert(arg_type[0] == TYPE_PTR);
    assert(ie->access == IOC_RW);
    arg_type++;   /* skip over TYPE_PTR to the pointed-to struct type */
    target_size_in = thunk_type_size(arg_type, 0);
    argptr = lock_user(VERIFY_READ, arg, target_size_in, 1);
    if (!argptr) {
        return -TARGET_EFAULT;
    }
    thunk_convert(buf_temp, argptr, arg_type, THUNK_HOST);
    unlock_user(argptr, arg, 0);
    fm = (struct fiemap *)buf_temp;
    if (fm->fm_extent_count > FIEMAP_MAX_EXTENTS) {
        return -TARGET_EINVAL;
    }
    outbufsz = sizeof (*fm) +
        (sizeof(struct fiemap_extent) * fm->fm_extent_count);

    if (outbufsz > MAX_STRUCT_SIZE) {
        /* We can't fit all the extents into the fixed size buffer.
         * Allocate one that is large enough and use it instead.
         */
        fm = g_try_malloc(outbufsz);
        if (!fm) {
            return -TARGET_ENOMEM;
        }
        memcpy(fm, buf_temp, sizeof(struct fiemap));
        free_fm = 1;
    }
    ret = get_errno(safe_ioctl(fd, ie->host_cmd, fm));
    if (!is_error(ret)) {
        target_size_out = target_size_in;
        /* An extent_count of 0 means we were only counting the extents
         * so there are no structs to copy
         */
        if (fm->fm_extent_count != 0) {
            target_size_out += fm->fm_mapped_extents * extent_size;
        }
        argptr = lock_user(VERIFY_WRITE, arg, target_size_out, 0);
        if (!argptr) {
            ret = -TARGET_EFAULT;
        } else {
            /* Convert the struct fiemap */
            thunk_convert(argptr, fm, arg_type, THUNK_TARGET);
            if (fm->fm_extent_count != 0) {
                p = argptr + target_size_in;
                /* ...and then all the struct fiemap_extents */
                for (i = 0; i < fm->fm_mapped_extents; i++) {
                    thunk_convert(p, &fm->fm_extents[i], extent_arg_type,
                                  THUNK_TARGET);
                    p += extent_size;
                }
            }
            unlock_user(argptr, arg, target_size_out);
        }
    }
    if (free_fm) {
        g_free(fm);
    }
    return ret;
}
#endif
  4286. static abi_long do_ioctl_ifconf(const IOCTLEntry *ie, uint8_t *buf_temp,
  4287. int fd, int cmd, abi_long arg)
  4288. {
  4289. const argtype *arg_type = ie->arg_type;
  4290. int target_size;
  4291. void *argptr;
  4292. int ret;
  4293. struct ifconf *host_ifconf;
  4294. uint32_t outbufsz;
  4295. const argtype ifreq_arg_type[] = { MK_STRUCT(STRUCT_sockaddr_ifreq) };
  4296. const argtype ifreq_max_type[] = { MK_STRUCT(STRUCT_ifmap_ifreq) };
  4297. int target_ifreq_size;
  4298. int nb_ifreq;
  4299. int free_buf = 0;
  4300. int i;
  4301. int target_ifc_len;
  4302. abi_long target_ifc_buf;
  4303. int host_ifc_len;
  4304. char *host_ifc_buf;
  4305. assert(arg_type[0] == TYPE_PTR);
  4306. assert(ie->access == IOC_RW);
  4307. arg_type++;
  4308. target_size = thunk_type_size(arg_type, 0);
  4309. argptr = lock_user(VERIFY_READ, arg, target_size, 1);
  4310. if (!argptr)
  4311. return -TARGET_EFAULT;
  4312. thunk_convert(buf_temp, argptr, arg_type, THUNK_HOST);
  4313. unlock_user(argptr, arg, 0);
  4314. host_ifconf = (struct ifconf *)(unsigned long)buf_temp;
  4315. target_ifc_buf = (abi_long)(unsigned long)host_ifconf->ifc_buf;
  4316. target_ifreq_size = thunk_type_size(ifreq_max_type, 0);
  4317. if (target_ifc_buf != 0) {
  4318. target_ifc_len = host_ifconf->ifc_len;
  4319. nb_ifreq = target_ifc_len / target_ifreq_size;
  4320. host_ifc_len = nb_ifreq * sizeof(struct ifreq);
  4321. outbufsz = sizeof(*host_ifconf) + host_ifc_len;
  4322. if (outbufsz > MAX_STRUCT_SIZE) {
  4323. /*
  4324. * We can't fit all the extents into the fixed size buffer.
  4325. * Allocate one that is large enough and use it instead.
  4326. */
  4327. host_ifconf = g_try_malloc(outbufsz);
  4328. if (!host_ifconf) {
  4329. return -TARGET_ENOMEM;
  4330. }
  4331. memcpy(host_ifconf, buf_temp, sizeof(*host_ifconf));
  4332. free_buf = 1;
  4333. }
  4334. host_ifc_buf = (char *)host_ifconf + sizeof(*host_ifconf);
  4335. host_ifconf->ifc_len = host_ifc_len;
  4336. } else {
  4337. host_ifc_buf = NULL;
  4338. }
  4339. host_ifconf->ifc_buf = host_ifc_buf;
  4340. ret = get_errno(safe_ioctl(fd, ie->host_cmd, host_ifconf));
  4341. if (!is_error(ret)) {
  4342. /* convert host ifc_len to target ifc_len */
  4343. nb_ifreq = host_ifconf->ifc_len / sizeof(struct ifreq);
  4344. target_ifc_len = nb_ifreq * target_ifreq_size;
  4345. host_ifconf->ifc_len = target_ifc_len;
  4346. /* restore target ifc_buf */
  4347. host_ifconf->ifc_buf = (char *)(unsigned long)target_ifc_buf;
  4348. /* copy struct ifconf to target user */
  4349. argptr = lock_user(VERIFY_WRITE, arg, target_size, 0);
  4350. if (!argptr)
  4351. return -TARGET_EFAULT;
  4352. thunk_convert(argptr, host_ifconf, arg_type, THUNK_TARGET);
  4353. unlock_user(argptr, arg, target_size);
  4354. if (target_ifc_buf != 0) {
  4355. /* copy ifreq[] to target user */
  4356. argptr = lock_user(VERIFY_WRITE, target_ifc_buf, target_ifc_len, 0);
  4357. for (i = 0; i < nb_ifreq ; i++) {
  4358. thunk_convert(argptr + i * target_ifreq_size,
  4359. host_ifc_buf + i * sizeof(struct ifreq),
  4360. ifreq_arg_type, THUNK_TARGET);
  4361. }
  4362. unlock_user(argptr, target_ifc_buf, target_ifc_len);
  4363. }
  4364. }
  4365. if (free_buf) {
  4366. g_free(host_ifconf);
  4367. }
  4368. return ret;
  4369. }
#if defined(CONFIG_USBFS)
#if HOST_LONG_BITS > 64
#error USBDEVFS thunks do not support >64 bit hosts yet.
#endif
/*
 * Host-side bookkeeping for one in-flight guest URB: the guest URB and
 * buffer addresses, the locked host view of the data buffer, and the
 * host usbdevfs_urb actually handed to the kernel.
 */
struct live_urb {
    uint64_t target_urb_adr;   /* guest address of the guest urb struct */
    uint64_t target_buf_adr;   /* guest address of the data buffer */
    char *target_buf_ptr;      /* host pointer from lock_user() on the buffer */
    struct usbdevfs_urb host_urb;
};

/* Lazily-created table mapping guest URB address -> struct live_urb. */
static GHashTable *usbdevfs_urb_hashtable(void)
{
    static GHashTable *urb_hashtable;

    if (!urb_hashtable) {
        urb_hashtable = g_hash_table_new(g_int64_hash, g_int64_equal);
    }
    return urb_hashtable;
}

/* The live_urb pointer doubles as the key: g_int64_hash reads its
 * leading target_urb_adr field, so lookups by &target_urb_adr match. */
static void urb_hashtable_insert(struct live_urb *urb)
{
    GHashTable *urb_hashtable = usbdevfs_urb_hashtable();
    g_hash_table_insert(urb_hashtable, urb, urb);
}

static struct live_urb *urb_hashtable_lookup(uint64_t target_urb_adr)
{
    GHashTable *urb_hashtable = usbdevfs_urb_hashtable();
    return g_hash_table_lookup(urb_hashtable, &target_urb_adr);
}

static void urb_hashtable_remove(struct live_urb *urb)
{
    GHashTable *urb_hashtable = usbdevfs_urb_hashtable();
    g_hash_table_remove(urb_hashtable, urb);
}
/*
 * USBDEVFS_REAPURB(NDELAY): reap a completed URB from the kernel,
 * recover our live_urb wrapper from the returned host_urb pointer,
 * copy the results back to the guest urb struct, and write the guest
 * URB address into the guest's result pointer at @arg.
 */
static abi_long
do_ioctl_usbdevfs_reapurb(const IOCTLEntry *ie, uint8_t *buf_temp,
                          int fd, int cmd, abi_long arg)
{
    const argtype usbfsurb_arg_type[] = { MK_STRUCT(STRUCT_usbdevfs_urb) };
    const argtype ptrvoid_arg_type[] = { TYPE_PTRVOID, 0, 0 };
    struct live_urb *lurb;
    void *argptr;
    uint64_t hurb;
    int target_size;
    uintptr_t target_urb_adr;
    abi_long ret;

    target_size = thunk_type_size(usbfsurb_arg_type, THUNK_TARGET);

    memset(buf_temp, 0, sizeof(uint64_t));
    ret = get_errno(safe_ioctl(fd, ie->host_cmd, buf_temp));
    if (is_error(ret)) {
        return ret;
    }

    memcpy(&hurb, buf_temp, sizeof(uint64_t));
    /* The kernel returned &lurb->host_urb; step back to the wrapper. */
    lurb = (void *)((uintptr_t)hurb - offsetof(struct live_urb, host_urb));
    if (!lurb->target_urb_adr) {
        return -TARGET_EFAULT;
    }
    urb_hashtable_remove(lurb);
    unlock_user(lurb->target_buf_ptr, lurb->target_buf_adr,
        lurb->host_urb.buffer_length);
    lurb->target_buf_ptr = NULL;

    /* restore the guest buffer pointer */
    lurb->host_urb.buffer = (void *)(uintptr_t)lurb->target_buf_adr;

    /* update the guest urb struct */
    argptr = lock_user(VERIFY_WRITE, lurb->target_urb_adr, target_size, 0);
    if (!argptr) {
        g_free(lurb);
        return -TARGET_EFAULT;
    }
    thunk_convert(argptr, &lurb->host_urb, usbfsurb_arg_type, THUNK_TARGET);
    unlock_user(argptr, lurb->target_urb_adr, target_size);

    target_size = thunk_type_size(ptrvoid_arg_type, THUNK_TARGET);
    /* write back the urb handle */
    argptr = lock_user(VERIFY_WRITE, arg, target_size, 0);
    if (!argptr) {
        g_free(lurb);
        return -TARGET_EFAULT;
    }

    /* GHashTable uses 64-bit keys but thunk_convert expects uintptr_t */
    target_urb_adr = lurb->target_urb_adr;
    thunk_convert(argptr, &target_urb_adr, ptrvoid_arg_type, THUNK_TARGET);
    unlock_user(argptr, arg, target_size);

    g_free(lurb);
    return ret;
}
  4454. static abi_long
  4455. do_ioctl_usbdevfs_discardurb(const IOCTLEntry *ie,
  4456. uint8_t *buf_temp __attribute__((unused)),
  4457. int fd, int cmd, abi_long arg)
  4458. {
  4459. struct live_urb *lurb;
  4460. /* map target address back to host URB with metadata. */
  4461. lurb = urb_hashtable_lookup(arg);
  4462. if (!lurb) {
  4463. return -TARGET_EFAULT;
  4464. }
  4465. return get_errno(safe_ioctl(fd, ie->host_cmd, &lurb->host_urb));
  4466. }
/*
 * USBDEVFS_SUBMITURB: convert the guest urb into a freshly allocated
 * live_urb (the allocation's address is the unique handle the kernel
 * hands back on reap), lock the guest data buffer for the transfer's
 * duration, submit, and remember the URB in the hashtable on success.
 */
static abi_long
do_ioctl_usbdevfs_submiturb(const IOCTLEntry *ie, uint8_t *buf_temp,
                            int fd, int cmd, abi_long arg)
{
    const argtype *arg_type = ie->arg_type;
    int target_size;
    abi_long ret;
    void *argptr;
    int rw_dir;
    struct live_urb *lurb;

    /*
     * each submitted URB needs to map to a unique ID for the
     * kernel, and that unique ID needs to be a pointer to
     * host memory. hence, we need to malloc for each URB.
     * isochronous transfers have a variable length struct.
     */
    arg_type++;
    target_size = thunk_type_size(arg_type, THUNK_TARGET);

    /* construct host copy of urb and metadata */
    lurb = g_try_new0(struct live_urb, 1);
    if (!lurb) {
        return -TARGET_ENOMEM;
    }

    argptr = lock_user(VERIFY_READ, arg, target_size, 1);
    if (!argptr) {
        g_free(lurb);
        return -TARGET_EFAULT;
    }
    thunk_convert(&lurb->host_urb, argptr, arg_type, THUNK_HOST);
    unlock_user(argptr, arg, 0);

    lurb->target_urb_adr = arg;
    lurb->target_buf_adr = (uintptr_t)lurb->host_urb.buffer;

    /* buffer space used depends on endpoint type so lock the entire buffer */
    /* control type urbs should check the buffer contents for true direction */
    rw_dir = lurb->host_urb.endpoint & USB_DIR_IN ? VERIFY_WRITE : VERIFY_READ;
    lurb->target_buf_ptr = lock_user(rw_dir, lurb->target_buf_adr,
        lurb->host_urb.buffer_length, 1);
    if (lurb->target_buf_ptr == NULL) {
        g_free(lurb);
        return -TARGET_EFAULT;
    }

    /* update buffer pointer in host copy */
    lurb->host_urb.buffer = lurb->target_buf_ptr;

    ret = get_errno(safe_ioctl(fd, ie->host_cmd, &lurb->host_urb));
    if (is_error(ret)) {
        unlock_user(lurb->target_buf_ptr, lurb->target_buf_adr, 0);
        g_free(lurb);
    } else {
        urb_hashtable_insert(lurb);
    }

    return ret;
}
#endif /* CONFIG_USBFS */
  4520. static abi_long do_ioctl_dm(const IOCTLEntry *ie, uint8_t *buf_temp, int fd,
  4521. int cmd, abi_long arg)
  4522. {
  4523. void *argptr;
  4524. struct dm_ioctl *host_dm;
  4525. abi_long guest_data;
  4526. uint32_t guest_data_size;
  4527. int target_size;
  4528. const argtype *arg_type = ie->arg_type;
  4529. abi_long ret;
  4530. void *big_buf = NULL;
  4531. char *host_data;
  4532. arg_type++;
  4533. target_size = thunk_type_size(arg_type, 0);
  4534. argptr = lock_user(VERIFY_READ, arg, target_size, 1);
  4535. if (!argptr) {
  4536. ret = -TARGET_EFAULT;
  4537. goto out;
  4538. }
  4539. thunk_convert(buf_temp, argptr, arg_type, THUNK_HOST);
  4540. unlock_user(argptr, arg, 0);
  4541. /* buf_temp is too small, so fetch things into a bigger buffer */
  4542. big_buf = g_malloc0(((struct dm_ioctl*)buf_temp)->data_size * 2);
  4543. memcpy(big_buf, buf_temp, target_size);
  4544. buf_temp = big_buf;
  4545. host_dm = big_buf;
  4546. guest_data = arg + host_dm->data_start;
  4547. if ((guest_data - arg) < 0) {
  4548. ret = -TARGET_EINVAL;
  4549. goto out;
  4550. }
  4551. guest_data_size = host_dm->data_size - host_dm->data_start;
  4552. host_data = (char*)host_dm + host_dm->data_start;
  4553. argptr = lock_user(VERIFY_READ, guest_data, guest_data_size, 1);
  4554. if (!argptr) {
  4555. ret = -TARGET_EFAULT;
  4556. goto out;
  4557. }
  4558. switch (ie->host_cmd) {
  4559. case DM_REMOVE_ALL:
  4560. case DM_LIST_DEVICES:
  4561. case DM_DEV_CREATE:
  4562. case DM_DEV_REMOVE:
  4563. case DM_DEV_SUSPEND:
  4564. case DM_DEV_STATUS:
  4565. case DM_DEV_WAIT:
  4566. case DM_TABLE_STATUS:
  4567. case DM_TABLE_CLEAR:
  4568. case DM_TABLE_DEPS:
  4569. case DM_LIST_VERSIONS:
  4570. /* no input data */
  4571. break;
  4572. case DM_DEV_RENAME:
  4573. case DM_DEV_SET_GEOMETRY:
  4574. /* data contains only strings */
  4575. memcpy(host_data, argptr, guest_data_size);
  4576. break;
  4577. case DM_TARGET_MSG:
  4578. memcpy(host_data, argptr, guest_data_size);
  4579. *(uint64_t*)host_data = tswap64(*(uint64_t*)argptr);
  4580. break;
  4581. case DM_TABLE_LOAD:
  4582. {
  4583. void *gspec = argptr;
  4584. void *cur_data = host_data;
  4585. const argtype dm_arg_type[] = { MK_STRUCT(STRUCT_dm_target_spec) };
  4586. int spec_size = thunk_type_size(dm_arg_type, 0);
  4587. int i;
  4588. for (i = 0; i < host_dm->target_count; i++) {
  4589. struct dm_target_spec *spec = cur_data;
  4590. uint32_t next;
  4591. int slen;
  4592. thunk_convert(spec, gspec, dm_arg_type, THUNK_HOST);
  4593. slen = strlen((char*)gspec + spec_size) + 1;
  4594. next = spec->next;
  4595. spec->next = sizeof(*spec) + slen;
  4596. strcpy((char*)&spec[1], gspec + spec_size);
  4597. gspec += next;
  4598. cur_data += spec->next;
  4599. }
  4600. break;
  4601. }
  4602. default:
  4603. ret = -TARGET_EINVAL;
  4604. unlock_user(argptr, guest_data, 0);
  4605. goto out;
  4606. }
  4607. unlock_user(argptr, guest_data, 0);
  4608. ret = get_errno(safe_ioctl(fd, ie->host_cmd, buf_temp));
  4609. if (!is_error(ret)) {
  4610. guest_data = arg + host_dm->data_start;
  4611. guest_data_size = host_dm->data_size - host_dm->data_start;
  4612. argptr = lock_user(VERIFY_WRITE, guest_data, guest_data_size, 0);
  4613. switch (ie->host_cmd) {
  4614. case DM_REMOVE_ALL:
  4615. case DM_DEV_CREATE:
  4616. case DM_DEV_REMOVE:
  4617. case DM_DEV_RENAME:
  4618. case DM_DEV_SUSPEND:
  4619. case DM_DEV_STATUS:
  4620. case DM_TABLE_LOAD:
  4621. case DM_TABLE_CLEAR:
  4622. case DM_TARGET_MSG:
  4623. case DM_DEV_SET_GEOMETRY:
  4624. /* no return data */
  4625. break;
  4626. case DM_LIST_DEVICES:
  4627. {
  4628. struct dm_name_list *nl = (void*)host_dm + host_dm->data_start;
  4629. uint32_t remaining_data = guest_data_size;
  4630. void *cur_data = argptr;
  4631. const argtype dm_arg_type[] = { MK_STRUCT(STRUCT_dm_name_list) };
  4632. int nl_size = 12; /* can't use thunk_size due to alignment */
  4633. while (1) {
  4634. uint32_t next = nl->next;
  4635. if (next) {
  4636. nl->next = nl_size + (strlen(nl->name) + 1);
  4637. }
  4638. if (remaining_data < nl->next) {
  4639. host_dm->flags |= DM_BUFFER_FULL_FLAG;
  4640. break;
  4641. }
  4642. thunk_convert(cur_data, nl, dm_arg_type, THUNK_TARGET);
  4643. strcpy(cur_data + nl_size, nl->name);
  4644. cur_data += nl->next;
  4645. remaining_data -= nl->next;
  4646. if (!next) {
  4647. break;
  4648. }
  4649. nl = (void*)nl + next;
  4650. }
  4651. break;
  4652. }
  4653. case DM_DEV_WAIT:
  4654. case DM_TABLE_STATUS:
  4655. {
  4656. struct dm_target_spec *spec = (void*)host_dm + host_dm->data_start;
  4657. void *cur_data = argptr;
  4658. const argtype dm_arg_type[] = { MK_STRUCT(STRUCT_dm_target_spec) };
  4659. int spec_size = thunk_type_size(dm_arg_type, 0);
  4660. int i;
  4661. for (i = 0; i < host_dm->target_count; i++) {
  4662. uint32_t next = spec->next;
  4663. int slen = strlen((char*)&spec[1]) + 1;
  4664. spec->next = (cur_data - argptr) + spec_size + slen;
  4665. if (guest_data_size < spec->next) {
  4666. host_dm->flags |= DM_BUFFER_FULL_FLAG;
  4667. break;
  4668. }
  4669. thunk_convert(cur_data, spec, dm_arg_type, THUNK_TARGET);
  4670. strcpy(cur_data + spec_size, (char*)&spec[1]);
  4671. cur_data = argptr + spec->next;
  4672. spec = (void*)host_dm + host_dm->data_start + next;
  4673. }
  4674. break;
  4675. }
  4676. case DM_TABLE_DEPS:
  4677. {
  4678. void *hdata = (void*)host_dm + host_dm->data_start;
  4679. int count = *(uint32_t*)hdata;
  4680. uint64_t *hdev = hdata + 8;
  4681. uint64_t *gdev = argptr + 8;
  4682. int i;
  4683. *(uint32_t*)argptr = tswap32(count);
  4684. for (i = 0; i < count; i++) {
  4685. *gdev = tswap64(*hdev);
  4686. gdev++;
  4687. hdev++;
  4688. }
  4689. break;
  4690. }
  4691. case DM_LIST_VERSIONS:
  4692. {
  4693. struct dm_target_versions *vers = (void*)host_dm + host_dm->data_start;
  4694. uint32_t remaining_data = guest_data_size;
  4695. void *cur_data = argptr;
  4696. const argtype dm_arg_type[] = { MK_STRUCT(STRUCT_dm_target_versions) };
  4697. int vers_size = thunk_type_size(dm_arg_type, 0);
  4698. while (1) {
  4699. uint32_t next = vers->next;
  4700. if (next) {
  4701. vers->next = vers_size + (strlen(vers->name) + 1);
  4702. }
  4703. if (remaining_data < vers->next) {
  4704. host_dm->flags |= DM_BUFFER_FULL_FLAG;
  4705. break;
  4706. }
  4707. thunk_convert(cur_data, vers, dm_arg_type, THUNK_TARGET);
  4708. strcpy(cur_data + vers_size, vers->name);
  4709. cur_data += vers->next;
  4710. remaining_data -= vers->next;
  4711. if (!next) {
  4712. break;
  4713. }
  4714. vers = (void*)vers + next;
  4715. }
  4716. break;
  4717. }
  4718. default:
  4719. unlock_user(argptr, guest_data, 0);
  4720. ret = -TARGET_EINVAL;
  4721. goto out;
  4722. }
  4723. unlock_user(argptr, guest_data, guest_data_size);
  4724. argptr = lock_user(VERIFY_WRITE, arg, target_size, 0);
  4725. if (!argptr) {
  4726. ret = -TARGET_EFAULT;
  4727. goto out;
  4728. }
  4729. thunk_convert(argptr, buf_temp, arg_type, THUNK_TARGET);
  4730. unlock_user(argptr, arg, target_size);
  4731. }
  4732. out:
  4733. g_free(big_buf);
  4734. return ret;
  4735. }
/*
 * BLKPG: the ioctl argument is a struct blkpg_ioctl_arg whose 'data'
 * member is itself a guest pointer to a struct blkpg_partition.  Both
 * levels must be converted by hand: the outer struct via the thunk
 * machinery, the inner payload into a host-side local, with the host
 * struct's data pointer swizzled to point at that local before the
 * real ioctl is issued.
 */
static abi_long do_ioctl_blkpg(const IOCTLEntry *ie, uint8_t *buf_temp, int fd,
                               int cmd, abi_long arg)
{
    void *argptr;
    int target_size;
    const argtype *arg_type = ie->arg_type;
    const argtype part_arg_type[] = { MK_STRUCT(STRUCT_blkpg_partition) };
    abi_long ret;
    struct blkpg_ioctl_arg *host_blkpg = (void*)buf_temp;
    struct blkpg_partition host_part;

    /* Read and convert blkpg */
    arg_type++;  /* skip TYPE_PTR; now points at the struct description */
    target_size = thunk_type_size(arg_type, 0);
    argptr = lock_user(VERIFY_READ, arg, target_size, 1);
    if (!argptr) {
        ret = -TARGET_EFAULT;
        goto out;
    }
    thunk_convert(buf_temp, argptr, arg_type, THUNK_HOST);
    unlock_user(argptr, arg, 0);

    switch (host_blkpg->op) {
    case BLKPG_ADD_PARTITION:
    case BLKPG_DEL_PARTITION:
        /* payload is struct blkpg_partition */
        break;
    default:
        /* Unknown opcode */
        ret = -TARGET_EINVAL;
        goto out;
    }

    /* Read and convert blkpg->data (a guest address after conversion) */
    arg = (abi_long)(uintptr_t)host_blkpg->data;
    target_size = thunk_type_size(part_arg_type, 0);
    argptr = lock_user(VERIFY_READ, arg, target_size, 1);
    if (!argptr) {
        ret = -TARGET_EFAULT;
        goto out;
    }
    thunk_convert(&host_part, argptr, part_arg_type, THUNK_HOST);
    unlock_user(argptr, arg, 0);

    /* Swizzle the data pointer to our local copy and call! */
    host_blkpg->data = &host_part;
    ret = get_errno(safe_ioctl(fd, ie->host_cmd, host_blkpg));

out:
    return ret;
}
/*
 * SIOCADDRT/SIOCDELRT: the struct rtentry argument embeds an rt_dev
 * string pointer, which the generic thunk conversion cannot handle.
 * The struct is therefore converted field by field here; when the
 * rt_dev field is reached, the guest device-name string is pinned
 * with lock_user_string() and the host pointer substituted.
 */
static abi_long do_ioctl_rt(const IOCTLEntry *ie, uint8_t *buf_temp,
                            int fd, int cmd, abi_long arg)
{
    const argtype *arg_type = ie->arg_type;
    const StructEntry *se;
    const argtype *field_types;
    const int *dst_offsets, *src_offsets;
    int target_size;
    void *argptr;
    abi_ulong *target_rt_dev_ptr = NULL;
    unsigned long *host_rt_dev_ptr = NULL;
    abi_long ret;
    int i;

    assert(ie->access == IOC_W);
    assert(*arg_type == TYPE_PTR);
    arg_type++;
    assert(*arg_type == TYPE_STRUCT);
    target_size = thunk_type_size(arg_type, 0);
    argptr = lock_user(VERIFY_READ, arg, target_size, 1);
    if (!argptr) {
        return -TARGET_EFAULT;
    }
    arg_type++;
    assert(*arg_type == (int)STRUCT_rtentry);
    se = struct_entries + *arg_type++;
    /* This handler only works for structs without a custom converter. */
    assert(se->convert[0] == NULL);
    /* convert struct here to be able to catch rt_dev string */
    field_types = se->field_types;
    dst_offsets = se->field_offsets[THUNK_HOST];
    src_offsets = se->field_offsets[THUNK_TARGET];
    for (i = 0; i < se->nb_fields; i++) {
        if (dst_offsets[i] == offsetof(struct rtentry, rt_dev)) {
            assert(*field_types == TYPE_PTRVOID);
            target_rt_dev_ptr = argptr + src_offsets[i];
            host_rt_dev_ptr = (unsigned long *)(buf_temp + dst_offsets[i]);
            if (*target_rt_dev_ptr != 0) {
                /* Pin the guest device-name string for the host call. */
                *host_rt_dev_ptr = (unsigned long)lock_user_string(
                                                  tswapal(*target_rt_dev_ptr));
                if (!*host_rt_dev_ptr) {
                    unlock_user(argptr, arg, 0);
                    return -TARGET_EFAULT;
                }
            } else {
                *host_rt_dev_ptr = 0;
            }
            field_types++;
            continue;
        }
        field_types = thunk_convert(buf_temp + dst_offsets[i],
                                    argptr + src_offsets[i],
                                    field_types, THUNK_HOST);
    }
    unlock_user(argptr, arg, 0);

    ret = get_errno(safe_ioctl(fd, ie->host_cmd, buf_temp));

    /* The loop above must always have seen the rt_dev field. */
    assert(host_rt_dev_ptr != NULL);
    assert(target_rt_dev_ptr != NULL);
    if (*host_rt_dev_ptr != 0) {
        /* Release the pinned device-name string (no write-back). */
        unlock_user((void *)*host_rt_dev_ptr,
                    *target_rt_dev_ptr, 0);
    }
    return ret;
}
  4844. static abi_long do_ioctl_kdsigaccept(const IOCTLEntry *ie, uint8_t *buf_temp,
  4845. int fd, int cmd, abi_long arg)
  4846. {
  4847. int sig = target_to_host_signal(arg);
  4848. return get_errno(safe_ioctl(fd, ie->host_cmd, sig));
  4849. }
  4850. static abi_long do_ioctl_SIOCGSTAMP(const IOCTLEntry *ie, uint8_t *buf_temp,
  4851. int fd, int cmd, abi_long arg)
  4852. {
  4853. struct timeval tv;
  4854. abi_long ret;
  4855. ret = get_errno(safe_ioctl(fd, SIOCGSTAMP, &tv));
  4856. if (is_error(ret)) {
  4857. return ret;
  4858. }
  4859. if (cmd == (int)TARGET_SIOCGSTAMP_OLD) {
  4860. if (copy_to_user_timeval(arg, &tv)) {
  4861. return -TARGET_EFAULT;
  4862. }
  4863. } else {
  4864. if (copy_to_user_timeval64(arg, &tv)) {
  4865. return -TARGET_EFAULT;
  4866. }
  4867. }
  4868. return ret;
  4869. }
  4870. static abi_long do_ioctl_SIOCGSTAMPNS(const IOCTLEntry *ie, uint8_t *buf_temp,
  4871. int fd, int cmd, abi_long arg)
  4872. {
  4873. struct timespec ts;
  4874. abi_long ret;
  4875. ret = get_errno(safe_ioctl(fd, SIOCGSTAMPNS, &ts));
  4876. if (is_error(ret)) {
  4877. return ret;
  4878. }
  4879. if (cmd == (int)TARGET_SIOCGSTAMPNS_OLD) {
  4880. if (host_to_target_timespec(arg, &ts)) {
  4881. return -TARGET_EFAULT;
  4882. }
  4883. } else{
  4884. if (host_to_target_timespec64(arg, &ts)) {
  4885. return -TARGET_EFAULT;
  4886. }
  4887. }
  4888. return ret;
  4889. }
  4890. #ifdef TIOCGPTPEER
  4891. static abi_long do_ioctl_tiocgptpeer(const IOCTLEntry *ie, uint8_t *buf_temp,
  4892. int fd, int cmd, abi_long arg)
  4893. {
  4894. int flags = target_to_host_bitmask(arg, fcntl_flags_tbl);
  4895. return get_errno(safe_ioctl(fd, ie->host_cmd, flags));
  4896. }
  4897. #endif
  4898. #ifdef HAVE_DRM_H
  4899. static void unlock_drm_version(struct drm_version *host_ver,
  4900. struct target_drm_version *target_ver,
  4901. bool copy)
  4902. {
  4903. unlock_user(host_ver->name, target_ver->name,
  4904. copy ? host_ver->name_len : 0);
  4905. unlock_user(host_ver->date, target_ver->date,
  4906. copy ? host_ver->date_len : 0);
  4907. unlock_user(host_ver->desc, target_ver->desc,
  4908. copy ? host_ver->desc_len : 0);
  4909. }
/*
 * Pin the guest buffers that a DRM_IOCTL_VERSION call will fill in
 * (name/date/desc) and record host-usable pointers/lengths in
 * *host_ver.  Returns 0 on success, -EFAULT if any buffer cannot be
 * locked (in which case any buffers locked so far are released).
 */
static inline abi_long target_to_host_drmversion(struct drm_version *host_ver,
                                          struct target_drm_version *target_ver)
{
    memset(host_ver, 0, sizeof(*host_ver));

    __get_user(host_ver->name_len, &target_ver->name_len);
    if (host_ver->name_len) {
        /*
         * NOTE(review): lock_user() is passed the raw target_ver
         * length fields rather than the byte-swapped host_ver copies
         * produced by __get_user(); on a cross-endian target that
         * length looks unswapped — confirm intended behavior.
         */
        host_ver->name = lock_user(VERIFY_WRITE, target_ver->name,
                                   target_ver->name_len, 0);
        if (!host_ver->name) {
            return -EFAULT;
        }
    }

    __get_user(host_ver->date_len, &target_ver->date_len);
    if (host_ver->date_len) {
        host_ver->date = lock_user(VERIFY_WRITE, target_ver->date,
                                   target_ver->date_len, 0);
        if (!host_ver->date) {
            goto err;
        }
    }

    __get_user(host_ver->desc_len, &target_ver->desc_len);
    if (host_ver->desc_len) {
        host_ver->desc = lock_user(VERIFY_WRITE, target_ver->desc,
                                   target_ver->desc_len, 0);
        if (!host_ver->desc) {
            goto err;
        }
    }

    return 0;

err:
    /* Release whatever was locked so far; nothing is copied back. */
    unlock_drm_version(host_ver, target_ver, false);
    return -EFAULT;
}
/*
 * Copy the results of a successful DRM_IOCTL_VERSION back to the
 * guest: the scalar fields are stored into the target struct via
 * __put_user(), then the three pinned string buffers are released
 * with copy-back enabled so the host-written strings reach the guest.
 */
static inline void host_to_target_drmversion(
                          struct target_drm_version *target_ver,
                          struct drm_version *host_ver)
{
    __put_user(host_ver->version_major, &target_ver->version_major);
    __put_user(host_ver->version_minor, &target_ver->version_minor);
    __put_user(host_ver->version_patchlevel, &target_ver->version_patchlevel);
    __put_user(host_ver->name_len, &target_ver->name_len);
    __put_user(host_ver->date_len, &target_ver->date_len);
    __put_user(host_ver->desc_len, &target_ver->desc_len);
    unlock_drm_version(host_ver, target_ver, true);
}
  4955. static abi_long do_ioctl_drm(const IOCTLEntry *ie, uint8_t *buf_temp,
  4956. int fd, int cmd, abi_long arg)
  4957. {
  4958. struct drm_version *ver;
  4959. struct target_drm_version *target_ver;
  4960. abi_long ret;
  4961. switch (ie->host_cmd) {
  4962. case DRM_IOCTL_VERSION:
  4963. if (!lock_user_struct(VERIFY_WRITE, target_ver, arg, 0)) {
  4964. return -TARGET_EFAULT;
  4965. }
  4966. ver = (struct drm_version *)buf_temp;
  4967. ret = target_to_host_drmversion(ver, target_ver);
  4968. if (!is_error(ret)) {
  4969. ret = get_errno(safe_ioctl(fd, ie->host_cmd, ver));
  4970. if (is_error(ret)) {
  4971. unlock_drm_version(ver, target_ver, false);
  4972. } else {
  4973. host_to_target_drmversion(target_ver, ver);
  4974. }
  4975. }
  4976. unlock_user_struct(target_ver, arg, 0);
  4977. return ret;
  4978. }
  4979. return -TARGET_ENOSYS;
  4980. }
  4981. static abi_long do_ioctl_drm_i915_getparam(const IOCTLEntry *ie,
  4982. struct drm_i915_getparam *gparam,
  4983. int fd, abi_long arg)
  4984. {
  4985. abi_long ret;
  4986. int value;
  4987. struct target_drm_i915_getparam *target_gparam;
  4988. if (!lock_user_struct(VERIFY_READ, target_gparam, arg, 0)) {
  4989. return -TARGET_EFAULT;
  4990. }
  4991. __get_user(gparam->param, &target_gparam->param);
  4992. gparam->value = &value;
  4993. ret = get_errno(safe_ioctl(fd, ie->host_cmd, gparam));
  4994. put_user_s32(value, target_gparam->value);
  4995. unlock_user_struct(target_gparam, arg, 0);
  4996. return ret;
  4997. }
  4998. static abi_long do_ioctl_drm_i915(const IOCTLEntry *ie, uint8_t *buf_temp,
  4999. int fd, int cmd, abi_long arg)
  5000. {
  5001. switch (ie->host_cmd) {
  5002. case DRM_IOCTL_I915_GETPARAM:
  5003. return do_ioctl_drm_i915_getparam(ie,
  5004. (struct drm_i915_getparam *)buf_temp,
  5005. fd, arg);
  5006. default:
  5007. return -TARGET_ENOSYS;
  5008. }
  5009. }
  5010. #endif
/*
 * TUNSETTXFILTER: struct tun_filter carries a variable-length array
 * of MAC addresses after the fixed header, so it is converted by
 * hand: the fixed part (flags/count) first, then count * ETH_ALEN
 * bytes of raw addresses.
 */
static abi_long do_ioctl_TUNSETTXFILTER(const IOCTLEntry *ie, uint8_t *buf_temp,
                                        int fd, int cmd, abi_long arg)
{
    struct tun_filter *filter = (struct tun_filter *)buf_temp;
    struct tun_filter *target_filter;
    char *target_addr;

    assert(ie->access == IOC_W);

    target_filter = lock_user(VERIFY_READ, arg, sizeof(*target_filter), 1);
    if (!target_filter) {
        return -TARGET_EFAULT;
    }
    filter->flags = tswap16(target_filter->flags);
    filter->count = tswap16(target_filter->count);
    unlock_user(target_filter, arg, 0);

    if (filter->count) {
        /* The whole filter must fit in buf_temp (MAX_STRUCT_SIZE). */
        if (offsetof(struct tun_filter, addr) + filter->count * ETH_ALEN >
            MAX_STRUCT_SIZE) {
            return -TARGET_EFAULT;
        }

        target_addr = lock_user(VERIFY_READ,
                                arg + offsetof(struct tun_filter, addr),
                                filter->count * ETH_ALEN, 1);
        if (!target_addr) {
            return -TARGET_EFAULT;
        }
        /* MAC addresses are raw bytes; no byte-swapping needed. */
        memcpy(filter->addr, target_addr, filter->count * ETH_ALEN);
        unlock_user(target_addr, arg + offsetof(struct tun_filter, addr), 0);
    }

    return get_errno(safe_ioctl(fd, ie->host_cmd, filter));
}
/*
 * Table of every ioctl known to the emulation layer, expanded from
 * ioctls.h: IOCTL generates a plain entry handled generically by
 * do_ioctl(), IOCTL_SPECIAL attaches a custom handler, and
 * IOCTL_IGNORE records the command with no host equivalent.  The
 * all-zero sentinel terminates the linear search in do_ioctl().
 */
IOCTLEntry ioctl_entries[] = {
#define IOCTL(cmd, access, ...) \
    { TARGET_ ## cmd, cmd, #cmd, access, 0, { __VA_ARGS__ } },
#define IOCTL_SPECIAL(cmd, access, dofn, ...) \
    { TARGET_ ## cmd, cmd, #cmd, access, dofn, { __VA_ARGS__ } },
#define IOCTL_IGNORE(cmd) \
    { TARGET_ ## cmd, 0, #cmd },
#include "ioctls.h"
    { 0, 0, },
};
/* ??? Implement proper locking for ioctls. */
/* do_ioctl() Must return target values and target errnos. */
static abi_long do_ioctl(int fd, int cmd, abi_long arg)
{
    const IOCTLEntry *ie;
    const argtype *arg_type;
    abi_long ret;
    uint8_t buf_temp[MAX_STRUCT_SIZE];
    int target_size;
    void *argptr;

    /* Linear search of the ioctl table; the all-zero sentinel entry
       marks the end and means "unsupported". */
    ie = ioctl_entries;
    for(;;) {
        if (ie->target_cmd == 0) {
            qemu_log_mask(
                LOG_UNIMP, "Unsupported ioctl: cmd=0x%04lx\n", (long)cmd);
            return -TARGET_ENOTTY;
        }
        if (ie->target_cmd == cmd)
            break;
        ie++;
    }
    arg_type = ie->arg_type;
    if (ie->do_ioctl) {
        /* Entry was registered with IOCTL_SPECIAL: custom handler. */
        return ie->do_ioctl(ie, buf_temp, fd, cmd, arg);
    } else if (!ie->host_cmd) {
        /* Some architectures define BSD ioctls in their headers
           that are not implemented in Linux. */
        return -TARGET_ENOTTY;
    }

    switch(arg_type[0]) {
    case TYPE_NULL:
        /* no argument */
        ret = get_errno(safe_ioctl(fd, ie->host_cmd));
        break;
    case TYPE_PTRVOID:
    case TYPE_INT:
    case TYPE_LONG:
    case TYPE_ULONG:
        /* Scalar argument: passed through to the host unchanged. */
        ret = get_errno(safe_ioctl(fd, ie->host_cmd, arg));
        break;
    case TYPE_PTR:
        /* Pointer to a struct: converted through the thunk machinery
           according to the entry's access direction. */
        arg_type++;
        target_size = thunk_type_size(arg_type, 0);
        switch(ie->access) {
        case IOC_R:
            /* Kernel writes: run the ioctl into buf_temp, then
               convert the result out to guest memory on success. */
            ret = get_errno(safe_ioctl(fd, ie->host_cmd, buf_temp));
            if (!is_error(ret)) {
                argptr = lock_user(VERIFY_WRITE, arg, target_size, 0);
                if (!argptr)
                    return -TARGET_EFAULT;
                thunk_convert(argptr, buf_temp, arg_type, THUNK_TARGET);
                unlock_user(argptr, arg, target_size);
            }
            break;
        case IOC_W:
            /* Kernel reads: convert guest data in, then call. */
            argptr = lock_user(VERIFY_READ, arg, target_size, 1);
            if (!argptr)
                return -TARGET_EFAULT;
            thunk_convert(buf_temp, argptr, arg_type, THUNK_HOST);
            unlock_user(argptr, arg, 0);
            ret = get_errno(safe_ioctl(fd, ie->host_cmd, buf_temp));
            break;
        default:
        case IOC_RW:
            /* Read-write (also the fallback for any other access
               value): convert in, call, convert back out. */
            argptr = lock_user(VERIFY_READ, arg, target_size, 1);
            if (!argptr)
                return -TARGET_EFAULT;
            thunk_convert(buf_temp, argptr, arg_type, THUNK_HOST);
            unlock_user(argptr, arg, 0);
            ret = get_errno(safe_ioctl(fd, ie->host_cmd, buf_temp));
            if (!is_error(ret)) {
                argptr = lock_user(VERIFY_WRITE, arg, target_size, 0);
                if (!argptr)
                    return -TARGET_EFAULT;
                thunk_convert(argptr, buf_temp, arg_type, THUNK_TARGET);
                unlock_user(argptr, arg, target_size);
            }
            break;
        }
        break;
    default:
        qemu_log_mask(LOG_UNIMP,
                      "Unsupported ioctl type: cmd=0x%04lx type=%d\n",
                      (long)cmd, arg_type[0]);
        ret = -TARGET_ENOTTY;
        break;
    }
    return ret;
}
/* Target <-> host translation table for termios input-mode flags
   (c_iflag).  Each entry is { target_mask, target_bits, host_mask,
   host_bits } as used by target_to_host_bitmask() and friends. */
static const bitmask_transtbl iflag_tbl[] = {
    { TARGET_IGNBRK, TARGET_IGNBRK, IGNBRK, IGNBRK },
    { TARGET_BRKINT, TARGET_BRKINT, BRKINT, BRKINT },
    { TARGET_IGNPAR, TARGET_IGNPAR, IGNPAR, IGNPAR },
    { TARGET_PARMRK, TARGET_PARMRK, PARMRK, PARMRK },
    { TARGET_INPCK, TARGET_INPCK, INPCK, INPCK },
    { TARGET_ISTRIP, TARGET_ISTRIP, ISTRIP, ISTRIP },
    { TARGET_INLCR, TARGET_INLCR, INLCR, INLCR },
    { TARGET_IGNCR, TARGET_IGNCR, IGNCR, IGNCR },
    { TARGET_ICRNL, TARGET_ICRNL, ICRNL, ICRNL },
    { TARGET_IUCLC, TARGET_IUCLC, IUCLC, IUCLC },
    { TARGET_IXON, TARGET_IXON, IXON, IXON },
    { TARGET_IXANY, TARGET_IXANY, IXANY, IXANY },
    { TARGET_IXOFF, TARGET_IXOFF, IXOFF, IXOFF },
    { TARGET_IMAXBEL, TARGET_IMAXBEL, IMAXBEL, IMAXBEL },
    { TARGET_IUTF8, TARGET_IUTF8, IUTF8, IUTF8},
};

/* Output-mode flags (c_oflag).  The delay fields (NLDLY, CRDLY, ...)
   are multi-bit fields, hence several entries per mask. */
static const bitmask_transtbl oflag_tbl[] = {
    { TARGET_OPOST, TARGET_OPOST, OPOST, OPOST },
    { TARGET_OLCUC, TARGET_OLCUC, OLCUC, OLCUC },
    { TARGET_ONLCR, TARGET_ONLCR, ONLCR, ONLCR },
    { TARGET_OCRNL, TARGET_OCRNL, OCRNL, OCRNL },
    { TARGET_ONOCR, TARGET_ONOCR, ONOCR, ONOCR },
    { TARGET_ONLRET, TARGET_ONLRET, ONLRET, ONLRET },
    { TARGET_OFILL, TARGET_OFILL, OFILL, OFILL },
    { TARGET_OFDEL, TARGET_OFDEL, OFDEL, OFDEL },
    { TARGET_NLDLY, TARGET_NL0, NLDLY, NL0 },
    { TARGET_NLDLY, TARGET_NL1, NLDLY, NL1 },
    { TARGET_CRDLY, TARGET_CR0, CRDLY, CR0 },
    { TARGET_CRDLY, TARGET_CR1, CRDLY, CR1 },
    { TARGET_CRDLY, TARGET_CR2, CRDLY, CR2 },
    { TARGET_CRDLY, TARGET_CR3, CRDLY, CR3 },
    { TARGET_TABDLY, TARGET_TAB0, TABDLY, TAB0 },
    { TARGET_TABDLY, TARGET_TAB1, TABDLY, TAB1 },
    { TARGET_TABDLY, TARGET_TAB2, TABDLY, TAB2 },
    { TARGET_TABDLY, TARGET_TAB3, TABDLY, TAB3 },
    { TARGET_BSDLY, TARGET_BS0, BSDLY, BS0 },
    { TARGET_BSDLY, TARGET_BS1, BSDLY, BS1 },
    { TARGET_VTDLY, TARGET_VT0, VTDLY, VT0 },
    { TARGET_VTDLY, TARGET_VT1, VTDLY, VT1 },
    { TARGET_FFDLY, TARGET_FF0, FFDLY, FF0 },
    { TARGET_FFDLY, TARGET_FF1, FFDLY, FF1 },
};

/* Control-mode flags (c_cflag), including the CBAUD baud-rate field
   and the CSIZE character-size field. */
static const bitmask_transtbl cflag_tbl[] = {
    { TARGET_CBAUD, TARGET_B0, CBAUD, B0 },
    { TARGET_CBAUD, TARGET_B50, CBAUD, B50 },
    { TARGET_CBAUD, TARGET_B75, CBAUD, B75 },
    { TARGET_CBAUD, TARGET_B110, CBAUD, B110 },
    { TARGET_CBAUD, TARGET_B134, CBAUD, B134 },
    { TARGET_CBAUD, TARGET_B150, CBAUD, B150 },
    { TARGET_CBAUD, TARGET_B200, CBAUD, B200 },
    { TARGET_CBAUD, TARGET_B300, CBAUD, B300 },
    { TARGET_CBAUD, TARGET_B600, CBAUD, B600 },
    { TARGET_CBAUD, TARGET_B1200, CBAUD, B1200 },
    { TARGET_CBAUD, TARGET_B1800, CBAUD, B1800 },
    { TARGET_CBAUD, TARGET_B2400, CBAUD, B2400 },
    { TARGET_CBAUD, TARGET_B4800, CBAUD, B4800 },
    { TARGET_CBAUD, TARGET_B9600, CBAUD, B9600 },
    { TARGET_CBAUD, TARGET_B19200, CBAUD, B19200 },
    { TARGET_CBAUD, TARGET_B38400, CBAUD, B38400 },
    { TARGET_CBAUD, TARGET_B57600, CBAUD, B57600 },
    { TARGET_CBAUD, TARGET_B115200, CBAUD, B115200 },
    { TARGET_CBAUD, TARGET_B230400, CBAUD, B230400 },
    { TARGET_CBAUD, TARGET_B460800, CBAUD, B460800 },
    { TARGET_CSIZE, TARGET_CS5, CSIZE, CS5 },
    { TARGET_CSIZE, TARGET_CS6, CSIZE, CS6 },
    { TARGET_CSIZE, TARGET_CS7, CSIZE, CS7 },
    { TARGET_CSIZE, TARGET_CS8, CSIZE, CS8 },
    { TARGET_CSTOPB, TARGET_CSTOPB, CSTOPB, CSTOPB },
    { TARGET_CREAD, TARGET_CREAD, CREAD, CREAD },
    { TARGET_PARENB, TARGET_PARENB, PARENB, PARENB },
    { TARGET_PARODD, TARGET_PARODD, PARODD, PARODD },
    { TARGET_HUPCL, TARGET_HUPCL, HUPCL, HUPCL },
    { TARGET_CLOCAL, TARGET_CLOCAL, CLOCAL, CLOCAL },
    { TARGET_CRTSCTS, TARGET_CRTSCTS, CRTSCTS, CRTSCTS },
};

/* Local-mode flags (c_lflag). */
static const bitmask_transtbl lflag_tbl[] = {
    { TARGET_ISIG, TARGET_ISIG, ISIG, ISIG },
    { TARGET_ICANON, TARGET_ICANON, ICANON, ICANON },
    { TARGET_XCASE, TARGET_XCASE, XCASE, XCASE },
    { TARGET_ECHO, TARGET_ECHO, ECHO, ECHO },
    { TARGET_ECHOE, TARGET_ECHOE, ECHOE, ECHOE },
    { TARGET_ECHOK, TARGET_ECHOK, ECHOK, ECHOK },
    { TARGET_ECHONL, TARGET_ECHONL, ECHONL, ECHONL },
    { TARGET_NOFLSH, TARGET_NOFLSH, NOFLSH, NOFLSH },
    { TARGET_TOSTOP, TARGET_TOSTOP, TOSTOP, TOSTOP },
    { TARGET_ECHOCTL, TARGET_ECHOCTL, ECHOCTL, ECHOCTL },
    { TARGET_ECHOPRT, TARGET_ECHOPRT, ECHOPRT, ECHOPRT },
    { TARGET_ECHOKE, TARGET_ECHOKE, ECHOKE, ECHOKE },
    { TARGET_FLUSHO, TARGET_FLUSHO, FLUSHO, FLUSHO },
    { TARGET_PENDIN, TARGET_PENDIN, PENDIN, PENDIN },
    { TARGET_IEXTEN, TARGET_IEXTEN, IEXTEN, IEXTEN },
    { TARGET_EXTPROC, TARGET_EXTPROC, EXTPROC, EXTPROC},
};
/* Convert a guest struct target_termios (src) into a host
   struct host_termios (dst): the four flag words are byte-swapped
   and translated through the bitmask tables above, and the control
   characters are remapped index by index from the TARGET_V* slots
   to the host V* slots. */
static void target_to_host_termios (void *dst, const void *src)
{
    struct host_termios *host = dst;
    const struct target_termios *target = src;

    host->c_iflag =
        target_to_host_bitmask(tswap32(target->c_iflag), iflag_tbl);
    host->c_oflag =
        target_to_host_bitmask(tswap32(target->c_oflag), oflag_tbl);
    host->c_cflag =
        target_to_host_bitmask(tswap32(target->c_cflag), cflag_tbl);
    host->c_lflag =
        target_to_host_bitmask(tswap32(target->c_lflag), lflag_tbl);
    host->c_line = target->c_line;

    /* Host slots with no target counterpart are left zeroed. */
    memset(host->c_cc, 0, sizeof(host->c_cc));
    host->c_cc[VINTR] = target->c_cc[TARGET_VINTR];
    host->c_cc[VQUIT] = target->c_cc[TARGET_VQUIT];
    host->c_cc[VERASE] = target->c_cc[TARGET_VERASE];
    host->c_cc[VKILL] = target->c_cc[TARGET_VKILL];
    host->c_cc[VEOF] = target->c_cc[TARGET_VEOF];
    host->c_cc[VTIME] = target->c_cc[TARGET_VTIME];
    host->c_cc[VMIN] = target->c_cc[TARGET_VMIN];
    host->c_cc[VSWTC] = target->c_cc[TARGET_VSWTC];
    host->c_cc[VSTART] = target->c_cc[TARGET_VSTART];
    host->c_cc[VSTOP] = target->c_cc[TARGET_VSTOP];
    host->c_cc[VSUSP] = target->c_cc[TARGET_VSUSP];
    host->c_cc[VEOL] = target->c_cc[TARGET_VEOL];
    host->c_cc[VREPRINT] = target->c_cc[TARGET_VREPRINT];
    host->c_cc[VDISCARD] = target->c_cc[TARGET_VDISCARD];
    host->c_cc[VWERASE] = target->c_cc[TARGET_VWERASE];
    host->c_cc[VLNEXT] = target->c_cc[TARGET_VLNEXT];
    host->c_cc[VEOL2] = target->c_cc[TARGET_VEOL2];
}
/* Inverse of target_to_host_termios(): convert a host
   struct host_termios (src) into a guest struct target_termios
   (dst), translating flag words back through the same tables and
   remapping control characters from host V* to TARGET_V* slots. */
static void host_to_target_termios (void *dst, const void *src)
{
    struct target_termios *target = dst;
    const struct host_termios *host = src;

    target->c_iflag =
        tswap32(host_to_target_bitmask(host->c_iflag, iflag_tbl));
    target->c_oflag =
        tswap32(host_to_target_bitmask(host->c_oflag, oflag_tbl));
    target->c_cflag =
        tswap32(host_to_target_bitmask(host->c_cflag, cflag_tbl));
    target->c_lflag =
        tswap32(host_to_target_bitmask(host->c_lflag, lflag_tbl));
    target->c_line = host->c_line;

    /* Target slots with no host counterpart are left zeroed. */
    memset(target->c_cc, 0, sizeof(target->c_cc));
    target->c_cc[TARGET_VINTR] = host->c_cc[VINTR];
    target->c_cc[TARGET_VQUIT] = host->c_cc[VQUIT];
    target->c_cc[TARGET_VERASE] = host->c_cc[VERASE];
    target->c_cc[TARGET_VKILL] = host->c_cc[VKILL];
    target->c_cc[TARGET_VEOF] = host->c_cc[VEOF];
    target->c_cc[TARGET_VTIME] = host->c_cc[VTIME];
    target->c_cc[TARGET_VMIN] = host->c_cc[VMIN];
    target->c_cc[TARGET_VSWTC] = host->c_cc[VSWTC];
    target->c_cc[TARGET_VSTART] = host->c_cc[VSTART];
    target->c_cc[TARGET_VSTOP] = host->c_cc[VSTOP];
    target->c_cc[TARGET_VSUSP] = host->c_cc[VSUSP];
    target->c_cc[TARGET_VEOL] = host->c_cc[VEOL];
    target->c_cc[TARGET_VREPRINT] = host->c_cc[VREPRINT];
    target->c_cc[TARGET_VDISCARD] = host->c_cc[VDISCARD];
    target->c_cc[TARGET_VWERASE] = host->c_cc[VWERASE];
    target->c_cc[TARGET_VLNEXT] = host->c_cc[VLNEXT];
    target->c_cc[TARGET_VEOL2] = host->c_cc[VEOL2];
}
/* Thunk descriptor for the termios struct: uses the two hand-written
   converters above instead of generic per-field conversion, since the
   flag words and control-character arrays need custom translation. */
static const StructEntry struct_termios_def = {
    .convert = { host_to_target_termios, target_to_host_termios },
    .size = { sizeof(struct target_termios), sizeof(struct host_termios) },
    .align = { __alignof__(struct target_termios), __alignof__(struct host_termios) },
    .print = print_termios,
};
/* If the host does not provide these bits, they may be safely discarded. */
#ifndef MAP_SYNC
#define MAP_SYNC 0
#endif
#ifndef MAP_UNINITIALIZED
#define MAP_UNINITIALIZED 0
#endif

/* Target <-> host translation table for the mmap flags that map
   one-to-one.  MAP_SHARED/MAP_PRIVATE/MAP_SHARED_VALIDATE and
   MAP_SYNC are handled separately in do_mmap(). */
static const bitmask_transtbl mmap_flags_tbl[] = {
    { TARGET_MAP_FIXED, TARGET_MAP_FIXED, MAP_FIXED, MAP_FIXED },
    { TARGET_MAP_ANONYMOUS, TARGET_MAP_ANONYMOUS,
      MAP_ANONYMOUS, MAP_ANONYMOUS },
    { TARGET_MAP_GROWSDOWN, TARGET_MAP_GROWSDOWN,
      MAP_GROWSDOWN, MAP_GROWSDOWN },
    { TARGET_MAP_DENYWRITE, TARGET_MAP_DENYWRITE,
      MAP_DENYWRITE, MAP_DENYWRITE },
    { TARGET_MAP_EXECUTABLE, TARGET_MAP_EXECUTABLE,
      MAP_EXECUTABLE, MAP_EXECUTABLE },
    { TARGET_MAP_LOCKED, TARGET_MAP_LOCKED, MAP_LOCKED, MAP_LOCKED },
    { TARGET_MAP_NORESERVE, TARGET_MAP_NORESERVE,
      MAP_NORESERVE, MAP_NORESERVE },
    { TARGET_MAP_HUGETLB, TARGET_MAP_HUGETLB, MAP_HUGETLB, MAP_HUGETLB },
    /* MAP_STACK had been ignored by the kernel for quite some time.
       Recognize it for the target insofar as we do not want to pass
       it through to the host. */
    { TARGET_MAP_STACK, TARGET_MAP_STACK, 0, 0 },
    { TARGET_MAP_NONBLOCK, TARGET_MAP_NONBLOCK, MAP_NONBLOCK, MAP_NONBLOCK },
    { TARGET_MAP_POPULATE, TARGET_MAP_POPULATE, MAP_POPULATE, MAP_POPULATE },
    { TARGET_MAP_FIXED_NOREPLACE, TARGET_MAP_FIXED_NOREPLACE,
      MAP_FIXED_NOREPLACE, MAP_FIXED_NOREPLACE },
    { TARGET_MAP_UNINITIALIZED, TARGET_MAP_UNINITIALIZED,
      MAP_UNINITIALIZED, MAP_UNINITIALIZED },
};
/*
 * Arrange for legacy / undefined architecture specific flags to be
 * ignored by mmap handling code.
 */
#ifndef TARGET_MAP_32BIT
#define TARGET_MAP_32BIT 0
#endif
#ifndef TARGET_MAP_HUGE_2MB
#define TARGET_MAP_HUGE_2MB 0
#endif
#ifndef TARGET_MAP_HUGE_1GB
#define TARGET_MAP_HUGE_1GB 0
#endif

/*
 * Translate guest mmap flags to host flags and perform the mapping.
 * The mapping type (shared/private/shared-validate) is handled
 * explicitly; the remaining flags go through mmap_flags_tbl.
 * Returns the mapped guest address or a target errno.
 */
static abi_long do_mmap(abi_ulong addr, abi_ulong len, int prot,
                        int target_flags, int fd, off_t offset)
{
    /*
     * The historical set of flags that all mmap types implicitly support.
     */
    enum {
        TARGET_LEGACY_MAP_MASK = TARGET_MAP_SHARED
                               | TARGET_MAP_PRIVATE
                               | TARGET_MAP_FIXED
                               | TARGET_MAP_ANONYMOUS
                               | TARGET_MAP_DENYWRITE
                               | TARGET_MAP_EXECUTABLE
                               | TARGET_MAP_UNINITIALIZED
                               | TARGET_MAP_GROWSDOWN
                               | TARGET_MAP_LOCKED
                               | TARGET_MAP_NORESERVE
                               | TARGET_MAP_POPULATE
                               | TARGET_MAP_NONBLOCK
                               | TARGET_MAP_STACK
                               | TARGET_MAP_HUGETLB
                               | TARGET_MAP_32BIT
                               | TARGET_MAP_HUGE_2MB
                               | TARGET_MAP_HUGE_1GB
    };
    int host_flags;

    switch (target_flags & TARGET_MAP_TYPE) {
    case TARGET_MAP_PRIVATE:
        host_flags = MAP_PRIVATE;
        break;
    case TARGET_MAP_SHARED:
        host_flags = MAP_SHARED;
        break;
    case TARGET_MAP_SHARED_VALIDATE:
        /*
         * MAP_SYNC is only supported for MAP_SHARED_VALIDATE, and is
         * therefore omitted from mmap_flags_tbl and TARGET_LEGACY_MAP_MASK.
         */
        if (target_flags & ~(TARGET_LEGACY_MAP_MASK | TARGET_MAP_SYNC)) {
            return -TARGET_EOPNOTSUPP;
        }
        host_flags = MAP_SHARED_VALIDATE;
        if (target_flags & TARGET_MAP_SYNC) {
            host_flags |= MAP_SYNC;
        }
        break;
    default:
        return -TARGET_EINVAL;
    }
    host_flags |= target_to_host_bitmask(target_flags, mmap_flags_tbl);

    return get_errno(target_mmap(addr, len, prot, host_flags, fd, offset));
}
/*
 * NOTE: TARGET_ABI32 is defined for TARGET_I386 (but not for TARGET_X86_64)
 * TARGET_I386 is defined if TARGET_X86_64 is defined
 */
#if defined(TARGET_I386)

/* NOTE: there is really one LDT for all the threads */
static uint8_t *ldt_table;

/* Copy up to bytecount bytes of the emulated LDT to guest memory at
   ptr.  Returns the number of bytes copied (0 if no LDT has been
   allocated yet) or -TARGET_EFAULT. */
static abi_long read_ldt(abi_ulong ptr, unsigned long bytecount)
{
    int size;
    void *p;

    if (!ldt_table)
        return 0;
    size = TARGET_LDT_ENTRIES * TARGET_LDT_ENTRY_SIZE;
    if (size > bytecount)
        size = bytecount;
    p = lock_user(VERIFY_WRITE, ptr, size, 0);
    if (!p)
        return -TARGET_EFAULT;
    /* ??? Should this by byteswapped? */
    memcpy(p, ldt_table, size);
    unlock_user(p, ptr, size);
    return size;
}
/* XXX: add locking support */
/*
 * Install one descriptor in the emulated LDT (the write half of
 * modify_ldt()).  @oldmode selects the legacy entry format (func == 1)
 * versus the modern one (func == 0x11).  Returns 0 on success or a
 * negative target errno.
 */
static abi_long write_ldt(CPUX86State *env,
                          abi_ulong ptr, unsigned long bytecount, int oldmode)
{
    struct target_modify_ldt_ldt_s ldt_info;
    struct target_modify_ldt_ldt_s *target_ldt_info;
    int seg_32bit, contents, read_exec_only, limit_in_pages;
    int seg_not_present, useable, lm;
    uint32_t *lp, entry_1, entry_2;

    /* The guest must pass exactly one user_desc structure. */
    if (bytecount != sizeof(ldt_info))
        return -TARGET_EINVAL;
    if (!lock_user_struct(VERIFY_READ, target_ldt_info, ptr, 1))
        return -TARGET_EFAULT;
    /* Byte-swap the guest request into host order. */
    ldt_info.entry_number = tswap32(target_ldt_info->entry_number);
    ldt_info.base_addr = tswapal(target_ldt_info->base_addr);
    ldt_info.limit = tswap32(target_ldt_info->limit);
    ldt_info.flags = tswap32(target_ldt_info->flags);
    unlock_user_struct(target_ldt_info, ptr, 0);

    if (ldt_info.entry_number >= TARGET_LDT_ENTRIES)
        return -TARGET_EINVAL;
    /* Unpack the flags word (same bit layout as Linux's user_desc). */
    seg_32bit = ldt_info.flags & 1;
    contents = (ldt_info.flags >> 1) & 3;
    read_exec_only = (ldt_info.flags >> 3) & 1;
    limit_in_pages = (ldt_info.flags >> 4) & 1;
    seg_not_present = (ldt_info.flags >> 5) & 1;
    useable = (ldt_info.flags >> 6) & 1;
#ifdef TARGET_ABI32
    /* The long-mode bit is meaningless on a 32-bit ABI. */
    lm = 0;
#else
    lm = (ldt_info.flags >> 7) & 1;
#endif
    /*
     * contents == 3 is invalid in the legacy format; in the modern
     * format it is only accepted for a not-present segment.
     */
    if (contents == 3) {
        if (oldmode)
            return -TARGET_EINVAL;
        if (seg_not_present == 0)
            return -TARGET_EINVAL;
    }

    /* allocate the LDT */
    if (!ldt_table) {
        env->ldt.base = target_mmap(0,
                                    TARGET_LDT_ENTRIES * TARGET_LDT_ENTRY_SIZE,
                                    PROT_READ|PROT_WRITE,
                                    MAP_ANONYMOUS|MAP_PRIVATE, -1, 0);
        if (env->ldt.base == -1)
            return -TARGET_ENOMEM;
        memset(g2h_untagged(env->ldt.base), 0,
               TARGET_LDT_ENTRIES * TARGET_LDT_ENTRY_SIZE);
        env->ldt.limit = 0xffff;
        ldt_table = g2h_untagged(env->ldt.base);
    }

    /* NOTE: same code as Linux kernel */
    /* Allow LDTs to be cleared by the user. */
    if (ldt_info.base_addr == 0 && ldt_info.limit == 0) {
        if (oldmode ||
            (contents == 0 &&
             read_exec_only == 1 &&
             seg_32bit == 0 &&
             limit_in_pages == 0 &&
             seg_not_present == 1 &&
             useable == 0 )) {
            entry_1 = 0;
            entry_2 = 0;
            goto install;
        }
    }

    /* Encode the two 32-bit halves of the hardware descriptor. */
    entry_1 = ((ldt_info.base_addr & 0x0000ffff) << 16) |
        (ldt_info.limit & 0x0ffff);
    entry_2 = (ldt_info.base_addr & 0xff000000) |
        ((ldt_info.base_addr & 0x00ff0000) >> 16) |
        (ldt_info.limit & 0xf0000) |
        ((read_exec_only ^ 1) << 9) |
        (contents << 10) |
        ((seg_not_present ^ 1) << 15) |
        (seg_32bit << 22) |
        (limit_in_pages << 23) |
        (lm << 21) |
        0x7000;
    /* The "useable" bit only exists in the modern format. */
    if (!oldmode)
        entry_2 |= (useable << 20);

    /* Install the new entry ...  */
install:
    /* Each LDT entry is 8 bytes: two 32-bit words in target order. */
    lp = (uint32_t *)(ldt_table + (ldt_info.entry_number << 3));
    lp[0] = tswap32(entry_1);
    lp[1] = tswap32(entry_2);
    return 0;
}
  5511. /* specific and weird i386 syscalls */
  5512. static abi_long do_modify_ldt(CPUX86State *env, int func, abi_ulong ptr,
  5513. unsigned long bytecount)
  5514. {
  5515. abi_long ret;
  5516. switch (func) {
  5517. case 0:
  5518. ret = read_ldt(ptr, bytecount);
  5519. break;
  5520. case 1:
  5521. ret = write_ldt(env, ptr, bytecount, 1);
  5522. break;
  5523. case 0x11:
  5524. ret = write_ldt(env, ptr, bytecount, 0);
  5525. break;
  5526. default:
  5527. ret = -TARGET_ENOSYS;
  5528. break;
  5529. }
  5530. return ret;
  5531. }
  5532. #if defined(TARGET_ABI32)
/*
 * Emulate set_thread_area(): install a TLS descriptor into the guest
 * GDT.  When the guest passes entry_number == -1, a free TLS slot is
 * chosen and the slot number is written back into the guest structure.
 * Returns 0 on success or a negative target errno.
 */
abi_long do_set_thread_area(CPUX86State *env, abi_ulong ptr)
{
    uint64_t *gdt_table = g2h_untagged(env->gdt.base);
    struct target_modify_ldt_ldt_s ldt_info;
    struct target_modify_ldt_ldt_s *target_ldt_info;
    int seg_32bit, contents, read_exec_only, limit_in_pages;
    int seg_not_present, useable, lm;
    uint32_t *lp, entry_1, entry_2;
    int i;

    lock_user_struct(VERIFY_WRITE, target_ldt_info, ptr, 1);
    if (!target_ldt_info)
        return -TARGET_EFAULT;
    /* Byte-swap the guest request into host order. */
    ldt_info.entry_number = tswap32(target_ldt_info->entry_number);
    ldt_info.base_addr = tswapal(target_ldt_info->base_addr);
    ldt_info.limit = tswap32(target_ldt_info->limit);
    ldt_info.flags = tswap32(target_ldt_info->flags);
    /* entry_number == -1 means "pick a free TLS slot for me". */
    if (ldt_info.entry_number == -1) {
        for (i=TARGET_GDT_ENTRY_TLS_MIN; i<=TARGET_GDT_ENTRY_TLS_MAX; i++) {
            if (gdt_table[i] == 0) {
                ldt_info.entry_number = i;
                /* Report the chosen slot back to the guest. */
                target_ldt_info->entry_number = tswap32(i);
                break;
            }
        }
    }
    unlock_user_struct(target_ldt_info, ptr, 1);

    /* Only the TLS slots of the GDT may be written. */
    if (ldt_info.entry_number < TARGET_GDT_ENTRY_TLS_MIN ||
        ldt_info.entry_number > TARGET_GDT_ENTRY_TLS_MAX)
        return -TARGET_EINVAL;
    /* Unpack the flags word (same bit layout as Linux's user_desc). */
    seg_32bit = ldt_info.flags & 1;
    contents = (ldt_info.flags >> 1) & 3;
    read_exec_only = (ldt_info.flags >> 3) & 1;
    limit_in_pages = (ldt_info.flags >> 4) & 1;
    seg_not_present = (ldt_info.flags >> 5) & 1;
    useable = (ldt_info.flags >> 6) & 1;
#ifdef TARGET_ABI32
    /* The long-mode bit is meaningless on a 32-bit ABI. */
    lm = 0;
#else
    lm = (ldt_info.flags >> 7) & 1;
#endif
    /* contents == 3 is only accepted for a not-present segment. */
    if (contents == 3) {
        if (seg_not_present == 0)
            return -TARGET_EINVAL;
    }

    /* NOTE: same code as Linux kernel */
    /* Allow LDTs to be cleared by the user. */
    if (ldt_info.base_addr == 0 && ldt_info.limit == 0) {
        if ((contents == 0             &&
             read_exec_only == 1       &&
             seg_32bit == 0            &&
             limit_in_pages == 0       &&
             seg_not_present == 1      &&
             useable == 0 )) {
            entry_1 = 0;
            entry_2 = 0;
            goto install;
        }
    }

    /* Encode the two 32-bit halves of the hardware descriptor. */
    entry_1 = ((ldt_info.base_addr & 0x0000ffff) << 16) |
        (ldt_info.limit & 0x0ffff);
    entry_2 = (ldt_info.base_addr & 0xff000000) |
        ((ldt_info.base_addr & 0x00ff0000) >> 16) |
        (ldt_info.limit & 0xf0000) |
        ((read_exec_only ^ 1) << 9) |
        (contents << 10) |
        ((seg_not_present ^ 1) << 15) |
        (seg_32bit << 22) |
        (limit_in_pages << 23) |
        (useable << 20) |
        (lm << 21) |
        0x7000;

    /* Install the new entry ...  */
install:
    /* GDT entries are 64 bits: write the two words in target order. */
    lp = (uint32_t *)(gdt_table + ldt_info.entry_number);
    lp[0] = tswap32(entry_1);
    lp[1] = tswap32(entry_2);
    return 0;
}
/*
 * Emulate get_thread_area(): read a TLS descriptor from the guest GDT
 * and convert it back into the user_desc layout expected by the guest.
 * Returns 0 on success or a negative target errno.
 */
static abi_long do_get_thread_area(CPUX86State *env, abi_ulong ptr)
{
    struct target_modify_ldt_ldt_s *target_ldt_info;
    uint64_t *gdt_table = g2h_untagged(env->gdt.base);
    uint32_t base_addr, limit, flags;
    int seg_32bit, contents, read_exec_only, limit_in_pages, idx;
    int seg_not_present, useable, lm;
    uint32_t *lp, entry_1, entry_2;

    lock_user_struct(VERIFY_WRITE, target_ldt_info, ptr, 1);
    if (!target_ldt_info)
        return -TARGET_EFAULT;
    idx = tswap32(target_ldt_info->entry_number);
    /* Only the TLS slots of the GDT may be inspected. */
    if (idx < TARGET_GDT_ENTRY_TLS_MIN ||
        idx > TARGET_GDT_ENTRY_TLS_MAX) {
        unlock_user_struct(target_ldt_info, ptr, 1);
        return -TARGET_EINVAL;
    }
    /* Fetch the two 32-bit halves of the hardware descriptor. */
    lp = (uint32_t *)(gdt_table + idx);
    entry_1 = tswap32(lp[0]);
    entry_2 = tswap32(lp[1]);
    /* Decode descriptor bits (inverse of do_set_thread_area). */
    read_exec_only = ((entry_2 >> 9) & 1) ^ 1;
    contents = (entry_2 >> 10) & 3;
    seg_not_present = ((entry_2 >> 15) & 1) ^ 1;
    seg_32bit = (entry_2 >> 22) & 1;
    limit_in_pages = (entry_2 >> 23) & 1;
    useable = (entry_2 >> 20) & 1;
#ifdef TARGET_ABI32
    /* The long-mode bit is meaningless on a 32-bit ABI. */
    lm = 0;
#else
    lm = (entry_2 >> 21) & 1;
#endif
    /* Repack into the user_desc flags layout. */
    flags = (seg_32bit << 0) | (contents << 1) |
        (read_exec_only << 3) | (limit_in_pages << 4) |
        (seg_not_present << 5) | (useable << 6) | (lm << 7);
    limit = (entry_1 & 0xffff) | (entry_2 & 0xf0000);
    base_addr = (entry_1 >> 16) |
        (entry_2 & 0xff000000) |
        ((entry_2 & 0xff) << 16);
    target_ldt_info->base_addr = tswapal(base_addr);
    target_ldt_info->limit = tswap32(limit);
    target_ldt_info->flags = tswap32(flags);
    unlock_user_struct(target_ldt_info, ptr, 1);
    return 0;
}
/*
 * arch_prctl() is a 64-bit-only interface; for the 32-bit ABI we
 * report it as unimplemented.
 */
abi_long do_arch_prctl(CPUX86State *env, int code, abi_ulong addr)
{
    return -TARGET_ENOSYS;
}
  5659. #else
  5660. abi_long do_arch_prctl(CPUX86State *env, int code, abi_ulong addr)
  5661. {
  5662. abi_long ret = 0;
  5663. abi_ulong val;
  5664. int idx;
  5665. switch(code) {
  5666. case TARGET_ARCH_SET_GS:
  5667. case TARGET_ARCH_SET_FS:
  5668. if (code == TARGET_ARCH_SET_GS)
  5669. idx = R_GS;
  5670. else
  5671. idx = R_FS;
  5672. cpu_x86_load_seg(env, idx, 0);
  5673. env->segs[idx].base = addr;
  5674. break;
  5675. case TARGET_ARCH_GET_GS:
  5676. case TARGET_ARCH_GET_FS:
  5677. if (code == TARGET_ARCH_GET_GS)
  5678. idx = R_GS;
  5679. else
  5680. idx = R_FS;
  5681. val = env->segs[idx].base;
  5682. if (put_user(val, addr, abi_ulong))
  5683. ret = -TARGET_EFAULT;
  5684. break;
  5685. default:
  5686. ret = -TARGET_EINVAL;
  5687. break;
  5688. }
  5689. return ret;
  5690. }
#endif /* defined(TARGET_ABI32) */
  5692. #endif /* defined(TARGET_I386) */
  5693. /*
  5694. * These constants are generic. Supply any that are missing from the host.
  5695. */
  5696. #ifndef PR_SET_NAME
  5697. # define PR_SET_NAME 15
  5698. # define PR_GET_NAME 16
  5699. #endif
  5700. #ifndef PR_SET_FP_MODE
  5701. # define PR_SET_FP_MODE 45
  5702. # define PR_GET_FP_MODE 46
  5703. # define PR_FP_MODE_FR (1 << 0)
  5704. # define PR_FP_MODE_FRE (1 << 1)
  5705. #endif
  5706. #ifndef PR_SVE_SET_VL
  5707. # define PR_SVE_SET_VL 50
  5708. # define PR_SVE_GET_VL 51
  5709. # define PR_SVE_VL_LEN_MASK 0xffff
  5710. # define PR_SVE_VL_INHERIT (1 << 17)
  5711. #endif
  5712. #ifndef PR_PAC_RESET_KEYS
  5713. # define PR_PAC_RESET_KEYS 54
  5714. # define PR_PAC_APIAKEY (1 << 0)
  5715. # define PR_PAC_APIBKEY (1 << 1)
  5716. # define PR_PAC_APDAKEY (1 << 2)
  5717. # define PR_PAC_APDBKEY (1 << 3)
  5718. # define PR_PAC_APGAKEY (1 << 4)
  5719. #endif
  5720. #ifndef PR_SET_TAGGED_ADDR_CTRL
  5721. # define PR_SET_TAGGED_ADDR_CTRL 55
  5722. # define PR_GET_TAGGED_ADDR_CTRL 56
  5723. # define PR_TAGGED_ADDR_ENABLE (1UL << 0)
  5724. #endif
  5725. #ifndef PR_SET_IO_FLUSHER
  5726. # define PR_SET_IO_FLUSHER 57
  5727. # define PR_GET_IO_FLUSHER 58
  5728. #endif
  5729. #ifndef PR_SET_SYSCALL_USER_DISPATCH
  5730. # define PR_SET_SYSCALL_USER_DISPATCH 59
  5731. #endif
  5732. #ifndef PR_SME_SET_VL
  5733. # define PR_SME_SET_VL 63
  5734. # define PR_SME_GET_VL 64
  5735. # define PR_SME_VL_LEN_MASK 0xffff
  5736. # define PR_SME_VL_INHERIT (1 << 17)
  5737. #endif
  5738. #include "target_prctl.h"
/* Default stub for zero-argument per-target prctl hooks: not supported. */
static abi_long do_prctl_inval0(CPUArchState *env)
{
    return -TARGET_EINVAL;
}
/* Default stub for one-argument per-target prctl hooks: not supported. */
static abi_long do_prctl_inval1(CPUArchState *env, abi_long arg2)
{
    return -TARGET_EINVAL;
}
  5747. #ifndef do_prctl_get_fp_mode
  5748. #define do_prctl_get_fp_mode do_prctl_inval0
  5749. #endif
  5750. #ifndef do_prctl_set_fp_mode
  5751. #define do_prctl_set_fp_mode do_prctl_inval1
  5752. #endif
  5753. #ifndef do_prctl_sve_get_vl
  5754. #define do_prctl_sve_get_vl do_prctl_inval0
  5755. #endif
  5756. #ifndef do_prctl_sve_set_vl
  5757. #define do_prctl_sve_set_vl do_prctl_inval1
  5758. #endif
  5759. #ifndef do_prctl_reset_keys
  5760. #define do_prctl_reset_keys do_prctl_inval1
  5761. #endif
  5762. #ifndef do_prctl_set_tagged_addr_ctrl
  5763. #define do_prctl_set_tagged_addr_ctrl do_prctl_inval1
  5764. #endif
  5765. #ifndef do_prctl_get_tagged_addr_ctrl
  5766. #define do_prctl_get_tagged_addr_ctrl do_prctl_inval0
  5767. #endif
  5768. #ifndef do_prctl_get_unalign
  5769. #define do_prctl_get_unalign do_prctl_inval1
  5770. #endif
  5771. #ifndef do_prctl_set_unalign
  5772. #define do_prctl_set_unalign do_prctl_inval1
  5773. #endif
  5774. #ifndef do_prctl_sme_get_vl
  5775. #define do_prctl_sme_get_vl do_prctl_inval0
  5776. #endif
  5777. #ifndef do_prctl_sme_set_vl
  5778. #define do_prctl_sme_set_vl do_prctl_inval1
  5779. #endif
/*
 * Emulate prctl().  Options fall into four groups: those whose pointer
 * or signal arguments need conversion, those with per-target state
 * (dispatched to do_prctl_* hooks), those safe to pass straight
 * through to the host, and those deliberately disabled because they
 * would interfere with the emulation itself.
 */
static abi_long do_prctl(CPUArchState *env, abi_long option, abi_long arg2,
                         abi_long arg3, abi_long arg4, abi_long arg5)
{
    abi_long ret;

    switch (option) {
    case PR_GET_PDEATHSIG:
        {
            int deathsig;
            /* The signal number must be translated back to the target. */
            ret = get_errno(prctl(PR_GET_PDEATHSIG, &deathsig,
                                  arg3, arg4, arg5));
            if (!is_error(ret) &&
                put_user_s32(host_to_target_signal(deathsig), arg2)) {
                return -TARGET_EFAULT;
            }
            return ret;
        }
    case PR_SET_PDEATHSIG:
        return get_errno(prctl(PR_SET_PDEATHSIG, target_to_host_signal(arg2),
                               arg3, arg4, arg5));
    case PR_GET_NAME:
        {
            /* The kernel contract is a 16-byte buffer including NUL. */
            void *name = lock_user(VERIFY_WRITE, arg2, 16, 1);
            if (!name) {
                return -TARGET_EFAULT;
            }
            ret = get_errno(prctl(PR_GET_NAME, (uintptr_t)name,
                                  arg3, arg4, arg5));
            unlock_user(name, arg2, 16);
            return ret;
        }
    case PR_SET_NAME:
        {
            void *name = lock_user(VERIFY_READ, arg2, 16, 1);
            if (!name) {
                return -TARGET_EFAULT;
            }
            ret = get_errno(prctl(PR_SET_NAME, (uintptr_t)name,
                                  arg3, arg4, arg5));
            unlock_user(name, arg2, 0);
            return ret;
        }
    /* Options with per-target state: dispatch to the target hooks. */
    case PR_GET_FP_MODE:
        return do_prctl_get_fp_mode(env);
    case PR_SET_FP_MODE:
        return do_prctl_set_fp_mode(env, arg2);
    case PR_SVE_GET_VL:
        return do_prctl_sve_get_vl(env);
    case PR_SVE_SET_VL:
        return do_prctl_sve_set_vl(env, arg2);
    case PR_SME_GET_VL:
        return do_prctl_sme_get_vl(env);
    case PR_SME_SET_VL:
        return do_prctl_sme_set_vl(env, arg2);
    case PR_PAC_RESET_KEYS:
        /* Unused trailing arguments must be zero. */
        if (arg3 || arg4 || arg5) {
            return -TARGET_EINVAL;
        }
        return do_prctl_reset_keys(env, arg2);
    case PR_SET_TAGGED_ADDR_CTRL:
        if (arg3 || arg4 || arg5) {
            return -TARGET_EINVAL;
        }
        return do_prctl_set_tagged_addr_ctrl(env, arg2);
    case PR_GET_TAGGED_ADDR_CTRL:
        if (arg2 || arg3 || arg4 || arg5) {
            return -TARGET_EINVAL;
        }
        return do_prctl_get_tagged_addr_ctrl(env);

    case PR_GET_UNALIGN:
        return do_prctl_get_unalign(env, arg2);
    case PR_SET_UNALIGN:
        return do_prctl_set_unalign(env, arg2);

    case PR_CAP_AMBIENT:
    case PR_CAPBSET_READ:
    case PR_CAPBSET_DROP:
    case PR_GET_DUMPABLE:
    case PR_SET_DUMPABLE:
    case PR_GET_KEEPCAPS:
    case PR_SET_KEEPCAPS:
    case PR_GET_SECUREBITS:
    case PR_SET_SECUREBITS:
    case PR_GET_TIMING:
    case PR_SET_TIMING:
    case PR_GET_TIMERSLACK:
    case PR_SET_TIMERSLACK:
    case PR_MCE_KILL:
    case PR_MCE_KILL_GET:
    case PR_GET_NO_NEW_PRIVS:
    case PR_SET_NO_NEW_PRIVS:
    case PR_GET_IO_FLUSHER:
    case PR_SET_IO_FLUSHER:
    case PR_SET_CHILD_SUBREAPER:
    case PR_GET_SPECULATION_CTRL:
    case PR_SET_SPECULATION_CTRL:
        /* Some prctl options have no pointer arguments and we can pass on. */
        return get_errno(prctl(option, arg2, arg3, arg4, arg5));

    case PR_GET_CHILD_SUBREAPER:
        {
            /* arg2 points at an int in guest memory. */
            int val;
            ret = get_errno(prctl(PR_GET_CHILD_SUBREAPER, &val,
                                  arg3, arg4, arg5));
            if (!is_error(ret) && put_user_s32(val, arg2)) {
                return -TARGET_EFAULT;
            }
            return ret;
        }

    case PR_GET_TID_ADDRESS:
        {
            /* Answer from emulation state, not from the host kernel. */
            TaskState *ts = get_task_state(env_cpu(env));
            return put_user_ual(ts->child_tidptr, arg2);
        }

    case PR_GET_FPEXC:
    case PR_SET_FPEXC:
        /* Was used for SPE on PowerPC. */
        return -TARGET_EINVAL;

    case PR_GET_ENDIAN:
    case PR_SET_ENDIAN:
    case PR_GET_FPEMU:
    case PR_SET_FPEMU:
    case PR_SET_MM:
    case PR_GET_SECCOMP:
    case PR_SET_SECCOMP:
    case PR_SET_SYSCALL_USER_DISPATCH:
    case PR_GET_THP_DISABLE:
    case PR_SET_THP_DISABLE:
    case PR_GET_TSC:
    case PR_SET_TSC:
        /* Disable to prevent the target disabling stuff we need. */
        return -TARGET_EINVAL;

    default:
        qemu_log_mask(LOG_UNIMP, "Unsupported prctl: " TARGET_ABI_FMT_ld "\n",
                      option);
        return -TARGET_EINVAL;
    }
}
  5915. #define NEW_STACK_SIZE 0x40000
  5916. static pthread_mutex_t clone_lock = PTHREAD_MUTEX_INITIALIZER;
/* Handshake data passed from do_fork() to clone_func() for CLONE_VM. */
typedef struct {
    CPUArchState *env;       /* CPU state for the new thread */
    pthread_mutex_t mutex;   /* protects the startup handshake */
    pthread_cond_t cond;     /* signalled once the child is running */
    pthread_t thread;
    uint32_t tid;            /* host TID, filled in by the child */
    abi_ulong child_tidptr;  /* guest addr for CLONE_CHILD_SETTID, or 0 */
    abi_ulong parent_tidptr; /* guest addr for CLONE_PARENT_SETTID, or 0 */
    sigset_t sigmask;        /* parent's signal mask, restored in the child */
} new_thread_info;
/*
 * Start routine for guest threads created via do_fork(CLONE_VM).
 * Registers the host thread with RCU and TCG, publishes the new TID,
 * signals the waiting parent, then enters the guest CPU loop.
 * Never returns.
 */
static void *clone_func(void *arg)
{
    new_thread_info *info = arg;
    CPUArchState *env;
    CPUState *cpu;
    TaskState *ts;

    rcu_register_thread();
    tcg_register_thread();
    env = info->env;
    cpu = env_cpu(env);
    thread_cpu = cpu;
    ts = get_task_state(cpu);
    info->tid = sys_gettid();
    task_settid(ts);
    /* Honour CLONE_CHILD_SETTID / CLONE_PARENT_SETTID requests. */
    if (info->child_tidptr)
        put_user_u32(info->tid, info->child_tidptr);
    if (info->parent_tidptr)
        put_user_u32(info->tid, info->parent_tidptr);
    qemu_guest_random_seed_thread_part2(cpu->random_seed);
    /* Enable signals.  */
    sigprocmask(SIG_SETMASK, &info->sigmask, NULL);
    /* Signal to the parent that we're ready.  */
    pthread_mutex_lock(&info->mutex);
    pthread_cond_broadcast(&info->cond);
    pthread_mutex_unlock(&info->mutex);
    /* Wait until the parent has finished initializing the tls state.  */
    pthread_mutex_lock(&clone_lock);
    pthread_mutex_unlock(&clone_lock);
    cpu_loop(env);
    /* never exits */
    return NULL;
}
/* do_fork() Must return host values and target errnos (unlike most
   do_*() functions). */
/*
 * Emulate clone()/fork().  With CLONE_VM a new host thread sharing the
 * address space is created via pthreads; otherwise a host fork() is
 * performed.  @newsp, @parent_tidptr, @newtls and @child_tidptr are
 * guest values/addresses taken from the clone() arguments.
 */
static int do_fork(CPUArchState *env, unsigned int flags, abi_ulong newsp,
                   abi_ulong parent_tidptr, target_ulong newtls,
                   abi_ulong child_tidptr)
{
    CPUState *cpu = env_cpu(env);
    int ret;
    TaskState *ts;
    CPUState *new_cpu;
    CPUArchState *new_env;
    sigset_t sigmask;

    flags &= ~CLONE_IGNORED_FLAGS;

    /* Emulate vfork() with fork() */
    if (flags & CLONE_VFORK)
        flags &= ~(CLONE_VFORK | CLONE_VM);

    if (flags & CLONE_VM) {
        TaskState *parent_ts = get_task_state(cpu);
        new_thread_info info;
        pthread_attr_t attr;

        /* Only the canonical pthread-style flag combination is accepted. */
        if (((flags & CLONE_THREAD_FLAGS) != CLONE_THREAD_FLAGS) ||
            (flags & CLONE_INVALID_THREAD_FLAGS)) {
            return -TARGET_EINVAL;
        }

        ts = g_new0(TaskState, 1);
        init_task_state(ts);

        /* Grab a mutex so that thread setup appears atomic.  */
        pthread_mutex_lock(&clone_lock);

        /*
         * If this is our first additional thread, we need to ensure we
         * generate code for parallel execution and flush old translations.
         * Do this now so that the copy gets CF_PARALLEL too.
         */
        if (!tcg_cflags_has(cpu, CF_PARALLEL)) {
            tcg_cflags_set(cpu, CF_PARALLEL);
            tb_flush(cpu);
        }

        /* we create a new CPU instance. */
        new_env = cpu_copy(env);
        /* Init regs that differ from the parent.  */
        cpu_clone_regs_child(new_env, newsp, flags);
        cpu_clone_regs_parent(env, flags);
        new_cpu = env_cpu(new_env);
        new_cpu->opaque = ts;
        ts->bprm = parent_ts->bprm;
        ts->info = parent_ts->info;
        ts->signal_mask = parent_ts->signal_mask;

        if (flags & CLONE_CHILD_CLEARTID) {
            ts->child_tidptr = child_tidptr;
        }

        if (flags & CLONE_SETTLS) {
            cpu_set_tls (new_env, newtls);
        }

        memset(&info, 0, sizeof(info));
        pthread_mutex_init(&info.mutex, NULL);
        pthread_mutex_lock(&info.mutex);
        pthread_cond_init(&info.cond, NULL);
        info.env = new_env;
        if (flags & CLONE_CHILD_SETTID) {
            info.child_tidptr = child_tidptr;
        }
        if (flags & CLONE_PARENT_SETTID) {
            info.parent_tidptr = parent_tidptr;
        }

        /*
         * NOTE(review): the pthread_attr_* return values are overwritten
         * and effectively ignored; failures here are assumed not to occur.
         */
        ret = pthread_attr_init(&attr);
        ret = pthread_attr_setstacksize(&attr, NEW_STACK_SIZE);
        ret = pthread_attr_setdetachstate(&attr, PTHREAD_CREATE_DETACHED);
        /* It is not safe to deliver signals until the child has finished
           initializing, so temporarily block all signals.  */
        sigfillset(&sigmask);
        sigprocmask(SIG_BLOCK, &sigmask, &info.sigmask);
        cpu->random_seed = qemu_guest_random_seed_thread_part1();

        ret = pthread_create(&info.thread, &attr, clone_func, &info);
        /* TODO: Free new CPU state if thread creation failed.  */

        sigprocmask(SIG_SETMASK, &info.sigmask, NULL);
        pthread_attr_destroy(&attr);
        if (ret == 0) {
            /* Wait for the child to initialize.  */
            pthread_cond_wait(&info.cond, &info.mutex);
            ret = info.tid;
        } else {
            ret = -1;
        }
        pthread_mutex_unlock(&info.mutex);
        pthread_cond_destroy(&info.cond);
        pthread_mutex_destroy(&info.mutex);
        pthread_mutex_unlock(&clone_lock);
    } else {
        /* if no CLONE_VM, we consider it is a fork */
        if (flags & CLONE_INVALID_FORK_FLAGS) {
            return -TARGET_EINVAL;
        }

        /* We can't support custom termination signals */
        if ((flags & CSIGNAL) != TARGET_SIGCHLD) {
            return -TARGET_EINVAL;
        }

#if !defined(__NR_pidfd_open) || !defined(TARGET_NR_pidfd_open)
        if (flags & CLONE_PIDFD) {
            return -TARGET_EINVAL;
        }
#endif

        /* Can not allow CLONE_PIDFD with CLONE_PARENT_SETTID */
        if ((flags & CLONE_PIDFD) && (flags & CLONE_PARENT_SETTID)) {
            return -TARGET_EINVAL;
        }

        if (block_signals()) {
            return -QEMU_ERESTARTSYS;
        }

        fork_start();
        ret = fork();
        if (ret == 0) {
            /* Child Process.  */
            cpu_clone_regs_child(env, newsp, flags);
            fork_end(ret);
            /* There is a race condition here.  The parent process could
               theoretically read the TID in the child process before the child
               tid is set.  This would require using either ptrace
               (not implemented) or having *_tidptr to point at a shared memory
               mapping.  We can't repeat the spinlock hack used above because
               the child process gets its own copy of the lock.  */
            if (flags & CLONE_CHILD_SETTID)
                put_user_u32(sys_gettid(), child_tidptr);
            if (flags & CLONE_PARENT_SETTID)
                put_user_u32(sys_gettid(), parent_tidptr);
            ts = get_task_state(cpu);
            if (flags & CLONE_SETTLS)
                cpu_set_tls (env, newtls);
            if (flags & CLONE_CHILD_CLEARTID)
                ts->child_tidptr = child_tidptr;
        } else {
            /* Parent side of the fork. */
            cpu_clone_regs_parent(env, flags);
            if (flags & CLONE_PIDFD) {
                int pid_fd = 0;
#if defined(__NR_pidfd_open) && defined(TARGET_NR_pidfd_open)
                int pid_child = ret;
                pid_fd = pidfd_open(pid_child, 0);
                if (pid_fd >= 0) {
                    fcntl(pid_fd, F_SETFD, fcntl(pid_fd, F_GETFL)
                          | FD_CLOEXEC);
                } else {
                    pid_fd = 0;
                }
#endif
                put_user_u32(pid_fd, parent_tidptr);
            }
            fork_end(ret);
        }
        g_assert(!cpu_in_exclusive_context(cpu));
    }
    return ret;
}
/* warning : doesn't handle linux specific flags... */
/*
 * Map a target fcntl() command number onto the host's, returning
 * -TARGET_EINVAL for commands we do not support.  Commands guarded by
 * #ifdef exist only when the host headers define them.
 */
static int target_to_host_fcntl_cmd(int cmd)
{
    int ret;

    switch(cmd) {
    case TARGET_F_DUPFD:
    case TARGET_F_GETFD:
    case TARGET_F_SETFD:
    case TARGET_F_GETFL:
    case TARGET_F_SETFL:
    case TARGET_F_OFD_GETLK:
    case TARGET_F_OFD_SETLK:
    case TARGET_F_OFD_SETLKW:
        /* These share the same numeric value on target and host. */
        ret = cmd;
        break;
    case TARGET_F_GETLK:
        ret = F_GETLK;
        break;
    case TARGET_F_SETLK:
        ret = F_SETLK;
        break;
    case TARGET_F_SETLKW:
        ret = F_SETLKW;
        break;
    case TARGET_F_GETOWN:
        ret = F_GETOWN;
        break;
    case TARGET_F_SETOWN:
        ret = F_SETOWN;
        break;
    case TARGET_F_GETSIG:
        ret = F_GETSIG;
        break;
    case TARGET_F_SETSIG:
        ret = F_SETSIG;
        break;
#if TARGET_ABI_BITS == 32
    /* On 32-bit ABIs the 64-bit lock commands map to the host's. */
    case TARGET_F_GETLK64:
        ret = F_GETLK;
        break;
    case TARGET_F_SETLK64:
        ret = F_SETLK;
        break;
    case TARGET_F_SETLKW64:
        ret = F_SETLKW;
        break;
#endif
    case TARGET_F_SETLEASE:
        ret = F_SETLEASE;
        break;
    case TARGET_F_GETLEASE:
        ret = F_GETLEASE;
        break;
#ifdef F_DUPFD_CLOEXEC
    case TARGET_F_DUPFD_CLOEXEC:
        ret = F_DUPFD_CLOEXEC;
        break;
#endif
    case TARGET_F_NOTIFY:
        ret = F_NOTIFY;
        break;
#ifdef F_GETOWN_EX
    case TARGET_F_GETOWN_EX:
        ret = F_GETOWN_EX;
        break;
#endif
#ifdef F_SETOWN_EX
    case TARGET_F_SETOWN_EX:
        ret = F_SETOWN_EX;
        break;
#endif
#ifdef F_SETPIPE_SZ
    case TARGET_F_SETPIPE_SZ:
        ret = F_SETPIPE_SZ;
        break;
    case TARGET_F_GETPIPE_SZ:
        ret = F_GETPIPE_SZ;
        break;
#endif
#ifdef F_ADD_SEALS
    case TARGET_F_ADD_SEALS:
        ret = F_ADD_SEALS;
        break;
    case TARGET_F_GET_SEALS:
        ret = F_GET_SEALS;
        break;
#endif
    default:
        ret = -TARGET_EINVAL;
        break;
    }

#if defined(__powerpc64__)
    /* On PPC64, glibc headers has the F_*LK* defined to 12, 13 and 14 and
     * is not supported by kernel. The  glibc fcntl call actually adjusts
     * them to 5, 6 and 7 before making the syscall(). Since we make the
     * syscall directly, adjust to what is supported by the kernel.
     */
    if (ret >= F_GETLK && ret <= F_SETLKW) {
        ret -= F_GETLK - 5;
    }
#endif

    return ret;
}
/*
 * Translation of flock l_type values between host and target.  The
 * single table macro is expanded twice, once for each direction, with
 * TRANSTBL_CONVERT redefined appropriately.
 */
#define FLOCK_TRANSTBL \
    switch (type) { \
    TRANSTBL_CONVERT(F_RDLCK); \
    TRANSTBL_CONVERT(F_WRLCK); \
    TRANSTBL_CONVERT(F_UNLCK); \
    }

/* Map a target F_RDLCK/F_WRLCK/F_UNLCK to the host value. */
static int target_to_host_flock(int type)
{
#define TRANSTBL_CONVERT(a) case TARGET_##a: return a
    FLOCK_TRANSTBL
#undef TRANSTBL_CONVERT
    /* Unknown lock types must not be passed to the host kernel. */
    return -TARGET_EINVAL;
}

/* Map a host l_type back to the target value. */
static int host_to_target_flock(int type)
{
#define TRANSTBL_CONVERT(a) case a: return TARGET_##a
    FLOCK_TRANSTBL
#undef TRANSTBL_CONVERT
    /* if we don't know how to convert the value coming
     * from the host we copy to the target field as-is
     */
    return type;
}
  6236. static inline abi_long copy_from_user_flock(struct flock *fl,
  6237. abi_ulong target_flock_addr)
  6238. {
  6239. struct target_flock *target_fl;
  6240. int l_type;
  6241. if (!lock_user_struct(VERIFY_READ, target_fl, target_flock_addr, 1)) {
  6242. return -TARGET_EFAULT;
  6243. }
  6244. __get_user(l_type, &target_fl->l_type);
  6245. l_type = target_to_host_flock(l_type);
  6246. if (l_type < 0) {
  6247. return l_type;
  6248. }
  6249. fl->l_type = l_type;
  6250. __get_user(fl->l_whence, &target_fl->l_whence);
  6251. __get_user(fl->l_start, &target_fl->l_start);
  6252. __get_user(fl->l_len, &target_fl->l_len);
  6253. __get_user(fl->l_pid, &target_fl->l_pid);
  6254. unlock_user_struct(target_fl, target_flock_addr, 0);
  6255. return 0;
  6256. }
  6257. static inline abi_long copy_to_user_flock(abi_ulong target_flock_addr,
  6258. const struct flock *fl)
  6259. {
  6260. struct target_flock *target_fl;
  6261. short l_type;
  6262. if (!lock_user_struct(VERIFY_WRITE, target_fl, target_flock_addr, 0)) {
  6263. return -TARGET_EFAULT;
  6264. }
  6265. l_type = host_to_target_flock(fl->l_type);
  6266. __put_user(l_type, &target_fl->l_type);
  6267. __put_user(fl->l_whence, &target_fl->l_whence);
  6268. __put_user(fl->l_start, &target_fl->l_start);
  6269. __put_user(fl->l_len, &target_fl->l_len);
  6270. __put_user(fl->l_pid, &target_fl->l_pid);
  6271. unlock_user_struct(target_fl, target_flock_addr, 1);
  6272. return 0;
  6273. }
  6274. typedef abi_long from_flock64_fn(struct flock *fl, abi_ulong target_addr);
  6275. typedef abi_long to_flock64_fn(abi_ulong target_addr, const struct flock *fl);
  6276. #if defined(TARGET_ARM) && TARGET_ABI_BITS == 32
/*
 * flock64 layout for the old ARM OABI: same fields as the EABI
 * version but with packed layout (no alignment padding before
 * l_start) — hence QEMU_PACKED.
 */
struct target_oabi_flock64 {
    abi_short l_type;
    abi_short l_whence;
    abi_llong l_start;
    abi_llong l_len;
    abi_int   l_pid;
} QEMU_PACKED;
  6284. static inline abi_long copy_from_user_oabi_flock64(struct flock *fl,
  6285. abi_ulong target_flock_addr)
  6286. {
  6287. struct target_oabi_flock64 *target_fl;
  6288. int l_type;
  6289. if (!lock_user_struct(VERIFY_READ, target_fl, target_flock_addr, 1)) {
  6290. return -TARGET_EFAULT;
  6291. }
  6292. __get_user(l_type, &target_fl->l_type);
  6293. l_type = target_to_host_flock(l_type);
  6294. if (l_type < 0) {
  6295. return l_type;
  6296. }
  6297. fl->l_type = l_type;
  6298. __get_user(fl->l_whence, &target_fl->l_whence);
  6299. __get_user(fl->l_start, &target_fl->l_start);
  6300. __get_user(fl->l_len, &target_fl->l_len);
  6301. __get_user(fl->l_pid, &target_fl->l_pid);
  6302. unlock_user_struct(target_fl, target_flock_addr, 0);
  6303. return 0;
  6304. }
  6305. static inline abi_long copy_to_user_oabi_flock64(abi_ulong target_flock_addr,
  6306. const struct flock *fl)
  6307. {
  6308. struct target_oabi_flock64 *target_fl;
  6309. short l_type;
  6310. if (!lock_user_struct(VERIFY_WRITE, target_fl, target_flock_addr, 0)) {
  6311. return -TARGET_EFAULT;
  6312. }
  6313. l_type = host_to_target_flock(fl->l_type);
  6314. __put_user(l_type, &target_fl->l_type);
  6315. __put_user(fl->l_whence, &target_fl->l_whence);
  6316. __put_user(fl->l_start, &target_fl->l_start);
  6317. __put_user(fl->l_len, &target_fl->l_len);
  6318. __put_user(fl->l_pid, &target_fl->l_pid);
  6319. unlock_user_struct(target_fl, target_flock_addr, 1);
  6320. return 0;
  6321. }
  6322. #endif
  6323. static inline abi_long copy_from_user_flock64(struct flock *fl,
  6324. abi_ulong target_flock_addr)
  6325. {
  6326. struct target_flock64 *target_fl;
  6327. int l_type;
  6328. if (!lock_user_struct(VERIFY_READ, target_fl, target_flock_addr, 1)) {
  6329. return -TARGET_EFAULT;
  6330. }
  6331. __get_user(l_type, &target_fl->l_type);
  6332. l_type = target_to_host_flock(l_type);
  6333. if (l_type < 0) {
  6334. return l_type;
  6335. }
  6336. fl->l_type = l_type;
  6337. __get_user(fl->l_whence, &target_fl->l_whence);
  6338. __get_user(fl->l_start, &target_fl->l_start);
  6339. __get_user(fl->l_len, &target_fl->l_len);
  6340. __get_user(fl->l_pid, &target_fl->l_pid);
  6341. unlock_user_struct(target_fl, target_flock_addr, 0);
  6342. return 0;
  6343. }
  6344. static inline abi_long copy_to_user_flock64(abi_ulong target_flock_addr,
  6345. const struct flock *fl)
  6346. {
  6347. struct target_flock64 *target_fl;
  6348. short l_type;
  6349. if (!lock_user_struct(VERIFY_WRITE, target_fl, target_flock_addr, 0)) {
  6350. return -TARGET_EFAULT;
  6351. }
  6352. l_type = host_to_target_flock(fl->l_type);
  6353. __put_user(l_type, &target_fl->l_type);
  6354. __put_user(fl->l_whence, &target_fl->l_whence);
  6355. __put_user(fl->l_start, &target_fl->l_start);
  6356. __put_user(fl->l_len, &target_fl->l_len);
  6357. __put_user(fl->l_pid, &target_fl->l_pid);
  6358. unlock_user_struct(target_fl, target_flock_addr, 1);
  6359. return 0;
  6360. }
/*
 * Emulate fcntl(2)/fcntl64(2) for the guest.
 *
 * fd:  host file descriptor (already translated by the caller)
 * cmd: TARGET_F_* command as supplied by the guest
 * arg: raw third argument; its meaning depends on cmd (guest pointer,
 *      flag bitmask, signal number, plain integer, ...)
 *
 * Returns the fcntl result converted to guest conventions, or a
 * negative TARGET_* errno.
 */
static abi_long do_fcntl(int fd, int cmd, abi_ulong arg)
{
    struct flock fl;
#ifdef F_GETOWN_EX
    struct f_owner_ex fox;
    struct target_f_owner_ex *target_fox;
#endif
    abi_long ret;
    int host_cmd = target_to_host_fcntl_cmd(cmd);

    /* Command not representable on the host: report EINVAL. */
    if (host_cmd == -TARGET_EINVAL)
        return host_cmd;

    switch(cmd) {
    case TARGET_F_GETLK:
        /* arg is a guest struct flock: convert in, query, convert out. */
        ret = copy_from_user_flock(&fl, arg);
        if (ret) {
            return ret;
        }
        ret = get_errno(safe_fcntl(fd, host_cmd, &fl));
        if (ret == 0) {
            ret = copy_to_user_flock(arg, &fl);
        }
        break;

    case TARGET_F_SETLK:
    case TARGET_F_SETLKW:
        ret = copy_from_user_flock(&fl, arg);
        if (ret) {
            return ret;
        }
        ret = get_errno(safe_fcntl(fd, host_cmd, &fl));
        break;

    case TARGET_F_GETLK64:
    case TARGET_F_OFD_GETLK:
        /* 64-bit lock variants use the target_flock64 guest layout. */
        ret = copy_from_user_flock64(&fl, arg);
        if (ret) {
            return ret;
        }
        ret = get_errno(safe_fcntl(fd, host_cmd, &fl));
        if (ret == 0) {
            ret = copy_to_user_flock64(arg, &fl);
        }
        break;
    case TARGET_F_SETLK64:
    case TARGET_F_SETLKW64:
    case TARGET_F_OFD_SETLK:
    case TARGET_F_OFD_SETLKW:
        ret = copy_from_user_flock64(&fl, arg);
        if (ret) {
            return ret;
        }
        ret = get_errno(safe_fcntl(fd, host_cmd, &fl));
        break;

    case TARGET_F_GETFL:
        ret = get_errno(safe_fcntl(fd, host_cmd, arg));
        if (ret >= 0) {
            /* Translate the host O_* bits into the guest's encoding. */
            ret = host_to_target_bitmask(ret, fcntl_flags_tbl);
            /* tell 32-bit guests it uses largefile on 64-bit hosts: */
            if (O_LARGEFILE == 0 && HOST_LONG_BITS == 64) {
                ret |= TARGET_O_LARGEFILE;
            }
        }
        break;

    case TARGET_F_SETFL:
        /* arg is a guest O_* bitmask; convert before calling the host. */
        ret = get_errno(safe_fcntl(fd, host_cmd,
                                   target_to_host_bitmask(arg,
                                                          fcntl_flags_tbl)));
        break;

#ifdef F_GETOWN_EX
    case TARGET_F_GETOWN_EX:
        ret = get_errno(safe_fcntl(fd, host_cmd, &fox));
        if (ret >= 0) {
            /* Copy the f_owner_ex result out with byte-swapping. */
            if (!lock_user_struct(VERIFY_WRITE, target_fox, arg, 0))
                return -TARGET_EFAULT;
            target_fox->type = tswap32(fox.type);
            target_fox->pid = tswap32(fox.pid);
            unlock_user_struct(target_fox, arg, 1);
        }
        break;
#endif

#ifdef F_SETOWN_EX
    case TARGET_F_SETOWN_EX:
        if (!lock_user_struct(VERIFY_READ, target_fox, arg, 1))
            return -TARGET_EFAULT;
        fox.type = tswap32(target_fox->type);
        fox.pid = tswap32(target_fox->pid);
        unlock_user_struct(target_fox, arg, 0);
        ret = get_errno(safe_fcntl(fd, host_cmd, &fox));
        break;
#endif

    case TARGET_F_SETSIG:
        /* Signal numbers differ between guest and host ABIs. */
        ret = get_errno(safe_fcntl(fd, host_cmd, target_to_host_signal(arg)));
        break;

    case TARGET_F_GETSIG:
        ret = host_to_target_signal(get_errno(safe_fcntl(fd, host_cmd, arg)));
        break;

    case TARGET_F_SETOWN:
    case TARGET_F_GETOWN:
    case TARGET_F_SETLEASE:
    case TARGET_F_GETLEASE:
    case TARGET_F_SETPIPE_SZ:
    case TARGET_F_GETPIPE_SZ:
    case TARGET_F_ADD_SEALS:
    case TARGET_F_GET_SEALS:
        /* Plain integer argument: no conversion required. */
        ret = get_errno(safe_fcntl(fd, host_cmd, arg));
        break;

    default:
        /* NOTE(review): this passes the *guest* cmd, not host_cmd --
         * presumably for commands target_to_host_fcntl_cmd() maps
         * through unchanged; confirm against that helper. */
        ret = get_errno(safe_fcntl(fd, cmd, arg));
        break;
    }
    return ret;
}
  6471. #ifdef USE_UID16
  6472. static inline int high2lowuid(int uid)
  6473. {
  6474. if (uid > 65535)
  6475. return 65534;
  6476. else
  6477. return uid;
  6478. }
  6479. static inline int high2lowgid(int gid)
  6480. {
  6481. if (gid > 65535)
  6482. return 65534;
  6483. else
  6484. return gid;
  6485. }
  6486. static inline int low2highuid(int uid)
  6487. {
  6488. if ((int16_t)uid == -1)
  6489. return -1;
  6490. else
  6491. return uid;
  6492. }
  6493. static inline int low2highgid(int gid)
  6494. {
  6495. if ((int16_t)gid == -1)
  6496. return -1;
  6497. else
  6498. return gid;
  6499. }
  6500. static inline int tswapid(int id)
  6501. {
  6502. return tswap16(id);
  6503. }
  6504. #define put_user_id(x, gaddr) put_user_u16(x, gaddr)
  6505. #else /* !USE_UID16 */
  6506. static inline int high2lowuid(int uid)
  6507. {
  6508. return uid;
  6509. }
  6510. static inline int high2lowgid(int gid)
  6511. {
  6512. return gid;
  6513. }
  6514. static inline int low2highuid(int uid)
  6515. {
  6516. return uid;
  6517. }
  6518. static inline int low2highgid(int gid)
  6519. {
  6520. return gid;
  6521. }
  6522. static inline int tswapid(int id)
  6523. {
  6524. return tswap32(id);
  6525. }
  6526. #define put_user_id(x, gaddr) put_user_u32(x, gaddr)
  6527. #endif /* USE_UID16 */
  6528. /* We must do direct syscalls for setting UID/GID, because we want to
  6529. * implement the Linux system call semantics of "change only for this thread",
  6530. * not the libc/POSIX semantics of "change for all threads in process".
  6531. * (See http://ewontfix.com/17/ for more details.)
  6532. * We use the 32-bit version of the syscalls if present; if it is not
  6533. * then either the host architecture supports 32-bit UIDs natively with
  6534. * the standard syscall, or the 16-bit UID is the best we can do.
  6535. */
  6536. #ifdef __NR_setuid32
  6537. #define __NR_sys_setuid __NR_setuid32
  6538. #else
  6539. #define __NR_sys_setuid __NR_setuid
  6540. #endif
  6541. #ifdef __NR_setgid32
  6542. #define __NR_sys_setgid __NR_setgid32
  6543. #else
  6544. #define __NR_sys_setgid __NR_setgid
  6545. #endif
  6546. #ifdef __NR_setresuid32
  6547. #define __NR_sys_setresuid __NR_setresuid32
  6548. #else
  6549. #define __NR_sys_setresuid __NR_setresuid
  6550. #endif
  6551. #ifdef __NR_setresgid32
  6552. #define __NR_sys_setresgid __NR_setresgid32
  6553. #else
  6554. #define __NR_sys_setresgid __NR_setresgid
  6555. #endif
  6556. #ifdef __NR_setgroups32
  6557. #define __NR_sys_setgroups __NR_setgroups32
  6558. #else
  6559. #define __NR_sys_setgroups __NR_setgroups
  6560. #endif
  6561. #ifdef __NR_sys_setreuid32
  6562. #define __NR_sys_setreuid __NR_setreuid32
  6563. #else
  6564. #define __NR_sys_setreuid __NR_setreuid
  6565. #endif
  6566. #ifdef __NR_sys_setregid32
  6567. #define __NR_sys_setregid __NR_setregid32
  6568. #else
  6569. #define __NR_sys_setregid __NR_setregid
  6570. #endif
  6571. _syscall1(int, sys_setuid, uid_t, uid)
  6572. _syscall1(int, sys_setgid, gid_t, gid)
  6573. _syscall3(int, sys_setresuid, uid_t, ruid, uid_t, euid, uid_t, suid)
  6574. _syscall3(int, sys_setresgid, gid_t, rgid, gid_t, egid, gid_t, sgid)
  6575. _syscall2(int, sys_setgroups, int, size, gid_t *, grouplist)
  6576. _syscall2(int, sys_setreuid, uid_t, ruid, uid_t, euid);
  6577. _syscall2(int, sys_setregid, gid_t, rgid, gid_t, egid);
/*
 * One-time initialisation of the syscall emulation layer: registers
 * the thunk type descriptions used for ioctl argument conversion, and
 * patches the size field of any ioctl command that was declared with
 * an all-ones placeholder size (computed from its pointed-to type).
 */
void syscall_init(void)
{
    IOCTLEntry *ie;
    const argtype *arg_type;
    int size;

    thunk_init(STRUCT_MAX);
#define STRUCT(name, ...) thunk_register_struct(STRUCT_ ## name, #name, struct_ ## name ## _def);
#define STRUCT_SPECIAL(name) thunk_register_struct_direct(STRUCT_ ## name, #name, &struct_ ## name ## _def);
#include "syscall_types.h"
#undef STRUCT
#undef STRUCT_SPECIAL

    /* we patch the ioctl size if necessary. We rely on the fact that
       no ioctl has all the bits at '1' in the size field */
    ie = ioctl_entries;
    while (ie->target_cmd != 0) {
        if (((ie->target_cmd >> TARGET_IOC_SIZESHIFT) & TARGET_IOC_SIZEMASK) ==
            TARGET_IOC_SIZEMASK) {
            /* Placeholder size: derive the real one from the thunk
             * description of the type the argument points at. */
            arg_type = ie->arg_type;
            if (arg_type[0] != TYPE_PTR) {
                fprintf(stderr, "cannot patch size for ioctl 0x%x\n",
                        ie->target_cmd);
                exit(1);
            }
            arg_type++;
            size = thunk_type_size(arg_type, 0);
            ie->target_cmd = (ie->target_cmd &
                              ~(TARGET_IOC_SIZEMASK << TARGET_IOC_SIZESHIFT)) |
                             (size << TARGET_IOC_SIZESHIFT);
        }

        /* automatic consistency check if same arch */
#if (defined(__i386__) && defined(TARGET_I386) && defined(TARGET_ABI32)) || \
    (defined(__x86_64__) && defined(TARGET_X86_64))
        if (unlikely(ie->target_cmd != ie->host_cmd)) {
            fprintf(stderr, "ERROR: ioctl(%s): target=0x%x host=0x%x\n",
                    ie->name, ie->target_cmd, ie->host_cmd);
        }
#endif
        ie++;
    }
}
  6618. #ifdef TARGET_NR_truncate64
  6619. static inline abi_long target_truncate64(CPUArchState *cpu_env, const char *arg1,
  6620. abi_long arg2,
  6621. abi_long arg3,
  6622. abi_long arg4)
  6623. {
  6624. if (regpairs_aligned(cpu_env, TARGET_NR_truncate64)) {
  6625. arg2 = arg3;
  6626. arg3 = arg4;
  6627. }
  6628. return get_errno(truncate(arg1, target_offset64(arg2, arg3)));
  6629. }
  6630. #endif
  6631. #ifdef TARGET_NR_ftruncate64
  6632. static inline abi_long target_ftruncate64(CPUArchState *cpu_env, abi_long arg1,
  6633. abi_long arg2,
  6634. abi_long arg3,
  6635. abi_long arg4)
  6636. {
  6637. if (regpairs_aligned(cpu_env, TARGET_NR_ftruncate64)) {
  6638. arg2 = arg3;
  6639. arg3 = arg4;
  6640. }
  6641. return get_errno(ftruncate(arg1, target_offset64(arg2, arg3)));
  6642. }
  6643. #endif
  6644. #if defined(TARGET_NR_timer_settime) || \
  6645. (defined(TARGET_NR_timerfd_settime) && defined(CONFIG_TIMERFD))
  6646. static inline abi_long target_to_host_itimerspec(struct itimerspec *host_its,
  6647. abi_ulong target_addr)
  6648. {
  6649. if (target_to_host_timespec(&host_its->it_interval, target_addr +
  6650. offsetof(struct target_itimerspec,
  6651. it_interval)) ||
  6652. target_to_host_timespec(&host_its->it_value, target_addr +
  6653. offsetof(struct target_itimerspec,
  6654. it_value))) {
  6655. return -TARGET_EFAULT;
  6656. }
  6657. return 0;
  6658. }
  6659. #endif
  6660. #if defined(TARGET_NR_timer_settime64) || \
  6661. (defined(TARGET_NR_timerfd_settime64) && defined(CONFIG_TIMERFD))
  6662. static inline abi_long target_to_host_itimerspec64(struct itimerspec *host_its,
  6663. abi_ulong target_addr)
  6664. {
  6665. if (target_to_host_timespec64(&host_its->it_interval, target_addr +
  6666. offsetof(struct target__kernel_itimerspec,
  6667. it_interval)) ||
  6668. target_to_host_timespec64(&host_its->it_value, target_addr +
  6669. offsetof(struct target__kernel_itimerspec,
  6670. it_value))) {
  6671. return -TARGET_EFAULT;
  6672. }
  6673. return 0;
  6674. }
  6675. #endif
  6676. #if ((defined(TARGET_NR_timerfd_gettime) || \
  6677. defined(TARGET_NR_timerfd_settime)) && defined(CONFIG_TIMERFD)) || \
  6678. defined(TARGET_NR_timer_gettime) || defined(TARGET_NR_timer_settime)
  6679. static inline abi_long host_to_target_itimerspec(abi_ulong target_addr,
  6680. struct itimerspec *host_its)
  6681. {
  6682. if (host_to_target_timespec(target_addr + offsetof(struct target_itimerspec,
  6683. it_interval),
  6684. &host_its->it_interval) ||
  6685. host_to_target_timespec(target_addr + offsetof(struct target_itimerspec,
  6686. it_value),
  6687. &host_its->it_value)) {
  6688. return -TARGET_EFAULT;
  6689. }
  6690. return 0;
  6691. }
  6692. #endif
  6693. #if ((defined(TARGET_NR_timerfd_gettime64) || \
  6694. defined(TARGET_NR_timerfd_settime64)) && defined(CONFIG_TIMERFD)) || \
  6695. defined(TARGET_NR_timer_gettime64) || defined(TARGET_NR_timer_settime64)
  6696. static inline abi_long host_to_target_itimerspec64(abi_ulong target_addr,
  6697. struct itimerspec *host_its)
  6698. {
  6699. if (host_to_target_timespec64(target_addr +
  6700. offsetof(struct target__kernel_itimerspec,
  6701. it_interval),
  6702. &host_its->it_interval) ||
  6703. host_to_target_timespec64(target_addr +
  6704. offsetof(struct target__kernel_itimerspec,
  6705. it_value),
  6706. &host_its->it_value)) {
  6707. return -TARGET_EFAULT;
  6708. }
  6709. return 0;
  6710. }
  6711. #endif
#if defined(TARGET_NR_adjtimex) || \
    (defined(TARGET_NR_clock_adjtime) && defined(CONFIG_CLOCK_ADJTIME))
/*
 * Convert a guest struct target_timex at target_addr into *host_tx.
 * Every field is copied with byte-swapping via __get_user.
 * Returns 0 on success or -TARGET_EFAULT on a bad guest address.
 */
static inline abi_long target_to_host_timex(struct timex *host_tx,
                                            abi_long target_addr)
{
    struct target_timex *target_tx;

    if (!lock_user_struct(VERIFY_READ, target_tx, target_addr, 1)) {
        return -TARGET_EFAULT;
    }

    __get_user(host_tx->modes, &target_tx->modes);
    __get_user(host_tx->offset, &target_tx->offset);
    __get_user(host_tx->freq, &target_tx->freq);
    __get_user(host_tx->maxerror, &target_tx->maxerror);
    __get_user(host_tx->esterror, &target_tx->esterror);
    __get_user(host_tx->status, &target_tx->status);
    __get_user(host_tx->constant, &target_tx->constant);
    __get_user(host_tx->precision, &target_tx->precision);
    __get_user(host_tx->tolerance, &target_tx->tolerance);
    /* The embedded timeval is copied member-by-member. */
    __get_user(host_tx->time.tv_sec, &target_tx->time.tv_sec);
    __get_user(host_tx->time.tv_usec, &target_tx->time.tv_usec);
    __get_user(host_tx->tick, &target_tx->tick);
    __get_user(host_tx->ppsfreq, &target_tx->ppsfreq);
    __get_user(host_tx->jitter, &target_tx->jitter);
    __get_user(host_tx->shift, &target_tx->shift);
    __get_user(host_tx->stabil, &target_tx->stabil);
    __get_user(host_tx->jitcnt, &target_tx->jitcnt);
    __get_user(host_tx->calcnt, &target_tx->calcnt);
    __get_user(host_tx->errcnt, &target_tx->errcnt);
    __get_user(host_tx->stbcnt, &target_tx->stbcnt);
    __get_user(host_tx->tai, &target_tx->tai);

    unlock_user_struct(target_tx, target_addr, 0);
    return 0;
}
/*
 * Copy *host_tx back out to a guest struct target_timex at target_addr,
 * byte-swapping each field via __put_user.
 * Returns 0 on success or -TARGET_EFAULT on a bad guest address.
 */
static inline abi_long host_to_target_timex(abi_long target_addr,
                                            struct timex *host_tx)
{
    struct target_timex *target_tx;

    if (!lock_user_struct(VERIFY_WRITE, target_tx, target_addr, 0)) {
        return -TARGET_EFAULT;
    }

    __put_user(host_tx->modes, &target_tx->modes);
    __put_user(host_tx->offset, &target_tx->offset);
    __put_user(host_tx->freq, &target_tx->freq);
    __put_user(host_tx->maxerror, &target_tx->maxerror);
    __put_user(host_tx->esterror, &target_tx->esterror);
    __put_user(host_tx->status, &target_tx->status);
    __put_user(host_tx->constant, &target_tx->constant);
    __put_user(host_tx->precision, &target_tx->precision);
    __put_user(host_tx->tolerance, &target_tx->tolerance);
    /* The embedded timeval is copied member-by-member. */
    __put_user(host_tx->time.tv_sec, &target_tx->time.tv_sec);
    __put_user(host_tx->time.tv_usec, &target_tx->time.tv_usec);
    __put_user(host_tx->tick, &target_tx->tick);
    __put_user(host_tx->ppsfreq, &target_tx->ppsfreq);
    __put_user(host_tx->jitter, &target_tx->jitter);
    __put_user(host_tx->shift, &target_tx->shift);
    __put_user(host_tx->stabil, &target_tx->stabil);
    __put_user(host_tx->jitcnt, &target_tx->jitcnt);
    __put_user(host_tx->calcnt, &target_tx->calcnt);
    __put_user(host_tx->errcnt, &target_tx->errcnt);
    __put_user(host_tx->stbcnt, &target_tx->stbcnt);
    __put_user(host_tx->tai, &target_tx->tai);

    unlock_user_struct(target_tx, target_addr, 1);
    return 0;
}
#endif
#if defined(TARGET_NR_clock_adjtime64) && defined(CONFIG_CLOCK_ADJTIME)
/*
 * Convert a guest struct target__kernel_timex (64-bit time_t layout)
 * into *host_tx.  The embedded time value is converted separately via
 * copy_from_user_timeval64 before the rest of the struct is locked.
 * Returns 0 on success or -TARGET_EFAULT on a bad guest address.
 */
static inline abi_long target_to_host_timex64(struct timex *host_tx,
                                              abi_long target_addr)
{
    struct target__kernel_timex *target_tx;

    if (copy_from_user_timeval64(&host_tx->time, target_addr +
                                 offsetof(struct target__kernel_timex,
                                          time))) {
        return -TARGET_EFAULT;
    }

    if (!lock_user_struct(VERIFY_READ, target_tx, target_addr, 1)) {
        return -TARGET_EFAULT;
    }

    __get_user(host_tx->modes, &target_tx->modes);
    __get_user(host_tx->offset, &target_tx->offset);
    __get_user(host_tx->freq, &target_tx->freq);
    __get_user(host_tx->maxerror, &target_tx->maxerror);
    __get_user(host_tx->esterror, &target_tx->esterror);
    __get_user(host_tx->status, &target_tx->status);
    __get_user(host_tx->constant, &target_tx->constant);
    __get_user(host_tx->precision, &target_tx->precision);
    __get_user(host_tx->tolerance, &target_tx->tolerance);
    __get_user(host_tx->tick, &target_tx->tick);
    __get_user(host_tx->ppsfreq, &target_tx->ppsfreq);
    __get_user(host_tx->jitter, &target_tx->jitter);
    __get_user(host_tx->shift, &target_tx->shift);
    __get_user(host_tx->stabil, &target_tx->stabil);
    __get_user(host_tx->jitcnt, &target_tx->jitcnt);
    __get_user(host_tx->calcnt, &target_tx->calcnt);
    __get_user(host_tx->errcnt, &target_tx->errcnt);
    __get_user(host_tx->stbcnt, &target_tx->stbcnt);
    __get_user(host_tx->tai, &target_tx->tai);

    unlock_user_struct(target_tx, target_addr, 0);
    return 0;
}
/*
 * Copy *host_tx back out to a guest struct target__kernel_timex
 * (64-bit time_t layout).  The embedded time value is converted
 * separately via copy_to_user_timeval64 before the rest of the
 * struct is locked.  Returns 0 on success or -TARGET_EFAULT.
 */
static inline abi_long host_to_target_timex64(abi_long target_addr,
                                              struct timex *host_tx)
{
    struct target__kernel_timex *target_tx;

    if (copy_to_user_timeval64(target_addr +
                               offsetof(struct target__kernel_timex, time),
                               &host_tx->time)) {
        return -TARGET_EFAULT;
    }

    if (!lock_user_struct(VERIFY_WRITE, target_tx, target_addr, 0)) {
        return -TARGET_EFAULT;
    }

    __put_user(host_tx->modes, &target_tx->modes);
    __put_user(host_tx->offset, &target_tx->offset);
    __put_user(host_tx->freq, &target_tx->freq);
    __put_user(host_tx->maxerror, &target_tx->maxerror);
    __put_user(host_tx->esterror, &target_tx->esterror);
    __put_user(host_tx->status, &target_tx->status);
    __put_user(host_tx->constant, &target_tx->constant);
    __put_user(host_tx->precision, &target_tx->precision);
    __put_user(host_tx->tolerance, &target_tx->tolerance);
    __put_user(host_tx->tick, &target_tx->tick);
    __put_user(host_tx->ppsfreq, &target_tx->ppsfreq);
    __put_user(host_tx->jitter, &target_tx->jitter);
    __put_user(host_tx->shift, &target_tx->shift);
    __put_user(host_tx->stabil, &target_tx->stabil);
    __put_user(host_tx->jitcnt, &target_tx->jitcnt);
    __put_user(host_tx->calcnt, &target_tx->calcnt);
    __put_user(host_tx->errcnt, &target_tx->errcnt);
    __put_user(host_tx->stbcnt, &target_tx->stbcnt);
    __put_user(host_tx->tai, &target_tx->tai);

    unlock_user_struct(target_tx, target_addr, 1);
    return 0;
}
#endif
  6847. #ifndef HAVE_SIGEV_NOTIFY_THREAD_ID
  6848. #define sigev_notify_thread_id _sigev_un._tid
  6849. #endif
  6850. static inline abi_long target_to_host_sigevent(struct sigevent *host_sevp,
  6851. abi_ulong target_addr)
  6852. {
  6853. struct target_sigevent *target_sevp;
  6854. if (!lock_user_struct(VERIFY_READ, target_sevp, target_addr, 1)) {
  6855. return -TARGET_EFAULT;
  6856. }
  6857. /* This union is awkward on 64 bit systems because it has a 32 bit
  6858. * integer and a pointer in it; we follow the conversion approach
  6859. * used for handling sigval types in signal.c so the guest should get
  6860. * the correct value back even if we did a 64 bit byteswap and it's
  6861. * using the 32 bit integer.
  6862. */
  6863. host_sevp->sigev_value.sival_ptr =
  6864. (void *)(uintptr_t)tswapal(target_sevp->sigev_value.sival_ptr);
  6865. host_sevp->sigev_signo =
  6866. target_to_host_signal(tswap32(target_sevp->sigev_signo));
  6867. host_sevp->sigev_notify = tswap32(target_sevp->sigev_notify);
  6868. host_sevp->sigev_notify_thread_id = tswap32(target_sevp->_sigev_un._tid);
  6869. unlock_user_struct(target_sevp, target_addr, 1);
  6870. return 0;
  6871. }
  6872. #if defined(TARGET_NR_mlockall)
  6873. static inline int target_to_host_mlockall_arg(int arg)
  6874. {
  6875. int result = 0;
  6876. if (arg & TARGET_MCL_CURRENT) {
  6877. result |= MCL_CURRENT;
  6878. }
  6879. if (arg & TARGET_MCL_FUTURE) {
  6880. result |= MCL_FUTURE;
  6881. }
  6882. #ifdef MCL_ONFAULT
  6883. if (arg & TARGET_MCL_ONFAULT) {
  6884. result |= MCL_ONFAULT;
  6885. }
  6886. #endif
  6887. return result;
  6888. }
  6889. #endif
  6890. static inline int target_to_host_msync_arg(abi_long arg)
  6891. {
  6892. return ((arg & TARGET_MS_ASYNC) ? MS_ASYNC : 0) |
  6893. ((arg & TARGET_MS_INVALIDATE) ? MS_INVALIDATE : 0) |
  6894. ((arg & TARGET_MS_SYNC) ? MS_SYNC : 0) |
  6895. (arg & ~(TARGET_MS_ASYNC | TARGET_MS_INVALIDATE | TARGET_MS_SYNC));
  6896. }
#if (defined(TARGET_NR_stat64) || defined(TARGET_NR_lstat64) ||     \
     defined(TARGET_NR_fstat64) || defined(TARGET_NR_fstatat64) ||  \
     defined(TARGET_NR_newfstatat))
/*
 * Copy a host struct stat out to the guest's stat64 layout at
 * target_addr.  On 32-bit Arm the EABI variant of the structure is
 * selected at runtime from cpu_env->eabi; otherwise the layout is
 * chosen at build time (target_stat64 if the target declares one,
 * else target_stat).  Returns 0 or -TARGET_EFAULT.
 */
static inline abi_long host_to_target_stat64(CPUArchState *cpu_env,
                                             abi_ulong target_addr,
                                             struct stat *host_st)
{
#if defined(TARGET_ARM) && defined(TARGET_ABI32)
    if (cpu_env->eabi) {
        struct target_eabi_stat64 *target_st;

        if (!lock_user_struct(VERIFY_WRITE, target_st, target_addr, 0))
            return -TARGET_EFAULT;
        /* Zero first so padding/unwritten fields are deterministic. */
        memset(target_st, 0, sizeof(struct target_eabi_stat64));
        __put_user(host_st->st_dev, &target_st->st_dev);
        __put_user(host_st->st_ino, &target_st->st_ino);
#ifdef TARGET_STAT64_HAS_BROKEN_ST_INO
        /* Some targets carry the inode in a second, "real" field. */
        __put_user(host_st->st_ino, &target_st->__st_ino);
#endif
        __put_user(host_st->st_mode, &target_st->st_mode);
        __put_user(host_st->st_nlink, &target_st->st_nlink);
        __put_user(host_st->st_uid, &target_st->st_uid);
        __put_user(host_st->st_gid, &target_st->st_gid);
        __put_user(host_st->st_rdev, &target_st->st_rdev);
        __put_user(host_st->st_size, &target_st->st_size);
        __put_user(host_st->st_blksize, &target_st->st_blksize);
        __put_user(host_st->st_blocks, &target_st->st_blocks);
        __put_user(host_st->st_atime, &target_st->target_st_atime);
        __put_user(host_st->st_mtime, &target_st->target_st_mtime);
        __put_user(host_st->st_ctime, &target_st->target_st_ctime);
#ifdef HAVE_STRUCT_STAT_ST_ATIM
        /* Nanosecond timestamps, when the host stat provides them. */
        __put_user(host_st->st_atim.tv_nsec, &target_st->target_st_atime_nsec);
        __put_user(host_st->st_mtim.tv_nsec, &target_st->target_st_mtime_nsec);
        __put_user(host_st->st_ctim.tv_nsec, &target_st->target_st_ctime_nsec);
#endif
        unlock_user_struct(target_st, target_addr, 1);
    } else
#endif
    {
#if defined(TARGET_HAS_STRUCT_STAT64)
        struct target_stat64 *target_st;
#else
        struct target_stat *target_st;
#endif

        if (!lock_user_struct(VERIFY_WRITE, target_st, target_addr, 0))
            return -TARGET_EFAULT;
        memset(target_st, 0, sizeof(*target_st));
        __put_user(host_st->st_dev, &target_st->st_dev);
        __put_user(host_st->st_ino, &target_st->st_ino);
#ifdef TARGET_STAT64_HAS_BROKEN_ST_INO
        __put_user(host_st->st_ino, &target_st->__st_ino);
#endif
        __put_user(host_st->st_mode, &target_st->st_mode);
        __put_user(host_st->st_nlink, &target_st->st_nlink);
        __put_user(host_st->st_uid, &target_st->st_uid);
        __put_user(host_st->st_gid, &target_st->st_gid);
        __put_user(host_st->st_rdev, &target_st->st_rdev);
        /* XXX: better use of kernel struct */
        __put_user(host_st->st_size, &target_st->st_size);
        __put_user(host_st->st_blksize, &target_st->st_blksize);
        __put_user(host_st->st_blocks, &target_st->st_blocks);
        __put_user(host_st->st_atime, &target_st->target_st_atime);
        __put_user(host_st->st_mtime, &target_st->target_st_mtime);
        __put_user(host_st->st_ctime, &target_st->target_st_ctime);
#ifdef HAVE_STRUCT_STAT_ST_ATIM
        __put_user(host_st->st_atim.tv_nsec, &target_st->target_st_atime_nsec);
        __put_user(host_st->st_mtim.tv_nsec, &target_st->target_st_mtime_nsec);
        __put_user(host_st->st_ctim.tv_nsec, &target_st->target_st_ctime_nsec);
#endif
        unlock_user_struct(target_st, target_addr, 1);
    }

    return 0;
}
#endif
#if defined(TARGET_NR_statx) && defined(__NR_statx)
/*
 * Copy a statx result out to guest memory with byte-swapping.  Note
 * host_stx already uses the target_statx layout (the statx struct has
 * the same shape everywhere); only endianness conversion is needed.
 * Returns 0 or -TARGET_EFAULT.
 */
static inline abi_long host_to_target_statx(struct target_statx *host_stx,
                                            abi_ulong target_addr)
{
    struct target_statx *target_stx;

    if (!lock_user_struct(VERIFY_WRITE, target_stx, target_addr, 0)) {
        return -TARGET_EFAULT;
    }
    /* Zero first so reserved/padding fields are deterministic. */
    memset(target_stx, 0, sizeof(*target_stx));

    __put_user(host_stx->stx_mask, &target_stx->stx_mask);
    __put_user(host_stx->stx_blksize, &target_stx->stx_blksize);
    __put_user(host_stx->stx_attributes, &target_stx->stx_attributes);
    __put_user(host_stx->stx_nlink, &target_stx->stx_nlink);
    __put_user(host_stx->stx_uid, &target_stx->stx_uid);
    __put_user(host_stx->stx_gid, &target_stx->stx_gid);
    __put_user(host_stx->stx_mode, &target_stx->stx_mode);
    __put_user(host_stx->stx_ino, &target_stx->stx_ino);
    __put_user(host_stx->stx_size, &target_stx->stx_size);
    __put_user(host_stx->stx_blocks, &target_stx->stx_blocks);
    __put_user(host_stx->stx_attributes_mask, &target_stx->stx_attributes_mask);
    __put_user(host_stx->stx_atime.tv_sec, &target_stx->stx_atime.tv_sec);
    __put_user(host_stx->stx_atime.tv_nsec, &target_stx->stx_atime.tv_nsec);
    __put_user(host_stx->stx_btime.tv_sec, &target_stx->stx_btime.tv_sec);
    __put_user(host_stx->stx_btime.tv_nsec, &target_stx->stx_btime.tv_nsec);
    __put_user(host_stx->stx_ctime.tv_sec, &target_stx->stx_ctime.tv_sec);
    __put_user(host_stx->stx_ctime.tv_nsec, &target_stx->stx_ctime.tv_nsec);
    __put_user(host_stx->stx_mtime.tv_sec, &target_stx->stx_mtime.tv_sec);
    __put_user(host_stx->stx_mtime.tv_nsec, &target_stx->stx_mtime.tv_nsec);
    __put_user(host_stx->stx_rdev_major, &target_stx->stx_rdev_major);
    __put_user(host_stx->stx_rdev_minor, &target_stx->stx_rdev_minor);
    __put_user(host_stx->stx_dev_major, &target_stx->stx_dev_major);
    __put_user(host_stx->stx_dev_minor, &target_stx->stx_dev_minor);

    unlock_user_struct(target_stx, target_addr, 1);
    return 0;
}
#endif
/*
 * Raw futex wrapper: picks __NR_futex vs __NR_futex_time64 depending
 * on the host's time_t width.  Returns the raw syscall result (no
 * errno conversion); aborts if the build has no usable futex syscall.
 */
static int do_sys_futex(int *uaddr, int op, int val,
                        const struct timespec *timeout, int *uaddr2,
                        int val3)
{
#if HOST_LONG_BITS == 64
#if defined(__NR_futex)
    /* always a 64-bit time_t, it doesn't define _time64 version */
    return sys_futex(uaddr, op, val, timeout, uaddr2, val3);
#endif
#else /* HOST_LONG_BITS == 64 */
#if defined(__NR_futex_time64)
    if (sizeof(timeout->tv_sec) == 8) {
        /* _time64 function on 32bit arch */
        return sys_futex_time64(uaddr, op, val, timeout, uaddr2, val3);
    }
#endif
#if defined(__NR_futex)
    /* old function on 32bit arch */
    return sys_futex(uaddr, op, val, timeout, uaddr2, val3);
#endif
#endif /* HOST_LONG_BITS == 64 */
    /* Reached only when neither futex syscall is available. */
    g_assert_not_reached();
}
/*
 * Signal-safe futex wrapper, mirroring do_sys_futex but routed through
 * the safe_* syscall machinery and converting the result with
 * get_errno.  Returns -TARGET_ENOSYS when no futex syscall exists.
 */
static int do_safe_futex(int *uaddr, int op, int val,
                         const struct timespec *timeout, int *uaddr2,
                         int val3)
{
#if HOST_LONG_BITS == 64
#if defined(__NR_futex)
    /* always a 64-bit time_t, it doesn't define _time64 version */
    return get_errno(safe_futex(uaddr, op, val, timeout, uaddr2, val3));
#endif
#else /* HOST_LONG_BITS == 64 */
#if defined(__NR_futex_time64)
    if (sizeof(timeout->tv_sec) == 8) {
        /* _time64 function on 32bit arch */
        return get_errno(safe_futex_time64(uaddr, op, val, timeout, uaddr2,
                                           val3));
    }
#endif
#if defined(__NR_futex)
    /* old function on 32bit arch */
    return get_errno(safe_futex(uaddr, op, val, timeout, uaddr2, val3));
#endif
#endif /* HOST_LONG_BITS == 64 */
    return -TARGET_ENOSYS;
}
  7053. /* ??? Using host futex calls even when target atomic operations
  7054. are not really atomic probably breaks things. However implementing
  7055. futexes locally would make futexes shared between multiple processes
  7056. tricky. However they're probably useless because guest atomic
  7057. operations won't work either. */
#if defined(TARGET_NR_futex) || defined(TARGET_NR_futex_time64)
/*
 * Emulate the futex/futex_time64 syscalls.
 *
 * time64 selects which guest timespec layout 'timeout' points at.
 * Depending on the base operation, 'val'/'val3' may need byte-swapping
 * (they are compared against guest memory), 'timeout' may actually be
 * a plain integer VAL2, and 'uaddr2' may need translating to a host
 * pointer.  Returns the converted syscall result or a TARGET_* errno.
 */
static int do_futex(CPUState *cpu, bool time64, target_ulong uaddr,
                    int op, int val, target_ulong timeout,
                    target_ulong uaddr2, int val3)
{
    struct timespec ts, *pts = NULL;
    void *haddr2 = NULL;
    int base_op;

    /* We assume FUTEX_* constants are the same on both host and target. */
#ifdef FUTEX_CMD_MASK
    base_op = op & FUTEX_CMD_MASK;
#else
    base_op = op;
#endif
    switch (base_op) {
    case FUTEX_WAIT:
    case FUTEX_WAIT_BITSET:
        /* val is compared against the guest-endian word at uaddr. */
        val = tswap32(val);
        break;
    case FUTEX_WAIT_REQUEUE_PI:
        val = tswap32(val);
        haddr2 = g2h(cpu, uaddr2);
        break;
    case FUTEX_LOCK_PI:
    case FUTEX_LOCK_PI2:
        break;
    case FUTEX_WAKE:
    case FUTEX_WAKE_BITSET:
    case FUTEX_TRYLOCK_PI:
    case FUTEX_UNLOCK_PI:
        /* These ops take no timeout; ignore whatever the guest passed. */
        timeout = 0;
        break;
    case FUTEX_FD:
        val = target_to_host_signal(val);
        timeout = 0;
        break;
    case FUTEX_CMP_REQUEUE:
    case FUTEX_CMP_REQUEUE_PI:
        /* val3 is compared against guest memory, so swap it too. */
        val3 = tswap32(val3);
        /* fall through */
    case FUTEX_REQUEUE:
    case FUTEX_WAKE_OP:
        /*
         * For these, the 4th argument is not TIMEOUT, but VAL2.
         * But the prototype of do_safe_futex takes a pointer, so
         * insert casts to satisfy the compiler.  We do not need
         * to tswap VAL2 since it's not compared to guest memory.
         */
        pts = (struct timespec *)(uintptr_t)timeout;
        timeout = 0;
        haddr2 = g2h(cpu, uaddr2);
        break;
    default:
        return -TARGET_ENOSYS;
    }
    if (timeout) {
        /* Convert the guest timespec (layout chosen by time64). */
        pts = &ts;
        if (time64
            ? target_to_host_timespec64(pts, timeout)
            : target_to_host_timespec(pts, timeout)) {
            return -TARGET_EFAULT;
        }
    }
    return do_safe_futex(g2h(cpu, uaddr), op, val, pts, haddr2, val3);
}
#endif
#if defined(TARGET_NR_name_to_handle_at) && defined(CONFIG_OPEN_BY_HANDLE)
/*
 * Emulate name_to_handle_at(2).
 *
 * 'handle' is a guest pointer to a struct file_handle whose
 * handle_bytes field supplies the caller's buffer size; 'mount_id' is
 * a guest pointer receiving the mount id.  Returns the syscall result
 * or a TARGET_* errno.
 */
static abi_long do_name_to_handle_at(abi_long dirfd, abi_long pathname,
                                     abi_long handle, abi_long mount_id,
                                     abi_long flags)
{
    struct file_handle *target_fh;
    struct file_handle *fh;
    int mid = 0;
    abi_long ret;
    char *name;
    unsigned int size, total_size;

    /* handle_bytes is the first field of struct file_handle. */
    if (get_user_s32(size, handle)) {
        return -TARGET_EFAULT;
    }

    name = lock_user_string(pathname);
    if (!name) {
        return -TARGET_EFAULT;
    }

    /* NOTE(review): 'size' comes from the guest unvalidated; a huge
     * value makes total_size wrap / lock_user fail -- presumably the
     * lock_user failure path below is relied on to catch it; confirm. */
    total_size = sizeof(struct file_handle) + size;
    target_fh = lock_user(VERIFY_WRITE, handle, total_size, 0);
    if (!target_fh) {
        unlock_user(name, pathname, 0);
        return -TARGET_EFAULT;
    }

    fh = g_malloc0(total_size);
    fh->handle_bytes = size;

    ret = get_errno(name_to_handle_at(dirfd, path(name), fh, &mid, flags));
    unlock_user(name, pathname, 0);

    /* man name_to_handle_at(2):
     * Other than the use of the handle_bytes field, the caller should treat
     * the file_handle structure as an opaque data type
     */

    memcpy(target_fh, fh, total_size);
    target_fh->handle_bytes = tswap32(fh->handle_bytes);
    target_fh->handle_type = tswap32(fh->handle_type);
    g_free(fh);
    unlock_user(target_fh, handle, total_size);

    if (put_user_s32(mid, mount_id)) {
        return -TARGET_EFAULT;
    }

    return ret;
}
#endif
#if defined(TARGET_NR_open_by_handle_at) && defined(CONFIG_OPEN_BY_HANDLE)
/*
 * Emulate open_by_handle_at(2): read an opaque file handle from guest
 * memory, byte-swap its header fields to host order, and open it.
 *
 * @handle is the guest address of a struct file_handle.  Returns the
 * new fd via get_errno(), or -TARGET_EFAULT on bad guest memory.
 */
static abi_long do_open_by_handle_at(abi_long mount_fd, abi_long handle,
                                     abi_long flags)
{
    struct file_handle *target_fh;
    struct file_handle *fh;
    unsigned int size, total_size;
    abi_long ret;

    /* handle_bytes is the leading 32-bit field of struct file_handle. */
    if (get_user_s32(size, handle)) {
        return -TARGET_EFAULT;
    }

    /*
     * NOTE(review): @size is guest-controlled and unbounded here, so
     * this addition can wrap; the kernel rejects oversized handles with
     * EINVAL -- confirm the wrapped lock_user size is acceptable.
     */
    total_size = sizeof(struct file_handle) + size;
    target_fh = lock_user(VERIFY_READ, handle, total_size, 1);
    if (!target_fh) {
        return -TARGET_EFAULT;
    }

    /* Copy the opaque handle bytes; fix up the two header fields. */
    fh = g_memdup(target_fh, total_size);
    fh->handle_bytes = size;
    fh->handle_type = tswap32(target_fh->handle_type);

    ret = get_errno(open_by_handle_at(mount_fd, fh,
                    target_to_host_bitmask(flags, fcntl_flags_tbl)));

    g_free(fh);
    unlock_user(target_fh, handle, total_size);

    return ret;
}
#endif
#if defined(TARGET_NR_signalfd) || defined(TARGET_NR_signalfd4)
/*
 * Common implementation for signalfd()/signalfd4(): convert the guest
 * signal mask and flags to host form, create the signalfd, and register
 * a fd translator so reads return target-format signalfd_siginfo.
 */
static abi_long do_signalfd4(int fd, abi_long mask, int flags)
{
    int host_flags;
    target_sigset_t *target_mask;
    sigset_t host_mask;
    abi_long ret;

    /* Only the nonblock and cloexec flags are valid for signalfd4. */
    if (flags & ~(TARGET_O_NONBLOCK_MASK | TARGET_O_CLOEXEC)) {
        return -TARGET_EINVAL;
    }
    if (!lock_user_struct(VERIFY_READ, target_mask, mask, 1)) {
        return -TARGET_EFAULT;
    }
    target_to_host_sigset(&host_mask, target_mask);

    host_flags = target_to_host_bitmask(flags, fcntl_flags_tbl);

    ret = get_errno(signalfd(fd, &host_mask, host_flags));
    if (ret >= 0) {
        /* Reads from this fd need siginfo byte-order translation. */
        fd_trans_register(ret, &target_signalfd_trans);
    }

    unlock_user_struct(target_mask, mask, 0);

    return ret;
}
#endif
  7216. /* Map host to target signal numbers for the wait family of syscalls.
  7217. Assume all other status bits are the same. */
  7218. int host_to_target_waitstatus(int status)
  7219. {
  7220. if (WIFSIGNALED(status)) {
  7221. return host_to_target_signal(WTERMSIG(status)) | (status & ~0x7f);
  7222. }
  7223. if (WIFSTOPPED(status)) {
  7224. return (host_to_target_signal(WSTOPSIG(status)) << 8)
  7225. | (status & 0xff);
  7226. }
  7227. return status;
  7228. }
  7229. static int open_self_cmdline(CPUArchState *cpu_env, int fd)
  7230. {
  7231. CPUState *cpu = env_cpu(cpu_env);
  7232. struct linux_binprm *bprm = get_task_state(cpu)->bprm;
  7233. int i;
  7234. for (i = 0; i < bprm->argc; i++) {
  7235. size_t len = strlen(bprm->argv[i]) + 1;
  7236. if (write(fd, bprm->argv[i], len) != len) {
  7237. return -1;
  7238. }
  7239. }
  7240. return 0;
  7241. }
/* State shared by the open_self_maps_* helpers below. */
struct open_self_maps_data {
    TaskState *ts;                /* task whose address space is dumped */
    IntervalTreeRoot *host_maps;  /* parsed host /proc/self/maps, or NULL */
    int fd;                       /* output file descriptor */
    bool smaps;                   /* true: emit smaps format, not maps */
};
/*
 * Subroutine to output one line of /proc/self/maps,
 * or one region of /proc/self/smaps.
 */
#ifdef TARGET_HPPA
/* HPPA: the stack is identified by its *end* matching the limit. */
# define test_stack(S, E, L) (E == L)
#else
# define test_stack(S, E, L) (S == L)
#endif

static void open_self_maps_4(const struct open_self_maps_data *d,
                             const MapInfo *mi, abi_ptr start,
                             abi_ptr end, unsigned flags)
{
    const struct image_info *info = d->ts->info;
    const char *path = mi->path;
    uint64_t offset;
    int fd = d->fd;
    int count;

    /* Substitute the well-known pseudo-path names where they apply. */
    if (test_stack(start, end, info->stack_limit)) {
        path = "[stack]";
    } else if (start == info->brk) {
        path = "[heap]";
    } else if (start == info->vdso) {
        path = "[vdso]";
#ifdef TARGET_X86_64
    } else if (start == TARGET_VSYSCALL_PAGE) {
        path = "[vsyscall]";
#endif
    }

    /* Except null device (MAP_ANON), adjust offset for this fragment. */
    offset = mi->offset;
    if (mi->dev) {
        uintptr_t hstart = (uintptr_t)g2h_untagged(start);
        offset += hstart - mi->itree.start;
    }

    /* Address range, permissions, offset, device, inode. */
    count = dprintf(fd, TARGET_ABI_FMT_ptr "-" TARGET_ABI_FMT_ptr
                    " %c%c%c%c %08" PRIx64 " %02x:%02x %"PRId64,
                    start, end,
                    (flags & PAGE_READ) ? 'r' : '-',
                    (flags & PAGE_WRITE_ORG) ? 'w' : '-',
                    (flags & PAGE_EXEC) ? 'x' : '-',
                    mi->is_priv ? 'p' : 's',
                    offset, major(mi->dev), minor(mi->dev),
                    (uint64_t)mi->inode);
    if (path) {
        /* Pad the path out to a fixed column using the emitted width. */
        dprintf(fd, "%*s%s\n", 73 - count, "", path);
    } else {
        dprintf(fd, "\n");
    }

    if (d->smaps) {
        /* Synthesize the smaps detail block; RSS/PSS etc. read as 0. */
        unsigned long size = end - start;
        unsigned long page_size_kb = TARGET_PAGE_SIZE >> 10;
        unsigned long size_kb = size >> 10;

        dprintf(fd, "Size: %lu kB\n"
                "KernelPageSize: %lu kB\n"
                "MMUPageSize: %lu kB\n"
                "Rss: 0 kB\n"
                "Pss: 0 kB\n"
                "Pss_Dirty: 0 kB\n"
                "Shared_Clean: 0 kB\n"
                "Shared_Dirty: 0 kB\n"
                "Private_Clean: 0 kB\n"
                "Private_Dirty: 0 kB\n"
                "Referenced: 0 kB\n"
                "Anonymous: %lu kB\n"
                "LazyFree: 0 kB\n"
                "AnonHugePages: 0 kB\n"
                "ShmemPmdMapped: 0 kB\n"
                "FilePmdMapped: 0 kB\n"
                "Shared_Hugetlb: 0 kB\n"
                "Private_Hugetlb: 0 kB\n"
                "Swap: 0 kB\n"
                "SwapPss: 0 kB\n"
                "Locked: 0 kB\n"
                "THPeligible: 0\n"
                "VmFlags:%s%s%s%s%s%s%s%s\n",
                size_kb, page_size_kb, page_size_kb,
                (flags & PAGE_ANON ? size_kb : 0),
                (flags & PAGE_READ) ? " rd" : "",
                (flags & PAGE_WRITE_ORG) ? " wr" : "",
                (flags & PAGE_EXEC) ? " ex" : "",
                mi->is_priv ? "" : " sh",
                (flags & PAGE_READ) ? " mr" : "",
                (flags & PAGE_WRITE_ORG) ? " mw" : "",
                (flags & PAGE_EXEC) ? " me" : "",
                mi->is_priv ? "" : " ms");
    }
}
/*
 * Callback for walk_memory_regions, when read_self_maps() fails.
 * Proceed without the benefit of host /proc/self/maps cross-check.
 */
static int open_self_maps_3(void *opaque, target_ulong guest_start,
                            target_ulong guest_end, unsigned long flags)
{
    /* No host map info available: report the region as private. */
    static const MapInfo mi = { .is_priv = true };

    open_self_maps_4(opaque, &mi, guest_start, guest_end, flags);
    return 0;
}
/*
 * Callback for walk_memory_regions, when read_self_maps() succeeds.
 * Splits the guest region at host-mapping boundaries so each emitted
 * record corresponds to a single host mapping.
 */
static int open_self_maps_2(void *opaque, target_ulong guest_start,
                            target_ulong guest_end, unsigned long flags)
{
    const struct open_self_maps_data *d = opaque;
    uintptr_t host_start = (uintptr_t)g2h_untagged(guest_start);
    uintptr_t host_last = (uintptr_t)g2h_untagged(guest_end - 1);

#ifdef TARGET_X86_64
    /*
     * Because of the extremely high position of the page within the guest
     * virtual address space, this is not backed by host memory at all.
     * Therefore the loop below would fail. This is the only instance
     * of not having host backing memory.
     */
    if (guest_start == TARGET_VSYSCALL_PAGE) {
        return open_self_maps_3(opaque, guest_start, guest_end, flags);
    }
#endif

    while (1) {
        /*
         * NOTE(review): this assumes every remaining guest page has a
         * host backing mapping (n != NULL); only the vsyscall page is
         * excluded above -- confirm no other unbacked regions exist.
         */
        IntervalTreeNode *n =
            interval_tree_iter_first(d->host_maps, host_start, host_start);
        MapInfo *mi = container_of(n, MapInfo, itree);
        uintptr_t this_hlast = MIN(host_last, n->last);
        target_ulong this_gend = h2g(this_hlast) + 1;

        open_self_maps_4(d, mi, guest_start, this_gend, flags);

        if (this_hlast == host_last) {
            return 0;
        }
        host_start = this_hlast + 1;
        guest_start = h2g(host_start);
    }
}
  7381. static int open_self_maps_1(CPUArchState *env, int fd, bool smaps)
  7382. {
  7383. struct open_self_maps_data d = {
  7384. .ts = get_task_state(env_cpu(env)),
  7385. .fd = fd,
  7386. .smaps = smaps
  7387. };
  7388. mmap_lock();
  7389. d.host_maps = read_self_maps();
  7390. if (d.host_maps) {
  7391. walk_memory_regions(&d, open_self_maps_2);
  7392. free_self_maps(d.host_maps);
  7393. } else {
  7394. walk_memory_regions(&d, open_self_maps_3);
  7395. }
  7396. mmap_unlock();
  7397. return 0;
  7398. }
/* Fake /proc/self/maps for the guest. */
static int open_self_maps(CPUArchState *cpu_env, int fd)
{
    return open_self_maps_1(cpu_env, fd, false);
}
/* Fake /proc/self/smaps for the guest. */
static int open_self_smaps(CPUArchState *cpu_env, int fd)
{
    return open_self_maps_1(cpu_env, fd, true);
}
/*
 * Fake /proc/self/stat: emit 44 space-separated fields, filling in
 * only those QEMU can answer (pid, comm, state, ppid, num_threads,
 * starttime, startstack) and "0" for everything else.
 * Returns 0 on success, -1 on short write.
 */
static int open_self_stat(CPUArchState *cpu_env, int fd)
{
    CPUState *cpu = env_cpu(cpu_env);
    TaskState *ts = get_task_state(cpu);
    g_autoptr(GString) buf = g_string_new(NULL);
    int i;

    for (i = 0; i < 44; i++) {
        if (i == 0) {
            /* pid */
            g_string_printf(buf, FMT_pid " ", getpid());
        } else if (i == 1) {
            /* app name */
            gchar *bin = g_strrstr(ts->bprm->argv[0], "/");
            bin = bin ? bin + 1 : ts->bprm->argv[0];
            /* comm: basename of argv[0], truncated to 15 chars (%.15s) */
            g_string_printf(buf, "(%.15s) ", bin);
        } else if (i == 2) {
            /* task state */
            g_string_assign(buf, "R "); /* we are running right now */
        } else if (i == 3) {
            /* ppid */
            g_string_printf(buf, FMT_pid " ", getppid());
        } else if (i == 19) {
            /* num_threads: count entries on the global CPU list */
            int cpus = 0;
            WITH_RCU_READ_LOCK_GUARD() {
                CPUState *cpu_iter;
                CPU_FOREACH(cpu_iter) {
                    cpus++;
                }
            }
            g_string_printf(buf, "%d ", cpus);
        } else if (i == 21) {
            /* starttime */
            g_string_printf(buf, "%" PRIu64 " ", ts->start_boottime);
        } else if (i == 27) {
            /* stack bottom */
            g_string_printf(buf, TARGET_ABI_FMT_ld " ", ts->info->start_stack);
        } else {
            /* for the rest, there is MasterCard */
            g_string_printf(buf, "0%c", i == 43 ? '\n' : ' ');
        }

        if (write(fd, buf->str, buf->len) != buf->len) {
            return -1;
        }
    }

    return 0;
}
/*
 * Fake /proc/self/auxv: copy the auxiliary vector saved at image-load
 * time from the guest stack out to @fd.  Always returns 0; a failed
 * write simply truncates the faked file.
 */
static int open_self_auxv(CPUArchState *cpu_env, int fd)
{
    CPUState *cpu = env_cpu(cpu_env);
    TaskState *ts = get_task_state(cpu);
    abi_ulong auxv = ts->info->saved_auxv;
    abi_ulong len = ts->info->auxv_len;
    char *ptr;

    /*
     * Auxiliary vector is stored in target process stack.
     * read in whole auxv vector and copy it to file
     */
    ptr = lock_user(VERIFY_READ, auxv, len, 0);
    if (ptr != NULL) {
        while (len > 0) {
            ssize_t r;
            r = write(fd, ptr, len);
            if (r <= 0) {
                break;
            }
            len -= r;
            ptr += r;
        }
        lseek(fd, 0, SEEK_SET);
        /*
         * NOTE(review): ptr and len were advanced by the loop, so this
         * unlocks with the advanced host pointer and residual length
         * rather than the original values -- looks harmless for a
         * read-only lock, but worth confirming.
         */
        unlock_user(ptr, auxv, len);
    }

    return 0;
}
  7481. static int is_proc_myself(const char *filename, const char *entry)
  7482. {
  7483. if (!strncmp(filename, "/proc/", strlen("/proc/"))) {
  7484. filename += strlen("/proc/");
  7485. if (!strncmp(filename, "self/", strlen("self/"))) {
  7486. filename += strlen("self/");
  7487. } else if (*filename >= '1' && *filename <= '9') {
  7488. char myself[80];
  7489. snprintf(myself, sizeof(myself), "%d/", getpid());
  7490. if (!strncmp(filename, myself, strlen(myself))) {
  7491. filename += strlen(myself);
  7492. } else {
  7493. return 0;
  7494. }
  7495. } else {
  7496. return 0;
  7497. }
  7498. if (!strcmp(filename, entry)) {
  7499. return 1;
  7500. }
  7501. }
  7502. return 0;
  7503. }
/*
 * Write a crash report to @logfile: @fmt formatted with @code, the
 * emulated executable path, the CPU register state, and a maps dump.
 * Does nothing when @logfile is NULL, so callers may pass through a
 * failed log-open directly.
 */
static void excp_dump_file(FILE *logfile, CPUArchState *env,
                           const char *fmt, int code)
{
    if (logfile) {
        CPUState *cs = env_cpu(env);

        fprintf(logfile, fmt, code);
        fprintf(logfile, "Failing executable: %s\n", exec_path);
        cpu_dump_state(cs, logfile, 0);
        open_self_maps(env, fileno(logfile));
    }
}
  7515. void target_exception_dump(CPUArchState *env, const char *fmt, int code)
  7516. {
  7517. /* dump to console */
  7518. excp_dump_file(stderr, env, fmt, code);
  7519. /* dump to log file */
  7520. if (qemu_log_separate()) {
  7521. FILE *logfile = qemu_log_trylock();
  7522. excp_dump_file(logfile, env, fmt, code);
  7523. qemu_log_unlock(logfile);
  7524. }
  7525. }
  7526. #include "target_proc.h"
  7527. #if HOST_BIG_ENDIAN != TARGET_BIG_ENDIAN || \
  7528. defined(HAVE_ARCH_PROC_CPUINFO) || \
  7529. defined(HAVE_ARCH_PROC_HARDWARE)
  7530. static int is_proc(const char *filename, const char *entry)
  7531. {
  7532. return strcmp(filename, entry) == 0;
  7533. }
  7534. #endif
  7535. #if HOST_BIG_ENDIAN != TARGET_BIG_ENDIAN
  7536. static int open_net_route(CPUArchState *cpu_env, int fd)
  7537. {
  7538. FILE *fp;
  7539. char *line = NULL;
  7540. size_t len = 0;
  7541. ssize_t read;
  7542. fp = fopen("/proc/net/route", "r");
  7543. if (fp == NULL) {
  7544. return -1;
  7545. }
  7546. /* read header */
  7547. read = getline(&line, &len, fp);
  7548. dprintf(fd, "%s", line);
  7549. /* read routes */
  7550. while ((read = getline(&line, &len, fp)) != -1) {
  7551. char iface[16];
  7552. uint32_t dest, gw, mask;
  7553. unsigned int flags, refcnt, use, metric, mtu, window, irtt;
  7554. int fields;
  7555. fields = sscanf(line,
  7556. "%s\t%08x\t%08x\t%04x\t%d\t%d\t%d\t%08x\t%d\t%u\t%u\n",
  7557. iface, &dest, &gw, &flags, &refcnt, &use, &metric,
  7558. &mask, &mtu, &window, &irtt);
  7559. if (fields != 11) {
  7560. continue;
  7561. }
  7562. dprintf(fd, "%s\t%08x\t%08x\t%04x\t%d\t%d\t%d\t%08x\t%d\t%u\t%u\n",
  7563. iface, tswap32(dest), tswap32(gw), flags, refcnt, use,
  7564. metric, tswap32(mask), mtu, window, irtt);
  7565. }
  7566. free(line);
  7567. fclose(fp);
  7568. return 0;
  7569. }
  7570. #endif
/*
 * Intercept opens of /proc files that QEMU must synthesize for the
 * guest (self/maps, smaps, stat, auxv, cmdline, and selected absolute
 * /proc paths).  Returns a host fd holding faked contents, a negative
 * value with errno set on error, or -2 when the path is not faked and
 * the caller should perform the real open.
 */
static int maybe_do_fake_open(CPUArchState *cpu_env, int dirfd,
                              const char *fname, int flags, mode_t mode,
                              int openat2_resolve, bool safe)
{
    g_autofree char *proc_name = NULL;
    const char *pathname;
    struct fake_open {
        const char *filename;                     /* name or full path */
        int (*fill)(CPUArchState *cpu_env, int fd); /* content generator */
        int (*cmp)(const char *s1, const char *s2); /* match predicate */
    };
    const struct fake_open *fake_open;
    static const struct fake_open fakes[] = {
        { "maps", open_self_maps, is_proc_myself },
        { "smaps", open_self_smaps, is_proc_myself },
        { "stat", open_self_stat, is_proc_myself },
        { "auxv", open_self_auxv, is_proc_myself },
        { "cmdline", open_self_cmdline, is_proc_myself },
#if HOST_BIG_ENDIAN != TARGET_BIG_ENDIAN
        { "/proc/net/route", open_net_route, is_proc },
#endif
#if defined(HAVE_ARCH_PROC_CPUINFO)
        { "/proc/cpuinfo", open_cpuinfo, is_proc },
#endif
#if defined(HAVE_ARCH_PROC_HARDWARE)
        { "/proc/hardware", open_hardware, is_proc },
#endif
        { NULL, NULL, NULL }
    };

    /* if this is a file from /proc/ filesystem, expand full name */
    proc_name = realpath(fname, NULL);
    if (proc_name && strncmp(proc_name, "/proc/", 6) == 0) {
        pathname = proc_name;
    } else {
        pathname = fname;
    }

    /* /proc/<self>/exe is redirected to the emulated executable. */
    if (is_proc_myself(pathname, "exe")) {
        /* Honor openat2 resolve flags */
        if ((openat2_resolve & RESOLVE_NO_MAGICLINKS) ||
            (openat2_resolve & RESOLVE_NO_SYMLINKS)) {
            errno = ELOOP;
            return -1;
        }
        if (safe) {
            return safe_openat(dirfd, exec_path, flags, mode);
        } else {
            return openat(dirfd, exec_path, flags, mode);
        }
    }

    for (fake_open = fakes; fake_open->filename; fake_open++) {
        if (fake_open->cmp(pathname, fake_open->filename)) {
            break;
        }
    }

    if (fake_open->filename) {
        const char *tmpdir;
        char filename[PATH_MAX];
        int fd, r;

        /* Prefer an anonymous memfd; fall back to an unlinked tmpfile. */
        fd = memfd_create("qemu-open", 0);
        if (fd < 0) {
            if (errno != ENOSYS) {
                return fd;
            }
            /* create temporary file to map stat to */
            tmpdir = getenv("TMPDIR");
            if (!tmpdir)
                tmpdir = "/tmp";
            snprintf(filename, sizeof(filename), "%s/qemu-open.XXXXXX", tmpdir);
            fd = mkstemp(filename);
            if (fd < 0) {
                return fd;
            }
            unlink(filename);
        }

        /* Generate the faked contents, then rewind for the caller. */
        if ((r = fake_open->fill(cpu_env, fd))) {
            int e = errno;
            close(fd);
            errno = e;
            return r;
        }
        lseek(fd, 0, SEEK_SET);

        return fd;
    }

    return -2;
}
  7656. int do_guest_openat(CPUArchState *cpu_env, int dirfd, const char *pathname,
  7657. int flags, mode_t mode, bool safe)
  7658. {
  7659. int fd = maybe_do_fake_open(cpu_env, dirfd, pathname, flags, mode, 0, safe);
  7660. if (fd > -2) {
  7661. return fd;
  7662. }
  7663. if (safe) {
  7664. return safe_openat(dirfd, path(pathname), flags, mode);
  7665. } else {
  7666. return openat(dirfd, path(pathname), flags, mode);
  7667. }
  7668. }
/*
 * Emulate openat2(2): copy the guest's struct open_how (tolerating
 * future size extension via copy_struct_from_user), byte-swap its
 * 64-bit fields, route faked /proc paths through maybe_do_fake_open(),
 * and otherwise forward to the host syscall.
 */
static int do_openat2(CPUArchState *cpu_env, abi_long dirfd,
                      abi_ptr guest_pathname, abi_ptr guest_open_how,
                      abi_ulong guest_size)
{
    struct open_how_ver0 how = {0};
    char *pathname;
    int ret;

    if (guest_size < sizeof(struct target_open_how_ver0)) {
        return -TARGET_EINVAL;
    }
    ret = copy_struct_from_user(&how, sizeof(how), guest_open_how, guest_size);
    if (ret) {
        if (ret == -TARGET_E2BIG) {
            /* Guest passed a newer, larger open_how than we support. */
            qemu_log_mask(LOG_UNIMP,
                          "Unimplemented openat2 open_how size: "
                          TARGET_ABI_FMT_lu "\n", guest_size);
        }
        return ret;
    }
    pathname = lock_user_string(guest_pathname);
    if (!pathname) {
        return -TARGET_EFAULT;
    }

    /* All open_how fields are 64-bit and arrive in guest byte order. */
    how.flags = target_to_host_bitmask(tswap64(how.flags), fcntl_flags_tbl);
    how.mode = tswap64(how.mode);
    how.resolve = tswap64(how.resolve);

    int fd = maybe_do_fake_open(cpu_env, dirfd, pathname, how.flags, how.mode,
                                how.resolve, true);
    if (fd > -2) {
        ret = get_errno(fd);
    } else {
        ret = get_errno(safe_openat2(dirfd, pathname, &how,
                                     sizeof(struct open_how_ver0)));
    }

    fd_trans_unregister(ret);
    unlock_user(pathname, guest_pathname, 0);

    return ret;
}
  7707. ssize_t do_guest_readlink(const char *pathname, char *buf, size_t bufsiz)
  7708. {
  7709. ssize_t ret;
  7710. if (!pathname || !buf) {
  7711. errno = EFAULT;
  7712. return -1;
  7713. }
  7714. if (!bufsiz) {
  7715. /* Short circuit this for the magic exe check. */
  7716. errno = EINVAL;
  7717. return -1;
  7718. }
  7719. if (is_proc_myself((const char *)pathname, "exe")) {
  7720. /*
  7721. * Don't worry about sign mismatch as earlier mapping
  7722. * logic would have thrown a bad address error.
  7723. */
  7724. ret = MIN(strlen(exec_path), bufsiz);
  7725. /* We cannot NUL terminate the string. */
  7726. memcpy(buf, exec_path, ret);
  7727. } else {
  7728. ret = readlink(path(pathname), buf, bufsiz);
  7729. }
  7730. return ret;
  7731. }
/*
 * Common implementation of execve(2) and execveat(2) for the guest.
 *
 * @pathname, @guest_argp and @guest_envp are guest addresses; the
 * argv/envp arrays are NULL-terminated arrays of guest string pointers.
 * Returns a target errno; on success the host execve does not return.
 */
static int do_execv(CPUArchState *cpu_env, int dirfd,
                    abi_long pathname, abi_long guest_argp,
                    abi_long guest_envp, int flags, bool is_execveat)
{
    int ret;
    char **argp, **envp;
    int argc, envc;
    abi_ulong gp;
    abi_ulong addr;
    char **q;
    void *p;

    /* First pass: count argv entries up to the NULL terminator. */
    argc = 0;
    for (gp = guest_argp; gp; gp += sizeof(abi_ulong)) {
        if (get_user_ual(addr, gp)) {
            return -TARGET_EFAULT;
        }
        if (!addr) {
            break;
        }
        argc++;
    }
    /* ...and the same for envp. */
    envc = 0;
    for (gp = guest_envp; gp; gp += sizeof(abi_ulong)) {
        if (get_user_ual(addr, gp)) {
            return -TARGET_EFAULT;
        }
        if (!addr) {
            break;
        }
        envc++;
    }

    /* Second pass: lock each guest string into a host pointer array. */
    argp = g_new0(char *, argc + 1);
    envp = g_new0(char *, envc + 1);

    for (gp = guest_argp, q = argp; gp; gp += sizeof(abi_ulong), q++) {
        if (get_user_ual(addr, gp)) {
            goto execve_efault;
        }
        if (!addr) {
            break;
        }
        *q = lock_user_string(addr);
        if (!*q) {
            goto execve_efault;
        }
    }
    *q = NULL;

    for (gp = guest_envp, q = envp; gp; gp += sizeof(abi_ulong), q++) {
        if (get_user_ual(addr, gp)) {
            goto execve_efault;
        }
        if (!addr) {
            break;
        }
        *q = lock_user_string(addr);
        if (!*q) {
            goto execve_efault;
        }
    }
    *q = NULL;

    /*
     * Although execve() is not an interruptible syscall it is
     * a special case where we must use the safe_syscall wrapper:
     * if we allow a signal to happen before we make the host
     * syscall then we will 'lose' it, because at the point of
     * execve the process leaves QEMU's control. So we use the
     * safe syscall wrapper to ensure that we either take the
     * signal as a guest signal, or else it does not happen
     * before the execve completes and makes it the other
     * program's problem.
     */
    p = lock_user_string(pathname);
    if (!p) {
        goto execve_efault;
    }

    /* /proc/<self>/exe redirects to the real emulated executable. */
    const char *exe = p;
    if (is_proc_myself(p, "exe")) {
        exe = exec_path;
    }
    ret = is_execveat
        ? safe_execveat(dirfd, exe, argp, envp, flags)
        : safe_execve(exe, argp, envp);
    ret = get_errno(ret);

    unlock_user(p, pathname, 0);

    goto execve_end;

execve_efault:
    ret = -TARGET_EFAULT;

execve_end:
    /* Unlock every guest string that was successfully locked above. */
    for (gp = guest_argp, q = argp; *q; gp += sizeof(abi_ulong), q++) {
        if (get_user_ual(addr, gp) || !addr) {
            break;
        }
        unlock_user(*q, addr, 0);
    }
    for (gp = guest_envp, q = envp; *q; gp += sizeof(abi_ulong), q++) {
        if (get_user_ual(addr, gp) || !addr) {
            break;
        }
        unlock_user(*q, addr, 0);
    }

    g_free(argp);
    g_free(envp);
    return ret;
}
/* Timer IDs handed to the guest: a magic cookie ORed with a 16-bit index. */
#define TIMER_MAGIC 0x0caf0000
#define TIMER_MAGIC_MASK 0xffff0000

/* Convert QEMU provided timer ID back to internal 16bit index format */
static target_timer_t get_timer_id(abi_long arg)
{
    target_timer_t timerid = arg;

    /* Reject IDs whose upper half does not carry our magic cookie. */
    if ((timerid & TIMER_MAGIC_MASK) != TIMER_MAGIC) {
        return -TARGET_EINVAL;
    }

    timerid &= 0xffff;

    /* The low 16 bits index the g_posix_timers[] table. */
    if (timerid >= ARRAY_SIZE(g_posix_timers)) {
        return -TARGET_EINVAL;
    }

    return timerid;
}
/*
 * Copy a CPU affinity mask from guest memory into a host mask.  Guest
 * and host may use different word sizes, so bits are transferred one
 * at a time by global bit index.  @host_size must be >= @target_size.
 * Returns 0 on success or -TARGET_EFAULT.
 */
static int target_to_host_cpu_mask(unsigned long *host_mask,
                                   size_t host_size,
                                   abi_ulong target_addr,
                                   size_t target_size)
{
    unsigned target_bits = sizeof(abi_ulong) * 8;
    unsigned host_bits = sizeof(*host_mask) * 8;
    abi_ulong *target_mask;
    unsigned i, j;

    assert(host_size >= target_size);
    target_mask = lock_user(VERIFY_READ, target_addr, target_size, 1);
    if (!target_mask) {
        return -TARGET_EFAULT;
    }
    memset(host_mask, 0, host_size);

    for (i = 0 ; i < target_size / sizeof(abi_ulong); i++) {
        unsigned bit = i * target_bits;  /* global index of this word's bit 0 */
        abi_ulong val;

        __get_user(val, &target_mask[i]);
        for (j = 0; j < target_bits; j++, bit++) {
            if (val & (1UL << j)) {
                host_mask[bit / host_bits] |= 1UL << (bit % host_bits);
            }
        }
    }

    unlock_user(target_mask, target_addr, 0);
    return 0;
}
  7878. static int host_to_target_cpu_mask(const unsigned long *host_mask,
  7879. size_t host_size,
  7880. abi_ulong target_addr,
  7881. size_t target_size)
  7882. {
  7883. unsigned target_bits = sizeof(abi_ulong) * 8;
  7884. unsigned host_bits = sizeof(*host_mask) * 8;
  7885. abi_ulong *target_mask;
  7886. unsigned i, j;
  7887. assert(host_size >= target_size);
  7888. target_mask = lock_user(VERIFY_WRITE, target_addr, target_size, 0);
  7889. if (!target_mask) {
  7890. return -TARGET_EFAULT;
  7891. }
  7892. for (i = 0 ; i < target_size / sizeof(abi_ulong); i++) {
  7893. unsigned bit = i * target_bits;
  7894. abi_ulong val = 0;
  7895. for (j = 0; j < target_bits; j++, bit++) {
  7896. if (host_mask[bit / host_bits] & (1UL << (bit % host_bits))) {
  7897. val |= 1UL << j;
  7898. }
  7899. }
  7900. __put_user(val, &target_mask[i]);
  7901. }
  7902. unlock_user(target_mask, target_addr, target_size);
  7903. return 0;
  7904. }
#ifdef TARGET_NR_getdents
/*
 * Emulate getdents(): read host directory entries into a bounce
 * buffer, then repack them one record at a time into the guest buffer
 * at @arg2, converting layout and byte order.  Returns the number of
 * bytes stored in the guest buffer, or a negative target errno.
 */
static int do_getdents(abi_long dirfd, abi_long arg2, abi_long count)
{
    g_autofree void *hdirp = NULL;
    void *tdirp;
    int hlen, hoff, toff;
    int hreclen, treclen;
    off_t prev_diroff = 0;

    hdirp = g_try_malloc(count);
    if (!hdirp) {
        return -TARGET_ENOMEM;
    }

#ifdef EMULATE_GETDENTS_WITH_GETDENTS
    hlen = sys_getdents(dirfd, hdirp, count);
#else
    hlen = sys_getdents64(dirfd, hdirp, count);
#endif

    hlen = get_errno(hlen);
    if (is_error(hlen)) {
        return hlen;
    }

    tdirp = lock_user(VERIFY_WRITE, arg2, count, 0);
    if (!tdirp) {
        return -TARGET_EFAULT;
    }

    for (hoff = toff = 0; hoff < hlen; hoff += hreclen, toff += treclen) {
#ifdef EMULATE_GETDENTS_WITH_GETDENTS
        struct linux_dirent *hde = hdirp + hoff;
#else
        struct linux_dirent64 *hde = hdirp + hoff;
#endif
        struct target_dirent *tde = tdirp + toff;
        int namelen;
        uint8_t type;

        /* Target record: header + name + NUL + trailing d_type byte. */
        namelen = strlen(hde->d_name);
        hreclen = hde->d_reclen;
        treclen = offsetof(struct target_dirent, d_name) + namelen + 2;
        treclen = QEMU_ALIGN_UP(treclen, __alignof(struct target_dirent));

        if (toff + treclen > count) {
            /*
             * If the host struct is smaller than the target struct, or
             * requires less alignment and thus packs into less space,
             * then the host can return more entries than we can pass
             * on to the guest.
             */
            if (toff == 0) {
                toff = -TARGET_EINVAL; /* result buffer is too small */
                break;
            }
            /*
             * Return what we have, resetting the file pointer to the
             * location of the first record not returned.
             */
            lseek(dirfd, prev_diroff, SEEK_SET);
            break;
        }

        prev_diroff = hde->d_off;
        tde->d_ino = tswapal(hde->d_ino);
        tde->d_off = tswapal(hde->d_off);
        tde->d_reclen = tswap16(treclen);
        memcpy(tde->d_name, hde->d_name, namelen + 1);

        /*
         * The getdents type is in what was formerly a padding byte at the
         * end of the structure.
         */
#ifdef EMULATE_GETDENTS_WITH_GETDENTS
        type = *((uint8_t *)hde + hreclen - 1);
#else
        type = hde->d_type;
#endif
        *((uint8_t *)tde + treclen - 1) = type;
    }

    unlock_user(tdirp, arg2, toff);
    return toff;
}
#endif /* TARGET_NR_getdents */
#if defined(TARGET_NR_getdents64) && defined(__NR_getdents64)
/*
 * Emulate getdents64(): read host dirent64 records into a bounce
 * buffer and repack them into the guest buffer at @arg2 with target
 * layout and byte order.  Returns the byte count stored in the guest
 * buffer, or a negative target errno.
 */
static int do_getdents64(abi_long dirfd, abi_long arg2, abi_long count)
{
    g_autofree void *hdirp = NULL;
    void *tdirp;
    int hlen, hoff, toff;
    int hreclen, treclen;
    off_t prev_diroff = 0;

    hdirp = g_try_malloc(count);
    if (!hdirp) {
        return -TARGET_ENOMEM;
    }

    hlen = get_errno(sys_getdents64(dirfd, hdirp, count));
    if (is_error(hlen)) {
        return hlen;
    }

    tdirp = lock_user(VERIFY_WRITE, arg2, count, 0);
    if (!tdirp) {
        return -TARGET_EFAULT;
    }

    for (hoff = toff = 0; hoff < hlen; hoff += hreclen, toff += treclen) {
        struct linux_dirent64 *hde = hdirp + hoff;
        struct target_dirent64 *tde = tdirp + toff;
        int namelen;

        /* Target record length: header + name (incl. NUL), aligned. */
        namelen = strlen(hde->d_name) + 1;
        hreclen = hde->d_reclen;
        treclen = offsetof(struct target_dirent64, d_name) + namelen;
        treclen = QEMU_ALIGN_UP(treclen, __alignof(struct target_dirent64));

        if (toff + treclen > count) {
            /*
             * If the host struct is smaller than the target struct, or
             * requires less alignment and thus packs into less space,
             * then the host can return more entries than we can pass
             * on to the guest.
             */
            if (toff == 0) {
                toff = -TARGET_EINVAL; /* result buffer is too small */
                break;
            }
            /*
             * Return what we have, resetting the file pointer to the
             * location of the first record not returned.
             */
            lseek(dirfd, prev_diroff, SEEK_SET);
            break;
        }

        prev_diroff = hde->d_off;
        tde->d_ino = tswap64(hde->d_ino);
        tde->d_off = tswap64(hde->d_off);
        tde->d_reclen = tswap16(treclen);
        tde->d_type = hde->d_type;
        memcpy(tde->d_name, hde->d_name, namelen);
    }

    unlock_user(tdirp, arg2, toff);
    return toff;
}
#endif /* TARGET_NR_getdents64 */
  8038. #if defined(TARGET_NR_riscv_hwprobe)
  8039. #define RISCV_HWPROBE_KEY_MVENDORID 0
  8040. #define RISCV_HWPROBE_KEY_MARCHID 1
  8041. #define RISCV_HWPROBE_KEY_MIMPID 2
  8042. #define RISCV_HWPROBE_KEY_BASE_BEHAVIOR 3
  8043. #define RISCV_HWPROBE_BASE_BEHAVIOR_IMA (1 << 0)
  8044. #define RISCV_HWPROBE_KEY_IMA_EXT_0 4
  8045. #define RISCV_HWPROBE_IMA_FD (1 << 0)
  8046. #define RISCV_HWPROBE_IMA_C (1 << 1)
  8047. #define RISCV_HWPROBE_IMA_V (1 << 2)
  8048. #define RISCV_HWPROBE_EXT_ZBA (1 << 3)
  8049. #define RISCV_HWPROBE_EXT_ZBB (1 << 4)
  8050. #define RISCV_HWPROBE_EXT_ZBS (1 << 5)
  8051. #define RISCV_HWPROBE_EXT_ZICBOZ (1 << 6)
  8052. #define RISCV_HWPROBE_EXT_ZBC (1 << 7)
  8053. #define RISCV_HWPROBE_EXT_ZBKB (1 << 8)
  8054. #define RISCV_HWPROBE_EXT_ZBKC (1 << 9)
  8055. #define RISCV_HWPROBE_EXT_ZBKX (1 << 10)
  8056. #define RISCV_HWPROBE_EXT_ZKND (1 << 11)
  8057. #define RISCV_HWPROBE_EXT_ZKNE (1 << 12)
  8058. #define RISCV_HWPROBE_EXT_ZKNH (1 << 13)
  8059. #define RISCV_HWPROBE_EXT_ZKSED (1 << 14)
  8060. #define RISCV_HWPROBE_EXT_ZKSH (1 << 15)
  8061. #define RISCV_HWPROBE_EXT_ZKT (1 << 16)
  8062. #define RISCV_HWPROBE_EXT_ZVBB (1 << 17)
  8063. #define RISCV_HWPROBE_EXT_ZVBC (1 << 18)
  8064. #define RISCV_HWPROBE_EXT_ZVKB (1 << 19)
  8065. #define RISCV_HWPROBE_EXT_ZVKG (1 << 20)
  8066. #define RISCV_HWPROBE_EXT_ZVKNED (1 << 21)
  8067. #define RISCV_HWPROBE_EXT_ZVKNHA (1 << 22)
  8068. #define RISCV_HWPROBE_EXT_ZVKNHB (1 << 23)
  8069. #define RISCV_HWPROBE_EXT_ZVKSED (1 << 24)
  8070. #define RISCV_HWPROBE_EXT_ZVKSH (1 << 25)
  8071. #define RISCV_HWPROBE_EXT_ZVKT (1 << 26)
  8072. #define RISCV_HWPROBE_EXT_ZFH (1 << 27)
  8073. #define RISCV_HWPROBE_EXT_ZFHMIN (1 << 28)
  8074. #define RISCV_HWPROBE_EXT_ZIHINTNTL (1 << 29)
  8075. #define RISCV_HWPROBE_EXT_ZVFH (1 << 30)
  8076. #define RISCV_HWPROBE_EXT_ZVFHMIN (1ULL << 31)
  8077. #define RISCV_HWPROBE_EXT_ZFA (1ULL << 32)
  8078. #define RISCV_HWPROBE_EXT_ZTSO (1ULL << 33)
  8079. #define RISCV_HWPROBE_EXT_ZACAS (1ULL << 34)
  8080. #define RISCV_HWPROBE_EXT_ZICOND (1ULL << 35)
  8081. #define RISCV_HWPROBE_KEY_CPUPERF_0 5
  8082. #define RISCV_HWPROBE_MISALIGNED_UNKNOWN (0 << 0)
  8083. #define RISCV_HWPROBE_MISALIGNED_EMULATED (1 << 0)
  8084. #define RISCV_HWPROBE_MISALIGNED_SLOW (2 << 0)
  8085. #define RISCV_HWPROBE_MISALIGNED_FAST (3 << 0)
  8086. #define RISCV_HWPROBE_MISALIGNED_UNSUPPORTED (4 << 0)
  8087. #define RISCV_HWPROBE_MISALIGNED_MASK (7 << 0)
  8088. #define RISCV_HWPROBE_KEY_ZICBOZ_BLOCK_SIZE 6
  8089. struct riscv_hwprobe {
  8090. abi_llong key;
  8091. abi_ullong value;
  8092. };
/*
 * Fill in an array of guest riscv_hwprobe key/value pairs.
 *
 * For each pair: read the requested key from guest memory, compute the
 * matching value from the emulated CPU's configuration, and write the
 * value back into guest memory.  For keys this implementation does not
 * recognize, the pair's key field is overwritten with -1 (and its value
 * left at 0).
 *
 * @env:        the RISC-V CPU state being probed
 * @pair:       guest array of pairs (already locked for writing by caller)
 * @pair_count: number of entries in @pair
 */
static void risc_hwprobe_fill_pairs(CPURISCVState *env,
                                    struct riscv_hwprobe *pair,
                                    size_t pair_count)
{
    const RISCVCPUConfig *cfg = riscv_cpu_cfg(env);

    for (; pair_count > 0; pair_count--, pair++) {
        abi_llong key;
        abi_ullong value;

        /* Default the value to 0 so unhandled keys report nothing. */
        __put_user(0, &pair->value);
        __get_user(key, &pair->key);
        switch (key) {
        case RISCV_HWPROBE_KEY_MVENDORID:
            __put_user(cfg->mvendorid, &pair->value);
            break;
        case RISCV_HWPROBE_KEY_MARCHID:
            __put_user(cfg->marchid, &pair->value);
            break;
        case RISCV_HWPROBE_KEY_MIMPID:
            __put_user(cfg->mimpid, &pair->value);
            break;
        case RISCV_HWPROBE_KEY_BASE_BEHAVIOR:
            /* IMA base behavior requires all of I, M and A. */
            value = riscv_has_ext(env, RVI) &&
                    riscv_has_ext(env, RVM) &&
                    riscv_has_ext(env, RVA) ?
                    RISCV_HWPROBE_BASE_BEHAVIOR_IMA : 0;
            __put_user(value, &pair->value);
            break;
        case RISCV_HWPROBE_KEY_IMA_EXT_0:
            /* Build the extension bitmap from the CPU configuration. */
            value = riscv_has_ext(env, RVF) &&
                    riscv_has_ext(env, RVD) ?
                    RISCV_HWPROBE_IMA_FD : 0;
            value |= riscv_has_ext(env, RVC) ?
                     RISCV_HWPROBE_IMA_C : 0;
            value |= riscv_has_ext(env, RVV) ?
                     RISCV_HWPROBE_IMA_V : 0;
            value |= cfg->ext_zba ?
                     RISCV_HWPROBE_EXT_ZBA : 0;
            value |= cfg->ext_zbb ?
                     RISCV_HWPROBE_EXT_ZBB : 0;
            value |= cfg->ext_zbs ?
                     RISCV_HWPROBE_EXT_ZBS : 0;
            value |= cfg->ext_zicboz ?
                     RISCV_HWPROBE_EXT_ZICBOZ : 0;
            value |= cfg->ext_zbc ?
                     RISCV_HWPROBE_EXT_ZBC : 0;
            value |= cfg->ext_zbkb ?
                     RISCV_HWPROBE_EXT_ZBKB : 0;
            value |= cfg->ext_zbkc ?
                     RISCV_HWPROBE_EXT_ZBKC : 0;
            value |= cfg->ext_zbkx ?
                     RISCV_HWPROBE_EXT_ZBKX : 0;
            value |= cfg->ext_zknd ?
                     RISCV_HWPROBE_EXT_ZKND : 0;
            value |= cfg->ext_zkne ?
                     RISCV_HWPROBE_EXT_ZKNE : 0;
            value |= cfg->ext_zknh ?
                     RISCV_HWPROBE_EXT_ZKNH : 0;
            value |= cfg->ext_zksed ?
                     RISCV_HWPROBE_EXT_ZKSED : 0;
            value |= cfg->ext_zksh ?
                     RISCV_HWPROBE_EXT_ZKSH : 0;
            value |= cfg->ext_zkt ?
                     RISCV_HWPROBE_EXT_ZKT : 0;
            value |= cfg->ext_zvbb ?
                     RISCV_HWPROBE_EXT_ZVBB : 0;
            value |= cfg->ext_zvbc ?
                     RISCV_HWPROBE_EXT_ZVBC : 0;
            value |= cfg->ext_zvkb ?
                     RISCV_HWPROBE_EXT_ZVKB : 0;
            value |= cfg->ext_zvkg ?
                     RISCV_HWPROBE_EXT_ZVKG : 0;
            value |= cfg->ext_zvkned ?
                     RISCV_HWPROBE_EXT_ZVKNED : 0;
            value |= cfg->ext_zvknha ?
                     RISCV_HWPROBE_EXT_ZVKNHA : 0;
            value |= cfg->ext_zvknhb ?
                     RISCV_HWPROBE_EXT_ZVKNHB : 0;
            value |= cfg->ext_zvksed ?
                     RISCV_HWPROBE_EXT_ZVKSED : 0;
            value |= cfg->ext_zvksh ?
                     RISCV_HWPROBE_EXT_ZVKSH : 0;
            value |= cfg->ext_zvkt ?
                     RISCV_HWPROBE_EXT_ZVKT : 0;
            value |= cfg->ext_zfh ?
                     RISCV_HWPROBE_EXT_ZFH : 0;
            value |= cfg->ext_zfhmin ?
                     RISCV_HWPROBE_EXT_ZFHMIN : 0;
            value |= cfg->ext_zihintntl ?
                     RISCV_HWPROBE_EXT_ZIHINTNTL : 0;
            value |= cfg->ext_zvfh ?
                     RISCV_HWPROBE_EXT_ZVFH : 0;
            value |= cfg->ext_zvfhmin ?
                     RISCV_HWPROBE_EXT_ZVFHMIN : 0;
            value |= cfg->ext_zfa ?
                     RISCV_HWPROBE_EXT_ZFA : 0;
            value |= cfg->ext_ztso ?
                     RISCV_HWPROBE_EXT_ZTSO : 0;
            value |= cfg->ext_zacas ?
                     RISCV_HWPROBE_EXT_ZACAS : 0;
            value |= cfg->ext_zicond ?
                     RISCV_HWPROBE_EXT_ZICOND : 0;
            __put_user(value, &pair->value);
            break;
        case RISCV_HWPROBE_KEY_CPUPERF_0:
            /* Emulated accesses have no misalignment penalty: report FAST. */
            __put_user(RISCV_HWPROBE_MISALIGNED_FAST, &pair->value);
            break;
        case RISCV_HWPROBE_KEY_ZICBOZ_BLOCK_SIZE:
            /* Block size is only meaningful when Zicboz is enabled. */
            value = cfg->ext_zicboz ? cfg->cboz_blocksize : 0;
            __put_user(value, &pair->value);
            break;
        default:
            /* Unknown key: flag it back to the guest with key = -1. */
            __put_user(-1, &pair->key);
            break;
        }
    }
}
  8209. /*
  8210. * If the cpumask_t of (target_cpus, cpusetsize) cannot be read: -EFAULT.
 * If the cpumask_t has no bits set: -EINVAL.
  8212. * Otherwise the cpumask_t contains some bit set: 0.
  8213. * Unlike the kernel, we do not mask cpumask_t by the set of online cpus,
  8214. * nor bound the search by cpumask_size().
  8215. */
  8216. static int nonempty_cpu_set(abi_ulong cpusetsize, abi_ptr target_cpus)
  8217. {
  8218. unsigned char *p = lock_user(VERIFY_READ, target_cpus, cpusetsize, 1);
  8219. int ret = -TARGET_EFAULT;
  8220. if (p) {
  8221. ret = -TARGET_EINVAL;
  8222. /*
  8223. * Since we only care about the empty/non-empty state of the cpumask_t
  8224. * not the individual bits, we do not need to repartition the bits
  8225. * from target abi_ulong to host unsigned long.
  8226. *
  8227. * Note that the kernel does not round up cpusetsize to a multiple of
  8228. * sizeof(abi_ulong). After bounding cpusetsize by cpumask_size(),
  8229. * it copies exactly cpusetsize bytes into a zeroed buffer.
  8230. */
  8231. for (abi_ulong i = 0; i < cpusetsize; ++i) {
  8232. if (p[i]) {
  8233. ret = 0;
  8234. break;
  8235. }
  8236. }
  8237. unlock_user(p, target_cpus, 0);
  8238. }
  8239. return ret;
  8240. }
  8241. static abi_long do_riscv_hwprobe(CPUArchState *cpu_env, abi_long arg1,
  8242. abi_long arg2, abi_long arg3,
  8243. abi_long arg4, abi_long arg5)
  8244. {
  8245. int ret;
  8246. struct riscv_hwprobe *host_pairs;
  8247. /* flags must be 0 */
  8248. if (arg5 != 0) {
  8249. return -TARGET_EINVAL;
  8250. }
  8251. /* check cpu_set */
  8252. if (arg3 != 0) {
  8253. ret = nonempty_cpu_set(arg3, arg4);
  8254. if (ret != 0) {
  8255. return ret;
  8256. }
  8257. } else if (arg4 != 0) {
  8258. return -TARGET_EINVAL;
  8259. }
  8260. /* no pairs */
  8261. if (arg2 == 0) {
  8262. return 0;
  8263. }
  8264. host_pairs = lock_user(VERIFY_WRITE, arg1,
  8265. sizeof(*host_pairs) * (size_t)arg2, 0);
  8266. if (host_pairs == NULL) {
  8267. return -TARGET_EFAULT;
  8268. }
  8269. risc_hwprobe_fill_pairs(cpu_env, host_pairs, arg2);
  8270. unlock_user(host_pairs, arg1, sizeof(*host_pairs) * (size_t)arg2);
  8271. return 0;
  8272. }
  8273. #endif /* TARGET_NR_riscv_hwprobe */
/* Host syscall wrappers generated with the _syscall macros. */
#if defined(TARGET_NR_pivot_root) && defined(__NR_pivot_root)
_syscall2(int, pivot_root, const char *, new_root, const char *, put_old)
#endif
#if defined(TARGET_NR_open_tree) && defined(__NR_open_tree)
#define __NR_sys_open_tree __NR_open_tree
_syscall3(int, sys_open_tree, int, __dfd, const char *, __filename,
          unsigned int, __flags)
#endif
#if defined(TARGET_NR_move_mount) && defined(__NR_move_mount)
#define __NR_sys_move_mount __NR_move_mount
_syscall5(int, sys_move_mount, int, __from_dfd, const char *, __from_pathname,
          int, __to_dfd, const char *, __to_pathname, unsigned int, flag)
#endif
  8287. /* This is an internal helper for do_syscall so that it is easier
  8288. * to have a single return point, so that actions, such as logging
  8289. * of syscall results, can be performed.
  8290. * All errnos that do_syscall() returns must be -TARGET_<errcode>.
  8291. */
  8292. static abi_long do_syscall1(CPUArchState *cpu_env, int num, abi_long arg1,
  8293. abi_long arg2, abi_long arg3, abi_long arg4,
  8294. abi_long arg5, abi_long arg6, abi_long arg7,
  8295. abi_long arg8)
  8296. {
  8297. CPUState *cpu = env_cpu(cpu_env);
  8298. abi_long ret;
  8299. #if defined(TARGET_NR_stat) || defined(TARGET_NR_stat64) \
  8300. || defined(TARGET_NR_lstat) || defined(TARGET_NR_lstat64) \
  8301. || defined(TARGET_NR_fstat) || defined(TARGET_NR_fstat64) \
  8302. || defined(TARGET_NR_statx)
  8303. struct stat st;
  8304. #endif
  8305. #if defined(TARGET_NR_statfs) || defined(TARGET_NR_statfs64) \
  8306. || defined(TARGET_NR_fstatfs)
  8307. struct statfs stfs;
  8308. #endif
  8309. void *p;
  8310. switch(num) {
  8311. case TARGET_NR_exit:
  8312. /* In old applications this may be used to implement _exit(2).
  8313. However in threaded applications it is used for thread termination,
  8314. and _exit_group is used for application termination.
           Do thread termination if we have more than one thread. */
  8316. if (block_signals()) {
  8317. return -QEMU_ERESTARTSYS;
  8318. }
  8319. pthread_mutex_lock(&clone_lock);
  8320. if (CPU_NEXT(first_cpu)) {
  8321. TaskState *ts = get_task_state(cpu);
  8322. if (ts->child_tidptr) {
  8323. put_user_u32(0, ts->child_tidptr);
  8324. do_sys_futex(g2h(cpu, ts->child_tidptr),
  8325. FUTEX_WAKE, INT_MAX, NULL, NULL, 0);
  8326. }
  8327. object_unparent(OBJECT(cpu));
  8328. object_unref(OBJECT(cpu));
  8329. /*
  8330. * At this point the CPU should be unrealized and removed
  8331. * from cpu lists. We can clean-up the rest of the thread
  8332. * data without the lock held.
  8333. */
  8334. pthread_mutex_unlock(&clone_lock);
  8335. thread_cpu = NULL;
  8336. g_free(ts);
  8337. rcu_unregister_thread();
  8338. pthread_exit(NULL);
  8339. }
  8340. pthread_mutex_unlock(&clone_lock);
  8341. preexit_cleanup(cpu_env, arg1);
  8342. _exit(arg1);
  8343. return 0; /* avoid warning */
  8344. case TARGET_NR_read:
  8345. if (arg2 == 0 && arg3 == 0) {
  8346. return get_errno(safe_read(arg1, 0, 0));
  8347. } else {
  8348. if (!(p = lock_user(VERIFY_WRITE, arg2, arg3, 0)))
  8349. return -TARGET_EFAULT;
  8350. ret = get_errno(safe_read(arg1, p, arg3));
  8351. if (ret >= 0 &&
  8352. fd_trans_host_to_target_data(arg1)) {
  8353. ret = fd_trans_host_to_target_data(arg1)(p, ret);
  8354. }
  8355. unlock_user(p, arg2, ret);
  8356. }
  8357. return ret;
  8358. case TARGET_NR_write:
  8359. if (arg2 == 0 && arg3 == 0) {
  8360. return get_errno(safe_write(arg1, 0, 0));
  8361. }
  8362. if (!(p = lock_user(VERIFY_READ, arg2, arg3, 1)))
  8363. return -TARGET_EFAULT;
  8364. if (fd_trans_target_to_host_data(arg1)) {
  8365. void *copy = g_malloc(arg3);
  8366. memcpy(copy, p, arg3);
  8367. ret = fd_trans_target_to_host_data(arg1)(copy, arg3);
  8368. if (ret >= 0) {
  8369. ret = get_errno(safe_write(arg1, copy, ret));
  8370. }
  8371. g_free(copy);
  8372. } else {
  8373. ret = get_errno(safe_write(arg1, p, arg3));
  8374. }
  8375. unlock_user(p, arg2, 0);
  8376. return ret;
  8377. #ifdef TARGET_NR_open
  8378. case TARGET_NR_open:
  8379. if (!(p = lock_user_string(arg1)))
  8380. return -TARGET_EFAULT;
  8381. ret = get_errno(do_guest_openat(cpu_env, AT_FDCWD, p,
  8382. target_to_host_bitmask(arg2, fcntl_flags_tbl),
  8383. arg3, true));
  8384. fd_trans_unregister(ret);
  8385. unlock_user(p, arg1, 0);
  8386. return ret;
  8387. #endif
  8388. case TARGET_NR_openat:
  8389. if (!(p = lock_user_string(arg2)))
  8390. return -TARGET_EFAULT;
  8391. ret = get_errno(do_guest_openat(cpu_env, arg1, p,
  8392. target_to_host_bitmask(arg3, fcntl_flags_tbl),
  8393. arg4, true));
  8394. fd_trans_unregister(ret);
  8395. unlock_user(p, arg2, 0);
  8396. return ret;
  8397. case TARGET_NR_openat2:
  8398. ret = do_openat2(cpu_env, arg1, arg2, arg3, arg4);
  8399. return ret;
  8400. #if defined(TARGET_NR_name_to_handle_at) && defined(CONFIG_OPEN_BY_HANDLE)
  8401. case TARGET_NR_name_to_handle_at:
  8402. ret = do_name_to_handle_at(arg1, arg2, arg3, arg4, arg5);
  8403. return ret;
  8404. #endif
  8405. #if defined(TARGET_NR_open_by_handle_at) && defined(CONFIG_OPEN_BY_HANDLE)
  8406. case TARGET_NR_open_by_handle_at:
  8407. ret = do_open_by_handle_at(arg1, arg2, arg3);
  8408. fd_trans_unregister(ret);
  8409. return ret;
  8410. #endif
  8411. #if defined(__NR_pidfd_open) && defined(TARGET_NR_pidfd_open)
  8412. case TARGET_NR_pidfd_open:
  8413. return get_errno(pidfd_open(arg1, arg2));
  8414. #endif
  8415. #if defined(__NR_pidfd_send_signal) && defined(TARGET_NR_pidfd_send_signal)
  8416. case TARGET_NR_pidfd_send_signal:
  8417. {
  8418. siginfo_t uinfo, *puinfo;
  8419. if (arg3) {
  8420. p = lock_user(VERIFY_READ, arg3, sizeof(target_siginfo_t), 1);
  8421. if (!p) {
  8422. return -TARGET_EFAULT;
  8423. }
  8424. target_to_host_siginfo(&uinfo, p);
  8425. unlock_user(p, arg3, 0);
  8426. puinfo = &uinfo;
  8427. } else {
  8428. puinfo = NULL;
  8429. }
  8430. ret = get_errno(pidfd_send_signal(arg1, target_to_host_signal(arg2),
  8431. puinfo, arg4));
  8432. }
  8433. return ret;
  8434. #endif
  8435. #if defined(__NR_pidfd_getfd) && defined(TARGET_NR_pidfd_getfd)
  8436. case TARGET_NR_pidfd_getfd:
  8437. return get_errno(pidfd_getfd(arg1, arg2, arg3));
  8438. #endif
  8439. case TARGET_NR_close:
  8440. fd_trans_unregister(arg1);
  8441. return get_errno(close(arg1));
  8442. #if defined(__NR_close_range) && defined(TARGET_NR_close_range)
  8443. case TARGET_NR_close_range:
  8444. ret = get_errno(sys_close_range(arg1, arg2, arg3));
  8445. if (ret == 0 && !(arg3 & CLOSE_RANGE_CLOEXEC)) {
  8446. abi_long fd, maxfd;
  8447. maxfd = MIN(arg2, target_fd_max);
  8448. for (fd = arg1; fd < maxfd; fd++) {
  8449. fd_trans_unregister(fd);
  8450. }
  8451. }
  8452. return ret;
  8453. #endif
  8454. case TARGET_NR_brk:
  8455. return do_brk(arg1);
  8456. #ifdef TARGET_NR_fork
  8457. case TARGET_NR_fork:
  8458. return get_errno(do_fork(cpu_env, TARGET_SIGCHLD, 0, 0, 0, 0));
  8459. #endif
  8460. #ifdef TARGET_NR_waitpid
  8461. case TARGET_NR_waitpid:
  8462. {
  8463. int status;
  8464. ret = get_errno(safe_wait4(arg1, &status, arg3, 0));
  8465. if (!is_error(ret) && arg2 && ret
  8466. && put_user_s32(host_to_target_waitstatus(status), arg2))
  8467. return -TARGET_EFAULT;
  8468. }
  8469. return ret;
  8470. #endif
  8471. #ifdef TARGET_NR_waitid
  8472. case TARGET_NR_waitid:
  8473. {
  8474. struct rusage ru;
  8475. siginfo_t info;
  8476. ret = get_errno(safe_waitid(arg1, arg2, (arg3 ? &info : NULL),
  8477. arg4, (arg5 ? &ru : NULL)));
  8478. if (!is_error(ret)) {
  8479. if (arg3) {
  8480. p = lock_user(VERIFY_WRITE, arg3,
  8481. sizeof(target_siginfo_t), 0);
  8482. if (!p) {
  8483. return -TARGET_EFAULT;
  8484. }
  8485. host_to_target_siginfo(p, &info);
  8486. unlock_user(p, arg3, sizeof(target_siginfo_t));
  8487. }
  8488. if (arg5 && host_to_target_rusage(arg5, &ru)) {
  8489. return -TARGET_EFAULT;
  8490. }
  8491. }
  8492. }
  8493. return ret;
  8494. #endif
  8495. #ifdef TARGET_NR_creat /* not on alpha */
  8496. case TARGET_NR_creat:
  8497. if (!(p = lock_user_string(arg1)))
  8498. return -TARGET_EFAULT;
  8499. ret = get_errno(creat(p, arg2));
  8500. fd_trans_unregister(ret);
  8501. unlock_user(p, arg1, 0);
  8502. return ret;
  8503. #endif
  8504. #ifdef TARGET_NR_link
  8505. case TARGET_NR_link:
  8506. {
  8507. void * p2;
  8508. p = lock_user_string(arg1);
  8509. p2 = lock_user_string(arg2);
  8510. if (!p || !p2)
  8511. ret = -TARGET_EFAULT;
  8512. else
  8513. ret = get_errno(link(p, p2));
  8514. unlock_user(p2, arg2, 0);
  8515. unlock_user(p, arg1, 0);
  8516. }
  8517. return ret;
  8518. #endif
  8519. #if defined(TARGET_NR_linkat)
  8520. case TARGET_NR_linkat:
  8521. {
  8522. void * p2 = NULL;
  8523. if (!arg2 || !arg4)
  8524. return -TARGET_EFAULT;
  8525. p = lock_user_string(arg2);
  8526. p2 = lock_user_string(arg4);
  8527. if (!p || !p2)
  8528. ret = -TARGET_EFAULT;
  8529. else
  8530. ret = get_errno(linkat(arg1, p, arg3, p2, arg5));
  8531. unlock_user(p, arg2, 0);
  8532. unlock_user(p2, arg4, 0);
  8533. }
  8534. return ret;
  8535. #endif
  8536. #ifdef TARGET_NR_unlink
  8537. case TARGET_NR_unlink:
  8538. if (!(p = lock_user_string(arg1)))
  8539. return -TARGET_EFAULT;
  8540. ret = get_errno(unlink(p));
  8541. unlock_user(p, arg1, 0);
  8542. return ret;
  8543. #endif
  8544. #if defined(TARGET_NR_unlinkat)
  8545. case TARGET_NR_unlinkat:
  8546. if (!(p = lock_user_string(arg2)))
  8547. return -TARGET_EFAULT;
  8548. ret = get_errno(unlinkat(arg1, p, arg3));
  8549. unlock_user(p, arg2, 0);
  8550. return ret;
  8551. #endif
  8552. case TARGET_NR_execveat:
  8553. return do_execv(cpu_env, arg1, arg2, arg3, arg4, arg5, true);
  8554. case TARGET_NR_execve:
  8555. return do_execv(cpu_env, AT_FDCWD, arg1, arg2, arg3, 0, false);
  8556. case TARGET_NR_chdir:
  8557. if (!(p = lock_user_string(arg1)))
  8558. return -TARGET_EFAULT;
  8559. ret = get_errno(chdir(p));
  8560. unlock_user(p, arg1, 0);
  8561. return ret;
  8562. #ifdef TARGET_NR_time
  8563. case TARGET_NR_time:
  8564. {
  8565. time_t host_time;
  8566. ret = get_errno(time(&host_time));
  8567. if (!is_error(ret)
  8568. && arg1
  8569. && put_user_sal(host_time, arg1))
  8570. return -TARGET_EFAULT;
  8571. }
  8572. return ret;
  8573. #endif
  8574. #ifdef TARGET_NR_mknod
  8575. case TARGET_NR_mknod:
  8576. if (!(p = lock_user_string(arg1)))
  8577. return -TARGET_EFAULT;
  8578. ret = get_errno(mknod(p, arg2, arg3));
  8579. unlock_user(p, arg1, 0);
  8580. return ret;
  8581. #endif
  8582. #if defined(TARGET_NR_mknodat)
  8583. case TARGET_NR_mknodat:
  8584. if (!(p = lock_user_string(arg2)))
  8585. return -TARGET_EFAULT;
  8586. ret = get_errno(mknodat(arg1, p, arg3, arg4));
  8587. unlock_user(p, arg2, 0);
  8588. return ret;
  8589. #endif
  8590. #ifdef TARGET_NR_chmod
  8591. case TARGET_NR_chmod:
  8592. if (!(p = lock_user_string(arg1)))
  8593. return -TARGET_EFAULT;
  8594. ret = get_errno(chmod(p, arg2));
  8595. unlock_user(p, arg1, 0);
  8596. return ret;
  8597. #endif
  8598. #ifdef TARGET_NR_lseek
  8599. case TARGET_NR_lseek:
  8600. return get_errno(lseek(arg1, arg2, arg3));
  8601. #endif
  8602. #if defined(TARGET_NR_getxpid) && defined(TARGET_ALPHA)
  8603. /* Alpha specific */
  8604. case TARGET_NR_getxpid:
  8605. cpu_env->ir[IR_A4] = getppid();
  8606. return get_errno(getpid());
  8607. #endif
  8608. #ifdef TARGET_NR_getpid
  8609. case TARGET_NR_getpid:
  8610. return get_errno(getpid());
  8611. #endif
  8612. case TARGET_NR_mount:
  8613. {
  8614. /* need to look at the data field */
  8615. void *p2, *p3;
  8616. if (arg1) {
  8617. p = lock_user_string(arg1);
  8618. if (!p) {
  8619. return -TARGET_EFAULT;
  8620. }
  8621. } else {
  8622. p = NULL;
  8623. }
  8624. p2 = lock_user_string(arg2);
  8625. if (!p2) {
  8626. if (arg1) {
  8627. unlock_user(p, arg1, 0);
  8628. }
  8629. return -TARGET_EFAULT;
  8630. }
  8631. if (arg3) {
  8632. p3 = lock_user_string(arg3);
  8633. if (!p3) {
  8634. if (arg1) {
  8635. unlock_user(p, arg1, 0);
  8636. }
  8637. unlock_user(p2, arg2, 0);
  8638. return -TARGET_EFAULT;
  8639. }
  8640. } else {
  8641. p3 = NULL;
  8642. }
  8643. /* FIXME - arg5 should be locked, but it isn't clear how to
  8644. * do that since it's not guaranteed to be a NULL-terminated
  8645. * string.
  8646. */
  8647. if (!arg5) {
  8648. ret = mount(p, p2, p3, (unsigned long)arg4, NULL);
  8649. } else {
  8650. ret = mount(p, p2, p3, (unsigned long)arg4, g2h(cpu, arg5));
  8651. }
  8652. ret = get_errno(ret);
  8653. if (arg1) {
  8654. unlock_user(p, arg1, 0);
  8655. }
  8656. unlock_user(p2, arg2, 0);
  8657. if (arg3) {
  8658. unlock_user(p3, arg3, 0);
  8659. }
  8660. }
  8661. return ret;
  8662. #if defined(TARGET_NR_umount) || defined(TARGET_NR_oldumount)
  8663. #if defined(TARGET_NR_umount)
  8664. case TARGET_NR_umount:
  8665. #endif
  8666. #if defined(TARGET_NR_oldumount)
  8667. case TARGET_NR_oldumount:
  8668. #endif
  8669. if (!(p = lock_user_string(arg1)))
  8670. return -TARGET_EFAULT;
  8671. ret = get_errno(umount(p));
  8672. unlock_user(p, arg1, 0);
  8673. return ret;
  8674. #endif
  8675. #if defined(TARGET_NR_move_mount) && defined(__NR_move_mount)
  8676. case TARGET_NR_move_mount:
  8677. {
  8678. void *p2, *p4;
  8679. if (!arg2 || !arg4) {
  8680. return -TARGET_EFAULT;
  8681. }
  8682. p2 = lock_user_string(arg2);
  8683. if (!p2) {
  8684. return -TARGET_EFAULT;
  8685. }
  8686. p4 = lock_user_string(arg4);
  8687. if (!p4) {
  8688. unlock_user(p2, arg2, 0);
  8689. return -TARGET_EFAULT;
  8690. }
  8691. ret = get_errno(sys_move_mount(arg1, p2, arg3, p4, arg5));
  8692. unlock_user(p2, arg2, 0);
  8693. unlock_user(p4, arg4, 0);
  8694. return ret;
  8695. }
  8696. #endif
  8697. #if defined(TARGET_NR_open_tree) && defined(__NR_open_tree)
  8698. case TARGET_NR_open_tree:
  8699. {
  8700. void *p2;
  8701. int host_flags;
  8702. if (!arg2) {
  8703. return -TARGET_EFAULT;
  8704. }
  8705. p2 = lock_user_string(arg2);
  8706. if (!p2) {
  8707. return -TARGET_EFAULT;
  8708. }
  8709. host_flags = arg3 & ~TARGET_O_CLOEXEC;
  8710. if (arg3 & TARGET_O_CLOEXEC) {
  8711. host_flags |= O_CLOEXEC;
  8712. }
  8713. ret = get_errno(sys_open_tree(arg1, p2, host_flags));
  8714. unlock_user(p2, arg2, 0);
  8715. return ret;
  8716. }
  8717. #endif
  8718. #ifdef TARGET_NR_stime /* not on alpha */
  8719. case TARGET_NR_stime:
  8720. {
  8721. struct timespec ts;
  8722. ts.tv_nsec = 0;
  8723. if (get_user_sal(ts.tv_sec, arg1)) {
  8724. return -TARGET_EFAULT;
  8725. }
  8726. return get_errno(clock_settime(CLOCK_REALTIME, &ts));
  8727. }
  8728. #endif
  8729. #ifdef TARGET_NR_alarm /* not on alpha */
  8730. case TARGET_NR_alarm:
  8731. return alarm(arg1);
  8732. #endif
  8733. #ifdef TARGET_NR_pause /* not on alpha */
  8734. case TARGET_NR_pause:
  8735. if (!block_signals()) {
  8736. sigsuspend(&get_task_state(cpu)->signal_mask);
  8737. }
  8738. return -TARGET_EINTR;
  8739. #endif
  8740. #ifdef TARGET_NR_utime
  8741. case TARGET_NR_utime:
  8742. {
  8743. struct utimbuf tbuf, *host_tbuf;
  8744. struct target_utimbuf *target_tbuf;
  8745. if (arg2) {
  8746. if (!lock_user_struct(VERIFY_READ, target_tbuf, arg2, 1))
  8747. return -TARGET_EFAULT;
  8748. tbuf.actime = tswapal(target_tbuf->actime);
  8749. tbuf.modtime = tswapal(target_tbuf->modtime);
  8750. unlock_user_struct(target_tbuf, arg2, 0);
  8751. host_tbuf = &tbuf;
  8752. } else {
  8753. host_tbuf = NULL;
  8754. }
  8755. if (!(p = lock_user_string(arg1)))
  8756. return -TARGET_EFAULT;
  8757. ret = get_errno(utime(p, host_tbuf));
  8758. unlock_user(p, arg1, 0);
  8759. }
  8760. return ret;
  8761. #endif
  8762. #ifdef TARGET_NR_utimes
  8763. case TARGET_NR_utimes:
  8764. {
  8765. struct timeval *tvp, tv[2];
  8766. if (arg2) {
  8767. if (copy_from_user_timeval(&tv[0], arg2)
  8768. || copy_from_user_timeval(&tv[1],
  8769. arg2 + sizeof(struct target_timeval)))
  8770. return -TARGET_EFAULT;
  8771. tvp = tv;
  8772. } else {
  8773. tvp = NULL;
  8774. }
  8775. if (!(p = lock_user_string(arg1)))
  8776. return -TARGET_EFAULT;
  8777. ret = get_errno(utimes(p, tvp));
  8778. unlock_user(p, arg1, 0);
  8779. }
  8780. return ret;
  8781. #endif
  8782. #if defined(TARGET_NR_futimesat)
  8783. case TARGET_NR_futimesat:
  8784. {
  8785. struct timeval *tvp, tv[2];
  8786. if (arg3) {
  8787. if (copy_from_user_timeval(&tv[0], arg3)
  8788. || copy_from_user_timeval(&tv[1],
  8789. arg3 + sizeof(struct target_timeval)))
  8790. return -TARGET_EFAULT;
  8791. tvp = tv;
  8792. } else {
  8793. tvp = NULL;
  8794. }
  8795. if (!(p = lock_user_string(arg2))) {
  8796. return -TARGET_EFAULT;
  8797. }
  8798. ret = get_errno(futimesat(arg1, path(p), tvp));
  8799. unlock_user(p, arg2, 0);
  8800. }
  8801. return ret;
  8802. #endif
  8803. #ifdef TARGET_NR_access
  8804. case TARGET_NR_access:
  8805. if (!(p = lock_user_string(arg1))) {
  8806. return -TARGET_EFAULT;
  8807. }
  8808. ret = get_errno(access(path(p), arg2));
  8809. unlock_user(p, arg1, 0);
  8810. return ret;
  8811. #endif
  8812. #if defined(TARGET_NR_faccessat) && defined(__NR_faccessat)
  8813. case TARGET_NR_faccessat:
  8814. if (!(p = lock_user_string(arg2))) {
  8815. return -TARGET_EFAULT;
  8816. }
  8817. ret = get_errno(faccessat(arg1, p, arg3, 0));
  8818. unlock_user(p, arg2, 0);
  8819. return ret;
  8820. #endif
  8821. #if defined(TARGET_NR_faccessat2)
  8822. case TARGET_NR_faccessat2:
  8823. if (!(p = lock_user_string(arg2))) {
  8824. return -TARGET_EFAULT;
  8825. }
  8826. ret = get_errno(faccessat(arg1, p, arg3, arg4));
  8827. unlock_user(p, arg2, 0);
  8828. return ret;
  8829. #endif
  8830. #ifdef TARGET_NR_nice /* not on alpha */
  8831. case TARGET_NR_nice:
  8832. return get_errno(nice(arg1));
  8833. #endif
  8834. case TARGET_NR_sync:
  8835. sync();
  8836. return 0;
  8837. #if defined(TARGET_NR_syncfs) && defined(CONFIG_SYNCFS)
  8838. case TARGET_NR_syncfs:
  8839. return get_errno(syncfs(arg1));
  8840. #endif
  8841. case TARGET_NR_kill:
  8842. return get_errno(safe_kill(arg1, target_to_host_signal(arg2)));
  8843. #ifdef TARGET_NR_rename
  8844. case TARGET_NR_rename:
  8845. {
  8846. void *p2;
  8847. p = lock_user_string(arg1);
  8848. p2 = lock_user_string(arg2);
  8849. if (!p || !p2)
  8850. ret = -TARGET_EFAULT;
  8851. else
  8852. ret = get_errno(rename(p, p2));
  8853. unlock_user(p2, arg2, 0);
  8854. unlock_user(p, arg1, 0);
  8855. }
  8856. return ret;
  8857. #endif
  8858. #if defined(TARGET_NR_renameat)
  8859. case TARGET_NR_renameat:
  8860. {
  8861. void *p2;
  8862. p = lock_user_string(arg2);
  8863. p2 = lock_user_string(arg4);
  8864. if (!p || !p2)
  8865. ret = -TARGET_EFAULT;
  8866. else
  8867. ret = get_errno(renameat(arg1, p, arg3, p2));
  8868. unlock_user(p2, arg4, 0);
  8869. unlock_user(p, arg2, 0);
  8870. }
  8871. return ret;
  8872. #endif
  8873. #if defined(TARGET_NR_renameat2)
  8874. case TARGET_NR_renameat2:
  8875. {
  8876. void *p2;
  8877. p = lock_user_string(arg2);
  8878. p2 = lock_user_string(arg4);
  8879. if (!p || !p2) {
  8880. ret = -TARGET_EFAULT;
  8881. } else {
  8882. ret = get_errno(sys_renameat2(arg1, p, arg3, p2, arg5));
  8883. }
  8884. unlock_user(p2, arg4, 0);
  8885. unlock_user(p, arg2, 0);
  8886. }
  8887. return ret;
  8888. #endif
  8889. #ifdef TARGET_NR_mkdir
  8890. case TARGET_NR_mkdir:
  8891. if (!(p = lock_user_string(arg1)))
  8892. return -TARGET_EFAULT;
  8893. ret = get_errno(mkdir(p, arg2));
  8894. unlock_user(p, arg1, 0);
  8895. return ret;
  8896. #endif
  8897. #if defined(TARGET_NR_mkdirat)
  8898. case TARGET_NR_mkdirat:
  8899. if (!(p = lock_user_string(arg2)))
  8900. return -TARGET_EFAULT;
  8901. ret = get_errno(mkdirat(arg1, p, arg3));
  8902. unlock_user(p, arg2, 0);
  8903. return ret;
  8904. #endif
  8905. #ifdef TARGET_NR_rmdir
  8906. case TARGET_NR_rmdir:
  8907. if (!(p = lock_user_string(arg1)))
  8908. return -TARGET_EFAULT;
  8909. ret = get_errno(rmdir(p));
  8910. unlock_user(p, arg1, 0);
  8911. return ret;
  8912. #endif
  8913. case TARGET_NR_dup:
  8914. ret = get_errno(dup(arg1));
  8915. if (ret >= 0) {
  8916. fd_trans_dup(arg1, ret);
  8917. }
  8918. return ret;
  8919. #ifdef TARGET_NR_pipe
  8920. case TARGET_NR_pipe:
  8921. return do_pipe(cpu_env, arg1, 0, 0);
  8922. #endif
  8923. #ifdef TARGET_NR_pipe2
  8924. case TARGET_NR_pipe2:
  8925. return do_pipe(cpu_env, arg1,
  8926. target_to_host_bitmask(arg2, fcntl_flags_tbl), 1);
  8927. #endif
  8928. case TARGET_NR_times:
  8929. {
  8930. struct target_tms *tmsp;
  8931. struct tms tms;
  8932. ret = get_errno(times(&tms));
  8933. if (arg1) {
  8934. tmsp = lock_user(VERIFY_WRITE, arg1, sizeof(struct target_tms), 0);
  8935. if (!tmsp)
  8936. return -TARGET_EFAULT;
  8937. tmsp->tms_utime = tswapal(host_to_target_clock_t(tms.tms_utime));
  8938. tmsp->tms_stime = tswapal(host_to_target_clock_t(tms.tms_stime));
  8939. tmsp->tms_cutime = tswapal(host_to_target_clock_t(tms.tms_cutime));
  8940. tmsp->tms_cstime = tswapal(host_to_target_clock_t(tms.tms_cstime));
  8941. }
  8942. if (!is_error(ret))
  8943. ret = host_to_target_clock_t(ret);
  8944. }
  8945. return ret;
  8946. case TARGET_NR_acct:
  8947. if (arg1 == 0) {
  8948. ret = get_errno(acct(NULL));
  8949. } else {
  8950. if (!(p = lock_user_string(arg1))) {
  8951. return -TARGET_EFAULT;
  8952. }
  8953. ret = get_errno(acct(path(p)));
  8954. unlock_user(p, arg1, 0);
  8955. }
  8956. return ret;
  8957. #ifdef TARGET_NR_umount2
  8958. case TARGET_NR_umount2:
  8959. if (!(p = lock_user_string(arg1)))
  8960. return -TARGET_EFAULT;
  8961. ret = get_errno(umount2(p, arg2));
  8962. unlock_user(p, arg1, 0);
  8963. return ret;
  8964. #endif
  8965. case TARGET_NR_ioctl:
  8966. return do_ioctl(arg1, arg2, arg3);
  8967. #ifdef TARGET_NR_fcntl
  8968. case TARGET_NR_fcntl:
  8969. return do_fcntl(arg1, arg2, arg3);
  8970. #endif
  8971. case TARGET_NR_setpgid:
  8972. return get_errno(setpgid(arg1, arg2));
  8973. case TARGET_NR_umask:
  8974. return get_errno(umask(arg1));
  8975. case TARGET_NR_chroot:
  8976. if (!(p = lock_user_string(arg1)))
  8977. return -TARGET_EFAULT;
  8978. ret = get_errno(chroot(p));
  8979. unlock_user(p, arg1, 0);
  8980. return ret;
  8981. #ifdef TARGET_NR_dup2
  8982. case TARGET_NR_dup2:
  8983. ret = get_errno(dup2(arg1, arg2));
  8984. if (ret >= 0) {
  8985. fd_trans_dup(arg1, arg2);
  8986. }
  8987. return ret;
  8988. #endif
  8989. #if defined(CONFIG_DUP3) && defined(TARGET_NR_dup3)
  8990. case TARGET_NR_dup3:
  8991. {
  8992. int host_flags;
  8993. if ((arg3 & ~TARGET_O_CLOEXEC) != 0) {
  8994. return -EINVAL;
  8995. }
  8996. host_flags = target_to_host_bitmask(arg3, fcntl_flags_tbl);
  8997. ret = get_errno(dup3(arg1, arg2, host_flags));
  8998. if (ret >= 0) {
  8999. fd_trans_dup(arg1, arg2);
  9000. }
  9001. return ret;
  9002. }
  9003. #endif
  9004. #ifdef TARGET_NR_getppid /* not on alpha */
  9005. case TARGET_NR_getppid:
  9006. return get_errno(getppid());
  9007. #endif
  9008. #ifdef TARGET_NR_getpgrp
  9009. case TARGET_NR_getpgrp:
  9010. return get_errno(getpgrp());
  9011. #endif
  9012. case TARGET_NR_setsid:
  9013. return get_errno(setsid());
  9014. #ifdef TARGET_NR_sigaction
  9015. case TARGET_NR_sigaction:
  9016. {
#if defined(TARGET_MIPS)
            /* MIPS: the old-style sigaction struct carries a multi-word
             * mask, so the internal target_sigaction type is used directly. */
            struct target_sigaction act, oact, *pact, *old_act;
            if (arg2) {
                if (!lock_user_struct(VERIFY_READ, old_act, arg2, 1))
                    return -TARGET_EFAULT;
                act._sa_handler = old_act->_sa_handler;
                /* Only the first word of the guest mask is significant here. */
                target_siginitset(&act.sa_mask, old_act->sa_mask.sig[0]);
                act.sa_flags = old_act->sa_flags;
                unlock_user_struct(old_act, arg2, 0);
                pact = &act;
            } else {
                pact = NULL;
            }
            ret = get_errno(do_sigaction(arg1, pact, &oact, 0));
            if (!is_error(ret) && arg3) {
                /* Write the previous action back to guest memory at arg3. */
                if (!lock_user_struct(VERIFY_WRITE, old_act, arg3, 0))
                    return -TARGET_EFAULT;
                old_act->_sa_handler = oact._sa_handler;
                old_act->sa_flags = oact.sa_flags;
                old_act->sa_mask.sig[0] = oact.sa_mask.sig[0];
                /* Upper mask words are not produced by do_sigaction: clear. */
                old_act->sa_mask.sig[1] = 0;
                old_act->sa_mask.sig[2] = 0;
                old_act->sa_mask.sig[3] = 0;
                unlock_user_struct(old_act, arg3, 1);
            }
#else
            /* Generic old-style sigaction: convert between the single-word
             * target_old_sigaction layout and the internal target_sigaction. */
            struct target_old_sigaction *old_act;
            struct target_sigaction act, oact, *pact;
            if (arg2) {
                if (!lock_user_struct(VERIFY_READ, old_act, arg2, 1))
                    return -TARGET_EFAULT;
                act._sa_handler = old_act->_sa_handler;
                target_siginitset(&act.sa_mask, old_act->sa_mask);
                act.sa_flags = old_act->sa_flags;
#ifdef TARGET_ARCH_HAS_SA_RESTORER
                act.sa_restorer = old_act->sa_restorer;
#endif
                unlock_user_struct(old_act, arg2, 0);
                pact = &act;
            } else {
                pact = NULL;
            }
            ret = get_errno(do_sigaction(arg1, pact, &oact, 0));
            if (!is_error(ret) && arg3) {
                if (!lock_user_struct(VERIFY_WRITE, old_act, arg3, 0))
                    return -TARGET_EFAULT;
                old_act->_sa_handler = oact._sa_handler;
                /* Old-style mask is a single word: keep only sig[0]. */
                old_act->sa_mask = oact.sa_mask.sig[0];
                old_act->sa_flags = oact.sa_flags;
#ifdef TARGET_ARCH_HAS_SA_RESTORER
                old_act->sa_restorer = oact.sa_restorer;
#endif
                unlock_user_struct(old_act, arg3, 1);
            }
#endif
        }
        return ret;
#endif
    case TARGET_NR_rt_sigaction:
        {
            /*
             * For Alpha and SPARC this is a 5 argument syscall, with
             * a 'restorer' parameter which must be copied into the
             * sa_restorer field of the sigaction struct.
             * For Alpha that 'restorer' is arg5; for SPARC it is arg4,
             * and arg5 is the sigsetsize.
             */
#if defined(TARGET_ALPHA)
            target_ulong sigsetsize = arg4;
            target_ulong restorer = arg5;
#elif defined(TARGET_SPARC)
            target_ulong restorer = arg4;
            target_ulong sigsetsize = arg5;
#else
            target_ulong sigsetsize = arg4;
            target_ulong restorer = 0;
#endif
            struct target_sigaction *act = NULL;
            struct target_sigaction *oact = NULL;

            /* Only the exact sigset size is accepted. */
            if (sigsetsize != sizeof(target_sigset_t)) {
                return -TARGET_EINVAL;
            }
            if (arg2 && !lock_user_struct(VERIFY_READ, act, arg2, 1)) {
                return -TARGET_EFAULT;
            }
            if (arg3 && !lock_user_struct(VERIFY_WRITE, oact, arg3, 0)) {
                ret = -TARGET_EFAULT;
            } else {
                ret = get_errno(do_sigaction(arg1, act, oact, restorer));
                if (oact) {
                    unlock_user_struct(oact, arg3, 1);
                }
            }
            /* 'act' is unlocked on every path once locked above. */
            if (act) {
                unlock_user_struct(act, arg2, 0);
            }
        }
        return ret;
  9115. #ifdef TARGET_NR_sgetmask /* not on alpha */
  9116. case TARGET_NR_sgetmask:
  9117. {
  9118. sigset_t cur_set;
  9119. abi_ulong target_set;
  9120. ret = do_sigprocmask(0, NULL, &cur_set);
  9121. if (!ret) {
  9122. host_to_target_old_sigset(&target_set, &cur_set);
  9123. ret = target_set;
  9124. }
  9125. }
  9126. return ret;
  9127. #endif
  9128. #ifdef TARGET_NR_ssetmask /* not on alpha */
  9129. case TARGET_NR_ssetmask:
  9130. {
  9131. sigset_t set, oset;
  9132. abi_ulong target_set = arg1;
  9133. target_to_host_old_sigset(&set, &target_set);
  9134. ret = do_sigprocmask(SIG_SETMASK, &set, &oset);
  9135. if (!ret) {
  9136. host_to_target_old_sigset(&target_set, &oset);
  9137. ret = target_set;
  9138. }
  9139. }
  9140. return ret;
  9141. #endif
#ifdef TARGET_NR_sigprocmask
    case TARGET_NR_sigprocmask:
        {
#if defined(TARGET_ALPHA)
            /* Alpha passes the mask by value in arg2 and returns the old
             * mask as the syscall result. */
            sigset_t set, oldset;
            abi_ulong mask;
            int how;

            switch (arg1) {
            case TARGET_SIG_BLOCK:
                how = SIG_BLOCK;
                break;
            case TARGET_SIG_UNBLOCK:
                how = SIG_UNBLOCK;
                break;
            case TARGET_SIG_SETMASK:
                how = SIG_SETMASK;
                break;
            default:
                return -TARGET_EINVAL;
            }
            mask = arg2;
            target_to_host_old_sigset(&set, &mask);

            ret = do_sigprocmask(how, &set, &oldset);
            if (!is_error(ret)) {
                host_to_target_old_sigset(&mask, &oldset);
                ret = mask;
                cpu_env->ir[IR_V0] = 0; /* force no error */
            }
#else
            /* Generic form: old-style sigsets passed via guest memory. */
            sigset_t set, oldset, *set_ptr;
            int how;

            if (arg2) {
                p = lock_user(VERIFY_READ, arg2, sizeof(target_sigset_t), 1);
                if (!p) {
                    return -TARGET_EFAULT;
                }
                target_to_host_old_sigset(&set, p);
                unlock_user(p, arg2, 0);
                set_ptr = &set;
                /* Translate the target 'how' constant to the host value. */
                switch (arg1) {
                case TARGET_SIG_BLOCK:
                    how = SIG_BLOCK;
                    break;
                case TARGET_SIG_UNBLOCK:
                    how = SIG_UNBLOCK;
                    break;
                case TARGET_SIG_SETMASK:
                    how = SIG_SETMASK;
                    break;
                default:
                    return -TARGET_EINVAL;
                }
            } else {
                /* NULL new set: query-only, 'how' is ignored. */
                how = 0;
                set_ptr = NULL;
            }

            ret = do_sigprocmask(how, set_ptr, &oldset);
            if (!is_error(ret) && arg3) {
                /* Optionally write the previous mask back to arg3. */
                if (!(p = lock_user(VERIFY_WRITE, arg3, sizeof(target_sigset_t), 0)))
                    return -TARGET_EFAULT;
                host_to_target_old_sigset(p, &oldset);
                unlock_user(p, arg3, sizeof(target_sigset_t));
            }
#endif
        }
        return ret;
#endif
    case TARGET_NR_rt_sigprocmask:
        {
            /* rt variant: full-size sigset; arg4 is the sigset size. */
            int how = arg1;
            sigset_t set, oldset, *set_ptr;

            if (arg4 != sizeof(target_sigset_t)) {
                return -TARGET_EINVAL;
            }

            if (arg2) {
                p = lock_user(VERIFY_READ, arg2, sizeof(target_sigset_t), 1);
                if (!p) {
                    return -TARGET_EFAULT;
                }
                target_to_host_sigset(&set, p);
                unlock_user(p, arg2, 0);
                set_ptr = &set;
                /* Translate the target 'how' constant to the host value. */
                switch(how) {
                case TARGET_SIG_BLOCK:
                    how = SIG_BLOCK;
                    break;
                case TARGET_SIG_UNBLOCK:
                    how = SIG_UNBLOCK;
                    break;
                case TARGET_SIG_SETMASK:
                    how = SIG_SETMASK;
                    break;
                default:
                    return -TARGET_EINVAL;
                }
            } else {
                /* NULL new set: query-only, 'how' is ignored. */
                how = 0;
                set_ptr = NULL;
            }

            ret = do_sigprocmask(how, set_ptr, &oldset);
            if (!is_error(ret) && arg3) {
                /* Optionally write the previous mask back to arg3. */
                if (!(p = lock_user(VERIFY_WRITE, arg3, sizeof(target_sigset_t), 0)))
                    return -TARGET_EFAULT;
                host_to_target_sigset(p, &oldset);
                unlock_user(p, arg3, sizeof(target_sigset_t));
            }
        }
        return ret;
#ifdef TARGET_NR_sigpending
    case TARGET_NR_sigpending:
        {
            /* Old-style sigpending: report pending signals at arg1 using
             * the old (single word) sigset encoding. */
            sigset_t set;
            ret = get_errno(sigpending(&set));
            if (!is_error(ret)) {
                if (!(p = lock_user(VERIFY_WRITE, arg1, sizeof(target_sigset_t), 0)))
                    return -TARGET_EFAULT;
                host_to_target_old_sigset(p, &set);
                unlock_user(p, arg1, sizeof(target_sigset_t));
            }
        }
        return ret;
#endif
    case TARGET_NR_rt_sigpending:
        {
            sigset_t set;

            /* Yes, this check is >, not != like most. We follow the kernel's
             * logic and it does it like this because it implements
             * NR_sigpending through the same code path, and in that case
             * the old_sigset_t is smaller in size.
             */
            if (arg2 > sizeof(target_sigset_t)) {
                return -TARGET_EINVAL;
            }
            ret = get_errno(sigpending(&set));
            if (!is_error(ret)) {
                if (!(p = lock_user(VERIFY_WRITE, arg1, sizeof(target_sigset_t), 0)))
                    return -TARGET_EFAULT;
                host_to_target_sigset(p, &set);
                unlock_user(p, arg1, sizeof(target_sigset_t));
            }
        }
        return ret;
#ifdef TARGET_NR_sigsuspend
    case TARGET_NR_sigsuspend:
        {
            sigset_t *set;

#if defined(TARGET_ALPHA)
            /* Alpha passes the old-style mask by value in arg1. */
            TaskState *ts = get_task_state(cpu);
            /* target_to_host_old_sigset will bswap back */
            abi_ulong mask = tswapal(arg1);
            set = &ts->sigsuspend_mask;
            target_to_host_old_sigset(set, &mask);
#else
            ret = process_sigsuspend_mask(&set, arg1, sizeof(target_sigset_t));
            if (ret != 0) {
                return ret;
            }
#endif
            ret = get_errno(safe_rt_sigsuspend(set, SIGSET_T_SIZE));
            /* Restore/record the mask state after the wait completes. */
            finish_sigsuspend_mask(ret);
        }
        return ret;
#endif
    case TARGET_NR_rt_sigsuspend:
        {
            /* Atomically install the mask at arg1 (size arg2) and wait
             * for a signal. */
            sigset_t *set;

            ret = process_sigsuspend_mask(&set, arg1, arg2);
            if (ret != 0) {
                return ret;
            }
            ret = get_errno(safe_rt_sigsuspend(set, SIGSET_T_SIZE));
            finish_sigsuspend_mask(ret);
        }
        return ret;
#ifdef TARGET_NR_rt_sigtimedwait
    case TARGET_NR_rt_sigtimedwait:
        {
            sigset_t set;
            struct timespec uts, *puts;
            siginfo_t uinfo;

            /* Only the exact sigset size is accepted. */
            if (arg4 != sizeof(target_sigset_t)) {
                return -TARGET_EINVAL;
            }
            if (!(p = lock_user(VERIFY_READ, arg1, sizeof(target_sigset_t), 1)))
                return -TARGET_EFAULT;
            target_to_host_sigset(&set, p);
            unlock_user(p, arg1, 0);
            if (arg3) {
                puts = &uts;
                if (target_to_host_timespec(puts, arg3)) {
                    return -TARGET_EFAULT;
                }
            } else {
                /* NULL timeout: wait indefinitely. */
                puts = NULL;
            }
            ret = get_errno(safe_rt_sigtimedwait(&set, &uinfo, puts,
                                                 SIGSET_T_SIZE));
            if (!is_error(ret)) {
                if (arg2) {
                    /* Optionally copy the received siginfo out to arg2. */
                    p = lock_user(VERIFY_WRITE, arg2, sizeof(target_siginfo_t),
                                  0);
                    if (!p) {
                        return -TARGET_EFAULT;
                    }
                    host_to_target_siginfo(p, &uinfo);
                    unlock_user(p, arg2, sizeof(target_siginfo_t));
                }
                /* Success returns a host signal number: map it back. */
                ret = host_to_target_signal(ret);
            }
        }
        return ret;
#endif
#ifdef TARGET_NR_rt_sigtimedwait_time64
    case TARGET_NR_rt_sigtimedwait_time64:
        {
            /* Same as rt_sigtimedwait but the timeout is a 64-bit timespec. */
            sigset_t set;
            struct timespec uts, *puts;
            siginfo_t uinfo;

            if (arg4 != sizeof(target_sigset_t)) {
                return -TARGET_EINVAL;
            }
            p = lock_user(VERIFY_READ, arg1, sizeof(target_sigset_t), 1);
            if (!p) {
                return -TARGET_EFAULT;
            }
            target_to_host_sigset(&set, p);
            unlock_user(p, arg1, 0);
            if (arg3) {
                puts = &uts;
                if (target_to_host_timespec64(puts, arg3)) {
                    return -TARGET_EFAULT;
                }
            } else {
                /* NULL timeout: wait indefinitely. */
                puts = NULL;
            }
            ret = get_errno(safe_rt_sigtimedwait(&set, &uinfo, puts,
                                                 SIGSET_T_SIZE));
            if (!is_error(ret)) {
                if (arg2) {
                    p = lock_user(VERIFY_WRITE, arg2,
                                  sizeof(target_siginfo_t), 0);
                    if (!p) {
                        return -TARGET_EFAULT;
                    }
                    host_to_target_siginfo(p, &uinfo);
                    unlock_user(p, arg2, sizeof(target_siginfo_t));
                }
                /* Success returns a host signal number: map it back. */
                ret = host_to_target_signal(ret);
            }
        }
        return ret;
#endif
    case TARGET_NR_rt_sigqueueinfo:
        {
            /* Queue a signal with caller-supplied siginfo (arg3) to the
             * process arg1. */
            siginfo_t uinfo;

            p = lock_user(VERIFY_READ, arg3, sizeof(target_siginfo_t), 1);
            if (!p) {
                return -TARGET_EFAULT;
            }
            target_to_host_siginfo(&uinfo, p);
            unlock_user(p, arg3, 0);
            ret = get_errno(sys_rt_sigqueueinfo(arg1, target_to_host_signal(arg2), &uinfo));
        }
        return ret;
    case TARGET_NR_rt_tgsigqueueinfo:
        {
            /* As above, but directed at thread arg2 in thread group arg1;
             * the siginfo comes from arg4. */
            siginfo_t uinfo;

            p = lock_user(VERIFY_READ, arg4, sizeof(target_siginfo_t), 1);
            if (!p) {
                return -TARGET_EFAULT;
            }
            target_to_host_siginfo(&uinfo, p);
            unlock_user(p, arg4, 0);
            ret = get_errno(sys_rt_tgsigqueueinfo(arg1, arg2, target_to_host_signal(arg3), &uinfo));
        }
        return ret;
#ifdef TARGET_NR_sigreturn
    case TARGET_NR_sigreturn:
        /* Block signals while the signal frame is being unwound. */
        if (block_signals()) {
            return -QEMU_ERESTARTSYS;
        }
        return do_sigreturn(cpu_env);
#endif
    case TARGET_NR_rt_sigreturn:
        if (block_signals()) {
            return -QEMU_ERESTARTSYS;
        }
        return do_rt_sigreturn(cpu_env);
    case TARGET_NR_sethostname:
        if (!(p = lock_user_string(arg1)))
            return -TARGET_EFAULT;
        ret = get_errno(sethostname(p, arg2));
        unlock_user(p, arg1, 0);
        return ret;
#ifdef TARGET_NR_setrlimit
    case TARGET_NR_setrlimit:
        {
            int resource = target_to_host_resource(arg1);
            struct target_rlimit *target_rlim;
            struct rlimit rlim;

            if (!lock_user_struct(VERIFY_READ, target_rlim, arg2, 1))
                return -TARGET_EFAULT;
            rlim.rlim_cur = target_to_host_rlim(target_rlim->rlim_cur);
            rlim.rlim_max = target_to_host_rlim(target_rlim->rlim_max);
            unlock_user_struct(target_rlim, arg2, 0);
            /*
             * If we just passed through resource limit settings for memory then
             * they would also apply to QEMU's own allocations, and QEMU will
             * crash or hang or die if its allocations fail. Ideally we would
             * track the guest allocations in QEMU and apply the limits ourselves.
             * For now, just tell the guest the call succeeded but don't actually
             * limit anything.
             */
            if (resource != RLIMIT_AS &&
                resource != RLIMIT_DATA &&
                resource != RLIMIT_STACK) {
                return get_errno(setrlimit(resource, &rlim));
            } else {
                /* Memory limits: pretend success, see comment above. */
                return 0;
            }
        }
#endif
#ifdef TARGET_NR_getrlimit
    case TARGET_NR_getrlimit:
        {
            /* Fetch the host limit and convert it to the target layout. */
            int resource = target_to_host_resource(arg1);
            struct target_rlimit *target_rlim;
            struct rlimit rlim;

            ret = get_errno(getrlimit(resource, &rlim));
            if (!is_error(ret)) {
                if (!lock_user_struct(VERIFY_WRITE, target_rlim, arg2, 0))
                    return -TARGET_EFAULT;
                target_rlim->rlim_cur = host_to_target_rlim(rlim.rlim_cur);
                target_rlim->rlim_max = host_to_target_rlim(rlim.rlim_max);
                unlock_user_struct(target_rlim, arg2, 1);
            }
        }
        return ret;
#endif
    case TARGET_NR_getrusage:
        {
            struct rusage rusage;
            ret = get_errno(getrusage(arg1, &rusage));
            if (!is_error(ret)) {
                /* Copy the host rusage out to guest memory at arg2. */
                ret = host_to_target_rusage(arg2, &rusage);
            }
        }
        return ret;
#if defined(TARGET_NR_gettimeofday)
    case TARGET_NR_gettimeofday:
        {
            struct timeval tv;
            struct timezone tz;

            ret = get_errno(gettimeofday(&tv, &tz));
            if (!is_error(ret)) {
                /* Either output pointer may be NULL. */
                if (arg1 && copy_to_user_timeval(arg1, &tv)) {
                    return -TARGET_EFAULT;
                }
                if (arg2 && copy_to_user_timezone(arg2, &tz)) {
                    return -TARGET_EFAULT;
                }
            }
        }
        return ret;
#endif
#if defined(TARGET_NR_settimeofday)
    case TARGET_NR_settimeofday:
        {
            struct timeval tv, *ptv = NULL;
            struct timezone tz, *ptz = NULL;

            /* Either input pointer may be NULL. */
            if (arg1) {
                if (copy_from_user_timeval(&tv, arg1)) {
                    return -TARGET_EFAULT;
                }
                ptv = &tv;
            }
            if (arg2) {
                if (copy_from_user_timezone(&tz, arg2)) {
                    return -TARGET_EFAULT;
                }
                ptz = &tz;
            }
            return get_errno(settimeofday(ptv, ptz));
        }
#endif
#if defined(TARGET_NR_select)
    case TARGET_NR_select:
#if defined(TARGET_WANT_NI_OLD_SELECT)
        /* some architectures used to have old_select here
         * but now ENOSYS it.
         */
        ret = -TARGET_ENOSYS;
#elif defined(TARGET_WANT_OLD_SYS_SELECT)
        /* Old-style select: a single pointer to an argument block. */
        ret = do_old_select(arg1);
#else
        ret = do_select(arg1, arg2, arg3, arg4, arg5);
#endif
        return ret;
#endif
#ifdef TARGET_NR_pselect6
    case TARGET_NR_pselect6:
        return do_pselect6(arg1, arg2, arg3, arg4, arg5, arg6, false);
#endif
#ifdef TARGET_NR_pselect6_time64
    case TARGET_NR_pselect6_time64:
        /* time64 variant: final flag selects 64-bit timespec handling. */
        return do_pselect6(arg1, arg2, arg3, arg4, arg5, arg6, true);
#endif
#ifdef TARGET_NR_symlink
    case TARGET_NR_symlink:
        {
            void *p2;

            /* Lock both pathnames before using either; a NULL from either
             * lock yields EFAULT without calling symlink. */
            p = lock_user_string(arg1);
            p2 = lock_user_string(arg2);
            if (!p || !p2)
                ret = -TARGET_EFAULT;
            else
                ret = get_errno(symlink(p, p2));
            unlock_user(p2, arg2, 0);
            unlock_user(p, arg1, 0);
        }
        return ret;
#endif
#if defined(TARGET_NR_symlinkat)
    case TARGET_NR_symlinkat:
        {
            void *p2;

            /* Same as symlink, with a directory fd (arg2) for the target. */
            p = lock_user_string(arg1);
            p2 = lock_user_string(arg3);
            if (!p || !p2)
                ret = -TARGET_EFAULT;
            else
                ret = get_errno(symlinkat(p, arg2, p2));
            unlock_user(p2, arg3, 0);
            unlock_user(p, arg1, 0);
        }
        return ret;
#endif
  9579. #ifdef TARGET_NR_readlink
  9580. case TARGET_NR_readlink:
  9581. {
  9582. void *p2;
  9583. p = lock_user_string(arg1);
  9584. p2 = lock_user(VERIFY_WRITE, arg2, arg3, 0);
  9585. ret = get_errno(do_guest_readlink(p, p2, arg3));
  9586. unlock_user(p2, arg2, ret);
  9587. unlock_user(p, arg1, 0);
  9588. }
  9589. return ret;
  9590. #endif
#if defined(TARGET_NR_readlinkat)
    case TARGET_NR_readlinkat:
        {
            void *p2;

            p = lock_user_string(arg2);
            p2 = lock_user(VERIFY_WRITE, arg3, arg4, 0);
            if (!p || !p2) {
                ret = -TARGET_EFAULT;
            } else if (!arg4) {
                /* Short circuit this for the magic exe check. */
                ret = -TARGET_EINVAL;
            } else if (is_proc_myself((const char *)p, "exe")) {
                /*
                 * Don't worry about sign mismatch as earlier mapping
                 * logic would have thrown a bad address error.
                 */
                ret = MIN(strlen(exec_path), arg4);
                /* We cannot NUL terminate the string. */
                memcpy(p2, exec_path, ret);
            } else {
                ret = get_errno(readlinkat(arg1, path(p), p2, arg4));
            }
            unlock_user(p2, arg3, ret);
            unlock_user(p, arg2, 0);
        }
        return ret;
#endif
#ifdef TARGET_NR_swapon
    case TARGET_NR_swapon:
        if (!(p = lock_user_string(arg1)))
            return -TARGET_EFAULT;
        ret = get_errno(swapon(p, arg2));
        unlock_user(p, arg1, 0);
        return ret;
#endif
    case TARGET_NR_reboot:
        if (arg3 == LINUX_REBOOT_CMD_RESTART2) {
            /* arg4 must be ignored in all other cases */
            p = lock_user_string(arg4);
            if (!p) {
                return -TARGET_EFAULT;
            }
            ret = get_errno(reboot(arg1, arg2, arg3, p));
            unlock_user(p, arg4, 0);
        } else {
            ret = get_errno(reboot(arg1, arg2, arg3, NULL));
        }
        return ret;
#ifdef TARGET_NR_mmap
    case TARGET_NR_mmap:
#ifdef TARGET_ARCH_WANT_SYS_OLD_MMAP
        {
            /* Old-style mmap: all six arguments live in a guest-side
             * array pointed to by arg1. */
            abi_ulong *v;
            abi_ulong v1, v2, v3, v4, v5, v6;
            if (!(v = lock_user(VERIFY_READ, arg1, 6 * sizeof(abi_ulong), 1)))
                return -TARGET_EFAULT;
            v1 = tswapal(v[0]);
            v2 = tswapal(v[1]);
            v3 = tswapal(v[2]);
            v4 = tswapal(v[3]);
            v5 = tswapal(v[4]);
            v6 = tswapal(v[5]);
            unlock_user(v, arg1, 0);
            return do_mmap(v1, v2, v3, v4, v5, v6);
        }
#else
        /* mmap pointers are always untagged */
        return do_mmap(arg1, arg2, arg3, arg4, arg5, arg6);
#endif
#endif
#ifdef TARGET_NR_mmap2
    case TARGET_NR_mmap2:
#ifndef MMAP_SHIFT
#define MMAP_SHIFT 12
#endif
        /* mmap2 takes the file offset in units of 1 << MMAP_SHIFT. */
        return do_mmap(arg1, arg2, arg3, arg4, arg5,
                       (off_t)(abi_ulong)arg6 << MMAP_SHIFT);
#endif
    case TARGET_NR_munmap:
        arg1 = cpu_untagged_addr(cpu, arg1);
        return get_errno(target_munmap(arg1, arg2));
    case TARGET_NR_mprotect:
        arg1 = cpu_untagged_addr(cpu, arg1);
        {
            TaskState *ts = get_task_state(cpu);
            /* Special hack to detect libc making the stack executable. */
            if ((arg3 & PROT_GROWSDOWN)
                && arg1 >= ts->info->stack_limit
                && arg1 <= ts->info->start_stack) {
                /* Expand the range down to the stack limit ourselves
                 * instead of passing PROT_GROWSDOWN to the host. */
                arg3 &= ~PROT_GROWSDOWN;
                arg2 = arg2 + arg1 - ts->info->stack_limit;
                arg1 = ts->info->stack_limit;
            }
        }
        return get_errno(target_mprotect(arg1, arg2, arg3));
#ifdef TARGET_NR_mremap
    case TARGET_NR_mremap:
        arg1 = cpu_untagged_addr(cpu, arg1);
        /* mremap new_addr (arg5) is always untagged */
        return get_errno(target_mremap(arg1, arg2, arg3, arg4, arg5));
#endif
    /* ??? msync/mlock/munlock are broken for softmmu. */
#ifdef TARGET_NR_msync
    case TARGET_NR_msync:
        /* Translate the guest address to host and the msync flags. */
        return get_errno(msync(g2h(cpu, arg1), arg2,
                               target_to_host_msync_arg(arg3)));
#endif
#ifdef TARGET_NR_mlock
    case TARGET_NR_mlock:
        return get_errno(mlock(g2h(cpu, arg1), arg2));
#endif
#ifdef TARGET_NR_munlock
    case TARGET_NR_munlock:
        return get_errno(munlock(g2h(cpu, arg1), arg2));
#endif
#ifdef TARGET_NR_mlockall
    case TARGET_NR_mlockall:
        return get_errno(mlockall(target_to_host_mlockall_arg(arg1)));
#endif
#ifdef TARGET_NR_munlockall
    case TARGET_NR_munlockall:
        return get_errno(munlockall());
#endif
#ifdef TARGET_NR_truncate
    case TARGET_NR_truncate:
        if (!(p = lock_user_string(arg1)))
            return -TARGET_EFAULT;
        ret = get_errno(truncate(p, arg2));
        unlock_user(p, arg1, 0);
        return ret;
#endif
#ifdef TARGET_NR_ftruncate
    case TARGET_NR_ftruncate:
        return get_errno(ftruncate(arg1, arg2));
#endif
    case TARGET_NR_fchmod:
        return get_errno(fchmod(arg1, arg2));
#if defined(TARGET_NR_fchmodat)
    case TARGET_NR_fchmodat:
        if (!(p = lock_user_string(arg2)))
            return -TARGET_EFAULT;
        /* Flags argument is always 0 here. */
        ret = get_errno(fchmodat(arg1, p, arg3, 0));
        unlock_user(p, arg2, 0);
        return ret;
#endif
    case TARGET_NR_getpriority:
        /* Note that negative values are valid for getpriority, so we must
           differentiate based on errno settings. */
        errno = 0;
        ret = getpriority(arg1, arg2);
        if (ret == -1 && errno != 0) {
            return -host_to_target_errno(errno);
        }
#ifdef TARGET_ALPHA
        /* Return value is the unbiased priority. Signal no error. */
        cpu_env->ir[IR_V0] = 0;
#else
        /* Return value is a biased priority to avoid negative numbers. */
        ret = 20 - ret;
#endif
        return ret;
    case TARGET_NR_setpriority:
        return get_errno(setpriority(arg1, arg2, arg3));
#ifdef TARGET_NR_statfs
    case TARGET_NR_statfs:
        if (!(p = lock_user_string(arg1))) {
            return -TARGET_EFAULT;
        }
        ret = get_errno(statfs(path(p), &stfs));
        unlock_user(p, arg1, 0);
    convert_statfs:
        /* Shared tail (also reached via goto from fstatfs below): copy
         * the host statfs result to the target_statfs at arg2. */
        if (!is_error(ret)) {
            struct target_statfs *target_stfs;

            if (!lock_user_struct(VERIFY_WRITE, target_stfs, arg2, 0))
                return -TARGET_EFAULT;
            __put_user(stfs.f_type, &target_stfs->f_type);
            __put_user(stfs.f_bsize, &target_stfs->f_bsize);
            __put_user(stfs.f_blocks, &target_stfs->f_blocks);
            __put_user(stfs.f_bfree, &target_stfs->f_bfree);
            __put_user(stfs.f_bavail, &target_stfs->f_bavail);
            __put_user(stfs.f_files, &target_stfs->f_files);
            __put_user(stfs.f_ffree, &target_stfs->f_ffree);
            __put_user(stfs.f_fsid.__val[0], &target_stfs->f_fsid.val[0]);
            __put_user(stfs.f_fsid.__val[1], &target_stfs->f_fsid.val[1]);
            __put_user(stfs.f_namelen, &target_stfs->f_namelen);
            __put_user(stfs.f_frsize, &target_stfs->f_frsize);
#ifdef _STATFS_F_FLAGS
            __put_user(stfs.f_flags, &target_stfs->f_flags);
#else
            /* Host statfs has no f_flags member: report zero. */
            __put_user(0, &target_stfs->f_flags);
#endif
            memset(target_stfs->f_spare, 0, sizeof(target_stfs->f_spare));
            unlock_user_struct(target_stfs, arg2, 1);
        }
        return ret;
#endif
#ifdef TARGET_NR_fstatfs
    case TARGET_NR_fstatfs:
        ret = get_errno(fstatfs(arg1, &stfs));
        goto convert_statfs;
#endif
#ifdef TARGET_NR_statfs64
    case TARGET_NR_statfs64:
        if (!(p = lock_user_string(arg1))) {
            return -TARGET_EFAULT;
        }
        ret = get_errno(statfs(path(p), &stfs));
        unlock_user(p, arg1, 0);
    convert_statfs64:
        /* Shared tail (also reached via goto from fstatfs64 below): the
         * 64-bit variant writes its output buffer at arg3, not arg2. */
        if (!is_error(ret)) {
            struct target_statfs64 *target_stfs;

            if (!lock_user_struct(VERIFY_WRITE, target_stfs, arg3, 0))
                return -TARGET_EFAULT;
            __put_user(stfs.f_type, &target_stfs->f_type);
            __put_user(stfs.f_bsize, &target_stfs->f_bsize);
            __put_user(stfs.f_blocks, &target_stfs->f_blocks);
            __put_user(stfs.f_bfree, &target_stfs->f_bfree);
            __put_user(stfs.f_bavail, &target_stfs->f_bavail);
            __put_user(stfs.f_files, &target_stfs->f_files);
            __put_user(stfs.f_ffree, &target_stfs->f_ffree);
            __put_user(stfs.f_fsid.__val[0], &target_stfs->f_fsid.val[0]);
            __put_user(stfs.f_fsid.__val[1], &target_stfs->f_fsid.val[1]);
            __put_user(stfs.f_namelen, &target_stfs->f_namelen);
            __put_user(stfs.f_frsize, &target_stfs->f_frsize);
#ifdef _STATFS_F_FLAGS
            __put_user(stfs.f_flags, &target_stfs->f_flags);
#else
            __put_user(0, &target_stfs->f_flags);
#endif
            memset(target_stfs->f_spare, 0, sizeof(target_stfs->f_spare));
            unlock_user_struct(target_stfs, arg3, 1);
        }
        return ret;
    case TARGET_NR_fstatfs64:
        ret = get_errno(fstatfs(arg1, &stfs));
        goto convert_statfs64;
#endif
    /* Socket syscalls: thin dispatchers into the do_* helpers, which
     * perform the guest/host sockaddr and msghdr conversions. */
#ifdef TARGET_NR_socketcall
    case TARGET_NR_socketcall:
        /* Multiplexed socket entry point: arg2 points at the real args. */
        return do_socketcall(arg1, arg2);
#endif
#ifdef TARGET_NR_accept
    case TARGET_NR_accept:
        /* Plain accept is accept4 with no flags. */
        return do_accept4(arg1, arg2, arg3, 0);
#endif
#ifdef TARGET_NR_accept4
    case TARGET_NR_accept4:
        return do_accept4(arg1, arg2, arg3, arg4);
#endif
#ifdef TARGET_NR_bind
    case TARGET_NR_bind:
        return do_bind(arg1, arg2, arg3);
#endif
#ifdef TARGET_NR_connect
    case TARGET_NR_connect:
        return do_connect(arg1, arg2, arg3);
#endif
#ifdef TARGET_NR_getpeername
    case TARGET_NR_getpeername:
        return do_getpeername(arg1, arg2, arg3);
#endif
#ifdef TARGET_NR_getsockname
    case TARGET_NR_getsockname:
        return do_getsockname(arg1, arg2, arg3);
#endif
#ifdef TARGET_NR_getsockopt
    case TARGET_NR_getsockopt:
        return do_getsockopt(arg1, arg2, arg3, arg4, arg5);
#endif
#ifdef TARGET_NR_listen
    case TARGET_NR_listen:
        return get_errno(listen(arg1, arg2));
#endif
#ifdef TARGET_NR_recv
    case TARGET_NR_recv:
        /* recv is recvfrom with no source address. */
        return do_recvfrom(arg1, arg2, arg3, arg4, 0, 0);
#endif
#ifdef TARGET_NR_recvfrom
    case TARGET_NR_recvfrom:
        return do_recvfrom(arg1, arg2, arg3, arg4, arg5, arg6);
#endif
#ifdef TARGET_NR_recvmsg
    case TARGET_NR_recvmsg:
        return do_sendrecvmsg(arg1, arg2, arg3, 0);
#endif
#ifdef TARGET_NR_send
    case TARGET_NR_send:
        /* send is sendto with no destination address. */
        return do_sendto(arg1, arg2, arg3, arg4, 0, 0);
#endif
#ifdef TARGET_NR_sendmsg
    case TARGET_NR_sendmsg:
        return do_sendrecvmsg(arg1, arg2, arg3, 1);
#endif
#ifdef TARGET_NR_sendmmsg
    case TARGET_NR_sendmmsg:
        return do_sendrecvmmsg(arg1, arg2, arg3, arg4, 1);
#endif
#ifdef TARGET_NR_recvmmsg
    case TARGET_NR_recvmmsg:
        return do_sendrecvmmsg(arg1, arg2, arg3, arg4, 0);
#endif
#ifdef TARGET_NR_sendto
    case TARGET_NR_sendto:
        return do_sendto(arg1, arg2, arg3, arg4, arg5, arg6);
#endif
#ifdef TARGET_NR_shutdown
    case TARGET_NR_shutdown:
        return get_errno(shutdown(arg1, arg2));
#endif
#if defined(TARGET_NR_getrandom) && defined(__NR_getrandom)
    case TARGET_NR_getrandom:
        p = lock_user(VERIFY_WRITE, arg1, arg2, 0);
        if (!p) {
            return -TARGET_EFAULT;
        }
        ret = get_errno(getrandom(p, arg2, arg3));
        unlock_user(p, arg1, ret);
        return ret;
#endif
#ifdef TARGET_NR_socket
    case TARGET_NR_socket:
        return do_socket(arg1, arg2, arg3);
#endif
#ifdef TARGET_NR_socketpair
    case TARGET_NR_socketpair:
        return do_socketpair(arg1, arg2, arg3, arg4);
#endif
#ifdef TARGET_NR_setsockopt
    case TARGET_NR_setsockopt:
        return do_setsockopt(arg1, arg2, arg3, arg4, (socklen_t) arg5);
#endif
  9922. #if defined(TARGET_NR_syslog)
  9923. case TARGET_NR_syslog:
  9924. {
  9925. int len = arg2;
  9926. switch (arg1) {
  9927. case TARGET_SYSLOG_ACTION_CLOSE: /* Close log */
  9928. case TARGET_SYSLOG_ACTION_OPEN: /* Open log */
  9929. case TARGET_SYSLOG_ACTION_CLEAR: /* Clear ring buffer */
  9930. case TARGET_SYSLOG_ACTION_CONSOLE_OFF: /* Disable logging */
  9931. case TARGET_SYSLOG_ACTION_CONSOLE_ON: /* Enable logging */
  9932. case TARGET_SYSLOG_ACTION_CONSOLE_LEVEL: /* Set messages level */
  9933. case TARGET_SYSLOG_ACTION_SIZE_UNREAD: /* Number of chars */
  9934. case TARGET_SYSLOG_ACTION_SIZE_BUFFER: /* Size of the buffer */
  9935. return get_errno(sys_syslog((int)arg1, NULL, (int)arg3));
  9936. case TARGET_SYSLOG_ACTION_READ: /* Read from log */
  9937. case TARGET_SYSLOG_ACTION_READ_CLEAR: /* Read/clear msgs */
  9938. case TARGET_SYSLOG_ACTION_READ_ALL: /* Read last messages */
  9939. {
  9940. if (len < 0) {
  9941. return -TARGET_EINVAL;
  9942. }
  9943. if (len == 0) {
  9944. return 0;
  9945. }
  9946. p = lock_user(VERIFY_WRITE, arg2, arg3, 0);
  9947. if (!p) {
  9948. return -TARGET_EFAULT;
  9949. }
  9950. ret = get_errno(sys_syslog((int)arg1, p, (int)arg3));
  9951. unlock_user(p, arg2, arg3);
  9952. }
  9953. return ret;
  9954. default:
  9955. return -TARGET_EINVAL;
  9956. }
  9957. }
  9958. break;
  9959. #endif
    case TARGET_NR_setitimer:
        {
            struct itimerval value, ovalue, *pvalue;

            if (arg2) {
                pvalue = &value;
                /* it_interval and it_value are consecutive target_timevals. */
                if (copy_from_user_timeval(&pvalue->it_interval, arg2)
                    || copy_from_user_timeval(&pvalue->it_value,
                                              arg2 + sizeof(struct target_timeval)))
                    return -TARGET_EFAULT;
            } else {
                pvalue = NULL;
            }
            ret = get_errno(setitimer(arg1, pvalue, &ovalue));
            if (!is_error(ret) && arg3) {
                /* Optionally return the previous timer value at arg3. */
                if (copy_to_user_timeval(arg3,
                                         &ovalue.it_interval)
                    || copy_to_user_timeval(arg3 + sizeof(struct target_timeval),
                                            &ovalue.it_value))
                    return -TARGET_EFAULT;
            }
        }
        return ret;
    case TARGET_NR_getitimer:
        {
            struct itimerval value;

            ret = get_errno(getitimer(arg1, &value));
            if (!is_error(ret) && arg2) {
                if (copy_to_user_timeval(arg2,
                                         &value.it_interval)
                    || copy_to_user_timeval(arg2 + sizeof(struct target_timeval),
                                            &value.it_value))
                    return -TARGET_EFAULT;
            }
        }
        return ret;
#ifdef TARGET_NR_stat
    case TARGET_NR_stat:
        if (!(p = lock_user_string(arg1))) {
            return -TARGET_EFAULT;
        }
        ret = get_errno(stat(path(p), &st));
        unlock_user(p, arg1, 0);
        goto do_stat;
#endif
#ifdef TARGET_NR_lstat
    case TARGET_NR_lstat:
        if (!(p = lock_user_string(arg1))) {
            return -TARGET_EFAULT;
        }
        ret = get_errno(lstat(path(p), &st));
        unlock_user(p, arg1, 0);
        goto do_stat;
#endif
#ifdef TARGET_NR_fstat
    case TARGET_NR_fstat:
        {
            ret = get_errno(fstat(arg1, &st));
#if defined(TARGET_NR_stat) || defined(TARGET_NR_lstat)
        do_stat:
#endif
            /* Shared tail (also reached via goto from stat/lstat above):
             * copy the host struct stat into the target_stat at arg2. */
            if (!is_error(ret)) {
                struct target_stat *target_st;

                if (!lock_user_struct(VERIFY_WRITE, target_st, arg2, 0))
                    return -TARGET_EFAULT;
                memset(target_st, 0, sizeof(*target_st));
                __put_user(st.st_dev, &target_st->st_dev);
                __put_user(st.st_ino, &target_st->st_ino);
                __put_user(st.st_mode, &target_st->st_mode);
                __put_user(st.st_uid, &target_st->st_uid);
                __put_user(st.st_gid, &target_st->st_gid);
                __put_user(st.st_nlink, &target_st->st_nlink);
                __put_user(st.st_rdev, &target_st->st_rdev);
                __put_user(st.st_size, &target_st->st_size);
                __put_user(st.st_blksize, &target_st->st_blksize);
                __put_user(st.st_blocks, &target_st->st_blocks);
                __put_user(st.st_atime, &target_st->target_st_atime);
                __put_user(st.st_mtime, &target_st->target_st_mtime);
                __put_user(st.st_ctime, &target_st->target_st_ctime);
#if defined(HAVE_STRUCT_STAT_ST_ATIM) && defined(TARGET_STAT_HAVE_NSEC)
                /* Nanosecond timestamps when both host and target have them. */
                __put_user(st.st_atim.tv_nsec,
                           &target_st->target_st_atime_nsec);
                __put_user(st.st_mtim.tv_nsec,
                           &target_st->target_st_mtime_nsec);
                __put_user(st.st_ctim.tv_nsec,
                           &target_st->target_st_ctime_nsec);
#endif
                unlock_user_struct(target_st, arg2, 1);
            }
        }
        return ret;
#endif
    case TARGET_NR_vhangup:
        return get_errno(vhangup());
#ifdef TARGET_NR_syscall
    case TARGET_NR_syscall:
        /* Indirect syscall: re-dispatch with the number in arg1 (low 16
         * bits) and the remaining registers shifted down by one.  */
        return do_syscall(cpu_env, arg1 & 0xffff, arg2, arg3, arg4, arg5,
                          arg6, arg7, arg8, 0);
#endif
#if defined(TARGET_NR_wait4)
    case TARGET_NR_wait4:
        {
            int status;
            abi_long status_ptr = arg2;
            struct rusage rusage, *rusage_ptr;
            abi_ulong target_rusage = arg4;
            abi_long rusage_err;
            /* Only ask the host for rusage if the guest wants it.  */
            if (target_rusage)
                rusage_ptr = &rusage;
            else
                rusage_ptr = NULL;
            ret = get_errno(safe_wait4(arg1, &status, arg3, rusage_ptr));
            if (!is_error(ret)) {
                /* ret == 0 (WNOHANG, nothing exited) leaves *status
                 * unwritten, hence the "&& ret" guard.  */
                if (status_ptr && ret) {
                    status = host_to_target_waitstatus(status);
                    if (put_user_s32(status, status_ptr))
                        return -TARGET_EFAULT;
                }
                if (target_rusage) {
                    rusage_err = host_to_target_rusage(target_rusage, &rusage);
                    if (rusage_err) {
                        ret = rusage_err;
                    }
                }
            }
        }
        return ret;
#endif
#ifdef TARGET_NR_swapoff
    case TARGET_NR_swapoff:
        if (!(p = lock_user_string(arg1)))
            return -TARGET_EFAULT;
        ret = get_errno(swapoff(p));
        unlock_user(p, arg1, 0);
        return ret;
#endif
    case TARGET_NR_sysinfo:
        {
            struct target_sysinfo *target_value;
            struct sysinfo value;
            ret = get_errno(sysinfo(&value));
            /* Copy out field by field to byte-swap for the target.  */
            if (!is_error(ret) && arg1)
            {
                if (!lock_user_struct(VERIFY_WRITE, target_value, arg1, 0))
                    return -TARGET_EFAULT;
                __put_user(value.uptime, &target_value->uptime);
                __put_user(value.loads[0], &target_value->loads[0]);
                __put_user(value.loads[1], &target_value->loads[1]);
                __put_user(value.loads[2], &target_value->loads[2]);
                __put_user(value.totalram, &target_value->totalram);
                __put_user(value.freeram, &target_value->freeram);
                __put_user(value.sharedram, &target_value->sharedram);
                __put_user(value.bufferram, &target_value->bufferram);
                __put_user(value.totalswap, &target_value->totalswap);
                __put_user(value.freeswap, &target_value->freeswap);
                __put_user(value.procs, &target_value->procs);
                __put_user(value.totalhigh, &target_value->totalhigh);
                __put_user(value.freehigh, &target_value->freehigh);
                __put_user(value.mem_unit, &target_value->mem_unit);
                unlock_user_struct(target_value, arg1, 1);
            }
        }
        return ret;
#ifdef TARGET_NR_ipc
    case TARGET_NR_ipc:
        /* Multiplexed SysV IPC entry point used by some targets.  */
        return do_ipc(cpu_env, arg1, arg2, arg3, arg4, arg5, arg6);
#endif
/*
 * SysV IPC syscalls for targets that have direct (non-multiplexed)
 * entry points.  Each case is a thin dispatch to either the host
 * syscall or a do_* helper that handles the argument translation.
 */
#ifdef TARGET_NR_semget
    case TARGET_NR_semget:
        return get_errno(semget(arg1, arg2, arg3));
#endif
#ifdef TARGET_NR_semop
    case TARGET_NR_semop:
        /* semop == semtimedop with no timeout.  */
        return do_semtimedop(arg1, arg2, arg3, 0, false);
#endif
#ifdef TARGET_NR_semtimedop
    case TARGET_NR_semtimedop:
        return do_semtimedop(arg1, arg2, arg3, arg4, false);
#endif
#ifdef TARGET_NR_semtimedop_time64
    case TARGET_NR_semtimedop_time64:
        /* Final flag selects 64-bit time_t timespec conversion.  */
        return do_semtimedop(arg1, arg2, arg3, arg4, true);
#endif
#ifdef TARGET_NR_semctl
    case TARGET_NR_semctl:
        return do_semctl(arg1, arg2, arg3, arg4);
#endif
#ifdef TARGET_NR_msgctl
    case TARGET_NR_msgctl:
        return do_msgctl(arg1, arg2, arg3);
#endif
#ifdef TARGET_NR_msgget
    case TARGET_NR_msgget:
        return get_errno(msgget(arg1, arg2));
#endif
#ifdef TARGET_NR_msgrcv
    case TARGET_NR_msgrcv:
        return do_msgrcv(arg1, arg2, arg3, arg4, arg5);
#endif
#ifdef TARGET_NR_msgsnd
    case TARGET_NR_msgsnd:
        return do_msgsnd(arg1, arg2, arg3, arg4);
#endif
#ifdef TARGET_NR_shmget
    case TARGET_NR_shmget:
        return get_errno(shmget(arg1, arg2, arg3));
#endif
#ifdef TARGET_NR_shmctl
    case TARGET_NR_shmctl:
        return do_shmctl(arg1, arg2, arg3);
#endif
#ifdef TARGET_NR_shmat
    case TARGET_NR_shmat:
        /* Needs cpu_env for guest address-space placement.  */
        return target_shmat(cpu_env, arg1, arg2, arg3);
#endif
#ifdef TARGET_NR_shmdt
    case TARGET_NR_shmdt:
        return target_shmdt(arg1);
#endif
    case TARGET_NR_fsync:
        return get_errno(fsync(arg1));
    case TARGET_NR_clone:
        /* Linux manages to have three different orderings for its
         * arguments to clone(); the BACKWARDS and BACKWARDS2 defines
         * match the kernel's CONFIG_CLONE_* settings.
         * Microblaze is further special in that it uses a sixth
         * implicit argument to clone for the TLS pointer.
         */
#if defined(TARGET_MICROBLAZE)
        ret = get_errno(do_fork(cpu_env, arg1, arg2, arg4, arg6, arg5));
#elif defined(TARGET_CLONE_BACKWARDS)
        ret = get_errno(do_fork(cpu_env, arg1, arg2, arg3, arg4, arg5));
#elif defined(TARGET_CLONE_BACKWARDS2)
        /* flags and newsp are swapped relative to the default order.  */
        ret = get_errno(do_fork(cpu_env, arg2, arg1, arg3, arg5, arg4));
#else
        ret = get_errno(do_fork(cpu_env, arg1, arg2, arg3, arg5, arg4));
#endif
        return ret;
#ifdef __NR_exit_group
        /* new thread calls */
    case TARGET_NR_exit_group:
        /* Run gdbstub/atexit-style cleanup before the whole thread
         * group goes away; exit_group() does not return on success.  */
        preexit_cleanup(cpu_env, arg1);
        return get_errno(exit_group(arg1));
#endif
    case TARGET_NR_setdomainname:
        if (!(p = lock_user_string(arg1)))
            return -TARGET_EFAULT;
        ret = get_errno(setdomainname(p, arg2));
        unlock_user(p, arg1, 0);
        return ret;
    case TARGET_NR_uname:
        /* no need to transcode because we use the linux syscall */
        {
            struct new_utsname * buf;
            if (!lock_user_struct(VERIFY_WRITE, buf, arg1, 0))
                return -TARGET_EFAULT;
            ret = get_errno(sys_uname(buf));
            if (!is_error(ret)) {
                /* Overwrite the native machine name with whatever is being
                   emulated. */
                g_strlcpy(buf->machine, cpu_to_uname_machine(cpu_env),
                          sizeof(buf->machine));
                /* Allow the user to override the reported release. */
                if (qemu_uname_release && *qemu_uname_release) {
                    g_strlcpy(buf->release, qemu_uname_release,
                              sizeof(buf->release));
                }
            }
            unlock_user_struct(buf, arg1, 1);
        }
        return ret;
#ifdef TARGET_I386
    case TARGET_NR_modify_ldt:
        return do_modify_ldt(cpu_env, arg1, arg2, arg3);
#if !defined(TARGET_X86_64)
    case TARGET_NR_vm86:
        /* 32-bit x86 only: enter virtual-8086 mode emulation.  */
        return do_vm86(cpu_env, arg1, arg2);
#endif
#endif
#if defined(TARGET_NR_adjtimex)
    case TARGET_NR_adjtimex:
        {
            struct timex host_buf;
            /* struct timex is read-modify-write: convert in, call,
             * convert the updated struct back out on success.  */
            if (target_to_host_timex(&host_buf, arg1) != 0) {
                return -TARGET_EFAULT;
            }
            ret = get_errno(adjtimex(&host_buf));
            if (!is_error(ret)) {
                if (host_to_target_timex(arg1, &host_buf) != 0) {
                    return -TARGET_EFAULT;
                }
            }
        }
        return ret;
#endif
#if defined(TARGET_NR_clock_adjtime) && defined(CONFIG_CLOCK_ADJTIME)
    case TARGET_NR_clock_adjtime:
        {
            struct timex htx;
            if (target_to_host_timex(&htx, arg2) != 0) {
                return -TARGET_EFAULT;
            }
            ret = get_errno(clock_adjtime(arg1, &htx));
            if (!is_error(ret) && host_to_target_timex(arg2, &htx)) {
                return -TARGET_EFAULT;
            }
        }
        return ret;
#endif
#if defined(TARGET_NR_clock_adjtime64) && defined(CONFIG_CLOCK_ADJTIME)
    case TARGET_NR_clock_adjtime64:
        /* Same as clock_adjtime but with 64-bit time_t conversions.  */
        {
            struct timex htx;
            if (target_to_host_timex64(&htx, arg2) != 0) {
                return -TARGET_EFAULT;
            }
            ret = get_errno(clock_adjtime(arg1, &htx));
            if (!is_error(ret) && host_to_target_timex64(arg2, &htx)) {
                return -TARGET_EFAULT;
            }
        }
        return ret;
#endif
    case TARGET_NR_getpgid:
        return get_errno(getpgid(arg1));
    case TARGET_NR_fchdir:
        return get_errno(fchdir(arg1));
    case TARGET_NR_personality:
        return get_errno(personality(arg1));
#ifdef TARGET_NR__llseek /* Not on alpha */
    case TARGET_NR__llseek:
        {
            int64_t res;
            /* Hosts without a native _llseek (64-bit hosts) emulate it
             * with plain lseek on the combined 64-bit offset.  */
#if !defined(__NR_llseek)
            res = lseek(arg1, ((uint64_t)arg2 << 32) | (abi_ulong)arg3, arg5);
            if (res == -1) {
                ret = get_errno(res);
            } else {
                ret = 0;
            }
#else
            ret = get_errno(_llseek(arg1, arg2, arg3, &res, arg5));
#endif
            /* On success write the resulting offset to *arg4.  */
            if ((ret == 0) && put_user_s64(res, arg4)) {
                return -TARGET_EFAULT;
            }
        }
        return ret;
#endif
#ifdef TARGET_NR_getdents
    case TARGET_NR_getdents:
        return do_getdents(arg1, arg2, arg3);
#endif /* TARGET_NR_getdents */
#if defined(TARGET_NR_getdents64) && defined(__NR_getdents64)
    case TARGET_NR_getdents64:
        return do_getdents64(arg1, arg2, arg3);
#endif /* TARGET_NR_getdents64 */
#if defined(TARGET_NR__newselect)
    case TARGET_NR__newselect:
        return do_select(arg1, arg2, arg3, arg4, arg5);
#endif
#ifdef TARGET_NR_poll
    case TARGET_NR_poll:
        /* Flags select ppoll semantics and 64-bit time_t handling.  */
        return do_ppoll(arg1, arg2, arg3, arg4, arg5, false, false);
#endif
#ifdef TARGET_NR_ppoll
    case TARGET_NR_ppoll:
        return do_ppoll(arg1, arg2, arg3, arg4, arg5, true, false);
#endif
#ifdef TARGET_NR_ppoll_time64
    case TARGET_NR_ppoll_time64:
        return do_ppoll(arg1, arg2, arg3, arg4, arg5, true, true);
#endif
    case TARGET_NR_flock:
        /* NOTE: the flock constant seems to be the same for every
           Linux platform */
        return get_errno(safe_flock(arg1, arg2));
    case TARGET_NR_readv:
        {
            /* lock_iovec translates the guest iovec array and maps each
             * buffer; NULL means translation failed and errno is set.  */
            struct iovec *vec = lock_iovec(VERIFY_WRITE, arg2, arg3, 0);
            if (vec != NULL) {
                ret = get_errno(safe_readv(arg1, vec, arg3));
                unlock_iovec(vec, arg2, arg3, 1);
            } else {
                ret = -host_to_target_errno(errno);
            }
        }
        return ret;
    case TARGET_NR_writev:
        {
            struct iovec *vec = lock_iovec(VERIFY_READ, arg2, arg3, 1);
            if (vec != NULL) {
                ret = get_errno(safe_writev(arg1, vec, arg3));
                unlock_iovec(vec, arg2, arg3, 0);
            } else {
                ret = -host_to_target_errno(errno);
            }
        }
        return ret;
#if defined(TARGET_NR_preadv)
    case TARGET_NR_preadv:
        {
            struct iovec *vec = lock_iovec(VERIFY_WRITE, arg2, arg3, 0);
            if (vec != NULL) {
                unsigned long low, high;
                /* The 64-bit file offset arrives as two registers; their
                 * order/packing is target-specific.  */
                target_to_host_low_high(arg4, arg5, &low, &high);
                ret = get_errno(safe_preadv(arg1, vec, arg3, low, high));
                unlock_iovec(vec, arg2, arg3, 1);
            } else {
                ret = -host_to_target_errno(errno);
            }
        }
        return ret;
#endif
#if defined(TARGET_NR_pwritev)
    case TARGET_NR_pwritev:
        {
            struct iovec *vec = lock_iovec(VERIFY_READ, arg2, arg3, 1);
            if (vec != NULL) {
                unsigned long low, high;
                target_to_host_low_high(arg4, arg5, &low, &high);
                ret = get_errno(safe_pwritev(arg1, vec, arg3, low, high));
                unlock_iovec(vec, arg2, arg3, 0);
            } else {
                ret = -host_to_target_errno(errno);
            }
        }
        return ret;
#endif
    case TARGET_NR_getsid:
        return get_errno(getsid(arg1));
#if defined(TARGET_NR_fdatasync) /* Not on alpha (osf_datasync ?) */
    case TARGET_NR_fdatasync:
        return get_errno(fdatasync(arg1));
#endif
    case TARGET_NR_sched_getaffinity:
        {
            unsigned int mask_size;
            unsigned long *mask;
            /*
             * sched_getaffinity needs multiples of ulong, so need to take
             * care of mismatches between target ulong and host ulong sizes.
             */
            if (arg2 & (sizeof(abi_ulong) - 1)) {
                return -TARGET_EINVAL;
            }
            /* Round the guest size up to a whole number of host ulongs.  */
            mask_size = (arg2 + (sizeof(*mask) - 1)) & ~(sizeof(*mask) - 1);
            mask = alloca(mask_size);
            memset(mask, 0, mask_size);
            ret = get_errno(sys_sched_getaffinity(arg1, mask_size, mask));
            if (!is_error(ret)) {
                if (ret > arg2) {
                    /* More data returned than the caller's buffer will fit.
                     * This only happens if sizeof(abi_long) < sizeof(long)
                     * and the caller passed us a buffer holding an odd number
                     * of abi_longs. If the host kernel is actually using the
                     * extra 4 bytes then fail EINVAL; otherwise we can just
                     * ignore them and only copy the interesting part.
                     */
                    int numcpus = sysconf(_SC_NPROCESSORS_CONF);
                    if (numcpus > arg2 * 8) {
                        return -TARGET_EINVAL;
                    }
                    ret = arg2;
                }
                if (host_to_target_cpu_mask(mask, mask_size, arg3, ret)) {
                    return -TARGET_EFAULT;
                }
            }
        }
        return ret;
    case TARGET_NR_sched_setaffinity:
        {
            unsigned int mask_size;
            unsigned long *mask;
            /*
             * sched_setaffinity needs multiples of ulong, so need to take
             * care of mismatches between target ulong and host ulong sizes.
             */
            if (arg2 & (sizeof(abi_ulong) - 1)) {
                return -TARGET_EINVAL;
            }
            /* Round up to a whole number of host ulongs for the kernel.  */
            mask_size = (arg2 + (sizeof(*mask) - 1)) & ~(sizeof(*mask) - 1);
            mask = alloca(mask_size);
            ret = target_to_host_cpu_mask(mask, mask_size, arg3, arg2);
            if (ret) {
                return ret;
            }
            return get_errno(sys_sched_setaffinity(arg1, mask_size, mask));
        }
    case TARGET_NR_getcpu:
        {
            unsigned cpuid, node;
            /* Guest may pass NULL for either out-pointer; only query what
             * it asked for and only copy back what was queried.  */
            ret = get_errno(sys_getcpu(arg1 ? &cpuid : NULL,
                                       arg2 ? &node : NULL,
                                       NULL));
            if (is_error(ret)) {
                return ret;
            }
            if (arg1 && put_user_u32(cpuid, arg1)) {
                return -TARGET_EFAULT;
            }
            if (arg2 && put_user_u32(node, arg2)) {
                return -TARGET_EFAULT;
            }
        }
        return ret;
    case TARGET_NR_sched_setparam:
        {
            struct target_sched_param *target_schp;
            struct sched_param schp;
            /* The kernel rejects a NULL param pointer with EINVAL, not
             * EFAULT, so check it explicitly before locking.  */
            if (arg2 == 0) {
                return -TARGET_EINVAL;
            }
            if (!lock_user_struct(VERIFY_READ, target_schp, arg2, 1)) {
                return -TARGET_EFAULT;
            }
            schp.sched_priority = tswap32(target_schp->sched_priority);
            unlock_user_struct(target_schp, arg2, 0);
            return get_errno(sys_sched_setparam(arg1, &schp));
        }
    case TARGET_NR_sched_getparam:
        {
            struct target_sched_param *target_schp;
            struct sched_param schp;
            if (arg2 == 0) {
                return -TARGET_EINVAL;
            }
            ret = get_errno(sys_sched_getparam(arg1, &schp));
            if (!is_error(ret)) {
                if (!lock_user_struct(VERIFY_WRITE, target_schp, arg2, 0)) {
                    return -TARGET_EFAULT;
                }
                target_schp->sched_priority = tswap32(schp.sched_priority);
                unlock_user_struct(target_schp, arg2, 1);
            }
        }
        return ret;
    case TARGET_NR_sched_setscheduler:
        {
            struct target_sched_param *target_schp;
            struct sched_param schp;
            /* Here the param pointer is arg3 (arg2 is the policy).  */
            if (arg3 == 0) {
                return -TARGET_EINVAL;
            }
            if (!lock_user_struct(VERIFY_READ, target_schp, arg3, 1)) {
                return -TARGET_EFAULT;
            }
            schp.sched_priority = tswap32(target_schp->sched_priority);
            unlock_user_struct(target_schp, arg3, 0);
            return get_errno(sys_sched_setscheduler(arg1, arg2, &schp));
        }
    case TARGET_NR_sched_getscheduler:
        return get_errno(sys_sched_getscheduler(arg1));
    case TARGET_NR_sched_getattr:
        {
            struct target_sched_attr *target_scha;
            struct sched_attr scha;
            if (arg2 == 0) {
                return -TARGET_EINVAL;
            }
            /* Clamp the guest-supplied size to what we can hold.  */
            if (arg3 > sizeof(scha)) {
                arg3 = sizeof(scha);
            }
            ret = get_errno(sys_sched_getattr(arg1, &scha, arg3, arg4));
            if (!is_error(ret)) {
                target_scha = lock_user(VERIFY_WRITE, arg2, arg3, 0);
                if (!target_scha) {
                    return -TARGET_EFAULT;
                }
                target_scha->size = tswap32(scha.size);
                target_scha->sched_policy = tswap32(scha.sched_policy);
                target_scha->sched_flags = tswap64(scha.sched_flags);
                target_scha->sched_nice = tswap32(scha.sched_nice);
                target_scha->sched_priority = tswap32(scha.sched_priority);
                target_scha->sched_runtime = tswap64(scha.sched_runtime);
                target_scha->sched_deadline = tswap64(scha.sched_deadline);
                target_scha->sched_period = tswap64(scha.sched_period);
                /* util_min/util_max only exist in the v2 (larger) struct;
                 * copy them only if the kernel actually filled them.  */
                if (scha.size > offsetof(struct sched_attr, sched_util_min)) {
                    target_scha->sched_util_min = tswap32(scha.sched_util_min);
                    target_scha->sched_util_max = tswap32(scha.sched_util_max);
                }
                unlock_user(target_scha, arg2, arg3);
            }
            return ret;
        }
    case TARGET_NR_sched_setattr:
        {
            struct target_sched_attr *target_scha;
            struct sched_attr scha;
            uint32_t size;
            int zeroed;
            if (arg2 == 0) {
                return -TARGET_EINVAL;
            }
            /* First word of the struct is its own size.  */
            if (get_user_u32(size, arg2)) {
                return -TARGET_EFAULT;
            }
            /* size == 0 means "use the original (v1) struct layout".  */
            if (!size) {
                size = offsetof(struct target_sched_attr, sched_util_min);
            }
            /* Anything smaller than v1 is malformed: report the size we
             * expect back to the guest and fail with E2BIG, as the
             * kernel does.  */
            if (size < offsetof(struct target_sched_attr, sched_util_min)) {
                if (put_user_u32(sizeof(struct target_sched_attr), arg2)) {
                    return -TARGET_EFAULT;
                }
                return -TARGET_E2BIG;
            }
            /* If the guest struct is larger than ours, the tail must be
             * all zero or we cannot represent it (kernel E2BIG rule).  */
            zeroed = check_zeroed_user(arg2, sizeof(struct target_sched_attr), size);
            if (zeroed < 0) {
                return zeroed;
            } else if (zeroed == 0) {
                if (put_user_u32(sizeof(struct target_sched_attr), arg2)) {
                    return -TARGET_EFAULT;
                }
                return -TARGET_E2BIG;
            }
            if (size > sizeof(struct target_sched_attr)) {
                size = sizeof(struct target_sched_attr);
            }
            target_scha = lock_user(VERIFY_READ, arg2, size, 1);
            if (!target_scha) {
                return -TARGET_EFAULT;
            }
            scha.size = size;
            scha.sched_policy = tswap32(target_scha->sched_policy);
            scha.sched_flags = tswap64(target_scha->sched_flags);
            scha.sched_nice = tswap32(target_scha->sched_nice);
            scha.sched_priority = tswap32(target_scha->sched_priority);
            scha.sched_runtime = tswap64(target_scha->sched_runtime);
            scha.sched_deadline = tswap64(target_scha->sched_deadline);
            scha.sched_period = tswap64(target_scha->sched_period);
            /* v2 fields only when the guest actually supplied them.  */
            if (size > offsetof(struct target_sched_attr, sched_util_min)) {
                scha.sched_util_min = tswap32(target_scha->sched_util_min);
                scha.sched_util_max = tswap32(target_scha->sched_util_max);
            }
            unlock_user(target_scha, arg2, 0);
            return get_errno(sys_sched_setattr(arg1, &scha, arg3));
        }
    case TARGET_NR_sched_yield:
        return get_errno(sched_yield());
    case TARGET_NR_sched_get_priority_max:
        return get_errno(sched_get_priority_max(arg1));
    case TARGET_NR_sched_get_priority_min:
        return get_errno(sched_get_priority_min(arg1));
#ifdef TARGET_NR_sched_rr_get_interval
    case TARGET_NR_sched_rr_get_interval:
        {
            struct timespec ts;
            ret = get_errno(sched_rr_get_interval(arg1, &ts));
            if (!is_error(ret)) {
                /* Conversion helper returns 0 or -TARGET_EFAULT.  */
                ret = host_to_target_timespec(arg2, &ts);
            }
        }
        return ret;
#endif
#ifdef TARGET_NR_sched_rr_get_interval_time64
    case TARGET_NR_sched_rr_get_interval_time64:
        /* Same syscall, 64-bit time_t flavour of the out-struct.  */
        {
            struct timespec ts;
            ret = get_errno(sched_rr_get_interval(arg1, &ts));
            if (!is_error(ret)) {
                ret = host_to_target_timespec64(arg2, &ts);
            }
        }
        return ret;
#endif
  10625. #if defined(TARGET_NR_nanosleep)
  10626. case TARGET_NR_nanosleep:
  10627. {
  10628. struct timespec req, rem;
  10629. target_to_host_timespec(&req, arg1);
  10630. ret = get_errno(safe_nanosleep(&req, &rem));
  10631. if (is_error(ret) && arg2) {
  10632. host_to_target_timespec(arg2, &rem);
  10633. }
  10634. }
  10635. return ret;
  10636. #endif
  10637. case TARGET_NR_prctl:
  10638. return do_prctl(cpu_env, arg1, arg2, arg3, arg4, arg5);
  10639. break;
  10640. #ifdef TARGET_NR_arch_prctl
  10641. case TARGET_NR_arch_prctl:
  10642. return do_arch_prctl(cpu_env, arg1, arg2);
  10643. #endif
#ifdef TARGET_NR_pread64
    case TARGET_NR_pread64:
        /* On targets that pass 64-bit values in even/odd register pairs
         * the offset halves shift up by one argument slot.  */
        if (regpairs_aligned(cpu_env, num)) {
            arg4 = arg5;
            arg5 = arg6;
        }
        if (arg2 == 0 && arg3 == 0) {
            /* Special-case NULL buffer and zero length, which should succeed */
            p = 0;
        } else {
            p = lock_user(VERIFY_WRITE, arg2, arg3, 0);
            if (!p) {
                return -TARGET_EFAULT;
            }
        }
        ret = get_errno(pread(arg1, p, arg3, target_offset64(arg4, arg5)));
        /* Copy back only the bytes actually read.  */
        unlock_user(p, arg2, ret);
        return ret;
    case TARGET_NR_pwrite64:
        if (regpairs_aligned(cpu_env, num)) {
            arg4 = arg5;
            arg5 = arg6;
        }
        if (arg2 == 0 && arg3 == 0) {
            /* Special-case NULL buffer and zero length, which should succeed */
            p = 0;
        } else {
            p = lock_user(VERIFY_READ, arg2, arg3, 1);
            if (!p) {
                return -TARGET_EFAULT;
            }
        }
        ret = get_errno(pwrite(arg1, p, arg3, target_offset64(arg4, arg5)));
        /* Read-only buffer: nothing to copy back.  */
        unlock_user(p, arg2, 0);
        return ret;
#endif
    case TARGET_NR_getcwd:
        /* sys_getcwd1 returns the string length incl. NUL on success;
         * copy back exactly that many bytes.  */
        if (!(p = lock_user(VERIFY_WRITE, arg1, arg2, 0)))
            return -TARGET_EFAULT;
        ret = get_errno(sys_getcwd1(p, arg2));
        unlock_user(p, arg1, ret);
        return ret;
    case TARGET_NR_capget:
    case TARGET_NR_capset:
        /* Shared implementation: both syscalls take the same header, and
         * the data direction (in vs out) depends on which one was called.  */
        {
            struct target_user_cap_header *target_header;
            struct target_user_cap_data *target_data = NULL;
            struct __user_cap_header_struct header;
            struct __user_cap_data_struct data[2];
            struct __user_cap_data_struct *dataptr = NULL;
            int i, target_datalen;
            int data_items = 1;
            /* Header is VERIFY_WRITE because the kernel writes the
             * supported version back even on failure.  */
            if (!lock_user_struct(VERIFY_WRITE, target_header, arg1, 1)) {
                return -TARGET_EFAULT;
            }
            header.version = tswap32(target_header->version);
            header.pid = tswap32(target_header->pid);
            if (header.version != _LINUX_CAPABILITY_VERSION) {
                /* Version 2 and up takes pointer to two user_data structs */
                data_items = 2;
            }
            target_datalen = sizeof(*target_data) * data_items;
            if (arg2) {
                if (num == TARGET_NR_capget) {
                    target_data = lock_user(VERIFY_WRITE, arg2, target_datalen, 0);
                } else {
                    target_data = lock_user(VERIFY_READ, arg2, target_datalen, 1);
                }
                if (!target_data) {
                    unlock_user_struct(target_header, arg1, 0);
                    return -TARGET_EFAULT;
                }
                /* capset: convert the guest data into host byte order.  */
                if (num == TARGET_NR_capset) {
                    for (i = 0; i < data_items; i++) {
                        data[i].effective = tswap32(target_data[i].effective);
                        data[i].permitted = tswap32(target_data[i].permitted);
                        data[i].inheritable = tswap32(target_data[i].inheritable);
                    }
                }
                dataptr = data;
            }
            if (num == TARGET_NR_capget) {
                ret = get_errno(capget(&header, dataptr));
            } else {
                ret = get_errno(capset(&header, dataptr));
            }
            /* The kernel always updates version for both capget and capset */
            target_header->version = tswap32(header.version);
            unlock_user_struct(target_header, arg1, 1);
            if (arg2) {
                if (num == TARGET_NR_capget) {
                    /* capget: copy the kernel's answer back to the guest.  */
                    for (i = 0; i < data_items; i++) {
                        target_data[i].effective = tswap32(data[i].effective);
                        target_data[i].permitted = tswap32(data[i].permitted);
                        target_data[i].inheritable = tswap32(data[i].inheritable);
                    }
                    unlock_user(target_data, arg2, target_datalen);
                } else {
                    unlock_user(target_data, arg2, 0);
                }
            }
            return ret;
        }
    case TARGET_NR_sigaltstack:
        return do_sigaltstack(arg1, arg2, cpu_env);
#ifdef CONFIG_SENDFILE
#ifdef TARGET_NR_sendfile
    case TARGET_NR_sendfile:
        /* sendfile with an abi_long offset (32-bit on 32-bit targets).  */
        {
            off_t *offp = NULL;
            off_t off;
            if (arg3) {
                ret = get_user_sal(off, arg3);
                if (is_error(ret)) {
                    return ret;
                }
                offp = &off;
            }
            ret = get_errno(sendfile(arg1, arg2, offp, arg4));
            /* On success the kernel advances *offset; write it back.  */
            if (!is_error(ret) && arg3) {
                abi_long ret2 = put_user_sal(off, arg3);
                if (is_error(ret2)) {
                    ret = ret2;
                }
            }
            return ret;
        }
#endif
#ifdef TARGET_NR_sendfile64
    case TARGET_NR_sendfile64:
        /* Same as sendfile but the offset is always 64-bit.  */
        {
            off_t *offp = NULL;
            off_t off;
            if (arg3) {
                ret = get_user_s64(off, arg3);
                if (is_error(ret)) {
                    return ret;
                }
                offp = &off;
            }
            ret = get_errno(sendfile(arg1, arg2, offp, arg4));
            if (!is_error(ret) && arg3) {
                abi_long ret2 = put_user_s64(off, arg3);
                if (is_error(ret2)) {
                    ret = ret2;
                }
            }
            return ret;
        }
#endif
#endif
#ifdef TARGET_NR_vfork
    case TARGET_NR_vfork:
        /* Implemented on top of do_fork with vfork's clone flags.  */
        return get_errno(do_fork(cpu_env,
                         CLONE_VFORK | CLONE_VM | TARGET_SIGCHLD,
                         0, 0, 0, 0));
#endif
#ifdef TARGET_NR_ugetrlimit
    case TARGET_NR_ugetrlimit:
    {
        struct rlimit rlim;
        int resource = target_to_host_resource(arg1);
        ret = get_errno(getrlimit(resource, &rlim));
        if (!is_error(ret)) {
            struct target_rlimit *target_rlim;
            if (!lock_user_struct(VERIFY_WRITE, target_rlim, arg2, 0))
                return -TARGET_EFAULT;
            /* host_to_target_rlim also maps RLIM_INFINITY.  */
            target_rlim->rlim_cur = host_to_target_rlim(rlim.rlim_cur);
            target_rlim->rlim_max = host_to_target_rlim(rlim.rlim_max);
            unlock_user_struct(target_rlim, arg2, 1);
        }
        return ret;
    }
#endif
#ifdef TARGET_NR_truncate64
    case TARGET_NR_truncate64:
        /* Helper reassembles the 64-bit length from the register pair
         * in the target-specific order.  */
        if (!(p = lock_user_string(arg1)))
            return -TARGET_EFAULT;
        ret = target_truncate64(cpu_env, p, arg2, arg3, arg4);
        unlock_user(p, arg1, 0);
        return ret;
#endif
#ifdef TARGET_NR_ftruncate64
    case TARGET_NR_ftruncate64:
        return target_ftruncate64(cpu_env, arg1, arg2, arg3, arg4);
#endif
#ifdef TARGET_NR_stat64
    case TARGET_NR_stat64:
        if (!(p = lock_user_string(arg1))) {
            return -TARGET_EFAULT;
        }
        ret = get_errno(stat(path(p), &st));
        unlock_user(p, arg1, 0);
        /* host_to_target_stat64 fills the target's struct stat64.  */
        if (!is_error(ret))
            ret = host_to_target_stat64(cpu_env, arg2, &st);
        return ret;
#endif
#ifdef TARGET_NR_lstat64
    case TARGET_NR_lstat64:
        if (!(p = lock_user_string(arg1))) {
            return -TARGET_EFAULT;
        }
        ret = get_errno(lstat(path(p), &st));
        unlock_user(p, arg1, 0);
        if (!is_error(ret))
            ret = host_to_target_stat64(cpu_env, arg2, &st);
        return ret;
#endif
#ifdef TARGET_NR_fstat64
    case TARGET_NR_fstat64:
        ret = get_errno(fstat(arg1, &st));
        if (!is_error(ret))
            ret = host_to_target_stat64(cpu_env, arg2, &st);
        return ret;
#endif
#if (defined(TARGET_NR_fstatat64) || defined(TARGET_NR_newfstatat))
    /* fstatat64 and newfstatat share one implementation; targets define
     * one or the other of the two syscall numbers.  */
#ifdef TARGET_NR_fstatat64
    case TARGET_NR_fstatat64:
#endif
#ifdef TARGET_NR_newfstatat
    case TARGET_NR_newfstatat:
#endif
        if (!(p = lock_user_string(arg2))) {
            return -TARGET_EFAULT;
        }
        ret = get_errno(fstatat(arg1, path(p), &st, arg4));
        unlock_user(p, arg2, 0);
        if (!is_error(ret))
            ret = host_to_target_stat64(cpu_env, arg3, &st);
        return ret;
#endif
#if defined(TARGET_NR_statx)
    case TARGET_NR_statx:
        {
            struct target_statx *target_stx;
            int dirfd = arg1;
            int flags = arg3;
            p = lock_user_string(arg2);
            if (p == NULL) {
                return -TARGET_EFAULT;
            }
            /* Prefer the host's native statx when available.  */
#if defined(__NR_statx)
            {
                /*
                 * It is assumed that struct statx is architecture independent.
                 */
                struct target_statx host_stx;
                int mask = arg4;
                ret = get_errno(sys_statx(dirfd, p, flags, mask, &host_stx));
                if (!is_error(ret)) {
                    if (host_to_target_statx(&host_stx, arg5) != 0) {
                        unlock_user(p, arg2, 0);
                        return -TARGET_EFAULT;
                    }
                }
                /* Only fall through to the fstatat emulation below when
                 * the host kernel lacks statx (ENOSYS).  */
                if (ret != -TARGET_ENOSYS) {
                    unlock_user(p, arg2, 0);
                    return ret;
                }
            }
#endif
            /* Fallback: emulate statx via fstatat; fill whatever fields a
             * plain struct stat can provide, leave the rest zeroed.  */
            ret = get_errno(fstatat(dirfd, path(p), &st, flags));
            unlock_user(p, arg2, 0);
            if (!is_error(ret)) {
                if (!lock_user_struct(VERIFY_WRITE, target_stx, arg5, 0)) {
                    return -TARGET_EFAULT;
                }
                memset(target_stx, 0, sizeof(*target_stx));
                __put_user(major(st.st_dev), &target_stx->stx_dev_major);
                __put_user(minor(st.st_dev), &target_stx->stx_dev_minor);
                __put_user(st.st_ino, &target_stx->stx_ino);
                __put_user(st.st_mode, &target_stx->stx_mode);
                __put_user(st.st_uid, &target_stx->stx_uid);
                __put_user(st.st_gid, &target_stx->stx_gid);
                __put_user(st.st_nlink, &target_stx->stx_nlink);
                __put_user(major(st.st_rdev), &target_stx->stx_rdev_major);
                __put_user(minor(st.st_rdev), &target_stx->stx_rdev_minor);
                __put_user(st.st_size, &target_stx->stx_size);
                __put_user(st.st_blksize, &target_stx->stx_blksize);
                __put_user(st.st_blocks, &target_stx->stx_blocks);
                __put_user(st.st_atime, &target_stx->stx_atime.tv_sec);
                __put_user(st.st_mtime, &target_stx->stx_mtime.tv_sec);
                __put_user(st.st_ctime, &target_stx->stx_ctime.tv_sec);
                unlock_user_struct(target_stx, arg5, 1);
            }
        }
        return ret;
#endif
#ifdef TARGET_NR_lchown
    case TARGET_NR_lchown:
        /* low2high* widen legacy 16-bit uid/gid values to the host's.  */
        if (!(p = lock_user_string(arg1)))
            return -TARGET_EFAULT;
        ret = get_errno(lchown(p, low2highuid(arg2), low2highgid(arg3)));
        unlock_user(p, arg1, 0);
        return ret;
#endif
#ifdef TARGET_NR_getuid
    case TARGET_NR_getuid:
        /* Legacy 16-bit variants: narrow the host id for the target.  */
        return get_errno(high2lowuid(getuid()));
#endif
#ifdef TARGET_NR_getgid
    case TARGET_NR_getgid:
        return get_errno(high2lowgid(getgid()));
#endif
#ifdef TARGET_NR_geteuid
    case TARGET_NR_geteuid:
        return get_errno(high2lowuid(geteuid()));
#endif
#ifdef TARGET_NR_getegid
    case TARGET_NR_getegid:
        return get_errno(high2lowgid(getegid()));
#endif
    case TARGET_NR_setreuid:
        return get_errno(sys_setreuid(low2highuid(arg1), low2highuid(arg2)));
    case TARGET_NR_setregid:
        return get_errno(sys_setregid(low2highgid(arg1), low2highgid(arg2)));
    case TARGET_NR_getgroups:
        { /* the same code as for TARGET_NR_getgroups32 */
            int gidsetsize = arg1;
            target_id *target_grouplist;
            g_autofree gid_t *grouplist = NULL;
            int i;
            if (gidsetsize > NGROUPS_MAX || gidsetsize < 0) {
                return -TARGET_EINVAL;
            }
            /* gidsetsize == 0 queries the count only; no buffer needed.  */
            if (gidsetsize > 0) {
                grouplist = g_try_new(gid_t, gidsetsize);
                if (!grouplist) {
                    return -TARGET_ENOMEM;
                }
            }
            ret = get_errno(getgroups(gidsetsize, grouplist));
            if (!is_error(ret) && gidsetsize > 0) {
                target_grouplist = lock_user(VERIFY_WRITE, arg2,
                                             gidsetsize * sizeof(target_id), 0);
                if (!target_grouplist) {
                    return -TARGET_EFAULT;
                }
                /* ret is the number of groups actually returned.  */
                for (i = 0; i < ret; i++) {
                    target_grouplist[i] = tswapid(high2lowgid(grouplist[i]));
                }
                unlock_user(target_grouplist, arg2,
                            gidsetsize * sizeof(target_id));
            }
            return ret;
        }
  10990. case TARGET_NR_setgroups:
  10991. { /* the same code as for TARGET_NR_setgroups32 */
  10992. int gidsetsize = arg1;
  10993. target_id *target_grouplist;
  10994. g_autofree gid_t *grouplist = NULL;
  10995. int i;
  10996. if (gidsetsize > NGROUPS_MAX || gidsetsize < 0) {
  10997. return -TARGET_EINVAL;
  10998. }
  10999. if (gidsetsize > 0) {
  11000. grouplist = g_try_new(gid_t, gidsetsize);
  11001. if (!grouplist) {
  11002. return -TARGET_ENOMEM;
  11003. }
  11004. target_grouplist = lock_user(VERIFY_READ, arg2,
  11005. gidsetsize * sizeof(target_id), 1);
  11006. if (!target_grouplist) {
  11007. return -TARGET_EFAULT;
  11008. }
  11009. for (i = 0; i < gidsetsize; i++) {
  11010. grouplist[i] = low2highgid(tswapid(target_grouplist[i]));
  11011. }
  11012. unlock_user(target_grouplist, arg2,
  11013. gidsetsize * sizeof(target_id));
  11014. }
  11015. return get_errno(sys_setgroups(gidsetsize, grouplist));
  11016. }
  11017. case TARGET_NR_fchown:
  11018. return get_errno(fchown(arg1, low2highuid(arg2), low2highgid(arg3)));
  11019. #if defined(TARGET_NR_fchownat)
  11020. case TARGET_NR_fchownat:
  11021. if (!(p = lock_user_string(arg2)))
  11022. return -TARGET_EFAULT;
  11023. ret = get_errno(fchownat(arg1, p, low2highuid(arg3),
  11024. low2highgid(arg4), arg5));
  11025. unlock_user(p, arg2, 0);
  11026. return ret;
  11027. #endif
#ifdef TARGET_NR_setresuid
    case TARGET_NR_setresuid:
        return get_errno(sys_setresuid(low2highuid(arg1),
                                       low2highuid(arg2),
                                       low2highuid(arg3)));
#endif
#ifdef TARGET_NR_getresuid
    case TARGET_NR_getresuid:
        {
            uid_t ruid, euid, suid;
            ret = get_errno(getresuid(&ruid, &euid, &suid));
            if (!is_error(ret)) {
                /* Copy all three 16-bit ids out; fail atomically on EFAULT. */
                if (put_user_id(high2lowuid(ruid), arg1)
                    || put_user_id(high2lowuid(euid), arg2)
                    || put_user_id(high2lowuid(suid), arg3))
                    return -TARGET_EFAULT;
            }
        }
        return ret;
#endif
  11048. #ifdef TARGET_NR_getresgid
  11049. case TARGET_NR_setresgid:
  11050. return get_errno(sys_setresgid(low2highgid(arg1),
  11051. low2highgid(arg2),
  11052. low2highgid(arg3)));
  11053. #endif
#ifdef TARGET_NR_getresgid
    case TARGET_NR_getresgid:
        {
            gid_t rgid, egid, sgid;
            ret = get_errno(getresgid(&rgid, &egid, &sgid));
            if (!is_error(ret)) {
                /* Narrow each gid to the target's legacy 16-bit type. */
                if (put_user_id(high2lowgid(rgid), arg1)
                    || put_user_id(high2lowgid(egid), arg2)
                    || put_user_id(high2lowgid(sgid), arg3))
                    return -TARGET_EFAULT;
            }
        }
        return ret;
#endif
#ifdef TARGET_NR_chown
    case TARGET_NR_chown:
        if (!(p = lock_user_string(arg1)))
            return -TARGET_EFAULT;
        ret = get_errno(chown(p, low2highuid(arg2), low2highgid(arg3)));
        unlock_user(p, arg1, 0);
        return ret;
#endif
    case TARGET_NR_setuid:
        return get_errno(sys_setuid(low2highuid(arg1)));
    case TARGET_NR_setgid:
        return get_errno(sys_setgid(low2highgid(arg1)));
    /* setfsuid/setfsgid take the raw value; the host does the clamping. */
    case TARGET_NR_setfsuid:
        return get_errno(setfsuid(arg1));
    case TARGET_NR_setfsgid:
        return get_errno(setfsgid(arg1));
    /* 32-bit id variants: no low/high conversion needed. */
#ifdef TARGET_NR_lchown32
    case TARGET_NR_lchown32:
        if (!(p = lock_user_string(arg1)))
            return -TARGET_EFAULT;
        ret = get_errno(lchown(p, arg2, arg3));
        unlock_user(p, arg1, 0);
        return ret;
#endif
#ifdef TARGET_NR_getuid32
    case TARGET_NR_getuid32:
        return get_errno(getuid());
#endif
#if defined(TARGET_NR_getxuid) && defined(TARGET_ALPHA)
    /* Alpha specific */
    case TARGET_NR_getxuid:
        {
            uid_t euid;
            euid=geteuid();
            /* Alpha returns the effective uid in the secondary register. */
            cpu_env->ir[IR_A4]=euid;
        }
        return get_errno(getuid());
#endif
  11106. #if defined(TARGET_NR_getxgid) && defined(TARGET_ALPHA)
  11107. /* Alpha specific */
  11108. case TARGET_NR_getxgid:
  11109. {
  11110. uid_t egid;
  11111. egid=getegid();
  11112. cpu_env->ir[IR_A4]=egid;
  11113. }
  11114. return get_errno(getgid());
  11115. #endif
#if defined(TARGET_NR_osf_getsysinfo) && defined(TARGET_ALPHA)
    /* Alpha specific */
    case TARGET_NR_osf_getsysinfo:
        ret = -TARGET_EOPNOTSUPP;
        switch (arg1) {
        case TARGET_GSI_IEEE_FP_CONTROL:
            {
                uint64_t fpcr = cpu_alpha_load_fpcr(cpu_env);
                uint64_t swcr = cpu_env->swcr;

                /*
                 * The status bits live in the FPCR (see the setsysinfo
                 * comment); merge them into the software control word
                 * before reporting it to the guest.
                 */
                swcr &= ~SWCR_STATUS_MASK;
                swcr |= (fpcr >> 35) & SWCR_STATUS_MASK;

                if (put_user_u64 (swcr, arg2))
                    return -TARGET_EFAULT;
                ret = 0;
            }
            break;

        /* case GSI_IEEE_STATE_AT_SIGNAL:
             -- Not implemented in linux kernel.
           case GSI_UACPROC:
             -- Retrieves current unaligned access state; not much used.
           case GSI_PROC_TYPE:
             -- Retrieves implver information; surely not used.
           case GSI_GET_HWRPB:
             -- Grabs a copy of the HWRPB; surely not used.
        */
        }
        return ret;
#endif
#if defined(TARGET_NR_osf_setsysinfo) && defined(TARGET_ALPHA)
    /* Alpha specific */
    case TARGET_NR_osf_setsysinfo:
        ret = -TARGET_EOPNOTSUPP;
        switch (arg1) {
        case TARGET_SSI_IEEE_FP_CONTROL:
            {
                uint64_t swcr, fpcr;

                if (get_user_u64 (swcr, arg2)) {
                    return -TARGET_EFAULT;
                }

                /*
                 * The kernel calls swcr_update_status to update the
                 * status bits from the fpcr at every point that it
                 * could be queried.  Therefore, we store the status
                 * bits only in FPCR.
                 */
                cpu_env->swcr = swcr & (SWCR_TRAP_ENABLE_MASK | SWCR_MAP_MASK);

                /* Keep only the rounding-mode field, then fold in the
                   trap-enable bits derived from the new SWCR. */
                fpcr = cpu_alpha_load_fpcr(cpu_env);
                fpcr &= ((uint64_t)FPCR_DYN_MASK << 32);
                fpcr |= alpha_ieee_swcr_to_fpcr(swcr);
                cpu_alpha_store_fpcr(cpu_env, fpcr);
                ret = 0;
            }
            break;

        case TARGET_SSI_IEEE_RAISE_EXCEPTION:
            {
                uint64_t exc, fpcr, fex;

                if (get_user_u64(exc, arg2)) {
                    return -TARGET_EFAULT;
                }
                exc &= SWCR_STATUS_MASK;
                fpcr = cpu_alpha_load_fpcr(cpu_env);

                /* Old exceptions are not signaled.  */
                fex = alpha_ieee_fpcr_to_swcr(fpcr);
                fex = exc & ~fex;
                fex >>= SWCR_STATUS_TO_EXCSUM_SHIFT;
                fex &= (cpu_env)->swcr;

                /* Update the hardware fpcr.  */
                fpcr |= alpha_ieee_swcr_to_fpcr(exc);
                cpu_alpha_store_fpcr(cpu_env, fpcr);

                if (fex) {
                    int si_code = TARGET_FPE_FLTUNK;
                    target_siginfo_t info;

                    /* Later checks win: the order fixes the priority of
                       the si_code reported when several bits are set. */
                    if (fex & SWCR_TRAP_ENABLE_DNO) {
                        si_code = TARGET_FPE_FLTUND;
                    }
                    if (fex & SWCR_TRAP_ENABLE_INE) {
                        si_code = TARGET_FPE_FLTRES;
                    }
                    if (fex & SWCR_TRAP_ENABLE_UNF) {
                        si_code = TARGET_FPE_FLTUND;
                    }
                    if (fex & SWCR_TRAP_ENABLE_OVF) {
                        si_code = TARGET_FPE_FLTOVF;
                    }
                    if (fex & SWCR_TRAP_ENABLE_DZE) {
                        si_code = TARGET_FPE_FLTDIV;
                    }
                    if (fex & SWCR_TRAP_ENABLE_INV) {
                        si_code = TARGET_FPE_FLTINV;
                    }

                    info.si_signo = SIGFPE;
                    info.si_errno = 0;
                    info.si_code = si_code;
                    info._sifields._sigfault._addr = (cpu_env)->pc;
                    queue_signal(cpu_env, info.si_signo,
                                 QEMU_SI_FAULT, &info);
                }
                ret = 0;
            }
            break;

        /* case SSI_NVPAIRS:
             -- Used with SSIN_UACPROC to enable unaligned accesses.
           case SSI_IEEE_STATE_AT_SIGNAL:
           case SSI_IEEE_IGNORE_STATE_AT_SIGNAL:
             -- Not implemented in linux kernel
        */
        }
        return ret;
#endif
#ifdef TARGET_NR_osf_sigprocmask
    /* Alpha specific.  */
    case TARGET_NR_osf_sigprocmask:
        {
            abi_ulong mask;
            int how;
            sigset_t set, oldset;

            switch(arg1) {
            case TARGET_SIG_BLOCK:
                how = SIG_BLOCK;
                break;
            case TARGET_SIG_UNBLOCK:
                how = SIG_UNBLOCK;
                break;
            case TARGET_SIG_SETMASK:
                how = SIG_SETMASK;
                break;
            default:
                return -TARGET_EINVAL;
            }
            mask = arg2;
            target_to_host_old_sigset(&set, &mask);
            ret = do_sigprocmask(how, &set, &oldset);
            if (!ret) {
                /* OSF convention: the old mask is the return value. */
                host_to_target_old_sigset(&mask, &oldset);
                ret = mask;
            }
        }
        return ret;
#endif

#ifdef TARGET_NR_getgid32
    case TARGET_NR_getgid32:
        return get_errno(getgid());
#endif
#ifdef TARGET_NR_geteuid32
    case TARGET_NR_geteuid32:
        return get_errno(geteuid());
#endif
#ifdef TARGET_NR_getegid32
    case TARGET_NR_getegid32:
        return get_errno(getegid());
#endif
#ifdef TARGET_NR_setreuid32
    case TARGET_NR_setreuid32:
        return get_errno(sys_setreuid(arg1, arg2));
#endif
#ifdef TARGET_NR_setregid32
    case TARGET_NR_setregid32:
        return get_errno(sys_setregid(arg1, arg2));
#endif
#ifdef TARGET_NR_getgroups32
    case TARGET_NR_getgroups32:
        { /* the same code as for TARGET_NR_getgroups */
            int gidsetsize = arg1;
            uint32_t *target_grouplist;
            g_autofree gid_t *grouplist = NULL;
            int i;

            if (gidsetsize > NGROUPS_MAX || gidsetsize < 0) {
                return -TARGET_EINVAL;
            }
            /* gidsetsize == 0 only queries the group count. */
            if (gidsetsize > 0) {
                grouplist = g_try_new(gid_t, gidsetsize);
                if (!grouplist) {
                    return -TARGET_ENOMEM;
                }
            }
            ret = get_errno(getgroups(gidsetsize, grouplist));
            if (!is_error(ret) && gidsetsize > 0) {
                target_grouplist = lock_user(VERIFY_WRITE, arg2,
                                             gidsetsize * 4, 0);
                if (!target_grouplist) {
                    return -TARGET_EFAULT;
                }
                for (i = 0; i < ret; i++) {
                    target_grouplist[i] = tswap32(grouplist[i]);
                }
                unlock_user(target_grouplist, arg2, gidsetsize * 4);
            }
            return ret;
        }
#endif
#ifdef TARGET_NR_setgroups32
    case TARGET_NR_setgroups32:
        { /* the same code as for TARGET_NR_setgroups */
            int gidsetsize = arg1;
            uint32_t *target_grouplist;
            g_autofree gid_t *grouplist = NULL;
            int i;

            if (gidsetsize > NGROUPS_MAX || gidsetsize < 0) {
                return -TARGET_EINVAL;
            }
            if (gidsetsize > 0) {
                grouplist = g_try_new(gid_t, gidsetsize);
                if (!grouplist) {
                    return -TARGET_ENOMEM;
                }
                target_grouplist = lock_user(VERIFY_READ, arg2,
                                             gidsetsize * 4, 1);
                if (!target_grouplist) {
                    return -TARGET_EFAULT;
                }
                for (i = 0; i < gidsetsize; i++) {
                    grouplist[i] = tswap32(target_grouplist[i]);
                }
                /* Read-only lock: nothing to copy back. */
                unlock_user(target_grouplist, arg2, 0);
            }
            return get_errno(sys_setgroups(gidsetsize, grouplist));
        }
#endif
    /* Remaining 32-bit id syscalls: pass values through unconverted. */
#ifdef TARGET_NR_fchown32
    case TARGET_NR_fchown32:
        return get_errno(fchown(arg1, arg2, arg3));
#endif
#ifdef TARGET_NR_setresuid32
    case TARGET_NR_setresuid32:
        return get_errno(sys_setresuid(arg1, arg2, arg3));
#endif
#ifdef TARGET_NR_getresuid32
    case TARGET_NR_getresuid32:
        {
            uid_t ruid, euid, suid;
            ret = get_errno(getresuid(&ruid, &euid, &suid));
            if (!is_error(ret)) {
                if (put_user_u32(ruid, arg1)
                    || put_user_u32(euid, arg2)
                    || put_user_u32(suid, arg3))
                    return -TARGET_EFAULT;
            }
        }
        return ret;
#endif
#ifdef TARGET_NR_setresgid32
    case TARGET_NR_setresgid32:
        return get_errno(sys_setresgid(arg1, arg2, arg3));
#endif
#ifdef TARGET_NR_getresgid32
    case TARGET_NR_getresgid32:
        {
            gid_t rgid, egid, sgid;
            ret = get_errno(getresgid(&rgid, &egid, &sgid));
            if (!is_error(ret)) {
                if (put_user_u32(rgid, arg1)
                    || put_user_u32(egid, arg2)
                    || put_user_u32(sgid, arg3))
                    return -TARGET_EFAULT;
            }
        }
        return ret;
#endif
#ifdef TARGET_NR_chown32
    case TARGET_NR_chown32:
        if (!(p = lock_user_string(arg1)))
            return -TARGET_EFAULT;
        ret = get_errno(chown(p, arg2, arg3));
        unlock_user(p, arg1, 0);
        return ret;
#endif
#ifdef TARGET_NR_setuid32
    case TARGET_NR_setuid32:
        return get_errno(sys_setuid(arg1));
#endif
#ifdef TARGET_NR_setgid32
    case TARGET_NR_setgid32:
        return get_errno(sys_setgid(arg1));
#endif
#ifdef TARGET_NR_setfsuid32
    case TARGET_NR_setfsuid32:
        return get_errno(setfsuid(arg1));
#endif
#ifdef TARGET_NR_setfsgid32
    case TARGET_NR_setfsgid32:
        return get_errno(setfsgid(arg1));
#endif
#ifdef TARGET_NR_mincore
    case TARGET_NR_mincore:
        {
            /* arg1/arg2: guest range to probe; arg3: result vector. */
            void *a = lock_user(VERIFY_NONE, arg1, arg2, 0);
            if (!a) {
                /* Kernel mincore() reports unmapped ranges as ENOMEM. */
                return -TARGET_ENOMEM;
            }
            p = lock_user_string(arg3);
            if (!p) {
                ret = -TARGET_EFAULT;
            } else {
                ret = get_errno(mincore(a, arg2, p));
                unlock_user(p, arg3, ret);
            }
            unlock_user(a, arg1, 0);
        }
        return ret;
#endif
#ifdef TARGET_NR_arm_fadvise64_64
    case TARGET_NR_arm_fadvise64_64:
        /* arm_fadvise64_64 looks like fadvise64_64 but
         * with different argument order: fd, advice, offset, len
         * rather than the usual fd, offset, len, advice.
         * Note that offset and len are both 64-bit so appear as
         * pairs of 32-bit registers.
         */
        ret = posix_fadvise(arg1, target_offset64(arg3, arg4),
                            target_offset64(arg5, arg6), arg2);
        /* posix_fadvise returns the error directly, not via errno. */
        return -host_to_target_errno(ret);
#endif
#if TARGET_ABI_BITS == 32 && !defined(TARGET_ABI_MIPSN32)
#ifdef TARGET_NR_fadvise64_64
    case TARGET_NR_fadvise64_64:
#if defined(TARGET_PPC) || defined(TARGET_XTENSA)
        /* 6 args: fd, advice, offset (high, low), len (high, low) */
        /* Rotate the arguments into the usual fd, offset, len, advice
           order; ret is used as a scratch temporary here. */
        ret = arg2;
        arg2 = arg3;
        arg3 = arg4;
        arg4 = arg5;
        arg5 = arg6;
        arg6 = ret;
#else
        /* 6 args: fd, offset (high, low), len (high, low), advice */
        if (regpairs_aligned(cpu_env, num)) {
            /* offset is in (3,4), len in (5,6) and advice in 7 */
            arg2 = arg3;
            arg3 = arg4;
            arg4 = arg5;
            arg5 = arg6;
            arg6 = arg7;
        }
#endif
        ret = posix_fadvise(arg1, target_offset64(arg2, arg3),
                            target_offset64(arg4, arg5), arg6);
        /* posix_fadvise returns the error code, not -1/errno. */
        return -host_to_target_errno(ret);
#endif

#ifdef TARGET_NR_fadvise64
    case TARGET_NR_fadvise64:
        /* 5 args: fd, offset (high, low), len, advice */
        if (regpairs_aligned(cpu_env, num)) {
            /* offset is in (3,4), len in 5 and advice in 6 */
            arg2 = arg3;
            arg3 = arg4;
            arg4 = arg5;
            arg5 = arg6;
        }
        ret = posix_fadvise(arg1, target_offset64(arg2, arg3), arg4, arg5);
        return -host_to_target_errno(ret);
#endif

#else /* not a 32-bit ABI */
#if defined(TARGET_NR_fadvise64_64) || defined(TARGET_NR_fadvise64)
#ifdef TARGET_NR_fadvise64_64
    case TARGET_NR_fadvise64_64:
#endif
#ifdef TARGET_NR_fadvise64
    case TARGET_NR_fadvise64:
#endif
#ifdef TARGET_S390X
        /* s390x uses different advice constants; remap to the host's. */
        switch (arg4) {
        case 4: arg4 = POSIX_FADV_NOREUSE + 1; break; /* make sure it's an invalid value */
        case 5: arg4 = POSIX_FADV_NOREUSE + 2; break; /* ditto */
        case 6: arg4 = POSIX_FADV_DONTNEED; break;
        case 7: arg4 = POSIX_FADV_NOREUSE; break;
        default: break;
        }
#endif
        return -host_to_target_errno(posix_fadvise(arg1, arg2, arg3, arg4));
#endif
#endif /* end of 64-bit ABI fadvise handling */

#ifdef TARGET_NR_madvise
    case TARGET_NR_madvise:
        return target_madvise(arg1, arg2, arg3);
#endif
#ifdef TARGET_NR_fcntl64
    case TARGET_NR_fcntl64:
        {
            int cmd;
            struct flock fl;
            /* Conversion helpers for the target's 64-bit flock layout. */
            from_flock64_fn *copyfrom = copy_from_user_flock64;
            to_flock64_fn *copyto = copy_to_user_flock64;

#ifdef TARGET_ARM
            /* Old-ABI ARM uses a differently-packed flock64. */
            if (!cpu_env->eabi) {
                copyfrom = copy_from_user_oabi_flock64;
                copyto = copy_to_user_oabi_flock64;
            }
#endif
            cmd = target_to_host_fcntl_cmd(arg2);
            if (cmd == -TARGET_EINVAL) {
                return cmd;
            }

            switch(arg2) {
            case TARGET_F_GETLK64:
                ret = copyfrom(&fl, arg3);
                if (ret) {
                    break;
                }
                ret = get_errno(safe_fcntl(arg1, cmd, &fl));
                if (ret == 0) {
                    ret = copyto(arg3, &fl);
                }
                break;

            case TARGET_F_SETLK64:
            case TARGET_F_SETLKW64:
                ret = copyfrom(&fl, arg3);
                if (ret) {
                    break;
                }
                ret = get_errno(safe_fcntl(arg1, cmd, &fl));
                break;
            default:
                /* All non-locking commands share the fcntl() path. */
                ret = do_fcntl(arg1, arg2, arg3);
                break;
            }
            return ret;
        }
#endif
#ifdef TARGET_NR_cacheflush
    case TARGET_NR_cacheflush:
        /* self-modifying code is handled automatically, so nothing needed */
        return 0;
#endif
#ifdef TARGET_NR_getpagesize
    case TARGET_NR_getpagesize:
        return TARGET_PAGE_SIZE;
#endif
    case TARGET_NR_gettid:
        return get_errno(sys_gettid());
#ifdef TARGET_NR_readahead
    case TARGET_NR_readahead:
#if TARGET_ABI_BITS == 32 && !defined(TARGET_ABI_MIPSN32)
        /* 32-bit ABIs pass the 64-bit offset as a register pair, which
           some targets require to start on an even register. */
        if (regpairs_aligned(cpu_env, num)) {
            arg2 = arg3;
            arg3 = arg4;
            arg4 = arg5;
        }
        ret = get_errno(readahead(arg1, target_offset64(arg2, arg3) , arg4));
#else
        ret = get_errno(readahead(arg1, arg2, arg3));
#endif
        return ret;
#endif
#ifdef CONFIG_ATTR
#ifdef TARGET_NR_setxattr
    case TARGET_NR_listxattr:
    case TARGET_NR_llistxattr:
    {
        void *b = 0;
        /* arg2 == 0 is the "query needed size" mode; skip the buffer. */
        if (arg2) {
            b = lock_user(VERIFY_WRITE, arg2, arg3, 0);
            if (!b) {
                return -TARGET_EFAULT;
            }
        }
        p = lock_user_string(arg1);
        if (p) {
            if (num == TARGET_NR_listxattr) {
                ret = get_errno(listxattr(p, b, arg3));
            } else {
                ret = get_errno(llistxattr(p, b, arg3));
            }
        } else {
            ret = -TARGET_EFAULT;
        }
        unlock_user(p, arg1, 0);
        unlock_user(b, arg2, arg3);
        return ret;
    }
    case TARGET_NR_flistxattr:
    {
        void *b = 0;
        if (arg2) {
            b = lock_user(VERIFY_WRITE, arg2, arg3, 0);
            if (!b) {
                return -TARGET_EFAULT;
            }
        }
        ret = get_errno(flistxattr(arg1, b, arg3));
        unlock_user(b, arg2, arg3);
        return ret;
    }
    case TARGET_NR_setxattr:
    case TARGET_NR_lsetxattr:
        {
            void *n, *v = 0;
            /* A NULL value pointer with size 0 is valid for setxattr. */
            if (arg3) {
                v = lock_user(VERIFY_READ, arg3, arg4, 1);
                if (!v) {
                    return -TARGET_EFAULT;
                }
            }
            p = lock_user_string(arg1);
            n = lock_user_string(arg2);
            if (p && n) {
                if (num == TARGET_NR_setxattr) {
                    ret = get_errno(setxattr(p, n, v, arg4, arg5));
                } else {
                    ret = get_errno(lsetxattr(p, n, v, arg4, arg5));
                }
            } else {
                ret = -TARGET_EFAULT;
            }
            /* unlock_user tolerates NULL, so failed locks are safe here. */
            unlock_user(p, arg1, 0);
            unlock_user(n, arg2, 0);
            unlock_user(v, arg3, 0);
        }
        return ret;
    case TARGET_NR_fsetxattr:
        {
            void *n, *v = 0;
            if (arg3) {
                v = lock_user(VERIFY_READ, arg3, arg4, 1);
                if (!v) {
                    return -TARGET_EFAULT;
                }
            }
            n = lock_user_string(arg2);
            if (n) {
                ret = get_errno(fsetxattr(arg1, n, v, arg4, arg5));
            } else {
                ret = -TARGET_EFAULT;
            }
            unlock_user(n, arg2, 0);
            unlock_user(v, arg3, 0);
        }
        return ret;
    case TARGET_NR_getxattr:
    case TARGET_NR_lgetxattr:
        {
            void *n, *v = 0;
            /* arg3 == 0 queries the attribute size only. */
            if (arg3) {
                v = lock_user(VERIFY_WRITE, arg3, arg4, 0);
                if (!v) {
                    return -TARGET_EFAULT;
                }
            }
            p = lock_user_string(arg1);
            n = lock_user_string(arg2);
            if (p && n) {
                if (num == TARGET_NR_getxattr) {
                    ret = get_errno(getxattr(p, n, v, arg4));
                } else {
                    ret = get_errno(lgetxattr(p, n, v, arg4));
                }
            } else {
                ret = -TARGET_EFAULT;
            }
            unlock_user(p, arg1, 0);
            unlock_user(n, arg2, 0);
            /* Copy back up to arg4 bytes of the retrieved value. */
            unlock_user(v, arg3, arg4);
        }
        return ret;
    case TARGET_NR_fgetxattr:
        {
            void *n, *v = 0;
            if (arg3) {
                v = lock_user(VERIFY_WRITE, arg3, arg4, 0);
                if (!v) {
                    return -TARGET_EFAULT;
                }
            }
            n = lock_user_string(arg2);
            if (n) {
                ret = get_errno(fgetxattr(arg1, n, v, arg4));
            } else {
                ret = -TARGET_EFAULT;
            }
            unlock_user(n, arg2, 0);
            unlock_user(v, arg3, arg4);
        }
        return ret;
    case TARGET_NR_removexattr:
    case TARGET_NR_lremovexattr:
        {
            void *n;
            p = lock_user_string(arg1);
            n = lock_user_string(arg2);
            if (p && n) {
                if (num == TARGET_NR_removexattr) {
                    ret = get_errno(removexattr(p, n));
                } else {
                    ret = get_errno(lremovexattr(p, n));
                }
            } else {
                ret = -TARGET_EFAULT;
            }
            unlock_user(p, arg1, 0);
            unlock_user(n, arg2, 0);
        }
        return ret;
    case TARGET_NR_fremovexattr:
        {
            void *n;
            n = lock_user_string(arg2);
            if (n) {
                ret = get_errno(fremovexattr(arg1, n));
            } else {
                ret = -TARGET_EFAULT;
            }
            unlock_user(n, arg2, 0);
        }
        return ret;
#endif
#endif /* CONFIG_ATTR */
#ifdef TARGET_NR_set_thread_area
    case TARGET_NR_set_thread_area:
#if defined(TARGET_MIPS)
        /* MIPS keeps the TLS pointer in the UserLocal CP0 register. */
        cpu_env->active_tc.CP0_UserLocal = arg1;
        return 0;
#elif defined(TARGET_I386) && defined(TARGET_ABI32)
        return do_set_thread_area(cpu_env, arg1);
#elif defined(TARGET_M68K)
        {
            /* m68k stores the thread pointer in the task state. */
            TaskState *ts = get_task_state(cpu);
            ts->tp_value = arg1;
            return 0;
        }
#else
        return -TARGET_ENOSYS;
#endif
#endif
#ifdef TARGET_NR_get_thread_area
    case TARGET_NR_get_thread_area:
#if defined(TARGET_I386) && defined(TARGET_ABI32)
        return do_get_thread_area(cpu_env, arg1);
#elif defined(TARGET_M68K)
        {
            TaskState *ts = get_task_state(cpu);
            return ts->tp_value;
        }
#else
        return -TARGET_ENOSYS;
#endif
#endif
#ifdef TARGET_NR_getdomainname
    case TARGET_NR_getdomainname:
        return -TARGET_ENOSYS;
#endif
#ifdef TARGET_NR_clock_settime
    case TARGET_NR_clock_settime:
    {
        struct timespec ts;

        ret = target_to_host_timespec(&ts, arg2);
        if (!is_error(ret)) {
            ret = get_errno(clock_settime(arg1, &ts));
        }
        return ret;
    }
#endif
#ifdef TARGET_NR_clock_settime64
    case TARGET_NR_clock_settime64:
    {
        struct timespec ts;

        /* time64 variant: 64-bit seconds even on 32-bit targets. */
        ret = target_to_host_timespec64(&ts, arg2);
        if (!is_error(ret)) {
            ret = get_errno(clock_settime(arg1, &ts));
        }
        return ret;
    }
#endif
#ifdef TARGET_NR_clock_gettime
    case TARGET_NR_clock_gettime:
    {
        struct timespec ts;
        ret = get_errno(clock_gettime(arg1, &ts));
        if (!is_error(ret)) {
            ret = host_to_target_timespec(arg2, &ts);
        }
        return ret;
    }
#endif
#ifdef TARGET_NR_clock_gettime64
    case TARGET_NR_clock_gettime64:
    {
        struct timespec ts;
        ret = get_errno(clock_gettime(arg1, &ts));
        if (!is_error(ret)) {
            ret = host_to_target_timespec64(arg2, &ts);
        }
        return ret;
    }
#endif
#ifdef TARGET_NR_clock_getres
    case TARGET_NR_clock_getres:
    {
        struct timespec ts;
        ret = get_errno(clock_getres(arg1, &ts));
        if (!is_error(ret)) {
            /* NOTE(review): a copy-out failure here is silently ignored,
               unlike clock_gettime above — confirm this is intentional. */
            host_to_target_timespec(arg2, &ts);
        }
        return ret;
    }
#endif
#ifdef TARGET_NR_clock_getres_time64
    case TARGET_NR_clock_getres_time64:
    {
        struct timespec ts;
        ret = get_errno(clock_getres(arg1, &ts));
        if (!is_error(ret)) {
            host_to_target_timespec64(arg2, &ts);
        }
        return ret;
    }
#endif
#ifdef TARGET_NR_clock_nanosleep
    case TARGET_NR_clock_nanosleep:
    {
        struct timespec ts;
        if (target_to_host_timespec(&ts, arg3)) {
            return -TARGET_EFAULT;
        }
        /* ts doubles as the remaining-time output when arg4 is set. */
        ret = get_errno(safe_clock_nanosleep(arg1, arg2,
                                             &ts, arg4 ? &ts : NULL));
        /*
         * if the call is interrupted by a signal handler, it fails
         * with error -TARGET_EINTR and if arg4 is not NULL and arg2 is not
         * TIMER_ABSTIME, it returns the remaining unslept time in arg4.
         */
        if (ret == -TARGET_EINTR && arg4 && arg2 != TIMER_ABSTIME &&
            host_to_target_timespec(arg4, &ts)) {
            return -TARGET_EFAULT;
        }

        return ret;
    }
#endif
#ifdef TARGET_NR_clock_nanosleep_time64
    case TARGET_NR_clock_nanosleep_time64:
    {
        struct timespec ts;

        if (target_to_host_timespec64(&ts, arg3)) {
            return -TARGET_EFAULT;
        }

        ret = get_errno(safe_clock_nanosleep(arg1, arg2,
                                             &ts, arg4 ? &ts : NULL));

        /* Same EINTR/remaining-time contract as clock_nanosleep above. */
        if (ret == -TARGET_EINTR && arg4 && arg2 != TIMER_ABSTIME &&
            host_to_target_timespec64(arg4, &ts)) {
            return -TARGET_EFAULT;
        }
        return ret;
    }
#endif

#if defined(TARGET_NR_set_tid_address)
    case TARGET_NR_set_tid_address:
    {
        TaskState *ts = get_task_state(cpu);
        ts->child_tidptr = arg1;
        /* do not call host set_tid_address() syscall, instead return tid() */
        return get_errno(sys_gettid());
    }
#endif
    case TARGET_NR_tkill:
        /* Signal numbers differ between target and host; translate. */
        return get_errno(safe_tkill((int)arg1, target_to_host_signal(arg2)));
    case TARGET_NR_tgkill:
        return get_errno(safe_tgkill((int)arg1, (int)arg2,
                         target_to_host_signal(arg3)));
#ifdef TARGET_NR_set_robust_list
    case TARGET_NR_set_robust_list:
    case TARGET_NR_get_robust_list:
        /* The ABI for supporting robust futexes has userspace pass
         * the kernel a pointer to a linked list which is updated by
         * userspace after the syscall; the list is walked by the kernel
         * when the thread exits. Since the linked list in QEMU guest
         * memory isn't a valid linked list for the host and we have
         * no way to reliably intercept the thread-death event, we can't
         * support these. Silently return ENOSYS so that guest userspace
         * falls back to a non-robust futex implementation (which should
         * be OK except in the corner case of the guest crashing while
         * holding a mutex that is shared with another process via
         * shared memory).
         */
        return -TARGET_ENOSYS;
#endif
#if defined(TARGET_NR_utimensat)
    case TARGET_NR_utimensat:
        {
            struct timespec *tsp, ts[2];
            /* arg3 == NULL means "set both timestamps to now". */
            if (!arg3) {
                tsp = NULL;
            } else {
                if (target_to_host_timespec(ts, arg3)) {
                    return -TARGET_EFAULT;
                }
                if (target_to_host_timespec(ts + 1, arg3 +
                                            sizeof(struct target_timespec))) {
                    return -TARGET_EFAULT;
                }
                tsp = ts;
            }
            /* arg2 == NULL operates directly on the dirfd itself. */
            if (!arg2)
                ret = get_errno(sys_utimensat(arg1, NULL, tsp, arg4));
            else {
                if (!(p = lock_user_string(arg2))) {
                    return -TARGET_EFAULT;
                }
                ret = get_errno(sys_utimensat(arg1, path(p), tsp, arg4));
                unlock_user(p, arg2, 0);
            }
        }
        return ret;
#endif
#ifdef TARGET_NR_utimensat_time64
    case TARGET_NR_utimensat_time64:
        {
            struct timespec *tsp, ts[2];
            if (!arg3) {
                tsp = NULL;
            } else {
                if (target_to_host_timespec64(ts, arg3)) {
                    return -TARGET_EFAULT;
                }
                if (target_to_host_timespec64(ts + 1, arg3 +
                                     sizeof(struct target__kernel_timespec))) {
                    return -TARGET_EFAULT;
                }
                tsp = ts;
            }
            if (!arg2)
                ret = get_errno(sys_utimensat(arg1, NULL, tsp, arg4));
            else {
                p = lock_user_string(arg2);
                if (!p) {
                    return -TARGET_EFAULT;
                }
                ret = get_errno(sys_utimensat(arg1, path(p), tsp, arg4));
                unlock_user(p, arg2, 0);
            }
        }
        return ret;
#endif
  11946. #ifdef TARGET_NR_futex
  11947. case TARGET_NR_futex:
  11948. return do_futex(cpu, false, arg1, arg2, arg3, arg4, arg5, arg6);
  11949. #endif
  11950. #ifdef TARGET_NR_futex_time64
  11951. case TARGET_NR_futex_time64:
  11952. return do_futex(cpu, true, arg1, arg2, arg3, arg4, arg5, arg6);
  11953. #endif
  11954. #ifdef CONFIG_INOTIFY
  11955. #if defined(TARGET_NR_inotify_init)
  11956. case TARGET_NR_inotify_init:
  11957. ret = get_errno(inotify_init());
  11958. if (ret >= 0) {
  11959. fd_trans_register(ret, &target_inotify_trans);
  11960. }
  11961. return ret;
  11962. #endif
  11963. #if defined(TARGET_NR_inotify_init1) && defined(CONFIG_INOTIFY1)
  11964. case TARGET_NR_inotify_init1:
  11965. ret = get_errno(inotify_init1(target_to_host_bitmask(arg1,
  11966. fcntl_flags_tbl)));
  11967. if (ret >= 0) {
  11968. fd_trans_register(ret, &target_inotify_trans);
  11969. }
  11970. return ret;
  11971. #endif
  11972. #if defined(TARGET_NR_inotify_add_watch)
  11973. case TARGET_NR_inotify_add_watch:
  11974. p = lock_user_string(arg2);
  11975. ret = get_errno(inotify_add_watch(arg1, path(p), arg3));
  11976. unlock_user(p, arg2, 0);
  11977. return ret;
  11978. #endif
  11979. #if defined(TARGET_NR_inotify_rm_watch)
  11980. case TARGET_NR_inotify_rm_watch:
  11981. return get_errno(inotify_rm_watch(arg1, arg2));
  11982. #endif
  11983. #endif
  11984. #if defined(TARGET_NR_mq_open) && defined(__NR_mq_open)
  11985. case TARGET_NR_mq_open:
  11986. {
  11987. struct mq_attr posix_mq_attr;
  11988. struct mq_attr *pposix_mq_attr;
  11989. int host_flags;
  11990. host_flags = target_to_host_bitmask(arg2, fcntl_flags_tbl);
  11991. pposix_mq_attr = NULL;
  11992. if (arg4) {
  11993. if (copy_from_user_mq_attr(&posix_mq_attr, arg4) != 0) {
  11994. return -TARGET_EFAULT;
  11995. }
  11996. pposix_mq_attr = &posix_mq_attr;
  11997. }
  11998. p = lock_user_string(arg1 - 1);
  11999. if (!p) {
  12000. return -TARGET_EFAULT;
  12001. }
  12002. ret = get_errno(mq_open(p, host_flags, arg3, pposix_mq_attr));
  12003. unlock_user (p, arg1, 0);
  12004. }
  12005. return ret;
  12006. case TARGET_NR_mq_unlink:
  12007. p = lock_user_string(arg1 - 1);
  12008. if (!p) {
  12009. return -TARGET_EFAULT;
  12010. }
  12011. ret = get_errno(mq_unlink(p));
  12012. unlock_user (p, arg1, 0);
  12013. return ret;
  12014. #ifdef TARGET_NR_mq_timedsend
  12015. case TARGET_NR_mq_timedsend:
  12016. {
  12017. struct timespec ts;
  12018. p = lock_user (VERIFY_READ, arg2, arg3, 1);
  12019. if (arg5 != 0) {
  12020. if (target_to_host_timespec(&ts, arg5)) {
  12021. return -TARGET_EFAULT;
  12022. }
  12023. ret = get_errno(safe_mq_timedsend(arg1, p, arg3, arg4, &ts));
  12024. if (!is_error(ret) && host_to_target_timespec(arg5, &ts)) {
  12025. return -TARGET_EFAULT;
  12026. }
  12027. } else {
  12028. ret = get_errno(safe_mq_timedsend(arg1, p, arg3, arg4, NULL));
  12029. }
  12030. unlock_user (p, arg2, arg3);
  12031. }
  12032. return ret;
  12033. #endif
  12034. #ifdef TARGET_NR_mq_timedsend_time64
  12035. case TARGET_NR_mq_timedsend_time64:
  12036. {
  12037. struct timespec ts;
  12038. p = lock_user(VERIFY_READ, arg2, arg3, 1);
  12039. if (arg5 != 0) {
  12040. if (target_to_host_timespec64(&ts, arg5)) {
  12041. return -TARGET_EFAULT;
  12042. }
  12043. ret = get_errno(safe_mq_timedsend(arg1, p, arg3, arg4, &ts));
  12044. if (!is_error(ret) && host_to_target_timespec64(arg5, &ts)) {
  12045. return -TARGET_EFAULT;
  12046. }
  12047. } else {
  12048. ret = get_errno(safe_mq_timedsend(arg1, p, arg3, arg4, NULL));
  12049. }
  12050. unlock_user(p, arg2, arg3);
  12051. }
  12052. return ret;
  12053. #endif
  12054. #ifdef TARGET_NR_mq_timedreceive
  12055. case TARGET_NR_mq_timedreceive:
  12056. {
  12057. struct timespec ts;
  12058. unsigned int prio;
  12059. p = lock_user (VERIFY_READ, arg2, arg3, 1);
  12060. if (arg5 != 0) {
  12061. if (target_to_host_timespec(&ts, arg5)) {
  12062. return -TARGET_EFAULT;
  12063. }
  12064. ret = get_errno(safe_mq_timedreceive(arg1, p, arg3,
  12065. &prio, &ts));
  12066. if (!is_error(ret) && host_to_target_timespec(arg5, &ts)) {
  12067. return -TARGET_EFAULT;
  12068. }
  12069. } else {
  12070. ret = get_errno(safe_mq_timedreceive(arg1, p, arg3,
  12071. &prio, NULL));
  12072. }
  12073. unlock_user (p, arg2, arg3);
  12074. if (arg4 != 0)
  12075. put_user_u32(prio, arg4);
  12076. }
  12077. return ret;
  12078. #endif
  12079. #ifdef TARGET_NR_mq_timedreceive_time64
  12080. case TARGET_NR_mq_timedreceive_time64:
  12081. {
  12082. struct timespec ts;
  12083. unsigned int prio;
  12084. p = lock_user(VERIFY_READ, arg2, arg3, 1);
  12085. if (arg5 != 0) {
  12086. if (target_to_host_timespec64(&ts, arg5)) {
  12087. return -TARGET_EFAULT;
  12088. }
  12089. ret = get_errno(safe_mq_timedreceive(arg1, p, arg3,
  12090. &prio, &ts));
  12091. if (!is_error(ret) && host_to_target_timespec64(arg5, &ts)) {
  12092. return -TARGET_EFAULT;
  12093. }
  12094. } else {
  12095. ret = get_errno(safe_mq_timedreceive(arg1, p, arg3,
  12096. &prio, NULL));
  12097. }
  12098. unlock_user(p, arg2, arg3);
  12099. if (arg4 != 0) {
  12100. put_user_u32(prio, arg4);
  12101. }
  12102. }
  12103. return ret;
  12104. #endif
  12105. /* Not implemented for now... */
  12106. /* case TARGET_NR_mq_notify: */
  12107. /* break; */
  12108. case TARGET_NR_mq_getsetattr:
  12109. {
  12110. struct mq_attr posix_mq_attr_in, posix_mq_attr_out;
  12111. ret = 0;
  12112. if (arg2 != 0) {
  12113. copy_from_user_mq_attr(&posix_mq_attr_in, arg2);
  12114. ret = get_errno(mq_setattr(arg1, &posix_mq_attr_in,
  12115. &posix_mq_attr_out));
  12116. } else if (arg3 != 0) {
  12117. ret = get_errno(mq_getattr(arg1, &posix_mq_attr_out));
  12118. }
  12119. if (ret == 0 && arg3 != 0) {
  12120. copy_to_user_mq_attr(arg3, &posix_mq_attr_out);
  12121. }
  12122. }
  12123. return ret;
  12124. #endif
  12125. #ifdef CONFIG_SPLICE
  12126. #ifdef TARGET_NR_tee
  12127. case TARGET_NR_tee:
  12128. {
  12129. ret = get_errno(tee(arg1,arg2,arg3,arg4));
  12130. }
  12131. return ret;
  12132. #endif
  12133. #ifdef TARGET_NR_splice
  12134. case TARGET_NR_splice:
  12135. {
  12136. loff_t loff_in, loff_out;
  12137. loff_t *ploff_in = NULL, *ploff_out = NULL;
  12138. if (arg2) {
  12139. if (get_user_u64(loff_in, arg2)) {
  12140. return -TARGET_EFAULT;
  12141. }
  12142. ploff_in = &loff_in;
  12143. }
  12144. if (arg4) {
  12145. if (get_user_u64(loff_out, arg4)) {
  12146. return -TARGET_EFAULT;
  12147. }
  12148. ploff_out = &loff_out;
  12149. }
  12150. ret = get_errno(splice(arg1, ploff_in, arg3, ploff_out, arg5, arg6));
  12151. if (arg2) {
  12152. if (put_user_u64(loff_in, arg2)) {
  12153. return -TARGET_EFAULT;
  12154. }
  12155. }
  12156. if (arg4) {
  12157. if (put_user_u64(loff_out, arg4)) {
  12158. return -TARGET_EFAULT;
  12159. }
  12160. }
  12161. }
  12162. return ret;
  12163. #endif
  12164. #ifdef TARGET_NR_vmsplice
  12165. case TARGET_NR_vmsplice:
  12166. {
  12167. struct iovec *vec = lock_iovec(VERIFY_READ, arg2, arg3, 1);
  12168. if (vec != NULL) {
  12169. ret = get_errno(vmsplice(arg1, vec, arg3, arg4));
  12170. unlock_iovec(vec, arg2, arg3, 0);
  12171. } else {
  12172. ret = -host_to_target_errno(errno);
  12173. }
  12174. }
  12175. return ret;
  12176. #endif
  12177. #endif /* CONFIG_SPLICE */
  12178. #ifdef CONFIG_EVENTFD
  12179. #if defined(TARGET_NR_eventfd)
  12180. case TARGET_NR_eventfd:
  12181. ret = get_errno(eventfd(arg1, 0));
  12182. if (ret >= 0) {
  12183. fd_trans_register(ret, &target_eventfd_trans);
  12184. }
  12185. return ret;
  12186. #endif
  12187. #if defined(TARGET_NR_eventfd2)
  12188. case TARGET_NR_eventfd2:
  12189. {
  12190. int host_flags = arg2 & (~(TARGET_O_NONBLOCK_MASK | TARGET_O_CLOEXEC));
  12191. if (arg2 & TARGET_O_NONBLOCK) {
  12192. host_flags |= O_NONBLOCK;
  12193. }
  12194. if (arg2 & TARGET_O_CLOEXEC) {
  12195. host_flags |= O_CLOEXEC;
  12196. }
  12197. ret = get_errno(eventfd(arg1, host_flags));
  12198. if (ret >= 0) {
  12199. fd_trans_register(ret, &target_eventfd_trans);
  12200. }
  12201. return ret;
  12202. }
  12203. #endif
  12204. #endif /* CONFIG_EVENTFD */
  12205. #if defined(CONFIG_FALLOCATE) && defined(TARGET_NR_fallocate)
  12206. case TARGET_NR_fallocate:
  12207. #if TARGET_ABI_BITS == 32 && !defined(TARGET_ABI_MIPSN32)
  12208. ret = get_errno(fallocate(arg1, arg2, target_offset64(arg3, arg4),
  12209. target_offset64(arg5, arg6)));
  12210. #else
  12211. ret = get_errno(fallocate(arg1, arg2, arg3, arg4));
  12212. #endif
  12213. return ret;
  12214. #endif
  12215. #if defined(CONFIG_SYNC_FILE_RANGE)
  12216. #if defined(TARGET_NR_sync_file_range)
  12217. case TARGET_NR_sync_file_range:
  12218. #if TARGET_ABI_BITS == 32 && !defined(TARGET_ABI_MIPSN32)
  12219. #if defined(TARGET_MIPS)
  12220. ret = get_errno(sync_file_range(arg1, target_offset64(arg3, arg4),
  12221. target_offset64(arg5, arg6), arg7));
  12222. #else
  12223. ret = get_errno(sync_file_range(arg1, target_offset64(arg2, arg3),
  12224. target_offset64(arg4, arg5), arg6));
  12225. #endif /* !TARGET_MIPS */
  12226. #else
  12227. ret = get_errno(sync_file_range(arg1, arg2, arg3, arg4));
  12228. #endif
  12229. return ret;
  12230. #endif
  12231. #if defined(TARGET_NR_sync_file_range2) || \
  12232. defined(TARGET_NR_arm_sync_file_range)
  12233. #if defined(TARGET_NR_sync_file_range2)
  12234. case TARGET_NR_sync_file_range2:
  12235. #endif
  12236. #if defined(TARGET_NR_arm_sync_file_range)
  12237. case TARGET_NR_arm_sync_file_range:
  12238. #endif
  12239. /* This is like sync_file_range but the arguments are reordered */
  12240. #if TARGET_ABI_BITS == 32 && !defined(TARGET_ABI_MIPSN32)
  12241. ret = get_errno(sync_file_range(arg1, target_offset64(arg3, arg4),
  12242. target_offset64(arg5, arg6), arg2));
  12243. #else
  12244. ret = get_errno(sync_file_range(arg1, arg3, arg4, arg2));
  12245. #endif
  12246. return ret;
  12247. #endif
  12248. #endif
  12249. #if defined(TARGET_NR_signalfd4)
  12250. case TARGET_NR_signalfd4:
  12251. return do_signalfd4(arg1, arg2, arg4);
  12252. #endif
  12253. #if defined(TARGET_NR_signalfd)
  12254. case TARGET_NR_signalfd:
  12255. return do_signalfd4(arg1, arg2, 0);
  12256. #endif
  12257. #if defined(CONFIG_EPOLL)
  12258. #if defined(TARGET_NR_epoll_create)
  12259. case TARGET_NR_epoll_create:
  12260. return get_errno(epoll_create(arg1));
  12261. #endif
  12262. #if defined(TARGET_NR_epoll_create1) && defined(CONFIG_EPOLL_CREATE1)
  12263. case TARGET_NR_epoll_create1:
  12264. return get_errno(epoll_create1(target_to_host_bitmask(arg1, fcntl_flags_tbl)));
  12265. #endif
#if defined(TARGET_NR_epoll_ctl)
    case TARGET_NR_epoll_ctl:
    /* args: int epfd, int op, int fd, struct epoll_event *event */
    {
        struct epoll_event ep;
        struct epoll_event *epp = 0;
        if (arg4) {
            /* EPOLL_CTL_DEL ignores the event argument, so only ADD/MOD
             * need the guest struct converted to host byte order.
             */
            if (arg2 != EPOLL_CTL_DEL) {
                struct target_epoll_event *target_ep;
                if (!lock_user_struct(VERIFY_READ, target_ep, arg4, 1)) {
                    return -TARGET_EFAULT;
                }
                ep.events = tswap32(target_ep->events);
                /*
                 * The epoll_data_t union is just opaque data to the kernel,
                 * so we transfer all 64 bits across and need not worry what
                 * actual data type it is.
                 */
                ep.data.u64 = tswap64(target_ep->data.u64);
                unlock_user_struct(target_ep, arg4, 0);
            }
            /*
             * before kernel 2.6.9, EPOLL_CTL_DEL operation required a
             * non-null pointer, even though this argument is ignored.
             *
             */
            epp = &ep;
        }
        return get_errno(epoll_ctl(arg1, arg2, arg3, epp));
    }
#endif
  12296. #if defined(TARGET_NR_epoll_wait) || defined(TARGET_NR_epoll_pwait)
  12297. #if defined(TARGET_NR_epoll_wait)
  12298. case TARGET_NR_epoll_wait:
  12299. #endif
  12300. #if defined(TARGET_NR_epoll_pwait)
  12301. case TARGET_NR_epoll_pwait:
  12302. #endif
  12303. {
  12304. struct target_epoll_event *target_ep;
  12305. struct epoll_event *ep;
  12306. int epfd = arg1;
  12307. int maxevents = arg3;
  12308. int timeout = arg4;
  12309. if (maxevents <= 0 || maxevents > TARGET_EP_MAX_EVENTS) {
  12310. return -TARGET_EINVAL;
  12311. }
  12312. target_ep = lock_user(VERIFY_WRITE, arg2,
  12313. maxevents * sizeof(struct target_epoll_event), 1);
  12314. if (!target_ep) {
  12315. return -TARGET_EFAULT;
  12316. }
  12317. ep = g_try_new(struct epoll_event, maxevents);
  12318. if (!ep) {
  12319. unlock_user(target_ep, arg2, 0);
  12320. return -TARGET_ENOMEM;
  12321. }
  12322. switch (num) {
  12323. #if defined(TARGET_NR_epoll_pwait)
  12324. case TARGET_NR_epoll_pwait:
  12325. {
  12326. sigset_t *set = NULL;
  12327. if (arg5) {
  12328. ret = process_sigsuspend_mask(&set, arg5, arg6);
  12329. if (ret != 0) {
  12330. break;
  12331. }
  12332. }
  12333. ret = get_errno(safe_epoll_pwait(epfd, ep, maxevents, timeout,
  12334. set, SIGSET_T_SIZE));
  12335. if (set) {
  12336. finish_sigsuspend_mask(ret);
  12337. }
  12338. break;
  12339. }
  12340. #endif
  12341. #if defined(TARGET_NR_epoll_wait)
  12342. case TARGET_NR_epoll_wait:
  12343. ret = get_errno(safe_epoll_pwait(epfd, ep, maxevents, timeout,
  12344. NULL, 0));
  12345. break;
  12346. #endif
  12347. default:
  12348. ret = -TARGET_ENOSYS;
  12349. }
  12350. if (!is_error(ret)) {
  12351. int i;
  12352. for (i = 0; i < ret; i++) {
  12353. target_ep[i].events = tswap32(ep[i].events);
  12354. target_ep[i].data.u64 = tswap64(ep[i].data.u64);
  12355. }
  12356. unlock_user(target_ep, arg2,
  12357. ret * sizeof(struct target_epoll_event));
  12358. } else {
  12359. unlock_user(target_ep, arg2, 0);
  12360. }
  12361. g_free(ep);
  12362. return ret;
  12363. }
  12364. #endif
  12365. #endif
  12366. #ifdef TARGET_NR_prlimit64
  12367. case TARGET_NR_prlimit64:
  12368. {
  12369. /* args: pid, resource number, ptr to new rlimit, ptr to old rlimit */
  12370. struct target_rlimit64 *target_rnew, *target_rold;
  12371. struct host_rlimit64 rnew, rold, *rnewp = 0;
  12372. int resource = target_to_host_resource(arg2);
  12373. if (arg3 && (resource != RLIMIT_AS &&
  12374. resource != RLIMIT_DATA &&
  12375. resource != RLIMIT_STACK)) {
  12376. if (!lock_user_struct(VERIFY_READ, target_rnew, arg3, 1)) {
  12377. return -TARGET_EFAULT;
  12378. }
  12379. __get_user(rnew.rlim_cur, &target_rnew->rlim_cur);
  12380. __get_user(rnew.rlim_max, &target_rnew->rlim_max);
  12381. unlock_user_struct(target_rnew, arg3, 0);
  12382. rnewp = &rnew;
  12383. }
  12384. ret = get_errno(sys_prlimit64(arg1, resource, rnewp, arg4 ? &rold : 0));
  12385. if (!is_error(ret) && arg4) {
  12386. if (!lock_user_struct(VERIFY_WRITE, target_rold, arg4, 1)) {
  12387. return -TARGET_EFAULT;
  12388. }
  12389. __put_user(rold.rlim_cur, &target_rold->rlim_cur);
  12390. __put_user(rold.rlim_max, &target_rold->rlim_max);
  12391. unlock_user_struct(target_rold, arg4, 1);
  12392. }
  12393. return ret;
  12394. }
  12395. #endif
  12396. #ifdef TARGET_NR_gethostname
  12397. case TARGET_NR_gethostname:
  12398. {
  12399. char *name = lock_user(VERIFY_WRITE, arg1, arg2, 0);
  12400. if (name) {
  12401. ret = get_errno(gethostname(name, arg2));
  12402. unlock_user(name, arg1, arg2);
  12403. } else {
  12404. ret = -TARGET_EFAULT;
  12405. }
  12406. return ret;
  12407. }
  12408. #endif
  12409. #ifdef TARGET_NR_atomic_cmpxchg_32
  12410. case TARGET_NR_atomic_cmpxchg_32:
  12411. {
  12412. /* should use start_exclusive from main.c */
  12413. abi_ulong mem_value;
  12414. if (get_user_u32(mem_value, arg6)) {
  12415. target_siginfo_t info;
  12416. info.si_signo = SIGSEGV;
  12417. info.si_errno = 0;
  12418. info.si_code = TARGET_SEGV_MAPERR;
  12419. info._sifields._sigfault._addr = arg6;
  12420. queue_signal(cpu_env, info.si_signo, QEMU_SI_FAULT, &info);
  12421. ret = 0xdeadbeef;
  12422. }
  12423. if (mem_value == arg2)
  12424. put_user_u32(arg1, arg6);
  12425. return mem_value;
  12426. }
  12427. #endif
  12428. #ifdef TARGET_NR_atomic_barrier
  12429. case TARGET_NR_atomic_barrier:
  12430. /* Like the kernel implementation and the
  12431. qemu arm barrier, no-op this? */
  12432. return 0;
  12433. #endif
  12434. #ifdef TARGET_NR_timer_create
  12435. case TARGET_NR_timer_create:
  12436. {
  12437. /* args: clockid_t clockid, struct sigevent *sevp, timer_t *timerid */
  12438. struct sigevent host_sevp = { {0}, }, *phost_sevp = NULL;
  12439. int clkid = arg1;
  12440. int timer_index = next_free_host_timer();
  12441. if (timer_index < 0) {
  12442. ret = -TARGET_EAGAIN;
  12443. } else {
  12444. timer_t *phtimer = g_posix_timers + timer_index;
  12445. if (arg2) {
  12446. phost_sevp = &host_sevp;
  12447. ret = target_to_host_sigevent(phost_sevp, arg2);
  12448. if (ret != 0) {
  12449. free_host_timer_slot(timer_index);
  12450. return ret;
  12451. }
  12452. }
  12453. ret = get_errno(timer_create(clkid, phost_sevp, phtimer));
  12454. if (ret) {
  12455. free_host_timer_slot(timer_index);
  12456. } else {
  12457. if (put_user(TIMER_MAGIC | timer_index, arg3, target_timer_t)) {
  12458. timer_delete(*phtimer);
  12459. free_host_timer_slot(timer_index);
  12460. return -TARGET_EFAULT;
  12461. }
  12462. }
  12463. }
  12464. return ret;
  12465. }
  12466. #endif
  12467. #ifdef TARGET_NR_timer_settime
  12468. case TARGET_NR_timer_settime:
  12469. {
  12470. /* args: timer_t timerid, int flags, const struct itimerspec *new_value,
  12471. * struct itimerspec * old_value */
  12472. target_timer_t timerid = get_timer_id(arg1);
  12473. if (timerid < 0) {
  12474. ret = timerid;
  12475. } else if (arg3 == 0) {
  12476. ret = -TARGET_EINVAL;
  12477. } else {
  12478. timer_t htimer = g_posix_timers[timerid];
  12479. struct itimerspec hspec_new = {{0},}, hspec_old = {{0},};
  12480. if (target_to_host_itimerspec(&hspec_new, arg3)) {
  12481. return -TARGET_EFAULT;
  12482. }
  12483. ret = get_errno(
  12484. timer_settime(htimer, arg2, &hspec_new, &hspec_old));
  12485. if (arg4 && host_to_target_itimerspec(arg4, &hspec_old)) {
  12486. return -TARGET_EFAULT;
  12487. }
  12488. }
  12489. return ret;
  12490. }
  12491. #endif
  12492. #ifdef TARGET_NR_timer_settime64
  12493. case TARGET_NR_timer_settime64:
  12494. {
  12495. target_timer_t timerid = get_timer_id(arg1);
  12496. if (timerid < 0) {
  12497. ret = timerid;
  12498. } else if (arg3 == 0) {
  12499. ret = -TARGET_EINVAL;
  12500. } else {
  12501. timer_t htimer = g_posix_timers[timerid];
  12502. struct itimerspec hspec_new = {{0},}, hspec_old = {{0},};
  12503. if (target_to_host_itimerspec64(&hspec_new, arg3)) {
  12504. return -TARGET_EFAULT;
  12505. }
  12506. ret = get_errno(
  12507. timer_settime(htimer, arg2, &hspec_new, &hspec_old));
  12508. if (arg4 && host_to_target_itimerspec64(arg4, &hspec_old)) {
  12509. return -TARGET_EFAULT;
  12510. }
  12511. }
  12512. return ret;
  12513. }
  12514. #endif
  12515. #ifdef TARGET_NR_timer_gettime
  12516. case TARGET_NR_timer_gettime:
  12517. {
  12518. /* args: timer_t timerid, struct itimerspec *curr_value */
  12519. target_timer_t timerid = get_timer_id(arg1);
  12520. if (timerid < 0) {
  12521. ret = timerid;
  12522. } else if (!arg2) {
  12523. ret = -TARGET_EFAULT;
  12524. } else {
  12525. timer_t htimer = g_posix_timers[timerid];
  12526. struct itimerspec hspec;
  12527. ret = get_errno(timer_gettime(htimer, &hspec));
  12528. if (host_to_target_itimerspec(arg2, &hspec)) {
  12529. ret = -TARGET_EFAULT;
  12530. }
  12531. }
  12532. return ret;
  12533. }
  12534. #endif
  12535. #ifdef TARGET_NR_timer_gettime64
  12536. case TARGET_NR_timer_gettime64:
  12537. {
  12538. /* args: timer_t timerid, struct itimerspec64 *curr_value */
  12539. target_timer_t timerid = get_timer_id(arg1);
  12540. if (timerid < 0) {
  12541. ret = timerid;
  12542. } else if (!arg2) {
  12543. ret = -TARGET_EFAULT;
  12544. } else {
  12545. timer_t htimer = g_posix_timers[timerid];
  12546. struct itimerspec hspec;
  12547. ret = get_errno(timer_gettime(htimer, &hspec));
  12548. if (host_to_target_itimerspec64(arg2, &hspec)) {
  12549. ret = -TARGET_EFAULT;
  12550. }
  12551. }
  12552. return ret;
  12553. }
  12554. #endif
  12555. #ifdef TARGET_NR_timer_getoverrun
  12556. case TARGET_NR_timer_getoverrun:
  12557. {
  12558. /* args: timer_t timerid */
  12559. target_timer_t timerid = get_timer_id(arg1);
  12560. if (timerid < 0) {
  12561. ret = timerid;
  12562. } else {
  12563. timer_t htimer = g_posix_timers[timerid];
  12564. ret = get_errno(timer_getoverrun(htimer));
  12565. }
  12566. return ret;
  12567. }
  12568. #endif
  12569. #ifdef TARGET_NR_timer_delete
  12570. case TARGET_NR_timer_delete:
  12571. {
  12572. /* args: timer_t timerid */
  12573. target_timer_t timerid = get_timer_id(arg1);
  12574. if (timerid < 0) {
  12575. ret = timerid;
  12576. } else {
  12577. timer_t htimer = g_posix_timers[timerid];
  12578. ret = get_errno(timer_delete(htimer));
  12579. free_host_timer_slot(timerid);
  12580. }
  12581. return ret;
  12582. }
  12583. #endif
  12584. #if defined(TARGET_NR_timerfd_create) && defined(CONFIG_TIMERFD)
  12585. case TARGET_NR_timerfd_create:
  12586. ret = get_errno(timerfd_create(arg1,
  12587. target_to_host_bitmask(arg2, fcntl_flags_tbl)));
  12588. if (ret >= 0) {
  12589. fd_trans_register(ret, &target_timerfd_trans);
  12590. }
  12591. return ret;
  12592. #endif
  12593. #if defined(TARGET_NR_timerfd_gettime) && defined(CONFIG_TIMERFD)
  12594. case TARGET_NR_timerfd_gettime:
  12595. {
  12596. struct itimerspec its_curr;
  12597. ret = get_errno(timerfd_gettime(arg1, &its_curr));
  12598. if (arg2 && host_to_target_itimerspec(arg2, &its_curr)) {
  12599. return -TARGET_EFAULT;
  12600. }
  12601. }
  12602. return ret;
  12603. #endif
  12604. #if defined(TARGET_NR_timerfd_gettime64) && defined(CONFIG_TIMERFD)
  12605. case TARGET_NR_timerfd_gettime64:
  12606. {
  12607. struct itimerspec its_curr;
  12608. ret = get_errno(timerfd_gettime(arg1, &its_curr));
  12609. if (arg2 && host_to_target_itimerspec64(arg2, &its_curr)) {
  12610. return -TARGET_EFAULT;
  12611. }
  12612. }
  12613. return ret;
  12614. #endif
  12615. #if defined(TARGET_NR_timerfd_settime) && defined(CONFIG_TIMERFD)
  12616. case TARGET_NR_timerfd_settime:
  12617. {
  12618. struct itimerspec its_new, its_old, *p_new;
  12619. if (arg3) {
  12620. if (target_to_host_itimerspec(&its_new, arg3)) {
  12621. return -TARGET_EFAULT;
  12622. }
  12623. p_new = &its_new;
  12624. } else {
  12625. p_new = NULL;
  12626. }
  12627. ret = get_errno(timerfd_settime(arg1, arg2, p_new, &its_old));
  12628. if (arg4 && host_to_target_itimerspec(arg4, &its_old)) {
  12629. return -TARGET_EFAULT;
  12630. }
  12631. }
  12632. return ret;
  12633. #endif
  12634. #if defined(TARGET_NR_timerfd_settime64) && defined(CONFIG_TIMERFD)
  12635. case TARGET_NR_timerfd_settime64:
  12636. {
  12637. struct itimerspec its_new, its_old, *p_new;
  12638. if (arg3) {
  12639. if (target_to_host_itimerspec64(&its_new, arg3)) {
  12640. return -TARGET_EFAULT;
  12641. }
  12642. p_new = &its_new;
  12643. } else {
  12644. p_new = NULL;
  12645. }
  12646. ret = get_errno(timerfd_settime(arg1, arg2, p_new, &its_old));
  12647. if (arg4 && host_to_target_itimerspec64(arg4, &its_old)) {
  12648. return -TARGET_EFAULT;
  12649. }
  12650. }
  12651. return ret;
  12652. #endif
  12653. #if defined(TARGET_NR_ioprio_get) && defined(__NR_ioprio_get)
  12654. case TARGET_NR_ioprio_get:
  12655. return get_errno(ioprio_get(arg1, arg2));
  12656. #endif
  12657. #if defined(TARGET_NR_ioprio_set) && defined(__NR_ioprio_set)
  12658. case TARGET_NR_ioprio_set:
  12659. return get_errno(ioprio_set(arg1, arg2, arg3));
  12660. #endif
  12661. #if defined(TARGET_NR_setns) && defined(CONFIG_SETNS)
  12662. case TARGET_NR_setns:
  12663. return get_errno(setns(arg1, arg2));
  12664. #endif
  12665. #if defined(TARGET_NR_unshare) && defined(CONFIG_SETNS)
  12666. case TARGET_NR_unshare:
  12667. return get_errno(unshare(arg1));
  12668. #endif
  12669. #if defined(TARGET_NR_kcmp) && defined(__NR_kcmp)
  12670. case TARGET_NR_kcmp:
  12671. return get_errno(kcmp(arg1, arg2, arg3, arg4, arg5));
  12672. #endif
  12673. #ifdef TARGET_NR_swapcontext
  12674. case TARGET_NR_swapcontext:
  12675. /* PowerPC specific. */
  12676. return do_swapcontext(cpu_env, arg1, arg2, arg3);
  12677. #endif
  12678. #ifdef TARGET_NR_memfd_create
  12679. case TARGET_NR_memfd_create:
  12680. p = lock_user_string(arg1);
  12681. if (!p) {
  12682. return -TARGET_EFAULT;
  12683. }
  12684. ret = get_errno(memfd_create(p, arg2));
  12685. fd_trans_unregister(ret);
  12686. unlock_user(p, arg1, 0);
  12687. return ret;
  12688. #endif
  12689. #if defined TARGET_NR_membarrier && defined __NR_membarrier
  12690. case TARGET_NR_membarrier:
  12691. return get_errno(membarrier(arg1, arg2));
  12692. #endif
  12693. #if defined(TARGET_NR_copy_file_range) && defined(__NR_copy_file_range)
  12694. case TARGET_NR_copy_file_range:
  12695. {
  12696. loff_t inoff, outoff;
  12697. loff_t *pinoff = NULL, *poutoff = NULL;
  12698. if (arg2) {
  12699. if (get_user_u64(inoff, arg2)) {
  12700. return -TARGET_EFAULT;
  12701. }
  12702. pinoff = &inoff;
  12703. }
  12704. if (arg4) {
  12705. if (get_user_u64(outoff, arg4)) {
  12706. return -TARGET_EFAULT;
  12707. }
  12708. poutoff = &outoff;
  12709. }
  12710. /* Do not sign-extend the count parameter. */
  12711. ret = get_errno(safe_copy_file_range(arg1, pinoff, arg3, poutoff,
  12712. (abi_ulong)arg5, arg6));
  12713. if (!is_error(ret) && ret > 0) {
  12714. if (arg2) {
  12715. if (put_user_u64(inoff, arg2)) {
  12716. return -TARGET_EFAULT;
  12717. }
  12718. }
  12719. if (arg4) {
  12720. if (put_user_u64(outoff, arg4)) {
  12721. return -TARGET_EFAULT;
  12722. }
  12723. }
  12724. }
  12725. }
  12726. return ret;
  12727. #endif
  12728. #if defined(TARGET_NR_pivot_root)
  12729. case TARGET_NR_pivot_root:
  12730. {
  12731. void *p2;
  12732. p = lock_user_string(arg1); /* new_root */
  12733. p2 = lock_user_string(arg2); /* put_old */
  12734. if (!p || !p2) {
  12735. ret = -TARGET_EFAULT;
  12736. } else {
  12737. ret = get_errno(pivot_root(p, p2));
  12738. }
  12739. unlock_user(p2, arg2, 0);
  12740. unlock_user(p, arg1, 0);
  12741. }
  12742. return ret;
  12743. #endif
  12744. #if defined(TARGET_NR_riscv_hwprobe)
  12745. case TARGET_NR_riscv_hwprobe:
  12746. return do_riscv_hwprobe(cpu_env, arg1, arg2, arg3, arg4, arg5);
  12747. #endif
  12748. default:
  12749. qemu_log_mask(LOG_UNIMP, "Unsupported syscall: %d\n", num);
  12750. return -TARGET_ENOSYS;
  12751. }
  12752. return ret;
  12753. }
  12754. abi_long do_syscall(CPUArchState *cpu_env, int num, abi_long arg1,
  12755. abi_long arg2, abi_long arg3, abi_long arg4,
  12756. abi_long arg5, abi_long arg6, abi_long arg7,
  12757. abi_long arg8)
  12758. {
  12759. CPUState *cpu = env_cpu(cpu_env);
  12760. abi_long ret;
  12761. #ifdef DEBUG_ERESTARTSYS
  12762. /* Debug-only code for exercising the syscall-restart code paths
  12763. * in the per-architecture cpu main loops: restart every syscall
  12764. * the guest makes once before letting it through.
  12765. */
  12766. {
  12767. static bool flag;
  12768. flag = !flag;
  12769. if (flag) {
  12770. return -QEMU_ERESTARTSYS;
  12771. }
  12772. }
  12773. #endif
  12774. record_syscall_start(cpu, num, arg1,
  12775. arg2, arg3, arg4, arg5, arg6, arg7, arg8);
  12776. if (unlikely(qemu_loglevel_mask(LOG_STRACE))) {
  12777. print_syscall(cpu_env, num, arg1, arg2, arg3, arg4, arg5, arg6);
  12778. }
  12779. ret = do_syscall1(cpu_env, num, arg1, arg2, arg3, arg4,
  12780. arg5, arg6, arg7, arg8);
  12781. if (unlikely(qemu_loglevel_mask(LOG_STRACE))) {
  12782. print_syscall_ret(cpu_env, num, ret, arg1, arg2,
  12783. arg3, arg4, arg5, arg6);
  12784. }
  12785. record_syscall_return(cpu, num, ret);
  12786. return ret;
  12787. }