/*
 *  Linux syscalls
 *
 *  Copyright (c) 2003 Fabrice Bellard
 *
 *  This program is free software; you can redistribute it and/or modify
 *  it under the terms of the GNU General Public License as published by
 *  the Free Software Foundation; either version 2 of the License, or
 *  (at your option) any later version.
 *
 *  This program is distributed in the hope that it will be useful,
 *  but WITHOUT ANY WARRANTY; without even the implied warranty of
 *  MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
 *  GNU General Public License for more details.
 *
 *  You should have received a copy of the GNU General Public License
 *  along with this program; if not, see <http://www.gnu.org/licenses/>.
 */
#define _ATFILE_SOURCE
#include <stdlib.h>
#include <stdio.h>
#include <stdarg.h>
#include <string.h>
#include <elf.h>
#include <endian.h>
#include <errno.h>
#include <unistd.h>
#include <fcntl.h>
#include <time.h>
#include <limits.h>
#include <grp.h>
#include <sys/types.h>
#include <sys/ipc.h>
#include <sys/msg.h>
#include <sys/wait.h>
#include <sys/time.h>
#include <sys/stat.h>
#include <sys/mount.h>
#include <sys/file.h>
#include <sys/fsuid.h>
#include <sys/personality.h>
#include <sys/prctl.h>
#include <sys/resource.h>
#include <sys/mman.h>
#include <sys/swap.h>
#include <linux/capability.h>
#include <signal.h>
#include <sched.h>
#ifdef __ia64__
int __clone2(int (*fn)(void *), void *child_stack_base,
             size_t stack_size, int flags, void *arg, ...);
#endif
#include <sys/socket.h>
#include <sys/un.h>
#include <sys/uio.h>
#include <sys/poll.h>
#include <sys/times.h>
#include <sys/shm.h>
#include <sys/sem.h>
#include <sys/statfs.h>
#include <utime.h>
#include <sys/sysinfo.h>
//#include <sys/user.h>
#include <netinet/ip.h>
#include <netinet/tcp.h>
#include <linux/wireless.h>
#include <linux/icmp.h>
#include "qemu-common.h"
#ifdef CONFIG_TIMERFD
#include <sys/timerfd.h>
#endif
#ifdef TARGET_GPROF
#include <sys/gmon.h>
#endif
#ifdef CONFIG_EVENTFD
#include <sys/eventfd.h>
#endif
#ifdef CONFIG_EPOLL
#include <sys/epoll.h>
#endif
#ifdef CONFIG_ATTR
#include "qemu/xattr.h"
#endif
#ifdef CONFIG_SENDFILE
#include <sys/sendfile.h>
#endif
#define termios host_termios
#define winsize host_winsize
#define termio host_termio
#define sgttyb host_sgttyb /* same as target */
#define tchars host_tchars /* same as target */
#define ltchars host_ltchars /* same as target */

#include <linux/termios.h>
#include <linux/unistd.h>
#include <linux/cdrom.h>
#include <linux/hdreg.h>
#include <linux/soundcard.h>
#include <linux/kd.h>
#include <linux/mtio.h>
#include <linux/fs.h>
#if defined(CONFIG_FIEMAP)
#include <linux/fiemap.h>
#endif
#include <linux/fb.h>
#include <linux/vt.h>
#include <linux/dm-ioctl.h>
#include <linux/reboot.h>
#include <linux/route.h>
#include <linux/filter.h>
#include <linux/blkpg.h>
#include "linux_loop.h"
#include "uname.h"

#include "qemu.h"

#define CLONE_NPTL_FLAGS2 (CLONE_SETTLS | \
    CLONE_PARENT_SETTID | CLONE_CHILD_SETTID | CLONE_CHILD_CLEARTID)

//#define DEBUG

//#include <linux/msdos_fs.h>
#define VFAT_IOCTL_READDIR_BOTH  _IOR('r', 1, struct linux_dirent [2])
#define VFAT_IOCTL_READDIR_SHORT _IOR('r', 2, struct linux_dirent [2])

#undef _syscall0
#undef _syscall1
#undef _syscall2
#undef _syscall3
#undef _syscall4
#undef _syscall5
#undef _syscall6
#define _syscall0(type,name) \
static type name (void) \
{ \
    return syscall(__NR_##name); \
}

#define _syscall1(type,name,type1,arg1) \
static type name (type1 arg1) \
{ \
    return syscall(__NR_##name, arg1); \
}

#define _syscall2(type,name,type1,arg1,type2,arg2) \
static type name (type1 arg1,type2 arg2) \
{ \
    return syscall(__NR_##name, arg1, arg2); \
}

#define _syscall3(type,name,type1,arg1,type2,arg2,type3,arg3) \
static type name (type1 arg1,type2 arg2,type3 arg3) \
{ \
    return syscall(__NR_##name, arg1, arg2, arg3); \
}

#define _syscall4(type,name,type1,arg1,type2,arg2,type3,arg3,type4,arg4) \
static type name (type1 arg1,type2 arg2,type3 arg3,type4 arg4) \
{ \
    return syscall(__NR_##name, arg1, arg2, arg3, arg4); \
}

#define _syscall5(type,name,type1,arg1,type2,arg2,type3,arg3,type4,arg4, \
                  type5,arg5) \
static type name (type1 arg1,type2 arg2,type3 arg3,type4 arg4,type5 arg5) \
{ \
    return syscall(__NR_##name, arg1, arg2, arg3, arg4, arg5); \
}

#define _syscall6(type,name,type1,arg1,type2,arg2,type3,arg3,type4,arg4, \
                  type5,arg5,type6,arg6) \
static type name (type1 arg1,type2 arg2,type3 arg3,type4 arg4,type5 arg5, \
                  type6 arg6) \
{ \
    return syscall(__NR_##name, arg1, arg2, arg3, arg4, arg5, arg6); \
}
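
/*
 * For illustration: an invocation later in this file such as
 *     _syscall2(int, sys_tkill, int, tid, int, sig)
 * expands to a small static wrapper that funnels straight into the host
 * syscall(2) mechanism:
 *     static int sys_tkill (int tid, int sig)
 *     {
 *         return syscall(__NR_sys_tkill, tid, sig);
 *     }
 * where __NR_sys_tkill is mapped onto the host's __NR_tkill by the defines
 * just below.
 */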
#define __NR_sys_uname __NR_uname
#define __NR_sys_getcwd1 __NR_getcwd
#define __NR_sys_getdents __NR_getdents
#define __NR_sys_getdents64 __NR_getdents64
#define __NR_sys_getpriority __NR_getpriority
#define __NR_sys_rt_sigqueueinfo __NR_rt_sigqueueinfo
#define __NR_sys_syslog __NR_syslog
#define __NR_sys_tgkill __NR_tgkill
#define __NR_sys_tkill __NR_tkill
#define __NR_sys_futex __NR_futex
#define __NR_sys_inotify_init __NR_inotify_init
#define __NR_sys_inotify_add_watch __NR_inotify_add_watch
#define __NR_sys_inotify_rm_watch __NR_inotify_rm_watch

#if defined(__alpha__) || defined (__ia64__) || defined(__x86_64__) || \
    defined(__s390x__)
#define __NR__llseek __NR_lseek
#endif

/* Newer kernel ports have llseek() instead of _llseek() */
#if defined(TARGET_NR_llseek) && !defined(TARGET_NR__llseek)
#define TARGET_NR__llseek TARGET_NR_llseek
#endif
#ifdef __NR_gettid
_syscall0(int, gettid)
#else
/* This is a replacement for the host gettid() and must return a host
   errno. */
static int gettid(void) {
    return -ENOSYS;
}
#endif
#ifdef __NR_getdents
_syscall3(int, sys_getdents, uint, fd, struct linux_dirent *, dirp, uint, count);
#endif
#if !defined(__NR_getdents) || \
    (defined(TARGET_NR_getdents64) && defined(__NR_getdents64))
_syscall3(int, sys_getdents64, uint, fd, struct linux_dirent64 *, dirp, uint, count);
#endif
#if defined(TARGET_NR__llseek) && defined(__NR_llseek)
_syscall5(int, _llseek, uint, fd, ulong, hi, ulong, lo,
          loff_t *, res, uint, wh);
#endif
_syscall3(int,sys_rt_sigqueueinfo,int,pid,int,sig,siginfo_t *,uinfo)
_syscall3(int,sys_syslog,int,type,char*,bufp,int,len)
#if defined(TARGET_NR_tgkill) && defined(__NR_tgkill)
_syscall3(int,sys_tgkill,int,tgid,int,pid,int,sig)
#endif
#if defined(TARGET_NR_tkill) && defined(__NR_tkill)
_syscall2(int,sys_tkill,int,tid,int,sig)
#endif
#ifdef __NR_exit_group
_syscall1(int,exit_group,int,error_code)
#endif
#if defined(TARGET_NR_set_tid_address) && defined(__NR_set_tid_address)
_syscall1(int,set_tid_address,int *,tidptr)
#endif
#if defined(TARGET_NR_futex) && defined(__NR_futex)
_syscall6(int,sys_futex,int *,uaddr,int,op,int,val,
          const struct timespec *,timeout,int *,uaddr2,int,val3)
#endif
#define __NR_sys_sched_getaffinity __NR_sched_getaffinity
_syscall3(int, sys_sched_getaffinity, pid_t, pid, unsigned int, len,
          unsigned long *, user_mask_ptr);
#define __NR_sys_sched_setaffinity __NR_sched_setaffinity
_syscall3(int, sys_sched_setaffinity, pid_t, pid, unsigned int, len,
          unsigned long *, user_mask_ptr);
_syscall4(int, reboot, int, magic1, int, magic2, unsigned int, cmd,
          void *, arg);
_syscall2(int, capget, struct __user_cap_header_struct *, header,
          struct __user_cap_data_struct *, data);
_syscall2(int, capset, struct __user_cap_header_struct *, header,
          struct __user_cap_data_struct *, data);
#if defined(TARGET_NR_ioprio_get) && defined(__NR_ioprio_get)
_syscall2(int, ioprio_get, int, which, int, who)
#endif
#if defined(TARGET_NR_ioprio_set) && defined(__NR_ioprio_set)
_syscall3(int, ioprio_set, int, which, int, who, int, ioprio)
#endif
static bitmask_transtbl fcntl_flags_tbl[] = {
    { TARGET_O_ACCMODE,   TARGET_O_WRONLY,    O_ACCMODE,   O_WRONLY,    },
    { TARGET_O_ACCMODE,   TARGET_O_RDWR,      O_ACCMODE,   O_RDWR,      },
    { TARGET_O_CREAT,     TARGET_O_CREAT,     O_CREAT,     O_CREAT,     },
    { TARGET_O_EXCL,      TARGET_O_EXCL,      O_EXCL,      O_EXCL,      },
    { TARGET_O_NOCTTY,    TARGET_O_NOCTTY,    O_NOCTTY,    O_NOCTTY,    },
    { TARGET_O_TRUNC,     TARGET_O_TRUNC,     O_TRUNC,     O_TRUNC,     },
    { TARGET_O_APPEND,    TARGET_O_APPEND,    O_APPEND,    O_APPEND,    },
    { TARGET_O_NONBLOCK,  TARGET_O_NONBLOCK,  O_NONBLOCK,  O_NONBLOCK,  },
    { TARGET_O_SYNC,      TARGET_O_DSYNC,     O_SYNC,      O_DSYNC,     },
    { TARGET_O_SYNC,      TARGET_O_SYNC,      O_SYNC,      O_SYNC,      },
    { TARGET_FASYNC,      TARGET_FASYNC,      FASYNC,      FASYNC,      },
    { TARGET_O_DIRECTORY, TARGET_O_DIRECTORY, O_DIRECTORY, O_DIRECTORY, },
    { TARGET_O_NOFOLLOW,  TARGET_O_NOFOLLOW,  O_NOFOLLOW,  O_NOFOLLOW,  },
#if defined(O_DIRECT)
    { TARGET_O_DIRECT,    TARGET_O_DIRECT,    O_DIRECT,    O_DIRECT,    },
#endif
#if defined(O_NOATIME)
    { TARGET_O_NOATIME,   TARGET_O_NOATIME,   O_NOATIME,   O_NOATIME    },
#endif
#if defined(O_CLOEXEC)
    { TARGET_O_CLOEXEC,   TARGET_O_CLOEXEC,   O_CLOEXEC,   O_CLOEXEC    },
#endif
#if defined(O_PATH)
    { TARGET_O_PATH,      TARGET_O_PATH,      O_PATH,      O_PATH       },
#endif
    /* Don't terminate the list prematurely on 64-bit host+guest. */
#if TARGET_O_LARGEFILE != 0 || O_LARGEFILE != 0
    { TARGET_O_LARGEFILE, TARGET_O_LARGEFILE, O_LARGEFILE, O_LARGEFILE, },
#endif
    { 0, 0, 0, 0 }
};
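
/*
 * Each entry above is { target_mask, target_bits, host_mask, host_bits }.
 * The generic bitmask translation helpers (e.g. target_to_host_bitmask()
 * declared in qemu.h) walk such a table and substitute the host bits for
 * the matching target bits (or vice versa), so open()/fcntl() flags are
 * converted correctly even when the numeric flag values differ between
 * guest and host ABIs.
 */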
static int sys_getcwd1(char *buf, size_t size)
{
    if (getcwd(buf, size) == NULL) {
        /* getcwd() sets errno */
        return (-1);
    }
    return strlen(buf)+1;
}

static int sys_openat(int dirfd, const char *pathname, int flags, mode_t mode)
{
    /*
     * open(2) has extra parameter 'mode' when called with
     * flag O_CREAT.
     */
    if ((flags & O_CREAT) != 0) {
        return (openat(dirfd, pathname, flags, mode));
    }
    return (openat(dirfd, pathname, flags));
}
#ifdef TARGET_NR_utimensat
#ifdef CONFIG_UTIMENSAT
static int sys_utimensat(int dirfd, const char *pathname,
                         const struct timespec times[2], int flags)
{
    if (pathname == NULL)
        return futimens(dirfd, times);
    else
        return utimensat(dirfd, pathname, times, flags);
}
#elif defined(__NR_utimensat)
#define __NR_sys_utimensat __NR_utimensat
_syscall4(int,sys_utimensat,int,dirfd,const char *,pathname,
          const struct timespec *,tsp,int,flags)
#else
static int sys_utimensat(int dirfd, const char *pathname,
                         const struct timespec times[2], int flags)
{
    errno = ENOSYS;
    return -1;
}
#endif
#endif /* TARGET_NR_utimensat */
#ifdef CONFIG_INOTIFY
#include <sys/inotify.h>

#if defined(TARGET_NR_inotify_init) && defined(__NR_inotify_init)
static int sys_inotify_init(void)
{
    return (inotify_init());
}
#endif
#if defined(TARGET_NR_inotify_add_watch) && defined(__NR_inotify_add_watch)
static int sys_inotify_add_watch(int fd,const char *pathname, int32_t mask)
{
    return (inotify_add_watch(fd, pathname, mask));
}
#endif
#if defined(TARGET_NR_inotify_rm_watch) && defined(__NR_inotify_rm_watch)
static int sys_inotify_rm_watch(int fd, int32_t wd)
{
    return (inotify_rm_watch(fd, wd));
}
#endif
#ifdef CONFIG_INOTIFY1
#if defined(TARGET_NR_inotify_init1) && defined(__NR_inotify_init1)
static int sys_inotify_init1(int flags)
{
    return (inotify_init1(flags));
}
#endif
#endif
#else
/* Userspace can usually survive runtime without inotify */
#undef TARGET_NR_inotify_init
#undef TARGET_NR_inotify_init1
#undef TARGET_NR_inotify_add_watch
#undef TARGET_NR_inotify_rm_watch
#endif /* CONFIG_INOTIFY */
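
/*
 * Note on the "#define __NR_xxx -1" fallbacks used below: if the host libc
 * headers do not define the syscall number, the wrapper still compiles but
 * ends up calling syscall(-1, ...), which fails cleanly with ENOSYS at run
 * time instead of breaking the build.
 */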
#if defined(TARGET_NR_ppoll)
#ifndef __NR_ppoll
# define __NR_ppoll -1
#endif
#define __NR_sys_ppoll __NR_ppoll
_syscall5(int, sys_ppoll, struct pollfd *, fds, nfds_t, nfds,
          struct timespec *, timeout, const sigset_t *, sigmask,
          size_t, sigsetsize)
#endif

#if defined(TARGET_NR_pselect6)
#ifndef __NR_pselect6
# define __NR_pselect6 -1
#endif
#define __NR_sys_pselect6 __NR_pselect6
_syscall6(int, sys_pselect6, int, nfds, fd_set *, readfds, fd_set *, writefds,
          fd_set *, exceptfds, struct timespec *, timeout, void *, sig);
#endif

#if defined(TARGET_NR_prlimit64)
#ifndef __NR_prlimit64
# define __NR_prlimit64 -1
#endif
#define __NR_sys_prlimit64 __NR_prlimit64
/* The glibc rlimit structure may not be that used by the underlying syscall */
struct host_rlimit64 {
    uint64_t rlim_cur;
    uint64_t rlim_max;
};
_syscall4(int, sys_prlimit64, pid_t, pid, int, resource,
          const struct host_rlimit64 *, new_limit,
          struct host_rlimit64 *, old_limit)
#endif
#if defined(TARGET_NR_timer_create)
/* Maximum of 32 active POSIX timers allowed at any one time. */
static timer_t g_posix_timers[32] = { 0, } ;

static inline int next_free_host_timer(void)
{
    int k ;
    /* FIXME: Does finding the next free slot require a lock? */
    for (k = 0; k < ARRAY_SIZE(g_posix_timers); k++) {
        if (g_posix_timers[k] == 0) {
            g_posix_timers[k] = (timer_t) 1;
            return k;
        }
    }
    return -1;
}
#endif
/* ARM EABI and MIPS expect 64bit types aligned even on pairs of registers */
#ifdef TARGET_ARM
static inline int regpairs_aligned(void *cpu_env) {
    return ((((CPUARMState *)cpu_env)->eabi) == 1) ;
}
#elif defined(TARGET_MIPS)
static inline int regpairs_aligned(void *cpu_env) { return 1; }
#elif defined(TARGET_PPC) && !defined(TARGET_PPC64)
/* SysV ABI for PPC32 expects 64bit parameters to be passed on odd/even pairs
 * of registers which translates to the same as ARM/MIPS, because we start with
 * r3 as arg1 */
static inline int regpairs_aligned(void *cpu_env) { return 1; }
#else
static inline int regpairs_aligned(void *cpu_env) { return 0; }
#endif
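
/*
 * Where regpairs_aligned() returns 1, syscall handlers that receive a 64-bit
 * value in two 32-bit argument registers must skip one register so that the
 * pair starts on an even index; the value itself is then reassembled along
 * the lines of ((uint64_t)high << 32) | low, with the word order depending
 * on the target's endianness.
 */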
#define ERRNO_TABLE_SIZE 1200

/* target_to_host_errno_table[] is initialized from
 * host_to_target_errno_table[] in syscall_init(). */
static uint16_t target_to_host_errno_table[ERRNO_TABLE_SIZE] = {
};

/*
 * This list is the union of errno values overridden in asm-<arch>/errno.h
 * minus the errnos that are not actually generic to all archs.
 */
static uint16_t host_to_target_errno_table[ERRNO_TABLE_SIZE] = {
    [EIDRM]            = TARGET_EIDRM,
    [ECHRNG]           = TARGET_ECHRNG,
    [EL2NSYNC]         = TARGET_EL2NSYNC,
    [EL3HLT]           = TARGET_EL3HLT,
    [EL3RST]           = TARGET_EL3RST,
    [ELNRNG]           = TARGET_ELNRNG,
    [EUNATCH]          = TARGET_EUNATCH,
    [ENOCSI]           = TARGET_ENOCSI,
    [EL2HLT]           = TARGET_EL2HLT,
    [EDEADLK]          = TARGET_EDEADLK,
    [ENOLCK]           = TARGET_ENOLCK,
    [EBADE]            = TARGET_EBADE,
    [EBADR]            = TARGET_EBADR,
    [EXFULL]           = TARGET_EXFULL,
    [ENOANO]           = TARGET_ENOANO,
    [EBADRQC]          = TARGET_EBADRQC,
    [EBADSLT]          = TARGET_EBADSLT,
    [EBFONT]           = TARGET_EBFONT,
    [ENOSTR]           = TARGET_ENOSTR,
    [ENODATA]          = TARGET_ENODATA,
    [ETIME]            = TARGET_ETIME,
    [ENOSR]            = TARGET_ENOSR,
    [ENONET]           = TARGET_ENONET,
    [ENOPKG]           = TARGET_ENOPKG,
    [EREMOTE]          = TARGET_EREMOTE,
    [ENOLINK]          = TARGET_ENOLINK,
    [EADV]             = TARGET_EADV,
    [ESRMNT]           = TARGET_ESRMNT,
    [ECOMM]            = TARGET_ECOMM,
    [EPROTO]           = TARGET_EPROTO,
    [EDOTDOT]          = TARGET_EDOTDOT,
    [EMULTIHOP]        = TARGET_EMULTIHOP,
    [EBADMSG]          = TARGET_EBADMSG,
    [ENAMETOOLONG]     = TARGET_ENAMETOOLONG,
    [EOVERFLOW]        = TARGET_EOVERFLOW,
    [ENOTUNIQ]         = TARGET_ENOTUNIQ,
    [EBADFD]           = TARGET_EBADFD,
    [EREMCHG]          = TARGET_EREMCHG,
    [ELIBACC]          = TARGET_ELIBACC,
    [ELIBBAD]          = TARGET_ELIBBAD,
    [ELIBSCN]          = TARGET_ELIBSCN,
    [ELIBMAX]          = TARGET_ELIBMAX,
    [ELIBEXEC]         = TARGET_ELIBEXEC,
    [EILSEQ]           = TARGET_EILSEQ,
    [ENOSYS]           = TARGET_ENOSYS,
    [ELOOP]            = TARGET_ELOOP,
    [ERESTART]         = TARGET_ERESTART,
    [ESTRPIPE]         = TARGET_ESTRPIPE,
    [ENOTEMPTY]        = TARGET_ENOTEMPTY,
    [EUSERS]           = TARGET_EUSERS,
    [ENOTSOCK]         = TARGET_ENOTSOCK,
    [EDESTADDRREQ]     = TARGET_EDESTADDRREQ,
    [EMSGSIZE]         = TARGET_EMSGSIZE,
    [EPROTOTYPE]       = TARGET_EPROTOTYPE,
    [ENOPROTOOPT]      = TARGET_ENOPROTOOPT,
    [EPROTONOSUPPORT]  = TARGET_EPROTONOSUPPORT,
    [ESOCKTNOSUPPORT]  = TARGET_ESOCKTNOSUPPORT,
    [EOPNOTSUPP]       = TARGET_EOPNOTSUPP,
    [EPFNOSUPPORT]     = TARGET_EPFNOSUPPORT,
    [EAFNOSUPPORT]     = TARGET_EAFNOSUPPORT,
    [EADDRINUSE]       = TARGET_EADDRINUSE,
    [EADDRNOTAVAIL]    = TARGET_EADDRNOTAVAIL,
    [ENETDOWN]         = TARGET_ENETDOWN,
    [ENETUNREACH]      = TARGET_ENETUNREACH,
    [ENETRESET]        = TARGET_ENETRESET,
    [ECONNABORTED]     = TARGET_ECONNABORTED,
    [ECONNRESET]       = TARGET_ECONNRESET,
    [ENOBUFS]          = TARGET_ENOBUFS,
    [EISCONN]          = TARGET_EISCONN,
    [ENOTCONN]         = TARGET_ENOTCONN,
    [EUCLEAN]          = TARGET_EUCLEAN,
    [ENOTNAM]          = TARGET_ENOTNAM,
    [ENAVAIL]          = TARGET_ENAVAIL,
    [EISNAM]           = TARGET_EISNAM,
    [EREMOTEIO]        = TARGET_EREMOTEIO,
    [ESHUTDOWN]        = TARGET_ESHUTDOWN,
    [ETOOMANYREFS]     = TARGET_ETOOMANYREFS,
    [ETIMEDOUT]        = TARGET_ETIMEDOUT,
    [ECONNREFUSED]     = TARGET_ECONNREFUSED,
    [EHOSTDOWN]        = TARGET_EHOSTDOWN,
    [EHOSTUNREACH]     = TARGET_EHOSTUNREACH,
    [EALREADY]         = TARGET_EALREADY,
    [EINPROGRESS]      = TARGET_EINPROGRESS,
    [ESTALE]           = TARGET_ESTALE,
    [ECANCELED]        = TARGET_ECANCELED,
    [ENOMEDIUM]        = TARGET_ENOMEDIUM,
    [EMEDIUMTYPE]      = TARGET_EMEDIUMTYPE,
#ifdef ENOKEY
    [ENOKEY]           = TARGET_ENOKEY,
#endif
#ifdef EKEYEXPIRED
    [EKEYEXPIRED]      = TARGET_EKEYEXPIRED,
#endif
#ifdef EKEYREVOKED
    [EKEYREVOKED]      = TARGET_EKEYREVOKED,
#endif
#ifdef EKEYREJECTED
    [EKEYREJECTED]     = TARGET_EKEYREJECTED,
#endif
#ifdef EOWNERDEAD
    [EOWNERDEAD]       = TARGET_EOWNERDEAD,
#endif
#ifdef ENOTRECOVERABLE
    [ENOTRECOVERABLE]  = TARGET_ENOTRECOVERABLE,
#endif
};
  529. static inline int host_to_target_errno(int err)
  530. {
  531. if(host_to_target_errno_table[err])
  532. return host_to_target_errno_table[err];
  533. return err;
  534. }
  535. static inline int target_to_host_errno(int err)
  536. {
  537. if (target_to_host_errno_table[err])
  538. return target_to_host_errno_table[err];
  539. return err;
  540. }
  541. static inline abi_long get_errno(abi_long ret)
  542. {
  543. if (ret == -1)
  544. return -host_to_target_errno(errno);
  545. else
  546. return ret;
  547. }
  548. static inline int is_error(abi_long ret)
  549. {
  550. return (abi_ulong)ret >= (abi_ulong)(-4096);
  551. }
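/* Worked example of the error convention (illustrative): get_errno() turns a
 * failed host call (return -1, errno set) into the negative *target* errno,
 * e.g. ENOENT becomes -TARGET_ENOENT.  is_error() mirrors the kernel rule
 * that the last 4096 values of the address space encode errors, so any value
 * >= (abi_ulong)-4096 is treated as a failure. */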
  552. char *target_strerror(int err)
  553. {
  554. if ((err >= ERRNO_TABLE_SIZE) || (err < 0)) {
  555. return NULL;
  556. }
  557. return strerror(target_to_host_errno(err));
  558. }
  559. static inline int host_to_target_sock_type(int host_type)
  560. {
  561. int target_type;
  562. switch (host_type & 0xf /* SOCK_TYPE_MASK */) {
  563. case SOCK_DGRAM:
  564. target_type = TARGET_SOCK_DGRAM;
  565. break;
  566. case SOCK_STREAM:
  567. target_type = TARGET_SOCK_STREAM;
  568. break;
  569. default:
  570. target_type = host_type & 0xf /* SOCK_TYPE_MASK */;
  571. break;
  572. }
  573. #if defined(SOCK_CLOEXEC)
  574. if (host_type & SOCK_CLOEXEC) {
  575. target_type |= TARGET_SOCK_CLOEXEC;
  576. }
  577. #endif
  578. #if defined(SOCK_NONBLOCK)
  579. if (host_type & SOCK_NONBLOCK) {
  580. target_type |= TARGET_SOCK_NONBLOCK;
  581. }
  582. #endif
  583. return target_type;
  584. }
  585. static abi_ulong target_brk;
  586. static abi_ulong target_original_brk;
  587. static abi_ulong brk_page;
  588. void target_set_brk(abi_ulong new_brk)
  589. {
  590. target_original_brk = target_brk = HOST_PAGE_ALIGN(new_brk);
  591. brk_page = HOST_PAGE_ALIGN(target_brk);
  592. }
  593. //#define DEBUGF_BRK(message, args...) do { fprintf(stderr, (message), ## args); } while (0)
  594. #define DEBUGF_BRK(message, args...)
  595. /* do_brk() must return target values and target errnos. */
  596. abi_long do_brk(abi_ulong new_brk)
  597. {
  598. abi_long mapped_addr;
  599. int new_alloc_size;
  600. DEBUGF_BRK("do_brk(" TARGET_ABI_FMT_lx ") -> ", new_brk);
  601. if (!new_brk) {
  602. DEBUGF_BRK(TARGET_ABI_FMT_lx " (!new_brk)\n", target_brk);
  603. return target_brk;
  604. }
  605. if (new_brk < target_original_brk) {
  606. DEBUGF_BRK(TARGET_ABI_FMT_lx " (new_brk < target_original_brk)\n",
  607. target_brk);
  608. return target_brk;
  609. }
  610. /* If the new brk is less than the highest page reserved to the
  611. * target heap allocation, set it and we're almost done... */
  612. if (new_brk <= brk_page) {
  613. /* Heap contents are initialized to zero, as for anonymous
  614. * mapped pages. */
  615. if (new_brk > target_brk) {
  616. memset(g2h(target_brk), 0, new_brk - target_brk);
  617. }
  618. target_brk = new_brk;
  619. DEBUGF_BRK(TARGET_ABI_FMT_lx " (new_brk <= brk_page)\n", target_brk);
  620. return target_brk;
  621. }
  622. /* We need to allocate more memory after the brk... Note that
  623. * we don't use MAP_FIXED because that will map over the top of
  624. * any existing mapping (like the one with the host libc or qemu
  625. * itself); instead we treat "mapped but at wrong address" as
  626. * a failure and unmap again.
  627. */
  628. new_alloc_size = HOST_PAGE_ALIGN(new_brk - brk_page);
  629. mapped_addr = get_errno(target_mmap(brk_page, new_alloc_size,
  630. PROT_READ|PROT_WRITE,
  631. MAP_ANON|MAP_PRIVATE, 0, 0));
  632. if (mapped_addr == brk_page) {
  633. /* Heap contents are initialized to zero, as for anonymous
  634. * mapped pages. Technically the new pages are already
  635. * initialized to zero since they *are* anonymous mapped
  636. * pages, however we have to take care with the contents that
  637. * come from the remaining part of the previous page: it may
638. * contain garbage data from a previous heap usage (grown
639. * then shrunk). */
  640. memset(g2h(target_brk), 0, brk_page - target_brk);
  641. target_brk = new_brk;
  642. brk_page = HOST_PAGE_ALIGN(target_brk);
  643. DEBUGF_BRK(TARGET_ABI_FMT_lx " (mapped_addr == brk_page)\n",
  644. target_brk);
  645. return target_brk;
  646. } else if (mapped_addr != -1) {
  647. /* Mapped but at wrong address, meaning there wasn't actually
  648. * enough space for this brk.
  649. */
  650. target_munmap(mapped_addr, new_alloc_size);
  651. mapped_addr = -1;
  652. DEBUGF_BRK(TARGET_ABI_FMT_lx " (mapped_addr != -1)\n", target_brk);
  653. }
  654. else {
  655. DEBUGF_BRK(TARGET_ABI_FMT_lx " (otherwise)\n", target_brk);
  656. }
  657. #if defined(TARGET_ALPHA)
  658. /* We (partially) emulate OSF/1 on Alpha, which requires we
  659. return a proper errno, not an unchanged brk value. */
  660. return -TARGET_ENOMEM;
  661. #endif
  662. /* For everything else, return the previous break. */
  663. return target_brk;
  664. }
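/* Sketch of the cases handled above (illustrative, not from the original):
 *   - new_brk == 0 or below the original brk: just report the current brk;
 *   - new_brk still inside the page already reserved for the heap: zero the
 *     newly exposed bytes and move target_brk;
 *   - otherwise: mmap more anonymous memory right after brk_page, and treat
 *     "mapped somewhere else" as ENOMEM rather than forcing MAP_FIXED. */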
  665. static inline abi_long copy_from_user_fdset(fd_set *fds,
  666. abi_ulong target_fds_addr,
  667. int n)
  668. {
  669. int i, nw, j, k;
  670. abi_ulong b, *target_fds;
  671. nw = (n + TARGET_ABI_BITS - 1) / TARGET_ABI_BITS;
  672. if (!(target_fds = lock_user(VERIFY_READ,
  673. target_fds_addr,
  674. sizeof(abi_ulong) * nw,
  675. 1)))
  676. return -TARGET_EFAULT;
  677. FD_ZERO(fds);
  678. k = 0;
  679. for (i = 0; i < nw; i++) {
  680. /* grab the abi_ulong */
  681. __get_user(b, &target_fds[i]);
  682. for (j = 0; j < TARGET_ABI_BITS; j++) {
  683. /* check the bit inside the abi_ulong */
  684. if ((b >> j) & 1)
  685. FD_SET(k, fds);
  686. k++;
  687. }
  688. }
  689. unlock_user(target_fds, target_fds_addr, 0);
  690. return 0;
  691. }
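/* Layout example (illustrative): with TARGET_ABI_BITS == 32 and n == 40,
 * nw == 2 abi_ulongs are read.  Target fd 35 lives in bit 3 of the second
 * abi_ulong; the loop above walks every bit and calls FD_SET() on the host
 * fd_set, so host and target bit layouts never need to match. */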
  692. static inline abi_ulong copy_from_user_fdset_ptr(fd_set *fds, fd_set **fds_ptr,
  693. abi_ulong target_fds_addr,
  694. int n)
  695. {
  696. if (target_fds_addr) {
  697. if (copy_from_user_fdset(fds, target_fds_addr, n))
  698. return -TARGET_EFAULT;
  699. *fds_ptr = fds;
  700. } else {
  701. *fds_ptr = NULL;
  702. }
  703. return 0;
  704. }
  705. static inline abi_long copy_to_user_fdset(abi_ulong target_fds_addr,
  706. const fd_set *fds,
  707. int n)
  708. {
  709. int i, nw, j, k;
  710. abi_long v;
  711. abi_ulong *target_fds;
  712. nw = (n + TARGET_ABI_BITS - 1) / TARGET_ABI_BITS;
  713. if (!(target_fds = lock_user(VERIFY_WRITE,
  714. target_fds_addr,
  715. sizeof(abi_ulong) * nw,
  716. 0)))
  717. return -TARGET_EFAULT;
  718. k = 0;
  719. for (i = 0; i < nw; i++) {
  720. v = 0;
  721. for (j = 0; j < TARGET_ABI_BITS; j++) {
  722. v |= ((abi_ulong)(FD_ISSET(k, fds) != 0) << j);
  723. k++;
  724. }
  725. __put_user(v, &target_fds[i]);
  726. }
  727. unlock_user(target_fds, target_fds_addr, sizeof(abi_ulong) * nw);
  728. return 0;
  729. }
  730. #if defined(__alpha__)
  731. #define HOST_HZ 1024
  732. #else
  733. #define HOST_HZ 100
  734. #endif
  735. static inline abi_long host_to_target_clock_t(long ticks)
  736. {
  737. #if HOST_HZ == TARGET_HZ
  738. return ticks;
  739. #else
  740. return ((int64_t)ticks * TARGET_HZ) / HOST_HZ;
  741. #endif
  742. }
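/* Conversion example (illustrative): clock_t values are rescaled from the
 * host tick rate to the target's.  On an Alpha host (HOST_HZ 1024) reporting
 * 2048 ticks to a target built with TARGET_HZ 100, the result is
 * 2048 * 100 / 1024 == 200 target ticks; when the rates match, the value is
 * passed through unchanged. */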
  743. static inline abi_long host_to_target_rusage(abi_ulong target_addr,
  744. const struct rusage *rusage)
  745. {
  746. struct target_rusage *target_rusage;
  747. if (!lock_user_struct(VERIFY_WRITE, target_rusage, target_addr, 0))
  748. return -TARGET_EFAULT;
  749. target_rusage->ru_utime.tv_sec = tswapal(rusage->ru_utime.tv_sec);
  750. target_rusage->ru_utime.tv_usec = tswapal(rusage->ru_utime.tv_usec);
  751. target_rusage->ru_stime.tv_sec = tswapal(rusage->ru_stime.tv_sec);
  752. target_rusage->ru_stime.tv_usec = tswapal(rusage->ru_stime.tv_usec);
  753. target_rusage->ru_maxrss = tswapal(rusage->ru_maxrss);
  754. target_rusage->ru_ixrss = tswapal(rusage->ru_ixrss);
  755. target_rusage->ru_idrss = tswapal(rusage->ru_idrss);
  756. target_rusage->ru_isrss = tswapal(rusage->ru_isrss);
  757. target_rusage->ru_minflt = tswapal(rusage->ru_minflt);
  758. target_rusage->ru_majflt = tswapal(rusage->ru_majflt);
  759. target_rusage->ru_nswap = tswapal(rusage->ru_nswap);
  760. target_rusage->ru_inblock = tswapal(rusage->ru_inblock);
  761. target_rusage->ru_oublock = tswapal(rusage->ru_oublock);
  762. target_rusage->ru_msgsnd = tswapal(rusage->ru_msgsnd);
  763. target_rusage->ru_msgrcv = tswapal(rusage->ru_msgrcv);
  764. target_rusage->ru_nsignals = tswapal(rusage->ru_nsignals);
  765. target_rusage->ru_nvcsw = tswapal(rusage->ru_nvcsw);
  766. target_rusage->ru_nivcsw = tswapal(rusage->ru_nivcsw);
  767. unlock_user_struct(target_rusage, target_addr, 1);
  768. return 0;
  769. }
  770. static inline rlim_t target_to_host_rlim(abi_ulong target_rlim)
  771. {
  772. abi_ulong target_rlim_swap;
  773. rlim_t result;
  774. target_rlim_swap = tswapal(target_rlim);
  775. if (target_rlim_swap == TARGET_RLIM_INFINITY)
  776. return RLIM_INFINITY;
  777. result = target_rlim_swap;
  778. if (target_rlim_swap != (rlim_t)result)
  779. return RLIM_INFINITY;
  780. return result;
  781. }
  782. static inline abi_ulong host_to_target_rlim(rlim_t rlim)
  783. {
  784. abi_ulong target_rlim_swap;
  785. abi_ulong result;
  786. if (rlim == RLIM_INFINITY || rlim != (abi_long)rlim)
  787. target_rlim_swap = TARGET_RLIM_INFINITY;
  788. else
  789. target_rlim_swap = rlim;
  790. result = tswapal(target_rlim_swap);
  791. return result;
  792. }
  793. static inline int target_to_host_resource(int code)
  794. {
  795. switch (code) {
  796. case TARGET_RLIMIT_AS:
  797. return RLIMIT_AS;
  798. case TARGET_RLIMIT_CORE:
  799. return RLIMIT_CORE;
  800. case TARGET_RLIMIT_CPU:
  801. return RLIMIT_CPU;
  802. case TARGET_RLIMIT_DATA:
  803. return RLIMIT_DATA;
  804. case TARGET_RLIMIT_FSIZE:
  805. return RLIMIT_FSIZE;
  806. case TARGET_RLIMIT_LOCKS:
  807. return RLIMIT_LOCKS;
  808. case TARGET_RLIMIT_MEMLOCK:
  809. return RLIMIT_MEMLOCK;
  810. case TARGET_RLIMIT_MSGQUEUE:
  811. return RLIMIT_MSGQUEUE;
  812. case TARGET_RLIMIT_NICE:
  813. return RLIMIT_NICE;
  814. case TARGET_RLIMIT_NOFILE:
  815. return RLIMIT_NOFILE;
  816. case TARGET_RLIMIT_NPROC:
  817. return RLIMIT_NPROC;
  818. case TARGET_RLIMIT_RSS:
  819. return RLIMIT_RSS;
  820. case TARGET_RLIMIT_RTPRIO:
  821. return RLIMIT_RTPRIO;
  822. case TARGET_RLIMIT_SIGPENDING:
  823. return RLIMIT_SIGPENDING;
  824. case TARGET_RLIMIT_STACK:
  825. return RLIMIT_STACK;
  826. default:
  827. return code;
  828. }
  829. }
  830. static inline abi_long copy_from_user_timeval(struct timeval *tv,
  831. abi_ulong target_tv_addr)
  832. {
  833. struct target_timeval *target_tv;
  834. if (!lock_user_struct(VERIFY_READ, target_tv, target_tv_addr, 1))
  835. return -TARGET_EFAULT;
  836. __get_user(tv->tv_sec, &target_tv->tv_sec);
  837. __get_user(tv->tv_usec, &target_tv->tv_usec);
  838. unlock_user_struct(target_tv, target_tv_addr, 0);
  839. return 0;
  840. }
  841. static inline abi_long copy_to_user_timeval(abi_ulong target_tv_addr,
  842. const struct timeval *tv)
  843. {
  844. struct target_timeval *target_tv;
  845. if (!lock_user_struct(VERIFY_WRITE, target_tv, target_tv_addr, 0))
  846. return -TARGET_EFAULT;
  847. __put_user(tv->tv_sec, &target_tv->tv_sec);
  848. __put_user(tv->tv_usec, &target_tv->tv_usec);
  849. unlock_user_struct(target_tv, target_tv_addr, 1);
  850. return 0;
  851. }
  852. static inline abi_long copy_from_user_timezone(struct timezone *tz,
  853. abi_ulong target_tz_addr)
  854. {
  855. struct target_timezone *target_tz;
  856. if (!lock_user_struct(VERIFY_READ, target_tz, target_tz_addr, 1)) {
  857. return -TARGET_EFAULT;
  858. }
  859. __get_user(tz->tz_minuteswest, &target_tz->tz_minuteswest);
  860. __get_user(tz->tz_dsttime, &target_tz->tz_dsttime);
  861. unlock_user_struct(target_tz, target_tz_addr, 0);
  862. return 0;
  863. }
  864. #if defined(TARGET_NR_mq_open) && defined(__NR_mq_open)
  865. #include <mqueue.h>
  866. static inline abi_long copy_from_user_mq_attr(struct mq_attr *attr,
  867. abi_ulong target_mq_attr_addr)
  868. {
  869. struct target_mq_attr *target_mq_attr;
  870. if (!lock_user_struct(VERIFY_READ, target_mq_attr,
  871. target_mq_attr_addr, 1))
  872. return -TARGET_EFAULT;
  873. __get_user(attr->mq_flags, &target_mq_attr->mq_flags);
  874. __get_user(attr->mq_maxmsg, &target_mq_attr->mq_maxmsg);
  875. __get_user(attr->mq_msgsize, &target_mq_attr->mq_msgsize);
  876. __get_user(attr->mq_curmsgs, &target_mq_attr->mq_curmsgs);
  877. unlock_user_struct(target_mq_attr, target_mq_attr_addr, 0);
  878. return 0;
  879. }
  880. static inline abi_long copy_to_user_mq_attr(abi_ulong target_mq_attr_addr,
  881. const struct mq_attr *attr)
  882. {
  883. struct target_mq_attr *target_mq_attr;
  884. if (!lock_user_struct(VERIFY_WRITE, target_mq_attr,
  885. target_mq_attr_addr, 0))
  886. return -TARGET_EFAULT;
  887. __put_user(attr->mq_flags, &target_mq_attr->mq_flags);
  888. __put_user(attr->mq_maxmsg, &target_mq_attr->mq_maxmsg);
  889. __put_user(attr->mq_msgsize, &target_mq_attr->mq_msgsize);
  890. __put_user(attr->mq_curmsgs, &target_mq_attr->mq_curmsgs);
  891. unlock_user_struct(target_mq_attr, target_mq_attr_addr, 1);
  892. return 0;
  893. }
  894. #endif
  895. #if defined(TARGET_NR_select) || defined(TARGET_NR__newselect)
  896. /* do_select() must return target values and target errnos. */
  897. static abi_long do_select(int n,
  898. abi_ulong rfd_addr, abi_ulong wfd_addr,
  899. abi_ulong efd_addr, abi_ulong target_tv_addr)
  900. {
  901. fd_set rfds, wfds, efds;
  902. fd_set *rfds_ptr, *wfds_ptr, *efds_ptr;
  903. struct timeval tv, *tv_ptr;
  904. abi_long ret;
  905. ret = copy_from_user_fdset_ptr(&rfds, &rfds_ptr, rfd_addr, n);
  906. if (ret) {
  907. return ret;
  908. }
  909. ret = copy_from_user_fdset_ptr(&wfds, &wfds_ptr, wfd_addr, n);
  910. if (ret) {
  911. return ret;
  912. }
  913. ret = copy_from_user_fdset_ptr(&efds, &efds_ptr, efd_addr, n);
  914. if (ret) {
  915. return ret;
  916. }
  917. if (target_tv_addr) {
  918. if (copy_from_user_timeval(&tv, target_tv_addr))
  919. return -TARGET_EFAULT;
  920. tv_ptr = &tv;
  921. } else {
  922. tv_ptr = NULL;
  923. }
  924. ret = get_errno(select(n, rfds_ptr, wfds_ptr, efds_ptr, tv_ptr));
  925. if (!is_error(ret)) {
  926. if (rfd_addr && copy_to_user_fdset(rfd_addr, &rfds, n))
  927. return -TARGET_EFAULT;
  928. if (wfd_addr && copy_to_user_fdset(wfd_addr, &wfds, n))
  929. return -TARGET_EFAULT;
  930. if (efd_addr && copy_to_user_fdset(efd_addr, &efds, n))
  931. return -TARGET_EFAULT;
  932. if (target_tv_addr && copy_to_user_timeval(target_tv_addr, &tv))
  933. return -TARGET_EFAULT;
  934. }
  935. return ret;
  936. }
  937. #endif
  938. static abi_long do_pipe2(int host_pipe[], int flags)
  939. {
  940. #ifdef CONFIG_PIPE2
  941. return pipe2(host_pipe, flags);
  942. #else
  943. return -ENOSYS;
  944. #endif
  945. }
  946. static abi_long do_pipe(void *cpu_env, abi_ulong pipedes,
  947. int flags, int is_pipe2)
  948. {
  949. int host_pipe[2];
  950. abi_long ret;
  951. ret = flags ? do_pipe2(host_pipe, flags) : pipe(host_pipe);
  952. if (is_error(ret))
  953. return get_errno(ret);
  954. /* Several targets have special calling conventions for the original
  955. pipe syscall, but didn't replicate this into the pipe2 syscall. */
  956. if (!is_pipe2) {
  957. #if defined(TARGET_ALPHA)
  958. ((CPUAlphaState *)cpu_env)->ir[IR_A4] = host_pipe[1];
  959. return host_pipe[0];
  960. #elif defined(TARGET_MIPS)
  961. ((CPUMIPSState*)cpu_env)->active_tc.gpr[3] = host_pipe[1];
  962. return host_pipe[0];
  963. #elif defined(TARGET_SH4)
  964. ((CPUSH4State*)cpu_env)->gregs[1] = host_pipe[1];
  965. return host_pipe[0];
  966. #elif defined(TARGET_SPARC)
  967. ((CPUSPARCState*)cpu_env)->regwptr[1] = host_pipe[1];
  968. return host_pipe[0];
  969. #endif
  970. }
  971. if (put_user_s32(host_pipe[0], pipedes)
  972. || put_user_s32(host_pipe[1], pipedes + sizeof(host_pipe[0])))
  973. return -TARGET_EFAULT;
  974. return get_errno(ret);
  975. }
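/* Calling-convention example (illustrative): for the original pipe() syscall,
 * MIPS returns the read end in v0 and the write end in v1, so the code above
 * stores host_pipe[1] in gpr[3] and returns host_pipe[0] instead of writing
 * both descriptors through the user pointer; pipe2() always uses the pointer
 * form. */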
  976. static inline abi_long target_to_host_ip_mreq(struct ip_mreqn *mreqn,
  977. abi_ulong target_addr,
  978. socklen_t len)
  979. {
  980. struct target_ip_mreqn *target_smreqn;
  981. target_smreqn = lock_user(VERIFY_READ, target_addr, len, 1);
  982. if (!target_smreqn)
  983. return -TARGET_EFAULT;
  984. mreqn->imr_multiaddr.s_addr = target_smreqn->imr_multiaddr.s_addr;
  985. mreqn->imr_address.s_addr = target_smreqn->imr_address.s_addr;
  986. if (len == sizeof(struct target_ip_mreqn))
  987. mreqn->imr_ifindex = tswapal(target_smreqn->imr_ifindex);
  988. unlock_user(target_smreqn, target_addr, 0);
  989. return 0;
  990. }
  991. static inline abi_long target_to_host_sockaddr(struct sockaddr *addr,
  992. abi_ulong target_addr,
  993. socklen_t len)
  994. {
  995. const socklen_t unix_maxlen = sizeof (struct sockaddr_un);
  996. sa_family_t sa_family;
  997. struct target_sockaddr *target_saddr;
  998. target_saddr = lock_user(VERIFY_READ, target_addr, len, 1);
  999. if (!target_saddr)
  1000. return -TARGET_EFAULT;
  1001. sa_family = tswap16(target_saddr->sa_family);
1002. /* Oops. The caller might send an incomplete sun_path; sun_path
  1003. * must be terminated by \0 (see the manual page), but
  1004. * unfortunately it is quite common to specify sockaddr_un
  1005. * length as "strlen(x->sun_path)" while it should be
  1006. * "strlen(...) + 1". We'll fix that here if needed.
1007. * The Linux kernel applies a similar fixup.
  1008. */
  1009. if (sa_family == AF_UNIX) {
  1010. if (len < unix_maxlen && len > 0) {
  1011. char *cp = (char*)target_saddr;
  1012. if ( cp[len-1] && !cp[len] )
  1013. len++;
  1014. }
  1015. if (len > unix_maxlen)
  1016. len = unix_maxlen;
  1017. }
  1018. memcpy(addr, target_saddr, len);
  1019. addr->sa_family = sa_family;
  1020. if (sa_family == AF_PACKET) {
  1021. struct target_sockaddr_ll *lladdr;
  1022. lladdr = (struct target_sockaddr_ll *)addr;
  1023. lladdr->sll_ifindex = tswap32(lladdr->sll_ifindex);
  1024. lladdr->sll_hatype = tswap16(lladdr->sll_hatype);
  1025. }
  1026. unlock_user(target_saddr, target_addr, 0);
  1027. return 0;
  1028. }
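/* sun_path fixup example (illustrative): a guest that binds an AF_UNIX socket
 * with addrlen == offsetof(sockaddr_un, sun_path) + strlen(path) has left the
 * terminating NUL out of the length.  The check above notices that the last
 * counted byte is non-zero while the following byte is zero and extends len
 * by one, matching the kernel's tolerant behaviour. */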
  1029. static inline abi_long host_to_target_sockaddr(abi_ulong target_addr,
  1030. struct sockaddr *addr,
  1031. socklen_t len)
  1032. {
  1033. struct target_sockaddr *target_saddr;
  1034. target_saddr = lock_user(VERIFY_WRITE, target_addr, len, 0);
  1035. if (!target_saddr)
  1036. return -TARGET_EFAULT;
  1037. memcpy(target_saddr, addr, len);
  1038. target_saddr->sa_family = tswap16(addr->sa_family);
  1039. unlock_user(target_saddr, target_addr, len);
  1040. return 0;
  1041. }
  1042. static inline abi_long target_to_host_cmsg(struct msghdr *msgh,
  1043. struct target_msghdr *target_msgh)
  1044. {
  1045. struct cmsghdr *cmsg = CMSG_FIRSTHDR(msgh);
  1046. abi_long msg_controllen;
  1047. abi_ulong target_cmsg_addr;
  1048. struct target_cmsghdr *target_cmsg;
  1049. socklen_t space = 0;
  1050. msg_controllen = tswapal(target_msgh->msg_controllen);
  1051. if (msg_controllen < sizeof (struct target_cmsghdr))
  1052. goto the_end;
  1053. target_cmsg_addr = tswapal(target_msgh->msg_control);
  1054. target_cmsg = lock_user(VERIFY_READ, target_cmsg_addr, msg_controllen, 1);
  1055. if (!target_cmsg)
  1056. return -TARGET_EFAULT;
  1057. while (cmsg && target_cmsg) {
  1058. void *data = CMSG_DATA(cmsg);
  1059. void *target_data = TARGET_CMSG_DATA(target_cmsg);
  1060. int len = tswapal(target_cmsg->cmsg_len)
  1061. - TARGET_CMSG_ALIGN(sizeof (struct target_cmsghdr));
  1062. space += CMSG_SPACE(len);
  1063. if (space > msgh->msg_controllen) {
  1064. space -= CMSG_SPACE(len);
  1065. gemu_log("Host cmsg overflow\n");
  1066. break;
  1067. }
  1068. if (tswap32(target_cmsg->cmsg_level) == TARGET_SOL_SOCKET) {
  1069. cmsg->cmsg_level = SOL_SOCKET;
  1070. } else {
  1071. cmsg->cmsg_level = tswap32(target_cmsg->cmsg_level);
  1072. }
  1073. cmsg->cmsg_type = tswap32(target_cmsg->cmsg_type);
  1074. cmsg->cmsg_len = CMSG_LEN(len);
  1075. if (cmsg->cmsg_level != SOL_SOCKET || cmsg->cmsg_type != SCM_RIGHTS) {
  1076. gemu_log("Unsupported ancillary data: %d/%d\n", cmsg->cmsg_level, cmsg->cmsg_type);
  1077. memcpy(data, target_data, len);
  1078. } else {
  1079. int *fd = (int *)data;
  1080. int *target_fd = (int *)target_data;
  1081. int i, numfds = len / sizeof(int);
  1082. for (i = 0; i < numfds; i++)
  1083. fd[i] = tswap32(target_fd[i]);
  1084. }
  1085. cmsg = CMSG_NXTHDR(msgh, cmsg);
  1086. target_cmsg = TARGET_CMSG_NXTHDR(target_msgh, target_cmsg);
  1087. }
  1088. unlock_user(target_cmsg, target_cmsg_addr, 0);
  1089. the_end:
  1090. msgh->msg_controllen = space;
  1091. return 0;
  1092. }
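/* Ancillary-data example (illustrative): an SCM_RIGHTS control message that
 * passes two file descriptors has a payload of 2 * sizeof(int); the loop
 * above byteswaps each descriptor individually when converting the guest
 * cmsg into host layout.  Any other level/type combination is copied
 * verbatim with a warning. */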
  1093. static inline abi_long host_to_target_cmsg(struct target_msghdr *target_msgh,
  1094. struct msghdr *msgh)
  1095. {
  1096. struct cmsghdr *cmsg = CMSG_FIRSTHDR(msgh);
  1097. abi_long msg_controllen;
  1098. abi_ulong target_cmsg_addr;
  1099. struct target_cmsghdr *target_cmsg;
  1100. socklen_t space = 0;
  1101. msg_controllen = tswapal(target_msgh->msg_controllen);
  1102. if (msg_controllen < sizeof (struct target_cmsghdr))
  1103. goto the_end;
  1104. target_cmsg_addr = tswapal(target_msgh->msg_control);
  1105. target_cmsg = lock_user(VERIFY_WRITE, target_cmsg_addr, msg_controllen, 0);
  1106. if (!target_cmsg)
  1107. return -TARGET_EFAULT;
  1108. while (cmsg && target_cmsg) {
  1109. void *data = CMSG_DATA(cmsg);
  1110. void *target_data = TARGET_CMSG_DATA(target_cmsg);
  1111. int len = cmsg->cmsg_len - CMSG_ALIGN(sizeof (struct cmsghdr));
  1112. space += TARGET_CMSG_SPACE(len);
  1113. if (space > msg_controllen) {
  1114. space -= TARGET_CMSG_SPACE(len);
  1115. gemu_log("Target cmsg overflow\n");
  1116. break;
  1117. }
  1118. if (cmsg->cmsg_level == SOL_SOCKET) {
  1119. target_cmsg->cmsg_level = tswap32(TARGET_SOL_SOCKET);
  1120. } else {
  1121. target_cmsg->cmsg_level = tswap32(cmsg->cmsg_level);
  1122. }
  1123. target_cmsg->cmsg_type = tswap32(cmsg->cmsg_type);
  1124. target_cmsg->cmsg_len = tswapal(TARGET_CMSG_LEN(len));
  1125. switch (cmsg->cmsg_level) {
  1126. case SOL_SOCKET:
  1127. switch (cmsg->cmsg_type) {
  1128. case SCM_RIGHTS:
  1129. {
  1130. int *fd = (int *)data;
  1131. int *target_fd = (int *)target_data;
  1132. int i, numfds = len / sizeof(int);
  1133. for (i = 0; i < numfds; i++)
  1134. target_fd[i] = tswap32(fd[i]);
  1135. break;
  1136. }
  1137. case SO_TIMESTAMP:
  1138. {
  1139. struct timeval *tv = (struct timeval *)data;
  1140. struct target_timeval *target_tv =
  1141. (struct target_timeval *)target_data;
  1142. if (len != sizeof(struct timeval))
  1143. goto unimplemented;
  1144. /* copy struct timeval to target */
  1145. target_tv->tv_sec = tswapal(tv->tv_sec);
  1146. target_tv->tv_usec = tswapal(tv->tv_usec);
  1147. break;
  1148. }
  1149. case SCM_CREDENTIALS:
  1150. {
  1151. struct ucred *cred = (struct ucred *)data;
  1152. struct target_ucred *target_cred =
  1153. (struct target_ucred *)target_data;
  1154. __put_user(cred->pid, &target_cred->pid);
  1155. __put_user(cred->uid, &target_cred->uid);
  1156. __put_user(cred->gid, &target_cred->gid);
  1157. break;
  1158. }
  1159. default:
  1160. goto unimplemented;
  1161. }
  1162. break;
  1163. default:
  1164. unimplemented:
  1165. gemu_log("Unsupported ancillary data: %d/%d\n",
  1166. cmsg->cmsg_level, cmsg->cmsg_type);
  1167. memcpy(target_data, data, len);
  1168. }
  1169. cmsg = CMSG_NXTHDR(msgh, cmsg);
  1170. target_cmsg = TARGET_CMSG_NXTHDR(target_msgh, target_cmsg);
  1171. }
  1172. unlock_user(target_cmsg, target_cmsg_addr, space);
  1173. the_end:
  1174. target_msgh->msg_controllen = tswapal(space);
  1175. return 0;
  1176. }
  1177. /* do_setsockopt() Must return target values and target errnos. */
  1178. static abi_long do_setsockopt(int sockfd, int level, int optname,
  1179. abi_ulong optval_addr, socklen_t optlen)
  1180. {
  1181. abi_long ret;
  1182. int val;
  1183. struct ip_mreqn *ip_mreq;
  1184. struct ip_mreq_source *ip_mreq_source;
  1185. switch(level) {
  1186. case SOL_TCP:
  1187. /* TCP options all take an 'int' value. */
  1188. if (optlen < sizeof(uint32_t))
  1189. return -TARGET_EINVAL;
  1190. if (get_user_u32(val, optval_addr))
  1191. return -TARGET_EFAULT;
  1192. ret = get_errno(setsockopt(sockfd, level, optname, &val, sizeof(val)));
  1193. break;
  1194. case SOL_IP:
  1195. switch(optname) {
  1196. case IP_TOS:
  1197. case IP_TTL:
  1198. case IP_HDRINCL:
  1199. case IP_ROUTER_ALERT:
  1200. case IP_RECVOPTS:
  1201. case IP_RETOPTS:
  1202. case IP_PKTINFO:
  1203. case IP_MTU_DISCOVER:
  1204. case IP_RECVERR:
  1205. case IP_RECVTOS:
  1206. #ifdef IP_FREEBIND
  1207. case IP_FREEBIND:
  1208. #endif
  1209. case IP_MULTICAST_TTL:
  1210. case IP_MULTICAST_LOOP:
  1211. val = 0;
  1212. if (optlen >= sizeof(uint32_t)) {
  1213. if (get_user_u32(val, optval_addr))
  1214. return -TARGET_EFAULT;
  1215. } else if (optlen >= 1) {
  1216. if (get_user_u8(val, optval_addr))
  1217. return -TARGET_EFAULT;
  1218. }
  1219. ret = get_errno(setsockopt(sockfd, level, optname, &val, sizeof(val)));
  1220. break;
  1221. case IP_ADD_MEMBERSHIP:
  1222. case IP_DROP_MEMBERSHIP:
  1223. if (optlen < sizeof (struct target_ip_mreq) ||
  1224. optlen > sizeof (struct target_ip_mreqn))
  1225. return -TARGET_EINVAL;
  1226. ip_mreq = (struct ip_mreqn *) alloca(optlen);
  1227. target_to_host_ip_mreq(ip_mreq, optval_addr, optlen);
  1228. ret = get_errno(setsockopt(sockfd, level, optname, ip_mreq, optlen));
  1229. break;
  1230. case IP_BLOCK_SOURCE:
  1231. case IP_UNBLOCK_SOURCE:
  1232. case IP_ADD_SOURCE_MEMBERSHIP:
  1233. case IP_DROP_SOURCE_MEMBERSHIP:
  1234. if (optlen != sizeof (struct target_ip_mreq_source))
  1235. return -TARGET_EINVAL;
  1236. ip_mreq_source = lock_user(VERIFY_READ, optval_addr, optlen, 1);
  1237. ret = get_errno(setsockopt(sockfd, level, optname, ip_mreq_source, optlen));
  1238. unlock_user (ip_mreq_source, optval_addr, 0);
  1239. break;
  1240. default:
  1241. goto unimplemented;
  1242. }
  1243. break;
  1244. case SOL_IPV6:
  1245. switch (optname) {
  1246. case IPV6_MTU_DISCOVER:
  1247. case IPV6_MTU:
  1248. case IPV6_V6ONLY:
  1249. case IPV6_RECVPKTINFO:
  1250. val = 0;
  1251. if (optlen < sizeof(uint32_t)) {
  1252. return -TARGET_EINVAL;
  1253. }
  1254. if (get_user_u32(val, optval_addr)) {
  1255. return -TARGET_EFAULT;
  1256. }
  1257. ret = get_errno(setsockopt(sockfd, level, optname,
  1258. &val, sizeof(val)));
  1259. break;
  1260. default:
  1261. goto unimplemented;
  1262. }
  1263. break;
  1264. case SOL_RAW:
  1265. switch (optname) {
  1266. case ICMP_FILTER:
1267. /* struct icmp_filter takes a u32 value */
  1268. if (optlen < sizeof(uint32_t)) {
  1269. return -TARGET_EINVAL;
  1270. }
  1271. if (get_user_u32(val, optval_addr)) {
  1272. return -TARGET_EFAULT;
  1273. }
  1274. ret = get_errno(setsockopt(sockfd, level, optname,
  1275. &val, sizeof(val)));
  1276. break;
  1277. default:
  1278. goto unimplemented;
  1279. }
  1280. break;
  1281. case TARGET_SOL_SOCKET:
  1282. switch (optname) {
  1283. case TARGET_SO_RCVTIMEO:
  1284. {
  1285. struct timeval tv;
  1286. optname = SO_RCVTIMEO;
  1287. set_timeout:
  1288. if (optlen != sizeof(struct target_timeval)) {
  1289. return -TARGET_EINVAL;
  1290. }
  1291. if (copy_from_user_timeval(&tv, optval_addr)) {
  1292. return -TARGET_EFAULT;
  1293. }
  1294. ret = get_errno(setsockopt(sockfd, SOL_SOCKET, optname,
  1295. &tv, sizeof(tv)));
  1296. return ret;
  1297. }
  1298. case TARGET_SO_SNDTIMEO:
  1299. optname = SO_SNDTIMEO;
  1300. goto set_timeout;
  1301. case TARGET_SO_ATTACH_FILTER:
  1302. {
  1303. struct target_sock_fprog *tfprog;
  1304. struct target_sock_filter *tfilter;
  1305. struct sock_fprog fprog;
  1306. struct sock_filter *filter;
  1307. int i;
  1308. if (optlen != sizeof(*tfprog)) {
  1309. return -TARGET_EINVAL;
  1310. }
  1311. if (!lock_user_struct(VERIFY_READ, tfprog, optval_addr, 0)) {
  1312. return -TARGET_EFAULT;
  1313. }
  1314. if (!lock_user_struct(VERIFY_READ, tfilter,
  1315. tswapal(tfprog->filter), 0)) {
  1316. unlock_user_struct(tfprog, optval_addr, 1);
  1317. return -TARGET_EFAULT;
  1318. }
  1319. fprog.len = tswap16(tfprog->len);
  1320. filter = malloc(fprog.len * sizeof(*filter));
  1321. if (filter == NULL) {
  1322. unlock_user_struct(tfilter, tfprog->filter, 1);
  1323. unlock_user_struct(tfprog, optval_addr, 1);
  1324. return -TARGET_ENOMEM;
  1325. }
  1326. for (i = 0; i < fprog.len; i++) {
  1327. filter[i].code = tswap16(tfilter[i].code);
  1328. filter[i].jt = tfilter[i].jt;
  1329. filter[i].jf = tfilter[i].jf;
  1330. filter[i].k = tswap32(tfilter[i].k);
  1331. }
  1332. fprog.filter = filter;
  1333. ret = get_errno(setsockopt(sockfd, SOL_SOCKET,
  1334. SO_ATTACH_FILTER, &fprog, sizeof(fprog)));
  1335. free(filter);
  1336. unlock_user_struct(tfilter, tfprog->filter, 1);
  1337. unlock_user_struct(tfprog, optval_addr, 1);
  1338. return ret;
  1339. }
  1340. case TARGET_SO_BINDTODEVICE:
  1341. {
  1342. char *dev_ifname, *addr_ifname;
  1343. if (optlen > IFNAMSIZ - 1) {
  1344. optlen = IFNAMSIZ - 1;
  1345. }
  1346. dev_ifname = lock_user(VERIFY_READ, optval_addr, optlen, 1);
  1347. if (!dev_ifname) {
  1348. return -TARGET_EFAULT;
  1349. }
  1350. optname = SO_BINDTODEVICE;
  1351. addr_ifname = alloca(IFNAMSIZ);
  1352. memcpy(addr_ifname, dev_ifname, optlen);
  1353. addr_ifname[optlen] = 0;
  1354. ret = get_errno(setsockopt(sockfd, level, optname, addr_ifname, optlen));
  1355. unlock_user (dev_ifname, optval_addr, 0);
  1356. return ret;
  1357. }
  1358. /* Options with 'int' argument. */
  1359. case TARGET_SO_DEBUG:
  1360. optname = SO_DEBUG;
  1361. break;
  1362. case TARGET_SO_REUSEADDR:
  1363. optname = SO_REUSEADDR;
  1364. break;
  1365. case TARGET_SO_TYPE:
  1366. optname = SO_TYPE;
  1367. break;
  1368. case TARGET_SO_ERROR:
  1369. optname = SO_ERROR;
  1370. break;
  1371. case TARGET_SO_DONTROUTE:
  1372. optname = SO_DONTROUTE;
  1373. break;
  1374. case TARGET_SO_BROADCAST:
  1375. optname = SO_BROADCAST;
  1376. break;
  1377. case TARGET_SO_SNDBUF:
  1378. optname = SO_SNDBUF;
  1379. break;
  1380. case TARGET_SO_SNDBUFFORCE:
  1381. optname = SO_SNDBUFFORCE;
  1382. break;
  1383. case TARGET_SO_RCVBUF:
  1384. optname = SO_RCVBUF;
  1385. break;
  1386. case TARGET_SO_RCVBUFFORCE:
  1387. optname = SO_RCVBUFFORCE;
  1388. break;
  1389. case TARGET_SO_KEEPALIVE:
  1390. optname = SO_KEEPALIVE;
  1391. break;
  1392. case TARGET_SO_OOBINLINE:
  1393. optname = SO_OOBINLINE;
  1394. break;
  1395. case TARGET_SO_NO_CHECK:
  1396. optname = SO_NO_CHECK;
  1397. break;
  1398. case TARGET_SO_PRIORITY:
  1399. optname = SO_PRIORITY;
  1400. break;
  1401. #ifdef SO_BSDCOMPAT
  1402. case TARGET_SO_BSDCOMPAT:
  1403. optname = SO_BSDCOMPAT;
  1404. break;
  1405. #endif
  1406. case TARGET_SO_PASSCRED:
  1407. optname = SO_PASSCRED;
  1408. break;
  1409. case TARGET_SO_PASSSEC:
  1410. optname = SO_PASSSEC;
  1411. break;
  1412. case TARGET_SO_TIMESTAMP:
  1413. optname = SO_TIMESTAMP;
  1414. break;
  1415. case TARGET_SO_RCVLOWAT:
  1416. optname = SO_RCVLOWAT;
  1417. break;
  1419. default:
  1420. goto unimplemented;
  1421. }
  1422. if (optlen < sizeof(uint32_t))
  1423. return -TARGET_EINVAL;
  1424. if (get_user_u32(val, optval_addr))
  1425. return -TARGET_EFAULT;
  1426. ret = get_errno(setsockopt(sockfd, SOL_SOCKET, optname, &val, sizeof(val)));
  1427. break;
  1428. default:
  1429. unimplemented:
  1430. gemu_log("Unsupported setsockopt level=%d optname=%d\n", level, optname);
  1431. ret = -TARGET_ENOPROTOOPT;
  1432. }
  1433. return ret;
  1434. }
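/* Usage sketch (illustrative): a guest call such as
 *   setsockopt(fd, SOL_SOCKET, TARGET_SO_RCVTIMEO, &target_tv, sizeof(target_tv))
 * is routed through the TARGET_SOL_SOCKET arm above, where the target_timeval
 * is converted with copy_from_user_timeval() before the host setsockopt() is
 * issued with SO_RCVTIMEO and a native struct timeval. */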
  1435. /* do_getsockopt() Must return target values and target errnos. */
  1436. static abi_long do_getsockopt(int sockfd, int level, int optname,
  1437. abi_ulong optval_addr, abi_ulong optlen)
  1438. {
  1439. abi_long ret;
  1440. int len, val;
  1441. socklen_t lv;
  1442. switch(level) {
  1443. case TARGET_SOL_SOCKET:
  1444. level = SOL_SOCKET;
  1445. switch (optname) {
  1446. /* These don't just return a single integer */
  1447. case TARGET_SO_LINGER:
  1448. case TARGET_SO_RCVTIMEO:
  1449. case TARGET_SO_SNDTIMEO:
  1450. case TARGET_SO_PEERNAME:
  1451. goto unimplemented;
  1452. case TARGET_SO_PEERCRED: {
  1453. struct ucred cr;
  1454. socklen_t crlen;
  1455. struct target_ucred *tcr;
  1456. if (get_user_u32(len, optlen)) {
  1457. return -TARGET_EFAULT;
  1458. }
  1459. if (len < 0) {
  1460. return -TARGET_EINVAL;
  1461. }
  1462. crlen = sizeof(cr);
  1463. ret = get_errno(getsockopt(sockfd, level, SO_PEERCRED,
  1464. &cr, &crlen));
  1465. if (ret < 0) {
  1466. return ret;
  1467. }
  1468. if (len > crlen) {
  1469. len = crlen;
  1470. }
  1471. if (!lock_user_struct(VERIFY_WRITE, tcr, optval_addr, 0)) {
  1472. return -TARGET_EFAULT;
  1473. }
  1474. __put_user(cr.pid, &tcr->pid);
  1475. __put_user(cr.uid, &tcr->uid);
  1476. __put_user(cr.gid, &tcr->gid);
  1477. unlock_user_struct(tcr, optval_addr, 1);
  1478. if (put_user_u32(len, optlen)) {
  1479. return -TARGET_EFAULT;
  1480. }
  1481. break;
  1482. }
  1483. /* Options with 'int' argument. */
  1484. case TARGET_SO_DEBUG:
  1485. optname = SO_DEBUG;
  1486. goto int_case;
  1487. case TARGET_SO_REUSEADDR:
  1488. optname = SO_REUSEADDR;
  1489. goto int_case;
  1490. case TARGET_SO_TYPE:
  1491. optname = SO_TYPE;
  1492. goto int_case;
  1493. case TARGET_SO_ERROR:
  1494. optname = SO_ERROR;
  1495. goto int_case;
  1496. case TARGET_SO_DONTROUTE:
  1497. optname = SO_DONTROUTE;
  1498. goto int_case;
  1499. case TARGET_SO_BROADCAST:
  1500. optname = SO_BROADCAST;
  1501. goto int_case;
  1502. case TARGET_SO_SNDBUF:
  1503. optname = SO_SNDBUF;
  1504. goto int_case;
  1505. case TARGET_SO_RCVBUF:
  1506. optname = SO_RCVBUF;
  1507. goto int_case;
  1508. case TARGET_SO_KEEPALIVE:
  1509. optname = SO_KEEPALIVE;
  1510. goto int_case;
  1511. case TARGET_SO_OOBINLINE:
  1512. optname = SO_OOBINLINE;
  1513. goto int_case;
  1514. case TARGET_SO_NO_CHECK:
  1515. optname = SO_NO_CHECK;
  1516. goto int_case;
  1517. case TARGET_SO_PRIORITY:
  1518. optname = SO_PRIORITY;
  1519. goto int_case;
  1520. #ifdef SO_BSDCOMPAT
  1521. case TARGET_SO_BSDCOMPAT:
  1522. optname = SO_BSDCOMPAT;
  1523. goto int_case;
  1524. #endif
  1525. case TARGET_SO_PASSCRED:
  1526. optname = SO_PASSCRED;
  1527. goto int_case;
  1528. case TARGET_SO_TIMESTAMP:
  1529. optname = SO_TIMESTAMP;
  1530. goto int_case;
  1531. case TARGET_SO_RCVLOWAT:
  1532. optname = SO_RCVLOWAT;
  1533. goto int_case;
  1534. case TARGET_SO_ACCEPTCONN:
  1535. optname = SO_ACCEPTCONN;
  1536. goto int_case;
  1537. default:
  1538. goto int_case;
  1539. }
  1540. break;
  1541. case SOL_TCP:
  1542. /* TCP options all take an 'int' value. */
  1543. int_case:
  1544. if (get_user_u32(len, optlen))
  1545. return -TARGET_EFAULT;
  1546. if (len < 0)
  1547. return -TARGET_EINVAL;
  1548. lv = sizeof(lv);
  1549. ret = get_errno(getsockopt(sockfd, level, optname, &val, &lv));
  1550. if (ret < 0)
  1551. return ret;
  1552. if (optname == SO_TYPE) {
  1553. val = host_to_target_sock_type(val);
  1554. }
  1555. if (len > lv)
  1556. len = lv;
  1557. if (len == 4) {
  1558. if (put_user_u32(val, optval_addr))
  1559. return -TARGET_EFAULT;
  1560. } else {
  1561. if (put_user_u8(val, optval_addr))
  1562. return -TARGET_EFAULT;
  1563. }
  1564. if (put_user_u32(len, optlen))
  1565. return -TARGET_EFAULT;
  1566. break;
  1567. case SOL_IP:
  1568. switch(optname) {
  1569. case IP_TOS:
  1570. case IP_TTL:
  1571. case IP_HDRINCL:
  1572. case IP_ROUTER_ALERT:
  1573. case IP_RECVOPTS:
  1574. case IP_RETOPTS:
  1575. case IP_PKTINFO:
  1576. case IP_MTU_DISCOVER:
  1577. case IP_RECVERR:
  1578. case IP_RECVTOS:
  1579. #ifdef IP_FREEBIND
  1580. case IP_FREEBIND:
  1581. #endif
  1582. case IP_MULTICAST_TTL:
  1583. case IP_MULTICAST_LOOP:
  1584. if (get_user_u32(len, optlen))
  1585. return -TARGET_EFAULT;
  1586. if (len < 0)
  1587. return -TARGET_EINVAL;
  1588. lv = sizeof(lv);
  1589. ret = get_errno(getsockopt(sockfd, level, optname, &val, &lv));
  1590. if (ret < 0)
  1591. return ret;
  1592. if (len < sizeof(int) && len > 0 && val >= 0 && val < 255) {
  1593. len = 1;
  1594. if (put_user_u32(len, optlen)
  1595. || put_user_u8(val, optval_addr))
  1596. return -TARGET_EFAULT;
  1597. } else {
  1598. if (len > sizeof(int))
  1599. len = sizeof(int);
  1600. if (put_user_u32(len, optlen)
  1601. || put_user_u32(val, optval_addr))
  1602. return -TARGET_EFAULT;
  1603. }
  1604. break;
  1605. default:
  1606. ret = -TARGET_ENOPROTOOPT;
  1607. break;
  1608. }
  1609. break;
  1610. default:
  1611. unimplemented:
  1612. gemu_log("getsockopt level=%d optname=%d not yet supported\n",
  1613. level, optname);
  1614. ret = -TARGET_EOPNOTSUPP;
  1615. break;
  1616. }
  1617. return ret;
  1618. }
  1619. static struct iovec *lock_iovec(int type, abi_ulong target_addr,
  1620. int count, int copy)
  1621. {
  1622. struct target_iovec *target_vec;
  1623. struct iovec *vec;
  1624. abi_ulong total_len, max_len;
  1625. int i;
  1626. int err = 0;
  1627. bool bad_address = false;
  1628. if (count == 0) {
  1629. errno = 0;
  1630. return NULL;
  1631. }
  1632. if (count < 0 || count > IOV_MAX) {
  1633. errno = EINVAL;
  1634. return NULL;
  1635. }
  1636. vec = calloc(count, sizeof(struct iovec));
  1637. if (vec == NULL) {
  1638. errno = ENOMEM;
  1639. return NULL;
  1640. }
  1641. target_vec = lock_user(VERIFY_READ, target_addr,
  1642. count * sizeof(struct target_iovec), 1);
  1643. if (target_vec == NULL) {
  1644. err = EFAULT;
  1645. goto fail2;
  1646. }
  1647. /* ??? If host page size > target page size, this will result in a
  1648. value larger than what we can actually support. */
  1649. max_len = 0x7fffffff & TARGET_PAGE_MASK;
  1650. total_len = 0;
  1651. for (i = 0; i < count; i++) {
  1652. abi_ulong base = tswapal(target_vec[i].iov_base);
  1653. abi_long len = tswapal(target_vec[i].iov_len);
  1654. if (len < 0) {
  1655. err = EINVAL;
  1656. goto fail;
  1657. } else if (len == 0) {
  1658. /* Zero length pointer is ignored. */
  1659. vec[i].iov_base = 0;
  1660. } else {
  1661. vec[i].iov_base = lock_user(type, base, len, copy);
  1662. /* If the first buffer pointer is bad, this is a fault. But
  1663. * subsequent bad buffers will result in a partial write; this
  1664. * is realized by filling the vector with null pointers and
  1665. * zero lengths. */
  1666. if (!vec[i].iov_base) {
  1667. if (i == 0) {
  1668. err = EFAULT;
  1669. goto fail;
  1670. } else {
  1671. bad_address = true;
  1672. }
  1673. }
  1674. if (bad_address) {
  1675. len = 0;
  1676. }
  1677. if (len > max_len - total_len) {
  1678. len = max_len - total_len;
  1679. }
  1680. }
  1681. vec[i].iov_len = len;
  1682. total_len += len;
  1683. }
  1684. unlock_user(target_vec, target_addr, 0);
  1685. return vec;
  1686. fail:
  1687. unlock_user(target_vec, target_addr, 0);
  1688. fail2:
  1689. free(vec);
  1690. errno = err;
  1691. return NULL;
  1692. }
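/* Partial-write semantics (illustrative): if a guest writev() vector has a
 * valid first buffer but an unmapped later one, lock_iovec() keeps the valid
 * leading entries and records the bad entry and everything after it as
 * NULL/zero-length, so the host syscall performs a short transfer instead of
 * failing; only a bad *first* buffer is reported as EFAULT. */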
  1693. static void unlock_iovec(struct iovec *vec, abi_ulong target_addr,
  1694. int count, int copy)
  1695. {
  1696. struct target_iovec *target_vec;
  1697. int i;
  1698. target_vec = lock_user(VERIFY_READ, target_addr,
  1699. count * sizeof(struct target_iovec), 1);
  1700. if (target_vec) {
  1701. for (i = 0; i < count; i++) {
  1702. abi_ulong base = tswapal(target_vec[i].iov_base);
1703. abi_long len = tswapal(target_vec[i].iov_len);
  1704. if (len < 0) {
  1705. break;
  1706. }
  1707. unlock_user(vec[i].iov_base, base, copy ? vec[i].iov_len : 0);
  1708. }
  1709. unlock_user(target_vec, target_addr, 0);
  1710. }
  1711. free(vec);
  1712. }
  1713. static inline int target_to_host_sock_type(int *type)
  1714. {
  1715. int host_type = 0;
  1716. int target_type = *type;
  1717. switch (target_type & TARGET_SOCK_TYPE_MASK) {
  1718. case TARGET_SOCK_DGRAM:
  1719. host_type = SOCK_DGRAM;
  1720. break;
  1721. case TARGET_SOCK_STREAM:
  1722. host_type = SOCK_STREAM;
  1723. break;
  1724. default:
  1725. host_type = target_type & TARGET_SOCK_TYPE_MASK;
  1726. break;
  1727. }
  1728. if (target_type & TARGET_SOCK_CLOEXEC) {
  1729. #if defined(SOCK_CLOEXEC)
  1730. host_type |= SOCK_CLOEXEC;
  1731. #else
  1732. return -TARGET_EINVAL;
  1733. #endif
  1734. }
  1735. if (target_type & TARGET_SOCK_NONBLOCK) {
  1736. #if defined(SOCK_NONBLOCK)
  1737. host_type |= SOCK_NONBLOCK;
  1738. #elif !defined(O_NONBLOCK)
  1739. return -TARGET_EINVAL;
  1740. #endif
  1741. }
  1742. *type = host_type;
  1743. return 0;
  1744. }
  1745. /* Try to emulate socket type flags after socket creation. */
  1746. static int sock_flags_fixup(int fd, int target_type)
  1747. {
  1748. #if !defined(SOCK_NONBLOCK) && defined(O_NONBLOCK)
  1749. if (target_type & TARGET_SOCK_NONBLOCK) {
  1750. int flags = fcntl(fd, F_GETFL);
  1751. if (fcntl(fd, F_SETFL, O_NONBLOCK | flags) == -1) {
  1752. close(fd);
  1753. return -TARGET_EINVAL;
  1754. }
  1755. }
  1756. #endif
  1757. return fd;
  1758. }
  1759. /* do_socket() Must return target values and target errnos. */
  1760. static abi_long do_socket(int domain, int type, int protocol)
  1761. {
  1762. int target_type = type;
  1763. int ret;
  1764. ret = target_to_host_sock_type(&type);
  1765. if (ret) {
  1766. return ret;
  1767. }
  1768. if (domain == PF_NETLINK)
  1769. return -TARGET_EAFNOSUPPORT;
  1770. ret = get_errno(socket(domain, type, protocol));
  1771. if (ret >= 0) {
  1772. ret = sock_flags_fixup(ret, target_type);
  1773. }
  1774. return ret;
  1775. }
  1776. /* do_bind() Must return target values and target errnos. */
  1777. static abi_long do_bind(int sockfd, abi_ulong target_addr,
  1778. socklen_t addrlen)
  1779. {
  1780. void *addr;
  1781. abi_long ret;
  1782. if ((int)addrlen < 0) {
  1783. return -TARGET_EINVAL;
  1784. }
  1785. addr = alloca(addrlen+1);
  1786. ret = target_to_host_sockaddr(addr, target_addr, addrlen);
  1787. if (ret)
  1788. return ret;
  1789. return get_errno(bind(sockfd, addr, addrlen));
  1790. }
  1791. /* do_connect() Must return target values and target errnos. */
  1792. static abi_long do_connect(int sockfd, abi_ulong target_addr,
  1793. socklen_t addrlen)
  1794. {
  1795. void *addr;
  1796. abi_long ret;
  1797. if ((int)addrlen < 0) {
  1798. return -TARGET_EINVAL;
  1799. }
  1800. addr = alloca(addrlen+1);
  1801. ret = target_to_host_sockaddr(addr, target_addr, addrlen);
  1802. if (ret)
  1803. return ret;
  1804. return get_errno(connect(sockfd, addr, addrlen));
  1805. }
  1806. /* do_sendrecvmsg_locked() Must return target values and target errnos. */
  1807. static abi_long do_sendrecvmsg_locked(int fd, struct target_msghdr *msgp,
  1808. int flags, int send)
  1809. {
  1810. abi_long ret, len;
  1811. struct msghdr msg;
  1812. int count;
  1813. struct iovec *vec;
  1814. abi_ulong target_vec;
  1815. if (msgp->msg_name) {
  1816. msg.msg_namelen = tswap32(msgp->msg_namelen);
  1817. msg.msg_name = alloca(msg.msg_namelen+1);
  1818. ret = target_to_host_sockaddr(msg.msg_name, tswapal(msgp->msg_name),
  1819. msg.msg_namelen);
  1820. if (ret) {
  1821. goto out2;
  1822. }
  1823. } else {
  1824. msg.msg_name = NULL;
  1825. msg.msg_namelen = 0;
  1826. }
  1827. msg.msg_controllen = 2 * tswapal(msgp->msg_controllen);
  1828. msg.msg_control = alloca(msg.msg_controllen);
  1829. msg.msg_flags = tswap32(msgp->msg_flags);
  1830. count = tswapal(msgp->msg_iovlen);
  1831. target_vec = tswapal(msgp->msg_iov);
  1832. vec = lock_iovec(send ? VERIFY_READ : VERIFY_WRITE,
  1833. target_vec, count, send);
  1834. if (vec == NULL) {
  1835. ret = -host_to_target_errno(errno);
  1836. goto out2;
  1837. }
  1838. msg.msg_iovlen = count;
  1839. msg.msg_iov = vec;
  1840. if (send) {
  1841. ret = target_to_host_cmsg(&msg, msgp);
  1842. if (ret == 0)
  1843. ret = get_errno(sendmsg(fd, &msg, flags));
  1844. } else {
  1845. ret = get_errno(recvmsg(fd, &msg, flags));
  1846. if (!is_error(ret)) {
  1847. len = ret;
  1848. ret = host_to_target_cmsg(msgp, &msg);
  1849. if (!is_error(ret)) {
  1850. msgp->msg_namelen = tswap32(msg.msg_namelen);
  1851. if (msg.msg_name != NULL) {
  1852. ret = host_to_target_sockaddr(tswapal(msgp->msg_name),
  1853. msg.msg_name, msg.msg_namelen);
  1854. if (ret) {
  1855. goto out;
  1856. }
  1857. }
  1858. ret = len;
  1859. }
  1860. }
  1861. }
  1862. out:
  1863. unlock_iovec(vec, target_vec, count, !send);
  1864. out2:
  1865. return ret;
  1866. }
  1867. static abi_long do_sendrecvmsg(int fd, abi_ulong target_msg,
  1868. int flags, int send)
  1869. {
  1870. abi_long ret;
  1871. struct target_msghdr *msgp;
  1872. if (!lock_user_struct(send ? VERIFY_READ : VERIFY_WRITE,
  1873. msgp,
  1874. target_msg,
  1875. send ? 1 : 0)) {
  1876. return -TARGET_EFAULT;
  1877. }
  1878. ret = do_sendrecvmsg_locked(fd, msgp, flags, send);
  1879. unlock_user_struct(msgp, target_msg, send ? 0 : 1);
  1880. return ret;
  1881. }
  1882. #ifdef TARGET_NR_sendmmsg
  1883. /* We don't rely on the C library to have sendmmsg/recvmmsg support,
  1884. * so it might not have this *mmsg-specific flag either.
  1885. */
  1886. #ifndef MSG_WAITFORONE
  1887. #define MSG_WAITFORONE 0x10000
  1888. #endif
  1889. static abi_long do_sendrecvmmsg(int fd, abi_ulong target_msgvec,
  1890. unsigned int vlen, unsigned int flags,
  1891. int send)
  1892. {
  1893. struct target_mmsghdr *mmsgp;
  1894. abi_long ret = 0;
  1895. int i;
  1896. if (vlen > UIO_MAXIOV) {
  1897. vlen = UIO_MAXIOV;
  1898. }
  1899. mmsgp = lock_user(VERIFY_WRITE, target_msgvec, sizeof(*mmsgp) * vlen, 1);
  1900. if (!mmsgp) {
  1901. return -TARGET_EFAULT;
  1902. }
  1903. for (i = 0; i < vlen; i++) {
  1904. ret = do_sendrecvmsg_locked(fd, &mmsgp[i].msg_hdr, flags, send);
  1905. if (is_error(ret)) {
  1906. break;
  1907. }
  1908. mmsgp[i].msg_len = tswap32(ret);
  1909. /* MSG_WAITFORONE turns on MSG_DONTWAIT after one packet */
  1910. if (flags & MSG_WAITFORONE) {
  1911. flags |= MSG_DONTWAIT;
  1912. }
  1913. }
  1914. unlock_user(mmsgp, target_msgvec, sizeof(*mmsgp) * i);
  1915. /* Return number of datagrams sent if we sent any at all;
  1916. * otherwise return the error.
  1917. */
  1918. if (i) {
  1919. return i;
  1920. }
  1921. return ret;
  1922. }
  1923. #endif
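/* MSG_WAITFORONE example (illustrative): recvmmsg() with MSG_WAITFORONE blocks
 * only for the first datagram; the loop above ORs in MSG_DONTWAIT after each
 * successful receive, so the remaining slots are filled only from data that
 * is already queued and the call returns the count received so far. */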
  1924. /* If we don't have a system accept4() then just call accept.
  1925. * The callsites to do_accept4() will ensure that they don't
  1926. * pass a non-zero flags argument in this config.
  1927. */
  1928. #ifndef CONFIG_ACCEPT4
  1929. static inline int accept4(int sockfd, struct sockaddr *addr,
  1930. socklen_t *addrlen, int flags)
  1931. {
  1932. assert(flags == 0);
  1933. return accept(sockfd, addr, addrlen);
  1934. }
  1935. #endif
  1936. /* do_accept4() Must return target values and target errnos. */
  1937. static abi_long do_accept4(int fd, abi_ulong target_addr,
  1938. abi_ulong target_addrlen_addr, int flags)
  1939. {
  1940. socklen_t addrlen;
  1941. void *addr;
  1942. abi_long ret;
  1943. int host_flags;
  1944. host_flags = target_to_host_bitmask(flags, fcntl_flags_tbl);
  1945. if (target_addr == 0) {
  1946. return get_errno(accept4(fd, NULL, NULL, host_flags));
  1947. }
  1948. /* linux returns EINVAL if addrlen pointer is invalid */
  1949. if (get_user_u32(addrlen, target_addrlen_addr))
  1950. return -TARGET_EINVAL;
  1951. if ((int)addrlen < 0) {
  1952. return -TARGET_EINVAL;
  1953. }
  1954. if (!access_ok(VERIFY_WRITE, target_addr, addrlen))
  1955. return -TARGET_EINVAL;
  1956. addr = alloca(addrlen);
  1957. ret = get_errno(accept4(fd, addr, &addrlen, host_flags));
  1958. if (!is_error(ret)) {
  1959. host_to_target_sockaddr(target_addr, addr, addrlen);
  1960. if (put_user_u32(addrlen, target_addrlen_addr))
  1961. ret = -TARGET_EFAULT;
  1962. }
  1963. return ret;
  1964. }
  1965. /* do_getpeername() Must return target values and target errnos. */
  1966. static abi_long do_getpeername(int fd, abi_ulong target_addr,
  1967. abi_ulong target_addrlen_addr)
  1968. {
  1969. socklen_t addrlen;
  1970. void *addr;
  1971. abi_long ret;
  1972. if (get_user_u32(addrlen, target_addrlen_addr))
  1973. return -TARGET_EFAULT;
  1974. if ((int)addrlen < 0) {
  1975. return -TARGET_EINVAL;
  1976. }
  1977. if (!access_ok(VERIFY_WRITE, target_addr, addrlen))
  1978. return -TARGET_EFAULT;
  1979. addr = alloca(addrlen);
  1980. ret = get_errno(getpeername(fd, addr, &addrlen));
  1981. if (!is_error(ret)) {
  1982. host_to_target_sockaddr(target_addr, addr, addrlen);
  1983. if (put_user_u32(addrlen, target_addrlen_addr))
  1984. ret = -TARGET_EFAULT;
  1985. }
  1986. return ret;
  1987. }
  1988. /* do_getsockname() Must return target values and target errnos. */
  1989. static abi_long do_getsockname(int fd, abi_ulong target_addr,
  1990. abi_ulong target_addrlen_addr)
  1991. {
  1992. socklen_t addrlen;
  1993. void *addr;
  1994. abi_long ret;
  1995. if (get_user_u32(addrlen, target_addrlen_addr))
  1996. return -TARGET_EFAULT;
  1997. if ((int)addrlen < 0) {
  1998. return -TARGET_EINVAL;
  1999. }
  2000. if (!access_ok(VERIFY_WRITE, target_addr, addrlen))
  2001. return -TARGET_EFAULT;
  2002. addr = alloca(addrlen);
  2003. ret = get_errno(getsockname(fd, addr, &addrlen));
  2004. if (!is_error(ret)) {
  2005. host_to_target_sockaddr(target_addr, addr, addrlen);
  2006. if (put_user_u32(addrlen, target_addrlen_addr))
  2007. ret = -TARGET_EFAULT;
  2008. }
  2009. return ret;
  2010. }
  2011. /* do_socketpair() Must return target values and target errnos. */
  2012. static abi_long do_socketpair(int domain, int type, int protocol,
  2013. abi_ulong target_tab_addr)
  2014. {
  2015. int tab[2];
  2016. abi_long ret;
  2017. target_to_host_sock_type(&type);
  2018. ret = get_errno(socketpair(domain, type, protocol, tab));
  2019. if (!is_error(ret)) {
  2020. if (put_user_s32(tab[0], target_tab_addr)
  2021. || put_user_s32(tab[1], target_tab_addr + sizeof(tab[0])))
  2022. ret = -TARGET_EFAULT;
  2023. }
  2024. return ret;
  2025. }
  2026. /* do_sendto() Must return target values and target errnos. */
  2027. static abi_long do_sendto(int fd, abi_ulong msg, size_t len, int flags,
  2028. abi_ulong target_addr, socklen_t addrlen)
  2029. {
  2030. void *addr;
  2031. void *host_msg;
  2032. abi_long ret;
  2033. if ((int)addrlen < 0) {
  2034. return -TARGET_EINVAL;
  2035. }
  2036. host_msg = lock_user(VERIFY_READ, msg, len, 1);
  2037. if (!host_msg)
  2038. return -TARGET_EFAULT;
  2039. if (target_addr) {
  2040. addr = alloca(addrlen+1);
  2041. ret = target_to_host_sockaddr(addr, target_addr, addrlen);
  2042. if (ret) {
  2043. unlock_user(host_msg, msg, 0);
  2044. return ret;
  2045. }
  2046. ret = get_errno(sendto(fd, host_msg, len, flags, addr, addrlen));
  2047. } else {
  2048. ret = get_errno(send(fd, host_msg, len, flags));
  2049. }
  2050. unlock_user(host_msg, msg, 0);
  2051. return ret;
  2052. }
  2053. /* do_recvfrom() Must return target values and target errnos. */
  2054. static abi_long do_recvfrom(int fd, abi_ulong msg, size_t len, int flags,
  2055. abi_ulong target_addr,
  2056. abi_ulong target_addrlen)
  2057. {
  2058. socklen_t addrlen;
  2059. void *addr;
  2060. void *host_msg;
  2061. abi_long ret;
  2062. host_msg = lock_user(VERIFY_WRITE, msg, len, 0);
  2063. if (!host_msg)
  2064. return -TARGET_EFAULT;
  2065. if (target_addr) {
  2066. if (get_user_u32(addrlen, target_addrlen)) {
  2067. ret = -TARGET_EFAULT;
  2068. goto fail;
  2069. }
  2070. if ((int)addrlen < 0) {
  2071. ret = -TARGET_EINVAL;
  2072. goto fail;
  2073. }
  2074. addr = alloca(addrlen);
  2075. ret = get_errno(recvfrom(fd, host_msg, len, flags, addr, &addrlen));
  2076. } else {
  2077. addr = NULL; /* To keep compiler quiet. */
  2078. ret = get_errno(qemu_recv(fd, host_msg, len, flags));
  2079. }
  2080. if (!is_error(ret)) {
  2081. if (target_addr) {
  2082. host_to_target_sockaddr(target_addr, addr, addrlen);
  2083. if (put_user_u32(addrlen, target_addrlen)) {
  2084. ret = -TARGET_EFAULT;
  2085. goto fail;
  2086. }
  2087. }
  2088. unlock_user(host_msg, msg, len);
  2089. } else {
  2090. fail:
  2091. unlock_user(host_msg, msg, 0);
  2092. }
  2093. return ret;
  2094. }
  2095. #ifdef TARGET_NR_socketcall
  2096. /* do_socketcall() Must return target values and target errnos. */
  2097. static abi_long do_socketcall(int num, abi_ulong vptr)
  2098. {
  2099. static const unsigned ac[] = { /* number of arguments per call */
  2100. [SOCKOP_socket] = 3, /* domain, type, protocol */
  2101. [SOCKOP_bind] = 3, /* sockfd, addr, addrlen */
  2102. [SOCKOP_connect] = 3, /* sockfd, addr, addrlen */
  2103. [SOCKOP_listen] = 2, /* sockfd, backlog */
  2104. [SOCKOP_accept] = 3, /* sockfd, addr, addrlen */
  2105. [SOCKOP_accept4] = 4, /* sockfd, addr, addrlen, flags */
  2106. [SOCKOP_getsockname] = 3, /* sockfd, addr, addrlen */
  2107. [SOCKOP_getpeername] = 3, /* sockfd, addr, addrlen */
  2108. [SOCKOP_socketpair] = 4, /* domain, type, protocol, tab */
  2109. [SOCKOP_send] = 4, /* sockfd, msg, len, flags */
  2110. [SOCKOP_recv] = 4, /* sockfd, msg, len, flags */
  2111. [SOCKOP_sendto] = 6, /* sockfd, msg, len, flags, addr, addrlen */
  2112. [SOCKOP_recvfrom] = 6, /* sockfd, msg, len, flags, addr, addrlen */
  2113. [SOCKOP_shutdown] = 2, /* sockfd, how */
  2114. [SOCKOP_sendmsg] = 3, /* sockfd, msg, flags */
  2115. [SOCKOP_recvmsg] = 3, /* sockfd, msg, flags */
  2116. [SOCKOP_setsockopt] = 5, /* sockfd, level, optname, optval, optlen */
  2117. [SOCKOP_getsockopt] = 5, /* sockfd, level, optname, optval, optlen */
  2118. };
  2119. abi_long a[6]; /* max 6 args */
  2120. /* first, collect the arguments in a[] according to ac[] */
  2121. if (num >= 0 && num < ARRAY_SIZE(ac)) {
  2122. unsigned i;
  2123. assert(ARRAY_SIZE(a) >= ac[num]); /* ensure we have space for args */
  2124. for (i = 0; i < ac[num]; ++i) {
  2125. if (get_user_ual(a[i], vptr + i * sizeof(abi_long)) != 0) {
  2126. return -TARGET_EFAULT;
  2127. }
  2128. }
  2129. }
  2130. /* now when we have the args, actually handle the call */
  2131. switch (num) {
  2132. case SOCKOP_socket: /* domain, type, protocol */
  2133. return do_socket(a[0], a[1], a[2]);
  2134. case SOCKOP_bind: /* sockfd, addr, addrlen */
  2135. return do_bind(a[0], a[1], a[2]);
  2136. case SOCKOP_connect: /* sockfd, addr, addrlen */
  2137. return do_connect(a[0], a[1], a[2]);
  2138. case SOCKOP_listen: /* sockfd, backlog */
  2139. return get_errno(listen(a[0], a[1]));
  2140. case SOCKOP_accept: /* sockfd, addr, addrlen */
  2141. return do_accept4(a[0], a[1], a[2], 0);
  2142. case SOCKOP_accept4: /* sockfd, addr, addrlen, flags */
  2143. return do_accept4(a[0], a[1], a[2], a[3]);
  2144. case SOCKOP_getsockname: /* sockfd, addr, addrlen */
  2145. return do_getsockname(a[0], a[1], a[2]);
  2146. case SOCKOP_getpeername: /* sockfd, addr, addrlen */
  2147. return do_getpeername(a[0], a[1], a[2]);
  2148. case SOCKOP_socketpair: /* domain, type, protocol, tab */
  2149. return do_socketpair(a[0], a[1], a[2], a[3]);
  2150. case SOCKOP_send: /* sockfd, msg, len, flags */
  2151. return do_sendto(a[0], a[1], a[2], a[3], 0, 0);
  2152. case SOCKOP_recv: /* sockfd, msg, len, flags */
  2153. return do_recvfrom(a[0], a[1], a[2], a[3], 0, 0);
  2154. case SOCKOP_sendto: /* sockfd, msg, len, flags, addr, addrlen */
  2155. return do_sendto(a[0], a[1], a[2], a[3], a[4], a[5]);
  2156. case SOCKOP_recvfrom: /* sockfd, msg, len, flags, addr, addrlen */
  2157. return do_recvfrom(a[0], a[1], a[2], a[3], a[4], a[5]);
  2158. case SOCKOP_shutdown: /* sockfd, how */
  2159. return get_errno(shutdown(a[0], a[1]));
  2160. case SOCKOP_sendmsg: /* sockfd, msg, flags */
  2161. return do_sendrecvmsg(a[0], a[1], a[2], 1);
  2162. case SOCKOP_recvmsg: /* sockfd, msg, flags */
  2163. return do_sendrecvmsg(a[0], a[1], a[2], 0);
  2164. case SOCKOP_setsockopt: /* sockfd, level, optname, optval, optlen */
  2165. return do_setsockopt(a[0], a[1], a[2], a[3], a[4]);
  2166. case SOCKOP_getsockopt: /* sockfd, level, optname, optval, optlen */
  2167. return do_getsockopt(a[0], a[1], a[2], a[3], a[4]);
  2168. default:
  2169. gemu_log("Unsupported socketcall: %d\n", num);
  2170. return -TARGET_ENOSYS;
  2171. }
  2172. }
  2173. #endif
  2174. #define N_SHM_REGIONS 32
  2175. static struct shm_region {
  2176. abi_ulong start;
  2177. abi_ulong size;
  2178. } shm_regions[N_SHM_REGIONS];
  2179. struct target_semid_ds
  2180. {
  2181. struct target_ipc_perm sem_perm;
  2182. abi_ulong sem_otime;
  2183. #if !defined(TARGET_PPC64)
  2184. abi_ulong __unused1;
  2185. #endif
  2186. abi_ulong sem_ctime;
  2187. #if !defined(TARGET_PPC64)
  2188. abi_ulong __unused2;
  2189. #endif
  2190. abi_ulong sem_nsems;
  2191. abi_ulong __unused3;
  2192. abi_ulong __unused4;
  2193. };
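/*
 * Convert the ipc_perm structure embedded at the start of a guest
 * semid_ds/msqid_ds/shmid_ds into the host's struct ipc_perm and back.
 * The widths of the mode and __seq fields differ between target ABIs,
 * hence the per-target ifdefs below.
 */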
  2194. static inline abi_long target_to_host_ipc_perm(struct ipc_perm *host_ip,
  2195. abi_ulong target_addr)
  2196. {
  2197. struct target_ipc_perm *target_ip;
  2198. struct target_semid_ds *target_sd;
  2199. if (!lock_user_struct(VERIFY_READ, target_sd, target_addr, 1))
  2200. return -TARGET_EFAULT;
  2201. target_ip = &(target_sd->sem_perm);
  2202. host_ip->__key = tswap32(target_ip->__key);
  2203. host_ip->uid = tswap32(target_ip->uid);
  2204. host_ip->gid = tswap32(target_ip->gid);
  2205. host_ip->cuid = tswap32(target_ip->cuid);
  2206. host_ip->cgid = tswap32(target_ip->cgid);
  2207. #if defined(TARGET_ALPHA) || defined(TARGET_MIPS) || defined(TARGET_PPC)
  2208. host_ip->mode = tswap32(target_ip->mode);
  2209. #else
  2210. host_ip->mode = tswap16(target_ip->mode);
  2211. #endif
  2212. #if defined(TARGET_PPC)
  2213. host_ip->__seq = tswap32(target_ip->__seq);
  2214. #else
  2215. host_ip->__seq = tswap16(target_ip->__seq);
  2216. #endif
  2217. unlock_user_struct(target_sd, target_addr, 0);
  2218. return 0;
  2219. }
  2220. static inline abi_long host_to_target_ipc_perm(abi_ulong target_addr,
  2221. struct ipc_perm *host_ip)
  2222. {
  2223. struct target_ipc_perm *target_ip;
  2224. struct target_semid_ds *target_sd;
  2225. if (!lock_user_struct(VERIFY_WRITE, target_sd, target_addr, 0))
  2226. return -TARGET_EFAULT;
  2227. target_ip = &(target_sd->sem_perm);
  2228. target_ip->__key = tswap32(host_ip->__key);
  2229. target_ip->uid = tswap32(host_ip->uid);
  2230. target_ip->gid = tswap32(host_ip->gid);
  2231. target_ip->cuid = tswap32(host_ip->cuid);
  2232. target_ip->cgid = tswap32(host_ip->cgid);
  2233. #if defined(TARGET_ALPHA) || defined(TARGET_MIPS) || defined(TARGET_PPC)
  2234. target_ip->mode = tswap32(host_ip->mode);
  2235. #else
  2236. target_ip->mode = tswap16(host_ip->mode);
  2237. #endif
  2238. #if defined(TARGET_PPC)
  2239. target_ip->__seq = tswap32(host_ip->__seq);
  2240. #else
  2241. target_ip->__seq = tswap16(host_ip->__seq);
  2242. #endif
  2243. unlock_user_struct(target_sd, target_addr, 1);
  2244. return 0;
  2245. }
  2246. static inline abi_long target_to_host_semid_ds(struct semid_ds *host_sd,
  2247. abi_ulong target_addr)
  2248. {
  2249. struct target_semid_ds *target_sd;
  2250. if (!lock_user_struct(VERIFY_READ, target_sd, target_addr, 1))
  2251. return -TARGET_EFAULT;
  2252. if (target_to_host_ipc_perm(&(host_sd->sem_perm),target_addr))
  2253. return -TARGET_EFAULT;
  2254. host_sd->sem_nsems = tswapal(target_sd->sem_nsems);
  2255. host_sd->sem_otime = tswapal(target_sd->sem_otime);
  2256. host_sd->sem_ctime = tswapal(target_sd->sem_ctime);
  2257. unlock_user_struct(target_sd, target_addr, 0);
  2258. return 0;
  2259. }
  2260. static inline abi_long host_to_target_semid_ds(abi_ulong target_addr,
  2261. struct semid_ds *host_sd)
  2262. {
  2263. struct target_semid_ds *target_sd;
  2264. if (!lock_user_struct(VERIFY_WRITE, target_sd, target_addr, 0))
  2265. return -TARGET_EFAULT;
  2266. if (host_to_target_ipc_perm(target_addr,&(host_sd->sem_perm)))
  2267. return -TARGET_EFAULT;
  2268. target_sd->sem_nsems = tswapal(host_sd->sem_nsems);
  2269. target_sd->sem_otime = tswapal(host_sd->sem_otime);
  2270. target_sd->sem_ctime = tswapal(host_sd->sem_ctime);
  2271. unlock_user_struct(target_sd, target_addr, 1);
  2272. return 0;
  2273. }
  2274. struct target_seminfo {
  2275. int semmap;
  2276. int semmni;
  2277. int semmns;
  2278. int semmnu;
  2279. int semmsl;
  2280. int semopm;
  2281. int semume;
  2282. int semusz;
  2283. int semvmx;
  2284. int semaem;
  2285. };
  2286. static inline abi_long host_to_target_seminfo(abi_ulong target_addr,
  2287. struct seminfo *host_seminfo)
  2288. {
  2289. struct target_seminfo *target_seminfo;
  2290. if (!lock_user_struct(VERIFY_WRITE, target_seminfo, target_addr, 0))
  2291. return -TARGET_EFAULT;
  2292. __put_user(host_seminfo->semmap, &target_seminfo->semmap);
  2293. __put_user(host_seminfo->semmni, &target_seminfo->semmni);
  2294. __put_user(host_seminfo->semmns, &target_seminfo->semmns);
  2295. __put_user(host_seminfo->semmnu, &target_seminfo->semmnu);
  2296. __put_user(host_seminfo->semmsl, &target_seminfo->semmsl);
  2297. __put_user(host_seminfo->semopm, &target_seminfo->semopm);
  2298. __put_user(host_seminfo->semume, &target_seminfo->semume);
  2299. __put_user(host_seminfo->semusz, &target_seminfo->semusz);
  2300. __put_user(host_seminfo->semvmx, &target_seminfo->semvmx);
  2301. __put_user(host_seminfo->semaem, &target_seminfo->semaem);
  2302. unlock_user_struct(target_seminfo, target_addr, 1);
  2303. return 0;
  2304. }
  2305. union semun {
  2306. int val;
  2307. struct semid_ds *buf;
  2308. unsigned short *array;
  2309. struct seminfo *__buf;
  2310. };
  2311. union target_semun {
  2312. int val;
  2313. abi_ulong buf;
  2314. abi_ulong array;
  2315. abi_ulong __buf;
  2316. };
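/*
 * semctl's final argument is a union passed by value.  On the host side a
 * real union semun is built; on the guest side target_semun only carries
 * abi_ulong guest addresses (or an int for GETVAL/SETVAL).  The two helpers
 * below copy a whole semaphore array between guest memory and a host buffer
 * for GETALL/SETALL, querying the kernel with IPC_STAT first to learn how
 * many semaphores (sem_nsems) the set contains.
 */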
  2317. static inline abi_long target_to_host_semarray(int semid, unsigned short **host_array,
  2318. abi_ulong target_addr)
  2319. {
  2320. int nsems;
  2321. unsigned short *array;
  2322. union semun semun;
  2323. struct semid_ds semid_ds;
  2324. int i, ret;
  2325. semun.buf = &semid_ds;
  2326. ret = semctl(semid, 0, IPC_STAT, semun);
  2327. if (ret == -1)
  2328. return get_errno(ret);
  2329. nsems = semid_ds.sem_nsems;
  2330. *host_array = malloc(nsems*sizeof(unsigned short));
  2331. if (!*host_array) {
  2332. return -TARGET_ENOMEM;
  2333. }
  2334. array = lock_user(VERIFY_READ, target_addr,
  2335. nsems*sizeof(unsigned short), 1);
  2336. if (!array) {
  2337. free(*host_array);
  2338. return -TARGET_EFAULT;
  2339. }
  2340. for(i=0; i<nsems; i++) {
  2341. __get_user((*host_array)[i], &array[i]);
  2342. }
  2343. unlock_user(array, target_addr, 0);
  2344. return 0;
  2345. }
  2346. static inline abi_long host_to_target_semarray(int semid, abi_ulong target_addr,
  2347. unsigned short **host_array)
  2348. {
  2349. int nsems;
  2350. unsigned short *array;
  2351. union semun semun;
  2352. struct semid_ds semid_ds;
  2353. int i, ret;
  2354. semun.buf = &semid_ds;
  2355. ret = semctl(semid, 0, IPC_STAT, semun);
  2356. if (ret == -1)
  2357. return get_errno(ret);
  2358. nsems = semid_ds.sem_nsems;
  2359. array = lock_user(VERIFY_WRITE, target_addr,
  2360. nsems*sizeof(unsigned short), 0);
  2361. if (!array)
  2362. return -TARGET_EFAULT;
  2363. for(i=0; i<nsems; i++) {
  2364. __put_user((*host_array)[i], &array[i]);
  2365. }
  2366. free(*host_array);
  2367. unlock_user(array, target_addr, 1);
  2368. return 0;
  2369. }
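/*
 * Dispatch a guest semctl.  Depending on the command the union argument is
 * either a plain value (GETVAL/SETVAL), a guest pointer to an unsigned
 * short array (GETALL/SETALL), a guest semid_ds (IPC_STAT/IPC_SET/SEM_STAT)
 * or a guest seminfo (IPC_INFO/SEM_INFO); each case converts in and/or out
 * as appropriate around the host semctl() call.
 */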
  2370. static inline abi_long do_semctl(int semid, int semnum, int cmd,
  2371. union target_semun target_su)
  2372. {
  2373. union semun arg;
  2374. struct semid_ds dsarg;
  2375. unsigned short *array = NULL;
  2376. struct seminfo seminfo;
  2377. abi_long ret = -TARGET_EINVAL;
  2378. abi_long err;
  2379. cmd &= 0xff;
  2380. switch( cmd ) {
  2381. case GETVAL:
  2382. case SETVAL:
  2383. /* In 64 bit cross-endian situations, we will erroneously pick up
  2384. * the wrong half of the union for the "val" element. To rectify
  2385. * this, the entire 8-byte structure is byteswapped, followed by
  2386. * a swap of the 4 byte val field. In other cases, the data is
  2387. * already in proper host byte order. */
  2388. if (sizeof(target_su.val) != (sizeof(target_su.buf))) {
  2389. target_su.buf = tswapal(target_su.buf);
  2390. arg.val = tswap32(target_su.val);
  2391. } else {
  2392. arg.val = target_su.val;
  2393. }
  2394. ret = get_errno(semctl(semid, semnum, cmd, arg));
  2395. break;
  2396. case GETALL:
  2397. case SETALL:
  2398. err = target_to_host_semarray(semid, &array, target_su.array);
  2399. if (err)
  2400. return err;
  2401. arg.array = array;
  2402. ret = get_errno(semctl(semid, semnum, cmd, arg));
  2403. err = host_to_target_semarray(semid, target_su.array, &array);
  2404. if (err)
  2405. return err;
  2406. break;
  2407. case IPC_STAT:
  2408. case IPC_SET:
  2409. case SEM_STAT:
  2410. err = target_to_host_semid_ds(&dsarg, target_su.buf);
  2411. if (err)
  2412. return err;
  2413. arg.buf = &dsarg;
  2414. ret = get_errno(semctl(semid, semnum, cmd, arg));
  2415. err = host_to_target_semid_ds(target_su.buf, &dsarg);
  2416. if (err)
  2417. return err;
  2418. break;
  2419. case IPC_INFO:
  2420. case SEM_INFO:
  2421. arg.__buf = &seminfo;
  2422. ret = get_errno(semctl(semid, semnum, cmd, arg));
  2423. err = host_to_target_seminfo(target_su.__buf, &seminfo);
  2424. if (err)
  2425. return err;
  2426. break;
  2427. case IPC_RMID:
  2428. case GETPID:
  2429. case GETNCNT:
  2430. case GETZCNT:
  2431. ret = get_errno(semctl(semid, semnum, cmd, NULL));
  2432. break;
  2433. }
  2434. return ret;
  2435. }
  2436. struct target_sembuf {
  2437. unsigned short sem_num;
  2438. short sem_op;
  2439. short sem_flg;
  2440. };
  2441. static inline abi_long target_to_host_sembuf(struct sembuf *host_sembuf,
  2442. abi_ulong target_addr,
  2443. unsigned nsops)
  2444. {
  2445. struct target_sembuf *target_sembuf;
  2446. int i;
  2447. target_sembuf = lock_user(VERIFY_READ, target_addr,
  2448. nsops*sizeof(struct target_sembuf), 1);
  2449. if (!target_sembuf)
  2450. return -TARGET_EFAULT;
  2451. for(i=0; i<nsops; i++) {
  2452. __get_user(host_sembuf[i].sem_num, &target_sembuf[i].sem_num);
  2453. __get_user(host_sembuf[i].sem_op, &target_sembuf[i].sem_op);
  2454. __get_user(host_sembuf[i].sem_flg, &target_sembuf[i].sem_flg);
  2455. }
  2456. unlock_user(target_sembuf, target_addr, 0);
  2457. return 0;
  2458. }
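/*
 * do_semop() builds a host sembuf array on the stack (a VLA sized by the
 * guest-supplied nsops) from the guest's target_sembuf array and forwards
 * it to the host semop().
 */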
  2459. static inline abi_long do_semop(int semid, abi_long ptr, unsigned nsops)
  2460. {
  2461. struct sembuf sops[nsops];
  2462. if (target_to_host_sembuf(sops, ptr, nsops))
  2463. return -TARGET_EFAULT;
  2464. return get_errno(semop(semid, sops, nsops));
  2465. }
  2466. struct target_msqid_ds
  2467. {
  2468. struct target_ipc_perm msg_perm;
  2469. abi_ulong msg_stime;
  2470. #if TARGET_ABI_BITS == 32
  2471. abi_ulong __unused1;
  2472. #endif
  2473. abi_ulong msg_rtime;
  2474. #if TARGET_ABI_BITS == 32
  2475. abi_ulong __unused2;
  2476. #endif
  2477. abi_ulong msg_ctime;
  2478. #if TARGET_ABI_BITS == 32
  2479. abi_ulong __unused3;
  2480. #endif
  2481. abi_ulong __msg_cbytes;
  2482. abi_ulong msg_qnum;
  2483. abi_ulong msg_qbytes;
  2484. abi_ulong msg_lspid;
  2485. abi_ulong msg_lrpid;
  2486. abi_ulong __unused4;
  2487. abi_ulong __unused5;
  2488. };
  2489. static inline abi_long target_to_host_msqid_ds(struct msqid_ds *host_md,
  2490. abi_ulong target_addr)
  2491. {
  2492. struct target_msqid_ds *target_md;
  2493. if (!lock_user_struct(VERIFY_READ, target_md, target_addr, 1))
  2494. return -TARGET_EFAULT;
  2495. if (target_to_host_ipc_perm(&(host_md->msg_perm),target_addr))
  2496. return -TARGET_EFAULT;
  2497. host_md->msg_stime = tswapal(target_md->msg_stime);
  2498. host_md->msg_rtime = tswapal(target_md->msg_rtime);
  2499. host_md->msg_ctime = tswapal(target_md->msg_ctime);
  2500. host_md->__msg_cbytes = tswapal(target_md->__msg_cbytes);
  2501. host_md->msg_qnum = tswapal(target_md->msg_qnum);
  2502. host_md->msg_qbytes = tswapal(target_md->msg_qbytes);
  2503. host_md->msg_lspid = tswapal(target_md->msg_lspid);
  2504. host_md->msg_lrpid = tswapal(target_md->msg_lrpid);
  2505. unlock_user_struct(target_md, target_addr, 0);
  2506. return 0;
  2507. }
  2508. static inline abi_long host_to_target_msqid_ds(abi_ulong target_addr,
  2509. struct msqid_ds *host_md)
  2510. {
  2511. struct target_msqid_ds *target_md;
  2512. if (!lock_user_struct(VERIFY_WRITE, target_md, target_addr, 0))
  2513. return -TARGET_EFAULT;
  2514. if (host_to_target_ipc_perm(target_addr,&(host_md->msg_perm)))
  2515. return -TARGET_EFAULT;
  2516. target_md->msg_stime = tswapal(host_md->msg_stime);
  2517. target_md->msg_rtime = tswapal(host_md->msg_rtime);
  2518. target_md->msg_ctime = tswapal(host_md->msg_ctime);
  2519. target_md->__msg_cbytes = tswapal(host_md->__msg_cbytes);
  2520. target_md->msg_qnum = tswapal(host_md->msg_qnum);
  2521. target_md->msg_qbytes = tswapal(host_md->msg_qbytes);
  2522. target_md->msg_lspid = tswapal(host_md->msg_lspid);
  2523. target_md->msg_lrpid = tswapal(host_md->msg_lrpid);
  2524. unlock_user_struct(target_md, target_addr, 1);
  2525. return 0;
  2526. }
  2527. struct target_msginfo {
  2528. int msgpool;
  2529. int msgmap;
  2530. int msgmax;
  2531. int msgmnb;
  2532. int msgmni;
  2533. int msgssz;
  2534. int msgtql;
  2535. unsigned short int msgseg;
  2536. };
  2537. static inline abi_long host_to_target_msginfo(abi_ulong target_addr,
  2538. struct msginfo *host_msginfo)
  2539. {
  2540. struct target_msginfo *target_msginfo;
  2541. if (!lock_user_struct(VERIFY_WRITE, target_msginfo, target_addr, 0))
  2542. return -TARGET_EFAULT;
  2543. __put_user(host_msginfo->msgpool, &target_msginfo->msgpool);
  2544. __put_user(host_msginfo->msgmap, &target_msginfo->msgmap);
  2545. __put_user(host_msginfo->msgmax, &target_msginfo->msgmax);
  2546. __put_user(host_msginfo->msgmnb, &target_msginfo->msgmnb);
  2547. __put_user(host_msginfo->msgmni, &target_msginfo->msgmni);
  2548. __put_user(host_msginfo->msgssz, &target_msginfo->msgssz);
  2549. __put_user(host_msginfo->msgtql, &target_msginfo->msgtql);
  2550. __put_user(host_msginfo->msgseg, &target_msginfo->msgseg);
  2551. unlock_user_struct(target_msginfo, target_addr, 1);
  2552. return 0;
  2553. }
  2554. static inline abi_long do_msgctl(int msgid, int cmd, abi_long ptr)
  2555. {
  2556. struct msqid_ds dsarg;
  2557. struct msginfo msginfo;
  2558. abi_long ret = -TARGET_EINVAL;
  2559. cmd &= 0xff;
  2560. switch (cmd) {
  2561. case IPC_STAT:
  2562. case IPC_SET:
  2563. case MSG_STAT:
  2564. if (target_to_host_msqid_ds(&dsarg,ptr))
  2565. return -TARGET_EFAULT;
  2566. ret = get_errno(msgctl(msgid, cmd, &dsarg));
  2567. if (host_to_target_msqid_ds(ptr,&dsarg))
  2568. return -TARGET_EFAULT;
  2569. break;
  2570. case IPC_RMID:
  2571. ret = get_errno(msgctl(msgid, cmd, NULL));
  2572. break;
  2573. case IPC_INFO:
  2574. case MSG_INFO:
  2575. ret = get_errno(msgctl(msgid, cmd, (struct msqid_ds *)&msginfo));
  2576. if (host_to_target_msginfo(ptr, &msginfo))
  2577. return -TARGET_EFAULT;
  2578. break;
  2579. }
  2580. return ret;
  2581. }
  2582. struct target_msgbuf {
  2583. abi_long mtype;
  2584. char mtext[1];
  2585. };
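/*
 * For msgsnd/msgrcv the guest buffer is a target_msgbuf: an abi_long mtype
 * followed by the message text.  The host msgbuf uses a native long for
 * mtype, so a temporary host buffer of msgsz + sizeof(long) bytes is
 * allocated and mtype is byte-swapped separately from the text.
 */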
  2586. static inline abi_long do_msgsnd(int msqid, abi_long msgp,
  2587. ssize_t msgsz, int msgflg)
  2588. {
  2589. struct target_msgbuf *target_mb;
  2590. struct msgbuf *host_mb;
  2591. abi_long ret = 0;
  2592. if (msgsz < 0) {
  2593. return -TARGET_EINVAL;
  2594. }
  2595. if (!lock_user_struct(VERIFY_READ, target_mb, msgp, 0))
  2596. return -TARGET_EFAULT;
  2597. host_mb = malloc(msgsz+sizeof(long));
  2598. if (!host_mb) {
  2599. unlock_user_struct(target_mb, msgp, 0);
  2600. return -TARGET_ENOMEM;
  2601. }
  2602. host_mb->mtype = (abi_long) tswapal(target_mb->mtype);
  2603. memcpy(host_mb->mtext, target_mb->mtext, msgsz);
  2604. ret = get_errno(msgsnd(msqid, host_mb, msgsz, msgflg));
  2605. free(host_mb);
  2606. unlock_user_struct(target_mb, msgp, 0);
  2607. return ret;
  2608. }
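/*
 * do_msgrcv() mirrors do_msgsnd(): receive into a host msgbuf, copy the
 * returned text (ret bytes) back into guest memory just past the mtype
 * field, and store the byte-swapped mtype in the guest structure.
 */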
  2609. static inline abi_long do_msgrcv(int msqid, abi_long msgp,
  2610. unsigned int msgsz, abi_long msgtyp,
  2611. int msgflg)
  2612. {
  2613. struct target_msgbuf *target_mb;
  2614. char *target_mtext;
  2615. struct msgbuf *host_mb;
  2616. abi_long ret = 0;
  2617. if (!lock_user_struct(VERIFY_WRITE, target_mb, msgp, 0))
  2618. return -TARGET_EFAULT;
  2619. host_mb = g_malloc(msgsz+sizeof(long));
  2620. ret = get_errno(msgrcv(msqid, host_mb, msgsz, msgtyp, msgflg));
  2621. if (ret > 0) {
  2622. abi_ulong target_mtext_addr = msgp + sizeof(abi_ulong);
  2623. target_mtext = lock_user(VERIFY_WRITE, target_mtext_addr, ret, 0);
  2624. if (!target_mtext) {
  2625. ret = -TARGET_EFAULT;
  2626. goto end;
  2627. }
  2628. memcpy(target_mb->mtext, host_mb->mtext, ret);
  2629. unlock_user(target_mtext, target_mtext_addr, ret);
  2630. }
  2631. target_mb->mtype = tswapal(host_mb->mtype);
  2632. end:
  2633. if (target_mb)
  2634. unlock_user_struct(target_mb, msgp, 1);
  2635. g_free(host_mb);
  2636. return ret;
  2637. }
  2638. static inline abi_long target_to_host_shmid_ds(struct shmid_ds *host_sd,
  2639. abi_ulong target_addr)
  2640. {
  2641. struct target_shmid_ds *target_sd;
  2642. if (!lock_user_struct(VERIFY_READ, target_sd, target_addr, 1))
  2643. return -TARGET_EFAULT;
  2644. if (target_to_host_ipc_perm(&(host_sd->shm_perm), target_addr))
  2645. return -TARGET_EFAULT;
  2646. __get_user(host_sd->shm_segsz, &target_sd->shm_segsz);
  2647. __get_user(host_sd->shm_atime, &target_sd->shm_atime);
  2648. __get_user(host_sd->shm_dtime, &target_sd->shm_dtime);
  2649. __get_user(host_sd->shm_ctime, &target_sd->shm_ctime);
  2650. __get_user(host_sd->shm_cpid, &target_sd->shm_cpid);
  2651. __get_user(host_sd->shm_lpid, &target_sd->shm_lpid);
  2652. __get_user(host_sd->shm_nattch, &target_sd->shm_nattch);
  2653. unlock_user_struct(target_sd, target_addr, 0);
  2654. return 0;
  2655. }
  2656. static inline abi_long host_to_target_shmid_ds(abi_ulong target_addr,
  2657. struct shmid_ds *host_sd)
  2658. {
  2659. struct target_shmid_ds *target_sd;
  2660. if (!lock_user_struct(VERIFY_WRITE, target_sd, target_addr, 0))
  2661. return -TARGET_EFAULT;
  2662. if (host_to_target_ipc_perm(target_addr, &(host_sd->shm_perm)))
  2663. return -TARGET_EFAULT;
  2664. __put_user(host_sd->shm_segsz, &target_sd->shm_segsz);
  2665. __put_user(host_sd->shm_atime, &target_sd->shm_atime);
  2666. __put_user(host_sd->shm_dtime, &target_sd->shm_dtime);
  2667. __put_user(host_sd->shm_ctime, &target_sd->shm_ctime);
  2668. __put_user(host_sd->shm_cpid, &target_sd->shm_cpid);
  2669. __put_user(host_sd->shm_lpid, &target_sd->shm_lpid);
  2670. __put_user(host_sd->shm_nattch, &target_sd->shm_nattch);
  2671. unlock_user_struct(target_sd, target_addr, 1);
  2672. return 0;
  2673. }
  2674. struct target_shminfo {
  2675. abi_ulong shmmax;
  2676. abi_ulong shmmin;
  2677. abi_ulong shmmni;
  2678. abi_ulong shmseg;
  2679. abi_ulong shmall;
  2680. };
  2681. static inline abi_long host_to_target_shminfo(abi_ulong target_addr,
  2682. struct shminfo *host_shminfo)
  2683. {
  2684. struct target_shminfo *target_shminfo;
  2685. if (!lock_user_struct(VERIFY_WRITE, target_shminfo, target_addr, 0))
  2686. return -TARGET_EFAULT;
  2687. __put_user(host_shminfo->shmmax, &target_shminfo->shmmax);
  2688. __put_user(host_shminfo->shmmin, &target_shminfo->shmmin);
  2689. __put_user(host_shminfo->shmmni, &target_shminfo->shmmni);
  2690. __put_user(host_shminfo->shmseg, &target_shminfo->shmseg);
  2691. __put_user(host_shminfo->shmall, &target_shminfo->shmall);
  2692. unlock_user_struct(target_shminfo, target_addr, 1);
  2693. return 0;
  2694. }
  2695. struct target_shm_info {
  2696. int used_ids;
  2697. abi_ulong shm_tot;
  2698. abi_ulong shm_rss;
  2699. abi_ulong shm_swp;
  2700. abi_ulong swap_attempts;
  2701. abi_ulong swap_successes;
  2702. };
  2703. static inline abi_long host_to_target_shm_info(abi_ulong target_addr,
  2704. struct shm_info *host_shm_info)
  2705. {
  2706. struct target_shm_info *target_shm_info;
  2707. if (!lock_user_struct(VERIFY_WRITE, target_shm_info, target_addr, 0))
  2708. return -TARGET_EFAULT;
  2709. __put_user(host_shm_info->used_ids, &target_shm_info->used_ids);
  2710. __put_user(host_shm_info->shm_tot, &target_shm_info->shm_tot);
  2711. __put_user(host_shm_info->shm_rss, &target_shm_info->shm_rss);
  2712. __put_user(host_shm_info->shm_swp, &target_shm_info->shm_swp);
  2713. __put_user(host_shm_info->swap_attempts, &target_shm_info->swap_attempts);
  2714. __put_user(host_shm_info->swap_successes, &target_shm_info->swap_successes);
  2715. unlock_user_struct(target_shm_info, target_addr, 1);
  2716. return 0;
  2717. }
  2718. static inline abi_long do_shmctl(int shmid, int cmd, abi_long buf)
  2719. {
  2720. struct shmid_ds dsarg;
  2721. struct shminfo shminfo;
  2722. struct shm_info shm_info;
  2723. abi_long ret = -TARGET_EINVAL;
  2724. cmd &= 0xff;
  2725. switch(cmd) {
  2726. case IPC_STAT:
  2727. case IPC_SET:
  2728. case SHM_STAT:
  2729. if (target_to_host_shmid_ds(&dsarg, buf))
  2730. return -TARGET_EFAULT;
  2731. ret = get_errno(shmctl(shmid, cmd, &dsarg));
  2732. if (host_to_target_shmid_ds(buf, &dsarg))
  2733. return -TARGET_EFAULT;
  2734. break;
  2735. case IPC_INFO:
  2736. ret = get_errno(shmctl(shmid, cmd, (struct shmid_ds *)&shminfo));
  2737. if (host_to_target_shminfo(buf, &shminfo))
  2738. return -TARGET_EFAULT;
  2739. break;
  2740. case SHM_INFO:
  2741. ret = get_errno(shmctl(shmid, cmd, (struct shmid_ds *)&shm_info));
  2742. if (host_to_target_shm_info(buf, &shm_info))
  2743. return -TARGET_EFAULT;
  2744. break;
  2745. case IPC_RMID:
  2746. case SHM_LOCK:
  2747. case SHM_UNLOCK:
  2748. ret = get_errno(shmctl(shmid, cmd, NULL));
  2749. break;
  2750. }
  2751. return ret;
  2752. }
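/*
 * do_shmat() first asks the kernel (IPC_STAT) for the segment size, then,
 * under mmap_lock, either attaches at the guest-requested address or finds
 * a free guest VMA with mmap_find_vma().  The resulting guest range is
 * marked valid/readable (and writable unless SHM_RDONLY) and remembered in
 * shm_regions[] so that do_shmdt() can later clear the page flags again.
 */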
  2753. static inline abi_ulong do_shmat(int shmid, abi_ulong shmaddr, int shmflg)
  2754. {
  2755. abi_long raddr;
  2756. void *host_raddr;
  2757. struct shmid_ds shm_info;
  2758. int i,ret;
  2759. /* find out the length of the shared memory segment */
  2760. ret = get_errno(shmctl(shmid, IPC_STAT, &shm_info));
  2761. if (is_error(ret)) {
  2762. /* can't get length, bail out */
  2763. return ret;
  2764. }
  2765. mmap_lock();
  2766. if (shmaddr)
  2767. host_raddr = shmat(shmid, (void *)g2h(shmaddr), shmflg);
  2768. else {
  2769. abi_ulong mmap_start;
  2770. mmap_start = mmap_find_vma(0, shm_info.shm_segsz);
  2771. if (mmap_start == -1) {
  2772. errno = ENOMEM;
  2773. host_raddr = (void *)-1;
  2774. } else
  2775. host_raddr = shmat(shmid, g2h(mmap_start), shmflg | SHM_REMAP);
  2776. }
  2777. if (host_raddr == (void *)-1) {
  2778. mmap_unlock();
  2779. return get_errno((long)host_raddr);
  2780. }
  2781. raddr=h2g((unsigned long)host_raddr);
  2782. page_set_flags(raddr, raddr + shm_info.shm_segsz,
  2783. PAGE_VALID | PAGE_READ |
  2784. ((shmflg & SHM_RDONLY)? 0 : PAGE_WRITE));
  2785. for (i = 0; i < N_SHM_REGIONS; i++) {
  2786. if (shm_regions[i].start == 0) {
  2787. shm_regions[i].start = raddr;
  2788. shm_regions[i].size = shm_info.shm_segsz;
  2789. break;
  2790. }
  2791. }
  2792. mmap_unlock();
  2793. return raddr;
  2794. }
  2795. static inline abi_long do_shmdt(abi_ulong shmaddr)
  2796. {
  2797. int i;
  2798. for (i = 0; i < N_SHM_REGIONS; ++i) {
  2799. if (shm_regions[i].start == shmaddr) {
  2800. shm_regions[i].start = 0;
  2801. page_set_flags(shmaddr, shmaddr + shm_regions[i].size, 0);
  2802. break;
  2803. }
  2804. }
  2805. return get_errno(shmdt(g2h(shmaddr)));
  2806. }
  2807. #ifdef TARGET_NR_ipc
  2808. /* ??? This only works with linear mappings. */
  2809. /* do_ipc() must return target values and target errnos. */
  2810. static abi_long do_ipc(unsigned int call, abi_long first,
  2811. abi_long second, abi_long third,
  2812. abi_long ptr, abi_long fifth)
  2813. {
  2814. int version;
  2815. abi_long ret = 0;
  2816. version = call >> 16;
  2817. call &= 0xffff;
  2818. switch (call) {
  2819. case IPCOP_semop:
  2820. ret = do_semop(first, ptr, second);
  2821. break;
  2822. case IPCOP_semget:
  2823. ret = get_errno(semget(first, second, third));
  2824. break;
  2825. case IPCOP_semctl: {
  2826. /* The semun argument to semctl is passed by value, so dereference the
  2827. * ptr argument. */
abi_ulong atptr;
if (get_user_ual(atptr, ptr) != 0) {
    ret = -TARGET_EFAULT;
    break;
}
  2830. ret = do_semctl(first, second, third,
  2831. (union target_semun) atptr);
  2832. break;
  2833. }
  2834. case IPCOP_msgget:
  2835. ret = get_errno(msgget(first, second));
  2836. break;
  2837. case IPCOP_msgsnd:
  2838. ret = do_msgsnd(first, ptr, second, third);
  2839. break;
  2840. case IPCOP_msgctl:
  2841. ret = do_msgctl(first, second, ptr);
  2842. break;
  2843. case IPCOP_msgrcv:
  2844. switch (version) {
  2845. case 0:
  2846. {
  2847. struct target_ipc_kludge {
  2848. abi_long msgp;
  2849. abi_long msgtyp;
  2850. } *tmp;
  2851. if (!lock_user_struct(VERIFY_READ, tmp, ptr, 1)) {
  2852. ret = -TARGET_EFAULT;
  2853. break;
  2854. }
  2855. ret = do_msgrcv(first, tswapal(tmp->msgp), second, tswapal(tmp->msgtyp), third);
  2856. unlock_user_struct(tmp, ptr, 0);
  2857. break;
  2858. }
  2859. default:
  2860. ret = do_msgrcv(first, ptr, second, fifth, third);
  2861. }
  2862. break;
  2863. case IPCOP_shmat:
  2864. switch (version) {
  2865. default:
  2866. {
  2867. abi_ulong raddr;
  2868. raddr = do_shmat(first, ptr, second);
  2869. if (is_error(raddr))
  2870. return get_errno(raddr);
  2871. if (put_user_ual(raddr, third))
  2872. return -TARGET_EFAULT;
  2873. break;
  2874. }
  2875. case 1:
  2876. ret = -TARGET_EINVAL;
  2877. break;
  2878. }
  2879. break;
  2880. case IPCOP_shmdt:
  2881. ret = do_shmdt(ptr);
  2882. break;
  2883. case IPCOP_shmget:
  2884. /* IPC_* flag values are the same on all linux platforms */
  2885. ret = get_errno(shmget(first, second, third));
  2886. break;
  2887. /* IPC_* and SHM_* command values are the same on all linux platforms */
  2888. case IPCOP_shmctl:
  2889. ret = do_shmctl(first, second, ptr);
  2890. break;
  2891. default:
  2892. gemu_log("Unsupported ipc call: %d (version %d)\n", call, version);
  2893. ret = -TARGET_ENOSYS;
  2894. break;
  2895. }
  2896. return ret;
  2897. }
  2898. #endif
  2899. /* kernel structure types definitions */
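/*
 * syscall_types.h is included twice with different definitions of the
 * STRUCT()/STRUCT_SPECIAL() macros: the first pass generates an enum of
 * STRUCT_<name> identifiers, the second generates, for each structure, a
 * struct_<name>_def[] array of argtype tokens terminated by TYPE_NULL.
 * Roughly, STRUCT(foo, TYPE_SHORT, TYPE_INT) would expand first to an
 * enumerator STRUCT_foo and then to
 *   static const argtype struct_foo_def[] = { TYPE_SHORT, TYPE_INT, TYPE_NULL };
 * (the field list here is only an illustration, not an entry from the
 * real header).
 */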
  2900. #define STRUCT(name, ...) STRUCT_ ## name,
  2901. #define STRUCT_SPECIAL(name) STRUCT_ ## name,
  2902. enum {
  2903. #include "syscall_types.h"
  2904. };
  2905. #undef STRUCT
  2906. #undef STRUCT_SPECIAL
  2907. #define STRUCT(name, ...) static const argtype struct_ ## name ## _def[] = { __VA_ARGS__, TYPE_NULL };
  2908. #define STRUCT_SPECIAL(name)
  2909. #include "syscall_types.h"
  2910. #undef STRUCT
  2911. #undef STRUCT_SPECIAL
  2912. typedef struct IOCTLEntry IOCTLEntry;
  2913. typedef abi_long do_ioctl_fn(const IOCTLEntry *ie, uint8_t *buf_temp,
  2914. int fd, abi_long cmd, abi_long arg);
  2915. struct IOCTLEntry {
  2916. unsigned int target_cmd;
  2917. unsigned int host_cmd;
  2918. const char *name;
  2919. int access;
  2920. do_ioctl_fn *do_ioctl;
  2921. const argtype arg_type[5];
  2922. };
  2923. #define IOC_R 0x0001
  2924. #define IOC_W 0x0002
  2925. #define IOC_RW (IOC_R | IOC_W)
  2926. #define MAX_STRUCT_SIZE 4096
  2927. #ifdef CONFIG_FIEMAP
  2928. /* So fiemap access checks don't overflow on 32 bit systems.
  2929. * This is very slightly smaller than the limit imposed by
  2930. * the underlying kernel.
  2931. */
  2932. #define FIEMAP_MAX_EXTENTS ((UINT_MAX - sizeof(struct fiemap)) \
  2933. / sizeof(struct fiemap_extent))
  2934. static abi_long do_ioctl_fs_ioc_fiemap(const IOCTLEntry *ie, uint8_t *buf_temp,
  2935. int fd, abi_long cmd, abi_long arg)
  2936. {
  2937. /* The parameter for this ioctl is a struct fiemap followed
  2938. * by an array of struct fiemap_extent whose size is set
  2939. * in fiemap->fm_extent_count. The array is filled in by the
  2940. * ioctl.
  2941. */
  2942. int target_size_in, target_size_out;
  2943. struct fiemap *fm;
  2944. const argtype *arg_type = ie->arg_type;
  2945. const argtype extent_arg_type[] = { MK_STRUCT(STRUCT_fiemap_extent) };
  2946. void *argptr, *p;
  2947. abi_long ret;
  2948. int i, extent_size = thunk_type_size(extent_arg_type, 0);
  2949. uint32_t outbufsz;
  2950. int free_fm = 0;
  2951. assert(arg_type[0] == TYPE_PTR);
  2952. assert(ie->access == IOC_RW);
  2953. arg_type++;
  2954. target_size_in = thunk_type_size(arg_type, 0);
  2955. argptr = lock_user(VERIFY_READ, arg, target_size_in, 1);
  2956. if (!argptr) {
  2957. return -TARGET_EFAULT;
  2958. }
  2959. thunk_convert(buf_temp, argptr, arg_type, THUNK_HOST);
  2960. unlock_user(argptr, arg, 0);
  2961. fm = (struct fiemap *)buf_temp;
  2962. if (fm->fm_extent_count > FIEMAP_MAX_EXTENTS) {
  2963. return -TARGET_EINVAL;
  2964. }
  2965. outbufsz = sizeof (*fm) +
  2966. (sizeof(struct fiemap_extent) * fm->fm_extent_count);
  2967. if (outbufsz > MAX_STRUCT_SIZE) {
  2968. /* We can't fit all the extents into the fixed size buffer.
  2969. * Allocate one that is large enough and use it instead.
  2970. */
  2971. fm = malloc(outbufsz);
  2972. if (!fm) {
  2973. return -TARGET_ENOMEM;
  2974. }
  2975. memcpy(fm, buf_temp, sizeof(struct fiemap));
  2976. free_fm = 1;
  2977. }
  2978. ret = get_errno(ioctl(fd, ie->host_cmd, fm));
  2979. if (!is_error(ret)) {
  2980. target_size_out = target_size_in;
  2981. /* An extent_count of 0 means we were only counting the extents
  2982. * so there are no structs to copy
  2983. */
  2984. if (fm->fm_extent_count != 0) {
  2985. target_size_out += fm->fm_mapped_extents * extent_size;
  2986. }
  2987. argptr = lock_user(VERIFY_WRITE, arg, target_size_out, 0);
  2988. if (!argptr) {
  2989. ret = -TARGET_EFAULT;
  2990. } else {
  2991. /* Convert the struct fiemap */
  2992. thunk_convert(argptr, fm, arg_type, THUNK_TARGET);
  2993. if (fm->fm_extent_count != 0) {
  2994. p = argptr + target_size_in;
  2995. /* ...and then all the struct fiemap_extents */
  2996. for (i = 0; i < fm->fm_mapped_extents; i++) {
  2997. thunk_convert(p, &fm->fm_extents[i], extent_arg_type,
  2998. THUNK_TARGET);
  2999. p += extent_size;
  3000. }
  3001. }
  3002. unlock_user(argptr, arg, target_size_out);
  3003. }
  3004. }
  3005. if (free_fm) {
  3006. free(fm);
  3007. }
  3008. return ret;
  3009. }
  3010. #endif
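/*
 * SIOCGIFCONF: the guest passes a struct ifconf whose ifc_buf points at a
 * guest buffer of target-format ifreq entries.  The header is converted to
 * host form, the kernel fills a host-side ifreq array, then the length and
 * each ifreq entry are converted back to target format and the guest's
 * original ifc_buf pointer is restored.
 */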
  3011. static abi_long do_ioctl_ifconf(const IOCTLEntry *ie, uint8_t *buf_temp,
  3012. int fd, abi_long cmd, abi_long arg)
  3013. {
  3014. const argtype *arg_type = ie->arg_type;
  3015. int target_size;
  3016. void *argptr;
  3017. int ret;
  3018. struct ifconf *host_ifconf;
  3019. uint32_t outbufsz;
  3020. const argtype ifreq_arg_type[] = { MK_STRUCT(STRUCT_sockaddr_ifreq) };
  3021. int target_ifreq_size;
  3022. int nb_ifreq;
  3023. int free_buf = 0;
  3024. int i;
  3025. int target_ifc_len;
  3026. abi_long target_ifc_buf;
  3027. int host_ifc_len;
  3028. char *host_ifc_buf;
  3029. assert(arg_type[0] == TYPE_PTR);
  3030. assert(ie->access == IOC_RW);
  3031. arg_type++;
  3032. target_size = thunk_type_size(arg_type, 0);
  3033. argptr = lock_user(VERIFY_READ, arg, target_size, 1);
  3034. if (!argptr)
  3035. return -TARGET_EFAULT;
  3036. thunk_convert(buf_temp, argptr, arg_type, THUNK_HOST);
  3037. unlock_user(argptr, arg, 0);
  3038. host_ifconf = (struct ifconf *)(unsigned long)buf_temp;
  3039. target_ifc_len = host_ifconf->ifc_len;
  3040. target_ifc_buf = (abi_long)(unsigned long)host_ifconf->ifc_buf;
  3041. target_ifreq_size = thunk_type_size(ifreq_arg_type, 0);
  3042. nb_ifreq = target_ifc_len / target_ifreq_size;
  3043. host_ifc_len = nb_ifreq * sizeof(struct ifreq);
  3044. outbufsz = sizeof(*host_ifconf) + host_ifc_len;
  3045. if (outbufsz > MAX_STRUCT_SIZE) {
/* We can't fit all the requested ifreq entries into the fixed-size
 * buffer.  Allocate one that is large enough and use it instead.
 */
  3049. host_ifconf = malloc(outbufsz);
  3050. if (!host_ifconf) {
  3051. return -TARGET_ENOMEM;
  3052. }
  3053. memcpy(host_ifconf, buf_temp, sizeof(*host_ifconf));
  3054. free_buf = 1;
  3055. }
  3056. host_ifc_buf = (char*)host_ifconf + sizeof(*host_ifconf);
  3057. host_ifconf->ifc_len = host_ifc_len;
  3058. host_ifconf->ifc_buf = host_ifc_buf;
  3059. ret = get_errno(ioctl(fd, ie->host_cmd, host_ifconf));
  3060. if (!is_error(ret)) {
  3061. /* convert host ifc_len to target ifc_len */
  3062. nb_ifreq = host_ifconf->ifc_len / sizeof(struct ifreq);
  3063. target_ifc_len = nb_ifreq * target_ifreq_size;
  3064. host_ifconf->ifc_len = target_ifc_len;
  3065. /* restore target ifc_buf */
  3066. host_ifconf->ifc_buf = (char *)(unsigned long)target_ifc_buf;
  3067. /* copy struct ifconf to target user */
  3068. argptr = lock_user(VERIFY_WRITE, arg, target_size, 0);
  3069. if (!argptr)
  3070. return -TARGET_EFAULT;
  3071. thunk_convert(argptr, host_ifconf, arg_type, THUNK_TARGET);
  3072. unlock_user(argptr, arg, target_size);
  3073. /* copy ifreq[] to target user */
argptr = lock_user(VERIFY_WRITE, target_ifc_buf, target_ifc_len, 0);
if (!argptr)
    return -TARGET_EFAULT;
  3075. for (i = 0; i < nb_ifreq ; i++) {
  3076. thunk_convert(argptr + i * target_ifreq_size,
  3077. host_ifc_buf + i * sizeof(struct ifreq),
  3078. ifreq_arg_type, THUNK_TARGET);
  3079. }
  3080. unlock_user(argptr, target_ifc_buf, target_ifc_len);
  3081. }
  3082. if (free_buf) {
  3083. free(host_ifconf);
  3084. }
  3085. return ret;
  3086. }
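/*
 * Device-mapper ioctls share one header (struct dm_ioctl) followed by a
 * variable-size payload whose layout depends on the command; data_start and
 * data_size in the header describe where that payload lives.  buf_temp is
 * usually too small for the payload, so a larger buffer is allocated and
 * the per-command blocks below convert the payload in each direction.
 */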
  3087. static abi_long do_ioctl_dm(const IOCTLEntry *ie, uint8_t *buf_temp, int fd,
  3088. abi_long cmd, abi_long arg)
  3089. {
  3090. void *argptr;
  3091. struct dm_ioctl *host_dm;
  3092. abi_long guest_data;
  3093. uint32_t guest_data_size;
  3094. int target_size;
  3095. const argtype *arg_type = ie->arg_type;
  3096. abi_long ret;
  3097. void *big_buf = NULL;
  3098. char *host_data;
  3099. arg_type++;
  3100. target_size = thunk_type_size(arg_type, 0);
  3101. argptr = lock_user(VERIFY_READ, arg, target_size, 1);
  3102. if (!argptr) {
  3103. ret = -TARGET_EFAULT;
  3104. goto out;
  3105. }
  3106. thunk_convert(buf_temp, argptr, arg_type, THUNK_HOST);
  3107. unlock_user(argptr, arg, 0);
  3108. /* buf_temp is too small, so fetch things into a bigger buffer */
  3109. big_buf = g_malloc0(((struct dm_ioctl*)buf_temp)->data_size * 2);
  3110. memcpy(big_buf, buf_temp, target_size);
  3111. buf_temp = big_buf;
  3112. host_dm = big_buf;
  3113. guest_data = arg + host_dm->data_start;
  3114. if ((guest_data - arg) < 0) {
  3115. ret = -EINVAL;
  3116. goto out;
  3117. }
  3118. guest_data_size = host_dm->data_size - host_dm->data_start;
  3119. host_data = (char*)host_dm + host_dm->data_start;
argptr = lock_user(VERIFY_READ, guest_data, guest_data_size, 1);
if (!argptr) {
    ret = -TARGET_EFAULT;
    goto out;
}
  3121. switch (ie->host_cmd) {
  3122. case DM_REMOVE_ALL:
  3123. case DM_LIST_DEVICES:
  3124. case DM_DEV_CREATE:
  3125. case DM_DEV_REMOVE:
  3126. case DM_DEV_SUSPEND:
  3127. case DM_DEV_STATUS:
  3128. case DM_DEV_WAIT:
  3129. case DM_TABLE_STATUS:
  3130. case DM_TABLE_CLEAR:
  3131. case DM_TABLE_DEPS:
  3132. case DM_LIST_VERSIONS:
  3133. /* no input data */
  3134. break;
  3135. case DM_DEV_RENAME:
  3136. case DM_DEV_SET_GEOMETRY:
  3137. /* data contains only strings */
  3138. memcpy(host_data, argptr, guest_data_size);
  3139. break;
  3140. case DM_TARGET_MSG:
  3141. memcpy(host_data, argptr, guest_data_size);
  3142. *(uint64_t*)host_data = tswap64(*(uint64_t*)argptr);
  3143. break;
  3144. case DM_TABLE_LOAD:
  3145. {
  3146. void *gspec = argptr;
  3147. void *cur_data = host_data;
  3148. const argtype arg_type[] = { MK_STRUCT(STRUCT_dm_target_spec) };
  3149. int spec_size = thunk_type_size(arg_type, 0);
  3150. int i;
  3151. for (i = 0; i < host_dm->target_count; i++) {
  3152. struct dm_target_spec *spec = cur_data;
  3153. uint32_t next;
  3154. int slen;
  3155. thunk_convert(spec, gspec, arg_type, THUNK_HOST);
  3156. slen = strlen((char*)gspec + spec_size) + 1;
  3157. next = spec->next;
  3158. spec->next = sizeof(*spec) + slen;
  3159. strcpy((char*)&spec[1], gspec + spec_size);
  3160. gspec += next;
  3161. cur_data += spec->next;
  3162. }
  3163. break;
  3164. }
  3165. default:
  3166. ret = -TARGET_EINVAL;
  3167. goto out;
  3168. }
  3169. unlock_user(argptr, guest_data, 0);
  3170. ret = get_errno(ioctl(fd, ie->host_cmd, buf_temp));
  3171. if (!is_error(ret)) {
  3172. guest_data = arg + host_dm->data_start;
  3173. guest_data_size = host_dm->data_size - host_dm->data_start;
argptr = lock_user(VERIFY_WRITE, guest_data, guest_data_size, 0);
if (!argptr) {
    ret = -TARGET_EFAULT;
    goto out;
}
  3175. switch (ie->host_cmd) {
  3176. case DM_REMOVE_ALL:
  3177. case DM_DEV_CREATE:
  3178. case DM_DEV_REMOVE:
  3179. case DM_DEV_RENAME:
  3180. case DM_DEV_SUSPEND:
  3181. case DM_DEV_STATUS:
  3182. case DM_TABLE_LOAD:
  3183. case DM_TABLE_CLEAR:
  3184. case DM_TARGET_MSG:
  3185. case DM_DEV_SET_GEOMETRY:
  3186. /* no return data */
  3187. break;
  3188. case DM_LIST_DEVICES:
  3189. {
  3190. struct dm_name_list *nl = (void*)host_dm + host_dm->data_start;
  3191. uint32_t remaining_data = guest_data_size;
  3192. void *cur_data = argptr;
  3193. const argtype arg_type[] = { MK_STRUCT(STRUCT_dm_name_list) };
  3194. int nl_size = 12; /* can't use thunk_size due to alignment */
  3195. while (1) {
  3196. uint32_t next = nl->next;
  3197. if (next) {
  3198. nl->next = nl_size + (strlen(nl->name) + 1);
  3199. }
  3200. if (remaining_data < nl->next) {
  3201. host_dm->flags |= DM_BUFFER_FULL_FLAG;
  3202. break;
  3203. }
  3204. thunk_convert(cur_data, nl, arg_type, THUNK_TARGET);
  3205. strcpy(cur_data + nl_size, nl->name);
  3206. cur_data += nl->next;
  3207. remaining_data -= nl->next;
  3208. if (!next) {
  3209. break;
  3210. }
  3211. nl = (void*)nl + next;
  3212. }
  3213. break;
  3214. }
  3215. case DM_DEV_WAIT:
  3216. case DM_TABLE_STATUS:
  3217. {
  3218. struct dm_target_spec *spec = (void*)host_dm + host_dm->data_start;
  3219. void *cur_data = argptr;
  3220. const argtype arg_type[] = { MK_STRUCT(STRUCT_dm_target_spec) };
  3221. int spec_size = thunk_type_size(arg_type, 0);
  3222. int i;
  3223. for (i = 0; i < host_dm->target_count; i++) {
  3224. uint32_t next = spec->next;
  3225. int slen = strlen((char*)&spec[1]) + 1;
  3226. spec->next = (cur_data - argptr) + spec_size + slen;
  3227. if (guest_data_size < spec->next) {
  3228. host_dm->flags |= DM_BUFFER_FULL_FLAG;
  3229. break;
  3230. }
  3231. thunk_convert(cur_data, spec, arg_type, THUNK_TARGET);
  3232. strcpy(cur_data + spec_size, (char*)&spec[1]);
  3233. cur_data = argptr + spec->next;
  3234. spec = (void*)host_dm + host_dm->data_start + next;
  3235. }
  3236. break;
  3237. }
  3238. case DM_TABLE_DEPS:
  3239. {
  3240. void *hdata = (void*)host_dm + host_dm->data_start;
  3241. int count = *(uint32_t*)hdata;
  3242. uint64_t *hdev = hdata + 8;
  3243. uint64_t *gdev = argptr + 8;
  3244. int i;
  3245. *(uint32_t*)argptr = tswap32(count);
  3246. for (i = 0; i < count; i++) {
  3247. *gdev = tswap64(*hdev);
  3248. gdev++;
  3249. hdev++;
  3250. }
  3251. break;
  3252. }
  3253. case DM_LIST_VERSIONS:
  3254. {
  3255. struct dm_target_versions *vers = (void*)host_dm + host_dm->data_start;
  3256. uint32_t remaining_data = guest_data_size;
  3257. void *cur_data = argptr;
  3258. const argtype arg_type[] = { MK_STRUCT(STRUCT_dm_target_versions) };
  3259. int vers_size = thunk_type_size(arg_type, 0);
  3260. while (1) {
  3261. uint32_t next = vers->next;
  3262. if (next) {
  3263. vers->next = vers_size + (strlen(vers->name) + 1);
  3264. }
  3265. if (remaining_data < vers->next) {
  3266. host_dm->flags |= DM_BUFFER_FULL_FLAG;
  3267. break;
  3268. }
  3269. thunk_convert(cur_data, vers, arg_type, THUNK_TARGET);
  3270. strcpy(cur_data + vers_size, vers->name);
  3271. cur_data += vers->next;
  3272. remaining_data -= vers->next;
  3273. if (!next) {
  3274. break;
  3275. }
  3276. vers = (void*)vers + next;
  3277. }
  3278. break;
  3279. }
  3280. default:
  3281. ret = -TARGET_EINVAL;
  3282. goto out;
  3283. }
  3284. unlock_user(argptr, guest_data, guest_data_size);
  3285. argptr = lock_user(VERIFY_WRITE, arg, target_size, 0);
  3286. if (!argptr) {
  3287. ret = -TARGET_EFAULT;
  3288. goto out;
  3289. }
  3290. thunk_convert(argptr, buf_temp, arg_type, THUNK_TARGET);
  3291. unlock_user(argptr, arg, target_size);
  3292. }
  3293. out:
  3294. g_free(big_buf);
  3295. return ret;
  3296. }
  3297. static abi_long do_ioctl_blkpg(const IOCTLEntry *ie, uint8_t *buf_temp, int fd,
  3298. abi_long cmd, abi_long arg)
  3299. {
  3300. void *argptr;
  3301. int target_size;
  3302. const argtype *arg_type = ie->arg_type;
  3303. const argtype part_arg_type[] = { MK_STRUCT(STRUCT_blkpg_partition) };
  3304. abi_long ret;
  3305. struct blkpg_ioctl_arg *host_blkpg = (void*)buf_temp;
  3306. struct blkpg_partition host_part;
  3307. /* Read and convert blkpg */
  3308. arg_type++;
  3309. target_size = thunk_type_size(arg_type, 0);
  3310. argptr = lock_user(VERIFY_READ, arg, target_size, 1);
  3311. if (!argptr) {
  3312. ret = -TARGET_EFAULT;
  3313. goto out;
  3314. }
  3315. thunk_convert(buf_temp, argptr, arg_type, THUNK_HOST);
  3316. unlock_user(argptr, arg, 0);
  3317. switch (host_blkpg->op) {
  3318. case BLKPG_ADD_PARTITION:
  3319. case BLKPG_DEL_PARTITION:
  3320. /* payload is struct blkpg_partition */
  3321. break;
  3322. default:
  3323. /* Unknown opcode */
  3324. ret = -TARGET_EINVAL;
  3325. goto out;
  3326. }
  3327. /* Read and convert blkpg->data */
  3328. arg = (abi_long)(uintptr_t)host_blkpg->data;
  3329. target_size = thunk_type_size(part_arg_type, 0);
  3330. argptr = lock_user(VERIFY_READ, arg, target_size, 1);
  3331. if (!argptr) {
  3332. ret = -TARGET_EFAULT;
  3333. goto out;
  3334. }
  3335. thunk_convert(&host_part, argptr, part_arg_type, THUNK_HOST);
  3336. unlock_user(argptr, arg, 0);
  3337. /* Swizzle the data pointer to our local copy and call! */
  3338. host_blkpg->data = &host_part;
  3339. ret = get_errno(ioctl(fd, ie->host_cmd, host_blkpg));
  3340. out:
  3341. return ret;
  3342. }
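/*
 * SIOCADDRT/SIOCDELRT pass a struct rtentry whose rt_dev member is a char
 * pointer into guest memory.  The generic thunk cannot follow that pointer,
 * so the structure is converted field by field here and rt_dev is locked as
 * a guest string and handed to the host ioctl directly.
 */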
  3343. static abi_long do_ioctl_rt(const IOCTLEntry *ie, uint8_t *buf_temp,
  3344. int fd, abi_long cmd, abi_long arg)
  3345. {
  3346. const argtype *arg_type = ie->arg_type;
  3347. const StructEntry *se;
  3348. const argtype *field_types;
  3349. const int *dst_offsets, *src_offsets;
  3350. int target_size;
  3351. void *argptr;
abi_ulong *target_rt_dev_ptr = NULL;
unsigned long *host_rt_dev_ptr = NULL;
  3354. abi_long ret;
  3355. int i;
  3356. assert(ie->access == IOC_W);
  3357. assert(*arg_type == TYPE_PTR);
  3358. arg_type++;
  3359. assert(*arg_type == TYPE_STRUCT);
  3360. target_size = thunk_type_size(arg_type, 0);
  3361. argptr = lock_user(VERIFY_READ, arg, target_size, 1);
  3362. if (!argptr) {
  3363. return -TARGET_EFAULT;
  3364. }
  3365. arg_type++;
  3366. assert(*arg_type == (int)STRUCT_rtentry);
  3367. se = struct_entries + *arg_type++;
  3368. assert(se->convert[0] == NULL);
  3369. /* convert struct here to be able to catch rt_dev string */
  3370. field_types = se->field_types;
  3371. dst_offsets = se->field_offsets[THUNK_HOST];
  3372. src_offsets = se->field_offsets[THUNK_TARGET];
  3373. for (i = 0; i < se->nb_fields; i++) {
  3374. if (dst_offsets[i] == offsetof(struct rtentry, rt_dev)) {
  3375. assert(*field_types == TYPE_PTRVOID);
  3376. target_rt_dev_ptr = (abi_ulong *)(argptr + src_offsets[i]);
  3377. host_rt_dev_ptr = (unsigned long *)(buf_temp + dst_offsets[i]);
  3378. if (*target_rt_dev_ptr != 0) {
  3379. *host_rt_dev_ptr = (unsigned long)lock_user_string(
  3380. tswapal(*target_rt_dev_ptr));
  3381. if (!*host_rt_dev_ptr) {
  3382. unlock_user(argptr, arg, 0);
  3383. return -TARGET_EFAULT;
  3384. }
  3385. } else {
  3386. *host_rt_dev_ptr = 0;
  3387. }
  3388. field_types++;
  3389. continue;
  3390. }
  3391. field_types = thunk_convert(buf_temp + dst_offsets[i],
  3392. argptr + src_offsets[i],
  3393. field_types, THUNK_HOST);
  3394. }
  3395. unlock_user(argptr, arg, 0);
  3396. ret = get_errno(ioctl(fd, ie->host_cmd, buf_temp));
if (host_rt_dev_ptr && *host_rt_dev_ptr) {
  3398. unlock_user((void *)*host_rt_dev_ptr,
  3399. *target_rt_dev_ptr, 0);
  3400. }
  3401. return ret;
  3402. }
  3403. static abi_long do_ioctl_kdsigaccept(const IOCTLEntry *ie, uint8_t *buf_temp,
  3404. int fd, abi_long cmd, abi_long arg)
  3405. {
  3406. int sig = target_to_host_signal(arg);
  3407. return get_errno(ioctl(fd, ie->host_cmd, sig));
  3408. }
  3409. static IOCTLEntry ioctl_entries[] = {
  3410. #define IOCTL(cmd, access, ...) \
  3411. { TARGET_ ## cmd, cmd, #cmd, access, 0, { __VA_ARGS__ } },
  3412. #define IOCTL_SPECIAL(cmd, access, dofn, ...) \
  3413. { TARGET_ ## cmd, cmd, #cmd, access, dofn, { __VA_ARGS__ } },
  3414. #include "ioctls.h"
  3415. { 0, 0, },
  3416. };
  3417. /* ??? Implement proper locking for ioctls. */
  3418. /* do_ioctl() Must return target values and target errnos. */
  3419. static abi_long do_ioctl(int fd, abi_long cmd, abi_long arg)
  3420. {
  3421. const IOCTLEntry *ie;
  3422. const argtype *arg_type;
  3423. abi_long ret;
  3424. uint8_t buf_temp[MAX_STRUCT_SIZE];
  3425. int target_size;
  3426. void *argptr;
  3427. ie = ioctl_entries;
  3428. for(;;) {
  3429. if (ie->target_cmd == 0) {
  3430. gemu_log("Unsupported ioctl: cmd=0x%04lx\n", (long)cmd);
  3431. return -TARGET_ENOSYS;
  3432. }
  3433. if (ie->target_cmd == cmd)
  3434. break;
  3435. ie++;
  3436. }
  3437. arg_type = ie->arg_type;
  3438. #if defined(DEBUG)
  3439. gemu_log("ioctl: cmd=0x%04lx (%s)\n", (long)cmd, ie->name);
  3440. #endif
  3441. if (ie->do_ioctl) {
  3442. return ie->do_ioctl(ie, buf_temp, fd, cmd, arg);
  3443. }
  3444. switch(arg_type[0]) {
  3445. case TYPE_NULL:
  3446. /* no argument */
  3447. ret = get_errno(ioctl(fd, ie->host_cmd));
  3448. break;
  3449. case TYPE_PTRVOID:
  3450. case TYPE_INT:
/* int argument */
  3452. ret = get_errno(ioctl(fd, ie->host_cmd, arg));
  3453. break;
  3454. case TYPE_PTR:
  3455. arg_type++;
  3456. target_size = thunk_type_size(arg_type, 0);
  3457. switch(ie->access) {
  3458. case IOC_R:
  3459. ret = get_errno(ioctl(fd, ie->host_cmd, buf_temp));
  3460. if (!is_error(ret)) {
  3461. argptr = lock_user(VERIFY_WRITE, arg, target_size, 0);
  3462. if (!argptr)
  3463. return -TARGET_EFAULT;
  3464. thunk_convert(argptr, buf_temp, arg_type, THUNK_TARGET);
  3465. unlock_user(argptr, arg, target_size);
  3466. }
  3467. break;
  3468. case IOC_W:
  3469. argptr = lock_user(VERIFY_READ, arg, target_size, 1);
  3470. if (!argptr)
  3471. return -TARGET_EFAULT;
  3472. thunk_convert(buf_temp, argptr, arg_type, THUNK_HOST);
  3473. unlock_user(argptr, arg, 0);
  3474. ret = get_errno(ioctl(fd, ie->host_cmd, buf_temp));
  3475. break;
  3476. default:
  3477. case IOC_RW:
  3478. argptr = lock_user(VERIFY_READ, arg, target_size, 1);
  3479. if (!argptr)
  3480. return -TARGET_EFAULT;
  3481. thunk_convert(buf_temp, argptr, arg_type, THUNK_HOST);
  3482. unlock_user(argptr, arg, 0);
  3483. ret = get_errno(ioctl(fd, ie->host_cmd, buf_temp));
  3484. if (!is_error(ret)) {
  3485. argptr = lock_user(VERIFY_WRITE, arg, target_size, 0);
  3486. if (!argptr)
  3487. return -TARGET_EFAULT;
  3488. thunk_convert(argptr, buf_temp, arg_type, THUNK_TARGET);
  3489. unlock_user(argptr, arg, target_size);
  3490. }
  3491. break;
  3492. }
  3493. break;
  3494. default:
  3495. gemu_log("Unsupported ioctl type: cmd=0x%04lx type=%d\n",
  3496. (long)cmd, arg_type[0]);
  3497. ret = -TARGET_ENOSYS;
  3498. break;
  3499. }
  3500. return ret;
  3501. }
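/*
 * Terminal flag translation: each bitmask_transtbl entry below is
 *   { target_mask, target_bits, host_mask, host_bits }
 * so target_to_host_bitmask()/host_to_target_bitmask() can map multi-bit
 * fields (e.g. CBAUD, CSIZE, the delay fields) as well as single flags
 * between guest and host termios encodings.
 */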
  3502. static const bitmask_transtbl iflag_tbl[] = {
  3503. { TARGET_IGNBRK, TARGET_IGNBRK, IGNBRK, IGNBRK },
  3504. { TARGET_BRKINT, TARGET_BRKINT, BRKINT, BRKINT },
  3505. { TARGET_IGNPAR, TARGET_IGNPAR, IGNPAR, IGNPAR },
  3506. { TARGET_PARMRK, TARGET_PARMRK, PARMRK, PARMRK },
  3507. { TARGET_INPCK, TARGET_INPCK, INPCK, INPCK },
  3508. { TARGET_ISTRIP, TARGET_ISTRIP, ISTRIP, ISTRIP },
  3509. { TARGET_INLCR, TARGET_INLCR, INLCR, INLCR },
  3510. { TARGET_IGNCR, TARGET_IGNCR, IGNCR, IGNCR },
  3511. { TARGET_ICRNL, TARGET_ICRNL, ICRNL, ICRNL },
  3512. { TARGET_IUCLC, TARGET_IUCLC, IUCLC, IUCLC },
  3513. { TARGET_IXON, TARGET_IXON, IXON, IXON },
  3514. { TARGET_IXANY, TARGET_IXANY, IXANY, IXANY },
  3515. { TARGET_IXOFF, TARGET_IXOFF, IXOFF, IXOFF },
  3516. { TARGET_IMAXBEL, TARGET_IMAXBEL, IMAXBEL, IMAXBEL },
  3517. { 0, 0, 0, 0 }
  3518. };
  3519. static const bitmask_transtbl oflag_tbl[] = {
  3520. { TARGET_OPOST, TARGET_OPOST, OPOST, OPOST },
  3521. { TARGET_OLCUC, TARGET_OLCUC, OLCUC, OLCUC },
  3522. { TARGET_ONLCR, TARGET_ONLCR, ONLCR, ONLCR },
  3523. { TARGET_OCRNL, TARGET_OCRNL, OCRNL, OCRNL },
  3524. { TARGET_ONOCR, TARGET_ONOCR, ONOCR, ONOCR },
  3525. { TARGET_ONLRET, TARGET_ONLRET, ONLRET, ONLRET },
  3526. { TARGET_OFILL, TARGET_OFILL, OFILL, OFILL },
  3527. { TARGET_OFDEL, TARGET_OFDEL, OFDEL, OFDEL },
  3528. { TARGET_NLDLY, TARGET_NL0, NLDLY, NL0 },
  3529. { TARGET_NLDLY, TARGET_NL1, NLDLY, NL1 },
  3530. { TARGET_CRDLY, TARGET_CR0, CRDLY, CR0 },
  3531. { TARGET_CRDLY, TARGET_CR1, CRDLY, CR1 },
  3532. { TARGET_CRDLY, TARGET_CR2, CRDLY, CR2 },
  3533. { TARGET_CRDLY, TARGET_CR3, CRDLY, CR3 },
  3534. { TARGET_TABDLY, TARGET_TAB0, TABDLY, TAB0 },
  3535. { TARGET_TABDLY, TARGET_TAB1, TABDLY, TAB1 },
  3536. { TARGET_TABDLY, TARGET_TAB2, TABDLY, TAB2 },
  3537. { TARGET_TABDLY, TARGET_TAB3, TABDLY, TAB3 },
  3538. { TARGET_BSDLY, TARGET_BS0, BSDLY, BS0 },
  3539. { TARGET_BSDLY, TARGET_BS1, BSDLY, BS1 },
  3540. { TARGET_VTDLY, TARGET_VT0, VTDLY, VT0 },
  3541. { TARGET_VTDLY, TARGET_VT1, VTDLY, VT1 },
  3542. { TARGET_FFDLY, TARGET_FF0, FFDLY, FF0 },
  3543. { TARGET_FFDLY, TARGET_FF1, FFDLY, FF1 },
  3544. { 0, 0, 0, 0 }
  3545. };
  3546. static const bitmask_transtbl cflag_tbl[] = {
  3547. { TARGET_CBAUD, TARGET_B0, CBAUD, B0 },
  3548. { TARGET_CBAUD, TARGET_B50, CBAUD, B50 },
  3549. { TARGET_CBAUD, TARGET_B75, CBAUD, B75 },
  3550. { TARGET_CBAUD, TARGET_B110, CBAUD, B110 },
  3551. { TARGET_CBAUD, TARGET_B134, CBAUD, B134 },
  3552. { TARGET_CBAUD, TARGET_B150, CBAUD, B150 },
  3553. { TARGET_CBAUD, TARGET_B200, CBAUD, B200 },
  3554. { TARGET_CBAUD, TARGET_B300, CBAUD, B300 },
  3555. { TARGET_CBAUD, TARGET_B600, CBAUD, B600 },
  3556. { TARGET_CBAUD, TARGET_B1200, CBAUD, B1200 },
  3557. { TARGET_CBAUD, TARGET_B1800, CBAUD, B1800 },
  3558. { TARGET_CBAUD, TARGET_B2400, CBAUD, B2400 },
  3559. { TARGET_CBAUD, TARGET_B4800, CBAUD, B4800 },
  3560. { TARGET_CBAUD, TARGET_B9600, CBAUD, B9600 },
  3561. { TARGET_CBAUD, TARGET_B19200, CBAUD, B19200 },
  3562. { TARGET_CBAUD, TARGET_B38400, CBAUD, B38400 },
  3563. { TARGET_CBAUD, TARGET_B57600, CBAUD, B57600 },
  3564. { TARGET_CBAUD, TARGET_B115200, CBAUD, B115200 },
  3565. { TARGET_CBAUD, TARGET_B230400, CBAUD, B230400 },
  3566. { TARGET_CBAUD, TARGET_B460800, CBAUD, B460800 },
  3567. { TARGET_CSIZE, TARGET_CS5, CSIZE, CS5 },
  3568. { TARGET_CSIZE, TARGET_CS6, CSIZE, CS6 },
  3569. { TARGET_CSIZE, TARGET_CS7, CSIZE, CS7 },
  3570. { TARGET_CSIZE, TARGET_CS8, CSIZE, CS8 },
  3571. { TARGET_CSTOPB, TARGET_CSTOPB, CSTOPB, CSTOPB },
  3572. { TARGET_CREAD, TARGET_CREAD, CREAD, CREAD },
  3573. { TARGET_PARENB, TARGET_PARENB, PARENB, PARENB },
  3574. { TARGET_PARODD, TARGET_PARODD, PARODD, PARODD },
  3575. { TARGET_HUPCL, TARGET_HUPCL, HUPCL, HUPCL },
  3576. { TARGET_CLOCAL, TARGET_CLOCAL, CLOCAL, CLOCAL },
  3577. { TARGET_CRTSCTS, TARGET_CRTSCTS, CRTSCTS, CRTSCTS },
  3578. { 0, 0, 0, 0 }
  3579. };
  3580. static const bitmask_transtbl lflag_tbl[] = {
  3581. { TARGET_ISIG, TARGET_ISIG, ISIG, ISIG },
  3582. { TARGET_ICANON, TARGET_ICANON, ICANON, ICANON },
  3583. { TARGET_XCASE, TARGET_XCASE, XCASE, XCASE },
  3584. { TARGET_ECHO, TARGET_ECHO, ECHO, ECHO },
  3585. { TARGET_ECHOE, TARGET_ECHOE, ECHOE, ECHOE },
  3586. { TARGET_ECHOK, TARGET_ECHOK, ECHOK, ECHOK },
  3587. { TARGET_ECHONL, TARGET_ECHONL, ECHONL, ECHONL },
  3588. { TARGET_NOFLSH, TARGET_NOFLSH, NOFLSH, NOFLSH },
  3589. { TARGET_TOSTOP, TARGET_TOSTOP, TOSTOP, TOSTOP },
  3590. { TARGET_ECHOCTL, TARGET_ECHOCTL, ECHOCTL, ECHOCTL },
  3591. { TARGET_ECHOPRT, TARGET_ECHOPRT, ECHOPRT, ECHOPRT },
  3592. { TARGET_ECHOKE, TARGET_ECHOKE, ECHOKE, ECHOKE },
  3593. { TARGET_FLUSHO, TARGET_FLUSHO, FLUSHO, FLUSHO },
  3594. { TARGET_PENDIN, TARGET_PENDIN, PENDIN, PENDIN },
  3595. { TARGET_IEXTEN, TARGET_IEXTEN, IEXTEN, IEXTEN },
  3596. { 0, 0, 0, 0 }
  3597. };
  3598. static void target_to_host_termios (void *dst, const void *src)
  3599. {
  3600. struct host_termios *host = dst;
  3601. const struct target_termios *target = src;
  3602. host->c_iflag =
  3603. target_to_host_bitmask(tswap32(target->c_iflag), iflag_tbl);
  3604. host->c_oflag =
  3605. target_to_host_bitmask(tswap32(target->c_oflag), oflag_tbl);
  3606. host->c_cflag =
  3607. target_to_host_bitmask(tswap32(target->c_cflag), cflag_tbl);
  3608. host->c_lflag =
  3609. target_to_host_bitmask(tswap32(target->c_lflag), lflag_tbl);
  3610. host->c_line = target->c_line;
  3611. memset(host->c_cc, 0, sizeof(host->c_cc));
  3612. host->c_cc[VINTR] = target->c_cc[TARGET_VINTR];
  3613. host->c_cc[VQUIT] = target->c_cc[TARGET_VQUIT];
  3614. host->c_cc[VERASE] = target->c_cc[TARGET_VERASE];
  3615. host->c_cc[VKILL] = target->c_cc[TARGET_VKILL];
  3616. host->c_cc[VEOF] = target->c_cc[TARGET_VEOF];
  3617. host->c_cc[VTIME] = target->c_cc[TARGET_VTIME];
  3618. host->c_cc[VMIN] = target->c_cc[TARGET_VMIN];
  3619. host->c_cc[VSWTC] = target->c_cc[TARGET_VSWTC];
  3620. host->c_cc[VSTART] = target->c_cc[TARGET_VSTART];
  3621. host->c_cc[VSTOP] = target->c_cc[TARGET_VSTOP];
  3622. host->c_cc[VSUSP] = target->c_cc[TARGET_VSUSP];
  3623. host->c_cc[VEOL] = target->c_cc[TARGET_VEOL];
  3624. host->c_cc[VREPRINT] = target->c_cc[TARGET_VREPRINT];
  3625. host->c_cc[VDISCARD] = target->c_cc[TARGET_VDISCARD];
  3626. host->c_cc[VWERASE] = target->c_cc[TARGET_VWERASE];
  3627. host->c_cc[VLNEXT] = target->c_cc[TARGET_VLNEXT];
  3628. host->c_cc[VEOL2] = target->c_cc[TARGET_VEOL2];
  3629. }
  3630. static void host_to_target_termios (void *dst, const void *src)
  3631. {
  3632. struct target_termios *target = dst;
  3633. const struct host_termios *host = src;
  3634. target->c_iflag =
  3635. tswap32(host_to_target_bitmask(host->c_iflag, iflag_tbl));
  3636. target->c_oflag =
  3637. tswap32(host_to_target_bitmask(host->c_oflag, oflag_tbl));
  3638. target->c_cflag =
  3639. tswap32(host_to_target_bitmask(host->c_cflag, cflag_tbl));
  3640. target->c_lflag =
  3641. tswap32(host_to_target_bitmask(host->c_lflag, lflag_tbl));
  3642. target->c_line = host->c_line;
  3643. memset(target->c_cc, 0, sizeof(target->c_cc));
  3644. target->c_cc[TARGET_VINTR] = host->c_cc[VINTR];
  3645. target->c_cc[TARGET_VQUIT] = host->c_cc[VQUIT];
  3646. target->c_cc[TARGET_VERASE] = host->c_cc[VERASE];
  3647. target->c_cc[TARGET_VKILL] = host->c_cc[VKILL];
  3648. target->c_cc[TARGET_VEOF] = host->c_cc[VEOF];
  3649. target->c_cc[TARGET_VTIME] = host->c_cc[VTIME];
  3650. target->c_cc[TARGET_VMIN] = host->c_cc[VMIN];
  3651. target->c_cc[TARGET_VSWTC] = host->c_cc[VSWTC];
  3652. target->c_cc[TARGET_VSTART] = host->c_cc[VSTART];
  3653. target->c_cc[TARGET_VSTOP] = host->c_cc[VSTOP];
  3654. target->c_cc[TARGET_VSUSP] = host->c_cc[VSUSP];
  3655. target->c_cc[TARGET_VEOL] = host->c_cc[VEOL];
  3656. target->c_cc[TARGET_VREPRINT] = host->c_cc[VREPRINT];
  3657. target->c_cc[TARGET_VDISCARD] = host->c_cc[VDISCARD];
  3658. target->c_cc[TARGET_VWERASE] = host->c_cc[VWERASE];
  3659. target->c_cc[TARGET_VLNEXT] = host->c_cc[VLNEXT];
  3660. target->c_cc[TARGET_VEOL2] = host->c_cc[VEOL2];
  3661. }
  3662. static const StructEntry struct_termios_def = {
  3663. .convert = { host_to_target_termios, target_to_host_termios },
  3664. .size = { sizeof(struct target_termios), sizeof(struct host_termios) },
  3665. .align = { __alignof__(struct target_termios), __alignof__(struct host_termios) },
  3666. };
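/* Translation table for mmap() flag bits between the target ABI and the host
   definitions. */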
  3667. static bitmask_transtbl mmap_flags_tbl[] = {
  3668. { TARGET_MAP_SHARED, TARGET_MAP_SHARED, MAP_SHARED, MAP_SHARED },
  3669. { TARGET_MAP_PRIVATE, TARGET_MAP_PRIVATE, MAP_PRIVATE, MAP_PRIVATE },
  3670. { TARGET_MAP_FIXED, TARGET_MAP_FIXED, MAP_FIXED, MAP_FIXED },
  3671. { TARGET_MAP_ANONYMOUS, TARGET_MAP_ANONYMOUS, MAP_ANONYMOUS, MAP_ANONYMOUS },
  3672. { TARGET_MAP_GROWSDOWN, TARGET_MAP_GROWSDOWN, MAP_GROWSDOWN, MAP_GROWSDOWN },
  3673. { TARGET_MAP_DENYWRITE, TARGET_MAP_DENYWRITE, MAP_DENYWRITE, MAP_DENYWRITE },
  3674. { TARGET_MAP_EXECUTABLE, TARGET_MAP_EXECUTABLE, MAP_EXECUTABLE, MAP_EXECUTABLE },
  3675. { TARGET_MAP_LOCKED, TARGET_MAP_LOCKED, MAP_LOCKED, MAP_LOCKED },
  3676. { TARGET_MAP_NORESERVE, TARGET_MAP_NORESERVE, MAP_NORESERVE,
  3677. MAP_NORESERVE },
  3678. { 0, 0, 0, 0 }
  3679. };
  3680. #if defined(TARGET_I386)
  3681. /* NOTE: there is really one LDT for all the threads */
  3682. static uint8_t *ldt_table;
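/* Copy up to bytecount bytes of the emulated LDT back to guest memory;
   returns the number of bytes copied, or 0 if no LDT has been set up yet. */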
  3683. static abi_long read_ldt(abi_ulong ptr, unsigned long bytecount)
  3684. {
  3685. int size;
  3686. void *p;
  3687. if (!ldt_table)
  3688. return 0;
  3689. size = TARGET_LDT_ENTRIES * TARGET_LDT_ENTRY_SIZE;
  3690. if (size > bytecount)
  3691. size = bytecount;
  3692. p = lock_user(VERIFY_WRITE, ptr, size, 0);
  3693. if (!p)
  3694. return -TARGET_EFAULT;
3695. /* ??? Should this be byteswapped? */
  3696. memcpy(p, ldt_table, size);
  3697. unlock_user(p, ptr, size);
  3698. return size;
  3699. }
  3700. /* XXX: add locking support */
  3701. static abi_long write_ldt(CPUX86State *env,
  3702. abi_ulong ptr, unsigned long bytecount, int oldmode)
  3703. {
  3704. struct target_modify_ldt_ldt_s ldt_info;
  3705. struct target_modify_ldt_ldt_s *target_ldt_info;
  3706. int seg_32bit, contents, read_exec_only, limit_in_pages;
  3707. int seg_not_present, useable, lm;
  3708. uint32_t *lp, entry_1, entry_2;
  3709. if (bytecount != sizeof(ldt_info))
  3710. return -TARGET_EINVAL;
  3711. if (!lock_user_struct(VERIFY_READ, target_ldt_info, ptr, 1))
  3712. return -TARGET_EFAULT;
  3713. ldt_info.entry_number = tswap32(target_ldt_info->entry_number);
  3714. ldt_info.base_addr = tswapal(target_ldt_info->base_addr);
  3715. ldt_info.limit = tswap32(target_ldt_info->limit);
  3716. ldt_info.flags = tswap32(target_ldt_info->flags);
  3717. unlock_user_struct(target_ldt_info, ptr, 0);
  3718. if (ldt_info.entry_number >= TARGET_LDT_ENTRIES)
  3719. return -TARGET_EINVAL;
  3720. seg_32bit = ldt_info.flags & 1;
  3721. contents = (ldt_info.flags >> 1) & 3;
  3722. read_exec_only = (ldt_info.flags >> 3) & 1;
  3723. limit_in_pages = (ldt_info.flags >> 4) & 1;
  3724. seg_not_present = (ldt_info.flags >> 5) & 1;
  3725. useable = (ldt_info.flags >> 6) & 1;
  3726. #ifdef TARGET_ABI32
  3727. lm = 0;
  3728. #else
  3729. lm = (ldt_info.flags >> 7) & 1;
  3730. #endif
  3731. if (contents == 3) {
  3732. if (oldmode)
  3733. return -TARGET_EINVAL;
  3734. if (seg_not_present == 0)
  3735. return -TARGET_EINVAL;
  3736. }
  3737. /* allocate the LDT */
  3738. if (!ldt_table) {
  3739. env->ldt.base = target_mmap(0,
  3740. TARGET_LDT_ENTRIES * TARGET_LDT_ENTRY_SIZE,
  3741. PROT_READ|PROT_WRITE,
  3742. MAP_ANONYMOUS|MAP_PRIVATE, -1, 0);
  3743. if (env->ldt.base == -1)
  3744. return -TARGET_ENOMEM;
  3745. memset(g2h(env->ldt.base), 0,
  3746. TARGET_LDT_ENTRIES * TARGET_LDT_ENTRY_SIZE);
  3747. env->ldt.limit = 0xffff;
  3748. ldt_table = g2h(env->ldt.base);
  3749. }
  3750. /* NOTE: same code as Linux kernel */
  3751. /* Allow LDTs to be cleared by the user. */
  3752. if (ldt_info.base_addr == 0 && ldt_info.limit == 0) {
  3753. if (oldmode ||
  3754. (contents == 0 &&
  3755. read_exec_only == 1 &&
  3756. seg_32bit == 0 &&
  3757. limit_in_pages == 0 &&
  3758. seg_not_present == 1 &&
  3759. useable == 0 )) {
  3760. entry_1 = 0;
  3761. entry_2 = 0;
  3762. goto install;
  3763. }
  3764. }
  3765. entry_1 = ((ldt_info.base_addr & 0x0000ffff) << 16) |
  3766. (ldt_info.limit & 0x0ffff);
  3767. entry_2 = (ldt_info.base_addr & 0xff000000) |
  3768. ((ldt_info.base_addr & 0x00ff0000) >> 16) |
  3769. (ldt_info.limit & 0xf0000) |
  3770. ((read_exec_only ^ 1) << 9) |
  3771. (contents << 10) |
  3772. ((seg_not_present ^ 1) << 15) |
  3773. (seg_32bit << 22) |
  3774. (limit_in_pages << 23) |
  3775. (lm << 21) |
  3776. 0x7000;
  3777. if (!oldmode)
  3778. entry_2 |= (useable << 20);
  3779. /* Install the new entry ... */
  3780. install:
  3781. lp = (uint32_t *)(ldt_table + (ldt_info.entry_number << 3));
  3782. lp[0] = tswap32(entry_1);
  3783. lp[1] = tswap32(entry_2);
  3784. return 0;
  3785. }
  3786. /* specific and weird i386 syscalls */
  3787. static abi_long do_modify_ldt(CPUX86State *env, int func, abi_ulong ptr,
  3788. unsigned long bytecount)
  3789. {
  3790. abi_long ret;
  3791. switch (func) {
  3792. case 0:
  3793. ret = read_ldt(ptr, bytecount);
  3794. break;
  3795. case 1:
  3796. ret = write_ldt(env, ptr, bytecount, 1);
  3797. break;
  3798. case 0x11:
  3799. ret = write_ldt(env, ptr, bytecount, 0);
  3800. break;
  3801. default:
  3802. ret = -TARGET_ENOSYS;
  3803. break;
  3804. }
  3805. return ret;
  3806. }
  3807. #if defined(TARGET_I386) && defined(TARGET_ABI32)
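/* set_thread_area: pick (or validate) a TLS entry in the GDT and encode the
   guest-supplied base/limit/flags into the two 32-bit descriptor words. */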
  3808. abi_long do_set_thread_area(CPUX86State *env, abi_ulong ptr)
  3809. {
  3810. uint64_t *gdt_table = g2h(env->gdt.base);
  3811. struct target_modify_ldt_ldt_s ldt_info;
  3812. struct target_modify_ldt_ldt_s *target_ldt_info;
  3813. int seg_32bit, contents, read_exec_only, limit_in_pages;
  3814. int seg_not_present, useable, lm;
  3815. uint32_t *lp, entry_1, entry_2;
  3816. int i;
  3817. lock_user_struct(VERIFY_WRITE, target_ldt_info, ptr, 1);
  3818. if (!target_ldt_info)
  3819. return -TARGET_EFAULT;
  3820. ldt_info.entry_number = tswap32(target_ldt_info->entry_number);
  3821. ldt_info.base_addr = tswapal(target_ldt_info->base_addr);
  3822. ldt_info.limit = tswap32(target_ldt_info->limit);
  3823. ldt_info.flags = tswap32(target_ldt_info->flags);
  3824. if (ldt_info.entry_number == -1) {
  3825. for (i=TARGET_GDT_ENTRY_TLS_MIN; i<=TARGET_GDT_ENTRY_TLS_MAX; i++) {
  3826. if (gdt_table[i] == 0) {
  3827. ldt_info.entry_number = i;
  3828. target_ldt_info->entry_number = tswap32(i);
  3829. break;
  3830. }
  3831. }
  3832. }
  3833. unlock_user_struct(target_ldt_info, ptr, 1);
  3834. if (ldt_info.entry_number < TARGET_GDT_ENTRY_TLS_MIN ||
  3835. ldt_info.entry_number > TARGET_GDT_ENTRY_TLS_MAX)
  3836. return -TARGET_EINVAL;
  3837. seg_32bit = ldt_info.flags & 1;
  3838. contents = (ldt_info.flags >> 1) & 3;
  3839. read_exec_only = (ldt_info.flags >> 3) & 1;
  3840. limit_in_pages = (ldt_info.flags >> 4) & 1;
  3841. seg_not_present = (ldt_info.flags >> 5) & 1;
  3842. useable = (ldt_info.flags >> 6) & 1;
  3843. #ifdef TARGET_ABI32
  3844. lm = 0;
  3845. #else
  3846. lm = (ldt_info.flags >> 7) & 1;
  3847. #endif
  3848. if (contents == 3) {
  3849. if (seg_not_present == 0)
  3850. return -TARGET_EINVAL;
  3851. }
  3852. /* NOTE: same code as Linux kernel */
  3853. /* Allow LDTs to be cleared by the user. */
  3854. if (ldt_info.base_addr == 0 && ldt_info.limit == 0) {
  3855. if ((contents == 0 &&
  3856. read_exec_only == 1 &&
  3857. seg_32bit == 0 &&
  3858. limit_in_pages == 0 &&
  3859. seg_not_present == 1 &&
  3860. useable == 0 )) {
  3861. entry_1 = 0;
  3862. entry_2 = 0;
  3863. goto install;
  3864. }
  3865. }
  3866. entry_1 = ((ldt_info.base_addr & 0x0000ffff) << 16) |
  3867. (ldt_info.limit & 0x0ffff);
  3868. entry_2 = (ldt_info.base_addr & 0xff000000) |
  3869. ((ldt_info.base_addr & 0x00ff0000) >> 16) |
  3870. (ldt_info.limit & 0xf0000) |
  3871. ((read_exec_only ^ 1) << 9) |
  3872. (contents << 10) |
  3873. ((seg_not_present ^ 1) << 15) |
  3874. (seg_32bit << 22) |
  3875. (limit_in_pages << 23) |
  3876. (useable << 20) |
  3877. (lm << 21) |
  3878. 0x7000;
  3879. /* Install the new entry ... */
  3880. install:
  3881. lp = (uint32_t *)(gdt_table + ldt_info.entry_number);
  3882. lp[0] = tswap32(entry_1);
  3883. lp[1] = tswap32(entry_2);
  3884. return 0;
  3885. }
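/* get_thread_area: decode the descriptor words of the requested GDT TLS entry
   back into base/limit/flags and write them to the guest structure. */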
  3886. static abi_long do_get_thread_area(CPUX86State *env, abi_ulong ptr)
  3887. {
  3888. struct target_modify_ldt_ldt_s *target_ldt_info;
  3889. uint64_t *gdt_table = g2h(env->gdt.base);
  3890. uint32_t base_addr, limit, flags;
  3891. int seg_32bit, contents, read_exec_only, limit_in_pages, idx;
  3892. int seg_not_present, useable, lm;
  3893. uint32_t *lp, entry_1, entry_2;
  3894. lock_user_struct(VERIFY_WRITE, target_ldt_info, ptr, 1);
  3895. if (!target_ldt_info)
  3896. return -TARGET_EFAULT;
  3897. idx = tswap32(target_ldt_info->entry_number);
  3898. if (idx < TARGET_GDT_ENTRY_TLS_MIN ||
  3899. idx > TARGET_GDT_ENTRY_TLS_MAX) {
  3900. unlock_user_struct(target_ldt_info, ptr, 1);
  3901. return -TARGET_EINVAL;
  3902. }
  3903. lp = (uint32_t *)(gdt_table + idx);
  3904. entry_1 = tswap32(lp[0]);
  3905. entry_2 = tswap32(lp[1]);
  3906. read_exec_only = ((entry_2 >> 9) & 1) ^ 1;
  3907. contents = (entry_2 >> 10) & 3;
  3908. seg_not_present = ((entry_2 >> 15) & 1) ^ 1;
  3909. seg_32bit = (entry_2 >> 22) & 1;
  3910. limit_in_pages = (entry_2 >> 23) & 1;
  3911. useable = (entry_2 >> 20) & 1;
  3912. #ifdef TARGET_ABI32
  3913. lm = 0;
  3914. #else
  3915. lm = (entry_2 >> 21) & 1;
  3916. #endif
  3917. flags = (seg_32bit << 0) | (contents << 1) |
  3918. (read_exec_only << 3) | (limit_in_pages << 4) |
  3919. (seg_not_present << 5) | (useable << 6) | (lm << 7);
  3920. limit = (entry_1 & 0xffff) | (entry_2 & 0xf0000);
  3921. base_addr = (entry_1 >> 16) |
  3922. (entry_2 & 0xff000000) |
  3923. ((entry_2 & 0xff) << 16);
  3924. target_ldt_info->base_addr = tswapal(base_addr);
  3925. target_ldt_info->limit = tswap32(limit);
  3926. target_ldt_info->flags = tswap32(flags);
  3927. unlock_user_struct(target_ldt_info, ptr, 1);
  3928. return 0;
  3929. }
  3930. #endif /* TARGET_I386 && TARGET_ABI32 */
  3931. #ifndef TARGET_ABI32
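/* arch_prctl for 64-bit targets: set or query the FS/GS segment base. */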
  3932. abi_long do_arch_prctl(CPUX86State *env, int code, abi_ulong addr)
  3933. {
  3934. abi_long ret = 0;
  3935. abi_ulong val;
  3936. int idx;
  3937. switch(code) {
  3938. case TARGET_ARCH_SET_GS:
  3939. case TARGET_ARCH_SET_FS:
  3940. if (code == TARGET_ARCH_SET_GS)
  3941. idx = R_GS;
  3942. else
  3943. idx = R_FS;
  3944. cpu_x86_load_seg(env, idx, 0);
  3945. env->segs[idx].base = addr;
  3946. break;
  3947. case TARGET_ARCH_GET_GS:
  3948. case TARGET_ARCH_GET_FS:
  3949. if (code == TARGET_ARCH_GET_GS)
  3950. idx = R_GS;
  3951. else
  3952. idx = R_FS;
  3953. val = env->segs[idx].base;
  3954. if (put_user(val, addr, abi_ulong))
  3955. ret = -TARGET_EFAULT;
  3956. break;
  3957. default:
  3958. ret = -TARGET_EINVAL;
  3959. break;
  3960. }
  3961. return ret;
  3962. }
  3963. #endif
  3964. #endif /* defined(TARGET_I386) */
  3965. #define NEW_STACK_SIZE 0x40000
  3966. static pthread_mutex_t clone_lock = PTHREAD_MUTEX_INITIALIZER;
  3967. typedef struct {
  3968. CPUArchState *env;
  3969. pthread_mutex_t mutex;
  3970. pthread_cond_t cond;
  3971. pthread_t thread;
  3972. uint32_t tid;
  3973. abi_ulong child_tidptr;
  3974. abi_ulong parent_tidptr;
  3975. sigset_t sigmask;
  3976. } new_thread_info;
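/* Entry point for threads created via clone(CLONE_VM): publish the new TID,
   restore the signal mask, signal the parent that setup is done, then wait for
   the parent to finish TLS initialisation and enter the CPU loop. */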
  3977. static void *clone_func(void *arg)
  3978. {
  3979. new_thread_info *info = arg;
  3980. CPUArchState *env;
  3981. CPUState *cpu;
  3982. TaskState *ts;
  3983. env = info->env;
  3984. cpu = ENV_GET_CPU(env);
  3985. thread_cpu = cpu;
  3986. ts = (TaskState *)cpu->opaque;
  3987. info->tid = gettid();
  3988. cpu->host_tid = info->tid;
  3989. task_settid(ts);
  3990. if (info->child_tidptr)
  3991. put_user_u32(info->tid, info->child_tidptr);
  3992. if (info->parent_tidptr)
  3993. put_user_u32(info->tid, info->parent_tidptr);
  3994. /* Enable signals. */
  3995. sigprocmask(SIG_SETMASK, &info->sigmask, NULL);
  3996. /* Signal to the parent that we're ready. */
  3997. pthread_mutex_lock(&info->mutex);
  3998. pthread_cond_broadcast(&info->cond);
  3999. pthread_mutex_unlock(&info->mutex);
4000. /* Wait until the parent has finished initializing the TLS state. */
  4001. pthread_mutex_lock(&clone_lock);
  4002. pthread_mutex_unlock(&clone_lock);
  4003. cpu_loop(env);
  4004. /* never exits */
  4005. return NULL;
  4006. }
4007. /* do_fork() must return host values and target errnos (unlike most
4008. do_*() functions). */
  4009. static int do_fork(CPUArchState *env, unsigned int flags, abi_ulong newsp,
  4010. abi_ulong parent_tidptr, target_ulong newtls,
  4011. abi_ulong child_tidptr)
  4012. {
  4013. CPUState *cpu = ENV_GET_CPU(env);
  4014. int ret;
  4015. TaskState *ts;
  4016. CPUState *new_cpu;
  4017. CPUArchState *new_env;
  4018. unsigned int nptl_flags;
  4019. sigset_t sigmask;
  4020. /* Emulate vfork() with fork() */
  4021. if (flags & CLONE_VFORK)
  4022. flags &= ~(CLONE_VFORK | CLONE_VM);
  4023. if (flags & CLONE_VM) {
  4024. TaskState *parent_ts = (TaskState *)cpu->opaque;
  4025. new_thread_info info;
  4026. pthread_attr_t attr;
  4027. ts = g_malloc0(sizeof(TaskState));
  4028. init_task_state(ts);
  4029. /* we create a new CPU instance. */
  4030. new_env = cpu_copy(env);
  4031. /* Init regs that differ from the parent. */
  4032. cpu_clone_regs(new_env, newsp);
  4033. new_cpu = ENV_GET_CPU(new_env);
  4034. new_cpu->opaque = ts;
  4035. ts->bprm = parent_ts->bprm;
  4036. ts->info = parent_ts->info;
  4037. nptl_flags = flags;
  4038. flags &= ~CLONE_NPTL_FLAGS2;
  4039. if (nptl_flags & CLONE_CHILD_CLEARTID) {
  4040. ts->child_tidptr = child_tidptr;
  4041. }
  4042. if (nptl_flags & CLONE_SETTLS)
  4043. cpu_set_tls (new_env, newtls);
  4044. /* Grab a mutex so that thread setup appears atomic. */
  4045. pthread_mutex_lock(&clone_lock);
  4046. memset(&info, 0, sizeof(info));
  4047. pthread_mutex_init(&info.mutex, NULL);
  4048. pthread_mutex_lock(&info.mutex);
  4049. pthread_cond_init(&info.cond, NULL);
  4050. info.env = new_env;
  4051. if (nptl_flags & CLONE_CHILD_SETTID)
  4052. info.child_tidptr = child_tidptr;
  4053. if (nptl_flags & CLONE_PARENT_SETTID)
  4054. info.parent_tidptr = parent_tidptr;
  4055. ret = pthread_attr_init(&attr);
  4056. ret = pthread_attr_setstacksize(&attr, NEW_STACK_SIZE);
  4057. ret = pthread_attr_setdetachstate(&attr, PTHREAD_CREATE_DETACHED);
  4058. /* It is not safe to deliver signals until the child has finished
  4059. initializing, so temporarily block all signals. */
  4060. sigfillset(&sigmask);
  4061. sigprocmask(SIG_BLOCK, &sigmask, &info.sigmask);
  4062. ret = pthread_create(&info.thread, &attr, clone_func, &info);
  4063. /* TODO: Free new CPU state if thread creation failed. */
  4064. sigprocmask(SIG_SETMASK, &info.sigmask, NULL);
  4065. pthread_attr_destroy(&attr);
  4066. if (ret == 0) {
  4067. /* Wait for the child to initialize. */
  4068. pthread_cond_wait(&info.cond, &info.mutex);
  4069. ret = info.tid;
  4070. if (flags & CLONE_PARENT_SETTID)
  4071. put_user_u32(ret, parent_tidptr);
  4072. } else {
  4073. ret = -1;
  4074. }
  4075. pthread_mutex_unlock(&info.mutex);
  4076. pthread_cond_destroy(&info.cond);
  4077. pthread_mutex_destroy(&info.mutex);
  4078. pthread_mutex_unlock(&clone_lock);
  4079. } else {
4080. /* if no CLONE_VM, we consider it a fork */
  4081. if ((flags & ~(CSIGNAL | CLONE_NPTL_FLAGS2)) != 0)
  4082. return -EINVAL;
  4083. fork_start();
  4084. ret = fork();
  4085. if (ret == 0) {
  4086. /* Child Process. */
  4087. cpu_clone_regs(env, newsp);
  4088. fork_end(1);
  4089. /* There is a race condition here. The parent process could
  4090. theoretically read the TID in the child process before the child
  4091. tid is set. This would require using either ptrace
4092. (not implemented) or having *_tidptr point at a shared memory
  4093. mapping. We can't repeat the spinlock hack used above because
  4094. the child process gets its own copy of the lock. */
  4095. if (flags & CLONE_CHILD_SETTID)
  4096. put_user_u32(gettid(), child_tidptr);
  4097. if (flags & CLONE_PARENT_SETTID)
  4098. put_user_u32(gettid(), parent_tidptr);
  4099. ts = (TaskState *)cpu->opaque;
  4100. if (flags & CLONE_SETTLS)
  4101. cpu_set_tls (env, newtls);
  4102. if (flags & CLONE_CHILD_CLEARTID)
  4103. ts->child_tidptr = child_tidptr;
  4104. } else {
  4105. fork_end(0);
  4106. }
  4107. }
  4108. return ret;
  4109. }
4110. /* warning: doesn't handle Linux-specific flags... */
  4111. static int target_to_host_fcntl_cmd(int cmd)
  4112. {
  4113. switch(cmd) {
  4114. case TARGET_F_DUPFD:
  4115. case TARGET_F_GETFD:
  4116. case TARGET_F_SETFD:
  4117. case TARGET_F_GETFL:
  4118. case TARGET_F_SETFL:
  4119. return cmd;
  4120. case TARGET_F_GETLK:
  4121. return F_GETLK;
  4122. case TARGET_F_SETLK:
  4123. return F_SETLK;
  4124. case TARGET_F_SETLKW:
  4125. return F_SETLKW;
  4126. case TARGET_F_GETOWN:
  4127. return F_GETOWN;
  4128. case TARGET_F_SETOWN:
  4129. return F_SETOWN;
  4130. case TARGET_F_GETSIG:
  4131. return F_GETSIG;
  4132. case TARGET_F_SETSIG:
  4133. return F_SETSIG;
  4134. #if TARGET_ABI_BITS == 32
  4135. case TARGET_F_GETLK64:
  4136. return F_GETLK64;
  4137. case TARGET_F_SETLK64:
  4138. return F_SETLK64;
  4139. case TARGET_F_SETLKW64:
  4140. return F_SETLKW64;
  4141. #endif
  4142. case TARGET_F_SETLEASE:
  4143. return F_SETLEASE;
  4144. case TARGET_F_GETLEASE:
  4145. return F_GETLEASE;
  4146. #ifdef F_DUPFD_CLOEXEC
  4147. case TARGET_F_DUPFD_CLOEXEC:
  4148. return F_DUPFD_CLOEXEC;
  4149. #endif
  4150. case TARGET_F_NOTIFY:
  4151. return F_NOTIFY;
  4152. #ifdef F_GETOWN_EX
  4153. case TARGET_F_GETOWN_EX:
  4154. return F_GETOWN_EX;
  4155. #endif
  4156. #ifdef F_SETOWN_EX
  4157. case TARGET_F_SETOWN_EX:
  4158. return F_SETOWN_EX;
  4159. #endif
  4160. default:
  4161. return -TARGET_EINVAL;
  4162. }
  4163. return -TARGET_EINVAL;
  4164. }
  4165. #define TRANSTBL_CONVERT(a) { -1, TARGET_##a, -1, a }
  4166. static const bitmask_transtbl flock_tbl[] = {
  4167. TRANSTBL_CONVERT(F_RDLCK),
  4168. TRANSTBL_CONVERT(F_WRLCK),
  4169. TRANSTBL_CONVERT(F_UNLCK),
  4170. TRANSTBL_CONVERT(F_EXLCK),
  4171. TRANSTBL_CONVERT(F_SHLCK),
  4172. { 0, 0, 0, 0 }
  4173. };
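/* Emulate fcntl(): translate the command and any flock/owner structures
   between the target and host representations around the host fcntl() call. */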
  4174. static abi_long do_fcntl(int fd, int cmd, abi_ulong arg)
  4175. {
  4176. struct flock fl;
  4177. struct target_flock *target_fl;
  4178. struct flock64 fl64;
  4179. struct target_flock64 *target_fl64;
  4180. #ifdef F_GETOWN_EX
  4181. struct f_owner_ex fox;
  4182. struct target_f_owner_ex *target_fox;
  4183. #endif
  4184. abi_long ret;
  4185. int host_cmd = target_to_host_fcntl_cmd(cmd);
  4186. if (host_cmd == -TARGET_EINVAL)
  4187. return host_cmd;
  4188. switch(cmd) {
  4189. case TARGET_F_GETLK:
  4190. if (!lock_user_struct(VERIFY_READ, target_fl, arg, 1))
  4191. return -TARGET_EFAULT;
  4192. fl.l_type =
  4193. target_to_host_bitmask(tswap16(target_fl->l_type), flock_tbl);
  4194. fl.l_whence = tswap16(target_fl->l_whence);
  4195. fl.l_start = tswapal(target_fl->l_start);
  4196. fl.l_len = tswapal(target_fl->l_len);
  4197. fl.l_pid = tswap32(target_fl->l_pid);
  4198. unlock_user_struct(target_fl, arg, 0);
  4199. ret = get_errno(fcntl(fd, host_cmd, &fl));
  4200. if (ret == 0) {
  4201. if (!lock_user_struct(VERIFY_WRITE, target_fl, arg, 0))
  4202. return -TARGET_EFAULT;
  4203. target_fl->l_type =
  4204. host_to_target_bitmask(tswap16(fl.l_type), flock_tbl);
  4205. target_fl->l_whence = tswap16(fl.l_whence);
  4206. target_fl->l_start = tswapal(fl.l_start);
  4207. target_fl->l_len = tswapal(fl.l_len);
  4208. target_fl->l_pid = tswap32(fl.l_pid);
  4209. unlock_user_struct(target_fl, arg, 1);
  4210. }
  4211. break;
  4212. case TARGET_F_SETLK:
  4213. case TARGET_F_SETLKW:
  4214. if (!lock_user_struct(VERIFY_READ, target_fl, arg, 1))
  4215. return -TARGET_EFAULT;
  4216. fl.l_type =
  4217. target_to_host_bitmask(tswap16(target_fl->l_type), flock_tbl);
  4218. fl.l_whence = tswap16(target_fl->l_whence);
  4219. fl.l_start = tswapal(target_fl->l_start);
  4220. fl.l_len = tswapal(target_fl->l_len);
  4221. fl.l_pid = tswap32(target_fl->l_pid);
  4222. unlock_user_struct(target_fl, arg, 0);
  4223. ret = get_errno(fcntl(fd, host_cmd, &fl));
  4224. break;
  4225. case TARGET_F_GETLK64:
  4226. if (!lock_user_struct(VERIFY_READ, target_fl64, arg, 1))
  4227. return -TARGET_EFAULT;
  4228. fl64.l_type =
  4229. target_to_host_bitmask(tswap16(target_fl64->l_type), flock_tbl) >> 1;
  4230. fl64.l_whence = tswap16(target_fl64->l_whence);
  4231. fl64.l_start = tswap64(target_fl64->l_start);
  4232. fl64.l_len = tswap64(target_fl64->l_len);
  4233. fl64.l_pid = tswap32(target_fl64->l_pid);
  4234. unlock_user_struct(target_fl64, arg, 0);
  4235. ret = get_errno(fcntl(fd, host_cmd, &fl64));
  4236. if (ret == 0) {
  4237. if (!lock_user_struct(VERIFY_WRITE, target_fl64, arg, 0))
  4238. return -TARGET_EFAULT;
  4239. target_fl64->l_type =
  4240. host_to_target_bitmask(tswap16(fl64.l_type), flock_tbl) >> 1;
  4241. target_fl64->l_whence = tswap16(fl64.l_whence);
  4242. target_fl64->l_start = tswap64(fl64.l_start);
  4243. target_fl64->l_len = tswap64(fl64.l_len);
  4244. target_fl64->l_pid = tswap32(fl64.l_pid);
  4245. unlock_user_struct(target_fl64, arg, 1);
  4246. }
  4247. break;
  4248. case TARGET_F_SETLK64:
  4249. case TARGET_F_SETLKW64:
  4250. if (!lock_user_struct(VERIFY_READ, target_fl64, arg, 1))
  4251. return -TARGET_EFAULT;
  4252. fl64.l_type =
  4253. target_to_host_bitmask(tswap16(target_fl64->l_type), flock_tbl) >> 1;
  4254. fl64.l_whence = tswap16(target_fl64->l_whence);
  4255. fl64.l_start = tswap64(target_fl64->l_start);
  4256. fl64.l_len = tswap64(target_fl64->l_len);
  4257. fl64.l_pid = tswap32(target_fl64->l_pid);
  4258. unlock_user_struct(target_fl64, arg, 0);
  4259. ret = get_errno(fcntl(fd, host_cmd, &fl64));
  4260. break;
  4261. case TARGET_F_GETFL:
  4262. ret = get_errno(fcntl(fd, host_cmd, arg));
  4263. if (ret >= 0) {
  4264. ret = host_to_target_bitmask(ret, fcntl_flags_tbl);
  4265. }
  4266. break;
  4267. case TARGET_F_SETFL:
  4268. ret = get_errno(fcntl(fd, host_cmd, target_to_host_bitmask(arg, fcntl_flags_tbl)));
  4269. break;
  4270. #ifdef F_GETOWN_EX
  4271. case TARGET_F_GETOWN_EX:
  4272. ret = get_errno(fcntl(fd, host_cmd, &fox));
  4273. if (ret >= 0) {
  4274. if (!lock_user_struct(VERIFY_WRITE, target_fox, arg, 0))
  4275. return -TARGET_EFAULT;
  4276. target_fox->type = tswap32(fox.type);
  4277. target_fox->pid = tswap32(fox.pid);
  4278. unlock_user_struct(target_fox, arg, 1);
  4279. }
  4280. break;
  4281. #endif
  4282. #ifdef F_SETOWN_EX
  4283. case TARGET_F_SETOWN_EX:
  4284. if (!lock_user_struct(VERIFY_READ, target_fox, arg, 1))
  4285. return -TARGET_EFAULT;
  4286. fox.type = tswap32(target_fox->type);
  4287. fox.pid = tswap32(target_fox->pid);
  4288. unlock_user_struct(target_fox, arg, 0);
  4289. ret = get_errno(fcntl(fd, host_cmd, &fox));
  4290. break;
  4291. #endif
  4292. case TARGET_F_SETOWN:
  4293. case TARGET_F_GETOWN:
  4294. case TARGET_F_SETSIG:
  4295. case TARGET_F_GETSIG:
  4296. case TARGET_F_SETLEASE:
  4297. case TARGET_F_GETLEASE:
  4298. ret = get_errno(fcntl(fd, host_cmd, arg));
  4299. break;
  4300. default:
  4301. ret = get_errno(fcntl(fd, cmd, arg));
  4302. break;
  4303. }
  4304. return ret;
  4305. }
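/* Helpers to convert between 16-bit (legacy) and 32-bit uid/gid values,
   depending on whether the target ABI uses 16-bit IDs. */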
  4306. #ifdef USE_UID16
  4307. static inline int high2lowuid(int uid)
  4308. {
  4309. if (uid > 65535)
  4310. return 65534;
  4311. else
  4312. return uid;
  4313. }
  4314. static inline int high2lowgid(int gid)
  4315. {
  4316. if (gid > 65535)
  4317. return 65534;
  4318. else
  4319. return gid;
  4320. }
  4321. static inline int low2highuid(int uid)
  4322. {
  4323. if ((int16_t)uid == -1)
  4324. return -1;
  4325. else
  4326. return uid;
  4327. }
  4328. static inline int low2highgid(int gid)
  4329. {
  4330. if ((int16_t)gid == -1)
  4331. return -1;
  4332. else
  4333. return gid;
  4334. }
  4335. static inline int tswapid(int id)
  4336. {
  4337. return tswap16(id);
  4338. }
  4339. #define put_user_id(x, gaddr) put_user_u16(x, gaddr)
  4340. #else /* !USE_UID16 */
  4341. static inline int high2lowuid(int uid)
  4342. {
  4343. return uid;
  4344. }
  4345. static inline int high2lowgid(int gid)
  4346. {
  4347. return gid;
  4348. }
  4349. static inline int low2highuid(int uid)
  4350. {
  4351. return uid;
  4352. }
  4353. static inline int low2highgid(int gid)
  4354. {
  4355. return gid;
  4356. }
  4357. static inline int tswapid(int id)
  4358. {
  4359. return tswap32(id);
  4360. }
  4361. #define put_user_id(x, gaddr) put_user_u32(x, gaddr)
  4362. #endif /* USE_UID16 */
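/* One-time initialisation: register struct thunks, build the reverse errno
   table, and patch the size field of ioctl numbers where needed. */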
  4363. void syscall_init(void)
  4364. {
  4365. IOCTLEntry *ie;
  4366. const argtype *arg_type;
  4367. int size;
  4368. int i;
  4369. #define STRUCT(name, ...) thunk_register_struct(STRUCT_ ## name, #name, struct_ ## name ## _def);
  4370. #define STRUCT_SPECIAL(name) thunk_register_struct_direct(STRUCT_ ## name, #name, &struct_ ## name ## _def);
  4371. #include "syscall_types.h"
  4372. #undef STRUCT
  4373. #undef STRUCT_SPECIAL
  4374. /* Build target_to_host_errno_table[] table from
  4375. * host_to_target_errno_table[]. */
  4376. for (i = 0; i < ERRNO_TABLE_SIZE; i++) {
  4377. target_to_host_errno_table[host_to_target_errno_table[i]] = i;
  4378. }
4379. /* We patch the ioctl size if necessary. We rely on the fact that
4380. no ioctl has all bits set to '1' in the size field. */
  4381. ie = ioctl_entries;
  4382. while (ie->target_cmd != 0) {
  4383. if (((ie->target_cmd >> TARGET_IOC_SIZESHIFT) & TARGET_IOC_SIZEMASK) ==
  4384. TARGET_IOC_SIZEMASK) {
  4385. arg_type = ie->arg_type;
  4386. if (arg_type[0] != TYPE_PTR) {
  4387. fprintf(stderr, "cannot patch size for ioctl 0x%x\n",
  4388. ie->target_cmd);
  4389. exit(1);
  4390. }
  4391. arg_type++;
  4392. size = thunk_type_size(arg_type, 0);
  4393. ie->target_cmd = (ie->target_cmd &
  4394. ~(TARGET_IOC_SIZEMASK << TARGET_IOC_SIZESHIFT)) |
  4395. (size << TARGET_IOC_SIZESHIFT);
  4396. }
  4397. /* automatic consistency check if same arch */
  4398. #if (defined(__i386__) && defined(TARGET_I386) && defined(TARGET_ABI32)) || \
  4399. (defined(__x86_64__) && defined(TARGET_X86_64))
  4400. if (unlikely(ie->target_cmd != ie->host_cmd)) {
  4401. fprintf(stderr, "ERROR: ioctl(%s): target=0x%x host=0x%x\n",
  4402. ie->name, ie->target_cmd, ie->host_cmd);
  4403. }
  4404. #endif
  4405. ie++;
  4406. }
  4407. }
  4408. #if TARGET_ABI_BITS == 32
  4409. static inline uint64_t target_offset64(uint32_t word0, uint32_t word1)
  4410. {
  4411. #ifdef TARGET_WORDS_BIGENDIAN
  4412. return ((uint64_t)word0 << 32) | word1;
  4413. #else
  4414. return ((uint64_t)word1 << 32) | word0;
  4415. #endif
  4416. }
  4417. #else /* TARGET_ABI_BITS == 32 */
  4418. static inline uint64_t target_offset64(uint64_t word0, uint64_t word1)
  4419. {
  4420. return word0;
  4421. }
  4422. #endif /* TARGET_ABI_BITS != 32 */
  4423. #ifdef TARGET_NR_truncate64
  4424. static inline abi_long target_truncate64(void *cpu_env, const char *arg1,
  4425. abi_long arg2,
  4426. abi_long arg3,
  4427. abi_long arg4)
  4428. {
  4429. if (regpairs_aligned(cpu_env)) {
  4430. arg2 = arg3;
  4431. arg3 = arg4;
  4432. }
  4433. return get_errno(truncate64(arg1, target_offset64(arg2, arg3)));
  4434. }
  4435. #endif
  4436. #ifdef TARGET_NR_ftruncate64
  4437. static inline abi_long target_ftruncate64(void *cpu_env, abi_long arg1,
  4438. abi_long arg2,
  4439. abi_long arg3,
  4440. abi_long arg4)
  4441. {
  4442. if (regpairs_aligned(cpu_env)) {
  4443. arg2 = arg3;
  4444. arg3 = arg4;
  4445. }
  4446. return get_errno(ftruncate64(arg1, target_offset64(arg2, arg3)));
  4447. }
  4448. #endif
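/* Converters for timespec, itimerspec and sigevent structures between the
   guest and host layouts. */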
  4449. static inline abi_long target_to_host_timespec(struct timespec *host_ts,
  4450. abi_ulong target_addr)
  4451. {
  4452. struct target_timespec *target_ts;
  4453. if (!lock_user_struct(VERIFY_READ, target_ts, target_addr, 1))
  4454. return -TARGET_EFAULT;
  4455. host_ts->tv_sec = tswapal(target_ts->tv_sec);
  4456. host_ts->tv_nsec = tswapal(target_ts->tv_nsec);
  4457. unlock_user_struct(target_ts, target_addr, 0);
  4458. return 0;
  4459. }
  4460. static inline abi_long host_to_target_timespec(abi_ulong target_addr,
  4461. struct timespec *host_ts)
  4462. {
  4463. struct target_timespec *target_ts;
  4464. if (!lock_user_struct(VERIFY_WRITE, target_ts, target_addr, 0))
  4465. return -TARGET_EFAULT;
  4466. target_ts->tv_sec = tswapal(host_ts->tv_sec);
  4467. target_ts->tv_nsec = tswapal(host_ts->tv_nsec);
  4468. unlock_user_struct(target_ts, target_addr, 1);
  4469. return 0;
  4470. }
  4471. static inline abi_long target_to_host_itimerspec(struct itimerspec *host_itspec,
  4472. abi_ulong target_addr)
  4473. {
  4474. struct target_itimerspec *target_itspec;
  4475. if (!lock_user_struct(VERIFY_READ, target_itspec, target_addr, 1)) {
  4476. return -TARGET_EFAULT;
  4477. }
  4478. host_itspec->it_interval.tv_sec =
  4479. tswapal(target_itspec->it_interval.tv_sec);
  4480. host_itspec->it_interval.tv_nsec =
  4481. tswapal(target_itspec->it_interval.tv_nsec);
  4482. host_itspec->it_value.tv_sec = tswapal(target_itspec->it_value.tv_sec);
  4483. host_itspec->it_value.tv_nsec = tswapal(target_itspec->it_value.tv_nsec);
  4484. unlock_user_struct(target_itspec, target_addr, 1);
  4485. return 0;
  4486. }
  4487. static inline abi_long host_to_target_itimerspec(abi_ulong target_addr,
  4488. struct itimerspec *host_its)
  4489. {
  4490. struct target_itimerspec *target_itspec;
  4491. if (!lock_user_struct(VERIFY_WRITE, target_itspec, target_addr, 0)) {
  4492. return -TARGET_EFAULT;
  4493. }
  4494. target_itspec->it_interval.tv_sec = tswapal(host_its->it_interval.tv_sec);
  4495. target_itspec->it_interval.tv_nsec = tswapal(host_its->it_interval.tv_nsec);
  4496. target_itspec->it_value.tv_sec = tswapal(host_its->it_value.tv_sec);
  4497. target_itspec->it_value.tv_nsec = tswapal(host_its->it_value.tv_nsec);
  4498. unlock_user_struct(target_itspec, target_addr, 0);
  4499. return 0;
  4500. }
  4501. static inline abi_long target_to_host_sigevent(struct sigevent *host_sevp,
  4502. abi_ulong target_addr)
  4503. {
  4504. struct target_sigevent *target_sevp;
  4505. if (!lock_user_struct(VERIFY_READ, target_sevp, target_addr, 1)) {
  4506. return -TARGET_EFAULT;
  4507. }
  4508. /* This union is awkward on 64 bit systems because it has a 32 bit
  4509. * integer and a pointer in it; we follow the conversion approach
  4510. * used for handling sigval types in signal.c so the guest should get
  4511. * the correct value back even if we did a 64 bit byteswap and it's
  4512. * using the 32 bit integer.
  4513. */
  4514. host_sevp->sigev_value.sival_ptr =
  4515. (void *)(uintptr_t)tswapal(target_sevp->sigev_value.sival_ptr);
  4516. host_sevp->sigev_signo =
  4517. target_to_host_signal(tswap32(target_sevp->sigev_signo));
  4518. host_sevp->sigev_notify = tswap32(target_sevp->sigev_notify);
  4519. host_sevp->_sigev_un._tid = tswap32(target_sevp->_sigev_un._tid);
  4520. unlock_user_struct(target_sevp, target_addr, 1);
  4521. return 0;
  4522. }
  4523. #if defined(TARGET_NR_mlockall)
  4524. static inline int target_to_host_mlockall_arg(int arg)
  4525. {
  4526. int result = 0;
  4527. if (arg & TARGET_MLOCKALL_MCL_CURRENT) {
  4528. result |= MCL_CURRENT;
  4529. }
  4530. if (arg & TARGET_MLOCKALL_MCL_FUTURE) {
  4531. result |= MCL_FUTURE;
  4532. }
  4533. return result;
  4534. }
  4535. #endif
  4536. #if defined(TARGET_NR_stat64) || defined(TARGET_NR_newfstatat)
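/* Write a host struct stat out to the guest's stat64 layout (with the ARM
   EABI variant handled separately). */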
  4537. static inline abi_long host_to_target_stat64(void *cpu_env,
  4538. abi_ulong target_addr,
  4539. struct stat *host_st)
  4540. {
  4541. #if defined(TARGET_ARM) && defined(TARGET_ABI32)
  4542. if (((CPUARMState *)cpu_env)->eabi) {
  4543. struct target_eabi_stat64 *target_st;
  4544. if (!lock_user_struct(VERIFY_WRITE, target_st, target_addr, 0))
  4545. return -TARGET_EFAULT;
  4546. memset(target_st, 0, sizeof(struct target_eabi_stat64));
  4547. __put_user(host_st->st_dev, &target_st->st_dev);
  4548. __put_user(host_st->st_ino, &target_st->st_ino);
  4549. #ifdef TARGET_STAT64_HAS_BROKEN_ST_INO
  4550. __put_user(host_st->st_ino, &target_st->__st_ino);
  4551. #endif
  4552. __put_user(host_st->st_mode, &target_st->st_mode);
  4553. __put_user(host_st->st_nlink, &target_st->st_nlink);
  4554. __put_user(host_st->st_uid, &target_st->st_uid);
  4555. __put_user(host_st->st_gid, &target_st->st_gid);
  4556. __put_user(host_st->st_rdev, &target_st->st_rdev);
  4557. __put_user(host_st->st_size, &target_st->st_size);
  4558. __put_user(host_st->st_blksize, &target_st->st_blksize);
  4559. __put_user(host_st->st_blocks, &target_st->st_blocks);
  4560. __put_user(host_st->st_atime, &target_st->target_st_atime);
  4561. __put_user(host_st->st_mtime, &target_st->target_st_mtime);
  4562. __put_user(host_st->st_ctime, &target_st->target_st_ctime);
  4563. unlock_user_struct(target_st, target_addr, 1);
  4564. } else
  4565. #endif
  4566. {
  4567. #if defined(TARGET_HAS_STRUCT_STAT64)
  4568. struct target_stat64 *target_st;
  4569. #else
  4570. struct target_stat *target_st;
  4571. #endif
  4572. if (!lock_user_struct(VERIFY_WRITE, target_st, target_addr, 0))
  4573. return -TARGET_EFAULT;
  4574. memset(target_st, 0, sizeof(*target_st));
  4575. __put_user(host_st->st_dev, &target_st->st_dev);
  4576. __put_user(host_st->st_ino, &target_st->st_ino);
  4577. #ifdef TARGET_STAT64_HAS_BROKEN_ST_INO
  4578. __put_user(host_st->st_ino, &target_st->__st_ino);
  4579. #endif
  4580. __put_user(host_st->st_mode, &target_st->st_mode);
  4581. __put_user(host_st->st_nlink, &target_st->st_nlink);
  4582. __put_user(host_st->st_uid, &target_st->st_uid);
  4583. __put_user(host_st->st_gid, &target_st->st_gid);
  4584. __put_user(host_st->st_rdev, &target_st->st_rdev);
  4585. /* XXX: better use of kernel struct */
  4586. __put_user(host_st->st_size, &target_st->st_size);
  4587. __put_user(host_st->st_blksize, &target_st->st_blksize);
  4588. __put_user(host_st->st_blocks, &target_st->st_blocks);
  4589. __put_user(host_st->st_atime, &target_st->target_st_atime);
  4590. __put_user(host_st->st_mtime, &target_st->target_st_mtime);
  4591. __put_user(host_st->st_ctime, &target_st->target_st_ctime);
  4592. unlock_user_struct(target_st, target_addr, 1);
  4593. }
  4594. return 0;
  4595. }
  4596. #endif
4597. /* ??? Using host futex calls even when target atomic operations
4598. are not really atomic probably breaks things. However, implementing
4599. futexes locally would make futexes shared between multiple processes
4600. tricky; such futexes would probably be useless anyway, because guest
4601. atomic operations won't work either. */
  4602. static int do_futex(target_ulong uaddr, int op, int val, target_ulong timeout,
  4603. target_ulong uaddr2, int val3)
  4604. {
  4605. struct timespec ts, *pts;
  4606. int base_op;
  4607. /* ??? We assume FUTEX_* constants are the same on both host
  4608. and target. */
  4609. #ifdef FUTEX_CMD_MASK
  4610. base_op = op & FUTEX_CMD_MASK;
  4611. #else
  4612. base_op = op;
  4613. #endif
  4614. switch (base_op) {
  4615. case FUTEX_WAIT:
  4616. case FUTEX_WAIT_BITSET:
  4617. if (timeout) {
  4618. pts = &ts;
  4619. target_to_host_timespec(pts, timeout);
  4620. } else {
  4621. pts = NULL;
  4622. }
  4623. return get_errno(sys_futex(g2h(uaddr), op, tswap32(val),
  4624. pts, NULL, val3));
  4625. case FUTEX_WAKE:
  4626. return get_errno(sys_futex(g2h(uaddr), op, val, NULL, NULL, 0));
  4627. case FUTEX_FD:
  4628. return get_errno(sys_futex(g2h(uaddr), op, val, NULL, NULL, 0));
  4629. case FUTEX_REQUEUE:
  4630. case FUTEX_CMP_REQUEUE:
  4631. case FUTEX_WAKE_OP:
  4632. /* For FUTEX_REQUEUE, FUTEX_CMP_REQUEUE, and FUTEX_WAKE_OP, the
  4633. TIMEOUT parameter is interpreted as a uint32_t by the kernel.
  4634. But the prototype takes a `struct timespec *'; insert casts
  4635. to satisfy the compiler. We do not need to tswap TIMEOUT
  4636. since it's not compared to guest memory. */
  4637. pts = (struct timespec *)(uintptr_t) timeout;
  4638. return get_errno(sys_futex(g2h(uaddr), op, val, pts,
  4639. g2h(uaddr2),
  4640. (base_op == FUTEX_CMP_REQUEUE
  4641. ? tswap32(val3)
  4642. : val3)));
  4643. default:
  4644. return -TARGET_ENOSYS;
  4645. }
  4646. }
  4647. /* Map host to target signal numbers for the wait family of syscalls.
  4648. Assume all other status bits are the same. */
  4649. int host_to_target_waitstatus(int status)
  4650. {
  4651. if (WIFSIGNALED(status)) {
  4652. return host_to_target_signal(WTERMSIG(status)) | (status & ~0x7f);
  4653. }
  4654. if (WIFSTOPPED(status)) {
  4655. return (host_to_target_signal(WSTOPSIG(status)) << 8)
  4656. | (status & 0xff);
  4657. }
  4658. return status;
  4659. }
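/* Emulation of /proc/self/cmdline: copy the real file's contents but drop the
   first argument, which is the path to the qemu binary rather than the guest
   program. */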
  4660. static int open_self_cmdline(void *cpu_env, int fd)
  4661. {
  4662. int fd_orig = -1;
  4663. bool word_skipped = false;
  4664. fd_orig = open("/proc/self/cmdline", O_RDONLY);
  4665. if (fd_orig < 0) {
  4666. return fd_orig;
  4667. }
  4668. while (true) {
  4669. ssize_t nb_read;
  4670. char buf[128];
  4671. char *cp_buf = buf;
  4672. nb_read = read(fd_orig, buf, sizeof(buf));
  4673. if (nb_read < 0) {
  4674. fd_orig = close(fd_orig);
  4675. return -1;
  4676. } else if (nb_read == 0) {
  4677. break;
  4678. }
  4679. if (!word_skipped) {
  4680. /* Skip the first string, which is the path to qemu-*-static
  4681. instead of the actual command. */
  4682. cp_buf = memchr(buf, 0, sizeof(buf));
  4683. if (cp_buf) {
  4684. /* Null byte found, skip one string */
  4685. cp_buf++;
  4686. nb_read -= cp_buf - buf;
  4687. word_skipped = true;
  4688. }
  4689. }
  4690. if (word_skipped) {
  4691. if (write(fd, cp_buf, nb_read) != nb_read) {
  4692. close(fd_orig);
  4693. return -1;
  4694. }
  4695. }
  4696. }
  4697. return close(fd_orig);
  4698. }
  4699. static int open_self_maps(void *cpu_env, int fd)
  4700. {
  4701. CPUState *cpu = ENV_GET_CPU((CPUArchState *)cpu_env);
  4702. TaskState *ts = cpu->opaque;
  4703. FILE *fp;
  4704. char *line = NULL;
  4705. size_t len = 0;
  4706. ssize_t read;
  4707. fp = fopen("/proc/self/maps", "r");
  4708. if (fp == NULL) {
  4709. return -EACCES;
  4710. }
  4711. while ((read = getline(&line, &len, fp)) != -1) {
  4712. int fields, dev_maj, dev_min, inode;
  4713. uint64_t min, max, offset;
  4714. char flag_r, flag_w, flag_x, flag_p;
  4715. char path[512] = "";
  4716. fields = sscanf(line, "%"PRIx64"-%"PRIx64" %c%c%c%c %"PRIx64" %x:%x %d"
  4717. " %512s", &min, &max, &flag_r, &flag_w, &flag_x,
  4718. &flag_p, &offset, &dev_maj, &dev_min, &inode, path);
  4719. if ((fields < 10) || (fields > 11)) {
  4720. continue;
  4721. }
  4722. if (h2g_valid(min)) {
  4723. int flags = page_get_flags(h2g(min));
  4724. max = h2g_valid(max - 1) ? max : (uintptr_t)g2h(GUEST_ADDR_MAX);
  4725. if (page_check_range(h2g(min), max - min, flags) == -1) {
  4726. continue;
  4727. }
  4728. if (h2g(min) == ts->info->stack_limit) {
  4729. pstrcpy(path, sizeof(path), " [stack]");
  4730. }
  4731. dprintf(fd, TARGET_ABI_FMT_lx "-" TARGET_ABI_FMT_lx
  4732. " %c%c%c%c %08" PRIx64 " %02x:%02x %d %s%s\n",
  4733. h2g(min), h2g(max - 1) + 1, flag_r, flag_w,
  4734. flag_x, flag_p, offset, dev_maj, dev_min, inode,
  4735. path[0] ? " " : "", path);
  4736. }
  4737. }
  4738. free(line);
  4739. fclose(fp);
  4740. return 0;
  4741. }
  4742. static int open_self_stat(void *cpu_env, int fd)
  4743. {
  4744. CPUState *cpu = ENV_GET_CPU((CPUArchState *)cpu_env);
  4745. TaskState *ts = cpu->opaque;
  4746. abi_ulong start_stack = ts->info->start_stack;
  4747. int i;
  4748. for (i = 0; i < 44; i++) {
  4749. char buf[128];
  4750. int len;
  4751. uint64_t val = 0;
  4752. if (i == 0) {
  4753. /* pid */
  4754. val = getpid();
  4755. snprintf(buf, sizeof(buf), "%"PRId64 " ", val);
  4756. } else if (i == 1) {
  4757. /* app name */
  4758. snprintf(buf, sizeof(buf), "(%s) ", ts->bprm->argv[0]);
  4759. } else if (i == 27) {
  4760. /* stack bottom */
  4761. val = start_stack;
  4762. snprintf(buf, sizeof(buf), "%"PRId64 " ", val);
  4763. } else {
  4764. /* for the rest, there is MasterCard */
  4765. snprintf(buf, sizeof(buf), "0%c", i == 43 ? '\n' : ' ');
  4766. }
  4767. len = strlen(buf);
  4768. if (write(fd, buf, len) != len) {
  4769. return -1;
  4770. }
  4771. }
  4772. return 0;
  4773. }
  4774. static int open_self_auxv(void *cpu_env, int fd)
  4775. {
  4776. CPUState *cpu = ENV_GET_CPU((CPUArchState *)cpu_env);
  4777. TaskState *ts = cpu->opaque;
  4778. abi_ulong auxv = ts->info->saved_auxv;
  4779. abi_ulong len = ts->info->auxv_len;
  4780. char *ptr;
  4781. /*
4782. * The auxiliary vector is stored on the target process stack.
4783. * Read in the whole auxv vector and copy it to the file.
  4784. */
  4785. ptr = lock_user(VERIFY_READ, auxv, len, 0);
  4786. if (ptr != NULL) {
  4787. while (len > 0) {
  4788. ssize_t r;
  4789. r = write(fd, ptr, len);
  4790. if (r <= 0) {
  4791. break;
  4792. }
  4793. len -= r;
  4794. ptr += r;
  4795. }
  4796. lseek(fd, 0, SEEK_SET);
  4797. unlock_user(ptr, auxv, len);
  4798. }
  4799. return 0;
  4800. }
  4801. static int is_proc_myself(const char *filename, const char *entry)
  4802. {
  4803. if (!strncmp(filename, "/proc/", strlen("/proc/"))) {
  4804. filename += strlen("/proc/");
  4805. if (!strncmp(filename, "self/", strlen("self/"))) {
  4806. filename += strlen("self/");
  4807. } else if (*filename >= '1' && *filename <= '9') {
  4808. char myself[80];
  4809. snprintf(myself, sizeof(myself), "%d/", getpid());
  4810. if (!strncmp(filename, myself, strlen(myself))) {
  4811. filename += strlen(myself);
  4812. } else {
  4813. return 0;
  4814. }
  4815. } else {
  4816. return 0;
  4817. }
  4818. if (!strcmp(filename, entry)) {
  4819. return 1;
  4820. }
  4821. }
  4822. return 0;
  4823. }
  4824. #if defined(HOST_WORDS_BIGENDIAN) != defined(TARGET_WORDS_BIGENDIAN)
  4825. static int is_proc(const char *filename, const char *entry)
  4826. {
  4827. return strcmp(filename, entry) == 0;
  4828. }
  4829. static int open_net_route(void *cpu_env, int fd)
  4830. {
  4831. FILE *fp;
  4832. char *line = NULL;
  4833. size_t len = 0;
  4834. ssize_t read;
  4835. fp = fopen("/proc/net/route", "r");
  4836. if (fp == NULL) {
  4837. return -EACCES;
  4838. }
  4839. /* read header */
  4840. read = getline(&line, &len, fp);
  4841. dprintf(fd, "%s", line);
  4842. /* read routes */
  4843. while ((read = getline(&line, &len, fp)) != -1) {
  4844. char iface[16];
  4845. uint32_t dest, gw, mask;
  4846. unsigned int flags, refcnt, use, metric, mtu, window, irtt;
  4847. sscanf(line, "%s\t%08x\t%08x\t%04x\t%d\t%d\t%d\t%08x\t%d\t%u\t%u\n",
  4848. iface, &dest, &gw, &flags, &refcnt, &use, &metric,
  4849. &mask, &mtu, &window, &irtt);
  4850. dprintf(fd, "%s\t%08x\t%08x\t%04x\t%d\t%d\t%d\t%08x\t%d\t%u\t%u\n",
  4851. iface, tswap32(dest), tswap32(gw), flags, refcnt, use,
  4852. metric, tswap32(mask), mtu, window, irtt);
  4853. }
  4854. free(line);
  4855. fclose(fp);
  4856. return 0;
  4857. }
  4858. #endif
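/* openat() with special handling for /proc: some /proc/self entries (and, for
   cross-endian setups, /proc/net/route) are synthesised into a temporary file
   so the guest sees values describing the emulated process rather than qemu
   itself. */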
  4859. static int do_openat(void *cpu_env, int dirfd, const char *pathname, int flags, mode_t mode)
  4860. {
  4861. struct fake_open {
  4862. const char *filename;
  4863. int (*fill)(void *cpu_env, int fd);
  4864. int (*cmp)(const char *s1, const char *s2);
  4865. };
  4866. const struct fake_open *fake_open;
  4867. static const struct fake_open fakes[] = {
  4868. { "maps", open_self_maps, is_proc_myself },
  4869. { "stat", open_self_stat, is_proc_myself },
  4870. { "auxv", open_self_auxv, is_proc_myself },
  4871. { "cmdline", open_self_cmdline, is_proc_myself },
  4872. #if defined(HOST_WORDS_BIGENDIAN) != defined(TARGET_WORDS_BIGENDIAN)
  4873. { "/proc/net/route", open_net_route, is_proc },
  4874. #endif
  4875. { NULL, NULL, NULL }
  4876. };
  4877. if (is_proc_myself(pathname, "exe")) {
  4878. int execfd = qemu_getauxval(AT_EXECFD);
  4879. return execfd ? execfd : get_errno(sys_openat(dirfd, exec_path, flags, mode));
  4880. }
  4881. for (fake_open = fakes; fake_open->filename; fake_open++) {
  4882. if (fake_open->cmp(pathname, fake_open->filename)) {
  4883. break;
  4884. }
  4885. }
  4886. if (fake_open->filename) {
  4887. const char *tmpdir;
  4888. char filename[PATH_MAX];
  4889. int fd, r;
  4890. /* create temporary file to map stat to */
  4891. tmpdir = getenv("TMPDIR");
  4892. if (!tmpdir)
  4893. tmpdir = "/tmp";
  4894. snprintf(filename, sizeof(filename), "%s/qemu-open.XXXXXX", tmpdir);
  4895. fd = mkstemp(filename);
  4896. if (fd < 0) {
  4897. return fd;
  4898. }
  4899. unlink(filename);
  4900. if ((r = fake_open->fill(cpu_env, fd))) {
  4901. close(fd);
  4902. return r;
  4903. }
  4904. lseek(fd, 0, SEEK_SET);
  4905. return fd;
  4906. }
  4907. return get_errno(sys_openat(dirfd, path(pathname), flags, mode));
  4908. }
  4909. #define TIMER_MAGIC 0x0caf0000
  4910. #define TIMER_MAGIC_MASK 0xffff0000
4911. /* Convert QEMU-provided timer ID back to internal 16-bit index format */
  4912. static target_timer_t get_timer_id(abi_long arg)
  4913. {
  4914. target_timer_t timerid = arg;
  4915. if ((timerid & TIMER_MAGIC_MASK) != TIMER_MAGIC) {
  4916. return -TARGET_EINVAL;
  4917. }
  4918. timerid &= 0xffff;
  4919. if (timerid >= ARRAY_SIZE(g_posix_timers)) {
  4920. return -TARGET_EINVAL;
  4921. }
  4922. return timerid;
  4923. }
  4924. /* do_syscall() should always have a single exit point at the end so
  4925. that actions, such as logging of syscall results, can be performed.
  4926. All errnos that do_syscall() returns must be -TARGET_<errcode>. */
  4927. abi_long do_syscall(void *cpu_env, int num, abi_long arg1,
  4928. abi_long arg2, abi_long arg3, abi_long arg4,
  4929. abi_long arg5, abi_long arg6, abi_long arg7,
  4930. abi_long arg8)
  4931. {
  4932. CPUState *cpu = ENV_GET_CPU(cpu_env);
  4933. abi_long ret;
  4934. struct stat st;
  4935. struct statfs stfs;
  4936. void *p;
  4937. #ifdef DEBUG
  4938. gemu_log("syscall %d", num);
  4939. #endif
  4940. if(do_strace)
  4941. print_syscall(num, arg1, arg2, arg3, arg4, arg5, arg6);
  4942. switch(num) {
  4943. case TARGET_NR_exit:
4944. /* In old applications this may be used to implement _exit(2).
4945. However in threaded applications it is used for thread termination,
4946. and _exit_group is used for application termination.
4947. Do thread termination if we have more than one thread. */
  4948. /* FIXME: This probably breaks if a signal arrives. We should probably
  4949. be disabling signals. */
  4950. if (CPU_NEXT(first_cpu)) {
  4951. TaskState *ts;
  4952. cpu_list_lock();
  4953. /* Remove the CPU from the list. */
  4954. QTAILQ_REMOVE(&cpus, cpu, node);
  4955. cpu_list_unlock();
  4956. ts = cpu->opaque;
  4957. if (ts->child_tidptr) {
  4958. put_user_u32(0, ts->child_tidptr);
  4959. sys_futex(g2h(ts->child_tidptr), FUTEX_WAKE, INT_MAX,
  4960. NULL, NULL, 0);
  4961. }
  4962. thread_cpu = NULL;
  4963. object_unref(OBJECT(cpu));
  4964. g_free(ts);
  4965. pthread_exit(NULL);
  4966. }
  4967. #ifdef TARGET_GPROF
  4968. _mcleanup();
  4969. #endif
  4970. gdb_exit(cpu_env, arg1);
  4971. _exit(arg1);
  4972. ret = 0; /* avoid warning */
  4973. break;
  4974. case TARGET_NR_read:
  4975. if (arg3 == 0)
  4976. ret = 0;
  4977. else {
  4978. if (!(p = lock_user(VERIFY_WRITE, arg2, arg3, 0)))
  4979. goto efault;
  4980. ret = get_errno(read(arg1, p, arg3));
  4981. unlock_user(p, arg2, ret);
  4982. }
  4983. break;
  4984. case TARGET_NR_write:
  4985. if (!(p = lock_user(VERIFY_READ, arg2, arg3, 1)))
  4986. goto efault;
  4987. ret = get_errno(write(arg1, p, arg3));
  4988. unlock_user(p, arg2, 0);
  4989. break;
  4990. case TARGET_NR_open:
  4991. if (!(p = lock_user_string(arg1)))
  4992. goto efault;
  4993. ret = get_errno(do_openat(cpu_env, AT_FDCWD, p,
  4994. target_to_host_bitmask(arg2, fcntl_flags_tbl),
  4995. arg3));
  4996. unlock_user(p, arg1, 0);
  4997. break;
  4998. case TARGET_NR_openat:
  4999. if (!(p = lock_user_string(arg2)))
  5000. goto efault;
  5001. ret = get_errno(do_openat(cpu_env, arg1, p,
  5002. target_to_host_bitmask(arg3, fcntl_flags_tbl),
  5003. arg4));
  5004. unlock_user(p, arg2, 0);
  5005. break;
  5006. case TARGET_NR_close:
  5007. ret = get_errno(close(arg1));
  5008. break;
  5009. case TARGET_NR_brk:
  5010. ret = do_brk(arg1);
  5011. break;
  5012. case TARGET_NR_fork:
  5013. ret = get_errno(do_fork(cpu_env, SIGCHLD, 0, 0, 0, 0));
  5014. break;
  5015. #ifdef TARGET_NR_waitpid
  5016. case TARGET_NR_waitpid:
  5017. {
  5018. int status;
  5019. ret = get_errno(waitpid(arg1, &status, arg3));
  5020. if (!is_error(ret) && arg2 && ret
  5021. && put_user_s32(host_to_target_waitstatus(status), arg2))
  5022. goto efault;
  5023. }
  5024. break;
  5025. #endif
  5026. #ifdef TARGET_NR_waitid
  5027. case TARGET_NR_waitid:
  5028. {
  5029. siginfo_t info;
  5030. info.si_pid = 0;
  5031. ret = get_errno(waitid(arg1, arg2, &info, arg4));
  5032. if (!is_error(ret) && arg3 && info.si_pid != 0) {
  5033. if (!(p = lock_user(VERIFY_WRITE, arg3, sizeof(target_siginfo_t), 0)))
  5034. goto efault;
  5035. host_to_target_siginfo(p, &info);
  5036. unlock_user(p, arg3, sizeof(target_siginfo_t));
  5037. }
  5038. }
  5039. break;
  5040. #endif
  5041. #ifdef TARGET_NR_creat /* not on alpha */
  5042. case TARGET_NR_creat:
  5043. if (!(p = lock_user_string(arg1)))
  5044. goto efault;
  5045. ret = get_errno(creat(p, arg2));
  5046. unlock_user(p, arg1, 0);
  5047. break;
  5048. #endif
  5049. case TARGET_NR_link:
  5050. {
  5051. void * p2;
  5052. p = lock_user_string(arg1);
  5053. p2 = lock_user_string(arg2);
  5054. if (!p || !p2)
  5055. ret = -TARGET_EFAULT;
  5056. else
  5057. ret = get_errno(link(p, p2));
  5058. unlock_user(p2, arg2, 0);
  5059. unlock_user(p, arg1, 0);
  5060. }
  5061. break;
  5062. #if defined(TARGET_NR_linkat)
  5063. case TARGET_NR_linkat:
  5064. {
  5065. void * p2 = NULL;
  5066. if (!arg2 || !arg4)
  5067. goto efault;
  5068. p = lock_user_string(arg2);
  5069. p2 = lock_user_string(arg4);
  5070. if (!p || !p2)
  5071. ret = -TARGET_EFAULT;
  5072. else
  5073. ret = get_errno(linkat(arg1, p, arg3, p2, arg5));
  5074. unlock_user(p, arg2, 0);
  5075. unlock_user(p2, arg4, 0);
  5076. }
  5077. break;
  5078. #endif
  5079. case TARGET_NR_unlink:
  5080. if (!(p = lock_user_string(arg1)))
  5081. goto efault;
  5082. ret = get_errno(unlink(p));
  5083. unlock_user(p, arg1, 0);
  5084. break;
  5085. #if defined(TARGET_NR_unlinkat)
  5086. case TARGET_NR_unlinkat:
  5087. if (!(p = lock_user_string(arg2)))
  5088. goto efault;
  5089. ret = get_errno(unlinkat(arg1, p, arg3));
  5090. unlock_user(p, arg2, 0);
  5091. break;
  5092. #endif
  5093. case TARGET_NR_execve:
  5094. {
  5095. char **argp, **envp;
  5096. int argc, envc;
  5097. abi_ulong gp;
  5098. abi_ulong guest_argp;
  5099. abi_ulong guest_envp;
  5100. abi_ulong addr;
  5101. char **q;
  5102. int total_size = 0;
  5103. argc = 0;
  5104. guest_argp = arg2;
  5105. for (gp = guest_argp; gp; gp += sizeof(abi_ulong)) {
  5106. if (get_user_ual(addr, gp))
  5107. goto efault;
  5108. if (!addr)
  5109. break;
  5110. argc++;
  5111. }
  5112. envc = 0;
  5113. guest_envp = arg3;
  5114. for (gp = guest_envp; gp; gp += sizeof(abi_ulong)) {
  5115. if (get_user_ual(addr, gp))
  5116. goto efault;
  5117. if (!addr)
  5118. break;
  5119. envc++;
  5120. }
  5121. argp = alloca((argc + 1) * sizeof(void *));
  5122. envp = alloca((envc + 1) * sizeof(void *));
  5123. for (gp = guest_argp, q = argp; gp;
  5124. gp += sizeof(abi_ulong), q++) {
  5125. if (get_user_ual(addr, gp))
  5126. goto execve_efault;
  5127. if (!addr)
  5128. break;
  5129. if (!(*q = lock_user_string(addr)))
  5130. goto execve_efault;
  5131. total_size += strlen(*q) + 1;
  5132. }
  5133. *q = NULL;
  5134. for (gp = guest_envp, q = envp; gp;
  5135. gp += sizeof(abi_ulong), q++) {
  5136. if (get_user_ual(addr, gp))
  5137. goto execve_efault;
  5138. if (!addr)
  5139. break;
  5140. if (!(*q = lock_user_string(addr)))
  5141. goto execve_efault;
  5142. total_size += strlen(*q) + 1;
  5143. }
  5144. *q = NULL;
  5145. /* This case will not be caught by the host's execve() if its
  5146. page size is bigger than the target's. */
  5147. if (total_size > MAX_ARG_PAGES * TARGET_PAGE_SIZE) {
  5148. ret = -TARGET_E2BIG;
  5149. goto execve_end;
  5150. }
  5151. if (!(p = lock_user_string(arg1)))
  5152. goto execve_efault;
  5153. ret = get_errno(execve(p, argp, envp));
  5154. unlock_user(p, arg1, 0);
  5155. goto execve_end;
  5156. execve_efault:
  5157. ret = -TARGET_EFAULT;
  5158. execve_end:
  5159. for (gp = guest_argp, q = argp; *q;
  5160. gp += sizeof(abi_ulong), q++) {
  5161. if (get_user_ual(addr, gp)
  5162. || !addr)
  5163. break;
  5164. unlock_user(*q, addr, 0);
  5165. }
  5166. for (gp = guest_envp, q = envp; *q;
  5167. gp += sizeof(abi_ulong), q++) {
  5168. if (get_user_ual(addr, gp)
  5169. || !addr)
  5170. break;
  5171. unlock_user(*q, addr, 0);
  5172. }
  5173. }
  5174. break;
  5175. case TARGET_NR_chdir:
  5176. if (!(p = lock_user_string(arg1)))
  5177. goto efault;
  5178. ret = get_errno(chdir(p));
  5179. unlock_user(p, arg1, 0);
  5180. break;
  5181. #ifdef TARGET_NR_time
  5182. case TARGET_NR_time:
  5183. {
  5184. time_t host_time;
  5185. ret = get_errno(time(&host_time));
  5186. if (!is_error(ret)
  5187. && arg1
  5188. && put_user_sal(host_time, arg1))
  5189. goto efault;
  5190. }
  5191. break;
  5192. #endif
  5193. case TARGET_NR_mknod:
  5194. if (!(p = lock_user_string(arg1)))
  5195. goto efault;
  5196. ret = get_errno(mknod(p, arg2, arg3));
  5197. unlock_user(p, arg1, 0);
  5198. break;
  5199. #if defined(TARGET_NR_mknodat)
  5200. case TARGET_NR_mknodat:
  5201. if (!(p = lock_user_string(arg2)))
  5202. goto efault;
  5203. ret = get_errno(mknodat(arg1, p, arg3, arg4));
  5204. unlock_user(p, arg2, 0);
  5205. break;
  5206. #endif
  5207. case TARGET_NR_chmod:
  5208. if (!(p = lock_user_string(arg1)))
  5209. goto efault;
  5210. ret = get_errno(chmod(p, arg2));
  5211. unlock_user(p, arg1, 0);
  5212. break;
  5213. #ifdef TARGET_NR_break
  5214. case TARGET_NR_break:
  5215. goto unimplemented;
  5216. #endif
  5217. #ifdef TARGET_NR_oldstat
  5218. case TARGET_NR_oldstat:
  5219. goto unimplemented;
  5220. #endif
  5221. case TARGET_NR_lseek:
  5222. ret = get_errno(lseek(arg1, arg2, arg3));
  5223. break;
  5224. #if defined(TARGET_NR_getxpid) && defined(TARGET_ALPHA)
  5225. /* Alpha specific */
  5226. case TARGET_NR_getxpid:
  5227. ((CPUAlphaState *)cpu_env)->ir[IR_A4] = getppid();
  5228. ret = get_errno(getpid());
  5229. break;
  5230. #endif
  5231. #ifdef TARGET_NR_getpid
  5232. case TARGET_NR_getpid:
  5233. ret = get_errno(getpid());
  5234. break;
  5235. #endif
  5236. case TARGET_NR_mount:
  5237. {
  5238. /* need to look at the data field */
  5239. void *p2, *p3;
  5240. if (arg1) {
  5241. p = lock_user_string(arg1);
  5242. if (!p) {
  5243. goto efault;
  5244. }
  5245. } else {
  5246. p = NULL;
  5247. }
  5248. p2 = lock_user_string(arg2);
  5249. if (!p2) {
  5250. if (arg1) {
  5251. unlock_user(p, arg1, 0);
  5252. }
  5253. goto efault;
  5254. }
  5255. if (arg3) {
  5256. p3 = lock_user_string(arg3);
  5257. if (!p3) {
  5258. if (arg1) {
  5259. unlock_user(p, arg1, 0);
  5260. }
  5261. unlock_user(p2, arg2, 0);
  5262. goto efault;
  5263. }
  5264. } else {
  5265. p3 = NULL;
  5266. }
  5267. /* FIXME - arg5 should be locked, but it isn't clear how to
  5268. * do that since it's not guaranteed to be a NULL-terminated
  5269. * string.
  5270. */
  5271. if (!arg5) {
  5272. ret = mount(p, p2, p3, (unsigned long)arg4, NULL);
  5273. } else {
  5274. ret = mount(p, p2, p3, (unsigned long)arg4, g2h(arg5));
  5275. }
  5276. ret = get_errno(ret);
  5277. if (arg1) {
  5278. unlock_user(p, arg1, 0);
  5279. }
  5280. unlock_user(p2, arg2, 0);
  5281. if (arg3) {
  5282. unlock_user(p3, arg3, 0);
  5283. }
  5284. }
  5285. break;
  5286. #ifdef TARGET_NR_umount
  5287. case TARGET_NR_umount:
  5288. if (!(p = lock_user_string(arg1)))
  5289. goto efault;
  5290. ret = get_errno(umount(p));
  5291. unlock_user(p, arg1, 0);
  5292. break;
  5293. #endif
  5294. #ifdef TARGET_NR_stime /* not on alpha */
  5295. case TARGET_NR_stime:
  5296. {
  5297. time_t host_time;
  5298. if (get_user_sal(host_time, arg1))
  5299. goto efault;
  5300. ret = get_errno(stime(&host_time));
  5301. }
  5302. break;
  5303. #endif
  5304. case TARGET_NR_ptrace:
  5305. goto unimplemented;
  5306. #ifdef TARGET_NR_alarm /* not on alpha */
  5307. case TARGET_NR_alarm:
  5308. ret = alarm(arg1);
  5309. break;
  5310. #endif
  5311. #ifdef TARGET_NR_oldfstat
  5312. case TARGET_NR_oldfstat:
  5313. goto unimplemented;
  5314. #endif
  5315. #ifdef TARGET_NR_pause /* not on alpha */
  5316. case TARGET_NR_pause:
  5317. ret = get_errno(pause());
  5318. break;
  5319. #endif
  5320. #ifdef TARGET_NR_utime
  5321. case TARGET_NR_utime:
  5322. {
  5323. struct utimbuf tbuf, *host_tbuf;
  5324. struct target_utimbuf *target_tbuf;
  5325. if (arg2) {
  5326. if (!lock_user_struct(VERIFY_READ, target_tbuf, arg2, 1))
  5327. goto efault;
  5328. tbuf.actime = tswapal(target_tbuf->actime);
  5329. tbuf.modtime = tswapal(target_tbuf->modtime);
  5330. unlock_user_struct(target_tbuf, arg2, 0);
  5331. host_tbuf = &tbuf;
  5332. } else {
  5333. host_tbuf = NULL;
  5334. }
  5335. if (!(p = lock_user_string(arg1)))
  5336. goto efault;
  5337. ret = get_errno(utime(p, host_tbuf));
  5338. unlock_user(p, arg1, 0);
  5339. }
  5340. break;
  5341. #endif
  5342. case TARGET_NR_utimes:
  5343. {
  5344. struct timeval *tvp, tv[2];
  5345. if (arg2) {
  5346. if (copy_from_user_timeval(&tv[0], arg2)
  5347. || copy_from_user_timeval(&tv[1],
  5348. arg2 + sizeof(struct target_timeval)))
  5349. goto efault;
  5350. tvp = tv;
  5351. } else {
  5352. tvp = NULL;
  5353. }
  5354. if (!(p = lock_user_string(arg1)))
  5355. goto efault;
  5356. ret = get_errno(utimes(p, tvp));
  5357. unlock_user(p, arg1, 0);
  5358. }
  5359. break;
  5360. #if defined(TARGET_NR_futimesat)
  5361. case TARGET_NR_futimesat:
  5362. {
  5363. struct timeval *tvp, tv[2];
  5364. if (arg3) {
  5365. if (copy_from_user_timeval(&tv[0], arg3)
  5366. || copy_from_user_timeval(&tv[1],
  5367. arg3 + sizeof(struct target_timeval)))
  5368. goto efault;
  5369. tvp = tv;
  5370. } else {
  5371. tvp = NULL;
  5372. }
  5373. if (!(p = lock_user_string(arg2)))
  5374. goto efault;
  5375. ret = get_errno(futimesat(arg1, path(p), tvp));
  5376. unlock_user(p, arg2, 0);
  5377. }
  5378. break;
  5379. #endif
  5380. #ifdef TARGET_NR_stty
  5381. case TARGET_NR_stty:
  5382. goto unimplemented;
  5383. #endif
  5384. #ifdef TARGET_NR_gtty
  5385. case TARGET_NR_gtty:
  5386. goto unimplemented;
  5387. #endif
  5388. case TARGET_NR_access:
  5389. if (!(p = lock_user_string(arg1)))
  5390. goto efault;
  5391. ret = get_errno(access(path(p), arg2));
  5392. unlock_user(p, arg1, 0);
  5393. break;
  5394. #if defined(TARGET_NR_faccessat) && defined(__NR_faccessat)
  5395. case TARGET_NR_faccessat:
  5396. if (!(p = lock_user_string(arg2)))
  5397. goto efault;
  5398. ret = get_errno(faccessat(arg1, p, arg3, 0));
  5399. unlock_user(p, arg2, 0);
  5400. break;
  5401. #endif
  5402. #ifdef TARGET_NR_nice /* not on alpha */
  5403. case TARGET_NR_nice:
  5404. ret = get_errno(nice(arg1));
  5405. break;
  5406. #endif
  5407. #ifdef TARGET_NR_ftime
  5408. case TARGET_NR_ftime:
  5409. goto unimplemented;
  5410. #endif
  5411. case TARGET_NR_sync:
  5412. sync();
  5413. ret = 0;
  5414. break;
  5415. case TARGET_NR_kill:
  5416. ret = get_errno(kill(arg1, target_to_host_signal(arg2)));
  5417. break;
  5418. case TARGET_NR_rename:
  5419. {
  5420. void *p2;
  5421. p = lock_user_string(arg1);
  5422. p2 = lock_user_string(arg2);
  5423. if (!p || !p2)
  5424. ret = -TARGET_EFAULT;
  5425. else
  5426. ret = get_errno(rename(p, p2));
  5427. unlock_user(p2, arg2, 0);
  5428. unlock_user(p, arg1, 0);
  5429. }
  5430. break;
  5431. #if defined(TARGET_NR_renameat)
  5432. case TARGET_NR_renameat:
  5433. {
  5434. void *p2;
  5435. p = lock_user_string(arg2);
  5436. p2 = lock_user_string(arg4);
  5437. if (!p || !p2)
  5438. ret = -TARGET_EFAULT;
  5439. else
  5440. ret = get_errno(renameat(arg1, p, arg3, p2));
  5441. unlock_user(p2, arg4, 0);
  5442. unlock_user(p, arg2, 0);
  5443. }
  5444. break;
  5445. #endif
  5446. case TARGET_NR_mkdir:
  5447. if (!(p = lock_user_string(arg1)))
  5448. goto efault;
  5449. ret = get_errno(mkdir(p, arg2));
  5450. unlock_user(p, arg1, 0);
  5451. break;
  5452. #if defined(TARGET_NR_mkdirat)
  5453. case TARGET_NR_mkdirat:
  5454. if (!(p = lock_user_string(arg2)))
  5455. goto efault;
  5456. ret = get_errno(mkdirat(arg1, p, arg3));
  5457. unlock_user(p, arg2, 0);
  5458. break;
  5459. #endif
  5460. case TARGET_NR_rmdir:
  5461. if (!(p = lock_user_string(arg1)))
  5462. goto efault;
  5463. ret = get_errno(rmdir(p));
  5464. unlock_user(p, arg1, 0);
  5465. break;
  5466. case TARGET_NR_dup:
  5467. ret = get_errno(dup(arg1));
  5468. break;
  5469. case TARGET_NR_pipe:
  5470. ret = do_pipe(cpu_env, arg1, 0, 0);
  5471. break;
  5472. #ifdef TARGET_NR_pipe2
  5473. case TARGET_NR_pipe2:
  5474. ret = do_pipe(cpu_env, arg1,
  5475. target_to_host_bitmask(arg2, fcntl_flags_tbl), 1);
  5476. break;
  5477. #endif
  5478. case TARGET_NR_times:
  5479. {
  5480. struct target_tms *tmsp;
  5481. struct tms tms;
  5482. ret = get_errno(times(&tms));
  5483. if (arg1) {
  5484. tmsp = lock_user(VERIFY_WRITE, arg1, sizeof(struct target_tms), 0);
  5485. if (!tmsp)
  5486. goto efault;
  5487. tmsp->tms_utime = tswapal(host_to_target_clock_t(tms.tms_utime));
  5488. tmsp->tms_stime = tswapal(host_to_target_clock_t(tms.tms_stime));
  5489. tmsp->tms_cutime = tswapal(host_to_target_clock_t(tms.tms_cutime));
  5490. tmsp->tms_cstime = tswapal(host_to_target_clock_t(tms.tms_cstime));
  5491. }
  5492. if (!is_error(ret))
  5493. ret = host_to_target_clock_t(ret);
  5494. }
  5495. break;
  5496. #ifdef TARGET_NR_prof
  5497. case TARGET_NR_prof:
  5498. goto unimplemented;
  5499. #endif
  5500. #ifdef TARGET_NR_signal
  5501. case TARGET_NR_signal:
  5502. goto unimplemented;
  5503. #endif
  5504. case TARGET_NR_acct:
  5505. if (arg1 == 0) {
  5506. ret = get_errno(acct(NULL));
  5507. } else {
  5508. if (!(p = lock_user_string(arg1)))
  5509. goto efault;
  5510. ret = get_errno(acct(path(p)));
  5511. unlock_user(p, arg1, 0);
  5512. }
  5513. break;
  5514. #ifdef TARGET_NR_umount2
  5515. case TARGET_NR_umount2:
  5516. if (!(p = lock_user_string(arg1)))
  5517. goto efault;
  5518. ret = get_errno(umount2(p, arg2));
  5519. unlock_user(p, arg1, 0);
  5520. break;
  5521. #endif
  5522. #ifdef TARGET_NR_lock
  5523. case TARGET_NR_lock:
  5524. goto unimplemented;
  5525. #endif
  5526. case TARGET_NR_ioctl:
  5527. ret = do_ioctl(arg1, arg2, arg3);
  5528. break;
  5529. case TARGET_NR_fcntl:
  5530. ret = do_fcntl(arg1, arg2, arg3);
  5531. break;
  5532. #ifdef TARGET_NR_mpx
  5533. case TARGET_NR_mpx:
  5534. goto unimplemented;
  5535. #endif
  5536. case TARGET_NR_setpgid:
  5537. ret = get_errno(setpgid(arg1, arg2));
  5538. break;
  5539. #ifdef TARGET_NR_ulimit
  5540. case TARGET_NR_ulimit:
  5541. goto unimplemented;
  5542. #endif
  5543. #ifdef TARGET_NR_oldolduname
  5544. case TARGET_NR_oldolduname:
  5545. goto unimplemented;
  5546. #endif
  5547. case TARGET_NR_umask:
  5548. ret = get_errno(umask(arg1));
  5549. break;
  5550. case TARGET_NR_chroot:
  5551. if (!(p = lock_user_string(arg1)))
  5552. goto efault;
  5553. ret = get_errno(chroot(p));
  5554. unlock_user(p, arg1, 0);
  5555. break;
  5556. case TARGET_NR_ustat:
  5557. goto unimplemented;
  5558. case TARGET_NR_dup2:
  5559. ret = get_errno(dup2(arg1, arg2));
  5560. break;
  5561. #if defined(CONFIG_DUP3) && defined(TARGET_NR_dup3)
  5562. case TARGET_NR_dup3:
  5563. ret = get_errno(dup3(arg1, arg2, arg3));
  5564. break;
  5565. #endif
  5566. #ifdef TARGET_NR_getppid /* not on alpha */
  5567. case TARGET_NR_getppid:
  5568. ret = get_errno(getppid());
  5569. break;
  5570. #endif
  5571. case TARGET_NR_getpgrp:
  5572. ret = get_errno(getpgrp());
  5573. break;
  5574. case TARGET_NR_setsid:
  5575. ret = get_errno(setsid());
  5576. break;
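/* Old-style sigaction: convert between the guest's old sigaction
 * layout and the internal target_sigaction used by do_sigaction().
 * Alpha and MIPS have their own layouts, hence the per-target
 * variants below.
 */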
  5577. #ifdef TARGET_NR_sigaction
  5578. case TARGET_NR_sigaction:
  5579. {
  5580. #if defined(TARGET_ALPHA)
  5581. struct target_sigaction act, oact, *pact = 0;
  5582. struct target_old_sigaction *old_act;
  5583. if (arg2) {
  5584. if (!lock_user_struct(VERIFY_READ, old_act, arg2, 1))
  5585. goto efault;
  5586. act._sa_handler = old_act->_sa_handler;
  5587. target_siginitset(&act.sa_mask, old_act->sa_mask);
  5588. act.sa_flags = old_act->sa_flags;
  5589. act.sa_restorer = 0;
  5590. unlock_user_struct(old_act, arg2, 0);
  5591. pact = &act;
  5592. }
  5593. ret = get_errno(do_sigaction(arg1, pact, &oact));
  5594. if (!is_error(ret) && arg3) {
  5595. if (!lock_user_struct(VERIFY_WRITE, old_act, arg3, 0))
  5596. goto efault;
  5597. old_act->_sa_handler = oact._sa_handler;
  5598. old_act->sa_mask = oact.sa_mask.sig[0];
  5599. old_act->sa_flags = oact.sa_flags;
  5600. unlock_user_struct(old_act, arg3, 1);
  5601. }
  5602. #elif defined(TARGET_MIPS)
  5603. struct target_sigaction act, oact, *pact, *old_act;
  5604. if (arg2) {
  5605. if (!lock_user_struct(VERIFY_READ, old_act, arg2, 1))
  5606. goto efault;
  5607. act._sa_handler = old_act->_sa_handler;
  5608. target_siginitset(&act.sa_mask, old_act->sa_mask.sig[0]);
  5609. act.sa_flags = old_act->sa_flags;
  5610. unlock_user_struct(old_act, arg2, 0);
  5611. pact = &act;
  5612. } else {
  5613. pact = NULL;
  5614. }
  5615. ret = get_errno(do_sigaction(arg1, pact, &oact));
  5616. if (!is_error(ret) && arg3) {
  5617. if (!lock_user_struct(VERIFY_WRITE, old_act, arg3, 0))
  5618. goto efault;
  5619. old_act->_sa_handler = oact._sa_handler;
  5620. old_act->sa_flags = oact.sa_flags;
  5621. old_act->sa_mask.sig[0] = oact.sa_mask.sig[0];
  5622. old_act->sa_mask.sig[1] = 0;
  5623. old_act->sa_mask.sig[2] = 0;
  5624. old_act->sa_mask.sig[3] = 0;
  5625. unlock_user_struct(old_act, arg3, 1);
  5626. }
  5627. #else
  5628. struct target_old_sigaction *old_act;
  5629. struct target_sigaction act, oact, *pact;
  5630. if (arg2) {
  5631. if (!lock_user_struct(VERIFY_READ, old_act, arg2, 1))
  5632. goto efault;
  5633. act._sa_handler = old_act->_sa_handler;
  5634. target_siginitset(&act.sa_mask, old_act->sa_mask);
  5635. act.sa_flags = old_act->sa_flags;
  5636. act.sa_restorer = old_act->sa_restorer;
  5637. unlock_user_struct(old_act, arg2, 0);
  5638. pact = &act;
  5639. } else {
  5640. pact = NULL;
  5641. }
  5642. ret = get_errno(do_sigaction(arg1, pact, &oact));
  5643. if (!is_error(ret) && arg3) {
  5644. if (!lock_user_struct(VERIFY_WRITE, old_act, arg3, 0))
  5645. goto efault;
  5646. old_act->_sa_handler = oact._sa_handler;
  5647. old_act->sa_mask = oact.sa_mask.sig[0];
  5648. old_act->sa_flags = oact.sa_flags;
  5649. old_act->sa_restorer = oact.sa_restorer;
  5650. unlock_user_struct(old_act, arg3, 1);
  5651. }
  5652. #endif
  5653. }
  5654. break;
  5655. #endif
  5656. case TARGET_NR_rt_sigaction:
  5657. {
  5658. #if defined(TARGET_ALPHA)
  5659. struct target_sigaction act, oact, *pact = 0;
  5660. struct target_rt_sigaction *rt_act;
  5661. /* ??? arg4 == sizeof(sigset_t). */
  5662. if (arg2) {
  5663. if (!lock_user_struct(VERIFY_READ, rt_act, arg2, 1))
  5664. goto efault;
  5665. act._sa_handler = rt_act->_sa_handler;
  5666. act.sa_mask = rt_act->sa_mask;
  5667. act.sa_flags = rt_act->sa_flags;
  5668. act.sa_restorer = arg5;
  5669. unlock_user_struct(rt_act, arg2, 0);
  5670. pact = &act;
  5671. }
  5672. ret = get_errno(do_sigaction(arg1, pact, &oact));
  5673. if (!is_error(ret) && arg3) {
  5674. if (!lock_user_struct(VERIFY_WRITE, rt_act, arg3, 0))
  5675. goto efault;
  5676. rt_act->_sa_handler = oact._sa_handler;
  5677. rt_act->sa_mask = oact.sa_mask;
  5678. rt_act->sa_flags = oact.sa_flags;
  5679. unlock_user_struct(rt_act, arg3, 1);
  5680. }
  5681. #else
  5682. struct target_sigaction *act;
  5683. struct target_sigaction *oact;
  5684. if (arg2) {
  5685. if (!lock_user_struct(VERIFY_READ, act, arg2, 1))
  5686. goto efault;
  5687. } else
  5688. act = NULL;
  5689. if (arg3) {
  5690. if (!lock_user_struct(VERIFY_WRITE, oact, arg3, 0)) {
  5691. ret = -TARGET_EFAULT;
  5692. goto rt_sigaction_fail;
  5693. }
  5694. } else
  5695. oact = NULL;
  5696. ret = get_errno(do_sigaction(arg1, act, oact));
  5697. rt_sigaction_fail:
  5698. if (act)
  5699. unlock_user_struct(act, arg2, 0);
  5700. if (oact)
  5701. unlock_user_struct(oact, arg3, 1);
  5702. #endif
  5703. }
  5704. break;
  5705. #ifdef TARGET_NR_sgetmask /* not on alpha */
  5706. case TARGET_NR_sgetmask:
  5707. {
  5708. sigset_t cur_set;
  5709. abi_ulong target_set;
  5710. do_sigprocmask(0, NULL, &cur_set);
  5711. host_to_target_old_sigset(&target_set, &cur_set);
  5712. ret = target_set;
  5713. }
  5714. break;
  5715. #endif
  5716. #ifdef TARGET_NR_ssetmask /* not on alpha */
  5717. case TARGET_NR_ssetmask:
  5718. {
  5719. sigset_t set, oset, cur_set;
  5720. abi_ulong target_set = arg1;
  5721. do_sigprocmask(0, NULL, &cur_set);
  5722. target_to_host_old_sigset(&set, &target_set);
  5723. sigorset(&set, &set, &cur_set);
  5724. do_sigprocmask(SIG_SETMASK, &set, &oset);
  5725. host_to_target_old_sigset(&target_set, &oset);
  5726. ret = target_set;
  5727. }
  5728. break;
  5729. #endif
  5730. #ifdef TARGET_NR_sigprocmask
  5731. case TARGET_NR_sigprocmask:
  5732. {
  5733. #if defined(TARGET_ALPHA)
  5734. sigset_t set, oldset;
  5735. abi_ulong mask;
  5736. int how;
  5737. switch (arg1) {
  5738. case TARGET_SIG_BLOCK:
  5739. how = SIG_BLOCK;
  5740. break;
  5741. case TARGET_SIG_UNBLOCK:
  5742. how = SIG_UNBLOCK;
  5743. break;
  5744. case TARGET_SIG_SETMASK:
  5745. how = SIG_SETMASK;
  5746. break;
  5747. default:
  5748. ret = -TARGET_EINVAL;
  5749. goto fail;
  5750. }
  5751. mask = arg2;
  5752. target_to_host_old_sigset(&set, &mask);
  5753. ret = get_errno(do_sigprocmask(how, &set, &oldset));
  5754. if (!is_error(ret)) {
  5755. host_to_target_old_sigset(&mask, &oldset);
  5756. ret = mask;
  5757. ((CPUAlphaState *)cpu_env)->ir[IR_V0] = 0; /* force no error */
  5758. }
  5759. #else
  5760. sigset_t set, oldset, *set_ptr;
  5761. int how;
  5762. if (arg2) {
  5763. switch (arg1) {
  5764. case TARGET_SIG_BLOCK:
  5765. how = SIG_BLOCK;
  5766. break;
  5767. case TARGET_SIG_UNBLOCK:
  5768. how = SIG_UNBLOCK;
  5769. break;
  5770. case TARGET_SIG_SETMASK:
  5771. how = SIG_SETMASK;
  5772. break;
  5773. default:
  5774. ret = -TARGET_EINVAL;
  5775. goto fail;
  5776. }
  5777. if (!(p = lock_user(VERIFY_READ, arg2, sizeof(target_sigset_t), 1)))
  5778. goto efault;
  5779. target_to_host_old_sigset(&set, p);
  5780. unlock_user(p, arg2, 0);
  5781. set_ptr = &set;
  5782. } else {
  5783. how = 0;
  5784. set_ptr = NULL;
  5785. }
  5786. ret = get_errno(do_sigprocmask(how, set_ptr, &oldset));
  5787. if (!is_error(ret) && arg3) {
  5788. if (!(p = lock_user(VERIFY_WRITE, arg3, sizeof(target_sigset_t), 0)))
  5789. goto efault;
  5790. host_to_target_old_sigset(p, &oldset);
  5791. unlock_user(p, arg3, sizeof(target_sigset_t));
  5792. }
  5793. #endif
  5794. }
  5795. break;
  5796. #endif
  5797. case TARGET_NR_rt_sigprocmask:
  5798. {
  5799. int how = arg1;
  5800. sigset_t set, oldset, *set_ptr;
  5801. if (arg2) {
  5802. switch(how) {
  5803. case TARGET_SIG_BLOCK:
  5804. how = SIG_BLOCK;
  5805. break;
  5806. case TARGET_SIG_UNBLOCK:
  5807. how = SIG_UNBLOCK;
  5808. break;
  5809. case TARGET_SIG_SETMASK:
  5810. how = SIG_SETMASK;
  5811. break;
  5812. default:
  5813. ret = -TARGET_EINVAL;
  5814. goto fail;
  5815. }
  5816. if (!(p = lock_user(VERIFY_READ, arg2, sizeof(target_sigset_t), 1)))
  5817. goto efault;
  5818. target_to_host_sigset(&set, p);
  5819. unlock_user(p, arg2, 0);
  5820. set_ptr = &set;
  5821. } else {
  5822. how = 0;
  5823. set_ptr = NULL;
  5824. }
  5825. ret = get_errno(do_sigprocmask(how, set_ptr, &oldset));
  5826. if (!is_error(ret) && arg3) {
  5827. if (!(p = lock_user(VERIFY_WRITE, arg3, sizeof(target_sigset_t), 0)))
  5828. goto efault;
  5829. host_to_target_sigset(p, &oldset);
  5830. unlock_user(p, arg3, sizeof(target_sigset_t));
  5831. }
  5832. }
  5833. break;
  5834. #ifdef TARGET_NR_sigpending
  5835. case TARGET_NR_sigpending:
  5836. {
  5837. sigset_t set;
  5838. ret = get_errno(sigpending(&set));
  5839. if (!is_error(ret)) {
  5840. if (!(p = lock_user(VERIFY_WRITE, arg1, sizeof(target_sigset_t), 0)))
  5841. goto efault;
  5842. host_to_target_old_sigset(p, &set);
  5843. unlock_user(p, arg1, sizeof(target_sigset_t));
  5844. }
  5845. }
  5846. break;
  5847. #endif
  5848. case TARGET_NR_rt_sigpending:
  5849. {
  5850. sigset_t set;
  5851. ret = get_errno(sigpending(&set));
  5852. if (!is_error(ret)) {
  5853. if (!(p = lock_user(VERIFY_WRITE, arg1, sizeof(target_sigset_t), 0)))
  5854. goto efault;
  5855. host_to_target_sigset(p, &set);
  5856. unlock_user(p, arg1, sizeof(target_sigset_t));
  5857. }
  5858. }
  5859. break;
  5860. #ifdef TARGET_NR_sigsuspend
  5861. case TARGET_NR_sigsuspend:
  5862. {
  5863. sigset_t set;
  5864. #if defined(TARGET_ALPHA)
  5865. abi_ulong mask = arg1;
  5866. target_to_host_old_sigset(&set, &mask);
  5867. #else
  5868. if (!(p = lock_user(VERIFY_READ, arg1, sizeof(target_sigset_t), 1)))
  5869. goto efault;
  5870. target_to_host_old_sigset(&set, p);
  5871. unlock_user(p, arg1, 0);
  5872. #endif
  5873. ret = get_errno(sigsuspend(&set));
  5874. }
  5875. break;
  5876. #endif
  5877. case TARGET_NR_rt_sigsuspend:
  5878. {
  5879. sigset_t set;
  5880. if (!(p = lock_user(VERIFY_READ, arg1, sizeof(target_sigset_t), 1)))
  5881. goto efault;
  5882. target_to_host_sigset(&set, p);
  5883. unlock_user(p, arg1, 0);
  5884. ret = get_errno(sigsuspend(&set));
  5885. }
  5886. break;
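/* rt_sigtimedwait: on success the siginfo for the caught signal is
 * copied back to the guest (when arg2 is non-NULL) and the host
 * signal number is translated back to the target numbering.
 */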
  5887. case TARGET_NR_rt_sigtimedwait:
  5888. {
  5889. sigset_t set;
  5890. struct timespec uts, *puts;
  5891. siginfo_t uinfo;
  5892. if (!(p = lock_user(VERIFY_READ, arg1, sizeof(target_sigset_t), 1)))
  5893. goto efault;
  5894. target_to_host_sigset(&set, p);
  5895. unlock_user(p, arg1, 0);
  5896. if (arg3) {
  5897. puts = &uts;
  5898. target_to_host_timespec(puts, arg3);
  5899. } else {
  5900. puts = NULL;
  5901. }
  5902. ret = get_errno(sigtimedwait(&set, &uinfo, puts));
  5903. if (!is_error(ret)) {
  5904. if (arg2) {
  5905. p = lock_user(VERIFY_WRITE, arg2, sizeof(target_siginfo_t),
  5906. 0);
  5907. if (!p) {
  5908. goto efault;
  5909. }
  5910. host_to_target_siginfo(p, &uinfo);
  5911. unlock_user(p, arg2, sizeof(target_siginfo_t));
  5912. }
  5913. ret = host_to_target_signal(ret);
  5914. }
  5915. }
  5916. break;
  5917. case TARGET_NR_rt_sigqueueinfo:
  5918. {
  5919. siginfo_t uinfo;
5920. if (!(p = lock_user(VERIFY_READ, arg3, sizeof(target_siginfo_t), 1)))
  5921. goto efault;
  5922. target_to_host_siginfo(&uinfo, p);
5923. unlock_user(p, arg3, 0);
  5924. ret = get_errno(sys_rt_sigqueueinfo(arg1, arg2, &uinfo));
  5925. }
  5926. break;
  5927. #ifdef TARGET_NR_sigreturn
  5928. case TARGET_NR_sigreturn:
5929. /* NOTE: ret is eax, so no transcoding needs to be done */
  5930. ret = do_sigreturn(cpu_env);
  5931. break;
  5932. #endif
  5933. case TARGET_NR_rt_sigreturn:
5934. /* NOTE: ret is eax, so no transcoding needs to be done */
  5935. ret = do_rt_sigreturn(cpu_env);
  5936. break;
  5937. case TARGET_NR_sethostname:
  5938. if (!(p = lock_user_string(arg1)))
  5939. goto efault;
  5940. ret = get_errno(sethostname(p, arg2));
  5941. unlock_user(p, arg1, 0);
  5942. break;
  5943. case TARGET_NR_setrlimit:
  5944. {
  5945. int resource = target_to_host_resource(arg1);
  5946. struct target_rlimit *target_rlim;
  5947. struct rlimit rlim;
  5948. if (!lock_user_struct(VERIFY_READ, target_rlim, arg2, 1))
  5949. goto efault;
  5950. rlim.rlim_cur = target_to_host_rlim(target_rlim->rlim_cur);
  5951. rlim.rlim_max = target_to_host_rlim(target_rlim->rlim_max);
  5952. unlock_user_struct(target_rlim, arg2, 0);
  5953. ret = get_errno(setrlimit(resource, &rlim));
  5954. }
  5955. break;
  5956. case TARGET_NR_getrlimit:
  5957. {
  5958. int resource = target_to_host_resource(arg1);
  5959. struct target_rlimit *target_rlim;
  5960. struct rlimit rlim;
  5961. ret = get_errno(getrlimit(resource, &rlim));
  5962. if (!is_error(ret)) {
  5963. if (!lock_user_struct(VERIFY_WRITE, target_rlim, arg2, 0))
  5964. goto efault;
  5965. target_rlim->rlim_cur = host_to_target_rlim(rlim.rlim_cur);
  5966. target_rlim->rlim_max = host_to_target_rlim(rlim.rlim_max);
  5967. unlock_user_struct(target_rlim, arg2, 1);
  5968. }
  5969. }
  5970. break;
  5971. case TARGET_NR_getrusage:
  5972. {
  5973. struct rusage rusage;
  5974. ret = get_errno(getrusage(arg1, &rusage));
  5975. if (!is_error(ret)) {
  5976. ret = host_to_target_rusage(arg2, &rusage);
  5977. }
  5978. }
  5979. break;
  5980. case TARGET_NR_gettimeofday:
  5981. {
  5982. struct timeval tv;
  5983. ret = get_errno(gettimeofday(&tv, NULL));
  5984. if (!is_error(ret)) {
  5985. if (copy_to_user_timeval(arg1, &tv))
  5986. goto efault;
  5987. }
  5988. }
  5989. break;
  5990. case TARGET_NR_settimeofday:
  5991. {
  5992. struct timeval tv, *ptv = NULL;
  5993. struct timezone tz, *ptz = NULL;
  5994. if (arg1) {
  5995. if (copy_from_user_timeval(&tv, arg1)) {
  5996. goto efault;
  5997. }
  5998. ptv = &tv;
  5999. }
  6000. if (arg2) {
  6001. if (copy_from_user_timezone(&tz, arg2)) {
  6002. goto efault;
  6003. }
  6004. ptz = &tz;
  6005. }
  6006. ret = get_errno(settimeofday(ptv, ptz));
  6007. }
  6008. break;
  6009. #if defined(TARGET_NR_select)
  6010. case TARGET_NR_select:
  6011. #if defined(TARGET_S390X) || defined(TARGET_ALPHA)
  6012. ret = do_select(arg1, arg2, arg3, arg4, arg5);
  6013. #else
  6014. {
  6015. struct target_sel_arg_struct *sel;
  6016. abi_ulong inp, outp, exp, tvp;
  6017. long nsel;
  6018. if (!lock_user_struct(VERIFY_READ, sel, arg1, 1))
  6019. goto efault;
  6020. nsel = tswapal(sel->n);
  6021. inp = tswapal(sel->inp);
  6022. outp = tswapal(sel->outp);
  6023. exp = tswapal(sel->exp);
  6024. tvp = tswapal(sel->tvp);
  6025. unlock_user_struct(sel, arg1, 0);
  6026. ret = do_select(nsel, inp, outp, exp, tvp);
  6027. }
  6028. #endif
  6029. break;
  6030. #endif
  6031. #ifdef TARGET_NR_pselect6
  6032. case TARGET_NR_pselect6:
  6033. {
  6034. abi_long rfd_addr, wfd_addr, efd_addr, n, ts_addr;
  6035. fd_set rfds, wfds, efds;
  6036. fd_set *rfds_ptr, *wfds_ptr, *efds_ptr;
  6037. struct timespec ts, *ts_ptr;
  6038. /*
  6039. * The 6th arg is actually two args smashed together,
  6040. * so we cannot use the C library.
  6041. */
  6042. sigset_t set;
  6043. struct {
  6044. sigset_t *set;
  6045. size_t size;
  6046. } sig, *sig_ptr;
  6047. abi_ulong arg_sigset, arg_sigsize, *arg7;
  6048. target_sigset_t *target_sigset;
  6049. n = arg1;
  6050. rfd_addr = arg2;
  6051. wfd_addr = arg3;
  6052. efd_addr = arg4;
  6053. ts_addr = arg5;
  6054. ret = copy_from_user_fdset_ptr(&rfds, &rfds_ptr, rfd_addr, n);
  6055. if (ret) {
  6056. goto fail;
  6057. }
  6058. ret = copy_from_user_fdset_ptr(&wfds, &wfds_ptr, wfd_addr, n);
  6059. if (ret) {
  6060. goto fail;
  6061. }
  6062. ret = copy_from_user_fdset_ptr(&efds, &efds_ptr, efd_addr, n);
  6063. if (ret) {
  6064. goto fail;
  6065. }
  6066. /*
  6067. * This takes a timespec, and not a timeval, so we cannot
  6068. * use the do_select() helper ...
  6069. */
  6070. if (ts_addr) {
  6071. if (target_to_host_timespec(&ts, ts_addr)) {
  6072. goto efault;
  6073. }
  6074. ts_ptr = &ts;
  6075. } else {
  6076. ts_ptr = NULL;
  6077. }
  6078. /* Extract the two packed args for the sigset */
  6079. if (arg6) {
  6080. sig_ptr = &sig;
  6081. sig.size = _NSIG / 8;
  6082. arg7 = lock_user(VERIFY_READ, arg6, sizeof(*arg7) * 2, 1);
  6083. if (!arg7) {
  6084. goto efault;
  6085. }
  6086. arg_sigset = tswapal(arg7[0]);
  6087. arg_sigsize = tswapal(arg7[1]);
  6088. unlock_user(arg7, arg6, 0);
  6089. if (arg_sigset) {
  6090. sig.set = &set;
  6091. if (arg_sigsize != sizeof(*target_sigset)) {
  6092. /* Like the kernel, we enforce correct size sigsets */
  6093. ret = -TARGET_EINVAL;
  6094. goto fail;
  6095. }
  6096. target_sigset = lock_user(VERIFY_READ, arg_sigset,
  6097. sizeof(*target_sigset), 1);
  6098. if (!target_sigset) {
  6099. goto efault;
  6100. }
  6101. target_to_host_sigset(&set, target_sigset);
  6102. unlock_user(target_sigset, arg_sigset, 0);
  6103. } else {
  6104. sig.set = NULL;
  6105. }
  6106. } else {
  6107. sig_ptr = NULL;
  6108. }
  6109. ret = get_errno(sys_pselect6(n, rfds_ptr, wfds_ptr, efds_ptr,
  6110. ts_ptr, sig_ptr));
  6111. if (!is_error(ret)) {
  6112. if (rfd_addr && copy_to_user_fdset(rfd_addr, &rfds, n))
  6113. goto efault;
  6114. if (wfd_addr && copy_to_user_fdset(wfd_addr, &wfds, n))
  6115. goto efault;
  6116. if (efd_addr && copy_to_user_fdset(efd_addr, &efds, n))
  6117. goto efault;
  6118. if (ts_addr && host_to_target_timespec(ts_addr, &ts))
  6119. goto efault;
  6120. }
  6121. }
  6122. break;
  6123. #endif
  6124. case TARGET_NR_symlink:
  6125. {
  6126. void *p2;
  6127. p = lock_user_string(arg1);
  6128. p2 = lock_user_string(arg2);
  6129. if (!p || !p2)
  6130. ret = -TARGET_EFAULT;
  6131. else
  6132. ret = get_errno(symlink(p, p2));
  6133. unlock_user(p2, arg2, 0);
  6134. unlock_user(p, arg1, 0);
  6135. }
  6136. break;
  6137. #if defined(TARGET_NR_symlinkat)
  6138. case TARGET_NR_symlinkat:
  6139. {
  6140. void *p2;
  6141. p = lock_user_string(arg1);
  6142. p2 = lock_user_string(arg3);
  6143. if (!p || !p2)
  6144. ret = -TARGET_EFAULT;
  6145. else
  6146. ret = get_errno(symlinkat(p, arg2, p2));
  6147. unlock_user(p2, arg3, 0);
  6148. unlock_user(p, arg1, 0);
  6149. }
  6150. break;
  6151. #endif
  6152. #ifdef TARGET_NR_oldlstat
  6153. case TARGET_NR_oldlstat:
  6154. goto unimplemented;
  6155. #endif
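/* readlink: a readlink on the guest's own /proc ".../exe" entry is
 * intercepted (is_proc_myself) so the guest sees the path of the
 * emulated binary rather than that of QEMU itself.
 */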
  6156. case TARGET_NR_readlink:
  6157. {
  6158. void *p2;
  6159. p = lock_user_string(arg1);
  6160. p2 = lock_user(VERIFY_WRITE, arg2, arg3, 0);
  6161. if (!p || !p2) {
  6162. ret = -TARGET_EFAULT;
  6163. } else if (!arg3) {
  6164. /* Short circuit this for the magic exe check. */
  6165. ret = -TARGET_EINVAL;
  6166. } else if (is_proc_myself((const char *)p, "exe")) {
  6167. char real[PATH_MAX], *temp;
  6168. temp = realpath(exec_path, real);
  6169. /* Return value is # of bytes that we wrote to the buffer. */
  6170. if (temp == NULL) {
  6171. ret = get_errno(-1);
  6172. } else {
  6173. /* Don't worry about sign mismatch as earlier mapping
  6174. * logic would have thrown a bad address error. */
  6175. ret = MIN(strlen(real), arg3);
  6176. /* We cannot NUL terminate the string. */
  6177. memcpy(p2, real, ret);
  6178. }
  6179. } else {
  6180. ret = get_errno(readlink(path(p), p2, arg3));
  6181. }
  6182. unlock_user(p2, arg2, ret);
  6183. unlock_user(p, arg1, 0);
  6184. }
  6185. break;
  6186. #if defined(TARGET_NR_readlinkat)
  6187. case TARGET_NR_readlinkat:
  6188. {
  6189. void *p2;
  6190. p = lock_user_string(arg2);
  6191. p2 = lock_user(VERIFY_WRITE, arg3, arg4, 0);
  6192. if (!p || !p2) {
  6193. ret = -TARGET_EFAULT;
  6194. } else if (is_proc_myself((const char *)p, "exe")) {
  6195. char real[PATH_MAX], *temp;
  6196. temp = realpath(exec_path, real);
6197. ret = temp == NULL ? get_errno(-1) : strlen(real);
  6198. snprintf((char *)p2, arg4, "%s", real);
  6199. } else {
  6200. ret = get_errno(readlinkat(arg1, path(p), p2, arg4));
  6201. }
  6202. unlock_user(p2, arg3, ret);
  6203. unlock_user(p, arg2, 0);
  6204. }
  6205. break;
  6206. #endif
  6207. #ifdef TARGET_NR_uselib
  6208. case TARGET_NR_uselib:
  6209. goto unimplemented;
  6210. #endif
  6211. #ifdef TARGET_NR_swapon
  6212. case TARGET_NR_swapon:
  6213. if (!(p = lock_user_string(arg1)))
  6214. goto efault;
  6215. ret = get_errno(swapon(p, arg2));
  6216. unlock_user(p, arg1, 0);
  6217. break;
  6218. #endif
  6219. case TARGET_NR_reboot:
  6220. if (arg3 == LINUX_REBOOT_CMD_RESTART2) {
6221. /* arg4 (the restart command string) is only used with LINUX_REBOOT_CMD_RESTART2 and must be ignored in all other cases */
  6222. p = lock_user_string(arg4);
  6223. if (!p) {
  6224. goto efault;
  6225. }
  6226. ret = get_errno(reboot(arg1, arg2, arg3, p));
  6227. unlock_user(p, arg4, 0);
  6228. } else {
  6229. ret = get_errno(reboot(arg1, arg2, arg3, NULL));
  6230. }
  6231. break;
  6232. #ifdef TARGET_NR_readdir
  6233. case TARGET_NR_readdir:
  6234. goto unimplemented;
  6235. #endif
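/* mmap: on the targets listed below the guest passes the six mmap
 * arguments in a block of abi_ulongs pointed to by arg1, so they
 * have to be fetched from guest memory before calling target_mmap().
 */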
  6236. #ifdef TARGET_NR_mmap
  6237. case TARGET_NR_mmap:
  6238. #if (defined(TARGET_I386) && defined(TARGET_ABI32)) || \
  6239. (defined(TARGET_ARM) && defined(TARGET_ABI32)) || \
  6240. defined(TARGET_M68K) || defined(TARGET_CRIS) || defined(TARGET_MICROBLAZE) \
  6241. || defined(TARGET_S390X)
  6242. {
  6243. abi_ulong *v;
  6244. abi_ulong v1, v2, v3, v4, v5, v6;
  6245. if (!(v = lock_user(VERIFY_READ, arg1, 6 * sizeof(abi_ulong), 1)))
  6246. goto efault;
  6247. v1 = tswapal(v[0]);
  6248. v2 = tswapal(v[1]);
  6249. v3 = tswapal(v[2]);
  6250. v4 = tswapal(v[3]);
  6251. v5 = tswapal(v[4]);
  6252. v6 = tswapal(v[5]);
  6253. unlock_user(v, arg1, 0);
  6254. ret = get_errno(target_mmap(v1, v2, v3,
  6255. target_to_host_bitmask(v4, mmap_flags_tbl),
  6256. v5, v6));
  6257. }
  6258. #else
  6259. ret = get_errno(target_mmap(arg1, arg2, arg3,
  6260. target_to_host_bitmask(arg4, mmap_flags_tbl),
  6261. arg5,
  6262. arg6));
  6263. #endif
  6264. break;
  6265. #endif
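/* mmap2 takes its file offset in units of (1 << MMAP_SHIFT) bytes
 * (normally 4096), so scale it back up to a byte offset before
 * calling target_mmap().
 */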
  6266. #ifdef TARGET_NR_mmap2
  6267. case TARGET_NR_mmap2:
  6268. #ifndef MMAP_SHIFT
  6269. #define MMAP_SHIFT 12
  6270. #endif
  6271. ret = get_errno(target_mmap(arg1, arg2, arg3,
  6272. target_to_host_bitmask(arg4, mmap_flags_tbl),
  6273. arg5,
  6274. arg6 << MMAP_SHIFT));
  6275. break;
  6276. #endif
  6277. case TARGET_NR_munmap:
  6278. ret = get_errno(target_munmap(arg1, arg2));
  6279. break;
  6280. case TARGET_NR_mprotect:
  6281. {
  6282. TaskState *ts = cpu->opaque;
  6283. /* Special hack to detect libc making the stack executable. */
  6284. if ((arg3 & PROT_GROWSDOWN)
  6285. && arg1 >= ts->info->stack_limit
  6286. && arg1 <= ts->info->start_stack) {
  6287. arg3 &= ~PROT_GROWSDOWN;
  6288. arg2 = arg2 + arg1 - ts->info->stack_limit;
  6289. arg1 = ts->info->stack_limit;
  6290. }
  6291. }
  6292. ret = get_errno(target_mprotect(arg1, arg2, arg3));
  6293. break;
  6294. #ifdef TARGET_NR_mremap
  6295. case TARGET_NR_mremap:
  6296. ret = get_errno(target_mremap(arg1, arg2, arg3, arg4, arg5));
  6297. break;
  6298. #endif
  6299. /* ??? msync/mlock/munlock are broken for softmmu. */
  6300. #ifdef TARGET_NR_msync
  6301. case TARGET_NR_msync:
  6302. ret = get_errno(msync(g2h(arg1), arg2, arg3));
  6303. break;
  6304. #endif
  6305. #ifdef TARGET_NR_mlock
  6306. case TARGET_NR_mlock:
  6307. ret = get_errno(mlock(g2h(arg1), arg2));
  6308. break;
  6309. #endif
  6310. #ifdef TARGET_NR_munlock
  6311. case TARGET_NR_munlock:
  6312. ret = get_errno(munlock(g2h(arg1), arg2));
  6313. break;
  6314. #endif
  6315. #ifdef TARGET_NR_mlockall
  6316. case TARGET_NR_mlockall:
  6317. ret = get_errno(mlockall(target_to_host_mlockall_arg(arg1)));
  6318. break;
  6319. #endif
  6320. #ifdef TARGET_NR_munlockall
  6321. case TARGET_NR_munlockall:
  6322. ret = get_errno(munlockall());
  6323. break;
  6324. #endif
  6325. case TARGET_NR_truncate:
  6326. if (!(p = lock_user_string(arg1)))
  6327. goto efault;
  6328. ret = get_errno(truncate(p, arg2));
  6329. unlock_user(p, arg1, 0);
  6330. break;
  6331. case TARGET_NR_ftruncate:
  6332. ret = get_errno(ftruncate(arg1, arg2));
  6333. break;
  6334. case TARGET_NR_fchmod:
  6335. ret = get_errno(fchmod(arg1, arg2));
  6336. break;
  6337. #if defined(TARGET_NR_fchmodat)
  6338. case TARGET_NR_fchmodat:
  6339. if (!(p = lock_user_string(arg2)))
  6340. goto efault;
  6341. ret = get_errno(fchmodat(arg1, p, arg3, 0));
  6342. unlock_user(p, arg2, 0);
  6343. break;
  6344. #endif
  6345. case TARGET_NR_getpriority:
  6346. /* Note that negative values are valid for getpriority, so we must
  6347. differentiate based on errno settings. */
  6348. errno = 0;
  6349. ret = getpriority(arg1, arg2);
  6350. if (ret == -1 && errno != 0) {
  6351. ret = -host_to_target_errno(errno);
  6352. break;
  6353. }
  6354. #ifdef TARGET_ALPHA
  6355. /* Return value is the unbiased priority. Signal no error. */
  6356. ((CPUAlphaState *)cpu_env)->ir[IR_V0] = 0;
  6357. #else
  6358. /* Return value is a biased priority to avoid negative numbers. */
  6359. ret = 20 - ret;
  6360. #endif
  6361. break;
  6362. case TARGET_NR_setpriority:
  6363. ret = get_errno(setpriority(arg1, arg2, arg3));
  6364. break;
  6365. #ifdef TARGET_NR_profil
  6366. case TARGET_NR_profil:
  6367. goto unimplemented;
  6368. #endif
  6369. case TARGET_NR_statfs:
  6370. if (!(p = lock_user_string(arg1)))
  6371. goto efault;
  6372. ret = get_errno(statfs(path(p), &stfs));
  6373. unlock_user(p, arg1, 0);
  6374. convert_statfs:
  6375. if (!is_error(ret)) {
  6376. struct target_statfs *target_stfs;
  6377. if (!lock_user_struct(VERIFY_WRITE, target_stfs, arg2, 0))
  6378. goto efault;
  6379. __put_user(stfs.f_type, &target_stfs->f_type);
  6380. __put_user(stfs.f_bsize, &target_stfs->f_bsize);
  6381. __put_user(stfs.f_blocks, &target_stfs->f_blocks);
  6382. __put_user(stfs.f_bfree, &target_stfs->f_bfree);
  6383. __put_user(stfs.f_bavail, &target_stfs->f_bavail);
  6384. __put_user(stfs.f_files, &target_stfs->f_files);
  6385. __put_user(stfs.f_ffree, &target_stfs->f_ffree);
  6386. __put_user(stfs.f_fsid.__val[0], &target_stfs->f_fsid.val[0]);
  6387. __put_user(stfs.f_fsid.__val[1], &target_stfs->f_fsid.val[1]);
  6388. __put_user(stfs.f_namelen, &target_stfs->f_namelen);
  6389. __put_user(stfs.f_frsize, &target_stfs->f_frsize);
  6390. memset(target_stfs->f_spare, 0, sizeof(target_stfs->f_spare));
  6391. unlock_user_struct(target_stfs, arg2, 1);
  6392. }
  6393. break;
  6394. case TARGET_NR_fstatfs:
  6395. ret = get_errno(fstatfs(arg1, &stfs));
  6396. goto convert_statfs;
  6397. #ifdef TARGET_NR_statfs64
  6398. case TARGET_NR_statfs64:
  6399. if (!(p = lock_user_string(arg1)))
  6400. goto efault;
  6401. ret = get_errno(statfs(path(p), &stfs));
  6402. unlock_user(p, arg1, 0);
  6403. convert_statfs64:
  6404. if (!is_error(ret)) {
  6405. struct target_statfs64 *target_stfs;
  6406. if (!lock_user_struct(VERIFY_WRITE, target_stfs, arg3, 0))
  6407. goto efault;
  6408. __put_user(stfs.f_type, &target_stfs->f_type);
  6409. __put_user(stfs.f_bsize, &target_stfs->f_bsize);
  6410. __put_user(stfs.f_blocks, &target_stfs->f_blocks);
  6411. __put_user(stfs.f_bfree, &target_stfs->f_bfree);
  6412. __put_user(stfs.f_bavail, &target_stfs->f_bavail);
  6413. __put_user(stfs.f_files, &target_stfs->f_files);
  6414. __put_user(stfs.f_ffree, &target_stfs->f_ffree);
  6415. __put_user(stfs.f_fsid.__val[0], &target_stfs->f_fsid.val[0]);
  6416. __put_user(stfs.f_fsid.__val[1], &target_stfs->f_fsid.val[1]);
  6417. __put_user(stfs.f_namelen, &target_stfs->f_namelen);
  6418. __put_user(stfs.f_frsize, &target_stfs->f_frsize);
  6419. memset(target_stfs->f_spare, 0, sizeof(target_stfs->f_spare));
  6420. unlock_user_struct(target_stfs, arg3, 1);
  6421. }
  6422. break;
  6423. case TARGET_NR_fstatfs64:
  6424. ret = get_errno(fstatfs(arg1, &stfs));
  6425. goto convert_statfs64;
  6426. #endif
  6427. #ifdef TARGET_NR_ioperm
  6428. case TARGET_NR_ioperm:
  6429. goto unimplemented;
  6430. #endif
  6431. #ifdef TARGET_NR_socketcall
  6432. case TARGET_NR_socketcall:
  6433. ret = do_socketcall(arg1, arg2);
  6434. break;
  6435. #endif
  6436. #ifdef TARGET_NR_accept
  6437. case TARGET_NR_accept:
  6438. ret = do_accept4(arg1, arg2, arg3, 0);
  6439. break;
  6440. #endif
  6441. #ifdef TARGET_NR_accept4
  6442. case TARGET_NR_accept4:
  6443. #ifdef CONFIG_ACCEPT4
  6444. ret = do_accept4(arg1, arg2, arg3, arg4);
  6445. #else
  6446. goto unimplemented;
  6447. #endif
  6448. break;
  6449. #endif
  6450. #ifdef TARGET_NR_bind
  6451. case TARGET_NR_bind:
  6452. ret = do_bind(arg1, arg2, arg3);
  6453. break;
  6454. #endif
  6455. #ifdef TARGET_NR_connect
  6456. case TARGET_NR_connect:
  6457. ret = do_connect(arg1, arg2, arg3);
  6458. break;
  6459. #endif
  6460. #ifdef TARGET_NR_getpeername
  6461. case TARGET_NR_getpeername:
  6462. ret = do_getpeername(arg1, arg2, arg3);
  6463. break;
  6464. #endif
  6465. #ifdef TARGET_NR_getsockname
  6466. case TARGET_NR_getsockname:
  6467. ret = do_getsockname(arg1, arg2, arg3);
  6468. break;
  6469. #endif
  6470. #ifdef TARGET_NR_getsockopt
  6471. case TARGET_NR_getsockopt:
  6472. ret = do_getsockopt(arg1, arg2, arg3, arg4, arg5);
  6473. break;
  6474. #endif
  6475. #ifdef TARGET_NR_listen
  6476. case TARGET_NR_listen:
  6477. ret = get_errno(listen(arg1, arg2));
  6478. break;
  6479. #endif
  6480. #ifdef TARGET_NR_recv
  6481. case TARGET_NR_recv:
  6482. ret = do_recvfrom(arg1, arg2, arg3, arg4, 0, 0);
  6483. break;
  6484. #endif
  6485. #ifdef TARGET_NR_recvfrom
  6486. case TARGET_NR_recvfrom:
  6487. ret = do_recvfrom(arg1, arg2, arg3, arg4, arg5, arg6);
  6488. break;
  6489. #endif
  6490. #ifdef TARGET_NR_recvmsg
  6491. case TARGET_NR_recvmsg:
  6492. ret = do_sendrecvmsg(arg1, arg2, arg3, 0);
  6493. break;
  6494. #endif
  6495. #ifdef TARGET_NR_send
  6496. case TARGET_NR_send:
  6497. ret = do_sendto(arg1, arg2, arg3, arg4, 0, 0);
  6498. break;
  6499. #endif
  6500. #ifdef TARGET_NR_sendmsg
  6501. case TARGET_NR_sendmsg:
  6502. ret = do_sendrecvmsg(arg1, arg2, arg3, 1);
  6503. break;
  6504. #endif
  6505. #ifdef TARGET_NR_sendmmsg
  6506. case TARGET_NR_sendmmsg:
  6507. ret = do_sendrecvmmsg(arg1, arg2, arg3, arg4, 1);
  6508. break;
  6509. case TARGET_NR_recvmmsg:
  6510. ret = do_sendrecvmmsg(arg1, arg2, arg3, arg4, 0);
  6511. break;
  6512. #endif
  6513. #ifdef TARGET_NR_sendto
  6514. case TARGET_NR_sendto:
  6515. ret = do_sendto(arg1, arg2, arg3, arg4, arg5, arg6);
  6516. break;
  6517. #endif
  6518. #ifdef TARGET_NR_shutdown
  6519. case TARGET_NR_shutdown:
  6520. ret = get_errno(shutdown(arg1, arg2));
  6521. break;
  6522. #endif
  6523. #ifdef TARGET_NR_socket
  6524. case TARGET_NR_socket:
  6525. ret = do_socket(arg1, arg2, arg3);
  6526. break;
  6527. #endif
  6528. #ifdef TARGET_NR_socketpair
  6529. case TARGET_NR_socketpair:
  6530. ret = do_socketpair(arg1, arg2, arg3, arg4);
  6531. break;
  6532. #endif
  6533. #ifdef TARGET_NR_setsockopt
  6534. case TARGET_NR_setsockopt:
  6535. ret = do_setsockopt(arg1, arg2, arg3, arg4, (socklen_t) arg5);
  6536. break;
  6537. #endif
  6538. case TARGET_NR_syslog:
  6539. if (!(p = lock_user_string(arg2)))
  6540. goto efault;
  6541. ret = get_errno(sys_syslog((int)arg1, p, (int)arg3));
  6542. unlock_user(p, arg2, 0);
  6543. break;
  6544. case TARGET_NR_setitimer:
  6545. {
  6546. struct itimerval value, ovalue, *pvalue;
  6547. if (arg2) {
  6548. pvalue = &value;
  6549. if (copy_from_user_timeval(&pvalue->it_interval, arg2)
  6550. || copy_from_user_timeval(&pvalue->it_value,
  6551. arg2 + sizeof(struct target_timeval)))
  6552. goto efault;
  6553. } else {
  6554. pvalue = NULL;
  6555. }
  6556. ret = get_errno(setitimer(arg1, pvalue, &ovalue));
  6557. if (!is_error(ret) && arg3) {
  6558. if (copy_to_user_timeval(arg3,
  6559. &ovalue.it_interval)
  6560. || copy_to_user_timeval(arg3 + sizeof(struct target_timeval),
  6561. &ovalue.it_value))
  6562. goto efault;
  6563. }
  6564. }
  6565. break;
  6566. case TARGET_NR_getitimer:
  6567. {
  6568. struct itimerval value;
  6569. ret = get_errno(getitimer(arg1, &value));
  6570. if (!is_error(ret) && arg2) {
  6571. if (copy_to_user_timeval(arg2,
  6572. &value.it_interval)
  6573. || copy_to_user_timeval(arg2 + sizeof(struct target_timeval),
  6574. &value.it_value))
  6575. goto efault;
  6576. }
  6577. }
  6578. break;
  6579. case TARGET_NR_stat:
  6580. if (!(p = lock_user_string(arg1)))
  6581. goto efault;
  6582. ret = get_errno(stat(path(p), &st));
  6583. unlock_user(p, arg1, 0);
  6584. goto do_stat;
  6585. case TARGET_NR_lstat:
  6586. if (!(p = lock_user_string(arg1)))
  6587. goto efault;
  6588. ret = get_errno(lstat(path(p), &st));
  6589. unlock_user(p, arg1, 0);
  6590. goto do_stat;
  6591. case TARGET_NR_fstat:
  6592. {
  6593. ret = get_errno(fstat(arg1, &st));
  6594. do_stat:
  6595. if (!is_error(ret)) {
  6596. struct target_stat *target_st;
  6597. if (!lock_user_struct(VERIFY_WRITE, target_st, arg2, 0))
  6598. goto efault;
  6599. memset(target_st, 0, sizeof(*target_st));
  6600. __put_user(st.st_dev, &target_st->st_dev);
  6601. __put_user(st.st_ino, &target_st->st_ino);
  6602. __put_user(st.st_mode, &target_st->st_mode);
  6603. __put_user(st.st_uid, &target_st->st_uid);
  6604. __put_user(st.st_gid, &target_st->st_gid);
  6605. __put_user(st.st_nlink, &target_st->st_nlink);
  6606. __put_user(st.st_rdev, &target_st->st_rdev);
  6607. __put_user(st.st_size, &target_st->st_size);
  6608. __put_user(st.st_blksize, &target_st->st_blksize);
  6609. __put_user(st.st_blocks, &target_st->st_blocks);
  6610. __put_user(st.st_atime, &target_st->target_st_atime);
  6611. __put_user(st.st_mtime, &target_st->target_st_mtime);
  6612. __put_user(st.st_ctime, &target_st->target_st_ctime);
  6613. unlock_user_struct(target_st, arg2, 1);
  6614. }
  6615. }
  6616. break;
  6617. #ifdef TARGET_NR_olduname
  6618. case TARGET_NR_olduname:
  6619. goto unimplemented;
  6620. #endif
  6621. #ifdef TARGET_NR_iopl
  6622. case TARGET_NR_iopl:
  6623. goto unimplemented;
  6624. #endif
  6625. case TARGET_NR_vhangup:
  6626. ret = get_errno(vhangup());
  6627. break;
  6628. #ifdef TARGET_NR_idle
  6629. case TARGET_NR_idle:
  6630. goto unimplemented;
  6631. #endif
  6632. #ifdef TARGET_NR_syscall
  6633. case TARGET_NR_syscall:
  6634. ret = do_syscall(cpu_env, arg1 & 0xffff, arg2, arg3, arg4, arg5,
  6635. arg6, arg7, arg8, 0);
  6636. break;
  6637. #endif
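/* wait4: the host exit status is converted with
 * host_to_target_waitstatus() and, if requested, the rusage struct
 * is converted with host_to_target_rusage().
 */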
  6638. case TARGET_NR_wait4:
  6639. {
  6640. int status;
  6641. abi_long status_ptr = arg2;
  6642. struct rusage rusage, *rusage_ptr;
  6643. abi_ulong target_rusage = arg4;
  6644. abi_long rusage_err;
  6645. if (target_rusage)
  6646. rusage_ptr = &rusage;
  6647. else
  6648. rusage_ptr = NULL;
  6649. ret = get_errno(wait4(arg1, &status, arg3, rusage_ptr));
  6650. if (!is_error(ret)) {
  6651. if (status_ptr && ret) {
  6652. status = host_to_target_waitstatus(status);
  6653. if (put_user_s32(status, status_ptr))
  6654. goto efault;
  6655. }
  6656. if (target_rusage) {
  6657. rusage_err = host_to_target_rusage(target_rusage, &rusage);
  6658. if (rusage_err) {
  6659. ret = rusage_err;
  6660. }
  6661. }
  6662. }
  6663. }
  6664. break;
  6665. #ifdef TARGET_NR_swapoff
  6666. case TARGET_NR_swapoff:
  6667. if (!(p = lock_user_string(arg1)))
  6668. goto efault;
  6669. ret = get_errno(swapoff(p));
  6670. unlock_user(p, arg1, 0);
  6671. break;
  6672. #endif
  6673. case TARGET_NR_sysinfo:
  6674. {
  6675. struct target_sysinfo *target_value;
  6676. struct sysinfo value;
  6677. ret = get_errno(sysinfo(&value));
  6678. if (!is_error(ret) && arg1)
  6679. {
  6680. if (!lock_user_struct(VERIFY_WRITE, target_value, arg1, 0))
  6681. goto efault;
  6682. __put_user(value.uptime, &target_value->uptime);
  6683. __put_user(value.loads[0], &target_value->loads[0]);
  6684. __put_user(value.loads[1], &target_value->loads[1]);
  6685. __put_user(value.loads[2], &target_value->loads[2]);
  6686. __put_user(value.totalram, &target_value->totalram);
  6687. __put_user(value.freeram, &target_value->freeram);
  6688. __put_user(value.sharedram, &target_value->sharedram);
  6689. __put_user(value.bufferram, &target_value->bufferram);
  6690. __put_user(value.totalswap, &target_value->totalswap);
  6691. __put_user(value.freeswap, &target_value->freeswap);
  6692. __put_user(value.procs, &target_value->procs);
  6693. __put_user(value.totalhigh, &target_value->totalhigh);
  6694. __put_user(value.freehigh, &target_value->freehigh);
  6695. __put_user(value.mem_unit, &target_value->mem_unit);
  6696. unlock_user_struct(target_value, arg1, 1);
  6697. }
  6698. }
  6699. break;
  6700. #ifdef TARGET_NR_ipc
  6701. case TARGET_NR_ipc:
  6702. ret = do_ipc(arg1, arg2, arg3, arg4, arg5, arg6);
  6703. break;
  6704. #endif
  6705. #ifdef TARGET_NR_semget
  6706. case TARGET_NR_semget:
  6707. ret = get_errno(semget(arg1, arg2, arg3));
  6708. break;
  6709. #endif
  6710. #ifdef TARGET_NR_semop
  6711. case TARGET_NR_semop:
  6712. ret = do_semop(arg1, arg2, arg3);
  6713. break;
  6714. #endif
  6715. #ifdef TARGET_NR_semctl
  6716. case TARGET_NR_semctl:
  6717. ret = do_semctl(arg1, arg2, arg3, (union target_semun)(abi_ulong)arg4);
  6718. break;
  6719. #endif
  6720. #ifdef TARGET_NR_msgctl
  6721. case TARGET_NR_msgctl:
  6722. ret = do_msgctl(arg1, arg2, arg3);
  6723. break;
  6724. #endif
  6725. #ifdef TARGET_NR_msgget
  6726. case TARGET_NR_msgget:
  6727. ret = get_errno(msgget(arg1, arg2));
  6728. break;
  6729. #endif
  6730. #ifdef TARGET_NR_msgrcv
  6731. case TARGET_NR_msgrcv:
  6732. ret = do_msgrcv(arg1, arg2, arg3, arg4, arg5);
  6733. break;
  6734. #endif
  6735. #ifdef TARGET_NR_msgsnd
  6736. case TARGET_NR_msgsnd:
  6737. ret = do_msgsnd(arg1, arg2, arg3, arg4);
  6738. break;
  6739. #endif
  6740. #ifdef TARGET_NR_shmget
  6741. case TARGET_NR_shmget:
  6742. ret = get_errno(shmget(arg1, arg2, arg3));
  6743. break;
  6744. #endif
  6745. #ifdef TARGET_NR_shmctl
  6746. case TARGET_NR_shmctl:
  6747. ret = do_shmctl(arg1, arg2, arg3);
  6748. break;
  6749. #endif
  6750. #ifdef TARGET_NR_shmat
  6751. case TARGET_NR_shmat:
  6752. ret = do_shmat(arg1, arg2, arg3);
  6753. break;
  6754. #endif
  6755. #ifdef TARGET_NR_shmdt
  6756. case TARGET_NR_shmdt:
  6757. ret = do_shmdt(arg1);
  6758. break;
  6759. #endif
  6760. case TARGET_NR_fsync:
  6761. ret = get_errno(fsync(arg1));
  6762. break;
  6763. case TARGET_NR_clone:
  6764. /* Linux manages to have three different orderings for its
  6765. * arguments to clone(); the BACKWARDS and BACKWARDS2 defines
  6766. * match the kernel's CONFIG_CLONE_* settings.
  6767. * Microblaze is further special in that it uses a sixth
  6768. * implicit argument to clone for the TLS pointer.
  6769. */
  6770. #if defined(TARGET_MICROBLAZE)
  6771. ret = get_errno(do_fork(cpu_env, arg1, arg2, arg4, arg6, arg5));
  6772. #elif defined(TARGET_CLONE_BACKWARDS)
  6773. ret = get_errno(do_fork(cpu_env, arg1, arg2, arg3, arg4, arg5));
  6774. #elif defined(TARGET_CLONE_BACKWARDS2)
  6775. ret = get_errno(do_fork(cpu_env, arg2, arg1, arg3, arg5, arg4));
  6776. #else
  6777. ret = get_errno(do_fork(cpu_env, arg1, arg2, arg3, arg5, arg4));
  6778. #endif
  6779. break;
  6780. #ifdef __NR_exit_group
  6781. /* new thread calls */
  6782. case TARGET_NR_exit_group:
  6783. #ifdef TARGET_GPROF
  6784. _mcleanup();
  6785. #endif
  6786. gdb_exit(cpu_env, arg1);
  6787. ret = get_errno(exit_group(arg1));
  6788. break;
  6789. #endif
  6790. case TARGET_NR_setdomainname:
  6791. if (!(p = lock_user_string(arg1)))
  6792. goto efault;
  6793. ret = get_errno(setdomainname(p, arg2));
  6794. unlock_user(p, arg1, 0);
  6795. break;
  6796. case TARGET_NR_uname:
  6797. /* no need to transcode because we use the linux syscall */
  6798. {
  6799. struct new_utsname * buf;
  6800. if (!lock_user_struct(VERIFY_WRITE, buf, arg1, 0))
  6801. goto efault;
  6802. ret = get_errno(sys_uname(buf));
  6803. if (!is_error(ret)) {
6804. /* Overwrite the native machine name with whatever is being
  6805. emulated. */
  6806. strcpy (buf->machine, cpu_to_uname_machine(cpu_env));
  6807. /* Allow the user to override the reported release. */
  6808. if (qemu_uname_release && *qemu_uname_release)
  6809. strcpy (buf->release, qemu_uname_release);
  6810. }
  6811. unlock_user_struct(buf, arg1, 1);
  6812. }
  6813. break;
  6814. #ifdef TARGET_I386
  6815. case TARGET_NR_modify_ldt:
  6816. ret = do_modify_ldt(cpu_env, arg1, arg2, arg3);
  6817. break;
  6818. #if !defined(TARGET_X86_64)
  6819. case TARGET_NR_vm86old:
  6820. goto unimplemented;
  6821. case TARGET_NR_vm86:
  6822. ret = do_vm86(cpu_env, arg1, arg2);
  6823. break;
  6824. #endif
  6825. #endif
  6826. case TARGET_NR_adjtimex:
  6827. goto unimplemented;
  6828. #ifdef TARGET_NR_create_module
  6829. case TARGET_NR_create_module:
  6830. #endif
  6831. case TARGET_NR_init_module:
  6832. case TARGET_NR_delete_module:
  6833. #ifdef TARGET_NR_get_kernel_syms
  6834. case TARGET_NR_get_kernel_syms:
  6835. #endif
  6836. goto unimplemented;
  6837. case TARGET_NR_quotactl:
  6838. goto unimplemented;
  6839. case TARGET_NR_getpgid:
  6840. ret = get_errno(getpgid(arg1));
  6841. break;
  6842. case TARGET_NR_fchdir:
  6843. ret = get_errno(fchdir(arg1));
  6844. break;
  6845. #ifdef TARGET_NR_bdflush /* not on x86_64 */
  6846. case TARGET_NR_bdflush:
  6847. goto unimplemented;
  6848. #endif
  6849. #ifdef TARGET_NR_sysfs
  6850. case TARGET_NR_sysfs:
  6851. goto unimplemented;
  6852. #endif
  6853. case TARGET_NR_personality:
  6854. ret = get_errno(personality(arg1));
  6855. break;
  6856. #ifdef TARGET_NR_afs_syscall
  6857. case TARGET_NR_afs_syscall:
  6858. goto unimplemented;
  6859. #endif
  6860. #ifdef TARGET_NR__llseek /* Not on alpha */
  6861. case TARGET_NR__llseek:
  6862. {
  6863. int64_t res;
  6864. #if !defined(__NR_llseek)
  6865. res = lseek(arg1, ((uint64_t)arg2 << 32) | arg3, arg5);
  6866. if (res == -1) {
  6867. ret = get_errno(res);
  6868. } else {
  6869. ret = 0;
  6870. }
  6871. #else
  6872. ret = get_errno(_llseek(arg1, arg2, arg3, &res, arg5));
  6873. #endif
  6874. if ((ret == 0) && put_user_s64(res, arg4)) {
  6875. goto efault;
  6876. }
  6877. }
  6878. break;
  6879. #endif
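/* getdents: three strategies depending on the build. With a host
 * getdents and differing ABI sizes, host dirents are repacked into
 * target_dirent records; with matching sizes they are byte-swapped
 * in place; without host getdents the call is emulated on top of
 * getdents64.
 */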
  6880. case TARGET_NR_getdents:
  6881. #ifdef __NR_getdents
  6882. #if TARGET_ABI_BITS == 32 && HOST_LONG_BITS == 64
  6883. {
  6884. struct target_dirent *target_dirp;
  6885. struct linux_dirent *dirp;
  6886. abi_long count = arg3;
  6887. dirp = malloc(count);
  6888. if (!dirp) {
  6889. ret = -TARGET_ENOMEM;
  6890. goto fail;
  6891. }
  6892. ret = get_errno(sys_getdents(arg1, dirp, count));
  6893. if (!is_error(ret)) {
  6894. struct linux_dirent *de;
  6895. struct target_dirent *tde;
  6896. int len = ret;
  6897. int reclen, treclen;
  6898. int count1, tnamelen;
  6899. count1 = 0;
  6900. de = dirp;
  6901. if (!(target_dirp = lock_user(VERIFY_WRITE, arg2, count, 0)))
  6902. goto efault;
  6903. tde = target_dirp;
  6904. while (len > 0) {
  6905. reclen = de->d_reclen;
  6906. tnamelen = reclen - offsetof(struct linux_dirent, d_name);
  6907. assert(tnamelen >= 0);
  6908. treclen = tnamelen + offsetof(struct target_dirent, d_name);
  6909. assert(count1 + treclen <= count);
  6910. tde->d_reclen = tswap16(treclen);
  6911. tde->d_ino = tswapal(de->d_ino);
  6912. tde->d_off = tswapal(de->d_off);
  6913. memcpy(tde->d_name, de->d_name, tnamelen);
  6914. de = (struct linux_dirent *)((char *)de + reclen);
  6915. len -= reclen;
  6916. tde = (struct target_dirent *)((char *)tde + treclen);
  6917. count1 += treclen;
  6918. }
  6919. ret = count1;
  6920. unlock_user(target_dirp, arg2, ret);
  6921. }
  6922. free(dirp);
  6923. }
  6924. #else
  6925. {
  6926. struct linux_dirent *dirp;
  6927. abi_long count = arg3;
  6928. if (!(dirp = lock_user(VERIFY_WRITE, arg2, count, 0)))
  6929. goto efault;
  6930. ret = get_errno(sys_getdents(arg1, dirp, count));
  6931. if (!is_error(ret)) {
  6932. struct linux_dirent *de;
  6933. int len = ret;
  6934. int reclen;
  6935. de = dirp;
  6936. while (len > 0) {
  6937. reclen = de->d_reclen;
  6938. if (reclen > len)
  6939. break;
  6940. de->d_reclen = tswap16(reclen);
  6941. tswapls(&de->d_ino);
  6942. tswapls(&de->d_off);
  6943. de = (struct linux_dirent *)((char *)de + reclen);
  6944. len -= reclen;
  6945. }
  6946. }
  6947. unlock_user(dirp, arg2, ret);
  6948. }
  6949. #endif
  6950. #else
  6951. /* Implement getdents in terms of getdents64 */
  6952. {
  6953. struct linux_dirent64 *dirp;
  6954. abi_long count = arg3;
  6955. dirp = lock_user(VERIFY_WRITE, arg2, count, 0);
  6956. if (!dirp) {
  6957. goto efault;
  6958. }
  6959. ret = get_errno(sys_getdents64(arg1, dirp, count));
  6960. if (!is_error(ret)) {
  6961. /* Convert the dirent64 structs to target dirent. We do this
  6962. * in-place, since we can guarantee that a target_dirent is no
  6963. * larger than a dirent64; however this means we have to be
  6964. * careful to read everything before writing in the new format.
  6965. */
  6966. struct linux_dirent64 *de;
  6967. struct target_dirent *tde;
  6968. int len = ret;
  6969. int tlen = 0;
  6970. de = dirp;
  6971. tde = (struct target_dirent *)dirp;
  6972. while (len > 0) {
  6973. int namelen, treclen;
  6974. int reclen = de->d_reclen;
  6975. uint64_t ino = de->d_ino;
  6976. int64_t off = de->d_off;
  6977. uint8_t type = de->d_type;
  6978. namelen = strlen(de->d_name);
  6979. treclen = offsetof(struct target_dirent, d_name)
  6980. + namelen + 2;
  6981. treclen = QEMU_ALIGN_UP(treclen, sizeof(abi_long));
  6982. memmove(tde->d_name, de->d_name, namelen + 1);
  6983. tde->d_ino = tswapal(ino);
  6984. tde->d_off = tswapal(off);
  6985. tde->d_reclen = tswap16(treclen);
  6986. /* The target_dirent type is in what was formerly a padding
  6987. * byte at the end of the structure:
  6988. */
  6989. *(((char *)tde) + treclen - 1) = type;
  6990. de = (struct linux_dirent64 *)((char *)de + reclen);
  6991. tde = (struct target_dirent *)((char *)tde + treclen);
  6992. len -= reclen;
  6993. tlen += treclen;
  6994. }
  6995. ret = tlen;
  6996. }
  6997. unlock_user(dirp, arg2, ret);
  6998. }
  6999. #endif
  7000. break;
  7001. #if defined(TARGET_NR_getdents64) && defined(__NR_getdents64)
  7002. case TARGET_NR_getdents64:
  7003. {
  7004. struct linux_dirent64 *dirp;
  7005. abi_long count = arg3;
  7006. if (!(dirp = lock_user(VERIFY_WRITE, arg2, count, 0)))
  7007. goto efault;
  7008. ret = get_errno(sys_getdents64(arg1, dirp, count));
  7009. if (!is_error(ret)) {
  7010. struct linux_dirent64 *de;
  7011. int len = ret;
  7012. int reclen;
  7013. de = dirp;
  7014. while (len > 0) {
  7015. reclen = de->d_reclen;
  7016. if (reclen > len)
  7017. break;
  7018. de->d_reclen = tswap16(reclen);
  7019. tswap64s((uint64_t *)&de->d_ino);
  7020. tswap64s((uint64_t *)&de->d_off);
  7021. de = (struct linux_dirent64 *)((char *)de + reclen);
  7022. len -= reclen;
  7023. }
  7024. }
  7025. unlock_user(dirp, arg2, ret);
  7026. }
  7027. break;
  7028. #endif /* TARGET_NR_getdents64 */
  7029. #if defined(TARGET_NR__newselect)
  7030. case TARGET_NR__newselect:
  7031. ret = do_select(arg1, arg2, arg3, arg4, arg5);
  7032. break;
  7033. #endif
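/* poll and ppoll share one handler: the target_pollfd array is
 * converted in both directions, and for ppoll the timespec and the
 * signal mask are converted as well before calling sys_ppoll().
 */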
  7034. #if defined(TARGET_NR_poll) || defined(TARGET_NR_ppoll)
  7035. # ifdef TARGET_NR_poll
  7036. case TARGET_NR_poll:
  7037. # endif
  7038. # ifdef TARGET_NR_ppoll
  7039. case TARGET_NR_ppoll:
  7040. # endif
  7041. {
  7042. struct target_pollfd *target_pfd;
  7043. unsigned int nfds = arg2;
  7044. int timeout = arg3;
  7045. struct pollfd *pfd;
  7046. unsigned int i;
  7047. target_pfd = lock_user(VERIFY_WRITE, arg1, sizeof(struct target_pollfd) * nfds, 1);
  7048. if (!target_pfd)
  7049. goto efault;
  7050. pfd = alloca(sizeof(struct pollfd) * nfds);
  7051. for(i = 0; i < nfds; i++) {
  7052. pfd[i].fd = tswap32(target_pfd[i].fd);
  7053. pfd[i].events = tswap16(target_pfd[i].events);
  7054. }
  7055. # ifdef TARGET_NR_ppoll
  7056. if (num == TARGET_NR_ppoll) {
  7057. struct timespec _timeout_ts, *timeout_ts = &_timeout_ts;
  7058. target_sigset_t *target_set;
  7059. sigset_t _set, *set = &_set;
  7060. if (arg3) {
  7061. if (target_to_host_timespec(timeout_ts, arg3)) {
  7062. unlock_user(target_pfd, arg1, 0);
  7063. goto efault;
  7064. }
  7065. } else {
  7066. timeout_ts = NULL;
  7067. }
  7068. if (arg4) {
  7069. target_set = lock_user(VERIFY_READ, arg4, sizeof(target_sigset_t), 1);
  7070. if (!target_set) {
  7071. unlock_user(target_pfd, arg1, 0);
  7072. goto efault;
  7073. }
  7074. target_to_host_sigset(set, target_set);
  7075. } else {
  7076. set = NULL;
  7077. }
  7078. ret = get_errno(sys_ppoll(pfd, nfds, timeout_ts, set, _NSIG/8));
  7079. if (!is_error(ret) && arg3) {
  7080. host_to_target_timespec(arg3, timeout_ts);
  7081. }
  7082. if (arg4) {
  7083. unlock_user(target_set, arg4, 0);
  7084. }
  7085. } else
  7086. # endif
  7087. ret = get_errno(poll(pfd, nfds, timeout));
  7088. if (!is_error(ret)) {
  7089. for(i = 0; i < nfds; i++) {
  7090. target_pfd[i].revents = tswap16(pfd[i].revents);
  7091. }
  7092. }
  7093. unlock_user(target_pfd, arg1, sizeof(struct target_pollfd) * nfds);
  7094. }
  7095. break;
  7096. #endif
  7097. case TARGET_NR_flock:
7098. /* NOTE: the flock constants seem to be the same on every
7099. Linux platform */
  7100. ret = get_errno(flock(arg1, arg2));
  7101. break;
  7102. case TARGET_NR_readv:
  7103. {
  7104. struct iovec *vec = lock_iovec(VERIFY_WRITE, arg2, arg3, 0);
  7105. if (vec != NULL) {
  7106. ret = get_errno(readv(arg1, vec, arg3));
  7107. unlock_iovec(vec, arg2, arg3, 1);
  7108. } else {
  7109. ret = -host_to_target_errno(errno);
  7110. }
  7111. }
  7112. break;
  7113. case TARGET_NR_writev:
  7114. {
  7115. struct iovec *vec = lock_iovec(VERIFY_READ, arg2, arg3, 1);
  7116. if (vec != NULL) {
  7117. ret = get_errno(writev(arg1, vec, arg3));
  7118. unlock_iovec(vec, arg2, arg3, 0);
  7119. } else {
  7120. ret = -host_to_target_errno(errno);
  7121. }
  7122. }
  7123. break;
  7124. case TARGET_NR_getsid:
  7125. ret = get_errno(getsid(arg1));
  7126. break;
  7127. #if defined(TARGET_NR_fdatasync) /* Not on alpha (osf_datasync ?) */
  7128. case TARGET_NR_fdatasync:
  7129. ret = get_errno(fdatasync(arg1));
  7130. break;
  7131. #endif
  7132. case TARGET_NR__sysctl:
  7133. /* We don't implement this, but ENOTDIR is always a safe
  7134. return value. */
  7135. ret = -TARGET_ENOTDIR;
  7136. break;
  7137. case TARGET_NR_sched_getaffinity:
  7138. {
  7139. unsigned int mask_size;
  7140. unsigned long *mask;
  7141. /*
7142. * sched_getaffinity needs multiples of ulong, so we need to take
7143. * care of mismatches between the target and host ulong sizes.
  7144. */
  7145. if (arg2 & (sizeof(abi_ulong) - 1)) {
  7146. ret = -TARGET_EINVAL;
  7147. break;
  7148. }
  7149. mask_size = (arg2 + (sizeof(*mask) - 1)) & ~(sizeof(*mask) - 1);
  7150. mask = alloca(mask_size);
  7151. ret = get_errno(sys_sched_getaffinity(arg1, mask_size, mask));
  7152. if (!is_error(ret)) {
  7153. if (ret > arg2) {
7154. /* More data was returned than the caller's buffer can hold.
  7155. * This only happens if sizeof(abi_long) < sizeof(long)
  7156. * and the caller passed us a buffer holding an odd number
  7157. * of abi_longs. If the host kernel is actually using the
  7158. * extra 4 bytes then fail EINVAL; otherwise we can just
  7159. * ignore them and only copy the interesting part.
  7160. */
  7161. int numcpus = sysconf(_SC_NPROCESSORS_CONF);
  7162. if (numcpus > arg2 * 8) {
  7163. ret = -TARGET_EINVAL;
  7164. break;
  7165. }
  7166. ret = arg2;
  7167. }
  7168. if (copy_to_user(arg3, mask, ret)) {
  7169. goto efault;
  7170. }
  7171. }
  7172. }
  7173. break;
  7174. case TARGET_NR_sched_setaffinity:
  7175. {
  7176. unsigned int mask_size;
  7177. unsigned long *mask;
  7178. /*
7179. * sched_setaffinity needs multiples of ulong, so we need to take
7180. * care of mismatches between the target and host ulong sizes.
  7181. */
  7182. if (arg2 & (sizeof(abi_ulong) - 1)) {
  7183. ret = -TARGET_EINVAL;
  7184. break;
  7185. }
  7186. mask_size = (arg2 + (sizeof(*mask) - 1)) & ~(sizeof(*mask) - 1);
  7187. mask = alloca(mask_size);
  7188. if (!lock_user_struct(VERIFY_READ, p, arg3, 1)) {
  7189. goto efault;
  7190. }
  7191. memcpy(mask, p, arg2);
7192. unlock_user_struct(p, arg3, 0);
  7193. ret = get_errno(sys_sched_setaffinity(arg1, mask_size, mask));
  7194. }
  7195. break;
  7196. case TARGET_NR_sched_setparam:
  7197. {
  7198. struct sched_param *target_schp;
  7199. struct sched_param schp;
  7200. if (arg2 == 0) {
  7201. return -TARGET_EINVAL;
  7202. }
  7203. if (!lock_user_struct(VERIFY_READ, target_schp, arg2, 1))
  7204. goto efault;
  7205. schp.sched_priority = tswap32(target_schp->sched_priority);
  7206. unlock_user_struct(target_schp, arg2, 0);
  7207. ret = get_errno(sched_setparam(arg1, &schp));
  7208. }
  7209. break;
  7210. case TARGET_NR_sched_getparam:
  7211. {
  7212. struct sched_param *target_schp;
  7213. struct sched_param schp;
  7214. if (arg2 == 0) {
  7215. return -TARGET_EINVAL;
  7216. }
  7217. ret = get_errno(sched_getparam(arg1, &schp));
  7218. if (!is_error(ret)) {
  7219. if (!lock_user_struct(VERIFY_WRITE, target_schp, arg2, 0))
  7220. goto efault;
  7221. target_schp->sched_priority = tswap32(schp.sched_priority);
  7222. unlock_user_struct(target_schp, arg2, 1);
  7223. }
  7224. }
  7225. break;
  7226. case TARGET_NR_sched_setscheduler:
  7227. {
  7228. struct sched_param *target_schp;
  7229. struct sched_param schp;
  7230. if (arg3 == 0) {
  7231. return -TARGET_EINVAL;
  7232. }
  7233. if (!lock_user_struct(VERIFY_READ, target_schp, arg3, 1))
  7234. goto efault;
  7235. schp.sched_priority = tswap32(target_schp->sched_priority);
  7236. unlock_user_struct(target_schp, arg3, 0);
  7237. ret = get_errno(sched_setscheduler(arg1, arg2, &schp));
  7238. }
  7239. break;
  7240. case TARGET_NR_sched_getscheduler:
  7241. ret = get_errno(sched_getscheduler(arg1));
  7242. break;
  7243. case TARGET_NR_sched_yield:
  7244. ret = get_errno(sched_yield());
  7245. break;
  7246. case TARGET_NR_sched_get_priority_max:
  7247. ret = get_errno(sched_get_priority_max(arg1));
  7248. break;
  7249. case TARGET_NR_sched_get_priority_min:
  7250. ret = get_errno(sched_get_priority_min(arg1));
  7251. break;
  7252. case TARGET_NR_sched_rr_get_interval:
  7253. {
  7254. struct timespec ts;
  7255. ret = get_errno(sched_rr_get_interval(arg1, &ts));
  7256. if (!is_error(ret)) {
  7257. ret = host_to_target_timespec(arg2, &ts);
  7258. }
  7259. }
  7260. break;
  7261. case TARGET_NR_nanosleep:
  7262. {
  7263. struct timespec req, rem;
  7264. target_to_host_timespec(&req, arg1);
  7265. ret = get_errno(nanosleep(&req, &rem));
  7266. if (is_error(ret) && arg2) {
  7267. host_to_target_timespec(arg2, &rem);
  7268. }
  7269. }
  7270. break;
  7271. #ifdef TARGET_NR_query_module
  7272. case TARGET_NR_query_module:
  7273. goto unimplemented;
  7274. #endif
  7275. #ifdef TARGET_NR_nfsservctl
  7276. case TARGET_NR_nfsservctl:
  7277. goto unimplemented;
  7278. #endif
  7279. case TARGET_NR_prctl:
  7280. switch (arg1) {
  7281. case PR_GET_PDEATHSIG:
  7282. {
  7283. int deathsig;
  7284. ret = get_errno(prctl(arg1, &deathsig, arg3, arg4, arg5));
  7285. if (!is_error(ret) && arg2
  7286. && put_user_ual(deathsig, arg2)) {
  7287. goto efault;
  7288. }
  7289. break;
  7290. }
  7291. #ifdef PR_GET_NAME
  7292. case PR_GET_NAME:
  7293. {
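/* The task name is at most 16 bytes, including the trailing NUL. */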
  7294. void *name = lock_user(VERIFY_WRITE, arg2, 16, 1);
  7295. if (!name) {
  7296. goto efault;
  7297. }
  7298. ret = get_errno(prctl(arg1, (unsigned long)name,
  7299. arg3, arg4, arg5));
  7300. unlock_user(name, arg2, 16);
  7301. break;
  7302. }
  7303. case PR_SET_NAME:
  7304. {
  7305. void *name = lock_user(VERIFY_READ, arg2, 16, 1);
  7306. if (!name) {
  7307. goto efault;
  7308. }
  7309. ret = get_errno(prctl(arg1, (unsigned long)name,
  7310. arg3, arg4, arg5));
  7311. unlock_user(name, arg2, 0);
  7312. break;
  7313. }
  7314. #endif
  7315. default:
  7316. /* Most prctl options have no pointer arguments */
  7317. ret = get_errno(prctl(arg1, arg2, arg3, arg4, arg5));
  7318. break;
  7319. }
  7320. break;
  7321. #ifdef TARGET_NR_arch_prctl
  7322. case TARGET_NR_arch_prctl:
  7323. #if defined(TARGET_I386) && !defined(TARGET_ABI32)
  7324. ret = do_arch_prctl(cpu_env, arg1, arg2);
  7325. break;
  7326. #else
  7327. goto unimplemented;
  7328. #endif
  7329. #endif
  7330. #ifdef TARGET_NR_pread64
  7331. case TARGET_NR_pread64:
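/* On some 32-bit ABIs the 64-bit offset is passed in an aligned
 * register pair, which shifts the argument slots by one. */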
  7332. if (regpairs_aligned(cpu_env)) {
  7333. arg4 = arg5;
  7334. arg5 = arg6;
  7335. }
  7336. if (!(p = lock_user(VERIFY_WRITE, arg2, arg3, 0)))
  7337. goto efault;
  7338. ret = get_errno(pread64(arg1, p, arg3, target_offset64(arg4, arg5)));
  7339. unlock_user(p, arg2, ret);
  7340. break;
  7341. case TARGET_NR_pwrite64:
  7342. if (regpairs_aligned(cpu_env)) {
  7343. arg4 = arg5;
  7344. arg5 = arg6;
  7345. }
  7346. if (!(p = lock_user(VERIFY_READ, arg2, arg3, 1)))
  7347. goto efault;
  7348. ret = get_errno(pwrite64(arg1, p, arg3, target_offset64(arg4, arg5)));
  7349. unlock_user(p, arg2, 0);
  7350. break;
  7351. #endif
  7352. case TARGET_NR_getcwd:
  7353. if (!(p = lock_user(VERIFY_WRITE, arg1, arg2, 0)))
  7354. goto efault;
  7355. ret = get_errno(sys_getcwd1(p, arg2));
  7356. unlock_user(p, arg1, ret);
  7357. break;
  7358. case TARGET_NR_capget:
  7359. case TARGET_NR_capset:
  7360. {
  7361. struct target_user_cap_header *target_header;
  7362. struct target_user_cap_data *target_data = NULL;
  7363. struct __user_cap_header_struct header;
  7364. struct __user_cap_data_struct data[2];
  7365. struct __user_cap_data_struct *dataptr = NULL;
  7366. int i, target_datalen;
  7367. int data_items = 1;
  7368. if (!lock_user_struct(VERIFY_WRITE, target_header, arg1, 1)) {
  7369. goto efault;
  7370. }
  7371. header.version = tswap32(target_header->version);
  7372. header.pid = tswap32(target_header->pid);
  7373. if (header.version != _LINUX_CAPABILITY_VERSION) {
7374. /* Versions 2 and up take a pointer to two user_data structs */
  7375. data_items = 2;
  7376. }
  7377. target_datalen = sizeof(*target_data) * data_items;
  7378. if (arg2) {
  7379. if (num == TARGET_NR_capget) {
  7380. target_data = lock_user(VERIFY_WRITE, arg2, target_datalen, 0);
  7381. } else {
  7382. target_data = lock_user(VERIFY_READ, arg2, target_datalen, 1);
  7383. }
  7384. if (!target_data) {
  7385. unlock_user_struct(target_header, arg1, 0);
  7386. goto efault;
  7387. }
  7388. if (num == TARGET_NR_capset) {
  7389. for (i = 0; i < data_items; i++) {
  7390. data[i].effective = tswap32(target_data[i].effective);
  7391. data[i].permitted = tswap32(target_data[i].permitted);
  7392. data[i].inheritable = tswap32(target_data[i].inheritable);
  7393. }
  7394. }
  7395. dataptr = data;
  7396. }
  7397. if (num == TARGET_NR_capget) {
  7398. ret = get_errno(capget(&header, dataptr));
  7399. } else {
  7400. ret = get_errno(capset(&header, dataptr));
  7401. }
  7402. /* The kernel always updates version for both capget and capset */
  7403. target_header->version = tswap32(header.version);
  7404. unlock_user_struct(target_header, arg1, 1);
  7405. if (arg2) {
  7406. if (num == TARGET_NR_capget) {
  7407. for (i = 0; i < data_items; i++) {
  7408. target_data[i].effective = tswap32(data[i].effective);
  7409. target_data[i].permitted = tswap32(data[i].permitted);
  7410. target_data[i].inheritable = tswap32(data[i].inheritable);
  7411. }
  7412. unlock_user(target_data, arg2, target_datalen);
  7413. } else {
  7414. unlock_user(target_data, arg2, 0);
  7415. }
  7416. }
  7417. break;
  7418. }
  7419. case TARGET_NR_sigaltstack:
  7420. #if defined(TARGET_I386) || defined(TARGET_ARM) || defined(TARGET_MIPS) || \
  7421. defined(TARGET_SPARC) || defined(TARGET_PPC) || defined(TARGET_ALPHA) || \
  7422. defined(TARGET_M68K) || defined(TARGET_S390X) || defined(TARGET_OPENRISC)
  7423. ret = do_sigaltstack(arg1, arg2, get_sp_from_cpustate((CPUArchState *)cpu_env));
  7424. break;
  7425. #else
  7426. goto unimplemented;
  7427. #endif
  7428. #ifdef CONFIG_SENDFILE
  7429. case TARGET_NR_sendfile:
  7430. {
  7431. off_t *offp = NULL;
  7432. off_t off;
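/* The file offset, if given, is copied in from guest memory and
 * written back after the host call updates it. */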
  7433. if (arg3) {
  7434. ret = get_user_sal(off, arg3);
  7435. if (is_error(ret)) {
  7436. break;
  7437. }
  7438. offp = &off;
  7439. }
  7440. ret = get_errno(sendfile(arg1, arg2, offp, arg4));
  7441. if (!is_error(ret) && arg3) {
  7442. abi_long ret2 = put_user_sal(off, arg3);
  7443. if (is_error(ret2)) {
  7444. ret = ret2;
  7445. }
  7446. }
  7447. break;
  7448. }
  7449. #ifdef TARGET_NR_sendfile64
  7450. case TARGET_NR_sendfile64:
  7451. {
  7452. off_t *offp = NULL;
  7453. off_t off;
  7454. if (arg3) {
  7455. ret = get_user_s64(off, arg3);
  7456. if (is_error(ret)) {
  7457. break;
  7458. }
  7459. offp = &off;
  7460. }
  7461. ret = get_errno(sendfile(arg1, arg2, offp, arg4));
  7462. if (!is_error(ret) && arg3) {
  7463. abi_long ret2 = put_user_s64(off, arg3);
  7464. if (is_error(ret2)) {
  7465. ret = ret2;
  7466. }
  7467. }
  7468. break;
  7469. }
  7470. #endif
  7471. #else
  7472. case TARGET_NR_sendfile:
  7473. #ifdef TARGET_NR_sendfile64
  7474. case TARGET_NR_sendfile64:
  7475. #endif
  7476. goto unimplemented;
  7477. #endif
  7478. #ifdef TARGET_NR_getpmsg
  7479. case TARGET_NR_getpmsg:
  7480. goto unimplemented;
  7481. #endif
  7482. #ifdef TARGET_NR_putpmsg
  7483. case TARGET_NR_putpmsg:
  7484. goto unimplemented;
  7485. #endif
  7486. #ifdef TARGET_NR_vfork
  7487. case TARGET_NR_vfork:
  7488. ret = get_errno(do_fork(cpu_env, CLONE_VFORK | CLONE_VM | SIGCHLD,
  7489. 0, 0, 0, 0));
  7490. break;
  7491. #endif
  7492. #ifdef TARGET_NR_ugetrlimit
  7493. case TARGET_NR_ugetrlimit:
  7494. {
  7495. struct rlimit rlim;
  7496. int resource = target_to_host_resource(arg1);
  7497. ret = get_errno(getrlimit(resource, &rlim));
  7498. if (!is_error(ret)) {
  7499. struct target_rlimit *target_rlim;
  7500. if (!lock_user_struct(VERIFY_WRITE, target_rlim, arg2, 0))
  7501. goto efault;
  7502. target_rlim->rlim_cur = host_to_target_rlim(rlim.rlim_cur);
  7503. target_rlim->rlim_max = host_to_target_rlim(rlim.rlim_max);
  7504. unlock_user_struct(target_rlim, arg2, 1);
  7505. }
  7506. break;
  7507. }
  7508. #endif
  7509. #ifdef TARGET_NR_truncate64
  7510. case TARGET_NR_truncate64:
  7511. if (!(p = lock_user_string(arg1)))
  7512. goto efault;
  7513. ret = target_truncate64(cpu_env, p, arg2, arg3, arg4);
  7514. unlock_user(p, arg1, 0);
  7515. break;
  7516. #endif
  7517. #ifdef TARGET_NR_ftruncate64
  7518. case TARGET_NR_ftruncate64:
  7519. ret = target_ftruncate64(cpu_env, arg1, arg2, arg3, arg4);
  7520. break;
  7521. #endif
  7522. #ifdef TARGET_NR_stat64
  7523. case TARGET_NR_stat64:
  7524. if (!(p = lock_user_string(arg1)))
  7525. goto efault;
  7526. ret = get_errno(stat(path(p), &st));
  7527. unlock_user(p, arg1, 0);
  7528. if (!is_error(ret))
  7529. ret = host_to_target_stat64(cpu_env, arg2, &st);
  7530. break;
  7531. #endif
  7532. #ifdef TARGET_NR_lstat64
  7533. case TARGET_NR_lstat64:
  7534. if (!(p = lock_user_string(arg1)))
  7535. goto efault;
  7536. ret = get_errno(lstat(path(p), &st));
  7537. unlock_user(p, arg1, 0);
  7538. if (!is_error(ret))
  7539. ret = host_to_target_stat64(cpu_env, arg2, &st);
  7540. break;
  7541. #endif
  7542. #ifdef TARGET_NR_fstat64
  7543. case TARGET_NR_fstat64:
  7544. ret = get_errno(fstat(arg1, &st));
  7545. if (!is_error(ret))
  7546. ret = host_to_target_stat64(cpu_env, arg2, &st);
  7547. break;
  7548. #endif
  7549. #if (defined(TARGET_NR_fstatat64) || defined(TARGET_NR_newfstatat))
  7550. #ifdef TARGET_NR_fstatat64
  7551. case TARGET_NR_fstatat64:
  7552. #endif
  7553. #ifdef TARGET_NR_newfstatat
  7554. case TARGET_NR_newfstatat:
  7555. #endif
  7556. if (!(p = lock_user_string(arg2)))
  7557. goto efault;
7558. ret = get_errno(fstatat(arg1, path(p), &st, arg4));
unlock_user(p, arg2, 0);
7559. if (!is_error(ret))
  7560. ret = host_to_target_stat64(cpu_env, arg3, &st);
  7561. break;
  7562. #endif
  7563. case TARGET_NR_lchown:
  7564. if (!(p = lock_user_string(arg1)))
  7565. goto efault;
  7566. ret = get_errno(lchown(p, low2highuid(arg2), low2highgid(arg3)));
  7567. unlock_user(p, arg1, 0);
  7568. break;
  7569. #ifdef TARGET_NR_getuid
  7570. case TARGET_NR_getuid:
  7571. ret = get_errno(high2lowuid(getuid()));
  7572. break;
  7573. #endif
  7574. #ifdef TARGET_NR_getgid
  7575. case TARGET_NR_getgid:
  7576. ret = get_errno(high2lowgid(getgid()));
  7577. break;
  7578. #endif
  7579. #ifdef TARGET_NR_geteuid
  7580. case TARGET_NR_geteuid:
  7581. ret = get_errno(high2lowuid(geteuid()));
  7582. break;
  7583. #endif
  7584. #ifdef TARGET_NR_getegid
  7585. case TARGET_NR_getegid:
  7586. ret = get_errno(high2lowgid(getegid()));
  7587. break;
  7588. #endif
  7589. case TARGET_NR_setreuid:
  7590. ret = get_errno(setreuid(low2highuid(arg1), low2highuid(arg2)));
  7591. break;
  7592. case TARGET_NR_setregid:
  7593. ret = get_errno(setregid(low2highgid(arg1), low2highgid(arg2)));
  7594. break;
  7595. case TARGET_NR_getgroups:
  7596. {
  7597. int gidsetsize = arg1;
  7598. target_id *target_grouplist;
  7599. gid_t *grouplist;
  7600. int i;
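/* Fetch the host group list, then convert each gid to the target's
 * id width and byte order before copying it out. */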
  7601. grouplist = alloca(gidsetsize * sizeof(gid_t));
  7602. ret = get_errno(getgroups(gidsetsize, grouplist));
  7603. if (gidsetsize == 0)
  7604. break;
  7605. if (!is_error(ret)) {
  7606. target_grouplist = lock_user(VERIFY_WRITE, arg2, gidsetsize * sizeof(target_id), 0);
  7607. if (!target_grouplist)
  7608. goto efault;
  7609. for(i = 0;i < ret; i++)
  7610. target_grouplist[i] = tswapid(high2lowgid(grouplist[i]));
  7611. unlock_user(target_grouplist, arg2, gidsetsize * sizeof(target_id));
  7612. }
  7613. }
  7614. break;
  7615. case TARGET_NR_setgroups:
  7616. {
  7617. int gidsetsize = arg1;
  7618. target_id *target_grouplist;
  7619. gid_t *grouplist = NULL;
  7620. int i;
  7621. if (gidsetsize) {
  7622. grouplist = alloca(gidsetsize * sizeof(gid_t));
  7623. target_grouplist = lock_user(VERIFY_READ, arg2, gidsetsize * sizeof(target_id), 1);
  7624. if (!target_grouplist) {
  7625. ret = -TARGET_EFAULT;
  7626. goto fail;
  7627. }
  7628. for (i = 0; i < gidsetsize; i++) {
  7629. grouplist[i] = low2highgid(tswapid(target_grouplist[i]));
  7630. }
  7631. unlock_user(target_grouplist, arg2, 0);
  7632. }
  7633. ret = get_errno(setgroups(gidsetsize, grouplist));
  7634. }
  7635. break;
  7636. case TARGET_NR_fchown:
  7637. ret = get_errno(fchown(arg1, low2highuid(arg2), low2highgid(arg3)));
  7638. break;
  7639. #if defined(TARGET_NR_fchownat)
  7640. case TARGET_NR_fchownat:
  7641. if (!(p = lock_user_string(arg2)))
  7642. goto efault;
  7643. ret = get_errno(fchownat(arg1, p, low2highuid(arg3),
  7644. low2highgid(arg4), arg5));
  7645. unlock_user(p, arg2, 0);
  7646. break;
  7647. #endif
  7648. #ifdef TARGET_NR_setresuid
  7649. case TARGET_NR_setresuid:
  7650. ret = get_errno(setresuid(low2highuid(arg1),
  7651. low2highuid(arg2),
  7652. low2highuid(arg3)));
  7653. break;
  7654. #endif
  7655. #ifdef TARGET_NR_getresuid
  7656. case TARGET_NR_getresuid:
  7657. {
  7658. uid_t ruid, euid, suid;
  7659. ret = get_errno(getresuid(&ruid, &euid, &suid));
  7660. if (!is_error(ret)) {
  7661. if (put_user_id(high2lowuid(ruid), arg1)
  7662. || put_user_id(high2lowuid(euid), arg2)
  7663. || put_user_id(high2lowuid(suid), arg3))
  7664. goto efault;
  7665. }
  7666. }
  7667. break;
  7668. #endif
  7669. #ifdef TARGET_NR_getresgid
  7670. case TARGET_NR_setresgid:
  7671. ret = get_errno(setresgid(low2highgid(arg1),
  7672. low2highgid(arg2),
  7673. low2highgid(arg3)));
  7674. break;
  7675. #endif
  7676. #ifdef TARGET_NR_getresgid
  7677. case TARGET_NR_getresgid:
  7678. {
  7679. gid_t rgid, egid, sgid;
  7680. ret = get_errno(getresgid(&rgid, &egid, &sgid));
  7681. if (!is_error(ret)) {
  7682. if (put_user_id(high2lowgid(rgid), arg1)
  7683. || put_user_id(high2lowgid(egid), arg2)
  7684. || put_user_id(high2lowgid(sgid), arg3))
  7685. goto efault;
  7686. }
  7687. }
  7688. break;
  7689. #endif
  7690. case TARGET_NR_chown:
  7691. if (!(p = lock_user_string(arg1)))
  7692. goto efault;
  7693. ret = get_errno(chown(p, low2highuid(arg2), low2highgid(arg3)));
  7694. unlock_user(p, arg1, 0);
  7695. break;
  7696. case TARGET_NR_setuid:
  7697. ret = get_errno(setuid(low2highuid(arg1)));
  7698. break;
  7699. case TARGET_NR_setgid:
  7700. ret = get_errno(setgid(low2highgid(arg1)));
  7701. break;
  7702. case TARGET_NR_setfsuid:
  7703. ret = get_errno(setfsuid(arg1));
  7704. break;
  7705. case TARGET_NR_setfsgid:
  7706. ret = get_errno(setfsgid(arg1));
  7707. break;
  7708. #ifdef TARGET_NR_lchown32
  7709. case TARGET_NR_lchown32:
  7710. if (!(p = lock_user_string(arg1)))
  7711. goto efault;
  7712. ret = get_errno(lchown(p, arg2, arg3));
  7713. unlock_user(p, arg1, 0);
  7714. break;
  7715. #endif
  7716. #ifdef TARGET_NR_getuid32
  7717. case TARGET_NR_getuid32:
  7718. ret = get_errno(getuid());
  7719. break;
  7720. #endif
  7721. #if defined(TARGET_NR_getxuid) && defined(TARGET_ALPHA)
  7722. /* Alpha specific */
  7723. case TARGET_NR_getxuid:
  7724. {
  7725. uid_t euid;
7726. euid = geteuid();
7727. ((CPUAlphaState *)cpu_env)->ir[IR_A4] = euid;
  7728. }
  7729. ret = get_errno(getuid());
  7730. break;
  7731. #endif
  7732. #if defined(TARGET_NR_getxgid) && defined(TARGET_ALPHA)
  7733. /* Alpha specific */
  7734. case TARGET_NR_getxgid:
  7735. {
7736. gid_t egid;
7737. egid = getegid();
7738. ((CPUAlphaState *)cpu_env)->ir[IR_A4] = egid;
  7739. }
  7740. ret = get_errno(getgid());
  7741. break;
  7742. #endif
  7743. #if defined(TARGET_NR_osf_getsysinfo) && defined(TARGET_ALPHA)
  7744. /* Alpha specific */
  7745. case TARGET_NR_osf_getsysinfo:
  7746. ret = -TARGET_EOPNOTSUPP;
  7747. switch (arg1) {
  7748. case TARGET_GSI_IEEE_FP_CONTROL:
  7749. {
  7750. uint64_t swcr, fpcr = cpu_alpha_load_fpcr (cpu_env);
  7751. /* Copied from linux ieee_fpcr_to_swcr. */
  7752. swcr = (fpcr >> 35) & SWCR_STATUS_MASK;
  7753. swcr |= (fpcr >> 36) & SWCR_MAP_DMZ;
  7754. swcr |= (~fpcr >> 48) & (SWCR_TRAP_ENABLE_INV
  7755. | SWCR_TRAP_ENABLE_DZE
  7756. | SWCR_TRAP_ENABLE_OVF);
  7757. swcr |= (~fpcr >> 57) & (SWCR_TRAP_ENABLE_UNF
  7758. | SWCR_TRAP_ENABLE_INE);
  7759. swcr |= (fpcr >> 47) & SWCR_MAP_UMZ;
  7760. swcr |= (~fpcr >> 41) & SWCR_TRAP_ENABLE_DNO;
  7761. if (put_user_u64 (swcr, arg2))
  7762. goto efault;
  7763. ret = 0;
  7764. }
  7765. break;
  7766. /* case GSI_IEEE_STATE_AT_SIGNAL:
  7767. -- Not implemented in linux kernel.
  7768. case GSI_UACPROC:
  7769. -- Retrieves current unaligned access state; not much used.
  7770. case GSI_PROC_TYPE:
  7771. -- Retrieves implver information; surely not used.
  7772. case GSI_GET_HWRPB:
  7773. -- Grabs a copy of the HWRPB; surely not used.
  7774. */
  7775. }
  7776. break;
  7777. #endif
  7778. #if defined(TARGET_NR_osf_setsysinfo) && defined(TARGET_ALPHA)
  7779. /* Alpha specific */
  7780. case TARGET_NR_osf_setsysinfo:
  7781. ret = -TARGET_EOPNOTSUPP;
  7782. switch (arg1) {
  7783. case TARGET_SSI_IEEE_FP_CONTROL:
  7784. {
  7785. uint64_t swcr, fpcr, orig_fpcr;
  7786. if (get_user_u64 (swcr, arg2)) {
  7787. goto efault;
  7788. }
  7789. orig_fpcr = cpu_alpha_load_fpcr(cpu_env);
  7790. fpcr = orig_fpcr & FPCR_DYN_MASK;
  7791. /* Copied from linux ieee_swcr_to_fpcr. */
  7792. fpcr |= (swcr & SWCR_STATUS_MASK) << 35;
  7793. fpcr |= (swcr & SWCR_MAP_DMZ) << 36;
  7794. fpcr |= (~swcr & (SWCR_TRAP_ENABLE_INV
  7795. | SWCR_TRAP_ENABLE_DZE
  7796. | SWCR_TRAP_ENABLE_OVF)) << 48;
  7797. fpcr |= (~swcr & (SWCR_TRAP_ENABLE_UNF
  7798. | SWCR_TRAP_ENABLE_INE)) << 57;
  7799. fpcr |= (swcr & SWCR_MAP_UMZ ? FPCR_UNDZ | FPCR_UNFD : 0);
  7800. fpcr |= (~swcr & SWCR_TRAP_ENABLE_DNO) << 41;
  7801. cpu_alpha_store_fpcr(cpu_env, fpcr);
  7802. ret = 0;
  7803. }
  7804. break;
  7805. case TARGET_SSI_IEEE_RAISE_EXCEPTION:
  7806. {
  7807. uint64_t exc, fpcr, orig_fpcr;
  7808. int si_code;
  7809. if (get_user_u64(exc, arg2)) {
  7810. goto efault;
  7811. }
  7812. orig_fpcr = cpu_alpha_load_fpcr(cpu_env);
  7813. /* We only add to the exception status here. */
  7814. fpcr = orig_fpcr | ((exc & SWCR_STATUS_MASK) << 35);
  7815. cpu_alpha_store_fpcr(cpu_env, fpcr);
  7816. ret = 0;
  7817. /* Old exceptions are not signaled. */
  7818. fpcr &= ~(orig_fpcr & FPCR_STATUS_MASK);
7819. /* If any exceptions were set by this call
7820. and are unmasked, send a signal. */
  7821. si_code = 0;
  7822. if ((fpcr & (FPCR_INE | FPCR_INED)) == FPCR_INE) {
  7823. si_code = TARGET_FPE_FLTRES;
  7824. }
  7825. if ((fpcr & (FPCR_UNF | FPCR_UNFD)) == FPCR_UNF) {
  7826. si_code = TARGET_FPE_FLTUND;
  7827. }
  7828. if ((fpcr & (FPCR_OVF | FPCR_OVFD)) == FPCR_OVF) {
  7829. si_code = TARGET_FPE_FLTOVF;
  7830. }
  7831. if ((fpcr & (FPCR_DZE | FPCR_DZED)) == FPCR_DZE) {
  7832. si_code = TARGET_FPE_FLTDIV;
  7833. }
  7834. if ((fpcr & (FPCR_INV | FPCR_INVD)) == FPCR_INV) {
  7835. si_code = TARGET_FPE_FLTINV;
  7836. }
  7837. if (si_code != 0) {
  7838. target_siginfo_t info;
  7839. info.si_signo = SIGFPE;
  7840. info.si_errno = 0;
  7841. info.si_code = si_code;
  7842. info._sifields._sigfault._addr
  7843. = ((CPUArchState *)cpu_env)->pc;
  7844. queue_signal((CPUArchState *)cpu_env, info.si_signo, &info);
  7845. }
  7846. }
  7847. break;
  7848. /* case SSI_NVPAIRS:
  7849. -- Used with SSIN_UACPROC to enable unaligned accesses.
  7850. case SSI_IEEE_STATE_AT_SIGNAL:
  7851. case SSI_IEEE_IGNORE_STATE_AT_SIGNAL:
  7852. -- Not implemented in linux kernel
  7853. */
  7854. }
  7855. break;
  7856. #endif
  7857. #ifdef TARGET_NR_osf_sigprocmask
  7858. /* Alpha specific. */
  7859. case TARGET_NR_osf_sigprocmask:
  7860. {
  7861. abi_ulong mask;
  7862. int how;
  7863. sigset_t set, oldset;
  7864. switch(arg1) {
  7865. case TARGET_SIG_BLOCK:
  7866. how = SIG_BLOCK;
  7867. break;
  7868. case TARGET_SIG_UNBLOCK:
  7869. how = SIG_UNBLOCK;
  7870. break;
  7871. case TARGET_SIG_SETMASK:
  7872. how = SIG_SETMASK;
  7873. break;
  7874. default:
  7875. ret = -TARGET_EINVAL;
  7876. goto fail;
  7877. }
  7878. mask = arg2;
  7879. target_to_host_old_sigset(&set, &mask);
  7880. do_sigprocmask(how, &set, &oldset);
  7881. host_to_target_old_sigset(&mask, &oldset);
  7882. ret = mask;
  7883. }
  7884. break;
  7885. #endif
  7886. #ifdef TARGET_NR_getgid32
  7887. case TARGET_NR_getgid32:
  7888. ret = get_errno(getgid());
  7889. break;
  7890. #endif
  7891. #ifdef TARGET_NR_geteuid32
  7892. case TARGET_NR_geteuid32:
  7893. ret = get_errno(geteuid());
  7894. break;
  7895. #endif
  7896. #ifdef TARGET_NR_getegid32
  7897. case TARGET_NR_getegid32:
  7898. ret = get_errno(getegid());
  7899. break;
  7900. #endif
  7901. #ifdef TARGET_NR_setreuid32
  7902. case TARGET_NR_setreuid32:
  7903. ret = get_errno(setreuid(arg1, arg2));
  7904. break;
  7905. #endif
  7906. #ifdef TARGET_NR_setregid32
  7907. case TARGET_NR_setregid32:
  7908. ret = get_errno(setregid(arg1, arg2));
  7909. break;
  7910. #endif
  7911. #ifdef TARGET_NR_getgroups32
  7912. case TARGET_NR_getgroups32:
  7913. {
  7914. int gidsetsize = arg1;
  7915. uint32_t *target_grouplist;
  7916. gid_t *grouplist;
  7917. int i;
  7918. grouplist = alloca(gidsetsize * sizeof(gid_t));
  7919. ret = get_errno(getgroups(gidsetsize, grouplist));
  7920. if (gidsetsize == 0)
  7921. break;
  7922. if (!is_error(ret)) {
  7923. target_grouplist = lock_user(VERIFY_WRITE, arg2, gidsetsize * 4, 0);
  7924. if (!target_grouplist) {
  7925. ret = -TARGET_EFAULT;
  7926. goto fail;
  7927. }
  7928. for(i = 0;i < ret; i++)
  7929. target_grouplist[i] = tswap32(grouplist[i]);
  7930. unlock_user(target_grouplist, arg2, gidsetsize * 4);
  7931. }
  7932. }
  7933. break;
  7934. #endif
  7935. #ifdef TARGET_NR_setgroups32
  7936. case TARGET_NR_setgroups32:
  7937. {
  7938. int gidsetsize = arg1;
  7939. uint32_t *target_grouplist;
  7940. gid_t *grouplist;
  7941. int i;
  7942. grouplist = alloca(gidsetsize * sizeof(gid_t));
  7943. target_grouplist = lock_user(VERIFY_READ, arg2, gidsetsize * 4, 1);
  7944. if (!target_grouplist) {
  7945. ret = -TARGET_EFAULT;
  7946. goto fail;
  7947. }
  7948. for(i = 0;i < gidsetsize; i++)
  7949. grouplist[i] = tswap32(target_grouplist[i]);
  7950. unlock_user(target_grouplist, arg2, 0);
  7951. ret = get_errno(setgroups(gidsetsize, grouplist));
  7952. }
  7953. break;
  7954. #endif
  7955. #ifdef TARGET_NR_fchown32
  7956. case TARGET_NR_fchown32:
  7957. ret = get_errno(fchown(arg1, arg2, arg3));
  7958. break;
  7959. #endif
  7960. #ifdef TARGET_NR_setresuid32
  7961. case TARGET_NR_setresuid32:
  7962. ret = get_errno(setresuid(arg1, arg2, arg3));
  7963. break;
  7964. #endif
  7965. #ifdef TARGET_NR_getresuid32
  7966. case TARGET_NR_getresuid32:
  7967. {
  7968. uid_t ruid, euid, suid;
  7969. ret = get_errno(getresuid(&ruid, &euid, &suid));
  7970. if (!is_error(ret)) {
  7971. if (put_user_u32(ruid, arg1)
  7972. || put_user_u32(euid, arg2)
  7973. || put_user_u32(suid, arg3))
  7974. goto efault;
  7975. }
  7976. }
  7977. break;
  7978. #endif
  7979. #ifdef TARGET_NR_setresgid32
  7980. case TARGET_NR_setresgid32:
  7981. ret = get_errno(setresgid(arg1, arg2, arg3));
  7982. break;
  7983. #endif
  7984. #ifdef TARGET_NR_getresgid32
  7985. case TARGET_NR_getresgid32:
  7986. {
  7987. gid_t rgid, egid, sgid;
  7988. ret = get_errno(getresgid(&rgid, &egid, &sgid));
  7989. if (!is_error(ret)) {
  7990. if (put_user_u32(rgid, arg1)
  7991. || put_user_u32(egid, arg2)
  7992. || put_user_u32(sgid, arg3))
  7993. goto efault;
  7994. }
  7995. }
  7996. break;
  7997. #endif
  7998. #ifdef TARGET_NR_chown32
  7999. case TARGET_NR_chown32:
  8000. if (!(p = lock_user_string(arg1)))
  8001. goto efault;
  8002. ret = get_errno(chown(p, arg2, arg3));
  8003. unlock_user(p, arg1, 0);
  8004. break;
  8005. #endif
  8006. #ifdef TARGET_NR_setuid32
  8007. case TARGET_NR_setuid32:
  8008. ret = get_errno(setuid(arg1));
  8009. break;
  8010. #endif
  8011. #ifdef TARGET_NR_setgid32
  8012. case TARGET_NR_setgid32:
  8013. ret = get_errno(setgid(arg1));
  8014. break;
  8015. #endif
  8016. #ifdef TARGET_NR_setfsuid32
  8017. case TARGET_NR_setfsuid32:
  8018. ret = get_errno(setfsuid(arg1));
  8019. break;
  8020. #endif
  8021. #ifdef TARGET_NR_setfsgid32
  8022. case TARGET_NR_setfsgid32:
  8023. ret = get_errno(setfsgid(arg1));
  8024. break;
  8025. #endif
  8026. case TARGET_NR_pivot_root:
  8027. goto unimplemented;
  8028. #ifdef TARGET_NR_mincore
  8029. case TARGET_NR_mincore:
  8030. {
  8031. void *a;
  8032. ret = -TARGET_EFAULT;
  8033. if (!(a = lock_user(VERIFY_READ, arg1,arg2, 0)))
  8034. goto efault;
  8035. if (!(p = lock_user_string(arg3)))
  8036. goto mincore_fail;
  8037. ret = get_errno(mincore(a, arg2, p));
  8038. unlock_user(p, arg3, ret);
  8039. mincore_fail:
  8040. unlock_user(a, arg1, 0);
  8041. }
  8042. break;
  8043. #endif
  8044. #ifdef TARGET_NR_arm_fadvise64_64
  8045. case TARGET_NR_arm_fadvise64_64:
  8046. {
  8047. /*
  8048. * arm_fadvise64_64 looks like fadvise64_64 but
  8049. * with different argument order
  8050. */
  8051. abi_long temp;
  8052. temp = arg3;
  8053. arg3 = arg4;
  8054. arg4 = temp;
  8055. }
  8056. #endif
  8057. #if defined(TARGET_NR_fadvise64_64) || defined(TARGET_NR_arm_fadvise64_64) || defined(TARGET_NR_fadvise64)
  8058. #ifdef TARGET_NR_fadvise64_64
  8059. case TARGET_NR_fadvise64_64:
  8060. #endif
  8061. #ifdef TARGET_NR_fadvise64
  8062. case TARGET_NR_fadvise64:
  8063. #endif
  8064. #ifdef TARGET_S390X
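/* s390 guests use different values for POSIX_FADV_DONTNEED/NOREUSE
 * (6 and 7), so remap those advice values to the host's constants. */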
  8065. switch (arg4) {
  8066. case 4: arg4 = POSIX_FADV_NOREUSE + 1; break; /* make sure it's an invalid value */
  8067. case 5: arg4 = POSIX_FADV_NOREUSE + 2; break; /* ditto */
  8068. case 6: arg4 = POSIX_FADV_DONTNEED; break;
  8069. case 7: arg4 = POSIX_FADV_NOREUSE; break;
  8070. default: break;
  8071. }
  8072. #endif
  8073. ret = -posix_fadvise(arg1, arg2, arg3, arg4);
  8074. break;
  8075. #endif
  8076. #ifdef TARGET_NR_madvise
  8077. case TARGET_NR_madvise:
  8078. /* A straight passthrough may not be safe because qemu sometimes
  8079. turns private file-backed mappings into anonymous mappings.
  8080. This will break MADV_DONTNEED.
  8081. This is a hint, so ignoring and returning success is ok. */
  8082. ret = get_errno(0);
  8083. break;
  8084. #endif
  8085. #if TARGET_ABI_BITS == 32
  8086. case TARGET_NR_fcntl64:
  8087. {
  8088. int cmd;
  8089. struct flock64 fl;
  8090. struct target_flock64 *target_fl;
  8091. #ifdef TARGET_ARM
  8092. struct target_eabi_flock64 *target_efl;
  8093. #endif
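/* On ARM the EABI flock64 layout differs from the legacy OABI one
 * (extra alignment padding), so the conversion path is chosen from
 * the CPU's eabi flag at runtime. */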
  8094. cmd = target_to_host_fcntl_cmd(arg2);
  8095. if (cmd == -TARGET_EINVAL) {
  8096. ret = cmd;
  8097. break;
  8098. }
  8099. switch(arg2) {
  8100. case TARGET_F_GETLK64:
  8101. #ifdef TARGET_ARM
  8102. if (((CPUARMState *)cpu_env)->eabi) {
  8103. if (!lock_user_struct(VERIFY_READ, target_efl, arg3, 1))
  8104. goto efault;
  8105. fl.l_type = tswap16(target_efl->l_type);
  8106. fl.l_whence = tswap16(target_efl->l_whence);
  8107. fl.l_start = tswap64(target_efl->l_start);
  8108. fl.l_len = tswap64(target_efl->l_len);
  8109. fl.l_pid = tswap32(target_efl->l_pid);
  8110. unlock_user_struct(target_efl, arg3, 0);
  8111. } else
  8112. #endif
  8113. {
  8114. if (!lock_user_struct(VERIFY_READ, target_fl, arg3, 1))
  8115. goto efault;
  8116. fl.l_type = tswap16(target_fl->l_type);
  8117. fl.l_whence = tswap16(target_fl->l_whence);
  8118. fl.l_start = tswap64(target_fl->l_start);
  8119. fl.l_len = tswap64(target_fl->l_len);
  8120. fl.l_pid = tswap32(target_fl->l_pid);
  8121. unlock_user_struct(target_fl, arg3, 0);
  8122. }
  8123. ret = get_errno(fcntl(arg1, cmd, &fl));
  8124. if (ret == 0) {
  8125. #ifdef TARGET_ARM
  8126. if (((CPUARMState *)cpu_env)->eabi) {
  8127. if (!lock_user_struct(VERIFY_WRITE, target_efl, arg3, 0))
  8128. goto efault;
  8129. target_efl->l_type = tswap16(fl.l_type);
  8130. target_efl->l_whence = tswap16(fl.l_whence);
  8131. target_efl->l_start = tswap64(fl.l_start);
  8132. target_efl->l_len = tswap64(fl.l_len);
  8133. target_efl->l_pid = tswap32(fl.l_pid);
  8134. unlock_user_struct(target_efl, arg3, 1);
  8135. } else
  8136. #endif
  8137. {
  8138. if (!lock_user_struct(VERIFY_WRITE, target_fl, arg3, 0))
  8139. goto efault;
  8140. target_fl->l_type = tswap16(fl.l_type);
  8141. target_fl->l_whence = tswap16(fl.l_whence);
  8142. target_fl->l_start = tswap64(fl.l_start);
  8143. target_fl->l_len = tswap64(fl.l_len);
  8144. target_fl->l_pid = tswap32(fl.l_pid);
  8145. unlock_user_struct(target_fl, arg3, 1);
  8146. }
  8147. }
  8148. break;
  8149. case TARGET_F_SETLK64:
  8150. case TARGET_F_SETLKW64:
  8151. #ifdef TARGET_ARM
  8152. if (((CPUARMState *)cpu_env)->eabi) {
  8153. if (!lock_user_struct(VERIFY_READ, target_efl, arg3, 1))
  8154. goto efault;
  8155. fl.l_type = tswap16(target_efl->l_type);
  8156. fl.l_whence = tswap16(target_efl->l_whence);
  8157. fl.l_start = tswap64(target_efl->l_start);
  8158. fl.l_len = tswap64(target_efl->l_len);
  8159. fl.l_pid = tswap32(target_efl->l_pid);
  8160. unlock_user_struct(target_efl, arg3, 0);
  8161. } else
  8162. #endif
  8163. {
  8164. if (!lock_user_struct(VERIFY_READ, target_fl, arg3, 1))
  8165. goto efault;
  8166. fl.l_type = tswap16(target_fl->l_type);
  8167. fl.l_whence = tswap16(target_fl->l_whence);
  8168. fl.l_start = tswap64(target_fl->l_start);
  8169. fl.l_len = tswap64(target_fl->l_len);
  8170. fl.l_pid = tswap32(target_fl->l_pid);
  8171. unlock_user_struct(target_fl, arg3, 0);
  8172. }
  8173. ret = get_errno(fcntl(arg1, cmd, &fl));
  8174. break;
  8175. default:
  8176. ret = do_fcntl(arg1, arg2, arg3);
  8177. break;
  8178. }
  8179. break;
  8180. }
  8181. #endif
  8182. #ifdef TARGET_NR_cacheflush
  8183. case TARGET_NR_cacheflush:
  8184. /* self-modifying code is handled automatically, so nothing needed */
  8185. ret = 0;
  8186. break;
  8187. #endif
  8188. #ifdef TARGET_NR_security
  8189. case TARGET_NR_security:
  8190. goto unimplemented;
  8191. #endif
  8192. #ifdef TARGET_NR_getpagesize
  8193. case TARGET_NR_getpagesize:
  8194. ret = TARGET_PAGE_SIZE;
  8195. break;
  8196. #endif
  8197. case TARGET_NR_gettid:
  8198. ret = get_errno(gettid());
  8199. break;
  8200. #ifdef TARGET_NR_readahead
  8201. case TARGET_NR_readahead:
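/* On 32-bit ABIs the 64-bit offset arrives split across two argument
 * registers (an aligned pair on some targets). */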
  8202. #if TARGET_ABI_BITS == 32
  8203. if (regpairs_aligned(cpu_env)) {
  8204. arg2 = arg3;
  8205. arg3 = arg4;
  8206. arg4 = arg5;
  8207. }
  8208. ret = get_errno(readahead(arg1, ((off64_t)arg3 << 32) | arg2, arg4));
  8209. #else
  8210. ret = get_errno(readahead(arg1, arg2, arg3));
  8211. #endif
  8212. break;
  8213. #endif
  8214. #ifdef CONFIG_ATTR
  8215. #ifdef TARGET_NR_setxattr
  8216. case TARGET_NR_listxattr:
  8217. case TARGET_NR_llistxattr:
  8218. {
  8219. void *p, *b = 0;
  8220. if (arg2) {
  8221. b = lock_user(VERIFY_WRITE, arg2, arg3, 0);
  8222. if (!b) {
  8223. ret = -TARGET_EFAULT;
  8224. break;
  8225. }
  8226. }
  8227. p = lock_user_string(arg1);
  8228. if (p) {
  8229. if (num == TARGET_NR_listxattr) {
  8230. ret = get_errno(listxattr(p, b, arg3));
  8231. } else {
  8232. ret = get_errno(llistxattr(p, b, arg3));
  8233. }
  8234. } else {
  8235. ret = -TARGET_EFAULT;
  8236. }
  8237. unlock_user(p, arg1, 0);
  8238. unlock_user(b, arg2, arg3);
  8239. break;
  8240. }
  8241. case TARGET_NR_flistxattr:
  8242. {
  8243. void *b = 0;
  8244. if (arg2) {
  8245. b = lock_user(VERIFY_WRITE, arg2, arg3, 0);
  8246. if (!b) {
  8247. ret = -TARGET_EFAULT;
  8248. break;
  8249. }
  8250. }
  8251. ret = get_errno(flistxattr(arg1, b, arg3));
  8252. unlock_user(b, arg2, arg3);
  8253. break;
  8254. }
  8255. case TARGET_NR_setxattr:
  8256. case TARGET_NR_lsetxattr:
  8257. {
  8258. void *p, *n, *v = 0;
  8259. if (arg3) {
  8260. v = lock_user(VERIFY_READ, arg3, arg4, 1);
  8261. if (!v) {
  8262. ret = -TARGET_EFAULT;
  8263. break;
  8264. }
  8265. }
  8266. p = lock_user_string(arg1);
  8267. n = lock_user_string(arg2);
  8268. if (p && n) {
  8269. if (num == TARGET_NR_setxattr) {
  8270. ret = get_errno(setxattr(p, n, v, arg4, arg5));
  8271. } else {
  8272. ret = get_errno(lsetxattr(p, n, v, arg4, arg5));
  8273. }
  8274. } else {
  8275. ret = -TARGET_EFAULT;
  8276. }
  8277. unlock_user(p, arg1, 0);
  8278. unlock_user(n, arg2, 0);
  8279. unlock_user(v, arg3, 0);
  8280. }
  8281. break;
  8282. case TARGET_NR_fsetxattr:
  8283. {
  8284. void *n, *v = 0;
  8285. if (arg3) {
  8286. v = lock_user(VERIFY_READ, arg3, arg4, 1);
  8287. if (!v) {
  8288. ret = -TARGET_EFAULT;
  8289. break;
  8290. }
  8291. }
  8292. n = lock_user_string(arg2);
  8293. if (n) {
  8294. ret = get_errno(fsetxattr(arg1, n, v, arg4, arg5));
  8295. } else {
  8296. ret = -TARGET_EFAULT;
  8297. }
  8298. unlock_user(n, arg2, 0);
  8299. unlock_user(v, arg3, 0);
  8300. }
  8301. break;
  8302. case TARGET_NR_getxattr:
  8303. case TARGET_NR_lgetxattr:
  8304. {
  8305. void *p, *n, *v = 0;
  8306. if (arg3) {
  8307. v = lock_user(VERIFY_WRITE, arg3, arg4, 0);
  8308. if (!v) {
  8309. ret = -TARGET_EFAULT;
  8310. break;
  8311. }
  8312. }
  8313. p = lock_user_string(arg1);
  8314. n = lock_user_string(arg2);
  8315. if (p && n) {
  8316. if (num == TARGET_NR_getxattr) {
  8317. ret = get_errno(getxattr(p, n, v, arg4));
  8318. } else {
  8319. ret = get_errno(lgetxattr(p, n, v, arg4));
  8320. }
  8321. } else {
  8322. ret = -TARGET_EFAULT;
  8323. }
  8324. unlock_user(p, arg1, 0);
  8325. unlock_user(n, arg2, 0);
  8326. unlock_user(v, arg3, arg4);
  8327. }
  8328. break;
  8329. case TARGET_NR_fgetxattr:
  8330. {
  8331. void *n, *v = 0;
  8332. if (arg3) {
  8333. v = lock_user(VERIFY_WRITE, arg3, arg4, 0);
  8334. if (!v) {
  8335. ret = -TARGET_EFAULT;
  8336. break;
  8337. }
  8338. }
  8339. n = lock_user_string(arg2);
  8340. if (n) {
  8341. ret = get_errno(fgetxattr(arg1, n, v, arg4));
  8342. } else {
  8343. ret = -TARGET_EFAULT;
  8344. }
  8345. unlock_user(n, arg2, 0);
  8346. unlock_user(v, arg3, arg4);
  8347. }
  8348. break;
  8349. case TARGET_NR_removexattr:
  8350. case TARGET_NR_lremovexattr:
  8351. {
  8352. void *p, *n;
  8353. p = lock_user_string(arg1);
  8354. n = lock_user_string(arg2);
  8355. if (p && n) {
  8356. if (num == TARGET_NR_removexattr) {
  8357. ret = get_errno(removexattr(p, n));
  8358. } else {
  8359. ret = get_errno(lremovexattr(p, n));
  8360. }
  8361. } else {
  8362. ret = -TARGET_EFAULT;
  8363. }
  8364. unlock_user(p, arg1, 0);
  8365. unlock_user(n, arg2, 0);
  8366. }
  8367. break;
  8368. case TARGET_NR_fremovexattr:
  8369. {
  8370. void *n;
  8371. n = lock_user_string(arg2);
  8372. if (n) {
  8373. ret = get_errno(fremovexattr(arg1, n));
  8374. } else {
  8375. ret = -TARGET_EFAULT;
  8376. }
  8377. unlock_user(n, arg2, 0);
  8378. }
  8379. break;
  8380. #endif
  8381. #endif /* CONFIG_ATTR */
  8382. #ifdef TARGET_NR_set_thread_area
  8383. case TARGET_NR_set_thread_area:
  8384. #if defined(TARGET_MIPS)
  8385. ((CPUMIPSState *) cpu_env)->active_tc.CP0_UserLocal = arg1;
  8386. ret = 0;
  8387. break;
  8388. #elif defined(TARGET_CRIS)
  8389. if (arg1 & 0xff)
  8390. ret = -TARGET_EINVAL;
  8391. else {
  8392. ((CPUCRISState *) cpu_env)->pregs[PR_PID] = arg1;
  8393. ret = 0;
  8394. }
  8395. break;
  8396. #elif defined(TARGET_I386) && defined(TARGET_ABI32)
  8397. ret = do_set_thread_area(cpu_env, arg1);
  8398. break;
  8399. #elif defined(TARGET_M68K)
  8400. {
  8401. TaskState *ts = cpu->opaque;
  8402. ts->tp_value = arg1;
  8403. ret = 0;
  8404. break;
  8405. }
  8406. #else
  8407. goto unimplemented_nowarn;
  8408. #endif
  8409. #endif
  8410. #ifdef TARGET_NR_get_thread_area
  8411. case TARGET_NR_get_thread_area:
  8412. #if defined(TARGET_I386) && defined(TARGET_ABI32)
  8413. ret = do_get_thread_area(cpu_env, arg1);
  8414. break;
  8415. #elif defined(TARGET_M68K)
  8416. {
  8417. TaskState *ts = cpu->opaque;
  8418. ret = ts->tp_value;
  8419. break;
  8420. }
  8421. #else
  8422. goto unimplemented_nowarn;
  8423. #endif
  8424. #endif
  8425. #ifdef TARGET_NR_getdomainname
  8426. case TARGET_NR_getdomainname:
  8427. goto unimplemented_nowarn;
  8428. #endif
  8429. #ifdef TARGET_NR_clock_gettime
  8430. case TARGET_NR_clock_gettime:
  8431. {
  8432. struct timespec ts;
  8433. ret = get_errno(clock_gettime(arg1, &ts));
  8434. if (!is_error(ret)) {
  8435. host_to_target_timespec(arg2, &ts);
  8436. }
  8437. break;
  8438. }
  8439. #endif
  8440. #ifdef TARGET_NR_clock_getres
  8441. case TARGET_NR_clock_getres:
  8442. {
  8443. struct timespec ts;
  8444. ret = get_errno(clock_getres(arg1, &ts));
  8445. if (!is_error(ret)) {
  8446. host_to_target_timespec(arg2, &ts);
  8447. }
  8448. break;
  8449. }
  8450. #endif
  8451. #ifdef TARGET_NR_clock_nanosleep
  8452. case TARGET_NR_clock_nanosleep:
  8453. {
  8454. struct timespec ts;
  8455. target_to_host_timespec(&ts, arg3);
  8456. ret = get_errno(clock_nanosleep(arg1, arg2, &ts, arg4 ? &ts : NULL));
  8457. if (arg4)
  8458. host_to_target_timespec(arg4, &ts);
  8459. #if defined(TARGET_PPC)
  8460. /* clock_nanosleep is odd in that it returns positive errno values.
  8461. * On PPC, CR0 bit 3 should be set in such a situation. */
  8462. if (ret) {
  8463. ((CPUPPCState *)cpu_env)->crf[0] |= 1;
  8464. }
  8465. #endif
  8466. break;
  8467. }
  8468. #endif
  8469. #if defined(TARGET_NR_set_tid_address) && defined(__NR_set_tid_address)
  8470. case TARGET_NR_set_tid_address:
  8471. ret = get_errno(set_tid_address((int *)g2h(arg1)));
  8472. break;
  8473. #endif
  8474. #if defined(TARGET_NR_tkill) && defined(__NR_tkill)
  8475. case TARGET_NR_tkill:
  8476. ret = get_errno(sys_tkill((int)arg1, target_to_host_signal(arg2)));
  8477. break;
  8478. #endif
  8479. #if defined(TARGET_NR_tgkill) && defined(__NR_tgkill)
  8480. case TARGET_NR_tgkill:
  8481. ret = get_errno(sys_tgkill((int)arg1, (int)arg2,
  8482. target_to_host_signal(arg3)));
  8483. break;
  8484. #endif
  8485. #ifdef TARGET_NR_set_robust_list
  8486. case TARGET_NR_set_robust_list:
  8487. case TARGET_NR_get_robust_list:
  8488. /* The ABI for supporting robust futexes has userspace pass
  8489. * the kernel a pointer to a linked list which is updated by
  8490. * userspace after the syscall; the list is walked by the kernel
  8491. * when the thread exits. Since the linked list in QEMU guest
  8492. * memory isn't a valid linked list for the host and we have
  8493. * no way to reliably intercept the thread-death event, we can't
  8494. * support these. Silently return ENOSYS so that guest userspace
  8495. * falls back to a non-robust futex implementation (which should
  8496. * be OK except in the corner case of the guest crashing while
  8497. * holding a mutex that is shared with another process via
  8498. * shared memory).
  8499. */
  8500. goto unimplemented_nowarn;
  8501. #endif
  8502. #if defined(TARGET_NR_utimensat)
  8503. case TARGET_NR_utimensat:
  8504. {
  8505. struct timespec *tsp, ts[2];
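/* arg3, if non-NULL, points to an array of two target timespecs
 * (access and modification time); NULL means "set both to now". */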
  8506. if (!arg3) {
  8507. tsp = NULL;
  8508. } else {
  8509. target_to_host_timespec(ts, arg3);
  8510. target_to_host_timespec(ts+1, arg3+sizeof(struct target_timespec));
  8511. tsp = ts;
  8512. }
  8513. if (!arg2)
  8514. ret = get_errno(sys_utimensat(arg1, NULL, tsp, arg4));
  8515. else {
  8516. if (!(p = lock_user_string(arg2))) {
  8517. ret = -TARGET_EFAULT;
  8518. goto fail;
  8519. }
  8520. ret = get_errno(sys_utimensat(arg1, path(p), tsp, arg4));
  8521. unlock_user(p, arg2, 0);
  8522. }
  8523. }
  8524. break;
  8525. #endif
  8526. case TARGET_NR_futex:
  8527. ret = do_futex(arg1, arg2, arg3, arg4, arg5, arg6);
  8528. break;
  8529. #if defined(TARGET_NR_inotify_init) && defined(__NR_inotify_init)
  8530. case TARGET_NR_inotify_init:
  8531. ret = get_errno(sys_inotify_init());
  8532. break;
  8533. #endif
  8534. #ifdef CONFIG_INOTIFY1
  8535. #if defined(TARGET_NR_inotify_init1) && defined(__NR_inotify_init1)
  8536. case TARGET_NR_inotify_init1:
  8537. ret = get_errno(sys_inotify_init1(arg1));
  8538. break;
  8539. #endif
  8540. #endif
  8541. #if defined(TARGET_NR_inotify_add_watch) && defined(__NR_inotify_add_watch)
  8542. case TARGET_NR_inotify_add_watch:
  8543. p = lock_user_string(arg2);
  8544. ret = get_errno(sys_inotify_add_watch(arg1, path(p), arg3));
  8545. unlock_user(p, arg2, 0);
  8546. break;
  8547. #endif
  8548. #if defined(TARGET_NR_inotify_rm_watch) && defined(__NR_inotify_rm_watch)
  8549. case TARGET_NR_inotify_rm_watch:
  8550. ret = get_errno(sys_inotify_rm_watch(arg1, arg2));
  8551. break;
  8552. #endif
  8553. #if defined(TARGET_NR_mq_open) && defined(__NR_mq_open)
  8554. case TARGET_NR_mq_open:
  8555. {
  8556. struct mq_attr posix_mq_attr, *attrp;
  8557. p = lock_user_string(arg1 - 1);
  8558. if (arg4 != 0) {
  8559. copy_from_user_mq_attr (&posix_mq_attr, arg4);
  8560. attrp = &posix_mq_attr;
  8561. } else {
  8562. attrp = 0;
  8563. }
  8564. ret = get_errno(mq_open(p, arg2, arg3, attrp));
  8565. unlock_user (p, arg1, 0);
  8566. }
  8567. break;
  8568. case TARGET_NR_mq_unlink:
  8569. p = lock_user_string(arg1 - 1);
  8570. ret = get_errno(mq_unlink(p));
  8571. unlock_user (p, arg1, 0);
  8572. break;
  8573. case TARGET_NR_mq_timedsend:
  8574. {
  8575. struct timespec ts;
  8576. p = lock_user (VERIFY_READ, arg2, arg3, 1);
  8577. if (arg5 != 0) {
  8578. target_to_host_timespec(&ts, arg5);
  8579. ret = get_errno(mq_timedsend(arg1, p, arg3, arg4, &ts));
  8580. host_to_target_timespec(arg5, &ts);
  8581. }
  8582. else
  8583. ret = get_errno(mq_send(arg1, p, arg3, arg4));
  8584. unlock_user (p, arg2, arg3);
  8585. }
  8586. break;
  8587. case TARGET_NR_mq_timedreceive:
  8588. {
  8589. struct timespec ts;
  8590. unsigned int prio;
  8591. p = lock_user (VERIFY_READ, arg2, arg3, 1);
  8592. if (arg5 != 0) {
  8593. target_to_host_timespec(&ts, arg5);
  8594. ret = get_errno(mq_timedreceive(arg1, p, arg3, &prio, &ts));
  8595. host_to_target_timespec(arg5, &ts);
  8596. }
  8597. else
  8598. ret = get_errno(mq_receive(arg1, p, arg3, &prio));
  8599. unlock_user (p, arg2, arg3);
  8600. if (arg4 != 0)
  8601. put_user_u32(prio, arg4);
  8602. }
  8603. break;
  8604. /* Not implemented for now... */
  8605. /* case TARGET_NR_mq_notify: */
  8606. /* break; */
  8607. case TARGET_NR_mq_getsetattr:
  8608. {
  8609. struct mq_attr posix_mq_attr_in, posix_mq_attr_out;
  8610. ret = 0;
  8611. if (arg3 != 0) {
  8612. ret = mq_getattr(arg1, &posix_mq_attr_out);
  8613. copy_to_user_mq_attr(arg3, &posix_mq_attr_out);
  8614. }
  8615. if (arg2 != 0) {
  8616. copy_from_user_mq_attr(&posix_mq_attr_in, arg2);
  8617. ret |= mq_setattr(arg1, &posix_mq_attr_in, &posix_mq_attr_out);
  8618. }
  8619. }
  8620. break;
  8621. #endif
  8622. #ifdef CONFIG_SPLICE
  8623. #ifdef TARGET_NR_tee
  8624. case TARGET_NR_tee:
  8625. {
  8626. ret = get_errno(tee(arg1,arg2,arg3,arg4));
  8627. }
  8628. break;
  8629. #endif
  8630. #ifdef TARGET_NR_splice
  8631. case TARGET_NR_splice:
  8632. {
  8633. loff_t loff_in, loff_out;
  8634. loff_t *ploff_in = NULL, *ploff_out = NULL;
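/* The optional 64-bit offsets are read from guest memory; a NULL
 * pointer means the corresponding fd's own file offset is used. */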
8635. if (arg2) {
8636. get_user_u64(loff_in, arg2);
8637. ploff_in = &loff_in;
8638. }
8639. if (arg4) {
8640. get_user_u64(loff_out, arg4);
  8641. ploff_out = &loff_out;
  8642. }
  8643. ret = get_errno(splice(arg1, ploff_in, arg3, ploff_out, arg5, arg6));
  8644. }
  8645. break;
  8646. #endif
  8647. #ifdef TARGET_NR_vmsplice
  8648. case TARGET_NR_vmsplice:
  8649. {
  8650. struct iovec *vec = lock_iovec(VERIFY_READ, arg2, arg3, 1);
  8651. if (vec != NULL) {
  8652. ret = get_errno(vmsplice(arg1, vec, arg3, arg4));
  8653. unlock_iovec(vec, arg2, arg3, 0);
  8654. } else {
  8655. ret = -host_to_target_errno(errno);
  8656. }
  8657. }
  8658. break;
  8659. #endif
  8660. #endif /* CONFIG_SPLICE */
  8661. #ifdef CONFIG_EVENTFD
  8662. #if defined(TARGET_NR_eventfd)
  8663. case TARGET_NR_eventfd:
  8664. ret = get_errno(eventfd(arg1, 0));
  8665. break;
  8666. #endif
  8667. #if defined(TARGET_NR_eventfd2)
  8668. case TARGET_NR_eventfd2:
  8669. {
  8670. int host_flags = arg2 & (~(TARGET_O_NONBLOCK | TARGET_O_CLOEXEC));
  8671. if (arg2 & TARGET_O_NONBLOCK) {
  8672. host_flags |= O_NONBLOCK;
  8673. }
  8674. if (arg2 & TARGET_O_CLOEXEC) {
  8675. host_flags |= O_CLOEXEC;
  8676. }
  8677. ret = get_errno(eventfd(arg1, host_flags));
  8678. break;
  8679. }
  8680. #endif
  8681. #endif /* CONFIG_EVENTFD */
  8682. #if defined(CONFIG_FALLOCATE) && defined(TARGET_NR_fallocate)
  8683. case TARGET_NR_fallocate:
  8684. #if TARGET_ABI_BITS == 32
  8685. ret = get_errno(fallocate(arg1, arg2, target_offset64(arg3, arg4),
  8686. target_offset64(arg5, arg6)));
  8687. #else
  8688. ret = get_errno(fallocate(arg1, arg2, arg3, arg4));
  8689. #endif
  8690. break;
  8691. #endif
  8692. #if defined(CONFIG_SYNC_FILE_RANGE)
  8693. #if defined(TARGET_NR_sync_file_range)
  8694. case TARGET_NR_sync_file_range:
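/* On 32-bit ABIs the two 64-bit offsets are split across register
 * pairs; MIPS shifts them by one slot for pair alignment. */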
  8695. #if TARGET_ABI_BITS == 32
  8696. #if defined(TARGET_MIPS)
  8697. ret = get_errno(sync_file_range(arg1, target_offset64(arg3, arg4),
  8698. target_offset64(arg5, arg6), arg7));
  8699. #else
  8700. ret = get_errno(sync_file_range(arg1, target_offset64(arg2, arg3),
  8701. target_offset64(arg4, arg5), arg6));
  8702. #endif /* !TARGET_MIPS */
  8703. #else
  8704. ret = get_errno(sync_file_range(arg1, arg2, arg3, arg4));
  8705. #endif
  8706. break;
  8707. #endif
  8708. #if defined(TARGET_NR_sync_file_range2)
  8709. case TARGET_NR_sync_file_range2:
  8710. /* This is like sync_file_range but the arguments are reordered */
  8711. #if TARGET_ABI_BITS == 32
  8712. ret = get_errno(sync_file_range(arg1, target_offset64(arg3, arg4),
  8713. target_offset64(arg5, arg6), arg2));
  8714. #else
  8715. ret = get_errno(sync_file_range(arg1, arg3, arg4, arg2));
  8716. #endif
  8717. break;
  8718. #endif
  8719. #endif
  8720. #if defined(CONFIG_EPOLL)
  8721. #if defined(TARGET_NR_epoll_create)
  8722. case TARGET_NR_epoll_create:
  8723. ret = get_errno(epoll_create(arg1));
  8724. break;
  8725. #endif
  8726. #if defined(TARGET_NR_epoll_create1) && defined(CONFIG_EPOLL_CREATE1)
  8727. case TARGET_NR_epoll_create1:
  8728. ret = get_errno(epoll_create1(arg1));
  8729. break;
  8730. #endif
  8731. #if defined(TARGET_NR_epoll_ctl)
  8732. case TARGET_NR_epoll_ctl:
  8733. {
  8734. struct epoll_event ep;
  8735. struct epoll_event *epp = 0;
  8736. if (arg4) {
  8737. struct target_epoll_event *target_ep;
  8738. if (!lock_user_struct(VERIFY_READ, target_ep, arg4, 1)) {
  8739. goto efault;
  8740. }
  8741. ep.events = tswap32(target_ep->events);
  8742. /* The epoll_data_t union is just opaque data to the kernel,
  8743. * so we transfer all 64 bits across and need not worry what
  8744. * actual data type it is.
  8745. */
  8746. ep.data.u64 = tswap64(target_ep->data.u64);
  8747. unlock_user_struct(target_ep, arg4, 0);
  8748. epp = &ep;
  8749. }
  8750. ret = get_errno(epoll_ctl(arg1, arg2, arg3, epp));
  8751. break;
  8752. }
  8753. #endif
  8754. #if defined(TARGET_NR_epoll_pwait) && defined(CONFIG_EPOLL_PWAIT)
  8755. #define IMPLEMENT_EPOLL_PWAIT
  8756. #endif
  8757. #if defined(TARGET_NR_epoll_wait) || defined(IMPLEMENT_EPOLL_PWAIT)
  8758. #if defined(TARGET_NR_epoll_wait)
  8759. case TARGET_NR_epoll_wait:
  8760. #endif
  8761. #if defined(IMPLEMENT_EPOLL_PWAIT)
  8762. case TARGET_NR_epoll_pwait:
  8763. #endif
  8764. {
  8765. struct target_epoll_event *target_ep;
  8766. struct epoll_event *ep;
  8767. int epfd = arg1;
  8768. int maxevents = arg3;
  8769. int timeout = arg4;
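/* Lock the guest event array, run the wait on a host-side buffer,
 * then convert the returned events back to the target layout. */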
  8770. target_ep = lock_user(VERIFY_WRITE, arg2,
  8771. maxevents * sizeof(struct target_epoll_event), 1);
  8772. if (!target_ep) {
  8773. goto efault;
  8774. }
  8775. ep = alloca(maxevents * sizeof(struct epoll_event));
  8776. switch (num) {
  8777. #if defined(IMPLEMENT_EPOLL_PWAIT)
  8778. case TARGET_NR_epoll_pwait:
  8779. {
  8780. target_sigset_t *target_set;
  8781. sigset_t _set, *set = &_set;
  8782. if (arg5) {
  8783. target_set = lock_user(VERIFY_READ, arg5,
  8784. sizeof(target_sigset_t), 1);
  8785. if (!target_set) {
  8786. unlock_user(target_ep, arg2, 0);
  8787. goto efault;
  8788. }
  8789. target_to_host_sigset(set, target_set);
  8790. unlock_user(target_set, arg5, 0);
  8791. } else {
  8792. set = NULL;
  8793. }
  8794. ret = get_errno(epoll_pwait(epfd, ep, maxevents, timeout, set));
  8795. break;
  8796. }
  8797. #endif
  8798. #if defined(TARGET_NR_epoll_wait)
  8799. case TARGET_NR_epoll_wait:
  8800. ret = get_errno(epoll_wait(epfd, ep, maxevents, timeout));
  8801. break;
  8802. #endif
  8803. default:
  8804. ret = -TARGET_ENOSYS;
  8805. }
  8806. if (!is_error(ret)) {
  8807. int i;
  8808. for (i = 0; i < ret; i++) {
  8809. target_ep[i].events = tswap32(ep[i].events);
  8810. target_ep[i].data.u64 = tswap64(ep[i].data.u64);
  8811. }
  8812. }
  8813. unlock_user(target_ep, arg2, ret * sizeof(struct target_epoll_event));
  8814. break;
  8815. }
  8816. #endif
  8817. #endif
  8818. #ifdef TARGET_NR_prlimit64
  8819. case TARGET_NR_prlimit64:
  8820. {
  8821. /* args: pid, resource number, ptr to new rlimit, ptr to old rlimit */
  8822. struct target_rlimit64 *target_rnew, *target_rold;
  8823. struct host_rlimit64 rnew, rold, *rnewp = 0;
  8824. if (arg3) {
  8825. if (!lock_user_struct(VERIFY_READ, target_rnew, arg3, 1)) {
  8826. goto efault;
  8827. }
  8828. rnew.rlim_cur = tswap64(target_rnew->rlim_cur);
  8829. rnew.rlim_max = tswap64(target_rnew->rlim_max);
  8830. unlock_user_struct(target_rnew, arg3, 0);
  8831. rnewp = &rnew;
  8832. }
  8833. ret = get_errno(sys_prlimit64(arg1, arg2, rnewp, arg4 ? &rold : 0));
  8834. if (!is_error(ret) && arg4) {
  8835. if (!lock_user_struct(VERIFY_WRITE, target_rold, arg4, 1)) {
  8836. goto efault;
  8837. }
  8838. target_rold->rlim_cur = tswap64(rold.rlim_cur);
  8839. target_rold->rlim_max = tswap64(rold.rlim_max);
  8840. unlock_user_struct(target_rold, arg4, 1);
  8841. }
  8842. break;
  8843. }
  8844. #endif
  8845. #ifdef TARGET_NR_gethostname
  8846. case TARGET_NR_gethostname:
  8847. {
  8848. char *name = lock_user(VERIFY_WRITE, arg1, arg2, 0);
  8849. if (name) {
  8850. ret = get_errno(gethostname(name, arg2));
  8851. unlock_user(name, arg1, arg2);
  8852. } else {
  8853. ret = -TARGET_EFAULT;
  8854. }
  8855. break;
  8856. }
  8857. #endif
  8858. #ifdef TARGET_NR_atomic_cmpxchg_32
  8859. case TARGET_NR_atomic_cmpxchg_32:
  8860. {
  8861. /* should use start_exclusive from main.c */
  8862. abi_ulong mem_value;
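/* Emulates the kernel cmpxchg helper: compare the guest word at arg6
 * with arg2, store arg1 there on a match, and return the old value. */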
  8863. if (get_user_u32(mem_value, arg6)) {
  8864. target_siginfo_t info;
  8865. info.si_signo = SIGSEGV;
  8866. info.si_errno = 0;
  8867. info.si_code = TARGET_SEGV_MAPERR;
  8868. info._sifields._sigfault._addr = arg6;
  8869. queue_signal((CPUArchState *)cpu_env, info.si_signo, &info);
  8870. ret = 0xdeadbeef;
  8871. }
  8872. if (mem_value == arg2)
  8873. put_user_u32(arg1, arg6);
  8874. ret = mem_value;
  8875. break;
  8876. }
  8877. #endif
  8878. #ifdef TARGET_NR_atomic_barrier
  8879. case TARGET_NR_atomic_barrier:
  8880. {
8881. /* Like the kernel implementation and the QEMU ARM barrier, treat this as a no-op. */
  8882. ret = 0;
  8883. break;
  8884. }
  8885. #endif
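    /*
     * POSIX timer emulation: host timer_t handles live in g_posix_timers[],
     * and the id handed back to the guest is the table index tagged with
     * TIMER_MAGIC (see the put_user() below).
     */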
#ifdef TARGET_NR_timer_create
    case TARGET_NR_timer_create:
    {
        /* args: clockid_t clockid, struct sigevent *sevp, timer_t *timerid */
        struct sigevent host_sevp = { {0}, }, *phost_sevp = NULL;
        int clkid = arg1;
        int timer_index = next_free_host_timer();

        if (timer_index < 0) {
            ret = -TARGET_EAGAIN;
        } else {
            timer_t *phtimer = g_posix_timers + timer_index;

            if (arg2) {
                phost_sevp = &host_sevp;
                ret = target_to_host_sigevent(phost_sevp, arg2);
                if (ret != 0) {
                    break;
                }
            }

            ret = get_errno(timer_create(clkid, phost_sevp, phtimer));
            if (ret) {
                phtimer = NULL;
            } else {
                if (put_user(TIMER_MAGIC | timer_index, arg3, target_timer_t)) {
                    goto efault;
                }
            }
        }
        break;
    }
#endif
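    /*
     * The timer_* cases below presumably rely on get_timer_id() to validate
     * the TIMER_MAGIC tag and map the guest id back to an index into
     * g_posix_timers[]; a negative return is already a target errno and is
     * passed straight back to the guest.
     */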
#ifdef TARGET_NR_timer_settime
    case TARGET_NR_timer_settime:
    {
        /* args: timer_t timerid, int flags, const struct itimerspec *new_value,
         * struct itimerspec * old_value */
        target_timer_t timerid = get_timer_id(arg1);

        if (timerid < 0) {
            ret = timerid;
        } else if (arg3 == 0) {
            ret = -TARGET_EINVAL;
        } else {
            timer_t htimer = g_posix_timers[timerid];
            struct itimerspec hspec_new = {{0},}, hspec_old = {{0},};

            target_to_host_itimerspec(&hspec_new, arg3);
            ret = get_errno(
                timer_settime(htimer, arg2, &hspec_new, &hspec_old));
            /* the previous setting goes back to the guest's old_value pointer
             * (arg4), not to arg2, which carries the flags */
            if (arg4) {
                host_to_target_itimerspec(arg4, &hspec_old);
            }
        }
        break;
    }
#endif
#ifdef TARGET_NR_timer_gettime
    case TARGET_NR_timer_gettime:
    {
        /* args: timer_t timerid, struct itimerspec *curr_value */
        target_timer_t timerid = get_timer_id(arg1);

        if (timerid < 0) {
            ret = timerid;
        } else if (!arg2) {
            ret = -TARGET_EFAULT;
        } else {
            timer_t htimer = g_posix_timers[timerid];
            struct itimerspec hspec;
            ret = get_errno(timer_gettime(htimer, &hspec));

            if (host_to_target_itimerspec(arg2, &hspec)) {
                ret = -TARGET_EFAULT;
            }
        }
        break;
    }
#endif
#ifdef TARGET_NR_timer_getoverrun
    case TARGET_NR_timer_getoverrun:
    {
        /* args: timer_t timerid */
        target_timer_t timerid = get_timer_id(arg1);

        if (timerid < 0) {
            ret = timerid;
        } else {
            timer_t htimer = g_posix_timers[timerid];
            ret = get_errno(timer_getoverrun(htimer));
        }
        break;
    }
#endif
#ifdef TARGET_NR_timer_delete
    case TARGET_NR_timer_delete:
    {
        /* args: timer_t timerid */
        target_timer_t timerid = get_timer_id(arg1);

        if (timerid < 0) {
            ret = timerid;
        } else {
            timer_t htimer = g_posix_timers[timerid];
            ret = get_errno(timer_delete(htimer));
            g_posix_timers[timerid] = 0;
        }
        break;
    }
#endif
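    /*
     * The timerfd_* syscalls map directly onto the host calls; only the
     * itimerspec structures (and the timerfd_create flags) need conversion
     * between guest and host representations. They are guarded by
     * CONFIG_TIMERFD, so on hosts built without timerfd support the guest
     * falls through to the default case and gets -TARGET_ENOSYS.
     */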
#if defined(TARGET_NR_timerfd_create) && defined(CONFIG_TIMERFD)
    case TARGET_NR_timerfd_create:
        ret = get_errno(timerfd_create(arg1,
                        target_to_host_bitmask(arg2, fcntl_flags_tbl)));
        break;
#endif
#if defined(TARGET_NR_timerfd_gettime) && defined(CONFIG_TIMERFD)
    case TARGET_NR_timerfd_gettime:
        {
            struct itimerspec its_curr;

            ret = get_errno(timerfd_gettime(arg1, &its_curr));

            if (arg2 && host_to_target_itimerspec(arg2, &its_curr)) {
                goto efault;
            }
        }
        break;
#endif
#if defined(TARGET_NR_timerfd_settime) && defined(CONFIG_TIMERFD)
    case TARGET_NR_timerfd_settime:
        {
            struct itimerspec its_new, its_old, *p_new;

            if (arg3) {
                if (target_to_host_itimerspec(&its_new, arg3)) {
                    goto efault;
                }
                p_new = &its_new;
            } else {
                p_new = NULL;
            }

            ret = get_errno(timerfd_settime(arg1, arg2, p_new, &its_old));

            if (arg4 && host_to_target_itimerspec(arg4, &its_old)) {
                goto efault;
            }
        }
        break;
#endif
#if defined(TARGET_NR_ioprio_get) && defined(__NR_ioprio_get)
    case TARGET_NR_ioprio_get:
        ret = get_errno(ioprio_get(arg1, arg2));
        break;
#endif
#if defined(TARGET_NR_ioprio_set) && defined(__NR_ioprio_set)
    case TARGET_NR_ioprio_set:
        ret = get_errno(ioprio_set(arg1, arg2, arg3));
        break;
#endif
#if defined(TARGET_NR_setns) && defined(CONFIG_SETNS)
    case TARGET_NR_setns:
        ret = get_errno(setns(arg1, arg2));
        break;
#endif
#if defined(TARGET_NR_unshare) && defined(CONFIG_SETNS)
    case TARGET_NR_unshare:
        ret = get_errno(unshare(arg1));
        break;
#endif
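    /*
     * Any syscall number not handled above is logged via gemu_log() and
     * fails with -TARGET_ENOSYS. The unimplemented_nowarn label is the
     * target of the stubs guarded by the #if below (setxattr and friends),
     * which return ENOSYS without the log message.
     */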
    default:
    unimplemented:
        gemu_log("qemu: Unsupported syscall: %d\n", num);
#if defined(TARGET_NR_setxattr) || defined(TARGET_NR_get_thread_area) || defined(TARGET_NR_getdomainname) || defined(TARGET_NR_set_robust_list)
    unimplemented_nowarn:
#endif
        ret = -TARGET_ENOSYS;
        break;
    }
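/*
 * Common exit paths: "fail" optionally logs/straces the return value and
 * hands it back to the guest; "efault" converts a bad guest pointer into
 * -TARGET_EFAULT and then takes the same path.
 */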
fail:
#ifdef DEBUG
    gemu_log(" = " TARGET_ABI_FMT_ld "\n", ret);
#endif
    if (do_strace) {
        print_syscall_ret(num, ret);
    }
    return ret;
efault:
    ret = -TARGET_EFAULT;
    goto fail;
}