css.c 78 KB

12345678910111213141516171819202122232425262728293031323334353637383940414243444546474849505152535455565758596061626364656667686970717273747576777879808182838485868788899091929394959697989910010110210310410510610710810911011111211311411511611711811912012112212312412512612712812913013113213313413513613713813914014114214314414514614714814915015115215315415515615715815916016116216316416516616716816917017117217317417517617717817918018118218318418518618718818919019119219319419519619719819920020120220320420520620720820921021121221321421521621721821922022122222322422522622722822923023123223323423523623723823924024124224324424524624724824925025125225325425525625725825926026126226326426526626726826927027127227327427527627727827928028128228328428528628728828929029129229329429529629729829930030130230330430530630730830931031131231331431531631731831932032132232332432532632732832933033133233333433533633733833934034134234334434534634734834935035135235335435535635735835936036136236336436536636736836937037137237337437537637737837938038138238338438538638738838939039139239339439539639739839940040140240340440540640740840941041141241341441541641741841942042142242342442542642742842943043143243343443543643743843944044144244344444544644744844945045145245345445545645745845946046146246346446546646746846947047147247347447547647747847948048148248348448548648748848949049149249349449549649749849950050150250350450550650750850951051151251351451551651751851952052152252352452552652752852953053153253353453553653753853954054154254354454554654754854955055155255355455555655755855956056156256356456556656756856957057157257357457557657757857958058158258358458558658758858959059159259359459559659759859960060160260360460560660760860961061161261361461561661761861962062162262362462562662762862963063163263363463563663763863964064164264364464564664764864965065165265365465565665765865966066166266366466566666766866967067167267367467567667767867968068168268368468568668768868969069169269369469569669769869970070170270
37047057067077087097107117127137147157167177187197207217227237247257267277287297307317327337347357367377387397407417427437447457467477487497507517527537547557567577587597607617627637647657667677687697707717727737747757767777787797807817827837847857867877887897907917927937947957967977987998008018028038048058068078088098108118128138148158168178188198208218228238248258268278288298308318328338348358368378388398408418428438448458468478488498508518528538548558568578588598608618628638648658668678688698708718728738748758768778788798808818828838848858868878888898908918928938948958968978988999009019029039049059069079089099109119129139149159169179189199209219229239249259269279289299309319329339349359369379389399409419429439449459469479489499509519529539549559569579589599609619629639649659669679689699709719729739749759769779789799809819829839849859869879889899909919929939949959969979989991000100110021003100410051006100710081009101010111012101310141015101610171018101910201021102210231024102510261027102810291030103110321033103410351036103710381039104010411042104310441045104610471048104910501051105210531054105510561057105810591060106110621063106410651066106710681069107010711072107310741075107610771078107910801081108210831084108510861087108810891090109110921093109410951096109710981099110011011102110311041105110611071108110911101111111211131114111511161117111811191120112111221123112411251126112711281129113011311132113311341135113611371138113911401141114211431144114511461147114811491150115111521153115411551156115711581159116011611162116311641165116611671168116911701171117211731174117511761177117811791180118111821183118411851186118711881189119011911192119311941195119611971198119912001201120212031204120512061207120812091210121112121213121412151216121712181219122012211222122312241225122612271228122912301231123212331234123512361237123812391240124112421243124412451246124712481249125012511252125312541255125612571258125912601261126212631264126512661267126812691270127112721273127412751276127
71278127912801281128212831284128512861287128812891290129112921293129412951296129712981299130013011302130313041305130613071308130913101311131213131314131513161317131813191320132113221323132413251326132713281329133013311332133313341335133613371338133913401341134213431344134513461347134813491350135113521353135413551356135713581359136013611362136313641365136613671368136913701371137213731374137513761377137813791380138113821383138413851386138713881389139013911392139313941395139613971398139914001401140214031404140514061407140814091410141114121413141414151416141714181419142014211422142314241425142614271428142914301431143214331434143514361437143814391440144114421443144414451446144714481449145014511452145314541455145614571458145914601461146214631464146514661467146814691470147114721473147414751476147714781479148014811482148314841485148614871488148914901491149214931494149514961497149814991500150115021503150415051506150715081509151015111512151315141515151615171518151915201521152215231524152515261527152815291530153115321533153415351536153715381539154015411542154315441545154615471548154915501551155215531554155515561557155815591560156115621563156415651566156715681569157015711572157315741575157615771578157915801581158215831584158515861587158815891590159115921593159415951596159715981599160016011602160316041605160616071608160916101611161216131614161516161617161816191620162116221623162416251626162716281629163016311632163316341635163616371638163916401641164216431644164516461647164816491650165116521653165416551656165716581659166016611662166316641665166616671668166916701671167216731674167516761677167816791680168116821683168416851686168716881689169016911692169316941695169616971698169917001701170217031704170517061707170817091710171117121713171417151716171717181719172017211722172317241725172617271728172917301731173217331734173517361737173817391740174117421743174417451746174717481749175017511752175317541755175617571758175917601761176217631764176517661767176817691770177117721773177417751776177
71778177917801781178217831784178517861787178817891790179117921793179417951796179717981799180018011802180318041805180618071808180918101811181218131814181518161817181818191820182118221823182418251826182718281829183018311832183318341835183618371838183918401841184218431844184518461847184818491850185118521853185418551856185718581859186018611862186318641865186618671868186918701871187218731874187518761877187818791880188118821883188418851886188718881889189018911892189318941895189618971898189919001901190219031904190519061907190819091910191119121913191419151916191719181919192019211922192319241925192619271928192919301931193219331934193519361937193819391940194119421943194419451946194719481949195019511952195319541955195619571958195919601961196219631964196519661967196819691970197119721973197419751976197719781979198019811982198319841985198619871988198919901991199219931994199519961997199819992000200120022003200420052006200720082009201020112012201320142015201620172018201920202021202220232024202520262027202820292030203120322033203420352036203720382039204020412042204320442045204620472048204920502051205220532054205520562057205820592060206120622063206420652066206720682069207020712072207320742075207620772078207920802081208220832084208520862087208820892090209120922093209420952096209720982099210021012102210321042105210621072108210921102111211221132114211521162117211821192120212121222123212421252126212721282129213021312132213321342135213621372138213921402141214221432144214521462147214821492150215121522153215421552156215721582159216021612162216321642165216621672168216921702171217221732174217521762177217821792180218121822183218421852186218721882189219021912192219321942195219621972198219922002201220222032204220522062207220822092210221122122213221422152216221722182219222022212222222322242225222622272228222922302231223222332234223522362237223822392240224122422243224422452246224722482249225022512252225322542255225622572258225922602261226222632264226522662267226822692270227122722273227422752276227
722782279228022812282228322842285228622872288228922902291229222932294229522962297229822992300230123022303230423052306230723082309231023112312231323142315231623172318231923202321232223232324232523262327232823292330233123322333233423352336233723382339234023412342234323442345234623472348234923502351235223532354235523562357235823592360236123622363236423652366236723682369237023712372237323742375237623772378237923802381238223832384238523862387238823892390239123922393239423952396239723982399240024012402240324042405240624072408240924102411241224132414241524162417241824192420242124222423242424252426242724282429243024312432243324342435243624372438243924402441244224432444244524462447244824492450245124522453245424552456245724582459246024612462246324642465246624672468246924702471247224732474247524762477247824792480248124822483248424852486248724882489249024912492249324942495249624972498249925002501250225032504250525062507250825092510251125122513251425152516251725182519252025212522252325242525252625272528252925302531253225332534253525362537253825392540254125422543254425452546254725482549255025512552255325542555255625572558255925602561256225632564256525662567256825692570257125722573257425752576257725782579258025812582258325842585258625872588258925902591259225932594259525962597259825992600260126022603260426052606260726082609261026112612261326142615261626172618261926202621262226232624262526262627262826292630263126322633263426352636263726382639264026412642264326442645264626472648264926502651265226532654265526562657265826592660266126622663266426652666266726682669267026712672267326742675267626772678267926802681268226832684268526862687268826892690269126922693269426952696269726982699270027012702270327042705270627072708270927102711271227132714271527162717271827192720272127222723
  1. /*
  2. * Channel subsystem base support.
  3. *
  4. * Copyright 2012 IBM Corp.
  5. * Author(s): Cornelia Huck <cornelia.huck@de.ibm.com>
  6. *
  7. * This work is licensed under the terms of the GNU GPL, version 2 or (at
  8. * your option) any later version. See the COPYING file in the top-level
  9. * directory.
  10. */
  11. #include "qemu/osdep.h"
  12. #include "qapi/error.h"
  13. #include "qapi/visitor.h"
  14. #include "qemu/bitops.h"
  15. #include "qemu/error-report.h"
  16. #include "exec/address-spaces.h"
  17. #include "hw/s390x/ioinst.h"
  18. #include "hw/qdev-properties.h"
  19. #include "hw/s390x/css.h"
  20. #include "trace.h"
  21. #include "hw/s390x/s390_flic.h"
  22. #include "hw/s390x/s390-virtio-ccw.h"
  23. #include "hw/s390x/s390-ccw.h"
/* On by default; compat machine code may clear it to suppress css migration. */
bool css_migration_enabled = true;

/* One queued channel report word, linked into channel_subsys.pending_crws. */
typedef struct CrwContainer {
    CRW crw;
    QTAILQ_ENTRY(CrwContainer) sibling;
} CrwContainer;

/*
 * NOTE(review): for all VMStateDescriptions below, the field list defines
 * the migration wire format; do not reorder or change fields without
 * bumping the version ids.
 */
static const VMStateDescription vmstate_crw = {
    .name = "s390_crw",
    .version_id = 1,
    .minimum_version_id = 1,
    .fields = (const VMStateField[]) {
        VMSTATE_UINT16(flags, CRW),
        VMSTATE_UINT16(rsid, CRW),
        VMSTATE_END_OF_LIST()
    },
};

static const VMStateDescription vmstate_crw_container = {
    .name = "s390_crw_container",
    .version_id = 1,
    .minimum_version_id = 1,
    .fields = (const VMStateField[]) {
        VMSTATE_STRUCT(crw, CrwContainer, 0, vmstate_crw, CRW),
        VMSTATE_END_OF_LIST()
    },
};

/* State of a single channel path. */
typedef struct ChpInfo {
    uint8_t in_use;
    uint8_t type;
    uint8_t is_virtual;
} ChpInfo;

static const VMStateDescription vmstate_chp_info = {
    .name = "s390_chp_info",
    .version_id = 1,
    .minimum_version_id = 1,
    .fields = (const VMStateField[]) {
        VMSTATE_UINT8(in_use, ChpInfo),
        VMSTATE_UINT8(type, ChpInfo),
        VMSTATE_UINT8(is_virtual, ChpInfo),
        VMSTATE_END_OF_LIST()
    }
};

/*
 * One subchannel set: subchannel pointers indexed by schid, plus bitmaps
 * recording which schids and device numbers are already assigned.
 */
typedef struct SubchSet {
    SubchDev *sch[MAX_SCHID + 1];
    unsigned long schids_used[BITS_TO_LONGS(MAX_SCHID + 1)];
    unsigned long devnos_used[BITS_TO_LONGS(MAX_SCHID + 1)];
} SubchSet;
/* Subchannel status word, migrated field by field. */
static const VMStateDescription vmstate_scsw = {
    .name = "s390_scsw",
    .version_id = 1,
    .minimum_version_id = 1,
    .fields = (const VMStateField[]) {
        VMSTATE_UINT16(flags, SCSW),
        VMSTATE_UINT16(ctrl, SCSW),
        VMSTATE_UINT32(cpa, SCSW),
        VMSTATE_UINT8(dstat, SCSW),
        VMSTATE_UINT8(cstat, SCSW),
        VMSTATE_UINT16(count, SCSW),
        VMSTATE_END_OF_LIST()
    }
};

/* Path management control word. */
static const VMStateDescription vmstate_pmcw = {
    .name = "s390_pmcw",
    .version_id = 1,
    .minimum_version_id = 1,
    .fields = (const VMStateField[]) {
        VMSTATE_UINT32(intparm, PMCW),
        VMSTATE_UINT16(flags, PMCW),
        VMSTATE_UINT16(devno, PMCW),
        VMSTATE_UINT8(lpm, PMCW),
        VMSTATE_UINT8(pnom, PMCW),
        VMSTATE_UINT8(lpum, PMCW),
        VMSTATE_UINT8(pim, PMCW),
        VMSTATE_UINT16(mbi, PMCW),
        VMSTATE_UINT8(pom, PMCW),
        VMSTATE_UINT8(pam, PMCW),
        VMSTATE_UINT8_ARRAY(chpid, PMCW, 8),
        VMSTATE_UINT32(chars, PMCW),
        VMSTATE_END_OF_LIST()
    }
};

/* Subchannel information block: PMCW + SCSW + measurement block fields. */
static const VMStateDescription vmstate_schib = {
    .name = "s390_schib",
    .version_id = 1,
    .minimum_version_id = 1,
    .fields = (const VMStateField[]) {
        VMSTATE_STRUCT(pmcw, SCHIB, 0, vmstate_pmcw, PMCW),
        VMSTATE_STRUCT(scsw, SCHIB, 0, vmstate_scsw, SCSW),
        VMSTATE_UINT64(mba, SCHIB),
        VMSTATE_UINT8_ARRAY(mda, SCHIB, 4),
        VMSTATE_END_OF_LIST()
    }
};
/* Format-1 channel command word. */
static const VMStateDescription vmstate_ccw1 = {
    .name = "s390_ccw1",
    .version_id = 1,
    .minimum_version_id = 1,
    .fields = (const VMStateField[]) {
        VMSTATE_UINT8(cmd_code, CCW1),
        VMSTATE_UINT8(flags, CCW1),
        VMSTATE_UINT16(count, CCW1),
        VMSTATE_UINT32(cda, CCW1),
        VMSTATE_END_OF_LIST()
    }
};

/* Command information word (part of the sense id data). */
static const VMStateDescription vmstate_ciw = {
    .name = "s390_ciw",
    .version_id = 1,
    .minimum_version_id = 1,
    .fields = (const VMStateField[]) {
        VMSTATE_UINT8(type, CIW),
        VMSTATE_UINT8(command, CIW),
        VMSTATE_UINT16(count, CIW),
        VMSTATE_END_OF_LIST()
    }
};

/* Sense-id response data, including the CIW array. */
static const VMStateDescription vmstate_sense_id = {
    .name = "s390_sense_id",
    .version_id = 1,
    .minimum_version_id = 1,
    .fields = (const VMStateField[]) {
        VMSTATE_UINT8(reserved, SenseId),
        VMSTATE_UINT16(cu_type, SenseId),
        VMSTATE_UINT8(cu_model, SenseId),
        VMSTATE_UINT16(dev_type, SenseId),
        VMSTATE_UINT8(dev_model, SenseId),
        VMSTATE_UINT8(unused, SenseId),
        VMSTATE_STRUCT_ARRAY(ciw, SenseId, MAX_CIWS, 0, vmstate_ciw, CIW),
        VMSTATE_END_OF_LIST()
    }
};

/* Operation request block. */
static const VMStateDescription vmstate_orb = {
    .name = "s390_orb",
    .version_id = 1,
    .minimum_version_id = 1,
    .fields = (const VMStateField[]) {
        VMSTATE_UINT32(intparm, ORB),
        VMSTATE_UINT16(ctrl0, ORB),
        VMSTATE_UINT8(lpm, ORB),
        VMSTATE_UINT8(ctrl1, ORB),
        VMSTATE_UINT32(cpa, ORB),
        VMSTATE_END_OF_LIST()
    }
};
/* The ORB subsection is only sent when css migration is enabled. */
static bool vmstate_schdev_orb_needed(void *opaque)
{
    return css_migration_enabled;
}

static const VMStateDescription vmstate_schdev_orb = {
    .name = "s390_subch_dev/orb",
    .version_id = 1,
    .minimum_version_id = 1,
    .needed = vmstate_schdev_orb_needed,
    .fields = (const VMStateField[]) {
        VMSTATE_STRUCT(orb, SubchDev, 1, vmstate_orb, ORB),
        VMSTATE_END_OF_LIST()
    }
};
static int subch_dev_post_load(void *opaque, int version_id);
static int subch_dev_pre_save(void *opaque);

/* Hint printed when a devno mismatch rejects an incoming migration stream. */
const char err_hint_devno[] = "Devno mismatch, tried to load wrong section!"
    " Likely reason: some sequences of plug and unplug can break"
    " migration for machine versions prior to 2.7 (known design flaw).";

const VMStateDescription vmstate_subch_dev = {
    .name = "s390_subch_dev",
    .version_id = 1,
    .minimum_version_id = 1,
    .post_load = subch_dev_post_load,
    .pre_save = subch_dev_pre_save,
    .fields = (const VMStateField[]) {
        /*
         * cssid/ssid/devno must match on the destination; the schid may
         * differ and is fixed up in subch_dev_post_load().
         */
        VMSTATE_UINT8_EQUAL(cssid, SubchDev, "Bug!"),
        VMSTATE_UINT8_EQUAL(ssid, SubchDev, "Bug!"),
        VMSTATE_UINT16(migrated_schid, SubchDev),
        VMSTATE_UINT16_EQUAL(devno, SubchDev, err_hint_devno),
        VMSTATE_BOOL(thinint_active, SubchDev),
        VMSTATE_STRUCT(curr_status, SubchDev, 0, vmstate_schib, SCHIB),
        VMSTATE_UINT8_ARRAY(sense_data, SubchDev, 32),
        VMSTATE_UINT64(channel_prog, SubchDev),
        VMSTATE_STRUCT(last_cmd, SubchDev, 0, vmstate_ccw1, CCW1),
        VMSTATE_BOOL(last_cmd_valid, SubchDev),
        VMSTATE_STRUCT(id, SubchDev, 0, vmstate_sense_id, SenseId),
        VMSTATE_BOOL(ccw_fmt_1, SubchDev),
        VMSTATE_UINT8(ccw_no_data_cnt, SubchDev),
        VMSTATE_END_OF_LIST()
    },
    .subsections = (const VMStateDescription * const []) {
        &vmstate_schdev_orb,
        NULL
    }
};
/*
 * Flat representation of an IndAddr pointer used only during migration:
 * the (addr, len) pair is saved instead of the pointer and the pointer is
 * re-created on load (see pre_save_ind_addr/post_load_ind_addr).
 */
typedef struct IndAddrPtrTmp {
    IndAddr **parent;
    uint64_t addr;
    int32_t len;
} IndAddrPtrTmp;
  217. static int post_load_ind_addr(void *opaque, int version_id)
  218. {
  219. IndAddrPtrTmp *ptmp = opaque;
  220. IndAddr **ind_addr = ptmp->parent;
  221. if (ptmp->len != 0) {
  222. *ind_addr = get_indicator(ptmp->addr, ptmp->len);
  223. } else {
  224. *ind_addr = NULL;
  225. }
  226. return 0;
  227. }
  228. static int pre_save_ind_addr(void *opaque)
  229. {
  230. IndAddrPtrTmp *ptmp = opaque;
  231. IndAddr *ind_addr = *(ptmp->parent);
  232. if (ind_addr != NULL) {
  233. ptmp->len = ind_addr->len;
  234. ptmp->addr = ind_addr->addr;
  235. } else {
  236. ptmp->len = 0;
  237. ptmp->addr = 0L;
  238. }
  239. return 0;
  240. }
static const VMStateDescription vmstate_ind_addr_tmp = {
    .name = "s390_ind_addr_tmp",
    .pre_save = pre_save_ind_addr,
    .post_load = post_load_ind_addr,
    .fields = (const VMStateField[]) {
        VMSTATE_INT32(len, IndAddrPtrTmp),
        VMSTATE_UINT64(addr, IndAddrPtrTmp),
        VMSTATE_END_OF_LIST()
    }
};

/*
 * NOTE(review): this intentionally-or-not shares the name
 * "s390_ind_addr_tmp" with the description above. The name is part of the
 * migration stream, so renaming would break compatibility - confirm before
 * touching it.
 */
const VMStateDescription vmstate_ind_addr = {
    .name = "s390_ind_addr_tmp",
    .fields = (const VMStateField[]) {
        VMSTATE_WITH_TMP(IndAddr*, IndAddrPtrTmp, vmstate_ind_addr_tmp),
        VMSTATE_END_OF_LIST()
    }
};
/* One channel subsystem image: its subchannel sets and channel paths. */
typedef struct CssImage {
    SubchSet *sch_set[MAX_SSID + 1];
    ChpInfo chpids[MAX_CHPID + 1];
} CssImage;

static const VMStateDescription vmstate_css_img = {
    .name = "s390_css_img",
    .version_id = 1,
    .minimum_version_id = 1,
    .fields = (const VMStateField[]) {
        /* Subchannel sets have no relevant state. */
        VMSTATE_STRUCT_ARRAY(chpids, CssImage, MAX_CHPID + 1, 0,
                             vmstate_chp_info, ChpInfo),
        VMSTATE_END_OF_LIST()
    }
};

/* A registered I/O adapter (one per type/ISC combination). */
typedef struct IoAdapter {
    uint32_t id;
    uint8_t type;
    uint8_t isc;
    uint8_t flags;
} IoAdapter;

/* Global state of the channel subsystem (see channel_subsys below). */
typedef struct ChannelSubSys {
    QTAILQ_HEAD(, CrwContainer) pending_crws;
    bool sei_pending;
    bool do_crw_mchk;
    bool crws_lost;
    uint8_t max_cssid;
    uint8_t max_ssid;
    bool chnmon_active;
    uint64_t chnmon_area;
    CssImage *css[MAX_CSSID + 1];
    uint8_t default_cssid;
    /* don't migrate, see css_register_io_adapters */
    IoAdapter *io_adapters[CSS_IO_ADAPTER_TYPE_NUMS][MAX_ISC + 1];
    /* don't migrate, see get_indicator and IndAddrPtrTmp */
    QTAILQ_HEAD(, IndAddr) indicator_addresses;
} ChannelSubSys;
static const VMStateDescription vmstate_css = {
    .name = "s390_css",
    .version_id = 1,
    .minimum_version_id = 1,
    .fields = (const VMStateField[]) {
        VMSTATE_QTAILQ_V(pending_crws, ChannelSubSys, 1, vmstate_crw_container,
                         CrwContainer, sibling),
        VMSTATE_BOOL(sei_pending, ChannelSubSys),
        VMSTATE_BOOL(do_crw_mchk, ChannelSubSys),
        VMSTATE_BOOL(crws_lost, ChannelSubSys),
        /* These were kind of migrated by virtio */
        VMSTATE_UINT8(max_cssid, ChannelSubSys),
        VMSTATE_UINT8(max_ssid, ChannelSubSys),
        VMSTATE_BOOL(chnmon_active, ChannelSubSys),
        VMSTATE_UINT64(chnmon_area, ChannelSubSys),
        VMSTATE_ARRAY_OF_POINTER_TO_STRUCT(css, ChannelSubSys, MAX_CSSID + 1,
                                           0, vmstate_css_img, CssImage),
        VMSTATE_UINT8(default_cssid, ChannelSubSys),
        VMSTATE_END_OF_LIST()
    }
};

/* The single, file-global channel subsystem instance. */
static ChannelSubSys channel_subsys = {
    .pending_crws = QTAILQ_HEAD_INITIALIZER(channel_subsys.pending_crws),
    .do_crw_mchk = true,
    .sei_pending = false,
    .crws_lost = false,
    .chnmon_active = false,
    .indicator_addresses =
        QTAILQ_HEAD_INITIALIZER(channel_subsys.indicator_addresses),
};
  325. static int subch_dev_pre_save(void *opaque)
  326. {
  327. SubchDev *s = opaque;
  328. /* Prepare remote_schid for save */
  329. s->migrated_schid = s->schid;
  330. return 0;
  331. }
/*
 * Incoming migration fixups: move the subchannel to the schid used on the
 * source and, for streams without css state, reconstruct max_ssid/max_cssid
 * from the loaded subchannel.
 */
static int subch_dev_post_load(void *opaque, int version_id)
{
    SubchDev *s = opaque;

    /* Re-assign the subchannel to remote_schid if necessary */
    if (s->migrated_schid != s->schid) {
        if (css_find_subch(true, s->cssid, s->ssid, s->schid) == s) {
            /*
             * Cleanup the slot before moving to s->migrated_schid provided
             * it still belongs to us, i.e. it was not changed by previous
             * invocation of this function.
             */
            css_subch_assign(s->cssid, s->ssid, s->schid, s->devno, NULL);
        }
        /* It's OK to re-assign without a prior de-assign. */
        s->schid = s->migrated_schid;
        css_subch_assign(s->cssid, s->ssid, s->schid, s->devno, s);
    }

    if (css_migration_enabled) {
        /* No compat voodoo to do ;) */
        return 0;
    }
    /*
     * Hack alert. If we don't migrate the channel subsystem status
     * we still need to find out if the guest enabled mss/mcss-e.
     * If the subchannel is enabled, it certainly was able to access it,
     * so adjust the max_ssid/max_cssid values for relevant ssid/cssid
     * values. This is not watertight, but better than nothing.
     */
    if (s->curr_status.pmcw.flags & PMCW_FLAGS_MASK_ENA) {
        if (s->ssid) {
            channel_subsys.max_ssid = MAX_SSID;
        }
        if (s->cssid != channel_subsys.default_cssid) {
            channel_subsys.max_cssid = MAX_CSSID;
        }
    }
    return 0;
}
  370. void css_register_vmstate(void)
  371. {
  372. if (css_migration_enabled) {
  373. vmstate_register(NULL, 0, &vmstate_css, &channel_subsys);
  374. }
  375. }
  376. IndAddr *get_indicator(hwaddr ind_addr, int len)
  377. {
  378. IndAddr *indicator;
  379. QTAILQ_FOREACH(indicator, &channel_subsys.indicator_addresses, sibling) {
  380. if (indicator->addr == ind_addr) {
  381. indicator->refcnt++;
  382. return indicator;
  383. }
  384. }
  385. indicator = g_new0(IndAddr, 1);
  386. indicator->addr = ind_addr;
  387. indicator->len = len;
  388. indicator->refcnt = 1;
  389. QTAILQ_INSERT_TAIL(&channel_subsys.indicator_addresses,
  390. indicator, sibling);
  391. return indicator;
  392. }
  393. static int s390_io_adapter_map(AdapterInfo *adapter, uint64_t map_addr,
  394. bool do_map)
  395. {
  396. S390FLICState *fs = s390_get_flic();
  397. S390FLICStateClass *fsc = s390_get_flic_class(fs);
  398. return fsc->io_adapter_map(fs, adapter->adapter_id, map_addr, do_map);
  399. }
  400. void release_indicator(AdapterInfo *adapter, IndAddr *indicator)
  401. {
  402. assert(indicator->refcnt > 0);
  403. indicator->refcnt--;
  404. if (indicator->refcnt > 0) {
  405. return;
  406. }
  407. QTAILQ_REMOVE(&channel_subsys.indicator_addresses, indicator, sibling);
  408. if (indicator->map) {
  409. s390_io_adapter_map(adapter, indicator->map, false);
  410. }
  411. g_free(indicator);
  412. }
  413. int map_indicator(AdapterInfo *adapter, IndAddr *indicator)
  414. {
  415. int ret;
  416. if (indicator->map) {
  417. return 0; /* already mapped is not an error */
  418. }
  419. indicator->map = indicator->addr;
  420. ret = s390_io_adapter_map(adapter, indicator->map, true);
  421. if ((ret != 0) && (ret != -ENOSYS)) {
  422. goto out_err;
  423. }
  424. return 0;
  425. out_err:
  426. indicator->map = 0;
  427. return ret;
  428. }
  429. int css_create_css_image(uint8_t cssid, bool default_image)
  430. {
  431. trace_css_new_image(cssid, default_image ? "(default)" : "");
  432. /* 255 is reserved */
  433. if (cssid == 255) {
  434. return -EINVAL;
  435. }
  436. if (channel_subsys.css[cssid]) {
  437. return -EBUSY;
  438. }
  439. channel_subsys.css[cssid] = g_new0(CssImage, 1);
  440. if (default_image) {
  441. channel_subsys.default_cssid = cssid;
  442. }
  443. return 0;
  444. }
  445. uint32_t css_get_adapter_id(CssIoAdapterType type, uint8_t isc)
  446. {
  447. if (type >= CSS_IO_ADAPTER_TYPE_NUMS || isc > MAX_ISC ||
  448. !channel_subsys.io_adapters[type][isc]) {
  449. return -1;
  450. }
  451. return channel_subsys.io_adapters[type][isc]->id;
  452. }
  453. /**
  454. * css_register_io_adapters: Register I/O adapters per ISC during init
  455. *
  456. * @swap: an indication if byte swap is needed.
  457. * @maskable: an indication if the adapter is subject to the mask operation.
  458. * @flags: further characteristics of the adapter.
  459. * e.g. suppressible, an indication if the adapter is subject to AIS.
  460. * @errp: location to store error information.
  461. */
  462. void css_register_io_adapters(CssIoAdapterType type, bool swap, bool maskable,
  463. uint8_t flags, Error **errp)
  464. {
  465. uint32_t id;
  466. int ret, isc;
  467. IoAdapter *adapter;
  468. S390FLICState *fs = s390_get_flic();
  469. S390FLICStateClass *fsc = s390_get_flic_class(fs);
  470. /*
  471. * Disallow multiple registrations for the same device type.
  472. * Report an error if registering for an already registered type.
  473. */
  474. if (channel_subsys.io_adapters[type][0]) {
  475. error_setg(errp, "Adapters for type %d already registered", type);
  476. }
  477. for (isc = 0; isc <= MAX_ISC; isc++) {
  478. id = (type << 3) | isc;
  479. ret = fsc->register_io_adapter(fs, id, isc, swap, maskable, flags);
  480. if (ret == 0) {
  481. adapter = g_new0(IoAdapter, 1);
  482. adapter->id = id;
  483. adapter->isc = isc;
  484. adapter->type = type;
  485. adapter->flags = flags;
  486. channel_subsys.io_adapters[type][isc] = adapter;
  487. } else {
  488. error_setg_errno(errp, -ret, "Unexpected error %d when "
  489. "registering adapter %d", ret, id);
  490. break;
  491. }
  492. }
  493. /*
  494. * No need to free registered adapters in kvm: kvm will clean up
  495. * when the machine goes away.
  496. */
  497. if (ret) {
  498. for (isc--; isc >= 0; isc--) {
  499. g_free(channel_subsys.io_adapters[type][isc]);
  500. channel_subsys.io_adapters[type][isc] = NULL;
  501. }
  502. }
  503. }
  504. static void css_clear_io_interrupt(uint16_t subchannel_id,
  505. uint16_t subchannel_nr)
  506. {
  507. Error *err = NULL;
  508. static bool no_clear_irq;
  509. S390FLICState *fs = s390_get_flic();
  510. S390FLICStateClass *fsc = s390_get_flic_class(fs);
  511. int r;
  512. if (unlikely(no_clear_irq)) {
  513. return;
  514. }
  515. r = fsc->clear_io_irq(fs, subchannel_id, subchannel_nr);
  516. switch (r) {
  517. case 0:
  518. break;
  519. case -ENOSYS:
  520. no_clear_irq = true;
  521. /*
  522. * Ignore unavailability, as the user can't do anything
  523. * about it anyway.
  524. */
  525. break;
  526. default:
  527. error_setg_errno(&err, -r, "unexpected error condition");
  528. error_propagate(&error_abort, err);
  529. }
  530. }
  531. static inline uint16_t css_do_build_subchannel_id(uint8_t cssid, uint8_t ssid)
  532. {
  533. if (channel_subsys.max_cssid > 0) {
  534. return (cssid << 8) | (1 << 3) | (ssid << 1) | 1;
  535. }
  536. return (ssid << 1) | 1;
  537. }
  538. uint16_t css_build_subchannel_id(SubchDev *sch)
  539. {
  540. return css_do_build_subchannel_id(sch->cssid, sch->ssid);
  541. }
  542. void css_inject_io_interrupt(SubchDev *sch)
  543. {
  544. uint8_t isc = (sch->curr_status.pmcw.flags & PMCW_FLAGS_MASK_ISC) >> 11;
  545. trace_css_io_interrupt(sch->cssid, sch->ssid, sch->schid,
  546. sch->curr_status.pmcw.intparm, isc, "");
  547. s390_io_interrupt(css_build_subchannel_id(sch),
  548. sch->schid,
  549. sch->curr_status.pmcw.intparm,
  550. isc << 27);
  551. }
/*
 * Make an enabled, not-yet-status-pending subchannel status pending with
 * alert status and inject an unsolicited I/O interrupt for it.
 */
void css_conditional_io_interrupt(SubchDev *sch)
{
    /*
     * If the subchannel is not enabled, it is not made status pending
     * (see PoP p. 16-17, "Status Control").
     */
    if (!(sch->curr_status.pmcw.flags & PMCW_FLAGS_MASK_ENA)) {
        return;
    }

    /*
     * If the subchannel is not currently status pending, make it pending
     * with alert status.
     */
    if (!(sch->curr_status.scsw.ctrl & SCSW_STCTL_STATUS_PEND)) {
        /* The ISC is kept in the PMCW flags field. */
        uint8_t isc = (sch->curr_status.pmcw.flags & PMCW_FLAGS_MASK_ISC) >> 11;

        trace_css_io_interrupt(sch->cssid, sch->ssid, sch->schid,
                               sch->curr_status.pmcw.intparm, isc,
                               "(unsolicited)");
        sch->curr_status.scsw.ctrl &= ~SCSW_CTRL_MASK_STCTL;
        sch->curr_status.scsw.ctrl |=
            SCSW_STCTL_ALERT | SCSW_STCTL_STATUS_PEND;
        /* Inject an I/O interrupt. */
        s390_io_interrupt(css_build_subchannel_id(sch),
                          sch->schid,
                          sch->curr_status.pmcw.intparm,
                          isc << 27);
    }
}
  580. int css_do_sic(S390CPU *cpu, uint8_t isc, uint16_t mode)
  581. {
  582. CPUS390XState *env = &cpu->env;
  583. S390FLICState *fs = s390_get_flic();
  584. S390FLICStateClass *fsc = s390_get_flic_class(fs);
  585. int r;
  586. if (env->psw.mask & PSW_MASK_PSTATE) {
  587. r = -PGM_PRIVILEGED;
  588. goto out;
  589. }
  590. trace_css_do_sic(mode, isc);
  591. switch (mode) {
  592. case SIC_IRQ_MODE_ALL:
  593. case SIC_IRQ_MODE_SINGLE:
  594. break;
  595. default:
  596. r = -PGM_OPERAND;
  597. goto out;
  598. }
  599. r = fsc->modify_ais_mode(fs, isc, mode) ? -PGM_OPERATION : 0;
  600. out:
  601. return r;
  602. }
  603. void css_adapter_interrupt(CssIoAdapterType type, uint8_t isc)
  604. {
  605. S390FLICState *fs = s390_get_flic();
  606. S390FLICStateClass *fsc = s390_get_flic_class(fs);
  607. uint32_t io_int_word = (isc << 27) | IO_INT_WORD_AI;
  608. IoAdapter *adapter = channel_subsys.io_adapters[type][isc];
  609. if (!adapter) {
  610. return;
  611. }
  612. trace_css_adapter_interrupt(isc);
  613. if (fs->ais_supported) {
  614. if (fsc->inject_airq(fs, type, isc, adapter->flags)) {
  615. error_report("Failed to inject airq with AIS supported");
  616. exit(1);
  617. }
  618. } else {
  619. s390_io_interrupt(0, 0, 0, io_int_word);
  620. }
  621. }
/*
 * Handle the clear function (csch) for a subchannel: reset the relevant
 * PMCW/SCSW fields and make the subchannel status pending.
 */
static void sch_handle_clear_func(SubchDev *sch)
{
    SCHIB *schib = &sch->curr_status;
    int path;

    /* Path management: In our simple css, we always choose the only path. */
    path = 0x80;

    /* Reset values prior to 'issuing the clear signal'. */
    schib->pmcw.lpum = 0;
    schib->pmcw.pom = 0xff;
    schib->scsw.flags &= ~SCSW_FLAGS_MASK_PNO;

    /* We always 'attempt to issue the clear signal', and we always succeed. */
    sch->channel_prog = 0x0;
    sch->last_cmd_valid = false;
    schib->scsw.ctrl &= ~SCSW_ACTL_CLEAR_PEND;
    schib->scsw.ctrl |= SCSW_STCTL_STATUS_PEND;

    schib->scsw.dstat = 0;
    schib->scsw.cstat = 0;
    /* lpum ends up as the chosen path, overriding the reset above. */
    schib->pmcw.lpum = path;
}
/*
 * Handle the halt function (hsch) for a subchannel: fake a successful
 * halt signal and present the resulting status.
 */
static void sch_handle_halt_func(SubchDev *sch)
{
    SCHIB *schib = &sch->curr_status;
    hwaddr curr_ccw = sch->channel_prog;
    int path;

    /* Path management: In our simple css, we always choose the only path. */
    path = 0x80;

    /* We always 'attempt to issue the halt signal', and we always succeed. */
    sch->channel_prog = 0x0;
    sch->last_cmd_valid = false;
    schib->scsw.ctrl &= ~SCSW_ACTL_HALT_PEND;
    schib->scsw.ctrl |= SCSW_STCTL_STATUS_PEND;

    /* Present device end unless only a start is pending or suspended. */
    if ((schib->scsw.ctrl & (SCSW_ACTL_SUBCH_ACTIVE |
                             SCSW_ACTL_DEVICE_ACTIVE)) ||
        !((schib->scsw.ctrl & SCSW_ACTL_START_PEND) ||
          (schib->scsw.ctrl & SCSW_ACTL_SUSP))) {
        schib->scsw.dstat = SCSW_DSTAT_DEVICE_END;
    }
    /* Advance the CPA past the current (8-byte) CCW if a program was active. */
    if ((schib->scsw.ctrl & (SCSW_ACTL_SUBCH_ACTIVE |
                             SCSW_ACTL_DEVICE_ACTIVE)) ||
        (schib->scsw.ctrl & SCSW_ACTL_SUSP)) {
        schib->scsw.cpa = curr_ccw + 8;
    }
    schib->scsw.cstat = 0;
    schib->pmcw.lpum = path;
}
  667. /*
  668. * As the SenseId struct cannot be packed (would cause unaligned accesses), we
  669. * have to copy the individual fields to an unstructured area using the correct
  670. * layout (see SA22-7204-01 "Common I/O-Device Commands").
  671. */
  672. static void copy_sense_id_to_guest(uint8_t *dest, SenseId *src)
  673. {
  674. int i;
  675. dest[0] = src->reserved;
  676. stw_be_p(dest + 1, src->cu_type);
  677. dest[3] = src->cu_model;
  678. stw_be_p(dest + 4, src->dev_type);
  679. dest[6] = src->dev_model;
  680. dest[7] = src->unused;
  681. for (i = 0; i < ARRAY_SIZE(src->ciw); i++) {
  682. dest[8 + i * 4] = src->ciw[i].type;
  683. dest[9 + i * 4] = src->ciw[i].command;
  684. stw_be_p(dest + 10 + i * 4, src->ciw[i].count);
  685. }
  686. }
/*
 * Read a CCW from guest memory at @addr and normalise it to format-1
 * layout.  Format-0 TICs get their flags/count forced to zero since
 * only the address is relevant for them.
 */
static CCW1 copy_ccw_from_guest(hwaddr addr, bool fmt1)
{
    CCW0 tmp0;
    CCW1 tmp1;
    CCW1 ret;
    if (fmt1) {
        cpu_physical_memory_read(addr, &tmp1, sizeof(tmp1));
        ret.cmd_code = tmp1.cmd_code;
        ret.flags = tmp1.flags;
        ret.count = be16_to_cpu(tmp1.count);
        ret.cda = be32_to_cpu(tmp1.cda);
    } else {
        cpu_physical_memory_read(addr, &tmp0, sizeof(tmp0));
        if ((tmp0.cmd_code & 0x0f) == CCW_CMD_TIC) {
            ret.cmd_code = CCW_CMD_TIC;
            ret.flags = 0;
            ret.count = 0;
        } else {
            ret.cmd_code = tmp0.cmd_code;
            ret.flags = tmp0.flags;
            ret.count = be16_to_cpu(tmp0.count);
        }
        /* The format-0 data address is split across two fields. */
        ret.cda = be16_to_cpu(tmp0.cda1) | (tmp0.cda0 << 16);
    }
    return ret;
}
  713. /**
  714. * If out of bounds marks the stream broken. If broken returns -EINVAL,
  715. * otherwise the requested length (may be zero)
  716. */
  717. static inline int cds_check_len(CcwDataStream *cds, int len)
  718. {
  719. if (cds->at_byte + len > cds->count) {
  720. cds->flags |= CDS_F_STREAM_BROKEN;
  721. }
  722. return cds->flags & CDS_F_STREAM_BROKEN ? -EINVAL : len;
  723. }
  724. static inline bool cds_ccw_addrs_ok(hwaddr addr, int len, bool ccw_fmt1)
  725. {
  726. return (addr + len) < (ccw_fmt1 ? (1UL << 31) : (1UL << 24));
  727. }
/*
 * Transfer @len bytes between @buff and the contiguous data area
 * designated by the CCW, or only advance the stream position for
 * CDS_OP_A.  Returns 0 on success, -EINVAL on a broken stream or a
 * channel-program-check condition.
 */
static int ccw_dstream_rw_noflags(CcwDataStream *cds, void *buff, int len,
                                  CcwDataStreamOp op)
{
    int ret;
    ret = cds_check_len(cds, len);
    if (ret <= 0) {
        return ret;
    }
    if (!cds_ccw_addrs_ok(cds->cda, len, cds->flags & CDS_F_FMT)) {
        return -EINVAL; /* channel program check */
    }
    if (op == CDS_OP_A) {
        /* Address-only operation: no data is moved. */
        goto incr;
    }
    if (!cds->do_skip) {
        ret = address_space_rw(&address_space_memory, cds->cda,
                               MEMTXATTRS_UNSPECIFIED, buff, len, op);
    } else {
        /* The skip flag suppresses the actual data transfer. */
        ret = MEMTX_OK;
    }
    if (ret != MEMTX_OK) {
        cds->flags |= CDS_F_STREAM_BROKEN;
        return -EINVAL;
    }
incr:
    cds->at_byte += len;
    cds->cda += len;
    return 0;
}
  757. /* returns values between 1 and bsz, where bsz is a power of 2 */
  758. static inline uint16_t ida_continuous_left(hwaddr cda, uint64_t bsz)
  759. {
  760. return bsz - (cda & (bsz - 1));
  761. }
  762. static inline uint64_t ccw_ida_block_size(uint8_t flags)
  763. {
  764. if ((flags & CDS_F_C64) && !(flags & CDS_F_I2K)) {
  765. return 1ULL << 12;
  766. }
  767. return 1ULL << 11;
  768. }
  769. static inline int ida_read_next_idaw(CcwDataStream *cds)
  770. {
  771. union {uint64_t fmt2; uint32_t fmt1; } idaw;
  772. int ret;
  773. hwaddr idaw_addr;
  774. bool idaw_fmt2 = cds->flags & CDS_F_C64;
  775. bool ccw_fmt1 = cds->flags & CDS_F_FMT;
  776. if (idaw_fmt2) {
  777. idaw_addr = cds->cda_orig + sizeof(idaw.fmt2) * cds->at_idaw;
  778. if (idaw_addr & 0x07 || !cds_ccw_addrs_ok(idaw_addr, 0, ccw_fmt1)) {
  779. return -EINVAL; /* channel program check */
  780. }
  781. ret = address_space_read(&address_space_memory, idaw_addr,
  782. MEMTXATTRS_UNSPECIFIED, &idaw.fmt2,
  783. sizeof(idaw.fmt2));
  784. cds->cda = be64_to_cpu(idaw.fmt2);
  785. } else {
  786. idaw_addr = cds->cda_orig + sizeof(idaw.fmt1) * cds->at_idaw;
  787. if (idaw_addr & 0x03 || !cds_ccw_addrs_ok(idaw_addr, 0, ccw_fmt1)) {
  788. return -EINVAL; /* channel program check */
  789. }
  790. ret = address_space_read(&address_space_memory, idaw_addr,
  791. MEMTXATTRS_UNSPECIFIED, &idaw.fmt1,
  792. sizeof(idaw.fmt1));
  793. cds->cda = be64_to_cpu(idaw.fmt1);
  794. if (cds->cda & 0x80000000) {
  795. return -EINVAL; /* channel program check */
  796. }
  797. }
  798. ++(cds->at_idaw);
  799. if (ret != MEMTX_OK) {
  800. /* assume inaccessible address */
  801. return -EINVAL; /* channel program check */
  802. }
  803. return 0;
  804. }
  805. static int ccw_dstream_rw_ida(CcwDataStream *cds, void *buff, int len,
  806. CcwDataStreamOp op)
  807. {
  808. uint64_t bsz = ccw_ida_block_size(cds->flags);
  809. int ret = 0;
  810. uint16_t cont_left, iter_len;
  811. ret = cds_check_len(cds, len);
  812. if (ret <= 0) {
  813. return ret;
  814. }
  815. if (!cds->at_idaw) {
  816. /* read first idaw */
  817. ret = ida_read_next_idaw(cds);
  818. if (ret) {
  819. goto err;
  820. }
  821. cont_left = ida_continuous_left(cds->cda, bsz);
  822. } else {
  823. cont_left = ida_continuous_left(cds->cda, bsz);
  824. if (cont_left == bsz) {
  825. ret = ida_read_next_idaw(cds);
  826. if (ret) {
  827. goto err;
  828. }
  829. if (cds->cda & (bsz - 1)) {
  830. ret = -EINVAL; /* channel program check */
  831. goto err;
  832. }
  833. }
  834. }
  835. do {
  836. iter_len = MIN(len, cont_left);
  837. if (op != CDS_OP_A) {
  838. if (!cds->do_skip) {
  839. ret = address_space_rw(&address_space_memory, cds->cda,
  840. MEMTXATTRS_UNSPECIFIED, buff, iter_len,
  841. op);
  842. } else {
  843. ret = MEMTX_OK;
  844. }
  845. if (ret != MEMTX_OK) {
  846. /* assume inaccessible address */
  847. ret = -EINVAL; /* channel program check */
  848. goto err;
  849. }
  850. }
  851. cds->at_byte += iter_len;
  852. cds->cda += iter_len;
  853. len -= iter_len;
  854. if (!len) {
  855. break;
  856. }
  857. ret = ida_read_next_idaw(cds);
  858. if (ret) {
  859. goto err;
  860. }
  861. cont_left = bsz;
  862. } while (true);
  863. return ret;
  864. err:
  865. cds->flags |= CDS_F_STREAM_BROKEN;
  866. return ret;
  867. }
/*
 * Initialise the data stream for one CCW: derive the addressing flags
 * from the ORB and CCW, latch count and data address, and select the
 * direct or IDA transfer handler.
 */
void ccw_dstream_init(CcwDataStream *cds, CCW1 const *ccw, ORB const *orb)
{
    /*
     * We don't support MIDA (an optional facility) yet and we
     * catch this earlier. Just for expressing the precondition.
     */
    g_assert(!(orb->ctrl1 & ORB_CTRL1_MASK_MIDAW));
    cds->flags = (orb->ctrl0 & ORB_CTRL0_MASK_I2K ? CDS_F_I2K : 0) |
                 (orb->ctrl0 & ORB_CTRL0_MASK_C64 ? CDS_F_C64 : 0) |
                 (orb->ctrl0 & ORB_CTRL0_MASK_FMT ? CDS_F_FMT : 0) |
                 (ccw->flags & CCW_FLAG_IDA ? CDS_F_IDA : 0);
    cds->count = ccw->count;
    cds->cda_orig = ccw->cda;
    /* skip is only effective for read, read backwards, or sense commands */
    cds->do_skip = (ccw->flags & CCW_FLAG_SKIP) &&
        ((ccw->cmd_code & 0x0f) == CCW_CMD_BASIC_SENSE ||
         (ccw->cmd_code & 0x03) == 0x02 /* read */ ||
         (ccw->cmd_code & 0x0f) == 0x0c /* read backwards */);
    ccw_dstream_rewind(cds);
    if (!(cds->flags & CDS_F_IDA)) {
        cds->op_handler = ccw_dstream_rw_noflags;
    } else {
        cds->op_handler = ccw_dstream_rw_ida;
    }
}
/*
 * Fetch, decode and execute a single CCW at @ccw_addr.
 *
 * Returns:
 *   0            - command completed successfully
 *   -EAGAIN      - chaining or TIC: continue with the next CCW
 *                  (sch->channel_prog has been updated)
 *   -EINPROGRESS - channel program suspended (suspend flag, if allowed)
 *   -EINVAL      - channel-program check
 *   -ENOSYS      - unsupported command (command reject)
 *   other        - device-specific error from the ccw callback
 */
static int css_interpret_ccw(SubchDev *sch, hwaddr ccw_addr,
                             bool suspend_allowed)
{
    int ret;
    bool check_len;
    int len;
    CCW1 ccw;
    if (!ccw_addr) {
        return -EINVAL; /* channel-program check */
    }
    /* Check doubleword aligned and 31 or 24 (fmt 0) bit addressable. */
    if (ccw_addr & (sch->ccw_fmt_1 ? 0x80000007 : 0xff000007)) {
        return -EINVAL;
    }
    /* Translate everything to format-1 ccws - the information is the same. */
    ccw = copy_ccw_from_guest(ccw_addr, sch->ccw_fmt_1);
    /* Check for invalid command codes. */
    if ((ccw.cmd_code & 0x0f) == 0) {
        return -EINVAL;
    }
    if (((ccw.cmd_code & 0x0f) == CCW_CMD_TIC) &&
        ((ccw.cmd_code & 0xf0) != 0)) {
        return -EINVAL;
    }
    /* A format-0 CCW other than TIC must have a non-zero count. */
    if (!sch->ccw_fmt_1 && (ccw.count == 0) &&
        (ccw.cmd_code != CCW_CMD_TIC)) {
        return -EINVAL;
    }
    /* We don't support MIDA. */
    if (ccw.flags & CCW_FLAG_MIDA) {
        return -EINVAL;
    }
    if (ccw.flags & CCW_FLAG_SUSPEND) {
        return suspend_allowed ? -EINPROGRESS : -EINVAL;
    }
    /* SLI without data chaining tolerates incorrect lengths. */
    check_len = !((ccw.flags & CCW_FLAG_SLI) && !(ccw.flags & CCW_FLAG_DC));
    if (!ccw.cda) {
        /* Cap the number of CCWs with a zero data address. */
        if (sch->ccw_no_data_cnt == 255) {
            return -EINVAL;
        }
        sch->ccw_no_data_cnt++;
    }
    /* Look at the command. */
    ccw_dstream_init(&sch->cds, &ccw, &(sch->orb));
    switch (ccw.cmd_code) {
    case CCW_CMD_NOOP:
        /* Nothing to do. */
        ret = 0;
        break;
    case CCW_CMD_BASIC_SENSE:
        if (check_len) {
            if (ccw.count != sizeof(sch->sense_data)) {
                ret = -EINVAL;
                break;
            }
        }
        len = MIN(ccw.count, sizeof(sch->sense_data));
        ret = ccw_dstream_write_buf(&sch->cds, sch->sense_data, len);
        sch->curr_status.scsw.count = ccw_dstream_residual_count(&sch->cds);
        if (!ret) {
            /* Sense data is consumed once it has been read successfully. */
            memset(sch->sense_data, 0, sizeof(sch->sense_data));
        }
        break;
    case CCW_CMD_SENSE_ID:
    {
        /* According to SA22-7204-01, Sense-ID can store up to 256 bytes */
        uint8_t sense_id[256];
        copy_sense_id_to_guest(sense_id, &sch->id);
        /* Sense ID information is device specific. */
        if (check_len) {
            if (ccw.count != sizeof(sense_id)) {
                ret = -EINVAL;
                break;
            }
        }
        len = MIN(ccw.count, sizeof(sense_id));
        /*
         * Only indicate 0xff in the first sense byte if we actually
         * have enough place to store at least bytes 0-3.
         */
        if (len >= 4) {
            sense_id[0] = 0xff;
        } else {
            sense_id[0] = 0;
        }
        ret = ccw_dstream_write_buf(&sch->cds, sense_id, len);
        if (!ret) {
            sch->curr_status.scsw.count = ccw_dstream_residual_count(&sch->cds);
        }
        break;
    }
    case CCW_CMD_TIC:
        /* A TIC must not directly follow another TIC. */
        if (sch->last_cmd_valid && (sch->last_cmd.cmd_code == CCW_CMD_TIC)) {
            ret = -EINVAL;
            break;
        }
        if (ccw.flags || ccw.count) {
            /* We have already sanitized these if converted from fmt 0. */
            ret = -EINVAL;
            break;
        }
        /* Transfer in channel: continue at the designated address. */
        sch->channel_prog = ccw.cda;
        ret = -EAGAIN;
        break;
    default:
        if (sch->ccw_cb) {
            /* Handle device specific commands. */
            ret = sch->ccw_cb(sch, ccw);
        } else {
            ret = -ENOSYS;
        }
        break;
    }
    sch->last_cmd = ccw;
    sch->last_cmd_valid = true;
    if (ret == 0) {
        /* Command chaining: continue with the next (format-1) CCW. */
        if (ccw.flags & CCW_FLAG_CC) {
            sch->channel_prog += 8;
            ret = -EAGAIN;
        }
    }
    return ret;
}
/*
 * Execute the start function for a virtual (emulated) subchannel:
 * interpret the channel program CCW by CCW and reflect the result in
 * the SCSW.  Covers both a fresh ssch (ORB present) and a resume via
 * rsch (suspended program).
 */
static void sch_handle_start_func_virtual(SubchDev *sch)
{
    SCHIB *schib = &sch->curr_status;
    int path;
    int ret;
    bool suspend_allowed;
    /* Path management: In our simple css, we always choose the only path. */
    path = 0x80;
    if (!(schib->scsw.ctrl & SCSW_ACTL_SUSP)) {
        /* Start Function triggered via ssch, i.e. we have an ORB */
        ORB *orb = &sch->orb;
        schib->scsw.cstat = 0;
        schib->scsw.dstat = 0;
        /* Look at the orb and try to execute the channel program. */
        schib->pmcw.intparm = orb->intparm;
        if (!(orb->lpm & path)) {
            /* Generate a deferred cc 3 condition. */
            schib->scsw.flags |= SCSW_FLAGS_MASK_CC;
            schib->scsw.ctrl &= ~SCSW_CTRL_MASK_STCTL;
            schib->scsw.ctrl |= (SCSW_STCTL_ALERT | SCSW_STCTL_STATUS_PEND);
            return;
        }
        sch->ccw_fmt_1 = !!(orb->ctrl0 & ORB_CTRL0_MASK_FMT);
        schib->scsw.flags |= (sch->ccw_fmt_1) ? SCSW_FLAGS_MASK_FMT : 0;
        sch->ccw_no_data_cnt = 0;
        suspend_allowed = !!(orb->ctrl0 & ORB_CTRL0_MASK_SPND);
    } else {
        /* Start Function resumed via rsch */
        schib->scsw.ctrl &= ~(SCSW_ACTL_SUSP | SCSW_ACTL_RESUME_PEND);
        /* The channel program had been suspended before. */
        suspend_allowed = true;
    }
    sch->last_cmd_valid = false;
    do {
        ret = css_interpret_ccw(sch, sch->channel_prog, suspend_allowed);
        switch (ret) {
        case -EAGAIN:
            /* ccw chain, continue processing */
            break;
        case 0:
            /* success */
            schib->scsw.ctrl &= ~SCSW_ACTL_START_PEND;
            schib->scsw.ctrl &= ~SCSW_CTRL_MASK_STCTL;
            schib->scsw.ctrl |= SCSW_STCTL_PRIMARY | SCSW_STCTL_SECONDARY |
                SCSW_STCTL_STATUS_PEND;
            schib->scsw.dstat = SCSW_DSTAT_CHANNEL_END | SCSW_DSTAT_DEVICE_END;
            schib->scsw.cpa = sch->channel_prog + 8;
            break;
        case -EIO:
            /* I/O errors, status depends on specific devices */
            break;
        case -ENOSYS:
            /* unsupported command, generate unit check (command reject) */
            schib->scsw.ctrl &= ~SCSW_ACTL_START_PEND;
            schib->scsw.dstat = SCSW_DSTAT_UNIT_CHECK;
            /* Set sense bit 0 in ecw0. */
            sch->sense_data[0] = 0x80;
            schib->scsw.ctrl &= ~SCSW_CTRL_MASK_STCTL;
            schib->scsw.ctrl |= SCSW_STCTL_PRIMARY | SCSW_STCTL_SECONDARY |
                SCSW_STCTL_ALERT | SCSW_STCTL_STATUS_PEND;
            schib->scsw.cpa = sch->channel_prog + 8;
            break;
        case -EINPROGRESS:
            /* channel program has been suspended */
            schib->scsw.ctrl &= ~SCSW_ACTL_START_PEND;
            schib->scsw.ctrl |= SCSW_ACTL_SUSP;
            break;
        default:
            /* error, generate channel program check */
            schib->scsw.ctrl &= ~SCSW_ACTL_START_PEND;
            schib->scsw.cstat = SCSW_CSTAT_PROG_CHECK;
            schib->scsw.ctrl &= ~SCSW_CTRL_MASK_STCTL;
            schib->scsw.ctrl |= SCSW_STCTL_PRIMARY | SCSW_STCTL_SECONDARY |
                SCSW_STCTL_ALERT | SCSW_STCTL_STATUS_PEND;
            schib->scsw.cpa = sch->channel_prog + 8;
            break;
        }
    } while (ret == -EAGAIN);
}
  1095. static IOInstEnding sch_handle_halt_func_passthrough(SubchDev *sch)
  1096. {
  1097. int ret;
  1098. ret = s390_ccw_halt(sch);
  1099. if (ret == -ENOSYS) {
  1100. sch_handle_halt_func(sch);
  1101. return IOINST_CC_EXPECTED;
  1102. }
  1103. /*
  1104. * Some conditions may have been detected prior to starting the halt
  1105. * function; map them to the correct cc.
  1106. * Note that we map both -ENODEV and -EACCES to cc 3 (there's not really
  1107. * anything else we can do.)
  1108. */
  1109. switch (ret) {
  1110. case -EBUSY:
  1111. return IOINST_CC_BUSY;
  1112. case -ENODEV:
  1113. case -EACCES:
  1114. return IOINST_CC_NOT_OPERATIONAL;
  1115. default:
  1116. return IOINST_CC_EXPECTED;
  1117. }
  1118. }
  1119. static IOInstEnding sch_handle_clear_func_passthrough(SubchDev *sch)
  1120. {
  1121. int ret;
  1122. ret = s390_ccw_clear(sch);
  1123. if (ret == -ENOSYS) {
  1124. sch_handle_clear_func(sch);
  1125. return IOINST_CC_EXPECTED;
  1126. }
  1127. /*
  1128. * Some conditions may have been detected prior to starting the clear
  1129. * function; map them to the correct cc.
  1130. * Note that we map both -ENODEV and -EACCES to cc 3 (there's not really
  1131. * anything else we can do.)
  1132. */
  1133. switch (ret) {
  1134. case -ENODEV:
  1135. case -EACCES:
  1136. return IOINST_CC_NOT_OPERATIONAL;
  1137. default:
  1138. return IOINST_CC_EXPECTED;
  1139. }
  1140. }
  1141. static IOInstEnding sch_handle_start_func_passthrough(SubchDev *sch)
  1142. {
  1143. SCHIB *schib = &sch->curr_status;
  1144. ORB *orb = &sch->orb;
  1145. if (!(schib->scsw.ctrl & SCSW_ACTL_SUSP)) {
  1146. assert(orb != NULL);
  1147. schib->pmcw.intparm = orb->intparm;
  1148. }
  1149. return s390_ccw_cmd_request(sch);
  1150. }
  1151. /*
  1152. * On real machines, this would run asynchronously to the main vcpus.
  1153. * We might want to make some parts of the ssch handling (interpreting
  1154. * read/writes) asynchronous later on if we start supporting more than
  1155. * our current very simple devices.
  1156. */
  1157. IOInstEnding do_subchannel_work_virtual(SubchDev *sch)
  1158. {
  1159. SCHIB *schib = &sch->curr_status;
  1160. if (schib->scsw.ctrl & SCSW_FCTL_CLEAR_FUNC) {
  1161. sch_handle_clear_func(sch);
  1162. } else if (schib->scsw.ctrl & SCSW_FCTL_HALT_FUNC) {
  1163. sch_handle_halt_func(sch);
  1164. } else if (schib->scsw.ctrl & SCSW_FCTL_START_FUNC) {
  1165. /* Triggered by both ssch and rsch. */
  1166. sch_handle_start_func_virtual(sch);
  1167. }
  1168. css_inject_io_interrupt(sch);
  1169. /* inst must succeed if this func is called */
  1170. return IOINST_CC_EXPECTED;
  1171. }
  1172. IOInstEnding do_subchannel_work_passthrough(SubchDev *sch)
  1173. {
  1174. SCHIB *schib = &sch->curr_status;
  1175. if (schib->scsw.ctrl & SCSW_FCTL_CLEAR_FUNC) {
  1176. return sch_handle_clear_func_passthrough(sch);
  1177. } else if (schib->scsw.ctrl & SCSW_FCTL_HALT_FUNC) {
  1178. return sch_handle_halt_func_passthrough(sch);
  1179. } else if (schib->scsw.ctrl & SCSW_FCTL_START_FUNC) {
  1180. return sch_handle_start_func_passthrough(sch);
  1181. }
  1182. return IOINST_CC_EXPECTED;
  1183. }
/*
 * Dispatch pending subchannel work to the subchannel's handler
 * (virtual or passthrough).  A function must already have been
 * triggered in scsw.ctrl when this is called.
 */
static IOInstEnding do_subchannel_work(SubchDev *sch)
{
    if (!sch->do_subchannel_work) {
        return IOINST_CC_STATUS_PRESENT;
    }
    g_assert(sch->curr_status.scsw.ctrl & SCSW_CTRL_MASK_FCTL);
    return sch->do_subchannel_work(sch);
}
  1192. static void copy_pmcw_to_guest(PMCW *dest, const PMCW *src)
  1193. {
  1194. int i;
  1195. dest->intparm = cpu_to_be32(src->intparm);
  1196. dest->flags = cpu_to_be16(src->flags);
  1197. dest->devno = cpu_to_be16(src->devno);
  1198. dest->lpm = src->lpm;
  1199. dest->pnom = src->pnom;
  1200. dest->lpum = src->lpum;
  1201. dest->pim = src->pim;
  1202. dest->mbi = cpu_to_be16(src->mbi);
  1203. dest->pom = src->pom;
  1204. dest->pam = src->pam;
  1205. for (i = 0; i < ARRAY_SIZE(dest->chpid); i++) {
  1206. dest->chpid[i] = src->chpid[i];
  1207. }
  1208. dest->chars = cpu_to_be32(src->chars);
  1209. }
  1210. void copy_scsw_to_guest(SCSW *dest, const SCSW *src)
  1211. {
  1212. dest->flags = cpu_to_be16(src->flags);
  1213. dest->ctrl = cpu_to_be16(src->ctrl);
  1214. dest->cpa = cpu_to_be32(src->cpa);
  1215. dest->dstat = src->dstat;
  1216. dest->cstat = src->cstat;
  1217. dest->count = cpu_to_be16(src->count);
  1218. }
  1219. static void copy_schib_to_guest(SCHIB *dest, const SCHIB *src)
  1220. {
  1221. int i;
  1222. /*
  1223. * We copy the PMCW and SCSW in and out of local variables to
  1224. * avoid taking the address of members of a packed struct.
  1225. */
  1226. PMCW src_pmcw, dest_pmcw;
  1227. SCSW src_scsw, dest_scsw;
  1228. src_pmcw = src->pmcw;
  1229. copy_pmcw_to_guest(&dest_pmcw, &src_pmcw);
  1230. dest->pmcw = dest_pmcw;
  1231. src_scsw = src->scsw;
  1232. copy_scsw_to_guest(&dest_scsw, &src_scsw);
  1233. dest->scsw = dest_scsw;
  1234. dest->mba = cpu_to_be64(src->mba);
  1235. for (i = 0; i < ARRAY_SIZE(dest->mda); i++) {
  1236. dest->mda[i] = src->mda[i];
  1237. }
  1238. }
/* Convert an ESW to guest (big-endian) representation. */
void copy_esw_to_guest(ESW *dest, const ESW *src)
{
    dest->word0 = cpu_to_be32(src->word0);
    dest->erw = cpu_to_be32(src->erw);
    dest->word2 = cpu_to_be64(src->word2);
    dest->word4 = cpu_to_be32(src->word4);
}
  1246. IOInstEnding css_do_stsch(SubchDev *sch, SCHIB *schib)
  1247. {
  1248. int ret;
  1249. /*
  1250. * For some subchannels, we may want to update parts of
  1251. * the schib (e.g., update path masks from the host device
  1252. * for passthrough subchannels).
  1253. */
  1254. ret = s390_ccw_store(sch);
  1255. /* Use current status. */
  1256. copy_schib_to_guest(schib, &sch->curr_status);
  1257. return ret;
  1258. }
  1259. static void copy_pmcw_from_guest(PMCW *dest, const PMCW *src)
  1260. {
  1261. int i;
  1262. dest->intparm = be32_to_cpu(src->intparm);
  1263. dest->flags = be16_to_cpu(src->flags);
  1264. dest->devno = be16_to_cpu(src->devno);
  1265. dest->lpm = src->lpm;
  1266. dest->pnom = src->pnom;
  1267. dest->lpum = src->lpum;
  1268. dest->pim = src->pim;
  1269. dest->mbi = be16_to_cpu(src->mbi);
  1270. dest->pom = src->pom;
  1271. dest->pam = src->pam;
  1272. for (i = 0; i < ARRAY_SIZE(dest->chpid); i++) {
  1273. dest->chpid[i] = src->chpid[i];
  1274. }
  1275. dest->chars = be32_to_cpu(src->chars);
  1276. }
  1277. static void copy_scsw_from_guest(SCSW *dest, const SCSW *src)
  1278. {
  1279. dest->flags = be16_to_cpu(src->flags);
  1280. dest->ctrl = be16_to_cpu(src->ctrl);
  1281. dest->cpa = be32_to_cpu(src->cpa);
  1282. dest->dstat = src->dstat;
  1283. dest->cstat = src->cstat;
  1284. dest->count = be16_to_cpu(src->count);
  1285. }
  1286. static void copy_schib_from_guest(SCHIB *dest, const SCHIB *src)
  1287. {
  1288. int i;
  1289. /*
  1290. * We copy the PMCW and SCSW in and out of local variables to
  1291. * avoid taking the address of members of a packed struct.
  1292. */
  1293. PMCW src_pmcw, dest_pmcw;
  1294. SCSW src_scsw, dest_scsw;
  1295. src_pmcw = src->pmcw;
  1296. copy_pmcw_from_guest(&dest_pmcw, &src_pmcw);
  1297. dest->pmcw = dest_pmcw;
  1298. src_scsw = src->scsw;
  1299. copy_scsw_from_guest(&dest_scsw, &src_scsw);
  1300. dest->scsw = dest_scsw;
  1301. dest->mba = be64_to_cpu(src->mba);
  1302. for (i = 0; i < ARRAY_SIZE(dest->mda); i++) {
  1303. dest->mda[i] = src->mda[i];
  1304. }
  1305. }
/*
 * Implement modify-subchannel (msch): copy the program-modifiable PMCW
 * fields and the mba from the guest-provided SCHIB into the current
 * status, provided the subchannel is idle (no status pending, no
 * function in progress).
 */
IOInstEnding css_do_msch(SubchDev *sch, const SCHIB *orig_schib)
{
    SCHIB *schib = &sch->curr_status;
    uint16_t oldflags;
    SCHIB schib_copy;
    if (!(schib->pmcw.flags & PMCW_FLAGS_MASK_DNV)) {
        return IOINST_CC_EXPECTED;
    }
    if (schib->scsw.ctrl & SCSW_STCTL_STATUS_PEND) {
        return IOINST_CC_STATUS_PRESENT;
    }
    if (schib->scsw.ctrl &
        (SCSW_FCTL_START_FUNC|SCSW_FCTL_HALT_FUNC|SCSW_FCTL_CLEAR_FUNC)) {
        return IOINST_CC_BUSY;
    }
    copy_schib_from_guest(&schib_copy, orig_schib);
    /* Only update the program-modifiable fields. */
    schib->pmcw.intparm = schib_copy.pmcw.intparm;
    oldflags = schib->pmcw.flags;
    schib->pmcw.flags &= ~(PMCW_FLAGS_MASK_ISC | PMCW_FLAGS_MASK_ENA |
                           PMCW_FLAGS_MASK_LM | PMCW_FLAGS_MASK_MME |
                           PMCW_FLAGS_MASK_MP);
    schib->pmcw.flags |= schib_copy.pmcw.flags &
        (PMCW_FLAGS_MASK_ISC | PMCW_FLAGS_MASK_ENA |
         PMCW_FLAGS_MASK_LM | PMCW_FLAGS_MASK_MME |
         PMCW_FLAGS_MASK_MP);
    schib->pmcw.lpm = schib_copy.pmcw.lpm;
    schib->pmcw.mbi = schib_copy.pmcw.mbi;
    schib->pmcw.pom = schib_copy.pmcw.pom;
    schib->pmcw.chars &= ~(PMCW_CHARS_MASK_MBFC | PMCW_CHARS_MASK_CSENSE);
    schib->pmcw.chars |= schib_copy.pmcw.chars &
        (PMCW_CHARS_MASK_MBFC | PMCW_CHARS_MASK_CSENSE);
    schib->mba = schib_copy.mba;
    /* Has the channel been disabled? */
    if (sch->disable_cb && (oldflags & PMCW_FLAGS_MASK_ENA) != 0
        && (schib->pmcw.flags & PMCW_FLAGS_MASK_ENA) == 0) {
        sch->disable_cb(sch);
    }
    return IOINST_CC_EXPECTED;
}
/*
 * Implement cancel-subchannel (xsch): withdraw a start function that
 * has not yet become subchannel-active.
 */
IOInstEnding css_do_xsch(SubchDev *sch)
{
    SCHIB *schib = &sch->curr_status;
    /* The subchannel must be device-number valid and enabled. */
    if (~(schib->pmcw.flags) & (PMCW_FLAGS_MASK_DNV | PMCW_FLAGS_MASK_ENA)) {
        return IOINST_CC_NOT_OPERATIONAL;
    }
    if (schib->scsw.ctrl & SCSW_CTRL_MASK_STCTL) {
        return IOINST_CC_STATUS_PRESENT;
    }
    /* Only a not-yet-active start function can be cancelled. */
    if (!(schib->scsw.ctrl & SCSW_CTRL_MASK_FCTL) ||
        ((schib->scsw.ctrl & SCSW_CTRL_MASK_FCTL) != SCSW_FCTL_START_FUNC) ||
        (!(schib->scsw.ctrl &
           (SCSW_ACTL_RESUME_PEND | SCSW_ACTL_START_PEND | SCSW_ACTL_SUSP))) ||
        (schib->scsw.ctrl & SCSW_ACTL_SUBCH_ACTIVE)) {
        return IOINST_CC_BUSY;
    }
    /* Cancel the current operation. */
    schib->scsw.ctrl &= ~(SCSW_FCTL_START_FUNC |
                          SCSW_ACTL_RESUME_PEND |
                          SCSW_ACTL_START_PEND |
                          SCSW_ACTL_SUSP);
    sch->channel_prog = 0x0;
    sch->last_cmd_valid = false;
    schib->scsw.dstat = 0;
    schib->scsw.cstat = 0;
    return IOINST_CC_EXPECTED;
}
/*
 * Implement clear-subchannel (csch): trigger the clear function and
 * run the subchannel work, reverting scsw.ctrl if the work fails.
 */
IOInstEnding css_do_csch(SubchDev *sch)
{
    SCHIB *schib = &sch->curr_status;
    uint16_t old_scsw_ctrl;
    IOInstEnding ccode;
    if (~(schib->pmcw.flags) & (PMCW_FLAGS_MASK_DNV | PMCW_FLAGS_MASK_ENA)) {
        return IOINST_CC_NOT_OPERATIONAL;
    }
    /*
     * Save the current scsw.ctrl in case CSCH fails and we need
     * to revert the scsw to the status quo ante.
     */
    old_scsw_ctrl = schib->scsw.ctrl;
    /* Trigger the clear function. */
    schib->scsw.ctrl &= ~(SCSW_CTRL_MASK_FCTL | SCSW_CTRL_MASK_ACTL);
    schib->scsw.ctrl |= SCSW_FCTL_CLEAR_FUNC | SCSW_ACTL_CLEAR_PEND;
    ccode = do_subchannel_work(sch);
    if (ccode != IOINST_CC_EXPECTED) {
        schib->scsw.ctrl = old_scsw_ctrl;
    }
    return ccode;
}
/*
 * Implement halt-subchannel (hsch): trigger the halt function and run
 * the subchannel work, reverting scsw.ctrl if the work fails.
 */
IOInstEnding css_do_hsch(SubchDev *sch)
{
    SCHIB *schib = &sch->curr_status;
    uint16_t old_scsw_ctrl;
    IOInstEnding ccode;
    if (~(schib->pmcw.flags) & (PMCW_FLAGS_MASK_DNV | PMCW_FLAGS_MASK_ENA)) {
        return IOINST_CC_NOT_OPERATIONAL;
    }
    if (((schib->scsw.ctrl & SCSW_CTRL_MASK_STCTL) == SCSW_STCTL_STATUS_PEND) ||
        (schib->scsw.ctrl & (SCSW_STCTL_PRIMARY |
                             SCSW_STCTL_SECONDARY |
                             SCSW_STCTL_ALERT))) {
        return IOINST_CC_STATUS_PRESENT;
    }
    if (schib->scsw.ctrl & (SCSW_FCTL_HALT_FUNC | SCSW_FCTL_CLEAR_FUNC)) {
        return IOINST_CC_BUSY;
    }
    /*
     * Save the current scsw.ctrl in case HSCH fails and we need
     * to revert the scsw to the status quo ante.
     */
    old_scsw_ctrl = schib->scsw.ctrl;
    /* Trigger the halt function. */
    schib->scsw.ctrl |= SCSW_FCTL_HALT_FUNC;
    schib->scsw.ctrl &= ~SCSW_FCTL_START_FUNC;
    /*
     * Withdraw an intermediate status-pending for a fully active
     * subchannel before halting.
     */
    if (((schib->scsw.ctrl & SCSW_CTRL_MASK_ACTL) ==
         (SCSW_ACTL_SUBCH_ACTIVE | SCSW_ACTL_DEVICE_ACTIVE)) &&
        ((schib->scsw.ctrl & SCSW_CTRL_MASK_STCTL) ==
         SCSW_STCTL_INTERMEDIATE)) {
        schib->scsw.ctrl &= ~SCSW_STCTL_STATUS_PEND;
    }
    schib->scsw.ctrl |= SCSW_ACTL_HALT_PEND;
    ccode = do_subchannel_work(sch);
    if (ccode != IOINST_CC_EXPECTED) {
        schib->scsw.ctrl = old_scsw_ctrl;
    }
    return ccode;
}
/*
 * Bump the measurement counter for a subchannel if measurement mode is
 * enabled: either the per-subchannel format-1 block (MBFC set) or a
 * 16-bit slot in the global measurement area.
 */
static void css_update_chnmon(SubchDev *sch)
{
    if (!(sch->curr_status.pmcw.flags & PMCW_FLAGS_MASK_MME)) {
        /* Not active. */
        return;
    }
    /* The counter is conveniently located at the beginning of the struct. */
    if (sch->curr_status.pmcw.chars & PMCW_CHARS_MASK_MBFC) {
        /* Format 1, per-subchannel area. */
        uint32_t count;
        count = address_space_ldl(&address_space_memory,
                                  sch->curr_status.mba,
                                  MEMTXATTRS_UNSPECIFIED,
                                  NULL);
        count++;
        address_space_stl(&address_space_memory, sch->curr_status.mba, count,
                          MEMTXATTRS_UNSPECIFIED, NULL);
    } else {
        /* Format 0, global area. */
        uint32_t offset;
        uint16_t count;
        /* Each measurement block is 32 bytes (mbi << 5). */
        offset = sch->curr_status.pmcw.mbi << 5;
        count = address_space_lduw(&address_space_memory,
                                   channel_subsys.chnmon_area + offset,
                                   MEMTXATTRS_UNSPECIFIED,
                                   NULL);
        count++;
        address_space_stw(&address_space_memory,
                          channel_subsys.chnmon_area + offset, count,
                          MEMTXATTRS_UNSPECIFIED, NULL);
    }
}
/*
 * Implement start-subchannel (ssch): latch the ORB, trigger the start
 * function and run the subchannel work, reverting the scsw if the
 * work fails.
 */
IOInstEnding css_do_ssch(SubchDev *sch, ORB *orb)
{
    SCHIB *schib = &sch->curr_status;
    uint16_t old_scsw_ctrl, old_scsw_flags;
    IOInstEnding ccode;
    if (~(schib->pmcw.flags) & (PMCW_FLAGS_MASK_DNV | PMCW_FLAGS_MASK_ENA)) {
        return IOINST_CC_NOT_OPERATIONAL;
    }
    if (schib->scsw.ctrl & SCSW_STCTL_STATUS_PEND) {
        return IOINST_CC_STATUS_PRESENT;
    }
    if (schib->scsw.ctrl & (SCSW_FCTL_START_FUNC |
                            SCSW_FCTL_HALT_FUNC |
                            SCSW_FCTL_CLEAR_FUNC)) {
        return IOINST_CC_BUSY;
    }
    /* If monitoring is active, update counter. */
    if (channel_subsys.chnmon_active) {
        css_update_chnmon(sch);
    }
    sch->orb = *orb;
    sch->channel_prog = orb->cpa;
    /*
     * Save the current scsw.ctrl and scsw.flags in case SSCH fails and we need
     * to revert the scsw to the status quo ante.
     */
    old_scsw_ctrl = schib->scsw.ctrl;
    old_scsw_flags = schib->scsw.flags;
    /* Trigger the start function. */
    schib->scsw.ctrl |= (SCSW_FCTL_START_FUNC | SCSW_ACTL_START_PEND);
    schib->scsw.flags &= ~SCSW_FLAGS_MASK_PNO;
    ccode = do_subchannel_work(sch);
    if (ccode != IOINST_CC_EXPECTED) {
        schib->scsw.ctrl = old_scsw_ctrl;
        schib->scsw.flags = old_scsw_flags;
    }
    return ccode;
}
/*
 * Convert an IRB to guest (big-endian) format.  *irb_len is set to the
 * length that is valid for the guest: the IRB without the emw, unless
 * extended measurements are both enabled and pending.
 */
static void copy_irb_to_guest(IRB *dest, const IRB *src, const PMCW *pmcw,
                              int *irb_len)
{
    int i;
    uint16_t stctl = src->scsw.ctrl & SCSW_CTRL_MASK_STCTL;
    uint16_t actl = src->scsw.ctrl & SCSW_CTRL_MASK_ACTL;
    copy_scsw_to_guest(&dest->scsw, &src->scsw);
    copy_esw_to_guest(&dest->esw, &src->esw);
    for (i = 0; i < ARRAY_SIZE(dest->ecw); i++) {
        dest->ecw[i] = cpu_to_be32(src->ecw[i]);
    }
    *irb_len = sizeof(*dest) - sizeof(dest->emw);
    /* extended measurements enabled? */
    if ((src->scsw.flags & SCSW_FLAGS_MASK_ESWF) ||
        !(pmcw->flags & PMCW_FLAGS_MASK_TF) ||
        !(pmcw->chars & PMCW_CHARS_MASK_XMWME)) {
        return;
    }
    /* extended measurements pending? */
    if (!(stctl & SCSW_STCTL_STATUS_PEND)) {
        return;
    }
    if ((stctl & SCSW_STCTL_PRIMARY) ||
        (stctl == SCSW_STCTL_SECONDARY) ||
        ((stctl & SCSW_STCTL_INTERMEDIATE) && (actl & SCSW_ACTL_SUSP))) {
        for (i = 0; i < ARRAY_SIZE(dest->emw); i++) {
            dest->emw[i] = cpu_to_be32(src->emw[i]);
        }
    }
    /* Include the emw in the length reported to the guest. */
    *irb_len = sizeof(*dest);
}
  1534. static void build_irb_sense_data(SubchDev *sch, IRB *irb)
  1535. {
  1536. int i;
  1537. /* Attention: sense_data is already BE! */
  1538. memcpy(irb->ecw, sch->sense_data, sizeof(sch->sense_data));
  1539. for (i = 0; i < ARRAY_SIZE(irb->ecw); i++) {
  1540. irb->ecw[i] = be32_to_cpu(irb->ecw[i]);
  1541. }
  1542. }
/*
 * Fill in the IRB for a passthrough subchannel from the state the host
 * device provided.
 */
void build_irb_passthrough(SubchDev *sch, IRB *irb)
{
    /* Copy ESW from hardware */
    irb->esw = sch->esw;
    /*
     * If (irb->esw.erw & ESW_ERW_SENSE) is true, then the contents
     * of the ECW is sense data. If false, then it is model-dependent
     * information. Either way, copy it into the IRB for the guest to
     * read/decide what to do with.
     */
    build_irb_sense_data(sch, irb);
}
/*
 * IRB construction callback for virtual subchannels: synthesize an ESW
 * (and, for unit check with concurrent sense, the sense data in the
 * ECW) matching the status currently pending on the subchannel.
 */
void build_irb_virtual(SubchDev *sch, IRB *irb)
{
    SCHIB *schib = &sch->curr_status;
    uint16_t stctl = schib->scsw.ctrl & SCSW_CTRL_MASK_STCTL;

    if (stctl & SCSW_STCTL_STATUS_PEND) {
        if (schib->scsw.cstat & (SCSW_CSTAT_DATA_CHECK |
                        SCSW_CSTAT_CHN_CTRL_CHK |
                        SCSW_CSTAT_INTF_CTRL_CHK)) {
            irb->scsw.flags |= SCSW_FLAGS_MASK_ESWF;
            /*
             * NOTE(review): the word0 constants presumably encode the
             * architected ESW format fields for these conditions --
             * confirm against the PoP before changing them.
             */
            irb->esw.word0 = 0x04804000;
        } else {
            irb->esw.word0 = 0x00800000;
        }
        /* If a unit check is pending, copy sense data. */
        if ((schib->scsw.dstat & SCSW_DSTAT_UNIT_CHECK) &&
            (schib->pmcw.chars & PMCW_CHARS_MASK_CSENSE)) {
            irb->scsw.flags |= SCSW_FLAGS_MASK_ESWF | SCSW_FLAGS_MASK_ECTL;
            build_irb_sense_data(sch, irb);
            irb->esw.erw = ESW_ERW_SENSE | (sizeof(sch->sense_data) << 8);
        }
    }
}
/*
 * TEST SUBCHANNEL, part one: build the IRB for @sch and store it in
 * guest format into @target_irb, setting @irb_len to the number of
 * valid bytes.
 *
 * Returns 3 (cc 3) if the subchannel is not device-number-valid and
 * enabled, 0 if status was pending, and 1 otherwise.
 */
int css_do_tsch_get_irb(SubchDev *sch, IRB *target_irb, int *irb_len)
{
    SCHIB *schib = &sch->curr_status;
    PMCW p;
    uint16_t stctl;
    IRB irb;

    if (~(schib->pmcw.flags) & (PMCW_FLAGS_MASK_DNV | PMCW_FLAGS_MASK_ENA)) {
        return 3;
    }

    stctl = schib->scsw.ctrl & SCSW_CTRL_MASK_STCTL;

    /* Prepare the irb for the guest. */
    memset(&irb, 0, sizeof(IRB));

    /* Copy scsw from current status. */
    irb.scsw = schib->scsw;

    /* Build other IRB data, if necessary */
    if (sch->irb_cb) {
        sch->irb_cb(sch, &irb);
    }

    /* Store the irb to the guest. */
    p = schib->pmcw;
    copy_irb_to_guest(target_irb, &irb, &p, irb_len);

    return ((stctl & SCSW_STCTL_STATUS_PEND) == 0);
}
/*
 * TEST SUBCHANNEL, part two: clear the conditions consumed by the just
 * performed status retrieval (status-, function- and activity-control
 * bits and pending sense data), depending on which status was pending.
 */
void css_do_tsch_update_subch(SubchDev *sch)
{
    SCHIB *schib = &sch->curr_status;
    uint16_t stctl;
    uint16_t fctl;
    uint16_t actl;

    stctl = schib->scsw.ctrl & SCSW_CTRL_MASK_STCTL;
    fctl = schib->scsw.ctrl & SCSW_CTRL_MASK_FCTL;
    actl = schib->scsw.ctrl & SCSW_CTRL_MASK_ACTL;

    /* Clear conditions on subchannel, if applicable. */
    if (stctl & SCSW_STCTL_STATUS_PEND) {
        schib->scsw.ctrl &= ~SCSW_CTRL_MASK_STCTL;
        if ((stctl != (SCSW_STCTL_INTERMEDIATE | SCSW_STCTL_STATUS_PEND)) ||
            ((fctl & SCSW_FCTL_HALT_FUNC) &&
             (actl & SCSW_ACTL_SUSP))) {
            schib->scsw.ctrl &= ~SCSW_CTRL_MASK_FCTL;
        }
        if (stctl != (SCSW_STCTL_INTERMEDIATE | SCSW_STCTL_STATUS_PEND)) {
            /* Anything but intermediate status clears all activity. */
            schib->scsw.flags &= ~SCSW_FLAGS_MASK_PNO;
            schib->scsw.ctrl &= ~(SCSW_ACTL_RESUME_PEND |
                                  SCSW_ACTL_START_PEND |
                                  SCSW_ACTL_HALT_PEND |
                                  SCSW_ACTL_CLEAR_PEND |
                                  SCSW_ACTL_SUSP);
        } else {
            /* Intermediate status: only clean up a suspended start. */
            if ((actl & SCSW_ACTL_SUSP) &&
                (fctl & SCSW_FCTL_START_FUNC)) {
                schib->scsw.flags &= ~SCSW_FLAGS_MASK_PNO;
                if (fctl & SCSW_FCTL_HALT_FUNC) {
                    schib->scsw.ctrl &= ~(SCSW_ACTL_RESUME_PEND |
                                          SCSW_ACTL_START_PEND |
                                          SCSW_ACTL_HALT_PEND |
                                          SCSW_ACTL_CLEAR_PEND |
                                          SCSW_ACTL_SUSP);
                } else {
                    schib->scsw.ctrl &= ~SCSW_ACTL_RESUME_PEND;
                }
            }
        }
        /* Clear pending sense data. */
        if (schib->pmcw.chars & PMCW_CHARS_MASK_CSENSE) {
            memset(sch->sense_data, 0 , sizeof(sch->sense_data));
        }
    }
}
  1645. static void copy_crw_to_guest(CRW *dest, const CRW *src)
  1646. {
  1647. dest->flags = cpu_to_be16(src->flags);
  1648. dest->rsid = cpu_to_be16(src->rsid);
  1649. }
/*
 * STORE CHANNEL REPORT WORD: pop the oldest pending CRW into @crw
 * (guest format).
 *
 * Returns 0 if a CRW was stored, 1 (with @crw zeroed) if the queue was
 * empty; in the latter case CRW machine checks are re-armed so that the
 * next queued CRW raises one again.
 */
int css_do_stcrw(CRW *crw)
{
    CrwContainer *crw_cont;
    int ret;

    crw_cont = QTAILQ_FIRST(&channel_subsys.pending_crws);
    if (crw_cont) {
        QTAILQ_REMOVE(&channel_subsys.pending_crws, crw_cont, sibling);
        copy_crw_to_guest(crw, &crw_cont->crw);
        g_free(crw_cont);
        ret = 0;
    } else {
        /* List was empty, turn crw machine checks on again. */
        memset(crw, 0, sizeof(*crw));
        channel_subsys.do_crw_mchk = true;
        ret = 1;
    }

    return ret;
}
  1668. static void copy_crw_from_guest(CRW *dest, const CRW *src)
  1669. {
  1670. dest->flags = be16_to_cpu(src->flags);
  1671. dest->rsid = be16_to_cpu(src->rsid);
  1672. }
  1673. void css_undo_stcrw(CRW *crw)
  1674. {
  1675. CrwContainer *crw_cont;
  1676. crw_cont = g_try_new0(CrwContainer, 1);
  1677. if (!crw_cont) {
  1678. channel_subsys.crws_lost = true;
  1679. return;
  1680. }
  1681. copy_crw_from_guest(&crw_cont->crw, crw);
  1682. QTAILQ_INSERT_HEAD(&channel_subsys.pending_crws, crw_cont, sibling);
  1683. }
  1684. int css_collect_chp_desc(int m, uint8_t cssid, uint8_t f_chpid, uint8_t l_chpid,
  1685. int rfmt, void *buf)
  1686. {
  1687. int i, desc_size;
  1688. uint32_t words[8];
  1689. uint32_t chpid_type_word;
  1690. CssImage *css;
  1691. if (!m && !cssid) {
  1692. css = channel_subsys.css[channel_subsys.default_cssid];
  1693. } else {
  1694. css = channel_subsys.css[cssid];
  1695. }
  1696. if (!css) {
  1697. return 0;
  1698. }
  1699. desc_size = 0;
  1700. for (i = f_chpid; i <= l_chpid; i++) {
  1701. if (css->chpids[i].in_use) {
  1702. chpid_type_word = 0x80000000 | (css->chpids[i].type << 8) | i;
  1703. if (rfmt == 0) {
  1704. words[0] = cpu_to_be32(chpid_type_word);
  1705. words[1] = 0;
  1706. memcpy(buf + desc_size, words, 8);
  1707. desc_size += 8;
  1708. } else if (rfmt == 1) {
  1709. words[0] = cpu_to_be32(chpid_type_word);
  1710. words[1] = 0;
  1711. words[2] = 0;
  1712. words[3] = 0;
  1713. words[4] = 0;
  1714. words[5] = 0;
  1715. words[6] = 0;
  1716. words[7] = 0;
  1717. memcpy(buf + desc_size, words, 32);
  1718. desc_size += 32;
  1719. }
  1720. }
  1721. }
  1722. return desc_size;
  1723. }
  1724. void css_do_schm(uint8_t mbk, int update, int dct, uint64_t mbo)
  1725. {
  1726. /* dct is currently ignored (not really meaningful for our devices) */
  1727. /* TODO: Don't ignore mbk. */
  1728. if (update && !channel_subsys.chnmon_active) {
  1729. /* Enable measuring. */
  1730. channel_subsys.chnmon_area = mbo;
  1731. channel_subsys.chnmon_active = true;
  1732. }
  1733. if (!update && channel_subsys.chnmon_active) {
  1734. /* Disable measuring. */
  1735. channel_subsys.chnmon_area = 0;
  1736. channel_subsys.chnmon_active = false;
  1737. }
  1738. }
/*
 * RESUME SUBCHANNEL: flag the suspended start function on @sch as
 * resume-pending and kick off subchannel processing.
 *
 * Returns cc 3 if the subchannel is not enabled/devno-valid, cc 1 if
 * status is pending, cc 2 if there is no suspended start function (or
 * a resume is already pending), otherwise the result of
 * do_subchannel_work().
 */
IOInstEnding css_do_rsch(SubchDev *sch)
{
    SCHIB *schib = &sch->curr_status;

    if (~(schib->pmcw.flags) & (PMCW_FLAGS_MASK_DNV | PMCW_FLAGS_MASK_ENA)) {
        return IOINST_CC_NOT_OPERATIONAL;
    }

    if (schib->scsw.ctrl & SCSW_STCTL_STATUS_PEND) {
        return IOINST_CC_STATUS_PRESENT;
    }

    if (((schib->scsw.ctrl & SCSW_CTRL_MASK_FCTL) != SCSW_FCTL_START_FUNC) ||
        (schib->scsw.ctrl & SCSW_ACTL_RESUME_PEND) ||
        (!(schib->scsw.ctrl & SCSW_ACTL_SUSP))) {
        return IOINST_CC_BUSY;
    }

    /* If monitoring is active, update counter. */
    if (channel_subsys.chnmon_active) {
        css_update_chnmon(sch);
    }

    schib->scsw.ctrl |= SCSW_ACTL_RESUME_PEND;
    return do_subchannel_work(sch);
}
/*
 * RESET CHANNEL PATH: emulate a reset of @chpid in css image @cssid.
 *
 * Only virtual channel paths are supported; since nothing actually
 * travels over them, "resetting" amounts to queueing the architected
 * channel-path-initialized CRW(s).
 *
 * Returns 0 on success, -EINVAL for a bad/unknown cssid, -ENODEV for an
 * unused or non-virtual chpid.
 */
int css_do_rchp(uint8_t cssid, uint8_t chpid)
{
    uint8_t real_cssid;

    if (cssid > channel_subsys.max_cssid) {
        return -EINVAL;
    }
    if (channel_subsys.max_cssid == 0) {
        real_cssid = channel_subsys.default_cssid;
    } else {
        real_cssid = cssid;
    }
    if (!channel_subsys.css[real_cssid]) {
        return -EINVAL;
    }

    if (!channel_subsys.css[real_cssid]->chpids[chpid].in_use) {
        return -ENODEV;
    }

    if (!channel_subsys.css[real_cssid]->chpids[chpid].is_virtual) {
        fprintf(stderr,
                "rchp unsupported for non-virtual chpid %x.%02x!\n",
                real_cssid, chpid);
        return -ENODEV;
    }

    /* We don't really use a channel path, so we're done here. */
    css_queue_crw(CRW_RSC_CHP, CRW_ERC_INIT, 1,
                  channel_subsys.max_cssid > 0 ? 1 : 0, chpid);
    if (channel_subsys.max_cssid > 0) {
        /* A second, chained CRW carries the cssid. */
        css_queue_crw(CRW_RSC_CHP, CRW_ERC_INIT, 1, 0, real_cssid << 8);
    }
    return 0;
}
  1791. bool css_schid_final(int m, uint8_t cssid, uint8_t ssid, uint16_t schid)
  1792. {
  1793. SubchSet *set;
  1794. uint8_t real_cssid;
  1795. real_cssid = (!m && (cssid == 0)) ? channel_subsys.default_cssid : cssid;
  1796. if (ssid > MAX_SSID ||
  1797. !channel_subsys.css[real_cssid] ||
  1798. !channel_subsys.css[real_cssid]->sch_set[ssid]) {
  1799. return true;
  1800. }
  1801. set = channel_subsys.css[real_cssid]->sch_set[ssid];
  1802. return schid > find_last_bit(set->schids_used,
  1803. (MAX_SCHID + 1) / sizeof(unsigned long));
  1804. }
  1805. unsigned int css_find_free_chpid(uint8_t cssid)
  1806. {
  1807. CssImage *css = channel_subsys.css[cssid];
  1808. unsigned int chpid;
  1809. if (!css) {
  1810. return MAX_CHPID + 1;
  1811. }
  1812. for (chpid = 0; chpid <= MAX_CHPID; chpid++) {
  1813. /* skip reserved chpid */
  1814. if (chpid == VIRTIO_CCW_CHPID) {
  1815. continue;
  1816. }
  1817. if (!css->chpids[chpid].in_use) {
  1818. return chpid;
  1819. }
  1820. }
  1821. return MAX_CHPID + 1;
  1822. }
  1823. static int css_add_chpid(uint8_t cssid, uint8_t chpid, uint8_t type,
  1824. bool is_virt)
  1825. {
  1826. CssImage *css;
  1827. trace_css_chpid_add(cssid, chpid, type);
  1828. css = channel_subsys.css[cssid];
  1829. if (!css) {
  1830. return -EINVAL;
  1831. }
  1832. if (css->chpids[chpid].in_use) {
  1833. return -EEXIST;
  1834. }
  1835. css->chpids[chpid].in_use = 1;
  1836. css->chpids[chpid].type = type;
  1837. css->chpids[chpid].is_virtual = is_virt;
  1838. css_generate_chp_crws(cssid, chpid);
  1839. return 0;
  1840. }
/*
 * Initialize the SCHIB of virtual subchannel @sch for a device behind
 * (virtual) channel path @chpid of type @type, registering the chpid
 * with the css image on first use. The device is presented with a
 * single channel path (pim/pam = 0x80).
 */
void css_sch_build_virtual_schib(SubchDev *sch, uint8_t chpid, uint8_t type)
{
    SCHIB *schib = &sch->curr_status;
    int i;
    CssImage *css = channel_subsys.css[sch->cssid];

    assert(css != NULL);
    memset(&schib->pmcw, 0, sizeof(PMCW));
    schib->pmcw.flags |= PMCW_FLAGS_MASK_DNV;
    schib->pmcw.devno = sch->devno;
    /* single path */
    schib->pmcw.pim = 0x80;
    schib->pmcw.pom = 0xff;
    schib->pmcw.pam = 0x80;
    schib->pmcw.chpid[0] = chpid;
    if (!css->chpids[chpid].in_use) {
        css_add_chpid(sch->cssid, chpid, type, true);
    }

    memset(&schib->scsw, 0, sizeof(SCSW));
    schib->mba = 0;
    for (i = 0; i < ARRAY_SIZE(schib->mda); i++) {
        schib->mda[i] = 0;
    }
}
  1864. SubchDev *css_find_subch(uint8_t m, uint8_t cssid, uint8_t ssid, uint16_t schid)
  1865. {
  1866. uint8_t real_cssid;
  1867. real_cssid = (!m && (cssid == 0)) ? channel_subsys.default_cssid : cssid;
  1868. if (!channel_subsys.css[real_cssid]) {
  1869. return NULL;
  1870. }
  1871. if (!channel_subsys.css[real_cssid]->sch_set[ssid]) {
  1872. return NULL;
  1873. }
  1874. return channel_subsys.css[real_cssid]->sch_set[ssid]->sch[schid];
  1875. }
/**
 * Return free device number in subchannel set.
 *
 * Return index of the first free device number in the subchannel set
 * identified by @p cssid and @p ssid, beginning the search at @p
 * start and wrapping around at MAX_DEVNO. Return a value exceeding
 * MAX_DEVNO if there are no free device numbers in the subchannel
 * set.
 */
static uint32_t css_find_free_devno(uint8_t cssid, uint8_t ssid,
                                    uint16_t start)
{
    uint32_t round;

    /*
     * NOTE(review): the modulo wraps at MAX_DEVNO, so devno == MAX_DEVNO
     * itself is never probed (and low devnos are probed twice) --
     * confirm whether the highest devno is deliberately excluded.
     */
    for (round = 0; round <= MAX_DEVNO; round++) {
        uint16_t devno = (start + round) % MAX_DEVNO;

        if (!css_devno_used(cssid, ssid, devno)) {
            return devno;
        }
    }
    return MAX_DEVNO + 1;
}
  1897. /**
  1898. * Return first free subchannel (id) in subchannel set.
  1899. *
  1900. * Return index of the first free subchannel in the subchannel set
  1901. * identified by @p cssid and @p ssid, if there is any. Return a value
  1902. * exceeding MAX_SCHID if there are no free subchannels in the
  1903. * subchannel set.
  1904. */
  1905. static uint32_t css_find_free_subch(uint8_t cssid, uint8_t ssid)
  1906. {
  1907. uint32_t schid;
  1908. for (schid = 0; schid <= MAX_SCHID; schid++) {
  1909. if (!css_find_subch(1, cssid, ssid, schid)) {
  1910. return schid;
  1911. }
  1912. }
  1913. return MAX_SCHID + 1;
  1914. }
  1915. /**
  1916. * Return first free subchannel (id) in subchannel set for a device number
  1917. *
  1918. * Verify the device number @p devno is not used yet in the subchannel
  1919. * set identified by @p cssid and @p ssid. Set @p schid to the index
  1920. * of the first free subchannel in the subchannel set, if there is
  1921. * any. Return true if everything succeeded and false otherwise.
  1922. */
  1923. static bool css_find_free_subch_for_devno(uint8_t cssid, uint8_t ssid,
  1924. uint16_t devno, uint16_t *schid,
  1925. Error **errp)
  1926. {
  1927. uint32_t free_schid;
  1928. assert(schid);
  1929. if (css_devno_used(cssid, ssid, devno)) {
  1930. error_setg(errp, "Device %x.%x.%04x already exists",
  1931. cssid, ssid, devno);
  1932. return false;
  1933. }
  1934. free_schid = css_find_free_subch(cssid, ssid);
  1935. if (free_schid > MAX_SCHID) {
  1936. error_setg(errp, "No free subchannel found for %x.%x.%04x",
  1937. cssid, ssid, devno);
  1938. return false;
  1939. }
  1940. *schid = free_schid;
  1941. return true;
  1942. }
  1943. /**
  1944. * Return first free subchannel (id) and device number
  1945. *
  1946. * Locate the first free subchannel and first free device number in
  1947. * any of the subchannel sets of the channel subsystem identified by
  1948. * @p cssid. Return false if no free subchannel / device number could
  1949. * be found. Otherwise set @p ssid, @p devno and @p schid to identify
  1950. * the available subchannel and device number and return true.
  1951. *
  1952. * May modify @p ssid, @p devno and / or @p schid even if no free
  1953. * subchannel / device number could be found.
  1954. */
  1955. static bool css_find_free_subch_and_devno(uint8_t cssid, uint8_t *ssid,
  1956. uint16_t *devno, uint16_t *schid,
  1957. Error **errp)
  1958. {
  1959. uint32_t free_schid, free_devno;
  1960. assert(ssid && devno && schid);
  1961. for (*ssid = 0; *ssid <= MAX_SSID; (*ssid)++) {
  1962. free_schid = css_find_free_subch(cssid, *ssid);
  1963. if (free_schid > MAX_SCHID) {
  1964. continue;
  1965. }
  1966. free_devno = css_find_free_devno(cssid, *ssid, free_schid);
  1967. if (free_devno > MAX_DEVNO) {
  1968. continue;
  1969. }
  1970. *schid = free_schid;
  1971. *devno = free_devno;
  1972. return true;
  1973. }
  1974. error_setg(errp, "Virtual channel subsystem is full!");
  1975. return false;
  1976. }
  1977. bool css_subch_visible(SubchDev *sch)
  1978. {
  1979. if (sch->ssid > channel_subsys.max_ssid) {
  1980. return false;
  1981. }
  1982. if (sch->cssid != channel_subsys.default_cssid) {
  1983. return (channel_subsys.max_cssid > 0);
  1984. }
  1985. return true;
  1986. }
  1987. bool css_present(uint8_t cssid)
  1988. {
  1989. return (channel_subsys.css[cssid] != NULL);
  1990. }
  1991. bool css_devno_used(uint8_t cssid, uint8_t ssid, uint16_t devno)
  1992. {
  1993. if (!channel_subsys.css[cssid]) {
  1994. return false;
  1995. }
  1996. if (!channel_subsys.css[cssid]->sch_set[ssid]) {
  1997. return false;
  1998. }
  1999. return !!test_bit(devno,
  2000. channel_subsys.css[cssid]->sch_set[ssid]->devnos_used);
  2001. }
/*
 * (De)assign subchannel @schid / device number @devno in set
 * (@cssid, @ssid): a non-NULL @sch registers the subchannel and marks
 * schid/devno as used, a NULL @sch unregisters them. The subchannel
 * set is allocated on first use.
 */
void css_subch_assign(uint8_t cssid, uint8_t ssid, uint16_t schid,
                      uint16_t devno, SubchDev *sch)
{
    CssImage *css;
    SubchSet *s_set;

    trace_css_assign_subch(sch ? "assign" : "deassign", cssid, ssid, schid,
                           devno);
    if (!channel_subsys.css[cssid]) {
        fprintf(stderr,
                "Suspicious call to %s (%x.%x.%04x) for non-existing css!\n",
                __func__, cssid, ssid, schid);
        return;
    }
    css = channel_subsys.css[cssid];

    if (!css->sch_set[ssid]) {
        css->sch_set[ssid] = g_new0(SubchSet, 1);
    }
    s_set = css->sch_set[ssid];

    s_set->sch[schid] = sch;
    if (sch) {
        set_bit(schid, s_set->schids_used);
        set_bit(devno, s_set->devnos_used);
    } else {
        clear_bit(schid, s_set->schids_used);
        clear_bit(devno, s_set->devnos_used);
    }
}
/*
 * Append @crw to the queue of pending channel report words, injecting a
 * CRW machine check if one is currently armed. On allocation failure
 * the CRW is dropped and the overflow condition recorded, so a later
 * CRW will carry the R (overflow) bit.
 */
void css_crw_add_to_queue(CRW crw)
{
    CrwContainer *crw_cont;

    trace_css_crw((crw.flags & CRW_FLAGS_MASK_RSC) >> 8,
                  crw.flags & CRW_FLAGS_MASK_ERC,
                  crw.rsid,
                  (crw.flags & CRW_FLAGS_MASK_C) ? "(chained)" : "");

    /* TODO: Maybe use a static crw pool? */
    crw_cont = g_try_new0(CrwContainer, 1);
    if (!crw_cont) {
        channel_subsys.crws_lost = true;
        return;
    }

    crw_cont->crw = crw;
    QTAILQ_INSERT_TAIL(&channel_subsys.pending_crws, crw_cont, sibling);

    if (channel_subsys.do_crw_mchk) {
        channel_subsys.do_crw_mchk = false;
        /* Inject crw pending machine check. */
        s390_crw_mchk();
    }
}
  2050. void css_queue_crw(uint8_t rsc, uint8_t erc, int solicited,
  2051. int chain, uint16_t rsid)
  2052. {
  2053. CRW crw;
  2054. crw.flags = (rsc << 8) | erc;
  2055. if (solicited) {
  2056. crw.flags |= CRW_FLAGS_MASK_S;
  2057. }
  2058. if (chain) {
  2059. crw.flags |= CRW_FLAGS_MASK_C;
  2060. }
  2061. crw.rsid = rsid;
  2062. if (channel_subsys.crws_lost) {
  2063. crw.flags |= CRW_FLAGS_MASK_R;
  2064. channel_subsys.crws_lost = false;
  2065. }
  2066. css_crw_add_to_queue(crw);
  2067. }
/*
 * Queue the CRW(s) announcing a subchannel change (hot(un)plug) for
 * (@cssid, @ssid, @schid).
 *
 * Nothing is queued for coldplugged additions, or when the guest has
 * not enabled the affected subchannel set / channel subsystem. When
 * multiple subchannel sets or channel subsystems are in use, a second,
 * chained CRW carrying the cssid/ssid is queued as well.
 */
void css_generate_sch_crws(uint8_t cssid, uint8_t ssid, uint16_t schid,
                           int hotplugged, int add)
{
    uint8_t guest_cssid;
    bool chain_crw;

    if (add && !hotplugged) {
        return;
    }
    if (channel_subsys.max_cssid == 0) {
        /* Default cssid shows up as 0. */
        guest_cssid = (cssid == channel_subsys.default_cssid) ? 0 : cssid;
    } else {
        /* Show real cssid to the guest. */
        guest_cssid = cssid;
    }
    /*
     * Only notify for higher subchannel sets/channel subsystems if the
     * guest has enabled it.
     */
    if ((ssid > channel_subsys.max_ssid) ||
        (guest_cssid > channel_subsys.max_cssid) ||
        ((channel_subsys.max_cssid == 0) &&
         (cssid != channel_subsys.default_cssid))) {
        return;
    }
    chain_crw = (channel_subsys.max_ssid > 0) ||
        (channel_subsys.max_cssid > 0);
    css_queue_crw(CRW_RSC_SUBCH, CRW_ERC_IPI, 0, chain_crw ? 1 : 0, schid);
    if (chain_crw) {
        css_queue_crw(CRW_RSC_SUBCH, CRW_ERC_IPI, 0, 0,
                      (guest_cssid << 8) | (ssid << 4));
    }
    /* RW_ERC_IPI --> clear pending interrupts */
    css_clear_io_interrupt(css_do_build_subchannel_id(cssid, ssid), schid);
}
/* Queue CRWs announcing a channel path change; not implemented yet. */
void css_generate_chp_crws(uint8_t cssid, uint8_t chpid)
{
    /* TODO */
}
  2107. void css_generate_css_crws(uint8_t cssid)
  2108. {
  2109. if (!channel_subsys.sei_pending) {
  2110. css_queue_crw(CRW_RSC_CSS, CRW_ERC_EVENT, 0, 0, cssid);
  2111. }
  2112. channel_subsys.sei_pending = true;
  2113. }
/* Forget the outstanding store-event-information condition. */
void css_clear_sei_pending(void)
{
    channel_subsys.sei_pending = false;
}
/* Enable the multiple-channel-subsystems-extended facility. */
int css_enable_mcsse(void)
{
    trace_css_enable_facility("mcsse");
    channel_subsys.max_cssid = MAX_CSSID;
    return 0;
}
/* Enable the multiple-subchannel-sets facility. */
int css_enable_mss(void)
{
    trace_css_enable_facility("mss");
    channel_subsys.max_ssid = MAX_SSID;
    return 0;
}
/*
 * Reset subchannel @sch to its initial state, running the device's
 * disable callback first if the subchannel was enabled.
 */
void css_reset_sch(SubchDev *sch)
{
    SCHIB *schib = &sch->curr_status;

    if ((schib->pmcw.flags & PMCW_FLAGS_MASK_ENA) != 0 && sch->disable_cb) {
        sch->disable_cb(sch);
    }

    schib->pmcw.intparm = 0;
    schib->pmcw.flags &= ~(PMCW_FLAGS_MASK_ISC | PMCW_FLAGS_MASK_ENA |
                  PMCW_FLAGS_MASK_LM | PMCW_FLAGS_MASK_MME |
                  PMCW_FLAGS_MASK_MP | PMCW_FLAGS_MASK_TF);
    schib->pmcw.flags |= PMCW_FLAGS_MASK_DNV;
    schib->pmcw.devno = sch->devno;
    /* single path */
    schib->pmcw.pim = 0x80;
    schib->pmcw.lpm = schib->pmcw.pim;
    schib->pmcw.pnom = 0;
    schib->pmcw.lpum = 0;
    schib->pmcw.mbi = 0;
    schib->pmcw.pom = 0xff;
    schib->pmcw.pam = 0x80;
    schib->pmcw.chars &= ~(PMCW_CHARS_MASK_MBFC | PMCW_CHARS_MASK_XMWME |
                  PMCW_CHARS_MASK_CSENSE);
    memset(&schib->scsw, 0, sizeof(schib->scsw));
    schib->mba = 0;

    sch->channel_prog = 0x0;
    sch->last_cmd_valid = false;
    sch->thinint_active = false;
}
/*
 * Reset the channel subsystem as a whole: stop channel measurements,
 * drop all pending CRWs and return the maximum css/ssid ids to their
 * power-on defaults.
 */
void css_reset(void)
{
    CrwContainer *crw_cont;

    /* Clean up monitoring. */
    channel_subsys.chnmon_active = false;
    channel_subsys.chnmon_area = 0;

    /* Clear pending CRWs. */
    while ((crw_cont = QTAILQ_FIRST(&channel_subsys.pending_crws))) {
        QTAILQ_REMOVE(&channel_subsys.pending_crws, crw_cont, sibling);
        g_free(crw_cont);
    }
    channel_subsys.sei_pending = false;
    channel_subsys.do_crw_mchk = true;
    channel_subsys.crws_lost = false;

    /* Reset maximum ids. */
    channel_subsys.max_cssid = 0;
    channel_subsys.max_ssid = 0;
}
/*
 * QOM property getter: format a CssDevId as "<cssid>.<ssid>.<devid>"
 * (with the leading zero of a one-digit cssid dropped), or "<unset>"
 * if the id has not been configured.
 */
static void get_css_devid(Object *obj, Visitor *v, const char *name,
                          void *opaque, Error **errp)
{
    const Property *prop = opaque;
    CssDevId *dev_id = object_field_prop_ptr(obj, prop);
    char buffer[] = "xx.x.xxxx";
    char *p = buffer;
    int r;

    if (dev_id->valid) {
        r = snprintf(buffer, sizeof(buffer), "%02x.%1x.%04x", dev_id->cssid,
                     dev_id->ssid, dev_id->devid);
        assert(r == sizeof(buffer) - 1);

        /* drop leading zero */
        if (dev_id->cssid <= 0xf) {
            p++;
        }
    } else {
        snprintf(buffer, sizeof(buffer), "<unset>");
    }
    visit_type_str(v, name, &p, errp);
}
  2196. /*
  2197. * parse <cssid>.<ssid>.<devid> and assert valid range for cssid/ssid
  2198. */
static void set_css_devid(Object *obj, Visitor *v, const char *name,
                          void *opaque, Error **errp)
{
    const Property *prop = opaque;
    CssDevId *dev_id = object_field_prop_ptr(obj, prop);
    char *str;
    int num, n1, n2;
    unsigned int cssid, ssid, devid;

    if (!visit_type_str(v, name, &str, errp)) {
        return;
    }

    /*
     * The %n captures verify that the devid part is exactly ".xxxx"
     * (five characters) and that no trailing garbage follows. n1/n2
     * are only read when all three conversions succeeded (num == 3),
     * thanks to short-circuit evaluation.
     */
    num = sscanf(str, "%2x.%1x%n.%4x%n", &cssid, &ssid, &n1, &devid, &n2);
    if (num != 3 || (n2 - n1) != 5 || strlen(str) != n2) {
        error_set_from_qdev_prop_error(errp, EINVAL, obj, name, str);
        goto out;
    }
    if ((cssid > MAX_CSSID) || (ssid > MAX_SSID)) {
        error_setg(errp, "Invalid cssid or ssid: cssid %x, ssid %x",
                   cssid, ssid);
        goto out;
    }

    dev_id->cssid = cssid;
    dev_id->ssid = ssid;
    dev_id->devid = devid;
    dev_id->valid = true;

out:
    g_free(str);
}
/* Read-write bus id property ("cssid.ssid.devid", e.g. fe.1.23ab). */
const PropertyInfo css_devid_propinfo = {
    .type = "str",
    .description = "Identifier of an I/O device in the channel "
                   "subsystem, example: fe.1.23ab",
    .get = get_css_devid,
    .set = set_css_devid,
};
/* Read-only variant of the bus id property (no setter installed). */
const PropertyInfo css_devid_ro_propinfo = {
    .type = "str",
    .description = "Read-only identifier of an I/O device in the channel "
                   "subsystem, example: fe.1.23ab",
    .get = get_css_devid,
};
/*
 * Create a SubchDev for the given bus id -- or, if @bus_id is not
 * valid, pick a free spot anywhere in the virtual channel subsystem --
 * creating css images on demand and registering the new subchannel.
 *
 * Returns NULL (with @errp set) if no free subchannel/devno exists.
 */
SubchDev *css_create_sch(CssDevId bus_id, Error **errp)
{
    uint16_t schid = 0;
    SubchDev *sch;

    if (bus_id.valid) {
        if (!channel_subsys.css[bus_id.cssid]) {
            css_create_css_image(bus_id.cssid, false);
        }

        if (!css_find_free_subch_for_devno(bus_id.cssid, bus_id.ssid,
                                           bus_id.devid, &schid, errp)) {
            return NULL;
        }
    } else {
        for (bus_id.cssid = channel_subsys.default_cssid;;) {
            if (!channel_subsys.css[bus_id.cssid]) {
                css_create_css_image(bus_id.cssid, false);
            }

            if (css_find_free_subch_and_devno(bus_id.cssid, &bus_id.ssid,
                                              &bus_id.devid, &schid,
                                              NULL)) {
                break;
            }
            /*
             * NOTE(review): the wrap uses % MAX_CSSID, so cssid ==
             * MAX_CSSID is never tried -- confirm whether that id is
             * deliberately excluded.
             */
            bus_id.cssid = (bus_id.cssid + 1) % MAX_CSSID;
            if (bus_id.cssid == channel_subsys.default_cssid) {
                error_setg(errp, "Virtual channel subsystem is full!");
                return NULL;
            }
        }
    }

    sch = g_new0(SubchDev, 1);
    sch->cssid = bus_id.cssid;
    sch->ssid = bus_id.ssid;
    sch->devno = bus_id.devid;
    sch->schid = schid;
    css_subch_assign(sch->cssid, sch->ssid, schid, sch->devno, sch);
    return sch;
}
  2277. static int css_sch_get_chpids(SubchDev *sch, CssDevId *dev_id)
  2278. {
  2279. char *fid_path;
  2280. FILE *fd;
  2281. uint32_t chpid[8];
  2282. int i;
  2283. SCHIB *schib = &sch->curr_status;
  2284. fid_path = g_strdup_printf("/sys/bus/css/devices/%x.%x.%04x/chpids",
  2285. dev_id->cssid, dev_id->ssid, dev_id->devid);
  2286. fd = fopen(fid_path, "r");
  2287. if (fd == NULL) {
  2288. error_report("%s: open %s failed", __func__, fid_path);
  2289. g_free(fid_path);
  2290. return -EINVAL;
  2291. }
  2292. if (fscanf(fd, "%x %x %x %x %x %x %x %x",
  2293. &chpid[0], &chpid[1], &chpid[2], &chpid[3],
  2294. &chpid[4], &chpid[5], &chpid[6], &chpid[7]) != 8) {
  2295. fclose(fd);
  2296. g_free(fid_path);
  2297. return -EINVAL;
  2298. }
  2299. for (i = 0; i < ARRAY_SIZE(schib->pmcw.chpid); i++) {
  2300. schib->pmcw.chpid[i] = chpid[i];
  2301. }
  2302. fclose(fd);
  2303. g_free(fid_path);
  2304. return 0;
  2305. }
/*
 * Read pim/pam/pom of the real device identified by @dev_id from sysfs
 * and store them in the subchannel's PMCW.
 * Returns 0 on success, -EINVAL on open/parse failure.
 */
static int css_sch_get_path_masks(SubchDev *sch, CssDevId *dev_id)
{
    char *fid_path;
    FILE *fd;
    uint32_t pim, pam, pom;
    SCHIB *schib = &sch->curr_status;

    fid_path = g_strdup_printf("/sys/bus/css/devices/%x.%x.%04x/pimpampom",
                               dev_id->cssid, dev_id->ssid, dev_id->devid);
    fd = fopen(fid_path, "r");
    if (fd == NULL) {
        error_report("%s: open %s failed", __func__, fid_path);
        g_free(fid_path);
        return -EINVAL;
    }

    if (fscanf(fd, "%x %x %x", &pim, &pam, &pom) != 3) {
        fclose(fd);
        g_free(fid_path);
        return -EINVAL;
    }

    schib->pmcw.pim = pim;
    schib->pmcw.pam = pam;
    schib->pmcw.pom = pom;
    fclose(fd);
    g_free(fid_path);
    return 0;
}
/*
 * Read the type of channel path @chpid from sysfs into @type.
 * Returns 0 on success, -EINVAL on open/parse failure.
 */
static int css_sch_get_chpid_type(uint8_t chpid, uint32_t *type,
                                  CssDevId *dev_id)
{
    char *fid_path;
    FILE *fd;

    fid_path = g_strdup_printf("/sys/devices/css%x/chp0.%02x/type",
                               dev_id->cssid, chpid);
    fd = fopen(fid_path, "r");
    if (fd == NULL) {
        error_report("%s: open %s failed", __func__, fid_path);
        g_free(fid_path);
        return -EINVAL;
    }

    if (fscanf(fd, "%x", type) != 1) {
        fclose(fd);
        g_free(fid_path);
        return -EINVAL;
    }

    fclose(fd);
    g_free(fid_path);
    return 0;
}
  2354. /*
  2355. * We currently retrieve the real device information from sysfs to build the
  2356. * guest subchannel information block without considering the migration feature.
  2357. * We need to revisit this problem when we want to add migration support.
  2358. */
/*
 * Build the SCHIB of passthrough subchannel @sch from the sysfs
 * attributes of the real device identified by @dev_id, registering any
 * not-yet-known chpids with the css image.
 * Returns 0 on success, a negative errno value otherwise.
 */
int css_sch_build_schib(SubchDev *sch, CssDevId *dev_id)
{
    CssImage *css = channel_subsys.css[sch->cssid];
    SCHIB *schib = &sch->curr_status;
    uint32_t type;
    int i, ret;

    assert(css != NULL);
    memset(&schib->pmcw, 0, sizeof(PMCW));
    schib->pmcw.flags |= PMCW_FLAGS_MASK_DNV;
    /* We are dealing with I/O subchannels only. */
    schib->pmcw.devno = sch->devno;

    /* Grab path mask from sysfs. */
    ret = css_sch_get_path_masks(sch, dev_id);
    if (ret) {
        return ret;
    }

    /* Grab chpids from sysfs. */
    ret = css_sch_get_chpids(sch, dev_id);
    if (ret) {
        return ret;
    }

    /* Build chpid type. */
    for (i = 0; i < ARRAY_SIZE(schib->pmcw.chpid); i++) {
        if (schib->pmcw.chpid[i] && !css->chpids[schib->pmcw.chpid[i]].in_use) {
            ret = css_sch_get_chpid_type(schib->pmcw.chpid[i], &type, dev_id);
            if (ret) {
                return ret;
            }
            css_add_chpid(sch->cssid, schib->pmcw.chpid[i], type, false);
        }
    }

    memset(&schib->scsw, 0, sizeof(SCSW));
    schib->mba = 0;
    for (i = 0; i < ARRAY_SIZE(schib->mda); i++) {
        schib->mda[i] = 0;
    }
    return 0;
}