/*
 * S/390 virtual CPU header
 *
 * Copyright (c) 2009 Ulrich Hecht
 *
 * This library is free software; you can redistribute it and/or
 * modify it under the terms of the GNU Lesser General Public
 * License as published by the Free Software Foundation; either
 * version 2 of the License, or (at your option) any later version.
 *
 * This library is distributed in the hope that it will be useful,
 * but WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
 * Lesser General Public License for more details.
 *
 * Contributions after 2012-10-29 are licensed under the terms of the
 * GNU GPL, version 2 or (at your option) any later version.
 *
 * You should have received a copy of the GNU (Lesser) General Public
 * License along with this library; if not, see <http://www.gnu.org/licenses/>.
 */

#ifndef CPU_S390X_H
#define CPU_S390X_H

#include "config.h"
#include "qemu-common.h"

#define TARGET_LONG_BITS 64

#define ELF_MACHINE_UNAME "S390X"

#define CPUArchState struct CPUS390XState

#include "exec/cpu-defs.h"
#define TARGET_PAGE_BITS 12

#define TARGET_PHYS_ADDR_SPACE_BITS 64
#define TARGET_VIRT_ADDR_SPACE_BITS 64

#include "exec/cpu-all.h"

#include "fpu/softfloat.h"

#define NB_MMU_MODES 3
#define TARGET_INSN_START_EXTRA_WORDS 1

#define MMU_MODE0_SUFFIX _primary
#define MMU_MODE1_SUFFIX _secondary
#define MMU_MODE2_SUFFIX _home
#define MMU_USER_IDX 0

#define MAX_EXT_QUEUE 16
#define MAX_IO_QUEUE 16
#define MAX_MCHK_QUEUE 16

#define PSW_MCHK_MASK 0x0004000000000000
#define PSW_IO_MASK 0x0200000000000000

typedef struct PSW {
    uint64_t mask;
    uint64_t addr;
} PSW;

typedef struct ExtQueue {
    uint32_t code;
    uint32_t param;
    uint32_t param64;
} ExtQueue;

typedef struct IOIntQueue {
    uint16_t id;
    uint16_t nr;
    uint32_t parm;
    uint32_t word;
} IOIntQueue;

typedef struct MchkQueue {
    uint16_t type;
} MchkQueue;

typedef struct CPUS390XState {
    uint64_t regs[16]; /* GP registers */
    /*
     * The floating point registers are part of the vector registers.
     * vregs[0][0] -> vregs[15][0] are 16 floating point registers
     */
    CPU_DoubleU vregs[32][2]; /* vector registers */
    uint32_t aregs[16]; /* access registers */

    uint32_t fpc; /* floating-point control register */
    uint32_t cc_op;

    float_status fpu_status; /* passed to softfloat lib */

    /* The low part of a 128-bit return, or remainder of a divide. */
    uint64_t retxl;

    PSW psw;

    uint64_t cc_src;
    uint64_t cc_dst;
    uint64_t cc_vr;

    uint64_t __excp_addr;
    uint64_t psa;

    uint32_t int_pgm_code;
    uint32_t int_pgm_ilen;

    uint32_t int_svc_code;
    uint32_t int_svc_ilen;

    uint64_t per_address;
    uint16_t per_perc_atmid;

    uint64_t cregs[16]; /* control registers */

    ExtQueue ext_queue[MAX_EXT_QUEUE];
    IOIntQueue io_queue[MAX_IO_QUEUE][8];
    MchkQueue mchk_queue[MAX_MCHK_QUEUE];

    int pending_int;
    int ext_index;
    int io_index[8];
    int mchk_index;

    uint64_t ckc;
    uint64_t cputm;
    uint32_t todpr;

    uint64_t pfault_token;
    uint64_t pfault_compare;
    uint64_t pfault_select;

    uint64_t gbea;
    uint64_t pp;

    CPU_COMMON

    /* reset does memset(0) up to here */

    uint32_t cpu_num;
    uint32_t machine_type;

    uint64_t tod_offset;
    uint64_t tod_basetime;
    QEMUTimer *tod_timer;

    QEMUTimer *cpu_timer;

    /*
     * The cpu state represents the logical state of a cpu. In contrast to other
     * architectures, there is a difference between a halt and a stop on s390.
     * If all cpus are either stopped (including check stop) or in the disabled
     * wait state, the vm can be shut down.
     */
#define CPU_STATE_UNINITIALIZED 0x00
#define CPU_STATE_STOPPED 0x01
#define CPU_STATE_CHECK_STOP 0x02
#define CPU_STATE_OPERATING 0x03
#define CPU_STATE_LOAD 0x04
    uint8_t cpu_state;

    /* currently processed sigp order */
    uint8_t sigp_order;

} CPUS390XState;
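
/*
 * Illustrative note (an assumption drawn from the comment above, not part of
 * the original header): "all cpus stopped or in disabled wait" amounts to a
 * per-CPU predicate along the lines of
 *
 *     stopped_or_disabled_wait =
 *         env->cpu_state == CPU_STATE_STOPPED ||
 *         env->cpu_state == CPU_STATE_CHECK_STOP ||
 *         ((env->psw.mask & PSW_MASK_WAIT) &&
 *          !(env->psw.mask & (PSW_MASK_IO | PSW_MASK_EXT | PSW_MASK_MCHECK)));
 */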
static inline CPU_DoubleU *get_freg(CPUS390XState *cs, int nr)
{
    return &cs->vregs[nr][0];
}
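
/*
 * Illustrative sketch (not part of the original header): since the first 16
 * floating point registers alias the leftmost doubleword of vregs[0..15],
 * writing through get_freg() is the same as writing vregs[nr][0] directly.
 */
static inline void set_freg_ll_example(CPUS390XState *cs, int nr, uint64_t v)
{
    get_freg(cs, nr)->ll = v; /* identical storage to cs->vregs[nr][0].ll */
}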
#include "cpu-qom.h"
#include "sysemu/kvm.h"
/* distinguish between 24 bit and 31 bit addressing */
#define HIGH_ORDER_BIT 0x80000000

/* Interrupt Codes */
/* Program Interrupts */
#define PGM_OPERATION 0x0001
#define PGM_PRIVILEGED 0x0002
#define PGM_EXECUTE 0x0003
#define PGM_PROTECTION 0x0004
#define PGM_ADDRESSING 0x0005
#define PGM_SPECIFICATION 0x0006
#define PGM_DATA 0x0007
#define PGM_FIXPT_OVERFLOW 0x0008
#define PGM_FIXPT_DIVIDE 0x0009
#define PGM_DEC_OVERFLOW 0x000a
#define PGM_DEC_DIVIDE 0x000b
#define PGM_HFP_EXP_OVERFLOW 0x000c
#define PGM_HFP_EXP_UNDERFLOW 0x000d
#define PGM_HFP_SIGNIFICANCE 0x000e
#define PGM_HFP_DIVIDE 0x000f
#define PGM_SEGMENT_TRANS 0x0010
#define PGM_PAGE_TRANS 0x0011
#define PGM_TRANS_SPEC 0x0012
#define PGM_SPECIAL_OP 0x0013
#define PGM_OPERAND 0x0015
#define PGM_TRACE_TABLE 0x0016
#define PGM_SPACE_SWITCH 0x001c
#define PGM_HFP_SQRT 0x001d
#define PGM_PC_TRANS_SPEC 0x001f
#define PGM_AFX_TRANS 0x0020
#define PGM_ASX_TRANS 0x0021
#define PGM_LX_TRANS 0x0022
#define PGM_EX_TRANS 0x0023
#define PGM_PRIM_AUTH 0x0024
#define PGM_SEC_AUTH 0x0025
#define PGM_ALET_SPEC 0x0028
#define PGM_ALEN_SPEC 0x0029
#define PGM_ALE_SEQ 0x002a
#define PGM_ASTE_VALID 0x002b
#define PGM_ASTE_SEQ 0x002c
#define PGM_EXT_AUTH 0x002d
#define PGM_STACK_FULL 0x0030
#define PGM_STACK_EMPTY 0x0031
#define PGM_STACK_SPEC 0x0032
#define PGM_STACK_TYPE 0x0033
#define PGM_STACK_OP 0x0034
#define PGM_ASCE_TYPE 0x0038
#define PGM_REG_FIRST_TRANS 0x0039
#define PGM_REG_SEC_TRANS 0x003a
#define PGM_REG_THIRD_TRANS 0x003b
#define PGM_MONITOR 0x0040
#define PGM_PER 0x0080
#define PGM_CRYPTO 0x0119

/* External Interrupts */
#define EXT_INTERRUPT_KEY 0x0040
#define EXT_CLOCK_COMP 0x1004
#define EXT_CPU_TIMER 0x1005
#define EXT_MALFUNCTION 0x1200
#define EXT_EMERGENCY 0x1201
#define EXT_EXTERNAL_CALL 0x1202
#define EXT_ETR 0x1406
#define EXT_SERVICE 0x2401
#define EXT_VIRTIO 0x2603

/* PSW defines */
#undef PSW_MASK_PER
#undef PSW_MASK_DAT
#undef PSW_MASK_IO
#undef PSW_MASK_EXT
#undef PSW_MASK_KEY
#undef PSW_SHIFT_KEY
#undef PSW_MASK_MCHECK
#undef PSW_MASK_WAIT
#undef PSW_MASK_PSTATE
#undef PSW_MASK_ASC
#undef PSW_MASK_CC
#undef PSW_MASK_PM
#undef PSW_MASK_64
#undef PSW_MASK_32
#undef PSW_MASK_ESA_ADDR

#define PSW_MASK_PER 0x4000000000000000ULL
#define PSW_MASK_DAT 0x0400000000000000ULL
#define PSW_MASK_IO 0x0200000000000000ULL
#define PSW_MASK_EXT 0x0100000000000000ULL
#define PSW_MASK_KEY 0x00F0000000000000ULL
#define PSW_SHIFT_KEY 56
#define PSW_MASK_MCHECK 0x0004000000000000ULL
#define PSW_MASK_WAIT 0x0002000000000000ULL
#define PSW_MASK_PSTATE 0x0001000000000000ULL
#define PSW_MASK_ASC 0x0000C00000000000ULL
#define PSW_MASK_CC 0x0000300000000000ULL
#define PSW_MASK_PM 0x00000F0000000000ULL
#define PSW_MASK_64 0x0000000100000000ULL
#define PSW_MASK_32 0x0000000080000000ULL
#define PSW_MASK_ESA_ADDR 0x000000007fffffffULL

#undef PSW_ASC_PRIMARY
#undef PSW_ASC_ACCREG
#undef PSW_ASC_SECONDARY
#undef PSW_ASC_HOME

#define PSW_ASC_PRIMARY 0x0000000000000000ULL
#define PSW_ASC_ACCREG 0x0000400000000000ULL
#define PSW_ASC_SECONDARY 0x0000800000000000ULL
#define PSW_ASC_HOME 0x0000C00000000000ULL

/* tb flags */
#define FLAG_MASK_PER (PSW_MASK_PER >> 32)
#define FLAG_MASK_DAT (PSW_MASK_DAT >> 32)
#define FLAG_MASK_IO (PSW_MASK_IO >> 32)
#define FLAG_MASK_EXT (PSW_MASK_EXT >> 32)
#define FLAG_MASK_KEY (PSW_MASK_KEY >> 32)
#define FLAG_MASK_MCHECK (PSW_MASK_MCHECK >> 32)
#define FLAG_MASK_WAIT (PSW_MASK_WAIT >> 32)
#define FLAG_MASK_PSTATE (PSW_MASK_PSTATE >> 32)
#define FLAG_MASK_ASC (PSW_MASK_ASC >> 32)
#define FLAG_MASK_CC (PSW_MASK_CC >> 32)
#define FLAG_MASK_PM (PSW_MASK_PM >> 32)
#define FLAG_MASK_64 (PSW_MASK_64 >> 32)
#define FLAG_MASK_32 0x00001000

/* Control register 0 bits */
#define CR0_LOWPROT 0x0000000010000000ULL
#define CR0_EDAT 0x0000000000800000ULL

/* MMU */
#define MMU_PRIMARY_IDX 0
#define MMU_SECONDARY_IDX 1
#define MMU_HOME_IDX 2

static inline int cpu_mmu_index(CPUS390XState *env, bool ifetch)
{
    switch (env->psw.mask & PSW_MASK_ASC) {
    case PSW_ASC_PRIMARY:
        return MMU_PRIMARY_IDX;
    case PSW_ASC_SECONDARY:
        return MMU_SECONDARY_IDX;
    case PSW_ASC_HOME:
        return MMU_HOME_IDX;
    case PSW_ASC_ACCREG:
        /* Fallthrough: access register mode is not yet supported */
    default:
        abort();
    }
}

static inline uint64_t cpu_mmu_idx_to_asc(int mmu_idx)
{
    switch (mmu_idx) {
    case MMU_PRIMARY_IDX:
        return PSW_ASC_PRIMARY;
    case MMU_SECONDARY_IDX:
        return PSW_ASC_SECONDARY;
    case MMU_HOME_IDX:
        return PSW_ASC_HOME;
    default:
        abort();
    }
}

static inline void cpu_get_tb_cpu_state(CPUS390XState *env, target_ulong *pc,
                                        target_ulong *cs_base, int *flags)
{
    *pc = env->psw.addr;
    *cs_base = 0;
    *flags = ((env->psw.mask >> 32) & ~FLAG_MASK_CC) |
             ((env->psw.mask & PSW_MASK_32) ? FLAG_MASK_32 : 0);
}
/* While the PoO talks about ILC (a number between 1-3) what is actually
   stored in LowCore is shifted left one bit (an even number between 2-6).
   As this is the actual length of the insn and therefore more useful, that
   is what we want to pass around and manipulate. To make sure that we
   have applied this distinction universally, rename the "ILC" to "ILEN". */
static inline int get_ilen(uint8_t opc)
{
    switch (opc >> 6) {
    case 0:
        return 2;
    case 1:
    case 2:
        return 4;
    default:
        return 6;
    }
}
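
/*
 * Illustrative examples (not part of the original header): the two most
 * significant bits of the first opcode byte encode the instruction length,
 * e.g. get_ilen(0x07) == 2 (BCR), get_ilen(0x47) == 4 (BC),
 * get_ilen(0xb2) == 4 and get_ilen(0xe3) == 6 (RXY format).
 */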
/* PER bits from control register 9 */
#define PER_CR9_EVENT_BRANCH 0x80000000
#define PER_CR9_EVENT_IFETCH 0x40000000
#define PER_CR9_EVENT_STORE 0x20000000
#define PER_CR9_EVENT_STORE_REAL 0x08000000
#define PER_CR9_EVENT_NULLIFICATION 0x01000000
#define PER_CR9_CONTROL_BRANCH_ADDRESS 0x00800000
#define PER_CR9_CONTROL_ALTERATION 0x00200000

/* PER bits from the PER CODE/ATMID/AI in lowcore */
#define PER_CODE_EVENT_BRANCH 0x8000
#define PER_CODE_EVENT_IFETCH 0x4000
#define PER_CODE_EVENT_STORE 0x2000
#define PER_CODE_EVENT_STORE_REAL 0x0800
#define PER_CODE_EVENT_NULLIFICATION 0x0100

/* Compute the ATMID field that is stored in the per_perc_atmid lowcore
   entry when a PER exception is triggered. */
static inline uint8_t get_per_atmid(CPUS390XState *env)
{
    return ((env->psw.mask & PSW_MASK_64) ? (1 << 7) : 0) |
           ((1 << 6)) |
           ((env->psw.mask & PSW_MASK_32) ? (1 << 5) : 0) |
           ((env->psw.mask & PSW_MASK_DAT) ? (1 << 4) : 0) |
           ((env->psw.mask & PSW_ASC_SECONDARY) ? (1 << 3) : 0) |
           ((env->psw.mask & PSW_ASC_ACCREG) ? (1 << 2) : 0);
}

/* Check if an address is within the PER starting address and the PER
   ending address. The address range might loop. */
static inline bool get_per_in_range(CPUS390XState *env, uint64_t addr)
{
    if (env->cregs[10] <= env->cregs[11]) {
        return env->cregs[10] <= addr && addr <= env->cregs[11];
    } else {
        return env->cregs[10] <= addr || addr <= env->cregs[11];
    }
}
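
/*
 * Illustrative example (values are assumptions, not from the original
 * header): with cregs[10] = 0xffff0000 and cregs[11] = 0x1000 the PER range
 * wraps around zero, so both 0xffff8000 and 0x800 are in range while
 * 0x500000 is not.
 */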
#ifndef CONFIG_USER_ONLY
/* In several cases of runtime exceptions, we haven't recorded the true
   instruction length. Use these codes when raising exceptions in order
   to re-compute the length by examining the insn in memory. */
#define ILEN_LATER 0x20
#define ILEN_LATER_INC 0x21
void trigger_pgm_exception(CPUS390XState *env, uint32_t code, uint32_t ilen);
#endif
S390CPU *cpu_s390x_init(const char *cpu_model);
void s390x_translate_init(void);
int cpu_s390x_exec(CPUState *cpu);

/* you can call this signal handler from your SIGBUS and SIGSEGV
   signal handlers to inform the virtual CPU of exceptions. non zero
   is returned if the signal was handled by the virtual CPU. */
int cpu_s390x_signal_handler(int host_signum, void *pinfo, void *puc);
int s390_cpu_handle_mmu_fault(CPUState *cpu, vaddr address, int rw,
                              int mmu_idx);

#include "ioinst.h"

#ifndef CONFIG_USER_ONLY
void do_restart_interrupt(CPUS390XState *env);

static inline hwaddr decode_basedisp_s(CPUS390XState *env, uint32_t ipb,
                                       uint8_t *ar)
{
    hwaddr addr = 0;
    uint8_t reg;

    reg = ipb >> 28;
    if (reg > 0) {
        addr = env->regs[reg];
    }
    addr += (ipb >> 16) & 0xfff;
    if (ar) {
        *ar = reg;
    }

    return addr;
}

/* Base/displacement are at the same locations. */
#define decode_basedisp_rs decode_basedisp_s
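
/*
 * Illustrative example (register contents are assumptions): for an S-format
 * instruction with ipb = 0x51230000 and env->regs[5] = 0x20000,
 * decode_basedisp_s() returns 0x20000 + 0x123 = 0x20123 and sets *ar to 5;
 * a base field of 0 contributes no base register, only the displacement.
 */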
/* helper functions for run_on_cpu() */
static inline void s390_do_cpu_reset(void *arg)
{
    CPUState *cs = arg;
    S390CPUClass *scc = S390_CPU_GET_CLASS(cs);

    scc->cpu_reset(cs);
}

static inline void s390_do_cpu_full_reset(void *arg)
{
    CPUState *cs = arg;

    cpu_reset(cs);
}

void s390x_tod_timer(void *opaque);
void s390x_cpu_timer(void *opaque);

int s390_virtio_hypercall(CPUS390XState *env);
void s390_virtio_irq(int config_change, uint64_t token);

#ifdef CONFIG_KVM
void kvm_s390_virtio_irq(int config_change, uint64_t token);
void kvm_s390_service_interrupt(uint32_t parm);
void kvm_s390_vcpu_interrupt(S390CPU *cpu, struct kvm_s390_irq *irq);
void kvm_s390_floating_interrupt(struct kvm_s390_irq *irq);
int kvm_s390_inject_flic(struct kvm_s390_irq *irq);
void kvm_s390_access_exception(S390CPU *cpu, uint16_t code, uint64_t te_code);
int kvm_s390_mem_op(S390CPU *cpu, vaddr addr, uint8_t ar, void *hostbuf,
                    int len, bool is_write);
int kvm_s390_get_clock(uint8_t *tod_high, uint64_t *tod_clock);
int kvm_s390_set_clock(uint8_t *tod_high, uint64_t *tod_clock);
#else
static inline void kvm_s390_virtio_irq(int config_change, uint64_t token)
{
}
static inline void kvm_s390_service_interrupt(uint32_t parm)
{
}
static inline int kvm_s390_get_clock(uint8_t *tod_high, uint64_t *tod_low)
{
    return -ENOSYS;
}
static inline int kvm_s390_set_clock(uint8_t *tod_high, uint64_t *tod_low)
{
    return -ENOSYS;
}
static inline int kvm_s390_mem_op(S390CPU *cpu, vaddr addr, uint8_t ar,
                                  void *hostbuf, int len, bool is_write)
{
    return -ENOSYS;
}
static inline void kvm_s390_access_exception(S390CPU *cpu, uint16_t code,
                                             uint64_t te_code)
{
}
#endif

static inline int s390_get_clock(uint8_t *tod_high, uint64_t *tod_low)
{
    if (kvm_enabled()) {
        return kvm_s390_get_clock(tod_high, tod_low);
    }
    /* Fixme TCG */
    *tod_high = 0;
    *tod_low = 0;
    return 0;
}

static inline int s390_set_clock(uint8_t *tod_high, uint64_t *tod_low)
{
    if (kvm_enabled()) {
        return kvm_s390_set_clock(tod_high, tod_low);
    }
    /* Fixme TCG */
    return 0;
}

S390CPU *s390_cpu_addr2state(uint16_t cpu_addr);
unsigned int s390_cpu_halt(S390CPU *cpu);
void s390_cpu_unhalt(S390CPU *cpu);
unsigned int s390_cpu_set_state(uint8_t cpu_state, S390CPU *cpu);
static inline uint8_t s390_cpu_get_state(S390CPU *cpu)
{
    return cpu->env.cpu_state;
}

void gtod_save(QEMUFile *f, void *opaque);
int gtod_load(QEMUFile *f, void *opaque, int version_id);
/* service interrupts are floating, therefore we must not pass a cpu state */
void s390_sclp_extint(uint32_t parm);
/* from s390-virtio-bus */
extern const hwaddr virtio_size;

#else
static inline unsigned int s390_cpu_halt(S390CPU *cpu)
{
    return 0;
}

static inline void s390_cpu_unhalt(S390CPU *cpu)
{
}

static inline unsigned int s390_cpu_set_state(uint8_t cpu_state, S390CPU *cpu)
{
    return 0;
}
#endif

void cpu_lock(void);
void cpu_unlock(void);

typedef struct SubchDev SubchDev;

#ifndef CONFIG_USER_ONLY
extern void subsystem_reset(void);
SubchDev *css_find_subch(uint8_t m, uint8_t cssid, uint8_t ssid,
                         uint16_t schid);
bool css_subch_visible(SubchDev *sch);
void css_conditional_io_interrupt(SubchDev *sch);
int css_do_stsch(SubchDev *sch, SCHIB *schib);
bool css_schid_final(int m, uint8_t cssid, uint8_t ssid, uint16_t schid);
int css_do_msch(SubchDev *sch, const SCHIB *schib);
int css_do_xsch(SubchDev *sch);
int css_do_csch(SubchDev *sch);
int css_do_hsch(SubchDev *sch);
int css_do_ssch(SubchDev *sch, ORB *orb);
int css_do_tsch_get_irb(SubchDev *sch, IRB *irb, int *irb_len);
void css_do_tsch_update_subch(SubchDev *sch);
int css_do_stcrw(CRW *crw);
void css_undo_stcrw(CRW *crw);
int css_do_tpi(IOIntCode *int_code, int lowcore);
int css_collect_chp_desc(int m, uint8_t cssid, uint8_t f_chpid, uint8_t l_chpid,
                         int rfmt, void *buf);
void css_do_schm(uint8_t mbk, int update, int dct, uint64_t mbo);
int css_enable_mcsse(void);
int css_enable_mss(void);
int css_do_rsch(SubchDev *sch);
int css_do_rchp(uint8_t cssid, uint8_t chpid);
bool css_present(uint8_t cssid);
#endif

#define cpu_init(model) CPU(cpu_s390x_init(model))
#define cpu_exec cpu_s390x_exec
#define cpu_signal_handler cpu_s390x_signal_handler

void s390_cpu_list(FILE *f, fprintf_function cpu_fprintf);
#define cpu_list s390_cpu_list

#include "exec/exec-all.h"

#define EXCP_EXT 1 /* external interrupt */
#define EXCP_SVC 2 /* supervisor call (syscall) */
#define EXCP_PGM 3 /* program interruption */
#define EXCP_IO 7 /* I/O interrupt */
#define EXCP_MCHK 8 /* machine check */

#define INTERRUPT_EXT (1 << 0)
#define INTERRUPT_TOD (1 << 1)
#define INTERRUPT_CPUTIMER (1 << 2)
#define INTERRUPT_IO (1 << 3)
#define INTERRUPT_MCHK (1 << 4)

/* Program Status Word. */
#define S390_PSWM_REGNUM 0
#define S390_PSWA_REGNUM 1
/* General Purpose Registers. */
#define S390_R0_REGNUM 2
#define S390_R1_REGNUM 3
#define S390_R2_REGNUM 4
#define S390_R3_REGNUM 5
#define S390_R4_REGNUM 6
#define S390_R5_REGNUM 7
#define S390_R6_REGNUM 8
#define S390_R7_REGNUM 9
#define S390_R8_REGNUM 10
#define S390_R9_REGNUM 11
#define S390_R10_REGNUM 12
#define S390_R11_REGNUM 13
#define S390_R12_REGNUM 14
#define S390_R13_REGNUM 15
#define S390_R14_REGNUM 16
#define S390_R15_REGNUM 17
/* Total Core Registers. */
#define S390_NUM_CORE_REGS 18

/* CC optimization */
enum cc_op {
    CC_OP_CONST0 = 0, /* CC is 0 */
    CC_OP_CONST1, /* CC is 1 */
    CC_OP_CONST2, /* CC is 2 */
    CC_OP_CONST3, /* CC is 3 */

    CC_OP_DYNAMIC, /* CC calculation defined by env->cc_op */
    CC_OP_STATIC, /* CC value is env->cc_op */

    CC_OP_NZ, /* env->cc_dst != 0 */
    CC_OP_LTGT_32, /* signed less/greater than (32bit) */
    CC_OP_LTGT_64, /* signed less/greater than (64bit) */
    CC_OP_LTUGTU_32, /* unsigned less/greater than (32bit) */
    CC_OP_LTUGTU_64, /* unsigned less/greater than (64bit) */
    CC_OP_LTGT0_32, /* signed less/greater than 0 (32bit) */
    CC_OP_LTGT0_64, /* signed less/greater than 0 (64bit) */

    CC_OP_ADD_64, /* overflow on add (64bit) */
    CC_OP_ADDU_64, /* overflow on unsigned add (64bit) */
    CC_OP_ADDC_64, /* overflow on unsigned add-carry (64bit) */
    CC_OP_SUB_64, /* overflow on subtraction (64bit) */
    CC_OP_SUBU_64, /* overflow on unsigned subtraction (64bit) */
    CC_OP_SUBB_64, /* overflow on unsigned sub-borrow (64bit) */
    CC_OP_ABS_64, /* sign eval on abs (64bit) */
    CC_OP_NABS_64, /* sign eval on nabs (64bit) */

    CC_OP_ADD_32, /* overflow on add (32bit) */
    CC_OP_ADDU_32, /* overflow on unsigned add (32bit) */
    CC_OP_ADDC_32, /* overflow on unsigned add-carry (32bit) */
    CC_OP_SUB_32, /* overflow on subtraction (32bit) */
    CC_OP_SUBU_32, /* overflow on unsigned subtraction (32bit) */
    CC_OP_SUBB_32, /* overflow on unsigned sub-borrow (32bit) */
    CC_OP_ABS_32, /* sign eval on abs (32bit) */
    CC_OP_NABS_32, /* sign eval on nabs (32bit) */
    CC_OP_COMP_32, /* complement */
    CC_OP_COMP_64, /* complement */

    CC_OP_TM_32, /* test under mask (32bit) */
    CC_OP_TM_64, /* test under mask (64bit) */

    CC_OP_NZ_F32, /* FP dst != 0 (32bit) */
    CC_OP_NZ_F64, /* FP dst != 0 (64bit) */
    CC_OP_NZ_F128, /* FP dst != 0 (128bit) */

    CC_OP_ICM, /* insert characters under mask */
    CC_OP_SLA_32, /* Calculate shift left signed (32bit) */
    CC_OP_SLA_64, /* Calculate shift left signed (64bit) */
    CC_OP_FLOGR, /* find leftmost one */
    CC_OP_MAX
};

static const char *cc_names[] = {
    [CC_OP_CONST0] = "CC_OP_CONST0",
    [CC_OP_CONST1] = "CC_OP_CONST1",
    [CC_OP_CONST2] = "CC_OP_CONST2",
    [CC_OP_CONST3] = "CC_OP_CONST3",
    [CC_OP_DYNAMIC] = "CC_OP_DYNAMIC",
    [CC_OP_STATIC] = "CC_OP_STATIC",
    [CC_OP_NZ] = "CC_OP_NZ",
    [CC_OP_LTGT_32] = "CC_OP_LTGT_32",
    [CC_OP_LTGT_64] = "CC_OP_LTGT_64",
    [CC_OP_LTUGTU_32] = "CC_OP_LTUGTU_32",
    [CC_OP_LTUGTU_64] = "CC_OP_LTUGTU_64",
    [CC_OP_LTGT0_32] = "CC_OP_LTGT0_32",
    [CC_OP_LTGT0_64] = "CC_OP_LTGT0_64",
    [CC_OP_ADD_64] = "CC_OP_ADD_64",
    [CC_OP_ADDU_64] = "CC_OP_ADDU_64",
    [CC_OP_ADDC_64] = "CC_OP_ADDC_64",
    [CC_OP_SUB_64] = "CC_OP_SUB_64",
    [CC_OP_SUBU_64] = "CC_OP_SUBU_64",
    [CC_OP_SUBB_64] = "CC_OP_SUBB_64",
    [CC_OP_ABS_64] = "CC_OP_ABS_64",
    [CC_OP_NABS_64] = "CC_OP_NABS_64",
    [CC_OP_ADD_32] = "CC_OP_ADD_32",
    [CC_OP_ADDU_32] = "CC_OP_ADDU_32",
    [CC_OP_ADDC_32] = "CC_OP_ADDC_32",
    [CC_OP_SUB_32] = "CC_OP_SUB_32",
    [CC_OP_SUBU_32] = "CC_OP_SUBU_32",
    [CC_OP_SUBB_32] = "CC_OP_SUBB_32",
    [CC_OP_ABS_32] = "CC_OP_ABS_32",
    [CC_OP_NABS_32] = "CC_OP_NABS_32",
    [CC_OP_COMP_32] = "CC_OP_COMP_32",
    [CC_OP_COMP_64] = "CC_OP_COMP_64",
    [CC_OP_TM_32] = "CC_OP_TM_32",
    [CC_OP_TM_64] = "CC_OP_TM_64",
    [CC_OP_NZ_F32] = "CC_OP_NZ_F32",
    [CC_OP_NZ_F64] = "CC_OP_NZ_F64",
    [CC_OP_NZ_F128] = "CC_OP_NZ_F128",
    [CC_OP_ICM] = "CC_OP_ICM",
    [CC_OP_SLA_32] = "CC_OP_SLA_32",
    [CC_OP_SLA_64] = "CC_OP_SLA_64",
    [CC_OP_FLOGR] = "CC_OP_FLOGR",
};

static inline const char *cc_name(int cc_op)
{
    return cc_names[cc_op];
}

static inline void setcc(S390CPU *cpu, uint64_t cc)
{
    CPUS390XState *env = &cpu->env;

    env->psw.mask &= ~(3ull << 44);
    env->psw.mask |= (cc & 3) << 44;
    env->cc_op = cc;
}
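
/*
 * Illustrative sketch (not part of the original header): the condition code
 * sits in PSW mask bits 44-45 (PSW_MASK_CC == 3ULL << 44), so the value set
 * by setcc() can be read back like this.
 */
static inline uint64_t getcc_example(S390CPU *cpu)
{
    return (cpu->env.psw.mask & PSW_MASK_CC) >> 44;
}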
typedef struct LowCore {
    /* prefix area: defined by architecture */
    uint32_t ccw1[2]; /* 0x000 */
    uint32_t ccw2[4]; /* 0x008 */
    uint8_t pad1[0x80 - 0x18]; /* 0x018 */
    uint32_t ext_params; /* 0x080 */
    uint16_t cpu_addr; /* 0x084 */
    uint16_t ext_int_code; /* 0x086 */
    uint16_t svc_ilen; /* 0x088 */
    uint16_t svc_code; /* 0x08a */
    uint16_t pgm_ilen; /* 0x08c */
    uint16_t pgm_code; /* 0x08e */
    uint32_t data_exc_code; /* 0x090 */
    uint16_t mon_class_num; /* 0x094 */
    uint16_t per_perc_atmid; /* 0x096 */
    uint64_t per_address; /* 0x098 */
    uint8_t exc_access_id; /* 0x0a0 */
    uint8_t per_access_id; /* 0x0a1 */
    uint8_t op_access_id; /* 0x0a2 */
    uint8_t ar_access_id; /* 0x0a3 */
    uint8_t pad2[0xA8 - 0xA4]; /* 0x0a4 */
    uint64_t trans_exc_code; /* 0x0a8 */
    uint64_t monitor_code; /* 0x0b0 */
    uint16_t subchannel_id; /* 0x0b8 */
    uint16_t subchannel_nr; /* 0x0ba */
    uint32_t io_int_parm; /* 0x0bc */
    uint32_t io_int_word; /* 0x0c0 */
    uint8_t pad3[0xc8 - 0xc4]; /* 0x0c4 */
    uint32_t stfl_fac_list; /* 0x0c8 */
    uint8_t pad4[0xe8 - 0xcc]; /* 0x0cc */
    uint32_t mcck_interruption_code[2]; /* 0x0e8 */
    uint8_t pad5[0xf4 - 0xf0]; /* 0x0f0 */
    uint32_t external_damage_code; /* 0x0f4 */
    uint64_t failing_storage_address; /* 0x0f8 */
    uint8_t pad6[0x110 - 0x100]; /* 0x100 */
    uint64_t per_breaking_event_addr; /* 0x110 */
    uint8_t pad7[0x120 - 0x118]; /* 0x118 */
    PSW restart_old_psw; /* 0x120 */
    PSW external_old_psw; /* 0x130 */
    PSW svc_old_psw; /* 0x140 */
    PSW program_old_psw; /* 0x150 */
    PSW mcck_old_psw; /* 0x160 */
    PSW io_old_psw; /* 0x170 */
    uint8_t pad8[0x1a0 - 0x180]; /* 0x180 */
    PSW restart_new_psw; /* 0x1a0 */
    PSW external_new_psw; /* 0x1b0 */
    PSW svc_new_psw; /* 0x1c0 */
    PSW program_new_psw; /* 0x1d0 */
    PSW mcck_new_psw; /* 0x1e0 */
    PSW io_new_psw; /* 0x1f0 */
    PSW return_psw; /* 0x200 */
    uint8_t irb[64]; /* 0x210 */
    uint64_t sync_enter_timer; /* 0x250 */
    uint64_t async_enter_timer; /* 0x258 */
    uint64_t exit_timer; /* 0x260 */
    uint64_t last_update_timer; /* 0x268 */
    uint64_t user_timer; /* 0x270 */
    uint64_t system_timer; /* 0x278 */
    uint64_t last_update_clock; /* 0x280 */
    uint64_t steal_clock; /* 0x288 */
    PSW return_mcck_psw; /* 0x290 */
    uint8_t pad9[0xc00 - 0x2a0]; /* 0x2a0 */
    /* System info area */
    uint64_t save_area[16]; /* 0xc00 */
    uint8_t pad10[0xd40 - 0xc80]; /* 0xc80 */
    uint64_t kernel_stack; /* 0xd40 */
    uint64_t thread_info; /* 0xd48 */
    uint64_t async_stack; /* 0xd50 */
    uint64_t kernel_asce; /* 0xd58 */
    uint64_t user_asce; /* 0xd60 */
    uint64_t panic_stack; /* 0xd68 */
    uint64_t user_exec_asce; /* 0xd70 */
    uint8_t pad11[0xdc0 - 0xd78]; /* 0xd78 */

    /* SMP info area: defined by DJB */
    uint64_t clock_comparator; /* 0xdc0 */
    uint64_t ext_call_fast; /* 0xdc8 */
    uint64_t percpu_offset; /* 0xdd0 */
    uint64_t current_task; /* 0xdd8 */
    uint32_t softirq_pending; /* 0xde0 */
    uint32_t pad_0x0de4; /* 0xde4 */
    uint64_t int_clock; /* 0xde8 */
    uint8_t pad12[0xe00 - 0xdf0]; /* 0xdf0 */

    /* 0xe00 is used as indicator for dump tools */
    /* whether the kernel died with panic() or not */
    uint32_t panic_magic; /* 0xe00 */

    uint8_t pad13[0x11b8 - 0xe04]; /* 0xe04 */

    /* 64 bit extparam used for pfault, diag 250 etc */
    uint64_t ext_params2; /* 0x11B8 */

    uint8_t pad14[0x1200 - 0x11C0]; /* 0x11C0 */

    /* System info area */
    uint64_t floating_pt_save_area[16]; /* 0x1200 */
    uint64_t gpregs_save_area[16]; /* 0x1280 */
    uint32_t st_status_fixed_logout[4]; /* 0x1300 */
    uint8_t pad15[0x1318 - 0x1310]; /* 0x1310 */
    uint32_t prefixreg_save_area; /* 0x1318 */
    uint32_t fpt_creg_save_area; /* 0x131c */
    uint8_t pad16[0x1324 - 0x1320]; /* 0x1320 */
    uint32_t tod_progreg_save_area; /* 0x1324 */
    uint32_t cpu_timer_save_area[2]; /* 0x1328 */
    uint32_t clock_comp_save_area[2]; /* 0x1330 */
    uint8_t pad17[0x1340 - 0x1338]; /* 0x1338 */
    uint32_t access_regs_save_area[16]; /* 0x1340 */
    uint64_t cregs_save_area[16]; /* 0x1380 */

    /* align to the top of the prefix area */
    uint8_t pad18[0x2000 - 0x1400]; /* 0x1400 */
} QEMU_PACKED LowCore;
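
/*
 * Illustrative sketch (an assumption, not in the original header): the
 * offsets noted in the trailing comments above can be cross-checked at
 * compile time, for example:
 */
QEMU_BUILD_BUG_ON(offsetof(LowCore, svc_new_psw) != 0x1c0);
QEMU_BUILD_BUG_ON(offsetof(LowCore, ext_params2) != 0x11b8);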
/* STSI */
#define STSI_LEVEL_MASK 0x00000000f0000000ULL
#define STSI_LEVEL_CURRENT 0x0000000000000000ULL
#define STSI_LEVEL_1 0x0000000010000000ULL
#define STSI_LEVEL_2 0x0000000020000000ULL
#define STSI_LEVEL_3 0x0000000030000000ULL
#define STSI_R0_RESERVED_MASK 0x000000000fffff00ULL
#define STSI_R0_SEL1_MASK 0x00000000000000ffULL
#define STSI_R1_RESERVED_MASK 0x00000000ffff0000ULL
#define STSI_R1_SEL2_MASK 0x000000000000ffffULL
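
/*
 * Illustrative sketch (not part of the original header): how an STSI
 * implementation could decode the function code and selectors from the
 * guest's r0/r1 using the masks above.
 */
static inline void stsi_decode_example(uint64_t r0, uint64_t r1,
                                       uint8_t *fc, uint8_t *sel1,
                                       uint16_t *sel2)
{
    *fc = (r0 & STSI_LEVEL_MASK) >> 28;
    *sel1 = r0 & STSI_R0_SEL1_MASK;
    *sel2 = r1 & STSI_R1_SEL2_MASK;
}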
/* Basic Machine Configuration */
struct sysib_111 {
    uint32_t res1[8];
    uint8_t manuf[16];
    uint8_t type[4];
    uint8_t res2[12];
    uint8_t model[16];
    uint8_t sequence[16];
    uint8_t plant[4];
    uint8_t res3[156];
};

/* Basic Machine CPU */
struct sysib_121 {
    uint32_t res1[80];
    uint8_t sequence[16];
    uint8_t plant[4];
    uint8_t res2[2];
    uint16_t cpu_addr;
    uint8_t res3[152];
};

/* Basic Machine CPUs */
struct sysib_122 {
    uint8_t res1[32];
    uint32_t capability;
    uint16_t total_cpus;
    uint16_t active_cpus;
    uint16_t standby_cpus;
    uint16_t reserved_cpus;
    uint16_t adjustments[2026];
};

/* LPAR CPU */
struct sysib_221 {
    uint32_t res1[80];
    uint8_t sequence[16];
    uint8_t plant[4];
    uint16_t cpu_id;
    uint16_t cpu_addr;
    uint8_t res3[152];
};

/* LPAR CPUs */
struct sysib_222 {
    uint32_t res1[32];
    uint16_t lpar_num;
    uint8_t res2;
    uint8_t lcpuc;
    uint16_t total_cpus;
    uint16_t conf_cpus;
    uint16_t standby_cpus;
    uint16_t reserved_cpus;
    uint8_t name[8];
    uint32_t caf;
    uint8_t res3[16];
    uint16_t dedicated_cpus;
    uint16_t shared_cpus;
    uint8_t res4[180];
};

/* VM CPUs */
struct sysib_322 {
    uint8_t res1[31];
    uint8_t count;
    struct {
        uint8_t res2[4];
        uint16_t total_cpus;
        uint16_t conf_cpus;
        uint16_t standby_cpus;
        uint16_t reserved_cpus;
        uint8_t name[8];
        uint32_t caf;
        uint8_t cpi[16];
        uint8_t res5[3];
        uint8_t ext_name_encoding;
        uint32_t res3;
        uint8_t uuid[16];
    } vm[8];
    uint8_t res4[1504];
    uint8_t ext_names[8][256];
};

/* MMU defines */
#define _ASCE_ORIGIN ~0xfffULL /* segment table origin */
#define _ASCE_SUBSPACE 0x200 /* subspace group control */
#define _ASCE_PRIVATE_SPACE 0x100 /* private space control */
#define _ASCE_ALT_EVENT 0x80 /* storage alteration event control */
#define _ASCE_SPACE_SWITCH 0x40 /* space switch event */
#define _ASCE_REAL_SPACE 0x20 /* real space control */
#define _ASCE_TYPE_MASK 0x0c /* asce table type mask */
#define _ASCE_TYPE_REGION1 0x0c /* region first table type */
#define _ASCE_TYPE_REGION2 0x08 /* region second table type */
#define _ASCE_TYPE_REGION3 0x04 /* region third table type */
#define _ASCE_TYPE_SEGMENT 0x00 /* segment table type */
#define _ASCE_TABLE_LENGTH 0x03 /* region table length */

#define _REGION_ENTRY_ORIGIN ~0xfffULL /* region/segment table origin */
#define _REGION_ENTRY_RO 0x200 /* region/segment protection bit */
#define _REGION_ENTRY_TF 0xc0 /* region/segment table offset */
#define _REGION_ENTRY_INV 0x20 /* invalid region table entry */
#define _REGION_ENTRY_TYPE_MASK 0x0c /* region/segment table type mask */
#define _REGION_ENTRY_TYPE_R1 0x0c /* region first table type */
#define _REGION_ENTRY_TYPE_R2 0x08 /* region second table type */
#define _REGION_ENTRY_TYPE_R3 0x04 /* region third table type */
#define _REGION_ENTRY_LENGTH 0x03 /* region third length */

#define _SEGMENT_ENTRY_ORIGIN ~0x7ffULL /* segment table origin */
#define _SEGMENT_ENTRY_FC 0x400 /* format control */
#define _SEGMENT_ENTRY_RO 0x200 /* page protection bit */
#define _SEGMENT_ENTRY_INV 0x20 /* invalid segment table entry */

#define _PAGE_RO 0x200 /* HW read-only bit */
#define _PAGE_INVALID 0x400 /* HW invalid bit */
#define _PAGE_RES0 0x800 /* bit must be zero */

#define SK_C (0x1 << 1)
#define SK_R (0x1 << 2)
#define SK_F (0x1 << 3)
#define SK_ACC_MASK (0xf << 4)

/* SIGP order codes */
#define SIGP_SENSE 0x01
#define SIGP_EXTERNAL_CALL 0x02
#define SIGP_EMERGENCY 0x03
#define SIGP_START 0x04
#define SIGP_STOP 0x05
#define SIGP_RESTART 0x06
#define SIGP_STOP_STORE_STATUS 0x09
#define SIGP_INITIAL_CPU_RESET 0x0b
#define SIGP_CPU_RESET 0x0c
#define SIGP_SET_PREFIX 0x0d
#define SIGP_STORE_STATUS_ADDR 0x0e
#define SIGP_SET_ARCH 0x12
#define SIGP_STORE_ADTL_STATUS 0x17

/* SIGP condition codes */
#define SIGP_CC_ORDER_CODE_ACCEPTED 0
#define SIGP_CC_STATUS_STORED 1
#define SIGP_CC_BUSY 2
#define SIGP_CC_NOT_OPERATIONAL 3

/* SIGP status bits */
#define SIGP_STAT_EQUIPMENT_CHECK 0x80000000UL
#define SIGP_STAT_INCORRECT_STATE 0x00000200UL
#define SIGP_STAT_INVALID_PARAMETER 0x00000100UL
#define SIGP_STAT_EXT_CALL_PENDING 0x00000080UL
#define SIGP_STAT_STOPPED 0x00000040UL
#define SIGP_STAT_OPERATOR_INTERV 0x00000020UL
#define SIGP_STAT_CHECK_STOP 0x00000010UL
#define SIGP_STAT_INOPERATIVE 0x00000004UL
#define SIGP_STAT_INVALID_ORDER 0x00000002UL
#define SIGP_STAT_RECEIVER_CHECK 0x00000001UL

/* SIGP SET ARCHITECTURE modes */
#define SIGP_MODE_ESA_S390 0
#define SIGP_MODE_Z_ARCH_TRANS_ALL_PSW 1
#define SIGP_MODE_Z_ARCH_TRANS_CUR_PSW 2

void load_psw(CPUS390XState *env, uint64_t mask, uint64_t addr);
int mmu_translate(CPUS390XState *env, target_ulong vaddr, int rw, uint64_t asc,
                  target_ulong *raddr, int *flags, bool exc);
int sclp_service_call(CPUS390XState *env, uint64_t sccb, uint32_t code);
uint32_t calc_cc(CPUS390XState *env, uint32_t cc_op, uint64_t src, uint64_t dst,
                 uint64_t vr);
void s390_cpu_recompute_watchpoints(CPUState *cs);

int s390_cpu_virt_mem_rw(S390CPU *cpu, vaddr laddr, uint8_t ar, void *hostbuf,
                         int len, bool is_write);
#define s390_cpu_virt_mem_read(cpu, laddr, ar, dest, len) \
        s390_cpu_virt_mem_rw(cpu, laddr, ar, dest, len, false)
#define s390_cpu_virt_mem_write(cpu, laddr, ar, dest, len) \
        s390_cpu_virt_mem_rw(cpu, laddr, ar, dest, len, true)
#define s390_cpu_virt_mem_check_write(cpu, laddr, ar, len) \
        s390_cpu_virt_mem_rw(cpu, laddr, ar, NULL, len, true)

/* The value of the TOD clock for 1.1.1970. */
#define TOD_UNIX_EPOCH 0x7d91048bca000000ULL

/* Converts ns to s390's clock format */
static inline uint64_t time2tod(uint64_t ns)
{
    return (ns << 9) / 125;
}

/* Converts s390's clock format to ns */
static inline uint64_t tod2time(uint64_t t)
{
    return (t * 125) >> 9;
}
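
/*
 * Worked example (illustrative): TOD-clock bit 51 ticks once per
 * microsecond, i.e. 1 us == 4096 TOD units == 1000 ns, and indeed
 * time2tod(1000) = (1000 << 9) / 125 = 4096 while
 * tod2time(4096) = (4096 * 125) >> 9 = 1000.
 */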
static inline void cpu_inject_ext(S390CPU *cpu, uint32_t code, uint32_t param,
                                  uint64_t param64)
{
    CPUS390XState *env = &cpu->env;

    if (env->ext_index == MAX_EXT_QUEUE - 1) {
        /* ugh - can't queue anymore. Let's drop. */
        return;
    }

    env->ext_index++;
    assert(env->ext_index < MAX_EXT_QUEUE);

    env->ext_queue[env->ext_index].code = code;
    env->ext_queue[env->ext_index].param = param;
    env->ext_queue[env->ext_index].param64 = param64;

    env->pending_int |= INTERRUPT_EXT;
    cpu_interrupt(CPU(cpu), CPU_INTERRUPT_HARD);
}
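
/*
 * Illustrative usage (the caller and parameter names are assumptions, not
 * from this header): queueing a service-signal external interrupt would
 * look like
 *
 *     cpu_inject_ext(cpu, EXT_SERVICE, sccb_addr, 0);
 */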
static inline void cpu_inject_io(S390CPU *cpu, uint16_t subchannel_id,
                                 uint16_t subchannel_number,
                                 uint32_t io_int_parm, uint32_t io_int_word)
{
    CPUS390XState *env = &cpu->env;
    int isc = IO_INT_WORD_ISC(io_int_word);

    if (env->io_index[isc] == MAX_IO_QUEUE - 1) {
        /* ugh - can't queue anymore. Let's drop. */
        return;
    }

    env->io_index[isc]++;
    assert(env->io_index[isc] < MAX_IO_QUEUE);

    env->io_queue[env->io_index[isc]][isc].id = subchannel_id;
    env->io_queue[env->io_index[isc]][isc].nr = subchannel_number;
    env->io_queue[env->io_index[isc]][isc].parm = io_int_parm;
    env->io_queue[env->io_index[isc]][isc].word = io_int_word;

    env->pending_int |= INTERRUPT_IO;
    cpu_interrupt(CPU(cpu), CPU_INTERRUPT_HARD);
}

static inline void cpu_inject_crw_mchk(S390CPU *cpu)
{
    CPUS390XState *env = &cpu->env;

    if (env->mchk_index == MAX_MCHK_QUEUE - 1) {
        /* ugh - can't queue anymore. Let's drop. */
        return;
    }

    env->mchk_index++;
    assert(env->mchk_index < MAX_MCHK_QUEUE);

    env->mchk_queue[env->mchk_index].type = 1;

    env->pending_int |= INTERRUPT_MCHK;
    cpu_interrupt(CPU(cpu), CPU_INTERRUPT_HARD);
}

/* from s390-virtio-ccw */
#define MEM_SECTION_SIZE 0x10000000UL
#define MAX_AVAIL_SLOTS 32

/* fpu_helper.c */
uint32_t set_cc_nz_f32(float32 v);
uint32_t set_cc_nz_f64(float64 v);
uint32_t set_cc_nz_f128(float128 v);

/* misc_helper.c */
#ifndef CONFIG_USER_ONLY
int handle_diag_288(CPUS390XState *env, uint64_t r1, uint64_t r3);
void handle_diag_308(CPUS390XState *env, uint64_t r1, uint64_t r3);
#endif
void program_interrupt(CPUS390XState *env, uint32_t code, int ilen);
void QEMU_NORETURN runtime_exception(CPUS390XState *env, int excp,
                                     uintptr_t retaddr);

#ifdef CONFIG_KVM
void kvm_s390_io_interrupt(uint16_t subchannel_id,
                           uint16_t subchannel_nr, uint32_t io_int_parm,
                           uint32_t io_int_word);
void kvm_s390_crw_mchk(void);
void kvm_s390_enable_css_support(S390CPU *cpu);
int kvm_s390_assign_subch_ioeventfd(EventNotifier *notifier, uint32_t sch,
                                    int vq, bool assign);
int kvm_s390_cpu_restart(S390CPU *cpu);
int kvm_s390_get_memslot_count(KVMState *s);
void kvm_s390_clear_cmma_callback(void *opaque);
int kvm_s390_set_cpu_state(S390CPU *cpu, uint8_t cpu_state);
void kvm_s390_reset_vcpu(S390CPU *cpu);
int kvm_s390_set_mem_limit(KVMState *s, uint64_t new_limit, uint64_t *hw_limit);
void kvm_s390_vcpu_interrupt_pre_save(S390CPU *cpu);
int kvm_s390_vcpu_interrupt_post_load(S390CPU *cpu);
#else
static inline void kvm_s390_io_interrupt(uint16_t subchannel_id,
                                         uint16_t subchannel_nr,
                                         uint32_t io_int_parm,
                                         uint32_t io_int_word)
{
}
static inline void kvm_s390_crw_mchk(void)
{
}
static inline void kvm_s390_enable_css_support(S390CPU *cpu)
{
}
static inline int kvm_s390_assign_subch_ioeventfd(EventNotifier *notifier,
                                                  uint32_t sch, int vq,
                                                  bool assign)
{
    return -ENOSYS;
}
static inline int kvm_s390_cpu_restart(S390CPU *cpu)
{
    return -ENOSYS;
}
static inline void kvm_s390_clear_cmma_callback(void *opaque)
{
}
static inline int kvm_s390_get_memslot_count(KVMState *s)
{
    return MAX_AVAIL_SLOTS;
}
static inline int kvm_s390_set_cpu_state(S390CPU *cpu, uint8_t cpu_state)
{
    return -ENOSYS;
}
static inline void kvm_s390_reset_vcpu(S390CPU *cpu)
{
}
static inline int kvm_s390_set_mem_limit(KVMState *s, uint64_t new_limit,
                                         uint64_t *hw_limit)
{
    return 0;
}
static inline void kvm_s390_vcpu_interrupt_pre_save(S390CPU *cpu)
{
}
static inline int kvm_s390_vcpu_interrupt_post_load(S390CPU *cpu)
{
    return 0;
}
#endif

static inline int s390_set_memory_limit(uint64_t new_limit, uint64_t *hw_limit)
{
    if (kvm_enabled()) {
        return kvm_s390_set_mem_limit(kvm_state, new_limit, hw_limit);
    }
    return 0;
}

static inline void cmma_reset(S390CPU *cpu)
{
    if (kvm_enabled()) {
        CPUState *cs = CPU(cpu);
        kvm_s390_clear_cmma_callback(cs->kvm_state);
    }
}

static inline int s390_cpu_restart(S390CPU *cpu)
{
    if (kvm_enabled()) {
        return kvm_s390_cpu_restart(cpu);
    }
    return -ENOSYS;
}

static inline int s390_get_memslot_count(KVMState *s)
{
    if (kvm_enabled()) {
        return kvm_s390_get_memslot_count(s);
    } else {
        return MAX_AVAIL_SLOTS;
    }
}

void s390_io_interrupt(uint16_t subchannel_id, uint16_t subchannel_nr,
                       uint32_t io_int_parm, uint32_t io_int_word);
void s390_crw_mchk(void);

static inline int s390_assign_subch_ioeventfd(EventNotifier *notifier,
                                              uint32_t sch_id, int vq,
                                              bool assign)
{
    return kvm_s390_assign_subch_ioeventfd(notifier, sch_id, vq, assign);
}

#ifdef CONFIG_KVM
static inline bool vregs_needed(void *opaque)
{
    if (kvm_enabled()) {
        return kvm_check_extension(kvm_state, KVM_CAP_S390_VECTOR_REGISTERS);
    }
    return 0;
}
#else
static inline bool vregs_needed(void *opaque)
{
    return 0;
}
#endif

#endif