/* SPDX-License-Identifier: MIT */
/******************************************************************************
 * memory.h
 *
 * Memory reservation and information.
 *
 * Copyright (c) 2005, Keir Fraser <keir@xensource.com>
 */

#ifndef __XEN_PUBLIC_MEMORY_H__
#define __XEN_PUBLIC_MEMORY_H__

#include "xen.h"
#include "physdev.h"

/*
 * Increase or decrease the specified domain's memory reservation. Returns the
 * number of extents successfully allocated or freed.
 * arg == addr of struct xen_memory_reservation.
 */
#define XENMEM_increase_reservation 0
#define XENMEM_decrease_reservation 1
#define XENMEM_populate_physmap     6

#if __XEN_INTERFACE_VERSION__ >= 0x00030209
/*
 * Maximum # bits addressable by the user of the allocated region (e.g., I/O
 * devices often have a 32-bit limitation even in 64-bit systems). If zero
 * then the user has no addressing restriction. This field is not used by
 * XENMEM_decrease_reservation.
 */
#define XENMEMF_address_bits(x)     (x)
#define XENMEMF_get_address_bits(x) ((x) & 0xffu)
/* NUMA node to allocate from. */
#define XENMEMF_node(x)     (((x) + 1) << 8)
#define XENMEMF_get_node(x) ((((x) >> 8) - 1) & 0xffu)
/* Flag to populate physmap with populate-on-demand entries */
#define XENMEMF_populate_on_demand (1<<16)
/* Flag to request allocation only from the node specified */
#define XENMEMF_exact_node_request (1<<17)
#define XENMEMF_exact_node(n) (XENMEMF_node(n) | XENMEMF_exact_node_request)
/* Flag to indicate that the node specified is a virtual node */
#define XENMEMF_vnode (1<<18)
#endif

struct xen_memory_reservation {
    /*
     * XENMEM_increase_reservation:
     *   OUT: MFN (*not* GMFN) bases of extents that were allocated
     * XENMEM_decrease_reservation:
     *   IN:  GMFN bases of extents to free
     * XENMEM_populate_physmap:
     *   IN:  GPFN bases of extents to populate with memory
     *   OUT: GMFN bases of extents that were allocated
     *   (NB. This command also updates the mach_to_phys translation table)
     * XENMEM_claim_pages:
     *   IN: must be zero
     */
    XEN_GUEST_HANDLE(xen_pfn_t) extent_start;

    /* Number of extents, and size/alignment of each (2^extent_order pages). */
    xen_ulong_t  nr_extents;
    unsigned int extent_order;

#if __XEN_INTERFACE_VERSION__ >= 0x00030209
    /* XENMEMF flags. */
    unsigned int mem_flags;
#else
    unsigned int address_bits;
#endif

    /*
     * Domain whose reservation is being changed.
     * Unprivileged domains can specify only DOMID_SELF.
     */
    domid_t      domid;
};
typedef struct xen_memory_reservation xen_memory_reservation_t;
DEFINE_XEN_GUEST_HANDLE(xen_memory_reservation_t);
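
/*
 * Illustrative sketch (not part of this interface): a ballooning guest
 * might release pages as follows. HYPERVISOR_memory_op() is the guest
 * OS's hypercall wrapper and is not defined in this header; gmfn_list
 * and nr_pages are hypothetical caller state.
 *
 *     struct xen_memory_reservation reservation = { 0 };
 *     long rc;
 *
 *     set_xen_guest_handle(reservation.extent_start, gmfn_list);
 *     reservation.nr_extents   = nr_pages;
 *     reservation.extent_order = 0;
 *     reservation.domid        = DOMID_SELF;
 *     rc = HYPERVISOR_memory_op(XENMEM_decrease_reservation, &reservation);
 *
 * On success, rc is the number of extents actually freed, which may be
 * fewer than nr_pages.
 */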

/*
 * An atomic exchange of memory pages. If return code is zero then
 * @out.extent_start provides GMFNs of the newly-allocated memory.
 * Returns zero on complete success, otherwise a negative error code.
 * On complete success, @nr_exchanged == @in.nr_extents; on partial
 * success, @nr_exchanged indicates how much work was done.
 *
 * Note that only PV guests can use this operation.
 */
#define XENMEM_exchange 11

struct xen_memory_exchange {
    /*
     * [IN] Details of memory extents to be exchanged (GMFN bases).
     * Note that @in.address_bits is ignored and unused.
     */
    struct xen_memory_reservation in;

    /*
     * [IN/OUT] Details of new memory extents.
     * We require that:
     *  1. @in.domid == @out.domid
     *  2. @in.nr_extents  << @in.extent_order ==
     *     @out.nr_extents << @out.extent_order
     *  3. @in.extent_start and @out.extent_start lists must not overlap
     *  4. @out.extent_start lists GPFN bases to be populated
     *  5. @out.extent_start is overwritten with allocated GMFN bases
     */
    struct xen_memory_reservation out;

    /*
     * [OUT] Number of input extents that were successfully exchanged:
     *  1. The first @nr_exchanged input extents were successfully
     *     deallocated.
     *  2. The corresponding first entries in the output extent list correctly
     *     indicate the GMFNs that were successfully exchanged.
     *  3. All other input and output extents are untouched.
     *  4. If not all input extents are exchanged then the return code of this
     *     command will be non-zero.
     *  5. THIS FIELD MUST BE INITIALISED TO ZERO BY THE CALLER!
     */
    xen_ulong_t nr_exchanged;
};
typedef struct xen_memory_exchange xen_memory_exchange_t;
DEFINE_XEN_GUEST_HANDLE(xen_memory_exchange_t);
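
/*
 * Illustrative sketch (PV guests only; not part of this interface):
 * trading 16 order-0 pages for one machine-contiguous order-4 extent
 * below 4GB, e.g. for a DMA buffer. A modern interface version (so that
 * mem_flags exists) and the hypothetical wrapper from above are assumed;
 * in_gmfns is a caller array of 16 GMFNs and out_gpfn a single GPFN base.
 * Zero-initialisation also satisfies the requirement that nr_exchanged
 * be zero on entry.
 *
 *     struct xen_memory_exchange xchg = { 0 };
 *
 *     set_xen_guest_handle(xchg.in.extent_start, in_gmfns);
 *     xchg.in.nr_extents    = 16;
 *     xchg.in.extent_order  = 0;
 *     xchg.in.domid         = DOMID_SELF;
 *     set_xen_guest_handle(xchg.out.extent_start, &out_gpfn);
 *     xchg.out.nr_extents   = 1;
 *     xchg.out.extent_order = 4;
 *     xchg.out.mem_flags    = XENMEMF_address_bits(32);
 *     xchg.out.domid        = DOMID_SELF;
 *     rc = HYPERVISOR_memory_op(XENMEM_exchange, &xchg);
 *
 * Note 16 << 0 == 1 << 4, as required by constraint 2 above.
 */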

/*
 * Returns the maximum machine frame number of mapped RAM in this system.
 * This command always succeeds (it never returns an error code).
 * arg == NULL.
 */
#define XENMEM_maximum_ram_page 2

struct xen_memory_domain {
    /* [IN] Domain whose information is being queried. */
    domid_t domid;
};

/*
 * Returns the current or maximum memory reservation, in pages, of the
 * specified domain (may be DOMID_SELF). Returns -ve errcode on failure.
 * arg == addr of struct xen_memory_domain.
 */
#define XENMEM_current_reservation 3
#define XENMEM_maximum_reservation 4
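
/*
 * Illustrative sketch (not part of this interface): querying the calling
 * domain's current reservation, assuming the usual hypercall wrapper.
 *
 *     struct xen_memory_domain dom = { .domid = DOMID_SELF };
 *     long pages = HYPERVISOR_memory_op(XENMEM_current_reservation, &dom);
 *
 * A negative return value is an error code; otherwise it is the current
 * reservation in pages.
 */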

/*
 * Returns the maximum GPFN in use by the specified domain (may be
 * DOMID_SELF). Returns -ve errcode on failure.
 * arg == addr of struct xen_memory_domain.
 */
#define XENMEM_maximum_gpfn 14

/*
 * Returns a list of MFN bases of 2MB extents comprising the machine_to_phys
 * mapping table. Architectures which do not have a m2p table do not implement
 * this command.
 * arg == addr of xen_machphys_mfn_list_t.
 */
#define XENMEM_machphys_mfn_list 5

struct xen_machphys_mfn_list {
    /*
     * Size of the 'extent_start' array. Fewer entries will be filled if the
     * machphys table is smaller than max_extents * 2MB.
     */
    unsigned int max_extents;

    /*
     * Pointer to buffer to fill with list of extent starts. If there are
     * any large discontiguities in the machine address space, 2MB gaps in
     * the machphys table will be represented by an MFN base of zero.
     */
    XEN_GUEST_HANDLE(xen_pfn_t) extent_start;

    /*
     * Number of extents written to the above array. This will be smaller
     * than 'max_extents' if the machphys table is smaller than max_e * 2MB.
     */
    unsigned int nr_extents;
};
typedef struct xen_machphys_mfn_list xen_machphys_mfn_list_t;
DEFINE_XEN_GUEST_HANDLE(xen_machphys_mfn_list_t);
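
/*
 * Illustrative sketch (not part of this interface): fetching the m2p
 * extent list into a hypothetical caller array 'extents' of MAX_EXTENTS
 * entries, assuming the usual hypercall wrapper.
 *
 *     struct xen_machphys_mfn_list list = { 0 };
 *
 *     list.max_extents = MAX_EXTENTS;
 *     set_xen_guest_handle(list.extent_start, extents);
 *     rc = HYPERVISOR_memory_op(XENMEM_machphys_mfn_list, &list);
 *
 * On success the first list.nr_extents entries of 'extents' are valid.
 */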

/*
 * For a compat caller, this is identical to XENMEM_machphys_mfn_list.
 *
 * For a non-compat caller, this behaves like XENMEM_machphys_mfn_list, but
 * returns the MFNs making up the compatibility m2p table.
 */
#define XENMEM_machphys_compat_mfn_list 25

/*
 * Returns the location in virtual address space of the machine_to_phys
 * mapping table. Architectures which do not have a m2p table, or which do not
 * map it by default into guest address space, do not implement this command.
 * arg == addr of xen_machphys_mapping_t.
 */
#define XENMEM_machphys_mapping 12

struct xen_machphys_mapping {
    xen_ulong_t v_start, v_end; /* Start and end virtual addresses.   */
    xen_ulong_t max_mfn;        /* Maximum MFN that can be looked up. */
};
typedef struct xen_machphys_mapping xen_machphys_mapping_t;
DEFINE_XEN_GUEST_HANDLE(xen_machphys_mapping_t);

/* Source mapping space. */
/* ` enum phys_map_space { */
#define XENMAPSPACE_shared_info  0 /* shared info page */
#define XENMAPSPACE_grant_table  1 /* grant table page */
#define XENMAPSPACE_gmfn         2 /* GMFN */
#define XENMAPSPACE_gmfn_range   3 /* GMFN range, XENMEM_add_to_physmap only. */
#define XENMAPSPACE_gmfn_foreign 4 /* GMFN from another dom,
                                    * XENMEM_add_to_physmap_batch only. */
#define XENMAPSPACE_dev_mmio     5 /* device mmio region
                                    * ARM only; the region is mapped in
                                    * Stage-2 using the Normal Memory
                                    * Inner/Outer Write-Back Cacheable
                                    * memory attribute. */
/* ` } */

/*
 * Sets the GPFN at which a particular page appears in the specified guest's
 * physical address space (translated guests only).
 * arg == addr of xen_add_to_physmap_t.
 */
#define XENMEM_add_to_physmap 7

struct xen_add_to_physmap {
    /* Which domain to change the mapping for. */
    domid_t domid;

    /* Number of pages to go through for gmfn_range */
    uint16_t size;

    unsigned int space; /* => enum phys_map_space */

#define XENMAPIDX_grant_table_status 0x80000000

    /* Index into space being mapped. */
    xen_ulong_t idx;

    /* GPFN in domid where the source mapping page should appear. */
    xen_pfn_t gpfn;
};
typedef struct xen_add_to_physmap xen_add_to_physmap_t;
DEFINE_XEN_GUEST_HANDLE(xen_add_to_physmap_t);
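
/*
 * Illustrative sketch (not part of this interface): a translated guest
 * placing its shared info page at a chosen GPFN, assuming the usual
 * hypercall wrapper; shared_info_gpfn is hypothetical caller state.
 *
 *     struct xen_add_to_physmap xatp = {
 *         .domid = DOMID_SELF,
 *         .space = XENMAPSPACE_shared_info,
 *         .idx   = 0,
 *         .gpfn  = shared_info_gpfn,
 *     };
 *
 *     rc = HYPERVISOR_memory_op(XENMEM_add_to_physmap, &xatp);
 */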

/* A batched version of add_to_physmap. */
#define XENMEM_add_to_physmap_batch 23

struct xen_add_to_physmap_batch {
    /* IN */
    /* Which domain to change the mapping for. */
    domid_t domid;
    uint16_t space; /* => enum phys_map_space */

    /* Number of pages to go through */
    uint16_t size;

#if __XEN_INTERFACE_VERSION__ < 0x00040700
    domid_t foreign_domid; /* IFF gmfn_foreign. Should be 0 for other spaces. */
#else
    union xen_add_to_physmap_batch_extra {
        domid_t foreign_domid; /* gmfn_foreign */
        uint16_t res0;         /* All the other spaces. Should be 0 */
    } u;
#endif

    /* Indexes into space being mapped. */
    XEN_GUEST_HANDLE(xen_ulong_t) idxs;

    /* GPFN in domid where the source mapping page should appear. */
    XEN_GUEST_HANDLE(xen_pfn_t) gpfns;

    /* OUT */

    /* Per index error code. */
    XEN_GUEST_HANDLE(int) errs;
};
typedef struct xen_add_to_physmap_batch xen_add_to_physmap_batch_t;
DEFINE_XEN_GUEST_HANDLE(xen_add_to_physmap_batch_t);

#if __XEN_INTERFACE_VERSION__ < 0x00040400
#define XENMEM_add_to_physmap_range XENMEM_add_to_physmap_batch
#define xen_add_to_physmap_range xen_add_to_physmap_batch
typedef struct xen_add_to_physmap_batch xen_add_to_physmap_range_t;
DEFINE_XEN_GUEST_HANDLE(xen_add_to_physmap_range_t);
#endif
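
/*
 * Illustrative sketch (not part of this interface): mapping 'count'
 * frames of a foreign domain, assuming interface version >= 0x00040700
 * and the usual hypercall wrapper; fdom, idxs, gpfns and errs are
 * hypothetical caller state, the arrays having 'count' entries each.
 *
 *     struct xen_add_to_physmap_batch xatpb = { 0 };
 *
 *     xatpb.domid = DOMID_SELF;
 *     xatpb.space = XENMAPSPACE_gmfn_foreign;
 *     xatpb.size  = count;
 *     xatpb.u.foreign_domid = fdom;
 *     set_xen_guest_handle(xatpb.idxs, idxs);
 *     set_xen_guest_handle(xatpb.gpfns, gpfns);
 *     set_xen_guest_handle(xatpb.errs, errs);
 *     rc = HYPERVISOR_memory_op(XENMEM_add_to_physmap_batch, &xatpb);
 *
 * Per-index results are written to errs[].
 */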

/*
 * Unmaps the page appearing at a particular GPFN from the specified guest's
 * physical address space (translated guests only).
 * arg == addr of xen_remove_from_physmap_t.
 */
#define XENMEM_remove_from_physmap 15

struct xen_remove_from_physmap {
    /* Which domain to change the mapping for. */
    domid_t domid;

    /* GPFN of the current mapping of the page. */
    xen_pfn_t gpfn;
};
typedef struct xen_remove_from_physmap xen_remove_from_physmap_t;
DEFINE_XEN_GUEST_HANDLE(xen_remove_from_physmap_t);

/*** REMOVED ***/
/*#define XENMEM_translate_gpfn_list 8*/

/*
 * Returns the pseudo-physical memory map as it was when the domain
 * was started (specified by XENMEM_set_memory_map).
 * arg == addr of xen_memory_map_t.
 */
#define XENMEM_memory_map 9

struct xen_memory_map {
    /*
     * On call the number of entries which can be stored in buffer. On
     * return the number of entries which have been stored in
     * buffer.
     */
    unsigned int nr_entries;

    /*
     * Entries in the buffer are in the same format as returned by the
     * BIOS INT 0x15 EAX=0xE820 call.
     */
    XEN_GUEST_HANDLE(void) buffer;
};
typedef struct xen_memory_map xen_memory_map_t;
DEFINE_XEN_GUEST_HANDLE(xen_memory_map_t);
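
/*
 * Illustrative sketch (not part of this interface): retrieving the
 * E820-format map, assuming the usual hypercall wrapper; 'map' is a
 * hypothetical caller array of E820MAX caller-defined e820 entries.
 *
 *     struct xen_memory_map memmap;
 *
 *     memmap.nr_entries = E820MAX;
 *     set_xen_guest_handle(memmap.buffer, map);
 *     rc = HYPERVISOR_memory_op(XENMEM_memory_map, &memmap);
 *
 * On success the first memmap.nr_entries entries of 'map' are valid.
 */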

/*
 * Returns the real physical memory map. Passes the same structure as
 * XENMEM_memory_map.
 * Specifying buffer as NULL will return the number of entries required
 * to store the complete memory map.
 * arg == addr of xen_memory_map_t.
 */
#define XENMEM_machine_memory_map 10

/*
 * Set the pseudo-physical memory map of a domain, as returned by
 * XENMEM_memory_map.
 * arg == addr of xen_foreign_memory_map_t.
 */
#define XENMEM_set_memory_map 13

struct xen_foreign_memory_map {
    domid_t domid;
    struct xen_memory_map map;
};
typedef struct xen_foreign_memory_map xen_foreign_memory_map_t;
DEFINE_XEN_GUEST_HANDLE(xen_foreign_memory_map_t);

#define XENMEM_set_pod_target 16
#define XENMEM_get_pod_target 17

struct xen_pod_target {
    /* IN */
    uint64_t target_pages;
    /* OUT */
    uint64_t tot_pages;
    uint64_t pod_cache_pages;
    uint64_t pod_entries;
    /* IN */
    domid_t domid;
};
typedef struct xen_pod_target xen_pod_target_t;
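
/*
 * Illustrative sketch (typically toolstack use; not part of this
 * interface): reading a domain's populate-on-demand state, assuming the
 * usual hypercall wrapper.
 *
 *     struct xen_pod_target pod = { .domid = domid };
 *
 *     rc = HYPERVISOR_memory_op(XENMEM_get_pod_target, &pod);
 *
 * On success tot_pages, pod_cache_pages and pod_entries are filled in;
 * XENMEM_set_pod_target additionally consumes target_pages as input.
 */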

#if defined(__XEN__) || defined(__XEN_TOOLS__)

#ifndef uint64_aligned_t
#define uint64_aligned_t uint64_t
#endif

/*
 * Get the number of MFNs saved through memory sharing.
 * The call never fails.
 */
#define XENMEM_get_sharing_freed_pages  18
#define XENMEM_get_sharing_shared_pages 19

#define XENMEM_paging_op          20
#define XENMEM_paging_op_nominate 0
#define XENMEM_paging_op_evict    1
#define XENMEM_paging_op_prep     2

struct xen_mem_paging_op {
    uint8_t op; /* XENMEM_paging_op_* */
    domid_t domain;

    /* IN: (XENMEM_paging_op_prep) buffer to immediately fill page from */
    XEN_GUEST_HANDLE_64(const_uint8) buffer;
    /* IN: gfn of page being operated on */
    uint64_aligned_t gfn;
};
typedef struct xen_mem_paging_op xen_mem_paging_op_t;
DEFINE_XEN_GUEST_HANDLE(xen_mem_paging_op_t);

#define XENMEM_access_op            21
#define XENMEM_access_op_set_access 0
#define XENMEM_access_op_get_access 1
/*
 * XENMEM_access_op_enable_emulate and XENMEM_access_op_disable_emulate are
 * currently unused, but since they have been in use, please do not reuse
 * their values:
 *
 * #define XENMEM_access_op_enable_emulate  2
 * #define XENMEM_access_op_disable_emulate 3
 */
#define XENMEM_access_op_set_access_multi 4
  356. #define XENMEM_access_op_set_access_multi 4
  357. typedef enum {
  358. XENMEM_access_n,
  359. XENMEM_access_r,
  360. XENMEM_access_w,
  361. XENMEM_access_rw,
  362. XENMEM_access_x,
  363. XENMEM_access_rx,
  364. XENMEM_access_wx,
  365. XENMEM_access_rwx,
  366. /*
  367. * Page starts off as r-x, but automatically
  368. * change to r-w on a write
  369. */
  370. XENMEM_access_rx2rw,
  371. /*
  372. * Log access: starts off as n, automatically
  373. * goes to rwx, generating an event without
  374. * pausing the vcpu
  375. */
  376. XENMEM_access_n2rwx,
  377. /* Take the domain default */
  378. XENMEM_access_default
  379. } xenmem_access_t;

struct xen_mem_access_op {
    /* XENMEM_access_op_* */
    uint8_t op;
    /* xenmem_access_t */
    uint8_t access;
    domid_t domid;
    /*
     * Number of pages for set op (or size of pfn_list for
     * XENMEM_access_op_set_access_multi)
     * Ignored on setting default access and other ops
     */
    uint32_t nr;
    /*
     * First pfn for set op
     * pfn for get op
     * ~0ull is used to set and get the default access for pages
     */
    uint64_aligned_t pfn;
    /*
     * List of pfns to set access for
     * Used only with XENMEM_access_op_set_access_multi
     */
    XEN_GUEST_HANDLE(const_uint64) pfn_list;
    /*
     * Corresponding list of access settings for pfn_list
     * Used only with XENMEM_access_op_set_access_multi
     */
    XEN_GUEST_HANDLE(const_uint8) access_list;
};
typedef struct xen_mem_access_op xen_mem_access_op_t;
DEFINE_XEN_GUEST_HANDLE(xen_mem_access_op_t);
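
/*
 * Illustrative sketch (not part of this interface): restricting a single
 * gfn to read-execute, e.g. from an introspection agent, assuming the
 * usual hypercall wrapper; 'domid' and 'gfn' are hypothetical caller
 * state.
 *
 *     struct xen_mem_access_op mao = { 0 };
 *
 *     mao.op     = XENMEM_access_op_set_access;
 *     mao.access = XENMEM_access_rx;
 *     mao.domid  = domid;
 *     mao.pfn    = gfn;
 *     mao.nr     = 1;
 *     rc = HYPERVISOR_memory_op(XENMEM_access_op, &mao);
 */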

#define XENMEM_sharing_op               22
#define XENMEM_sharing_op_nominate_gfn  0
#define XENMEM_sharing_op_nominate_gref 1
#define XENMEM_sharing_op_share         2
#define XENMEM_sharing_op_debug_gfn     3
#define XENMEM_sharing_op_debug_mfn     4
#define XENMEM_sharing_op_debug_gref    5
#define XENMEM_sharing_op_add_physmap   6
#define XENMEM_sharing_op_audit         7
#define XENMEM_sharing_op_range_share   8
#define XENMEM_sharing_op_fork          9
#define XENMEM_sharing_op_fork_reset    10

#define XENMEM_SHARING_OP_S_HANDLE_INVALID (-10)
#define XENMEM_SHARING_OP_C_HANDLE_INVALID (-9)

/*
 * The following allows sharing of grant refs. This is useful for sharing
 * utilities sitting as "filters" in IO backends (e.g. memshr + blktap(2)).
 * The IO backend is only exposed to grant references, and this allows
 * sharing of the grefs.
 */
#define XENMEM_SHARING_OP_FIELD_IS_GREF_FLAG (xen_mk_ullong(1) << 62)

#define XENMEM_SHARING_OP_FIELD_MAKE_GREF(field, val) \
    (field) = (XENMEM_SHARING_OP_FIELD_IS_GREF_FLAG | (val))
#define XENMEM_SHARING_OP_FIELD_IS_GREF(field) \
    ((field) & XENMEM_SHARING_OP_FIELD_IS_GREF_FLAG)
#define XENMEM_SHARING_OP_FIELD_GET_GREF(field) \
    ((field) & (~XENMEM_SHARING_OP_FIELD_IS_GREF_FLAG))

struct xen_mem_sharing_op {
    uint8_t op;     /* XENMEM_sharing_op_* */
    domid_t domain;

    union {
        struct mem_sharing_op_nominate {  /* OP_NOMINATE_xxx           */
            union {
                uint64_aligned_t gfn;     /* IN: gfn to nominate       */
                uint32_t grant_ref;       /* IN: grant ref to nominate */
            } u;
            uint64_aligned_t handle;      /* OUT: the handle           */
        } nominate;
        struct mem_sharing_op_share {       /* OP_SHARE/ADD_PHYSMAP */
            uint64_aligned_t source_gfn;    /* IN: the gfn of the source page */
            uint64_aligned_t source_handle; /* IN: handle to the source page */
            uint64_aligned_t client_gfn;    /* IN: the client gfn */
            uint64_aligned_t client_handle; /* IN: handle to the client page */
            domid_t client_domain;          /* IN: the client domain id */
        } share;
        struct mem_sharing_op_range {   /* OP_RANGE_SHARE */
            uint64_aligned_t first_gfn; /* IN: the first gfn */
            uint64_aligned_t last_gfn;  /* IN: the last gfn */
            uint64_aligned_t opaque;    /* Must be set to 0 */
            domid_t client_domain;      /* IN: the client domain id */
            uint16_t _pad[3];           /* Must be set to 0 */
        } range;
        struct mem_sharing_op_debug {   /* OP_DEBUG_xxx */
            union {
                uint64_aligned_t gfn;   /* IN: gfn to debug  */
                uint64_aligned_t mfn;   /* IN: mfn to debug  */
                uint32_t gref;          /* IN: gref to debug */
            } u;
        } debug;
        struct mem_sharing_op_fork {    /* OP_FORK{,_RESET} */
            domid_t parent_domain;      /* IN: parent's domain id */
/* Only makes sense for short-lived forks */
#define XENMEM_FORK_WITH_IOMMU_ALLOWED (1u << 0)
/* Only makes sense for short-lived forks */
#define XENMEM_FORK_BLOCK_INTERRUPTS   (1u << 1)
#define XENMEM_FORK_RESET_STATE        (1u << 2)
#define XENMEM_FORK_RESET_MEMORY       (1u << 3)
            uint16_t flags;             /* IN: optional settings */
            uint32_t pad;               /* Must be set to 0 */
        } fork;
    } u;
};
typedef struct xen_mem_sharing_op xen_mem_sharing_op_t;
DEFINE_XEN_GUEST_HANDLE(xen_mem_sharing_op_t);

/*
 * Attempt to stake a claim for a domain on a quantity of pages
 * of system RAM, but _not_ assign specific pageframes. Only
 * arithmetic is performed so the hypercall is very fast and need
 * not be preemptible, thus sidestepping time-of-check-time-of-use
 * races for memory allocation. Returns 0 if the hypervisor page
 * allocator has atomically and successfully claimed the requested
 * number of pages, else non-zero.
 *
 * Any domain may have only one active claim. When sufficient memory
 * has been allocated to resolve the claim, the claim silently expires.
 * Claiming zero pages effectively resets any outstanding claim and
 * is always successful.
 *
 * Note that a valid claim may be staked even after memory has been
 * allocated for a domain. In this case, the claim is not incremental,
 * i.e. if the domain's total page count is 3, and a claim is staked
 * for 10, only 7 additional pages are claimed.
 *
 * Caller must be privileged or the hypercall fails.
 */
#define XENMEM_claim_pages 24

/*
 * XENMEM_claim_pages flags - there are no flags at this time.
 * The zero value is appropriate.
 */
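
/*
 * Illustrative sketch (privileged callers only; not part of this
 * interface): staking a claim for nr_pages before populating a new
 * domain, assuming the usual hypercall wrapper. Note that extent_start
 * must be zero for XENMEM_claim_pages, which zero-initialisation
 * provides.
 *
 *     struct xen_memory_reservation claim = { 0 };
 *
 *     claim.nr_extents = nr_pages;
 *     claim.domid      = domid;
 *     rc = HYPERVISOR_memory_op(XENMEM_claim_pages, &claim);
 *
 * rc == 0 means the allocator has reserved the pages; claiming zero
 * pages later drops the claim.
 */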

/*
 * With some legacy devices, certain guest-physical addresses cannot safely
 * be used for other purposes, e.g. to map guest RAM. This hypercall
 * enumerates those regions so the toolstack can avoid using them.
 */
#define XENMEM_reserved_device_memory_map 27

struct xen_reserved_device_memory {
    xen_pfn_t start_pfn;
    xen_ulong_t nr_pages;
};
typedef struct xen_reserved_device_memory xen_reserved_device_memory_t;
DEFINE_XEN_GUEST_HANDLE(xen_reserved_device_memory_t);

struct xen_reserved_device_memory_map {
#define XENMEM_RDM_ALL 1 /* Request all regions (ignore dev union). */
    /* IN */
    uint32_t flags;
    /*
     * IN/OUT
     *
     * Gets set to the required number of entries when too low,
     * signaled by error code -ERANGE.
     */
    unsigned int nr_entries;
    /* OUT */
    XEN_GUEST_HANDLE(xen_reserved_device_memory_t) buffer;
    /* IN */
    union {
        physdev_pci_device_t pci;
    } dev;
};
typedef struct xen_reserved_device_memory_map xen_reserved_device_memory_map_t;
DEFINE_XEN_GUEST_HANDLE(xen_reserved_device_memory_map_t);

#endif /* defined(__XEN__) || defined(__XEN_TOOLS__) */

/*
 * Get the pages for a particular guest resource, so that they can be
 * mapped directly by a tools domain.
 */
#define XENMEM_acquire_resource 28

struct xen_mem_acquire_resource {
    /* IN - The domain whose resource is to be mapped */
    domid_t domid;

    /* IN - the type of resource */
    uint16_t type;

#define XENMEM_resource_ioreq_server 0
#define XENMEM_resource_grant_table  1
#define XENMEM_resource_vmtrace_buf  2

    /*
     * IN - a type-specific resource identifier, which must be zero
     *      unless stated otherwise.
     *
     * type == XENMEM_resource_ioreq_server -> id == ioreq server id
     * type == XENMEM_resource_grant_table  -> id defined below
     */
    uint32_t id;

#define XENMEM_resource_grant_table_id_shared 0
#define XENMEM_resource_grant_table_id_status 1

    /*
     * IN/OUT
     *
     * As an IN parameter number of frames of the resource to be mapped.
     * This value may be updated over the course of the operation.
     *
     * When frame_list is NULL and nr_frames is 0, this is interpreted as a
     * request for the size of the resource, which shall be returned in the
     * nr_frames field.
     *
     * The size of a resource will never be zero, but a nonzero result doesn't
     * guarantee that a subsequent mapping request will be successful. There
     * are further type/id specific constraints which may change between the
     * two calls.
     */
    uint32_t nr_frames;

    /*
     * Padding field, must be zero on input.
     * In a previous version this was an output field with the lowest bit
     * named XENMEM_rsrc_acq_caller_owned. Future versions of this interface
     * will not reuse this bit as an output with the field being zero on
     * input.
     */
    uint32_t pad;

    /*
     * IN - the index of the initial frame to be mapped. This parameter
     *      is ignored if nr_frames is 0. This value may be updated
     *      over the course of the operation.
     */
    uint64_t frame;

#define XENMEM_resource_ioreq_server_frame_bufioreq 0
#define XENMEM_resource_ioreq_server_frame_ioreq(n) (1 + (n))

    /*
     * IN/OUT - If the tools domain is PV then, upon return, frame_list
     *          will be populated with the MFNs of the resource.
     *          If the tools domain is HVM then it is expected that, on
     *          entry, frame_list will be populated with a list of GFNs
     *          that will be mapped to the MFNs of the resource.
     *          If -EIO is returned then the frame_list has only been
     *          partially mapped and it is up to the caller to unmap all
     *          the GFNs.
     *          This parameter may be NULL if nr_frames is 0. This
     *          value may be updated over the course of the operation.
     */
    XEN_GUEST_HANDLE(xen_pfn_t) frame_list;
};
typedef struct xen_mem_acquire_resource xen_mem_acquire_resource_t;
DEFINE_XEN_GUEST_HANDLE(xen_mem_acquire_resource_t);
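
/*
 * Illustrative sketch (tools domain; not part of this interface): sizing
 * and then mapping a guest's shared grant table frames, assuming the
 * usual hypercall wrapper; 'gfns' is a hypothetical array which, for an
 * HVM tools domain, has been pre-populated with the GFNs to map into.
 *
 *     struct xen_mem_acquire_resource mar = { 0 };
 *
 *     mar.domid = domid;
 *     mar.type  = XENMEM_resource_grant_table;
 *     mar.id    = XENMEM_resource_grant_table_id_shared;
 *     rc = HYPERVISOR_memory_op(XENMEM_acquire_resource, &mar);
 *
 * mar.nr_frames now holds the size of the resource; map it:
 *
 *     set_xen_guest_handle(mar.frame_list, gfns);
 *     mar.frame = 0;
 *     rc = HYPERVISOR_memory_op(XENMEM_acquire_resource, &mar);
 */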

/*
 * XENMEM_get_vnumainfo is used by a guest to retrieve
 * its vNUMA topology from the hypervisor.
 */
#define XENMEM_get_vnumainfo 26

/* vNUMA node memory ranges */
struct xen_vmemrange {
    uint64_t start, end;
    unsigned int flags;
    unsigned int nid;
};
typedef struct xen_vmemrange xen_vmemrange_t;
DEFINE_XEN_GUEST_HANDLE(xen_vmemrange_t);

/*
 * vNUMA topology specifies the vNUMA node count, distance table,
 * memory ranges and vcpu-to-vnode mapping provided for guests.
 * On input to XENMEM_get_vnumainfo, the guest sets nr_vnodes,
 * nr_vmemranges and nr_vcpus to indicate the buffer space it has made
 * available. After filling the guest's structures, the hypervisor copies
 * nr_vnodes, nr_vmemranges and nr_vcpus back to the guest; if the
 * supplied values were incorrect, the expected values are returned
 * instead so the guest can retry.
 */
struct xen_vnuma_topology_info {
    /* IN */
    domid_t domid;
    uint16_t pad;
    /* IN/OUT */
    unsigned int nr_vnodes;
    unsigned int nr_vcpus;
    unsigned int nr_vmemranges;
    /* OUT */
    union {
        XEN_GUEST_HANDLE(uint) h;
        uint64_t pad;
    } vdistance;
    union {
        XEN_GUEST_HANDLE(uint) h;
        uint64_t pad;
    } vcpu_to_vnode;
    union {
        XEN_GUEST_HANDLE(xen_vmemrange_t) h;
        uint64_t pad;
    } vmemrange;
};
typedef struct xen_vnuma_topology_info xen_vnuma_topology_info_t;
DEFINE_XEN_GUEST_HANDLE(xen_vnuma_topology_info_t);
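
/*
 * Illustrative sketch (not part of this interface) of the two-step
 * pattern described above, assuming the usual hypercall wrapper. A first
 * call with zero counts reports the required sizes; the guest then
 * allocates buffers (hypothetical vdistance_buf, vcpu_to_vnode_buf and
 * vmemrange_buf below) and retries.
 *
 *     struct xen_vnuma_topology_info vnuma = { .domid = DOMID_SELF };
 *
 *     rc = HYPERVISOR_memory_op(XENMEM_get_vnumainfo, &vnuma);
 *
 * nr_vnodes, nr_vcpus and nr_vmemranges now hold the required sizes.
 *
 *     set_xen_guest_handle(vnuma.vdistance.h, vdistance_buf);
 *     set_xen_guest_handle(vnuma.vcpu_to_vnode.h, vcpu_to_vnode_buf);
 *     set_xen_guest_handle(vnuma.vmemrange.h, vmemrange_buf);
 *     rc = HYPERVISOR_memory_op(XENMEM_get_vnumainfo, &vnuma);
 */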

/* Next available subop number is 29 */

#endif /* __XEN_PUBLIC_MEMORY_H__ */

/*
 * Local variables:
 * mode: C
 * c-file-style: "BSD"
 * c-basic-offset: 4
 * tab-width: 4
 * indent-tabs-mode: nil
 * End:
 */