/*
 *  mmap support for qemu
 *
 *  Copyright (c) 2003 - 2008 Fabrice Bellard
 *
 *  This program is free software; you can redistribute it and/or modify
 *  it under the terms of the GNU General Public License as published by
 *  the Free Software Foundation; either version 2 of the License, or
 *  (at your option) any later version.
 *
 *  This program is distributed in the hope that it will be useful,
 *  but WITHOUT ANY WARRANTY; without even the implied warranty of
 *  MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
 *  GNU General Public License for more details.
 *
 *  You should have received a copy of the GNU General Public License
 *  along with this program; if not, see <http://www.gnu.org/licenses/>.
 */
#include "qemu/osdep.h"
#include "exec/page-protection.h"
#include "qemu.h"

static pthread_mutex_t mmap_mutex = PTHREAD_MUTEX_INITIALIZER;
static __thread int mmap_lock_count;
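
/*
 * mmap_lock() nests per thread: the pthread mutex is taken only when the
 * outermost mmap_lock() is entered and is released by the matching
 * mmap_unlock().
 */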
void mmap_lock(void)
{
    if (mmap_lock_count++ == 0) {
        pthread_mutex_lock(&mmap_mutex);
    }
}

void mmap_unlock(void)
{
    assert(mmap_lock_count > 0);
    if (--mmap_lock_count == 0) {
        pthread_mutex_unlock(&mmap_mutex);
    }
}

bool have_mmap_lock(void)
{
    return mmap_lock_count > 0 ? true : false;
}

/* Grab lock to make sure things are in a consistent state after fork().  */
void mmap_fork_start(void)
{
    if (mmap_lock_count)
        abort();
    pthread_mutex_lock(&mmap_mutex);
}

void mmap_fork_end(int child)
{
    if (child)
        pthread_mutex_init(&mmap_mutex, NULL);
    else
        pthread_mutex_unlock(&mmap_mutex);
}
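
/*
 * Change the protection of a guest range.  Host pages that the range only
 * partially covers get the union of the requested protection and the
 * protection of the other guest pages sharing them.
 */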
/* NOTE: all the constants are the HOST ones, but addresses are target. */
int target_mprotect(abi_ulong start, abi_ulong len, int prot)
{
    abi_ulong end, host_start, host_end, addr;
    int prot1, ret;

    qemu_log_mask(CPU_LOG_PAGE, "mprotect: start=0x" TARGET_ABI_FMT_lx
                  " len=0x" TARGET_ABI_FMT_lx " prot=%c%c%c\n", start, len,
                  prot & PROT_READ ? 'r' : '-',
                  prot & PROT_WRITE ? 'w' : '-',
                  prot & PROT_EXEC ? 'x' : '-');

    if ((start & ~TARGET_PAGE_MASK) != 0)
        return -EINVAL;
    len = TARGET_PAGE_ALIGN(len);
    end = start + len;
    if (end < start)
        return -EINVAL;
    prot &= PROT_READ | PROT_WRITE | PROT_EXEC;
    if (len == 0)
        return 0;

    mmap_lock();
    host_start = start & qemu_host_page_mask;
    host_end = HOST_PAGE_ALIGN(end);
    if (start > host_start) {
        /* handle host page containing start */
        prot1 = prot;
        for (addr = host_start; addr < start; addr += TARGET_PAGE_SIZE) {
            prot1 |= page_get_flags(addr);
        }
        if (host_end == host_start + qemu_host_page_size) {
            for (addr = end; addr < host_end; addr += TARGET_PAGE_SIZE) {
                prot1 |= page_get_flags(addr);
            }
            end = host_end;
        }
        ret = mprotect(g2h_untagged(host_start),
                       qemu_host_page_size, prot1 & PAGE_RWX);
        if (ret != 0)
            goto error;
        host_start += qemu_host_page_size;
    }
    if (end < host_end) {
        prot1 = prot;
        for (addr = end; addr < host_end; addr += TARGET_PAGE_SIZE) {
            prot1 |= page_get_flags(addr);
        }
        ret = mprotect(g2h_untagged(host_end - qemu_host_page_size),
                       qemu_host_page_size, prot1 & PAGE_RWX);
        if (ret != 0)
            goto error;
        host_end -= qemu_host_page_size;
    }

    /* handle the pages in the middle */
    if (host_start < host_end) {
        ret = mprotect(g2h_untagged(host_start), host_end - host_start, prot);
        if (ret != 0)
            goto error;
    }
    page_set_flags(start, start + len - 1, prot | PAGE_VALID);
    mmap_unlock();
    return 0;
error:
    mmap_unlock();
    return ret;
}

/*
 * Perform a pread on behalf of target_mmap.  We can reach EOF, we can be
 * interrupted by signals, and in general there's no good error return path.
 * If @zero, zero the rest of the block at EOF.
 * Return true on success.
 */
static bool mmap_pread(int fd, void *p, size_t len, off_t offset, bool zero)
{
    while (1) {
        ssize_t r = pread(fd, p, len, offset);

        if (likely(r == len)) {
            /* Complete */
            return true;
        }
        if (r == 0) {
            /* EOF */
            if (zero) {
                memset(p, 0, len);
            }
            return true;
        }
        if (r > 0) {
            /* Short read */
            p += r;
            len -= r;
            offset += r;
        } else if (errno != EINTR) {
            /* Error */
            return false;
        }
    }
}

/*
 * map an incomplete host page
 *
 * mmap_frag can be called with a valid fd, if flags doesn't contain one of
 * MAP_ANON, MAP_STACK, MAP_GUARD. If we need to map a page in those cases, we
 * pass fd == -1. However, if flags contains MAP_GUARD then MAP_ANON cannot be
 * added.
 *
 * * If fd is valid (not -1) we want to map the pages with MAP_ANON.
 * * If flags contains MAP_GUARD we don't want to add MAP_ANON because it
 *   will be rejected. See kern_mmap's enforcing of constraints for MAP_GUARD
 *   in sys/vm/vm_mmap.c.
 * * If flags contains MAP_ANON it doesn't matter if we add it or not.
 * * If flags contains MAP_STACK, mmap adds MAP_ANON when called, so it
 *   doesn't matter if we add it or not either. See enforcing of constraints
 *   for MAP_STACK in kern_mmap.
 *
 * Don't add MAP_ANON for the flags that use fd == -1 without specifying the
 * flags directly, with the assumption that future flags that require fd == -1
 * will also not require MAP_ANON.
 */
static int mmap_frag(abi_ulong real_start,
                     abi_ulong start, abi_ulong end,
                     int prot, int flags, int fd, abi_ulong offset)
{
    abi_ulong real_end, addr;
    void *host_start;
    int prot1, prot_new;

    real_end = real_start + qemu_host_page_size;
    host_start = g2h_untagged(real_start);

    /* get the protection of the target pages outside the mapping */
    prot1 = 0;
    for (addr = real_start; addr < real_end; addr++) {
        if (addr < start || addr >= end)
            prot1 |= page_get_flags(addr);
    }

    if (prot1 == 0) {
        /* no page was there, so we allocate one. See also above. */
        void *p = mmap(host_start, qemu_host_page_size, prot,
                       flags | ((fd != -1) ? MAP_ANON : 0), -1, 0);
        if (p == MAP_FAILED)
            return -1;
        prot1 = prot;
    }
    prot1 &= PAGE_RWX;

    prot_new = prot | prot1;
    if (fd != -1) {
        /* msync() won't work here, so we return an error if write is
           possible while it is a shared mapping */
        if ((flags & TARGET_BSD_MAP_FLAGMASK) == MAP_SHARED &&
            (prot & PROT_WRITE))
            return -1;

        /* adjust protection to be able to read */
        if (!(prot1 & PROT_WRITE))
            mprotect(host_start, qemu_host_page_size, prot1 | PROT_WRITE);

        /* read the corresponding file data */
        if (!mmap_pread(fd, g2h_untagged(start), end - start, offset, true)) {
            return -1;
        }

        /* put final protection */
        if (prot_new != (prot1 | PROT_WRITE))
            mprotect(host_start, qemu_host_page_size, prot_new);
    } else {
        if (prot_new != prot1) {
            mprotect(host_start, qemu_host_page_size, prot_new);
        }
        if (prot_new & PROT_WRITE) {
            memset(g2h_untagged(start), 0, end - start);
        }
    }
    return 0;
}
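
/*
 * Default guest address at which mmap_find_vma() starts searching when the
 * caller does not supply a hint.
 */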
#if HOST_LONG_BITS == 64 && TARGET_ABI_BITS == 64
# define TASK_UNMAPPED_BASE  (1ul << 38)
#else
# define TASK_UNMAPPED_BASE  0x40000000
#endif

abi_ulong mmap_next_start = TASK_UNMAPPED_BASE;

/*
 * Subroutine of mmap_find_vma, used when we have pre-allocated a chunk of
 * guest address space.
 */
static abi_ulong mmap_find_vma_reserved(abi_ulong start, abi_ulong size,
                                        abi_ulong alignment)
{
    abi_ulong ret;

    ret = page_find_range_empty(start, reserved_va, size, alignment);
    if (ret == -1 && start > TARGET_PAGE_SIZE) {
        /* Restart at the beginning of the address space. */
        ret = page_find_range_empty(TARGET_PAGE_SIZE, start - 1,
                                    size, alignment);
    }

    return ret;
}

/*
 * Find and reserve a free memory area of size 'size'. The search
 * starts at 'start'.
 * It must be called with mmap_lock() held.
 * Return -1 if error.
 */
static abi_ulong mmap_find_vma_aligned(abi_ulong start, abi_ulong size,
                                       abi_ulong alignment)
{
    void *ptr, *prev;
    abi_ulong addr;
    int flags;
    int wrapped, repeat;

    /* If 'start' == 0, then a default start address is used. */
    if (start == 0) {
        start = mmap_next_start;
    } else {
        start &= qemu_host_page_mask;
    }

    size = HOST_PAGE_ALIGN(size);

    if (reserved_va) {
        return mmap_find_vma_reserved(start, size,
            (alignment != 0 ? 1 << alignment :
             MAX(qemu_host_page_size, TARGET_PAGE_SIZE)));
    }

    addr = start;
    wrapped = repeat = 0;
    prev = 0;
    flags = MAP_ANON | MAP_PRIVATE;
    if (alignment != 0) {
        flags |= MAP_ALIGNED(alignment);
    }

    for (;; prev = ptr) {
        /*
         * Reserve needed memory area to avoid a race.
         * It should be discarded using:
         *  - mmap() with MAP_FIXED flag
         *  - mremap() with MREMAP_FIXED flag
         *  - shmat() with SHM_REMAP flag
         */
        ptr = mmap(g2h_untagged(addr), size, PROT_NONE,
                   flags, -1, 0);

        /* ENOMEM, if host address space has no memory */
        if (ptr == MAP_FAILED) {
            return (abi_ulong)-1;
        }

        /*
         * Count the number of sequential returns of the same address.
         * This is used to modify the search algorithm below.
         */
        repeat = (ptr == prev ? repeat + 1 : 0);

        if (h2g_valid(ptr + size - 1)) {
            addr = h2g(ptr);

            if ((addr & ~TARGET_PAGE_MASK) == 0) {
                /* Success.  */
                if (start == mmap_next_start && addr >= TASK_UNMAPPED_BASE) {
                    mmap_next_start = addr + size;
                }
                return addr;
            }

            /* The address is not properly aligned for the target.  */
            switch (repeat) {
            case 0:
                /*
                 * Assume the result that the kernel gave us is the
                 * first with enough free space, so start again at the
                 * next higher target page.
                 */
                addr = TARGET_PAGE_ALIGN(addr);
                break;
            case 1:
                /*
                 * Sometimes the kernel decides to perform the allocation
                 * at the top end of memory instead.
                 */
                addr &= TARGET_PAGE_MASK;
                break;
            case 2:
                /* Start over at low memory.  */
                addr = 0;
                break;
            default:
                /* Fail.  This unaligned block must be the last.  */
                addr = -1;
                break;
            }
        } else {
            /*
             * Since the result the kernel gave didn't fit, start
             * again at low memory.  If any repetition, fail.
             */
            addr = (repeat ? -1 : 0);
        }

        /* Unmap and try again.  */
        munmap(ptr, size);

        /* ENOMEM if we checked the whole of the target address space.  */
        if (addr == (abi_ulong)-1) {
            return (abi_ulong)-1;
        } else if (addr == 0) {
            if (wrapped) {
                return (abi_ulong)-1;
            }
            wrapped = 1;
            /*
             * Don't actually use 0 when wrapping, instead indicate
             * that we'd truly like an allocation in low memory.
             */
            addr = TARGET_PAGE_SIZE;
        } else if (wrapped && addr >= start) {
            return (abi_ulong)-1;
        }
    }
}

abi_ulong mmap_find_vma(abi_ulong start, abi_ulong size)
{
    return mmap_find_vma_aligned(start, size, 0);
}
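
/*
 * Emulate the guest mmap(2).  Whole host pages are mapped with host mmap();
 * host pages that the guest range only partially covers are handled by
 * mmap_frag() above.
 */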
/* NOTE: all the constants are the HOST ones */
abi_long target_mmap(abi_ulong start, abi_ulong len, int prot,
                     int flags, int fd, off_t offset)
{
    abi_ulong ret, end, real_start, real_end, retaddr, host_offset, host_len;

    mmap_lock();
    if (qemu_loglevel_mask(CPU_LOG_PAGE)) {
        qemu_log("mmap: start=0x" TARGET_ABI_FMT_lx
                 " len=0x" TARGET_ABI_FMT_lx " prot=%c%c%c flags=",
                 start, len,
                 prot & PROT_READ ? 'r' : '-',
                 prot & PROT_WRITE ? 'w' : '-',
                 prot & PROT_EXEC ? 'x' : '-');
        if (flags & MAP_ALIGNMENT_MASK) {
            qemu_log("MAP_ALIGNED(%u) ",
                     (flags & MAP_ALIGNMENT_MASK) >> MAP_ALIGNMENT_SHIFT);
        }
        if (flags & MAP_GUARD) {
            qemu_log("MAP_GUARD ");
        }
        if (flags & MAP_FIXED) {
            qemu_log("MAP_FIXED ");
        }
        if (flags & MAP_ANON) {
            qemu_log("MAP_ANON ");
        }
        if (flags & MAP_EXCL) {
            qemu_log("MAP_EXCL ");
        }
        if (flags & MAP_PRIVATE) {
            qemu_log("MAP_PRIVATE ");
        }
        if (flags & MAP_SHARED) {
            qemu_log("MAP_SHARED ");
        }
        if (flags & MAP_NOCORE) {
            qemu_log("MAP_NOCORE ");
        }
        if (flags & MAP_STACK) {
            qemu_log("MAP_STACK ");
        }
        qemu_log("fd=%d offset=0x%lx\n", fd, offset);
    }

    if ((flags & MAP_ANON) && fd != -1) {
        errno = EINVAL;
        goto fail;
    }
    if (flags & MAP_STACK) {
        if ((fd != -1) || ((prot & (PROT_READ | PROT_WRITE)) !=
                           (PROT_READ | PROT_WRITE))) {
            errno = EINVAL;
            goto fail;
        }
    }
    if ((flags & MAP_GUARD) && (prot != PROT_NONE || fd != -1 ||
        offset != 0 || (flags & (MAP_SHARED | MAP_PRIVATE |
        /* MAP_PREFAULT | */ /* MAP_PREFAULT not in mman.h */
        MAP_PREFAULT_READ | MAP_ANON | MAP_STACK)) != 0)) {
        errno = EINVAL;
        goto fail;
    }
    if (offset & ~TARGET_PAGE_MASK) {
        errno = EINVAL;
        goto fail;
    }
    if (len == 0) {
        errno = EINVAL;
        goto fail;
    }

    /* Check for overflows */
    len = TARGET_PAGE_ALIGN(len);
    if (len == 0) {
        errno = ENOMEM;
        goto fail;
    }

    real_start = start & qemu_host_page_mask;
    host_offset = offset & qemu_host_page_mask;

    /*
     * If the user is asking for the kernel to find a location, do that
     * before we truncate the length for mapping files below.
     */
    if (!(flags & MAP_FIXED)) {
        host_len = len + offset - host_offset;
        host_len = HOST_PAGE_ALIGN(host_len);
        if ((flags & MAP_ALIGNMENT_MASK) != 0)
            start = mmap_find_vma_aligned(real_start, host_len,
                (flags & MAP_ALIGNMENT_MASK) >> MAP_ALIGNMENT_SHIFT);
        else
            start = mmap_find_vma(real_start, host_len);
        if (start == (abi_ulong)-1) {
            errno = ENOMEM;
            goto fail;
        }
    }

    /*
     * When mapping files into a memory area larger than the file, accesses
     * to pages beyond the file size will cause a SIGBUS.
     *
     * For example, if mmapping a file of 100 bytes on a host with 4K pages
     * emulating a target with 8K pages, the target expects to be able to
     * access the first 8K. But the host will trap us on any access beyond
     * 4K.
     *
     * When emulating a target with a larger page size than the host's, we
     * may need to truncate file maps at EOF and add extra anonymous pages
     * up to the target's page boundary.
     */
    if ((qemu_real_host_page_size() < qemu_host_page_size) && fd != -1) {
        struct stat sb;

        if (fstat(fd, &sb) == -1) {
            goto fail;
        }

        /* Are we trying to create a map beyond EOF? */
        if (offset + len > sb.st_size) {
            /*
             * If so, truncate the file map at EOF aligned with
             * the host's real page size. Additional anonymous maps
             * will be created beyond EOF.
             */
            len = REAL_HOST_PAGE_ALIGN(sb.st_size - offset);
        }
    }

    if (!(flags & MAP_FIXED)) {
        unsigned long host_start;
        void *p;

        host_len = len + offset - host_offset;
        host_len = HOST_PAGE_ALIGN(host_len);

        /*
         * Note: we prefer to control the mapping address. It is
         * especially important if qemu_host_page_size >
         * qemu_real_host_page_size
         */
        p = mmap(g2h_untagged(start), host_len, prot,
                 flags | MAP_FIXED | ((fd != -1) ? MAP_ANON : 0), -1, 0);
        if (p == MAP_FAILED)
            goto fail;

        /* update start so that it points to the file position at 'offset' */
        host_start = (unsigned long)p;
        if (fd != -1) {
            p = mmap(g2h_untagged(start), len, prot,
                     flags | MAP_FIXED, fd, host_offset);
            if (p == MAP_FAILED) {
                munmap(g2h_untagged(start), host_len);
                goto fail;
            }
            host_start += offset - host_offset;
        }
        start = h2g(host_start);
    } else {
        if (start & ~TARGET_PAGE_MASK) {
            errno = EINVAL;
            goto fail;
        }
        end = start + len;
        real_end = HOST_PAGE_ALIGN(end);

        /*
         * Test if the requested memory area fits the target address space.
         * It can fail only on a 64-bit host with a 32-bit target.
         * On any other target/host combination host mmap() handles this
         * error correctly.
         */
        if (!guest_range_valid_untagged(start, len)) {
            errno = EINVAL;
            goto fail;
        }

        /*
         * worst case: we cannot map the file because the offset is not
         * aligned, so we read it
         */
        if (fd != -1 &&
            (offset & ~qemu_host_page_mask) != (start & ~qemu_host_page_mask)) {
            /*
             * msync() won't work here, so we return an error if write is
             * possible while it is a shared mapping
             */
            if ((flags & TARGET_BSD_MAP_FLAGMASK) == MAP_SHARED &&
                (prot & PROT_WRITE)) {
                errno = EINVAL;
                goto fail;
            }
            retaddr = target_mmap(start, len, prot | PROT_WRITE,
                                  MAP_FIXED | MAP_PRIVATE | MAP_ANON,
                                  -1, 0);
            if (retaddr == -1)
                goto fail;
            if (!mmap_pread(fd, g2h_untagged(start), len, offset, false)) {
                goto fail;
            }
            if (!(prot & PROT_WRITE)) {
                ret = target_mprotect(start, len, prot);
                assert(ret == 0);
            }
            goto the_end;
        }

        /* Reject the mapping if any page within the range is mapped */
        if ((flags & MAP_EXCL) && !page_check_range_empty(start, end - 1)) {
            errno = EINVAL;
            goto fail;
        }

        /* handle the start of the mapping */
        if (start > real_start) {
            if (real_end == real_start + qemu_host_page_size) {
                /* one single host page */
                ret = mmap_frag(real_start, start, end,
                                prot, flags, fd, offset);
                if (ret == -1)
                    goto fail;
                goto the_end1;
            }
            ret = mmap_frag(real_start, start, real_start + qemu_host_page_size,
                            prot, flags, fd, offset);
            if (ret == -1)
                goto fail;
            real_start += qemu_host_page_size;
        }
        /* handle the end of the mapping */
        if (end < real_end) {
            ret = mmap_frag(real_end - qemu_host_page_size,
                            real_end - qemu_host_page_size, end,
                            prot, flags, fd,
                            offset + real_end - qemu_host_page_size - start);
            if (ret == -1)
                goto fail;
            real_end -= qemu_host_page_size;
        }

        /* map the middle (easier) */
        if (real_start < real_end) {
            void *p;
            unsigned long offset1;

            if (flags & MAP_ANON)
                offset1 = 0;
            else
                offset1 = offset + real_start - start;
            p = mmap(g2h_untagged(real_start), real_end - real_start,
                     prot, flags, fd, offset1);
            if (p == MAP_FAILED)
                goto fail;
        }
    }
the_end1:
    page_set_flags(start, start + len - 1, prot | PAGE_VALID);
the_end:
#ifdef DEBUG_MMAP
    printf("ret=0x" TARGET_ABI_FMT_lx "\n", start);
    page_dump(stdout);
    printf("\n");
#endif
    mmap_unlock();
    return start;
fail:
    mmap_unlock();
    return -1;
}
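
/*
 * Replace a released guest range with a PROT_NONE anonymous mapping so the
 * reserved guest address space stays covered.  Host pages still partially
 * used by other guest pages are left untouched.
 */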
void mmap_reserve(abi_ulong start, abi_ulong size)
{
    abi_ulong real_start;
    abi_ulong real_end;
    abi_ulong addr;
    abi_ulong end;
    int prot;

    real_start = start & qemu_host_page_mask;
    real_end = HOST_PAGE_ALIGN(start + size);
    end = start + size;
    if (start > real_start) {
        /* handle host page containing start */
        prot = 0;
        for (addr = real_start; addr < start; addr += TARGET_PAGE_SIZE) {
            prot |= page_get_flags(addr);
        }
        if (real_end == real_start + qemu_host_page_size) {
            for (addr = end; addr < real_end; addr += TARGET_PAGE_SIZE) {
                prot |= page_get_flags(addr);
            }
            end = real_end;
        }
        if (prot != 0) {
            real_start += qemu_host_page_size;
        }
    }
    if (end < real_end) {
        prot = 0;
        for (addr = end; addr < real_end; addr += TARGET_PAGE_SIZE) {
            prot |= page_get_flags(addr);
        }
        if (prot != 0) {
            real_end -= qemu_host_page_size;
        }
    }
    if (real_start != real_end) {
        mmap(g2h_untagged(real_start), real_end - real_start, PROT_NONE,
             MAP_FIXED | MAP_ANON | MAP_PRIVATE, -1, 0);
    }
}
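
/*
 * Emulate munmap(2).  Host pages shared with neighbouring guest mappings
 * are kept; fully covered host pages are unmapped (or re-reserved when
 * reserved_va is in use), and the target page flags are cleared.
 */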
int target_munmap(abi_ulong start, abi_ulong len)
{
    abi_ulong end, real_start, real_end, addr;
    int prot, ret;

#ifdef DEBUG_MMAP
    printf("munmap: start=0x" TARGET_ABI_FMT_lx " len=0x"
           TARGET_ABI_FMT_lx "\n",
           start, len);
#endif
    if (start & ~TARGET_PAGE_MASK)
        return -EINVAL;
    len = TARGET_PAGE_ALIGN(len);
    if (len == 0)
        return -EINVAL;

    mmap_lock();
    end = start + len;
    real_start = start & qemu_host_page_mask;
    real_end = HOST_PAGE_ALIGN(end);

    if (start > real_start) {
        /* handle host page containing start */
        prot = 0;
        for (addr = real_start; addr < start; addr += TARGET_PAGE_SIZE) {
            prot |= page_get_flags(addr);
        }
        if (real_end == real_start + qemu_host_page_size) {
            for (addr = end; addr < real_end; addr += TARGET_PAGE_SIZE) {
                prot |= page_get_flags(addr);
            }
            end = real_end;
        }
        if (prot != 0)
            real_start += qemu_host_page_size;
    }
    if (end < real_end) {
        prot = 0;
        for (addr = end; addr < real_end; addr += TARGET_PAGE_SIZE) {
            prot |= page_get_flags(addr);
        }
        if (prot != 0)
            real_end -= qemu_host_page_size;
    }

    ret = 0;
    /* unmap what we can */
    if (real_start < real_end) {
        if (reserved_va) {
            mmap_reserve(real_start, real_end - real_start);
        } else {
            ret = munmap(g2h_untagged(real_start), real_end - real_start);
        }
    }

    if (ret == 0) {
        page_set_flags(start, start + len - 1, 0);
    }
    mmap_unlock();
    return ret;
}
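
/*
 * Emulate msync(2) by flushing the host pages that back the guest range.
 */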
int target_msync(abi_ulong start, abi_ulong len, int flags)
{
    abi_ulong end;

    if (start & ~TARGET_PAGE_MASK)
        return -EINVAL;
    len = TARGET_PAGE_ALIGN(len);
    end = start + len;
    if (end < start)
        return -EINVAL;
    if (end == start)
        return 0;

    start &= qemu_host_page_mask;
    return msync(g2h_untagged(start), end - start, flags);
}