mmap.c

/*
 * mmap support for qemu
 *
 * Copyright (c) 2003 - 2008 Fabrice Bellard
 *
 * This program is free software; you can redistribute it and/or modify
 * it under the terms of the GNU General Public License as published by
 * the Free Software Foundation; either version 2 of the License, or
 * (at your option) any later version.
 *
 * This program is distributed in the hope that it will be useful,
 * but WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
 * GNU General Public License for more details.
 *
 * You should have received a copy of the GNU General Public License
 * along with this program; if not, see <http://www.gnu.org/licenses/>.
 */
#include "qemu/osdep.h"

#include "qemu.h"

static pthread_mutex_t mmap_mutex = PTHREAD_MUTEX_INITIALIZER;
static __thread int mmap_lock_count;
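
/*
 * mmap_lock()/mmap_unlock() form a per-thread recursive lock: the
 * thread-local mmap_lock_count lets a thread re-enter mmap_lock()
 * without deadlocking, so the process-wide mutex is only taken on the
 * first acquisition and released on the last.
 */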
void mmap_lock(void)
{
    if (mmap_lock_count++ == 0) {
        pthread_mutex_lock(&mmap_mutex);
    }
}

void mmap_unlock(void)
{
    if (--mmap_lock_count == 0) {
        pthread_mutex_unlock(&mmap_mutex);
    }
}

bool have_mmap_lock(void)
{
    return mmap_lock_count > 0 ? true : false;
}
/* Grab lock to make sure things are in a consistent state after fork(). */
void mmap_fork_start(void)
{
    if (mmap_lock_count)
        abort();
    pthread_mutex_lock(&mmap_mutex);
}

void mmap_fork_end(int child)
{
    if (child)
        pthread_mutex_init(&mmap_mutex, NULL);
    else
        pthread_mutex_unlock(&mmap_mutex);
}

/* NOTE: all the constants are the HOST ones, but addresses are target. */
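/*
 * When the target page size is smaller than the host page size, a protection
 * change may cover only part of a host page. target_mprotect() therefore
 * handles up to three regions separately: a partial host page at the start,
 * whole host pages in the middle, and a partial host page at the end. For
 * the partial pages, the flags of the other target pages sharing the same
 * host page are ORed in so those pages keep working.
 */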
int target_mprotect(abi_ulong start, abi_ulong len, int prot)
{
    abi_ulong end, host_start, host_end, addr;
    int prot1, ret;

    qemu_log_mask(CPU_LOG_PAGE, "mprotect: start=0x" TARGET_ABI_FMT_lx
                  " len=0x" TARGET_ABI_FMT_lx " prot=%c%c%c\n", start, len,
                  prot & PROT_READ ? 'r' : '-',
                  prot & PROT_WRITE ? 'w' : '-',
                  prot & PROT_EXEC ? 'x' : '-');

    if ((start & ~TARGET_PAGE_MASK) != 0)
        return -EINVAL;
    len = TARGET_PAGE_ALIGN(len);
    end = start + len;
    if (end < start)
        return -EINVAL;
    prot &= PROT_READ | PROT_WRITE | PROT_EXEC;
    if (len == 0)
        return 0;

    mmap_lock();
    host_start = start & qemu_host_page_mask;
    host_end = HOST_PAGE_ALIGN(end);
    if (start > host_start) {
        /* handle host page containing start */
        prot1 = prot;
        for (addr = host_start; addr < start; addr += TARGET_PAGE_SIZE) {
            prot1 |= page_get_flags(addr);
        }
        if (host_end == host_start + qemu_host_page_size) {
            for (addr = end; addr < host_end; addr += TARGET_PAGE_SIZE) {
                prot1 |= page_get_flags(addr);
            }
            end = host_end;
        }
        ret = mprotect(g2h_untagged(host_start),
                       qemu_host_page_size, prot1 & PAGE_BITS);
        if (ret != 0)
            goto error;
        host_start += qemu_host_page_size;
    }
    if (end < host_end) {
        prot1 = prot;
        for (addr = end; addr < host_end; addr += TARGET_PAGE_SIZE) {
            prot1 |= page_get_flags(addr);
        }
        ret = mprotect(g2h_untagged(host_end - qemu_host_page_size),
                       qemu_host_page_size, prot1 & PAGE_BITS);
        if (ret != 0)
            goto error;
        host_end -= qemu_host_page_size;
    }

    /* handle the pages in the middle */
    if (host_start < host_end) {
        ret = mprotect(g2h_untagged(host_start), host_end - host_start, prot);
        if (ret != 0)
            goto error;
    }
    page_set_flags(start, start + len - 1, prot | PAGE_VALID);
    mmap_unlock();
    return 0;
error:
    mmap_unlock();
    return ret;
}
/*
 * map an incomplete host page
 *
 * mmap_frag can be called with a valid fd, if flags doesn't contain one of
 * MAP_ANON, MAP_STACK, MAP_GUARD. If we need to map a page in those cases, we
 * pass fd == -1. However, if flags contains MAP_GUARD then MAP_ANON cannot be
 * added.
 *
 * * If fd is valid (not -1) we want to map the pages with MAP_ANON.
 * * If flags contains MAP_GUARD we don't want to add MAP_ANON because it
 *   will be rejected. See kern_mmap's enforcing of constraints for MAP_GUARD
 *   in sys/vm/vm_mmap.c.
 * * If flags contains MAP_ANON it doesn't matter if we add it or not.
 * * If flags contains MAP_STACK, mmap adds MAP_ANON when called, so it
 *   doesn't matter if we add it or not either. See enforcing of constraints
 *   for MAP_STACK in kern_mmap.
 *
 * Don't add MAP_ANON for the flags that use fd == -1 without specifying the
 * flags directly, with the assumption that future flags that require fd == -1
 * will also not require MAP_ANON.
 */
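/*
 * The fragment covers the single host page at real_start; only the target
 * pages in [start, end) are being (re)mapped, so the flags of any other
 * target pages already present in that host page are ORed into the
 * protection that is finally applied, keeping those pages usable.
 */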
static int mmap_frag(abi_ulong real_start,
                     abi_ulong start, abi_ulong end,
                     int prot, int flags, int fd, abi_ulong offset)
{
    abi_ulong real_end, addr;
    void *host_start;
    int prot1, prot_new;

    real_end = real_start + qemu_host_page_size;
    host_start = g2h_untagged(real_start);

    /* get the protection of the target pages outside the mapping */
    prot1 = 0;
    for (addr = real_start; addr < real_end; addr++) {
        if (addr < start || addr >= end)
            prot1 |= page_get_flags(addr);
    }

    if (prot1 == 0) {
        /* no page was there, so we allocate one. See also above. */
        void *p = mmap(host_start, qemu_host_page_size, prot,
                       flags | ((fd != -1) ? MAP_ANON : 0), -1, 0);
        if (p == MAP_FAILED)
            return -1;
        prot1 = prot;
    }
    prot1 &= PAGE_BITS;

    prot_new = prot | prot1;
    if (fd != -1) {
        /* msync() won't work here, so we return an error if write is
           possible while it is a shared mapping */
        if ((flags & TARGET_BSD_MAP_FLAGMASK) == MAP_SHARED &&
            (prot & PROT_WRITE))
            return -1;

        /* adjust protection to be able to read */
        if (!(prot1 & PROT_WRITE))
            mprotect(host_start, qemu_host_page_size, prot1 | PROT_WRITE);

        /* read the corresponding file data */
        if (pread(fd, g2h_untagged(start), end - start, offset) == -1) {
            return -1;
        }

        /* put final protection */
        if (prot_new != (prot1 | PROT_WRITE))
            mprotect(host_start, qemu_host_page_size, prot_new);
    } else {
        if (prot_new != prot1) {
            mprotect(host_start, qemu_host_page_size, prot_new);
        }
        if (prot_new & PROT_WRITE) {
            memset(g2h_untagged(start), 0, end - start);
        }
    }
    return 0;
}
#if HOST_LONG_BITS == 64 && TARGET_ABI_BITS == 64
# define TASK_UNMAPPED_BASE  (1ul << 38)
#else
# define TASK_UNMAPPED_BASE  0x40000000
#endif

abi_ulong mmap_next_start = TASK_UNMAPPED_BASE;

unsigned long last_brk;
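
/*
 * mmap_next_start is where the search for a free guest address range begins
 * when the application does not request a fixed address; it is advanced past
 * each successful default-placed mapping so later searches don't rescan the
 * same range.
 */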
/*
 * Subroutine of mmap_find_vma, used when we have pre-allocated a chunk of guest
 * address space.
 */
static abi_ulong mmap_find_vma_reserved(abi_ulong start, abi_ulong size,
                                        abi_ulong alignment)
{
    abi_ulong addr;
    abi_ulong end_addr;
    int prot;
    int looped = 0;

    if (size > reserved_va) {
        return (abi_ulong)-1;
    }

    size = HOST_PAGE_ALIGN(size) + alignment;
    end_addr = start + size;
    if (end_addr > reserved_va) {
        end_addr = reserved_va + 1;
    }
    addr = end_addr - qemu_host_page_size;

    while (1) {
        if (addr > end_addr) {
            if (looped) {
                return (abi_ulong)-1;
            }
            end_addr = reserved_va + 1;
            addr = end_addr - qemu_host_page_size;
            looped = 1;
            continue;
        }
        prot = page_get_flags(addr);
        if (prot) {
            end_addr = addr;
        }
        if (end_addr - addr >= size) {
            break;
        }
        addr -= qemu_host_page_size;
    }

    if (start == mmap_next_start) {
        mmap_next_start = addr;
    }
    /* addr is sufficiently low to align it up */
    if (alignment != 0) {
        addr = (addr + alignment) & ~(alignment - 1);
    }
    return addr;
}
/*
 * Find and reserve a free memory area of size 'size'. The search
 * starts at 'start'.
 * It must be called with mmap_lock() held.
 * Return -1 if error.
 */
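/*
 * Without reserved_va, the function below probes for space by asking the host
 * for a PROT_NONE anonymous mapping at the candidate address. If the host's
 * answer does not suit the target (wrong alignment, or outside the guest
 * address space), the probe is unmapped and the search continues; the
 * 'repeat' counter tracks how often the host keeps returning the same address
 * so the search strategy can be adjusted before giving up.
 */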
static abi_ulong mmap_find_vma_aligned(abi_ulong start, abi_ulong size,
                                       abi_ulong alignment)
{
    void *ptr, *prev;
    abi_ulong addr;
    int flags;
    int wrapped, repeat;

    /* If 'start' == 0, then a default start address is used. */
    if (start == 0) {
        start = mmap_next_start;
    } else {
        start &= qemu_host_page_mask;
    }

    size = HOST_PAGE_ALIGN(size);

    if (reserved_va) {
        return mmap_find_vma_reserved(start, size,
                                      (alignment != 0 ? 1 << alignment : 0));
    }

    addr = start;
    wrapped = repeat = 0;
    prev = 0;
    flags = MAP_ANON | MAP_PRIVATE;
    if (alignment != 0) {
        flags |= MAP_ALIGNED(alignment);
    }

    for (;; prev = ptr) {
        /*
         * Reserve needed memory area to avoid a race.
         * It should be discarded using:
         *  - mmap() with MAP_FIXED flag
         *  - mremap() with MREMAP_FIXED flag
         *  - shmat() with SHM_REMAP flag
         */
        ptr = mmap(g2h_untagged(addr), size, PROT_NONE,
                   flags, -1, 0);

        /* ENOMEM, if host address space has no memory */
        if (ptr == MAP_FAILED) {
            return (abi_ulong)-1;
        }

        /*
         * Count the number of sequential returns of the same address.
         * This is used to modify the search algorithm below.
         */
        repeat = (ptr == prev ? repeat + 1 : 0);

        if (h2g_valid(ptr + size - 1)) {
            addr = h2g(ptr);

            if ((addr & ~TARGET_PAGE_MASK) == 0) {
                /* Success. */
                if (start == mmap_next_start && addr >= TASK_UNMAPPED_BASE) {
                    mmap_next_start = addr + size;
                }
                return addr;
            }

            /* The address is not properly aligned for the target. */
            switch (repeat) {
            case 0:
                /*
                 * Assume the result that the kernel gave us is the
                 * first with enough free space, so start again at the
                 * next higher target page.
                 */
                addr = TARGET_PAGE_ALIGN(addr);
                break;
            case 1:
                /*
                 * Sometimes the kernel decides to perform the allocation
                 * at the top end of memory instead.
                 */
                addr &= TARGET_PAGE_MASK;
                break;
            case 2:
                /* Start over at low memory. */
                addr = 0;
                break;
            default:
                /* Fail. This unaligned block must be the last. */
                addr = -1;
                break;
            }
        } else {
            /*
             * Since the result the kernel gave didn't fit, start
             * again at low memory. If any repetition, fail.
             */
            addr = (repeat ? -1 : 0);
        }

        /* Unmap and try again. */
        munmap(ptr, size);

        /* ENOMEM if we checked the whole of the target address space. */
        if (addr == (abi_ulong)-1) {
            return (abi_ulong)-1;
        } else if (addr == 0) {
            if (wrapped) {
                return (abi_ulong)-1;
            }
            wrapped = 1;
            /*
             * Don't actually use 0 when wrapping, instead indicate
             * that we'd truly like an allocation in low memory.
             */
            addr = TARGET_PAGE_SIZE;
        } else if (wrapped && addr >= start) {
            return (abi_ulong)-1;
        }
    }
}

abi_ulong mmap_find_vma(abi_ulong start, abi_ulong size)
{
    return mmap_find_vma_aligned(start, size, 0);
}
/* NOTE: all the constants are the HOST ones */
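/*
 * target_mmap() has two main paths: without MAP_FIXED, a suitable guest
 * address range is found (and reserved) first with mmap_find_vma(); with
 * MAP_FIXED, the requested range is validated, the partial host pages at
 * either end are handled with mmap_frag(), and the host-page-aligned middle
 * is mapped directly.
 */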
abi_long target_mmap(abi_ulong start, abi_ulong len, int prot,
                     int flags, int fd, off_t offset)
{
    abi_ulong ret, end, real_start, real_end, retaddr, host_offset, host_len;

    mmap_lock();
    if (qemu_loglevel_mask(CPU_LOG_PAGE)) {
        qemu_log("mmap: start=0x" TARGET_ABI_FMT_lx
                 " len=0x" TARGET_ABI_FMT_lx " prot=%c%c%c flags=",
                 start, len,
                 prot & PROT_READ ? 'r' : '-',
                 prot & PROT_WRITE ? 'w' : '-',
                 prot & PROT_EXEC ? 'x' : '-');
        if (flags & MAP_ALIGNMENT_MASK) {
            qemu_log("MAP_ALIGNED(%u) ",
                     (flags & MAP_ALIGNMENT_MASK) >> MAP_ALIGNMENT_SHIFT);
        }
        if (flags & MAP_GUARD) {
            qemu_log("MAP_GUARD ");
        }
        if (flags & MAP_FIXED) {
            qemu_log("MAP_FIXED ");
        }
        if (flags & MAP_ANON) {
            qemu_log("MAP_ANON ");
        }
        if (flags & MAP_EXCL) {
            qemu_log("MAP_EXCL ");
        }
        if (flags & MAP_PRIVATE) {
            qemu_log("MAP_PRIVATE ");
        }
        if (flags & MAP_SHARED) {
            qemu_log("MAP_SHARED ");
        }
        if (flags & MAP_NOCORE) {
            qemu_log("MAP_NOCORE ");
        }
        if (flags & MAP_STACK) {
            qemu_log("MAP_STACK ");
        }
        qemu_log("fd=%d offset=0x%lx\n", fd, offset);
    }

    if ((flags & MAP_ANON) && fd != -1) {
        errno = EINVAL;
        goto fail;
    }
    if (flags & MAP_STACK) {
        if ((fd != -1) || ((prot & (PROT_READ | PROT_WRITE)) !=
                           (PROT_READ | PROT_WRITE))) {
            errno = EINVAL;
            goto fail;
        }
    }
    if ((flags & MAP_GUARD) && (prot != PROT_NONE || fd != -1 ||
        offset != 0 || (flags & (MAP_SHARED | MAP_PRIVATE |
        /* MAP_PREFAULT | */ /* MAP_PREFAULT not in mman.h */
        MAP_PREFAULT_READ | MAP_ANON | MAP_STACK)) != 0)) {
        errno = EINVAL;
        goto fail;
    }

    if (offset & ~TARGET_PAGE_MASK) {
        errno = EINVAL;
        goto fail;
    }

    if (len == 0) {
        errno = EINVAL;
        goto fail;
    }

    /* Check for overflows */
    len = TARGET_PAGE_ALIGN(len);
    if (len == 0) {
        errno = ENOMEM;
        goto fail;
    }

    real_start = start & qemu_host_page_mask;
    host_offset = offset & qemu_host_page_mask;

    /*
     * If the user is asking for the kernel to find a location, do that
     * before we truncate the length for mapping files below.
     */
    if (!(flags & MAP_FIXED)) {
        host_len = len + offset - host_offset;
        host_len = HOST_PAGE_ALIGN(host_len);
        if ((flags & MAP_ALIGNMENT_MASK) != 0)
            start = mmap_find_vma_aligned(real_start, host_len,
                                          (flags & MAP_ALIGNMENT_MASK) >> MAP_ALIGNMENT_SHIFT);
        else
            start = mmap_find_vma(real_start, host_len);
        if (start == (abi_ulong)-1) {
            errno = ENOMEM;
            goto fail;
        }
    }
    /*
     * When mapping files into a memory area larger than the file, accesses
     * to pages beyond the file size will cause a SIGBUS.
     *
     * For example, if mmapping a file of 100 bytes on a host with 4K pages
     * emulating a target with 8K pages, the target expects to be able to
     * access the first 8K. But the host will trap us on any access beyond
     * 4K.
     *
     * When emulating a target with a larger page size than the host's, we
     * may need to truncate file maps at EOF and add extra anonymous pages
     * up to the target's page boundary.
     */
    if ((qemu_real_host_page_size() < qemu_host_page_size) && fd != -1) {
        struct stat sb;

        if (fstat(fd, &sb) == -1) {
            goto fail;
        }

        /* Are we trying to create a map beyond EOF? */
        if (offset + len > sb.st_size) {
            /*
             * If so, truncate the file map at EOF aligned with
             * the host's real page size. Additional anonymous maps
             * will be created beyond EOF.
             */
            len = REAL_HOST_PAGE_ALIGN(sb.st_size - offset);
        }
    }
    if (!(flags & MAP_FIXED)) {
        unsigned long host_start;
        void *p;

        host_len = len + offset - host_offset;
        host_len = HOST_PAGE_ALIGN(host_len);

        /*
         * Note: we prefer to control the mapping address. It is
         * especially important if qemu_host_page_size >
         * qemu_real_host_page_size
         */
        p = mmap(g2h_untagged(start), host_len, prot,
                 flags | MAP_FIXED | ((fd != -1) ? MAP_ANON : 0), -1, 0);
        if (p == MAP_FAILED)
            goto fail;
        /* update start so that it points to the file position at 'offset' */
        host_start = (unsigned long)p;
        if (fd != -1) {
            p = mmap(g2h_untagged(start), len, prot,
                     flags | MAP_FIXED, fd, host_offset);
            if (p == MAP_FAILED) {
                munmap(g2h_untagged(start), host_len);
                goto fail;
            }
            host_start += offset - host_offset;
        }
        start = h2g(host_start);
    } else {
        if (start & ~TARGET_PAGE_MASK) {
            errno = EINVAL;
            goto fail;
        }
        end = start + len;
        real_end = HOST_PAGE_ALIGN(end);
        /*
         * Test if the requested memory area fits the target address space.
         * It can fail only on a 64-bit host with a 32-bit target.
         * On any other target/host combination, the host mmap() handles
         * this error correctly.
         */
        if (!guest_range_valid_untagged(start, len)) {
            errno = EINVAL;
            goto fail;
        }

        /*
         * worst case: we cannot map the file because the offset is not
         * aligned, so we read it
         */
        if (fd != -1 &&
            (offset & ~qemu_host_page_mask) != (start & ~qemu_host_page_mask)) {
            /*
             * msync() won't work here, so we return an error if write is
             * possible while it is a shared mapping
             */
            if ((flags & TARGET_BSD_MAP_FLAGMASK) == MAP_SHARED &&
                (prot & PROT_WRITE)) {
                errno = EINVAL;
                goto fail;
            }
            retaddr = target_mmap(start, len, prot | PROT_WRITE,
                                  MAP_FIXED | MAP_PRIVATE | MAP_ANON,
                                  -1, 0);
            if (retaddr == -1)
                goto fail;
            if (pread(fd, g2h_untagged(start), len, offset) == -1) {
                goto fail;
            }
            if (!(prot & PROT_WRITE)) {
                ret = target_mprotect(start, len, prot);
                assert(ret == 0);
            }
            goto the_end;
        }

        /* Reject the mapping if any page within the range is mapped */
        if ((flags & MAP_EXCL) && page_check_range(start, len, 0) < 0) {
            errno = EINVAL;
            goto fail;
        }

        /* handle the start of the mapping */
        if (start > real_start) {
            if (real_end == real_start + qemu_host_page_size) {
                /* one single host page */
                ret = mmap_frag(real_start, start, end,
                                prot, flags, fd, offset);
                if (ret == -1)
                    goto fail;
                goto the_end1;
            }
            ret = mmap_frag(real_start, start, real_start + qemu_host_page_size,
                            prot, flags, fd, offset);
            if (ret == -1)
                goto fail;
            real_start += qemu_host_page_size;
        }
        /* handle the end of the mapping */
        if (end < real_end) {
            ret = mmap_frag(real_end - qemu_host_page_size,
                            real_end - qemu_host_page_size, end,
                            prot, flags, fd,
                            offset + real_end - qemu_host_page_size - start);
            if (ret == -1)
                goto fail;
            real_end -= qemu_host_page_size;
        }

        /* map the middle (easier) */
        if (real_start < real_end) {
            void *p;
            unsigned long offset1;

            if (flags & MAP_ANON)
                offset1 = 0;
            else
                offset1 = offset + real_start - start;
            p = mmap(g2h_untagged(real_start), real_end - real_start,
                     prot, flags, fd, offset1);
            if (p == MAP_FAILED)
                goto fail;
        }
    }
the_end1:
    page_set_flags(start, start + len - 1, prot | PAGE_VALID);
the_end:
#ifdef DEBUG_MMAP
    printf("ret=0x" TARGET_ABI_FMT_lx "\n", start);
    page_dump(stdout);
    printf("\n");
#endif
    mmap_unlock();
    return start;
fail:
    mmap_unlock();
    return -1;
}
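
/*
 * With reserved_va, the guest address space was reserved up front, so instead
 * of returning pages to the host, mmap_reserve() remaps the affected host
 * pages as PROT_NONE anonymous memory, keeping the reservation intact.
 * Partial host pages at either end are left alone if other target pages in
 * them are still in use.
 */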
static void mmap_reserve(abi_ulong start, abi_ulong size)
{
    abi_ulong real_start;
    abi_ulong real_end;
    abi_ulong addr;
    abi_ulong end;
    int prot;

    real_start = start & qemu_host_page_mask;
    real_end = HOST_PAGE_ALIGN(start + size);
    end = start + size;
    if (start > real_start) {
        /* handle host page containing start */
        prot = 0;
        for (addr = real_start; addr < start; addr += TARGET_PAGE_SIZE) {
            prot |= page_get_flags(addr);
        }
        if (real_end == real_start + qemu_host_page_size) {
            for (addr = end; addr < real_end; addr += TARGET_PAGE_SIZE) {
                prot |= page_get_flags(addr);
            }
            end = real_end;
        }
        if (prot != 0) {
            real_start += qemu_host_page_size;
        }
    }
    if (end < real_end) {
        prot = 0;
        for (addr = end; addr < real_end; addr += TARGET_PAGE_SIZE) {
            prot |= page_get_flags(addr);
        }
        if (prot != 0) {
            real_end -= qemu_host_page_size;
        }
    }
    if (real_start != real_end) {
        mmap(g2h_untagged(real_start), real_end - real_start, PROT_NONE,
             MAP_FIXED | MAP_ANON | MAP_PRIVATE, -1, 0);
    }
}
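
/*
 * target_munmap() mirrors the splitting done in target_mprotect(): host pages
 * that are only partially covered by the target range stay mapped when any
 * other target page within them is still valid; only fully covered host pages
 * are actually unmapped (or re-reserved when reserved_va is in use).
 */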
int target_munmap(abi_ulong start, abi_ulong len)
{
    abi_ulong end, real_start, real_end, addr;
    int prot, ret;

#ifdef DEBUG_MMAP
    printf("munmap: start=0x" TARGET_ABI_FMT_lx " len=0x"
           TARGET_ABI_FMT_lx "\n",
           start, len);
#endif
    if (start & ~TARGET_PAGE_MASK)
        return -EINVAL;
    len = TARGET_PAGE_ALIGN(len);
    if (len == 0)
        return -EINVAL;

    mmap_lock();
    end = start + len;
    real_start = start & qemu_host_page_mask;
    real_end = HOST_PAGE_ALIGN(end);

    if (start > real_start) {
        /* handle host page containing start */
        prot = 0;
        for (addr = real_start; addr < start; addr += TARGET_PAGE_SIZE) {
            prot |= page_get_flags(addr);
        }
        if (real_end == real_start + qemu_host_page_size) {
            for (addr = end; addr < real_end; addr += TARGET_PAGE_SIZE) {
                prot |= page_get_flags(addr);
            }
            end = real_end;
        }
        if (prot != 0)
            real_start += qemu_host_page_size;
    }
    if (end < real_end) {
        prot = 0;
        for (addr = end; addr < real_end; addr += TARGET_PAGE_SIZE) {
            prot |= page_get_flags(addr);
        }
        if (prot != 0)
            real_end -= qemu_host_page_size;
    }

    ret = 0;
    /* unmap what we can */
    if (real_start < real_end) {
        if (reserved_va) {
            mmap_reserve(real_start, real_end - real_start);
        } else {
            ret = munmap(g2h_untagged(real_start), real_end - real_start);
        }
    }

    if (ret == 0) {
        page_set_flags(start, start + len - 1, 0);
    }
    mmap_unlock();
    return ret;
}
int target_msync(abi_ulong start, abi_ulong len, int flags)
{
    abi_ulong end;

    if (start & ~TARGET_PAGE_MASK)
        return -EINVAL;
    len = TARGET_PAGE_ALIGN(len);
    end = start + len;
    if (end < start)
        return -EINVAL;
    if (end == start)
        return 0;

    start &= qemu_host_page_mask;
    return msync(g2h_untagged(start), end - start, flags);
}