mmap.c (23 KB)
  1. /*
  2. * mmap support for qemu
  3. *
  4. * Copyright (c) 2003 Fabrice Bellard
  5. *
  6. * This program is free software; you can redistribute it and/or modify
  7. * it under the terms of the GNU General Public License as published by
  8. * the Free Software Foundation; either version 2 of the License, or
  9. * (at your option) any later version.
  10. *
  11. * This program is distributed in the hope that it will be useful,
  12. * but WITHOUT ANY WARRANTY; without even the implied warranty of
  13. * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
  14. * GNU General Public License for more details.
  15. *
  16. * You should have received a copy of the GNU General Public License
  17. * along with this program; if not, see <http://www.gnu.org/licenses/>.
  18. */
  19. #include "qemu/osdep.h"
  20. #include "trace.h"
  21. #include "exec/log.h"
  22. #include "qemu.h"
/* Global lock serialising all changes to the guest page mappings.
 * It is taken recursively: mmap_lock_count tracks the per-thread
 * nesting depth, so only the outermost lock/unlock on a thread
 * touches the mutex itself. */
static pthread_mutex_t mmap_mutex = PTHREAD_MUTEX_INITIALIZER;
/* Per-thread recursion depth for mmap_mutex; 0 means this thread
 * does not hold the lock. */
static __thread int mmap_lock_count;
  25. void mmap_lock(void)
  26. {
  27. if (mmap_lock_count++ == 0) {
  28. pthread_mutex_lock(&mmap_mutex);
  29. }
  30. }
  31. void mmap_unlock(void)
  32. {
  33. if (--mmap_lock_count == 0) {
  34. pthread_mutex_unlock(&mmap_mutex);
  35. }
  36. }
  37. bool have_mmap_lock(void)
  38. {
  39. return mmap_lock_count > 0 ? true : false;
  40. }
  41. /* Grab lock to make sure things are in a consistent state after fork(). */
  42. void mmap_fork_start(void)
  43. {
  44. if (mmap_lock_count)
  45. abort();
  46. pthread_mutex_lock(&mmap_mutex);
  47. }
  48. void mmap_fork_end(int child)
  49. {
  50. if (child)
  51. pthread_mutex_init(&mmap_mutex, NULL);
  52. else
  53. pthread_mutex_unlock(&mmap_mutex);
  54. }
/* NOTE: all the constants are the HOST ones, but addresses are target. */
/*
 * Emulate mprotect() for the guest.  START and LEN are target-virtual;
 * PROT uses host PROT_* bits.
 *
 * A single host page may back several target pages, so a host page that
 * only partially overlaps [start, start+len) must receive the union of
 * the protections of every target page it contains — that is what the
 * two boundary cases below compute before the "middle" pages are
 * protected directly.
 *
 * Returns 0 on success, -TARGET_EINVAL / -TARGET_ENOMEM on bad
 * arguments, or the host mprotect() result on failure.
 */
int target_mprotect(abi_ulong start, abi_ulong len, int prot)
{
    abi_ulong end, host_start, host_end, addr;
    int prot1, ret;

    trace_target_mprotect(start, len, prot);

    /* start must be target-page aligned. */
    if ((start & ~TARGET_PAGE_MASK) != 0)
        return -TARGET_EINVAL;
    len = TARGET_PAGE_ALIGN(len);
    end = start + len;
    if (!guest_range_valid(start, len)) {
        return -TARGET_ENOMEM;
    }
    /* Only R/W/X bits are honoured. */
    prot &= PROT_READ | PROT_WRITE | PROT_EXEC;
    if (len == 0)
        return 0;

    mmap_lock();
    host_start = start & qemu_host_page_mask;
    host_end = HOST_PAGE_ALIGN(end);
    if (start > host_start) {
        /* Handle the host page containing start: merge in the flags of
         * the target pages below start that share this host page. */
        prot1 = prot;
        for(addr = host_start; addr < start; addr += TARGET_PAGE_SIZE) {
            prot1 |= page_get_flags(addr);
        }
        /* If the whole range fits in this one host page, also merge the
         * target pages above end, and consume the range entirely here. */
        if (host_end == host_start + qemu_host_page_size) {
            for(addr = end; addr < host_end; addr += TARGET_PAGE_SIZE) {
                prot1 |= page_get_flags(addr);
            }
            end = host_end;
        }
        ret = mprotect(g2h(host_start), qemu_host_page_size, prot1 & PAGE_BITS);
        if (ret != 0)
            goto error;
        host_start += qemu_host_page_size;
    }
    if (end < host_end) {
        /* Handle the host page containing end (merge flags of target
         * pages above end that share it). */
        prot1 = prot;
        for(addr = end; addr < host_end; addr += TARGET_PAGE_SIZE) {
            prot1 |= page_get_flags(addr);
        }
        ret = mprotect(g2h(host_end - qemu_host_page_size), qemu_host_page_size,
                       prot1 & PAGE_BITS);
        if (ret != 0)
            goto error;
        host_end -= qemu_host_page_size;
    }
    /* handle the pages in the middle */
    if (host_start < host_end) {
        ret = mprotect(g2h(host_start), host_end - host_start, prot);
        if (ret != 0)
            goto error;
    }
    /* Record the new protection in the guest page table. */
    page_set_flags(start, start + len, prot | PAGE_VALID);
    mmap_unlock();
    return 0;
error:
    mmap_unlock();
    return ret;
}
/* map an incomplete host page */
/*
 * Map the target range [start, end) which lies entirely inside the
 * single host page beginning at real_start.  Because other target
 * pages may already live in that host page, the host page is (re)used
 * and the file contents are pread() into place rather than mmap'd.
 *
 * Returns 0 on success, -1 on failure (caller turns that into errno).
 */
static int mmap_frag(abi_ulong real_start,
                     abi_ulong start, abi_ulong end,
                     int prot, int flags, int fd, abi_ulong offset)
{
    abi_ulong real_end, addr;
    void *host_start;
    int prot1, prot_new;

    real_end = real_start + qemu_host_page_size;
    host_start = g2h(real_start);

    /* get the protection of the target pages outside the mapping */
    /* NOTE(review): this walks byte-by-byte (addr++), querying each
     * target page many times; presumably a TARGET_PAGE_SIZE step was
     * intended — result is the same, just slower.  TODO confirm. */
    prot1 = 0;
    for(addr = real_start; addr < real_end; addr++) {
        if (addr < start || addr >= end)
            prot1 |= page_get_flags(addr);
    }

    if (prot1 == 0) {
        /* no page was there, so we allocate one */
        void *p = mmap(host_start, qemu_host_page_size, prot,
                       flags | MAP_ANONYMOUS, -1, 0);
        if (p == MAP_FAILED)
            return -1;
        prot1 = prot;
    }
    prot1 &= PAGE_BITS;

    /* The host page must carry the union of old and new protections. */
    prot_new = prot | prot1;
    if (!(flags & MAP_ANONYMOUS)) {
        /* msync() won't work here, so we return an error if write is
           possible while it is a shared mapping */
        if ((flags & MAP_TYPE) == MAP_SHARED &&
            (prot & PROT_WRITE))
            return -1;

        /* adjust protection to be able to read */
        if (!(prot1 & PROT_WRITE))
            mprotect(host_start, qemu_host_page_size, prot1 | PROT_WRITE);

        /* read the corresponding file data */
        if (pread(fd, g2h(start), end - start, offset) == -1)
            return -1;

        /* put final protection */
        if (prot_new != (prot1 | PROT_WRITE))
            mprotect(host_start, qemu_host_page_size, prot_new);
    } else {
        if (prot_new != prot1) {
            mprotect(host_start, qemu_host_page_size, prot_new);
        }
        /* Fresh anonymous memory must read as zero. */
        if (prot_new & PROT_WRITE) {
            memset(g2h(start), 0, end - start);
        }
    }
    return 0;
}
/* Default base address for guest mappings when the caller lets QEMU
 * choose (mmap_find_vma with start == 0).  Picked per host/target
 * word size; AArch64 guests get a fixed high base. */
#if HOST_LONG_BITS == 64 && TARGET_ABI_BITS == 64
#ifdef TARGET_AARCH64
# define TASK_UNMAPPED_BASE 0x5500000000
#else
# define TASK_UNMAPPED_BASE (1ul << 38)
#endif
#else
# define TASK_UNMAPPED_BASE 0x40000000
#endif

/* Next candidate address for an automatically-placed mapping; advanced
 * by mmap_find_vma() / mmap_find_vma_reserved() on success. */
abi_ulong mmap_next_start = TASK_UNMAPPED_BASE;

/* Last brk value seen — maintained here, read elsewhere (not used in
 * this file's visible code). */
unsigned long last_brk;
/* Subroutine of mmap_find_vma, used when we have pre-allocated a chunk
   of guest address space.  */
/*
 * Search the reserved guest address region [0, reserved_va) downward
 * from start+size for a free, ALIGN-aligned hole of SIZE bytes, using
 * the guest page table (page_get_flags) rather than host mmap probing.
 * Wraps around to the top of the region once; returns the address on
 * success or (abi_ulong)-1 when the whole space has been searched.
 */
static abi_ulong mmap_find_vma_reserved(abi_ulong start, abi_ulong size,
                                        abi_ulong align)
{
    abi_ulong addr, end_addr, incr = qemu_host_page_size;
    int prot;
    bool looped = false;

    if (size > reserved_va) {
        return (abi_ulong)-1;
    }

    /* Note that start and size have already been aligned by mmap_find_vma. */

    end_addr = start + size;
    if (start > reserved_va - size) {
        /* Start at the top of the address space.  */
        end_addr = ((reserved_va - size) & -align) + size;
        looped = true;
    }

    /* Search downward from END_ADDR, checking to see if a page is in use.  */
    addr = end_addr;
    while (1) {
        addr -= incr;
        /* Unsigned wrap below 0 makes addr > end_addr: we ran off the
         * bottom of the address space. */
        if (addr > end_addr) {
            if (looped) {
                /* Failure.  The entire address space has been searched.  */
                return (abi_ulong)-1;
            }
            /* Re-start at the top of the address space.  */
            addr = end_addr = ((reserved_va - size) & -align) + size;
            looped = true;
        } else {
            prot = page_get_flags(addr);
            if (prot) {
                /* Page in use.  Restart below this page.  */
                addr = end_addr = ((addr - size) & -align) + size;
            } else if (addr && addr + size == end_addr) {
                /* Success!  All pages between ADDR and END_ADDR are free.  */
                if (start == mmap_next_start) {
                    mmap_next_start = addr;
                }
                return addr;
            }
        }
    }
}
/*
 * Find and reserve a free memory area of size 'size'. The search
 * starts at 'start'.
 * It must be called with mmap_lock() held.
 * Return -1 if error.
 */
/*
 * The non-reserved_va path probes the host kernel with PROT_NONE
 * anonymous mmaps; each candidate is immediately munmap'd again, so
 * the caller must re-map the returned range (MAP_FIXED / MREMAP_FIXED
 * / SHM_REMAP) before the race window matters — see the comment in the
 * loop body.  'repeat' counts consecutive identical kernel answers and
 * drives the fallback strategy when the answer is misaligned.
 */
abi_ulong mmap_find_vma(abi_ulong start, abi_ulong size, abi_ulong align)
{
    void *ptr, *prev;
    abi_ulong addr;
    int wrapped, repeat;

    align = MAX(align, qemu_host_page_size);

    /* If 'start' == 0, then a default start address is used. */
    if (start == 0) {
        start = mmap_next_start;
    } else {
        start &= qemu_host_page_mask;
    }
    start = ROUND_UP(start, align);

    size = HOST_PAGE_ALIGN(size);

    if (reserved_va) {
        return mmap_find_vma_reserved(start, size, align);
    }

    addr = start;
    wrapped = repeat = 0;
    prev = 0;

    for (;; prev = ptr) {
        /*
         * Reserve needed memory area to avoid a race.
         * It should be discarded using:
         *  - mmap() with MAP_FIXED flag
         *  - mremap() with MREMAP_FIXED flag
         *  - shmat() with SHM_REMAP flag
         */
        ptr = mmap(g2h(addr), size, PROT_NONE,
                   MAP_ANONYMOUS|MAP_PRIVATE|MAP_NORESERVE, -1, 0);

        /* ENOMEM, if host address space has no memory */
        if (ptr == MAP_FAILED) {
            return (abi_ulong)-1;
        }

        /* Count the number of sequential returns of the same address.
           This is used to modify the search algorithm below.  */
        repeat = (ptr == prev ? repeat + 1 : 0);

        if (h2g_valid(ptr + size - 1)) {
            addr = h2g(ptr);

            if ((addr & (align - 1)) == 0) {
                /* Success.  */
                if (start == mmap_next_start && addr >= TASK_UNMAPPED_BASE) {
                    mmap_next_start = addr + size;
                }
                return addr;
            }

            /* The address is not properly aligned for the target.  */
            switch (repeat) {
            case 0:
                /* Assume the result that the kernel gave us is the
                   first with enough free space, so start again at the
                   next higher target page.  */
                addr = ROUND_UP(addr, align);
                break;
            case 1:
                /* Sometimes the kernel decides to perform the allocation
                   at the top end of memory instead.  */
                addr &= -align;
                break;
            case 2:
                /* Start over at low memory.  */
                addr = 0;
                break;
            default:
                /* Fail.  This unaligned block must the last.  */
                addr = -1;
                break;
            }
        } else {
            /* Since the result the kernel gave didn't fit, start
               again at low memory.  If any repetition, fail.  */
            addr = (repeat ? -1 : 0);
        }

        /* Unmap and try again.  */
        munmap(ptr, size);

        /* ENOMEM if we checked the whole of the target address space.  */
        if (addr == (abi_ulong)-1) {
            return (abi_ulong)-1;
        } else if (addr == 0) {
            if (wrapped) {
                return (abi_ulong)-1;
            }
            wrapped = 1;
            /* Don't actually use 0 when wrapping, instead indicate
               that we'd truly like an allocation in low memory.  */
            addr = (mmap_min_addr > TARGET_PAGE_SIZE
                     ? TARGET_PAGE_ALIGN(mmap_min_addr)
                     : TARGET_PAGE_SIZE);
        } else if (wrapped && addr >= start) {
            return (abi_ulong)-1;
        }
    }
}
/* NOTE: all the constants are the HOST ones */
/*
 * Emulate mmap() for the guest.  START/LEN/OFFSET are target values;
 * PROT/FLAGS/FD use host encodings (see NOTE above).
 *
 * Two regimes:
 *  - !MAP_FIXED: pick a placement via mmap_find_vma(), reserve the
 *    whole host range anonymously, then overlay the file mapping.
 *  - MAP_FIXED: map the aligned "middle" directly and fill the
 *    partially-covered host pages at each end via mmap_frag(); if the
 *    file offset and start disagree modulo the host page size, fall
 *    back to an anonymous mapping populated with pread().
 *
 * Returns the guest start address, or -1 with errno set.
 */
abi_long target_mmap(abi_ulong start, abi_ulong len, int prot,
                     int flags, int fd, abi_ulong offset)
{
    abi_ulong ret, end, real_start, real_end, retaddr, host_offset, host_len;

    mmap_lock();
    trace_target_mmap(start, len, prot, flags, fd, offset);

    if (!len) {
        errno = EINVAL;
        goto fail;
    }

    /* Also check for overflows... */
    /* If TARGET_PAGE_ALIGN wrapped len to 0, the request overflowed. */
    len = TARGET_PAGE_ALIGN(len);
    if (!len) {
        errno = ENOMEM;
        goto fail;
    }

    if (offset & ~TARGET_PAGE_MASK) {
        errno = EINVAL;
        goto fail;
    }

    real_start = start & qemu_host_page_mask;
    host_offset = offset & qemu_host_page_mask;

    /* If the user is asking for the kernel to find a location, do that
       before we truncate the length for mapping files below.  */
    if (!(flags & MAP_FIXED)) {
        host_len = len + offset - host_offset;
        host_len = HOST_PAGE_ALIGN(host_len);
        start = mmap_find_vma(real_start, host_len, TARGET_PAGE_SIZE);
        if (start == (abi_ulong)-1) {
            errno = ENOMEM;
            goto fail;
        }
    }

    /* When mapping files into a memory area larger than the file, accesses
       to pages beyond the file size will cause a SIGBUS.

       For example, if mmaping a file of 100 bytes on a host with 4K pages
       emulating a target with 8K pages, the target expects to be able to
       access the first 8K. But the host will trap us on any access beyond
       4K.

       When emulating a target with a larger page-size than the hosts, we
       may need to truncate file maps at EOF and add extra anonymous pages
       up to the targets page boundary.  */

    if ((qemu_real_host_page_size < qemu_host_page_size) &&
        !(flags & MAP_ANONYMOUS)) {
        struct stat sb;

        if (fstat (fd, &sb) == -1)
            goto fail;

        /* Are we trying to create a map beyond EOF?.  */
        if (offset + len > sb.st_size) {
            /* If so, truncate the file map at eof aligned with
               the hosts real pagesize. Additional anonymous maps
               will be created beyond EOF.  */
            len = REAL_HOST_PAGE_ALIGN(sb.st_size - offset);
        }
    }

    if (!(flags & MAP_FIXED)) {
        unsigned long host_start;
        void *p;

        host_len = len + offset - host_offset;
        host_len = HOST_PAGE_ALIGN(host_len);

        /* Note: we prefer to control the mapping address. It is
           especially important if qemu_host_page_size >
           qemu_real_host_page_size */
        /* Reserve the full host range anonymously first ... */
        p = mmap(g2h(start), host_len, prot,
                 flags | MAP_FIXED | MAP_ANONYMOUS, -1, 0);
        if (p == MAP_FAILED)
            goto fail;
        /* update start so that it points to the file position at 'offset' */
        host_start = (unsigned long)p;
        if (!(flags & MAP_ANONYMOUS)) {
            /* ... then overlay the actual file mapping on top. */
            p = mmap(g2h(start), len, prot,
                     flags | MAP_FIXED, fd, host_offset);
            if (p == MAP_FAILED) {
                munmap(g2h(start), host_len);
                goto fail;
            }
            host_start += offset - host_offset;
        }
        start = h2g(host_start);
    } else {
        if (start & ~TARGET_PAGE_MASK) {
            errno = EINVAL;
            goto fail;
        }
        end = start + len;
        real_end = HOST_PAGE_ALIGN(end);

        /*
         * Test if requested memory area fits target address space
         * It can fail only on 64-bit host with 32-bit target.
         * On any other target/host host mmap() handles this error correctly.
         */
        if (end < start || !guest_range_valid(start, len)) {
            errno = ENOMEM;
            goto fail;
        }

        /* worst case: we cannot map the file because the offset is not
           aligned, so we read it */
        if (!(flags & MAP_ANONYMOUS) &&
            (offset & ~qemu_host_page_mask) != (start & ~qemu_host_page_mask)) {
            /* msync() won't work here, so we return an error if write is
               possible while it is a shared mapping */
            if ((flags & MAP_TYPE) == MAP_SHARED &&
                (prot & PROT_WRITE)) {
                errno = EINVAL;
                goto fail;
            }
            /* Anonymous writable mapping, populated from the file. */
            retaddr = target_mmap(start, len, prot | PROT_WRITE,
                                  MAP_FIXED | MAP_PRIVATE | MAP_ANONYMOUS,
                                  -1, 0);
            if (retaddr == -1)
                goto fail;
            if (pread(fd, g2h(start), len, offset) == -1)
                goto fail;
            if (!(prot & PROT_WRITE)) {
                ret = target_mprotect(start, len, prot);
                assert(ret == 0);
            }
            goto the_end;
        }

        /* handle the start of the mapping */
        if (start > real_start) {
            if (real_end == real_start + qemu_host_page_size) {
                /* one single host page */
                ret = mmap_frag(real_start, start, end,
                                prot, flags, fd, offset);
                if (ret == -1)
                    goto fail;
                goto the_end1;
            }
            ret = mmap_frag(real_start, start, real_start + qemu_host_page_size,
                            prot, flags, fd, offset);
            if (ret == -1)
                goto fail;
            real_start += qemu_host_page_size;
        }
        /* handle the end of the mapping */
        if (end < real_end) {
            ret = mmap_frag(real_end - qemu_host_page_size,
                            real_end - qemu_host_page_size, end,
                            prot, flags, fd,
                            offset + real_end - qemu_host_page_size - start);
            if (ret == -1)
                goto fail;
            real_end -= qemu_host_page_size;
        }

        /* map the middle (easier) */
        if (real_start < real_end) {
            void *p;
            unsigned long offset1;
            if (flags & MAP_ANONYMOUS)
                offset1 = 0;
            else
                offset1 = offset + real_start - start;
            p = mmap(g2h(real_start), real_end - real_start,
                     prot, flags, fd, offset1);
            if (p == MAP_FAILED)
                goto fail;
        }
    }
 the_end1:
    /* Record the mapping in the guest page table. */
    page_set_flags(start, start + len, prot | PAGE_VALID);
 the_end:
    trace_target_mmap_complete(start);
    if (qemu_loglevel_mask(CPU_LOG_PAGE)) {
        log_page_dump(__func__);
    }
    /* Drop any translated code covering the remapped range. */
    tb_invalidate_phys_range(start, start + len);
    mmap_unlock();
    return start;
fail:
    mmap_unlock();
    return -1;
}
/*
 * In reserved_va mode, guest address space must never be returned to
 * the host: instead of munmap'ing [start, start+size), re-cover it
 * with an inaccessible PROT_NONE anonymous mapping.  Host pages at
 * either boundary that still contain live target pages (non-zero
 * page_get_flags) are left untouched.
 */
static void mmap_reserve(abi_ulong start, abi_ulong size)
{
    abi_ulong real_start;
    abi_ulong real_end;
    abi_ulong addr;
    abi_ulong end;
    int prot;

    real_start = start & qemu_host_page_mask;
    real_end = HOST_PAGE_ALIGN(start + size);
    end = start + size;
    if (start > real_start) {
        /* handle host page containing start */
        prot = 0;
        for (addr = real_start; addr < start; addr += TARGET_PAGE_SIZE) {
            prot |= page_get_flags(addr);
        }
        /* Single-host-page range: also check the pages above end. */
        if (real_end == real_start + qemu_host_page_size) {
            for (addr = end; addr < real_end; addr += TARGET_PAGE_SIZE) {
                prot |= page_get_flags(addr);
            }
            end = real_end;
        }
        /* Some target page in this host page is still live: skip it. */
        if (prot != 0)
            real_start += qemu_host_page_size;
    }
    if (end < real_end) {
        prot = 0;
        for (addr = end; addr < real_end; addr += TARGET_PAGE_SIZE) {
            prot |= page_get_flags(addr);
        }
        if (prot != 0)
            real_end -= qemu_host_page_size;
    }
    if (real_start != real_end) {
        mmap(g2h(real_start), real_end - real_start, PROT_NONE,
             MAP_FIXED | MAP_ANONYMOUS | MAP_PRIVATE | MAP_NORESERVE,
             -1, 0);
    }
}
/*
 * Emulate munmap() for the guest.  Host pages at the range boundaries
 * that still contain live target pages outside [start, start+len) are
 * kept mapped; only fully-dead host pages are unmapped (or, in
 * reserved_va mode, replaced by a PROT_NONE reservation).
 *
 * Returns 0 on success or -TARGET_EINVAL / host munmap() result.
 */
int target_munmap(abi_ulong start, abi_ulong len)
{
    abi_ulong end, real_start, real_end, addr;
    int prot, ret;

    trace_target_munmap(start, len);

    if (start & ~TARGET_PAGE_MASK)
        return -TARGET_EINVAL;
    len = TARGET_PAGE_ALIGN(len);
    if (len == 0 || !guest_range_valid(start, len)) {
        return -TARGET_EINVAL;
    }

    mmap_lock();
    end = start + len;
    real_start = start & qemu_host_page_mask;
    real_end = HOST_PAGE_ALIGN(end);

    if (start > real_start) {
        /* handle host page containing start */
        prot = 0;
        for(addr = real_start; addr < start; addr += TARGET_PAGE_SIZE) {
            prot |= page_get_flags(addr);
        }
        /* Whole range within one host page: fold in the upper side too. */
        if (real_end == real_start + qemu_host_page_size) {
            for(addr = end; addr < real_end; addr += TARGET_PAGE_SIZE) {
                prot |= page_get_flags(addr);
            }
            end = real_end;
        }
        /* Live neighbours present: keep this host page mapped. */
        if (prot != 0)
            real_start += qemu_host_page_size;
    }
    if (end < real_end) {
        prot = 0;
        for(addr = end; addr < real_end; addr += TARGET_PAGE_SIZE) {
            prot |= page_get_flags(addr);
        }
        if (prot != 0)
            real_end -= qemu_host_page_size;
    }

    ret = 0;
    /* unmap what we can */
    if (real_start < real_end) {
        if (reserved_va) {
            mmap_reserve(real_start, real_end - real_start);
        } else {
            ret = munmap(g2h(real_start), real_end - real_start);
        }
    }

    if (ret == 0) {
        /* Clear the guest page table and drop stale translations. */
        page_set_flags(start, start + len, 0);
        tb_invalidate_phys_range(start, start + len);
    }
    mmap_unlock();
    return ret;
}
/*
 * Emulate mremap() for the guest.  Three cases mirror the host API:
 * MREMAP_FIXED (explicit new address), MREMAP_MAYMOVE (we pick a new
 * address via mmap_find_vma and force MREMAP_FIXED on the host), and
 * in-place resize.  In reserved_va mode, address space vacated by a
 * move or shrink is re-reserved via mmap_reserve().
 *
 * Returns the new guest address or -1 with errno set.
 */
abi_long target_mremap(abi_ulong old_addr, abi_ulong old_size,
                       abi_ulong new_size, unsigned long flags,
                       abi_ulong new_addr)
{
    int prot;
    void *host_addr;

    if (!guest_range_valid(old_addr, old_size) ||
        ((flags & MREMAP_FIXED) &&
         !guest_range_valid(new_addr, new_size))) {
        errno = ENOMEM;
        return -1;
    }

    mmap_lock();

    if (flags & MREMAP_FIXED) {
        host_addr = mremap(g2h(old_addr), old_size, new_size,
                           flags, g2h(new_addr));

        if (reserved_va && host_addr != MAP_FAILED) {
            /* If new and old addresses overlap then the above mremap will
               already have failed with EINVAL.  */
            mmap_reserve(old_addr, old_size);
        }
    } else if (flags & MREMAP_MAYMOVE) {
        abi_ulong mmap_start;

        mmap_start = mmap_find_vma(0, new_size, TARGET_PAGE_SIZE);

        if (mmap_start == -1) {
            errno = ENOMEM;
            host_addr = MAP_FAILED;
        } else {
            host_addr = mremap(g2h(old_addr), old_size, new_size,
                               flags | MREMAP_FIXED, g2h(mmap_start));
            if (reserved_va) {
                mmap_reserve(old_addr, old_size);
            }
        }
    } else {
        /* In-place resize.  NOTE(review): this inner 'prot' shadows the
         * function-scope 'prot' declared above; the outer one is only
         * assigned later, after the if/else. */
        int prot = 0;
        if (reserved_va && old_size < new_size) {
            abi_ulong addr;
            /* Growing in place: the extension must be unused guest pages. */
            for (addr = old_addr + old_size;
                 addr < old_addr + new_size;
                 addr++) {
                prot |= page_get_flags(addr);
            }
        }
        if (prot == 0) {
            host_addr = mremap(g2h(old_addr), old_size, new_size, flags);
            if (host_addr != MAP_FAILED && reserved_va && old_size > new_size) {
                /* Shrunk: re-reserve the tail we gave back. */
                mmap_reserve(old_addr + old_size, old_size - new_size);
            }
        } else {
            errno = ENOMEM;
            host_addr = MAP_FAILED;
        }

        /* Check if address fits target address space */
        if ((unsigned long)host_addr + new_size > (abi_ulong)-1) {
            /* Revert mremap() changes */
            host_addr = mremap(g2h(old_addr), new_size, old_size, flags);
            errno = ENOMEM;
            host_addr = MAP_FAILED;
        }
    }

    if (host_addr == MAP_FAILED) {
        new_addr = -1;
    } else {
        new_addr = h2g(host_addr);
        /* Move the page flags from the old range to the new one. */
        prot = page_get_flags(old_addr);
        page_set_flags(old_addr, old_addr + old_size, 0);
        page_set_flags(new_addr, new_addr + new_size, prot | PAGE_VALID);
    }
    tb_invalidate_phys_range(new_addr, new_addr + new_size);
    mmap_unlock();
    return new_addr;
}