|
@@ -460,6 +460,7 @@ enum {
|
|
|
static bool init_guest_commpage(void)
|
|
|
{
|
|
|
ARMCPU *cpu = ARM_CPU(thread_cpu);
|
|
|
+ int host_page_size = qemu_real_host_page_size();
|
|
|
abi_ptr commpage;
|
|
|
void *want;
|
|
|
void *addr;
|
|
@@ -472,10 +473,12 @@ static bool init_guest_commpage(void)
|
|
|
return true;
|
|
|
}
|
|
|
|
|
|
- commpage = HI_COMMPAGE & -qemu_host_page_size;
|
|
|
+ commpage = HI_COMMPAGE & -host_page_size;
|
|
|
want = g2h_untagged(commpage);
|
|
|
- addr = mmap(want, qemu_host_page_size, PROT_READ | PROT_WRITE,
|
|
|
- MAP_ANONYMOUS | MAP_PRIVATE | MAP_FIXED, -1, 0);
|
|
|
+ addr = mmap(want, host_page_size, PROT_READ | PROT_WRITE,
|
|
|
+ MAP_ANONYMOUS | MAP_PRIVATE |
|
|
|
+ (commpage < reserved_va ? MAP_FIXED : MAP_FIXED_NOREPLACE),
|
|
|
+ -1, 0);
|
|
|
|
|
|
if (addr == MAP_FAILED) {
|
|
|
perror("Allocating guest commpage");
|
|
@@ -488,12 +491,12 @@ static bool init_guest_commpage(void)
|
|
|
/* Set kernel helper versions; rest of page is 0. */
|
|
|
__put_user(5, (uint32_t *)g2h_untagged(0xffff0ffcu));
|
|
|
|
|
|
- if (mprotect(addr, qemu_host_page_size, PROT_READ)) {
|
|
|
+ if (mprotect(addr, host_page_size, PROT_READ)) {
|
|
|
perror("Protecting guest commpage");
|
|
|
exit(EXIT_FAILURE);
|
|
|
}
|
|
|
|
|
|
- page_set_flags(commpage, commpage | ~qemu_host_page_mask,
|
|
|
+ page_set_flags(commpage, commpage | (host_page_size - 1),
|
|
|
PAGE_READ | PAGE_EXEC | PAGE_VALID);
|
|
|
return true;
|
|
|
}
|
|
@@ -1532,10 +1535,14 @@ static bool init_guest_commpage(void)
|
|
|
0x3a, 0x68, 0x3b, 0x00, /* trap 0 */
|
|
|
};
|
|
|
|
|
|
- void *want = g2h_untagged(LO_COMMPAGE & -qemu_host_page_size);
|
|
|
- void *addr = mmap(want, qemu_host_page_size, PROT_READ | PROT_WRITE,
|
|
|
- MAP_ANONYMOUS | MAP_PRIVATE | MAP_FIXED, -1, 0);
|
|
|
+ int host_page_size = qemu_real_host_page_size();
|
|
|
+ void *want, *addr;
|
|
|
|
|
|
+ want = g2h_untagged(LO_COMMPAGE & -host_page_size);
|
|
|
+ addr = mmap(want, host_page_size, PROT_READ | PROT_WRITE,
|
|
|
+ MAP_ANONYMOUS | MAP_PRIVATE |
|
|
|
+ (reserved_va ? MAP_FIXED : MAP_FIXED_NOREPLACE),
|
|
|
+ -1, 0);
|
|
|
if (addr == MAP_FAILED) {
|
|
|
perror("Allocating guest commpage");
|
|
|
exit(EXIT_FAILURE);
|
|
@@ -1544,9 +1551,9 @@ static bool init_guest_commpage(void)
|
|
|
return false;
|
|
|
}
|
|
|
|
|
|
- memcpy(addr, kuser_page, sizeof(kuser_page));
|
|
|
+ memcpy(g2h_untagged(LO_COMMPAGE), kuser_page, sizeof(kuser_page));
|
|
|
|
|
|
- if (mprotect(addr, qemu_host_page_size, PROT_READ)) {
|
|
|
+ if (mprotect(addr, host_page_size, PROT_READ)) {
|
|
|
perror("Protecting guest commpage");
|
|
|
exit(EXIT_FAILURE);
|
|
|
}
|
|
@@ -1970,16 +1977,20 @@ static inline void init_thread(struct target_pt_regs *regs,
|
|
|
|
|
|
static bool init_guest_commpage(void)
|
|
|
{
|
|
|
- void *want = g2h_untagged(LO_COMMPAGE);
|
|
|
- void *addr = mmap(want, qemu_host_page_size, PROT_NONE,
|
|
|
- MAP_ANONYMOUS | MAP_PRIVATE | MAP_FIXED, -1, 0);
|
|
|
+ /* If reserved_va, then we have already mapped 0 page on the host. */
|
|
|
+ if (!reserved_va) {
|
|
|
+ void *want, *addr;
|
|
|
|
|
|
- if (addr == MAP_FAILED) {
|
|
|
- perror("Allocating guest commpage");
|
|
|
- exit(EXIT_FAILURE);
|
|
|
- }
|
|
|
- if (addr != want) {
|
|
|
- return false;
|
|
|
+ want = g2h_untagged(LO_COMMPAGE);
|
|
|
+ addr = mmap(want, TARGET_PAGE_SIZE, PROT_NONE,
|
|
|
+ MAP_ANONYMOUS | MAP_PRIVATE | MAP_FIXED_NOREPLACE, -1, 0);
|
|
|
+ if (addr == MAP_FAILED) {
|
|
|
+ perror("Allocating guest commpage");
|
|
|
+ exit(EXIT_FAILURE);
|
|
|
+ }
|
|
|
+ if (addr != want) {
|
|
|
+ return false;
|
|
|
+ }
|
|
|
}
|
|
|
|
|
|
/*
|
|
@@ -2679,13 +2690,7 @@ static abi_ulong create_elf_tables(abi_ulong p, int argc, int envc,
|
|
|
NEW_AUX_ENT(AT_PHDR, (abi_ulong)(info->load_addr + exec->e_phoff));
|
|
|
NEW_AUX_ENT(AT_PHENT, (abi_ulong)(sizeof (struct elf_phdr)));
|
|
|
NEW_AUX_ENT(AT_PHNUM, (abi_ulong)(exec->e_phnum));
|
|
|
- if ((info->alignment & ~qemu_host_page_mask) != 0) {
|
|
|
- /* Target doesn't support host page size alignment */
|
|
|
- NEW_AUX_ENT(AT_PAGESZ, (abi_ulong)(TARGET_PAGE_SIZE));
|
|
|
- } else {
|
|
|
- NEW_AUX_ENT(AT_PAGESZ, (abi_ulong)(MAX(TARGET_PAGE_SIZE,
|
|
|
- qemu_host_page_size)));
|
|
|
- }
|
|
|
+ NEW_AUX_ENT(AT_PAGESZ, (abi_ulong)(TARGET_PAGE_SIZE));
|
|
|
NEW_AUX_ENT(AT_BASE, (abi_ulong)(interp_info ? interp_info->load_addr : 0));
|
|
|
NEW_AUX_ENT(AT_FLAGS, (abi_ulong)0);
|
|
|
NEW_AUX_ENT(AT_ENTRY, info->entry);
|
|
@@ -2893,7 +2898,7 @@ static bool pgb_addr_set(PGBAddrs *ga, abi_ulong guest_loaddr,
|
|
|
|
|
|
/* Add any HI_COMMPAGE not covered by reserved_va. */
|
|
|
if (reserved_va < HI_COMMPAGE) {
|
|
|
- ga->bounds[n][0] = HI_COMMPAGE & qemu_host_page_mask;
|
|
|
+ ga->bounds[n][0] = HI_COMMPAGE & qemu_real_host_page_mask();
|
|
|
ga->bounds[n][1] = HI_COMMPAGE + TARGET_PAGE_SIZE - 1;
|
|
|
n++;
|
|
|
}
|
|
@@ -3017,8 +3022,6 @@ static void pgb_dynamic(const char *image_name, uintptr_t guest_loaddr,
|
|
|
uintptr_t brk, ret;
|
|
|
PGBAddrs ga;
|
|
|
|
|
|
- assert(QEMU_IS_ALIGNED(guest_loaddr, align));
|
|
|
-
|
|
|
/* Try the identity map first. */
|
|
|
if (pgb_addr_set(&ga, guest_loaddr, guest_hiaddr, true)) {
|
|
|
brk = (uintptr_t)sbrk(0);
|
|
@@ -3075,7 +3078,7 @@ void probe_guest_base(const char *image_name, abi_ulong guest_loaddr,
|
|
|
abi_ulong guest_hiaddr)
|
|
|
{
|
|
|
/* In order to use host shmat, we must be able to honor SHMLBA. */
|
|
|
- uintptr_t align = MAX(SHMLBA, qemu_host_page_size);
|
|
|
+ uintptr_t align = MAX(SHMLBA, TARGET_PAGE_SIZE);
|
|
|
|
|
|
/* Sanity check the guest binary. */
|
|
|
if (reserved_va) {
|
|
@@ -3912,8 +3915,9 @@ int load_elf_binary(struct linux_binprm *bprm, struct image_info *info)
|
|
|
and some applications "depend" upon this behavior. Since
|
|
|
we do not have the power to recompile these, we emulate
|
|
|
the SVr4 behavior. Sigh. */
|
|
|
- target_mmap(0, qemu_host_page_size, PROT_READ | PROT_EXEC,
|
|
|
- MAP_FIXED | MAP_PRIVATE | MAP_ANONYMOUS, -1, 0);
|
|
|
+ target_mmap(0, TARGET_PAGE_SIZE, PROT_READ | PROT_EXEC,
|
|
|
+ MAP_FIXED_NOREPLACE | MAP_PRIVATE | MAP_ANONYMOUS,
|
|
|
+ -1, 0);
|
|
|
}
|
|
|
#ifdef TARGET_MIPS
|
|
|
info->interp_fp_abi = interp_info.fp_abi;
|
|
@@ -3963,6 +3967,8 @@ int load_elf_binary(struct linux_binprm *bprm, struct image_info *info)
|
|
|
}
|
|
|
|
|
|
#ifdef USE_ELF_CORE_DUMP
|
|
|
+#include "exec/translate-all.h"
|
|
|
+
|
|
|
/*
|
|
|
* Definitions to generate Intel SVR4-like core files.
|
|
|
* These mostly have the same names as the SVR4 types with "target_elf_"
|
|
@@ -4002,18 +4008,6 @@ int load_elf_binary(struct linux_binprm *bprm, struct image_info *info)
|
|
|
* Example for ARM target is provided in this file.
|
|
|
*/
|
|
|
|
|
|
-/* An ELF note in memory */
|
|
|
-struct memelfnote {
|
|
|
- const char *name;
|
|
|
- size_t namesz;
|
|
|
- size_t namesz_rounded;
|
|
|
- int type;
|
|
|
- size_t datasz;
|
|
|
- size_t datasz_rounded;
|
|
|
- void *data;
|
|
|
- size_t notesz;
|
|
|
-};
|
|
|
-
|
|
|
struct target_elf_siginfo {
|
|
|
abi_int si_signo; /* signal number */
|
|
|
abi_int si_code; /* extra code */
|
|
@@ -4053,77 +4047,6 @@ struct target_elf_prpsinfo {
|
|
|
char pr_psargs[ELF_PRARGSZ]; /* initial part of arg list */
|
|
|
};
|
|
|
|
|
|
-/* Here is the structure in which status of each thread is captured. */
|
|
|
-struct elf_thread_status {
|
|
|
- QTAILQ_ENTRY(elf_thread_status) ets_link;
|
|
|
- struct target_elf_prstatus prstatus; /* NT_PRSTATUS */
|
|
|
-#if 0
|
|
|
- elf_fpregset_t fpu; /* NT_PRFPREG */
|
|
|
- struct task_struct *thread;
|
|
|
- elf_fpxregset_t xfpu; /* ELF_CORE_XFPREG_TYPE */
|
|
|
-#endif
|
|
|
- struct memelfnote notes[1];
|
|
|
- int num_notes;
|
|
|
-};
|
|
|
-
|
|
|
-struct elf_note_info {
|
|
|
- struct memelfnote *notes;
|
|
|
- struct target_elf_prstatus *prstatus; /* NT_PRSTATUS */
|
|
|
- struct target_elf_prpsinfo *psinfo; /* NT_PRPSINFO */
|
|
|
-
|
|
|
- QTAILQ_HEAD(, elf_thread_status) thread_list;
|
|
|
-#if 0
|
|
|
- /*
|
|
|
- * Current version of ELF coredump doesn't support
|
|
|
- * dumping fp regs etc.
|
|
|
- */
|
|
|
- elf_fpregset_t *fpu;
|
|
|
- elf_fpxregset_t *xfpu;
|
|
|
- int thread_status_size;
|
|
|
-#endif
|
|
|
- int notes_size;
|
|
|
- int numnote;
|
|
|
-};
|
|
|
-
|
|
|
-struct vm_area_struct {
|
|
|
- target_ulong vma_start; /* start vaddr of memory region */
|
|
|
- target_ulong vma_end; /* end vaddr of memory region */
|
|
|
- abi_ulong vma_flags; /* protection etc. flags for the region */
|
|
|
- QTAILQ_ENTRY(vm_area_struct) vma_link;
|
|
|
-};
|
|
|
-
|
|
|
-struct mm_struct {
|
|
|
- QTAILQ_HEAD(, vm_area_struct) mm_mmap;
|
|
|
- int mm_count; /* number of mappings */
|
|
|
-};
|
|
|
-
|
|
|
-static struct mm_struct *vma_init(void);
|
|
|
-static void vma_delete(struct mm_struct *);
|
|
|
-static int vma_add_mapping(struct mm_struct *, target_ulong,
|
|
|
- target_ulong, abi_ulong);
|
|
|
-static int vma_get_mapping_count(const struct mm_struct *);
|
|
|
-static struct vm_area_struct *vma_first(const struct mm_struct *);
|
|
|
-static struct vm_area_struct *vma_next(struct vm_area_struct *);
|
|
|
-static abi_ulong vma_dump_size(const struct vm_area_struct *);
|
|
|
-static int vma_walker(void *priv, target_ulong start, target_ulong end,
|
|
|
- unsigned long flags);
|
|
|
-
|
|
|
-static void fill_elf_header(struct elfhdr *, int, uint16_t, uint32_t);
|
|
|
-static void fill_note(struct memelfnote *, const char *, int,
|
|
|
- unsigned int, void *);
|
|
|
-static void fill_prstatus(struct target_elf_prstatus *, const TaskState *, int);
|
|
|
-static int fill_psinfo(struct target_elf_prpsinfo *, const TaskState *);
|
|
|
-static void fill_auxv_note(struct memelfnote *, const TaskState *);
|
|
|
-static void fill_elf_note_phdr(struct elf_phdr *, int, off_t);
|
|
|
-static size_t note_size(const struct memelfnote *);
|
|
|
-static void free_note_info(struct elf_note_info *);
|
|
|
-static int fill_note_info(struct elf_note_info *, long, const CPUArchState *);
|
|
|
-static void fill_thread_info(struct elf_note_info *, const CPUArchState *);
|
|
|
-
|
|
|
-static int dump_write(int, const void *, size_t);
|
|
|
-static int write_note(struct memelfnote *, int);
|
|
|
-static int write_note_info(struct elf_note_info *, int);
|
|
|
-
|
|
|
#ifdef BSWAP_NEEDED
|
|
|
static void bswap_prstatus(struct target_elf_prstatus *prstatus)
|
|
|
{
|
|
@@ -4165,146 +4088,67 @@ static inline void bswap_psinfo(struct target_elf_prpsinfo *p) {}
|
|
|
static inline void bswap_note(struct elf_note *en) { }
|
|
|
#endif /* BSWAP_NEEDED */
|
|
|
|
|
|
-/*
|
|
|
- * Minimal support for linux memory regions. These are needed
|
|
|
- * when we are finding out what memory exactly belongs to
|
|
|
- * emulated process. No locks needed here, as long as
|
|
|
- * thread that received the signal is stopped.
|
|
|
- */
|
|
|
-
|
|
|
-static struct mm_struct *vma_init(void)
|
|
|
-{
|
|
|
- struct mm_struct *mm;
|
|
|
-
|
|
|
- if ((mm = g_malloc(sizeof (*mm))) == NULL)
|
|
|
- return (NULL);
|
|
|
-
|
|
|
- mm->mm_count = 0;
|
|
|
- QTAILQ_INIT(&mm->mm_mmap);
|
|
|
-
|
|
|
- return (mm);
|
|
|
-}
|
|
|
-
|
|
|
-static void vma_delete(struct mm_struct *mm)
|
|
|
-{
|
|
|
- struct vm_area_struct *vma;
|
|
|
-
|
|
|
- while ((vma = vma_first(mm)) != NULL) {
|
|
|
- QTAILQ_REMOVE(&mm->mm_mmap, vma, vma_link);
|
|
|
- g_free(vma);
|
|
|
- }
|
|
|
- g_free(mm);
|
|
|
-}
|
|
|
-
|
|
|
-static int vma_add_mapping(struct mm_struct *mm, target_ulong start,
|
|
|
- target_ulong end, abi_ulong flags)
|
|
|
-{
|
|
|
- struct vm_area_struct *vma;
|
|
|
-
|
|
|
- if ((vma = g_malloc0(sizeof (*vma))) == NULL)
|
|
|
- return (-1);
|
|
|
-
|
|
|
- vma->vma_start = start;
|
|
|
- vma->vma_end = end;
|
|
|
- vma->vma_flags = flags;
|
|
|
-
|
|
|
- QTAILQ_INSERT_TAIL(&mm->mm_mmap, vma, vma_link);
|
|
|
- mm->mm_count++;
|
|
|
-
|
|
|
- return (0);
|
|
|
-}
|
|
|
-
|
|
|
-static struct vm_area_struct *vma_first(const struct mm_struct *mm)
|
|
|
-{
|
|
|
- return (QTAILQ_FIRST(&mm->mm_mmap));
|
|
|
-}
|
|
|
-
|
|
|
-static struct vm_area_struct *vma_next(struct vm_area_struct *vma)
|
|
|
-{
|
|
|
- return (QTAILQ_NEXT(vma, vma_link));
|
|
|
-}
|
|
|
-
|
|
|
-static int vma_get_mapping_count(const struct mm_struct *mm)
|
|
|
-{
|
|
|
- return (mm->mm_count);
|
|
|
-}
|
|
|
-
|
|
|
/*
|
|
|
* Calculate file (dump) size of given memory region.
|
|
|
*/
|
|
|
-static abi_ulong vma_dump_size(const struct vm_area_struct *vma)
|
|
|
+static size_t vma_dump_size(target_ulong start, target_ulong end,
|
|
|
+ unsigned long flags)
|
|
|
{
|
|
|
- /* if we cannot even read the first page, skip it */
|
|
|
- if (!access_ok_untagged(VERIFY_READ, vma->vma_start, TARGET_PAGE_SIZE))
|
|
|
- return (0);
|
|
|
+ /* The area must be readable. */
|
|
|
+ if (!(flags & PAGE_READ)) {
|
|
|
+ return 0;
|
|
|
+ }
|
|
|
|
|
|
/*
|
|
|
* Usually we don't dump executable pages as they contain
|
|
|
* non-writable code that debugger can read directly from
|
|
|
- * target library etc. However, thread stacks are marked
|
|
|
- * also executable so we read in first page of given region
|
|
|
- * and check whether it contains elf header. If there is
|
|
|
- * no elf header, we dump it.
|
|
|
+ * target library etc. If there is no elf header, we dump it.
|
|
|
*/
|
|
|
- if (vma->vma_flags & PROT_EXEC) {
|
|
|
- char page[TARGET_PAGE_SIZE];
|
|
|
-
|
|
|
- if (copy_from_user(page, vma->vma_start, sizeof (page))) {
|
|
|
- return 0;
|
|
|
- }
|
|
|
- if ((page[EI_MAG0] == ELFMAG0) &&
|
|
|
- (page[EI_MAG1] == ELFMAG1) &&
|
|
|
- (page[EI_MAG2] == ELFMAG2) &&
|
|
|
- (page[EI_MAG3] == ELFMAG3)) {
|
|
|
- /*
|
|
|
- * Mappings are possibly from ELF binary. Don't dump
|
|
|
- * them.
|
|
|
- */
|
|
|
- return (0);
|
|
|
- }
|
|
|
+ if (!(flags & PAGE_WRITE_ORG) &&
|
|
|
+ (flags & PAGE_EXEC) &&
|
|
|
+ memcmp(g2h_untagged(start), ELFMAG, SELFMAG) == 0) {
|
|
|
+ return 0;
|
|
|
}
|
|
|
|
|
|
- return (vma->vma_end - vma->vma_start);
|
|
|
+ return end - start;
|
|
|
}
|
|
|
|
|
|
-static int vma_walker(void *priv, target_ulong start, target_ulong end,
|
|
|
- unsigned long flags)
|
|
|
+static size_t size_note(const char *name, size_t datasz)
|
|
|
{
|
|
|
- struct mm_struct *mm = (struct mm_struct *)priv;
|
|
|
+ size_t namesz = strlen(name) + 1;
|
|
|
|
|
|
- vma_add_mapping(mm, start, end, flags);
|
|
|
- return (0);
|
|
|
+ namesz = ROUND_UP(namesz, 4);
|
|
|
+ datasz = ROUND_UP(datasz, 4);
|
|
|
+
|
|
|
+ return sizeof(struct elf_note) + namesz + datasz;
|
|
|
}
|
|
|
|
|
|
-static void fill_note(struct memelfnote *note, const char *name, int type,
|
|
|
- unsigned int sz, void *data)
|
|
|
+static void *fill_note(void **pptr, int type, const char *name, size_t datasz)
|
|
|
{
|
|
|
- unsigned int namesz;
|
|
|
+ void *ptr = *pptr;
|
|
|
+ struct elf_note *n = ptr;
|
|
|
+ size_t namesz = strlen(name) + 1;
|
|
|
|
|
|
- namesz = strlen(name) + 1;
|
|
|
- note->name = name;
|
|
|
- note->namesz = namesz;
|
|
|
- note->namesz_rounded = roundup(namesz, sizeof (int32_t));
|
|
|
- note->type = type;
|
|
|
- note->datasz = sz;
|
|
|
- note->datasz_rounded = roundup(sz, sizeof (int32_t));
|
|
|
+ n->n_namesz = namesz;
|
|
|
+ n->n_descsz = datasz;
|
|
|
+ n->n_type = type;
|
|
|
+ bswap_note(n);
|
|
|
|
|
|
- note->data = data;
|
|
|
+ ptr += sizeof(*n);
|
|
|
+ memcpy(ptr, name, namesz);
|
|
|
|
|
|
- /*
|
|
|
- * We calculate rounded up note size here as specified by
|
|
|
- * ELF document.
|
|
|
- */
|
|
|
- note->notesz = sizeof (struct elf_note) +
|
|
|
- note->namesz_rounded + note->datasz_rounded;
|
|
|
+ namesz = ROUND_UP(namesz, 4);
|
|
|
+ datasz = ROUND_UP(datasz, 4);
|
|
|
+
|
|
|
+ *pptr = ptr + namesz + datasz;
|
|
|
+ return ptr + namesz;
|
|
|
}
|
|
|
|
|
|
static void fill_elf_header(struct elfhdr *elf, int segs, uint16_t machine,
|
|
|
uint32_t flags)
|
|
|
{
|
|
|
- (void) memset(elf, 0, sizeof(*elf));
|
|
|
+ memcpy(elf->e_ident, ELFMAG, SELFMAG);
|
|
|
|
|
|
- (void) memcpy(elf->e_ident, ELFMAG, SELFMAG);
|
|
|
elf->e_ident[EI_CLASS] = ELF_CLASS;
|
|
|
elf->e_ident[EI_DATA] = ELF_DATA;
|
|
|
elf->e_ident[EI_VERSION] = EV_CURRENT;
|
|
@@ -4322,95 +4166,79 @@ static void fill_elf_header(struct elfhdr *elf, int segs, uint16_t machine,
|
|
|
bswap_ehdr(elf);
|
|
|
}
|
|
|
|
|
|
-static void fill_elf_note_phdr(struct elf_phdr *phdr, int sz, off_t offset)
|
|
|
+static void fill_elf_note_phdr(struct elf_phdr *phdr, size_t sz, off_t offset)
|
|
|
{
|
|
|
phdr->p_type = PT_NOTE;
|
|
|
phdr->p_offset = offset;
|
|
|
- phdr->p_vaddr = 0;
|
|
|
- phdr->p_paddr = 0;
|
|
|
phdr->p_filesz = sz;
|
|
|
- phdr->p_memsz = 0;
|
|
|
- phdr->p_flags = 0;
|
|
|
- phdr->p_align = 0;
|
|
|
|
|
|
bswap_phdr(phdr, 1);
|
|
|
}
|
|
|
|
|
|
-static size_t note_size(const struct memelfnote *note)
|
|
|
+static void fill_prstatus_note(void *data, const TaskState *ts,
|
|
|
+ CPUState *cpu, int signr)
|
|
|
{
|
|
|
- return (note->notesz);
|
|
|
-}
|
|
|
+ /*
|
|
|
+ * Because note memory is only aligned to 4, and target_elf_prstatus
|
|
|
+ * may well have higher alignment requirements, fill locally and
|
|
|
+ * memcpy to the destination afterward.
|
|
|
+ */
|
|
|
+ struct target_elf_prstatus prstatus = {
|
|
|
+ .pr_info.si_signo = signr,
|
|
|
+ .pr_cursig = signr,
|
|
|
+ .pr_pid = ts->ts_tid,
|
|
|
+ .pr_ppid = getppid(),
|
|
|
+ .pr_pgrp = getpgrp(),
|
|
|
+ .pr_sid = getsid(0),
|
|
|
+ };
|
|
|
|
|
|
-static void fill_prstatus(struct target_elf_prstatus *prstatus,
|
|
|
- const TaskState *ts, int signr)
|
|
|
-{
|
|
|
- (void) memset(prstatus, 0, sizeof (*prstatus));
|
|
|
- prstatus->pr_info.si_signo = prstatus->pr_cursig = signr;
|
|
|
- prstatus->pr_pid = ts->ts_tid;
|
|
|
- prstatus->pr_ppid = getppid();
|
|
|
- prstatus->pr_pgrp = getpgrp();
|
|
|
- prstatus->pr_sid = getsid(0);
|
|
|
-
|
|
|
- bswap_prstatus(prstatus);
|
|
|
+ elf_core_copy_regs(&prstatus.pr_reg, cpu_env(cpu));
|
|
|
+ bswap_prstatus(&prstatus);
|
|
|
+ memcpy(data, &prstatus, sizeof(prstatus));
|
|
|
}
|
|
|
|
|
|
-static int fill_psinfo(struct target_elf_prpsinfo *psinfo, const TaskState *ts)
|
|
|
+static void fill_prpsinfo_note(void *data, const TaskState *ts)
|
|
|
{
|
|
|
+ /*
|
|
|
+ * Because note memory is only aligned to 4, and target_elf_prpsinfo
|
|
|
+ * may well have higher alignment requirements, fill locally and
|
|
|
+ * memcpy to the destination afterward.
|
|
|
+ */
|
|
|
+ struct target_elf_prpsinfo psinfo;
|
|
|
char *base_filename;
|
|
|
- unsigned int i, len;
|
|
|
-
|
|
|
- (void) memset(psinfo, 0, sizeof (*psinfo));
|
|
|
+ size_t len;
|
|
|
|
|
|
len = ts->info->env_strings - ts->info->arg_strings;
|
|
|
- if (len >= ELF_PRARGSZ)
|
|
|
- len = ELF_PRARGSZ - 1;
|
|
|
- if (copy_from_user(&psinfo->pr_psargs, ts->info->arg_strings, len)) {
|
|
|
- return -EFAULT;
|
|
|
- }
|
|
|
- for (i = 0; i < len; i++)
|
|
|
- if (psinfo->pr_psargs[i] == 0)
|
|
|
- psinfo->pr_psargs[i] = ' ';
|
|
|
- psinfo->pr_psargs[len] = 0;
|
|
|
-
|
|
|
- psinfo->pr_pid = getpid();
|
|
|
- psinfo->pr_ppid = getppid();
|
|
|
- psinfo->pr_pgrp = getpgrp();
|
|
|
- psinfo->pr_sid = getsid(0);
|
|
|
- psinfo->pr_uid = getuid();
|
|
|
- psinfo->pr_gid = getgid();
|
|
|
+ len = MIN(len, ELF_PRARGSZ);
|
|
|
+ memcpy(&psinfo.pr_psargs, g2h_untagged(ts->info->arg_strings), len);
|
|
|
+ for (size_t i = 0; i < len; i++) {
|
|
|
+ if (psinfo.pr_psargs[i] == 0) {
|
|
|
+ psinfo.pr_psargs[i] = ' ';
|
|
|
+ }
|
|
|
+ }
|
|
|
+
|
|
|
+ psinfo.pr_pid = getpid();
|
|
|
+ psinfo.pr_ppid = getppid();
|
|
|
+ psinfo.pr_pgrp = getpgrp();
|
|
|
+ psinfo.pr_sid = getsid(0);
|
|
|
+ psinfo.pr_uid = getuid();
|
|
|
+ psinfo.pr_gid = getgid();
|
|
|
|
|
|
base_filename = g_path_get_basename(ts->bprm->filename);
|
|
|
/*
|
|
|
* Using strncpy here is fine: at max-length,
|
|
|
* this field is not NUL-terminated.
|
|
|
*/
|
|
|
- (void) strncpy(psinfo->pr_fname, base_filename,
|
|
|
- sizeof(psinfo->pr_fname));
|
|
|
-
|
|
|
+ strncpy(psinfo.pr_fname, base_filename, sizeof(psinfo.pr_fname));
|
|
|
g_free(base_filename);
|
|
|
- bswap_psinfo(psinfo);
|
|
|
- return (0);
|
|
|
+
|
|
|
+ bswap_psinfo(&psinfo);
|
|
|
+ memcpy(data, &psinfo, sizeof(psinfo));
|
|
|
}
|
|
|
|
|
|
-static void fill_auxv_note(struct memelfnote *note, const TaskState *ts)
|
|
|
+static void fill_auxv_note(void *data, const TaskState *ts)
|
|
|
{
|
|
|
- elf_addr_t auxv = (elf_addr_t)ts->info->saved_auxv;
|
|
|
- elf_addr_t orig_auxv = auxv;
|
|
|
- void *ptr;
|
|
|
- int len = ts->info->auxv_len;
|
|
|
-
|
|
|
- /*
|
|
|
- * Auxiliary vector is stored in target process stack. It contains
|
|
|
- * {type, value} pairs that we need to dump into note. This is not
|
|
|
- * strictly necessary but we do it here for sake of completeness.
|
|
|
- */
|
|
|
-
|
|
|
- /* read in whole auxv vector and copy it to memelfnote */
|
|
|
- ptr = lock_user(VERIFY_READ, orig_auxv, len, 0);
|
|
|
- if (ptr != NULL) {
|
|
|
- fill_note(note, "CORE", NT_AUXV, len, ptr);
|
|
|
- unlock_user(ptr, auxv, len);
|
|
|
- }
|
|
|
+ memcpy(data, g2h_untagged(ts->info->saved_auxv), ts->info->auxv_len);
|
|
|
}
|
|
|
|
|
|
/*
|
|
@@ -4434,27 +4262,9 @@ static int dump_write(int fd, const void *ptr, size_t size)
|
|
|
{
|
|
|
const char *bufp = (const char *)ptr;
|
|
|
ssize_t bytes_written, bytes_left;
|
|
|
- struct rlimit dumpsize;
|
|
|
- off_t pos;
|
|
|
|
|
|
bytes_written = 0;
|
|
|
- getrlimit(RLIMIT_CORE, &dumpsize);
|
|
|
- if ((pos = lseek(fd, 0, SEEK_CUR))==-1) {
|
|
|
- if (errno == ESPIPE) { /* not a seekable stream */
|
|
|
- bytes_left = size;
|
|
|
- } else {
|
|
|
- return pos;
|
|
|
- }
|
|
|
- } else {
|
|
|
- if (dumpsize.rlim_cur <= pos) {
|
|
|
- return -1;
|
|
|
- } else if (dumpsize.rlim_cur == RLIM_INFINITY) {
|
|
|
- bytes_left = size;
|
|
|
- } else {
|
|
|
- size_t limit_left=dumpsize.rlim_cur - pos;
|
|
|
- bytes_left = limit_left >= size ? size : limit_left ;
|
|
|
- }
|
|
|
- }
|
|
|
+ bytes_left = size;
|
|
|
|
|
|
/*
|
|
|
* In normal conditions, single write(2) should do but
|
|
@@ -4476,135 +4286,76 @@ static int dump_write(int fd, const void *ptr, size_t size)
|
|
|
return (0);
|
|
|
}
|
|
|
|
|
|
-static int write_note(struct memelfnote *men, int fd)
|
|
|
+static int wmr_page_unprotect_regions(void *opaque, target_ulong start,
|
|
|
+ target_ulong end, unsigned long flags)
|
|
|
{
|
|
|
- struct elf_note en;
|
|
|
-
|
|
|
- en.n_namesz = men->namesz;
|
|
|
- en.n_type = men->type;
|
|
|
- en.n_descsz = men->datasz;
|
|
|
+ if ((flags & (PAGE_WRITE | PAGE_WRITE_ORG)) == PAGE_WRITE_ORG) {
|
|
|
+ size_t step = MAX(TARGET_PAGE_SIZE, qemu_real_host_page_size());
|
|
|
|
|
|
- bswap_note(&en);
|
|
|
-
|
|
|
- if (dump_write(fd, &en, sizeof(en)) != 0)
|
|
|
- return (-1);
|
|
|
- if (dump_write(fd, men->name, men->namesz_rounded) != 0)
|
|
|
- return (-1);
|
|
|
- if (dump_write(fd, men->data, men->datasz_rounded) != 0)
|
|
|
- return (-1);
|
|
|
-
|
|
|
- return (0);
|
|
|
-}
|
|
|
-
|
|
|
-static void fill_thread_info(struct elf_note_info *info, const CPUArchState *env)
|
|
|
-{
|
|
|
- CPUState *cpu = env_cpu((CPUArchState *)env);
|
|
|
- TaskState *ts = (TaskState *)cpu->opaque;
|
|
|
- struct elf_thread_status *ets;
|
|
|
-
|
|
|
- ets = g_malloc0(sizeof (*ets));
|
|
|
- ets->num_notes = 1; /* only prstatus is dumped */
|
|
|
- fill_prstatus(&ets->prstatus, ts, 0);
|
|
|
- elf_core_copy_regs(&ets->prstatus.pr_reg, env);
|
|
|
- fill_note(&ets->notes[0], "CORE", NT_PRSTATUS, sizeof (ets->prstatus),
|
|
|
- &ets->prstatus);
|
|
|
-
|
|
|
- QTAILQ_INSERT_TAIL(&info->thread_list, ets, ets_link);
|
|
|
-
|
|
|
- info->notes_size += note_size(&ets->notes[0]);
|
|
|
+ while (1) {
|
|
|
+ page_unprotect(start, 0);
|
|
|
+ if (end - start <= step) {
|
|
|
+ break;
|
|
|
+ }
|
|
|
+ start += step;
|
|
|
+ }
|
|
|
+ }
|
|
|
+ return 0;
|
|
|
}
|
|
|
|
|
|
-static void init_note_info(struct elf_note_info *info)
|
|
|
-{
|
|
|
- /* Initialize the elf_note_info structure so that it is at
|
|
|
- * least safe to call free_note_info() on it. Must be
|
|
|
- * called before calling fill_note_info().
|
|
|
- */
|
|
|
- memset(info, 0, sizeof (*info));
|
|
|
- QTAILQ_INIT(&info->thread_list);
|
|
|
-}
|
|
|
+typedef struct {
|
|
|
+ unsigned count;
|
|
|
+ size_t size;
|
|
|
+} CountAndSizeRegions;
|
|
|
|
|
|
-static int fill_note_info(struct elf_note_info *info,
|
|
|
- long signr, const CPUArchState *env)
|
|
|
+static int wmr_count_and_size_regions(void *opaque, target_ulong start,
|
|
|
+ target_ulong end, unsigned long flags)
|
|
|
{
|
|
|
-#define NUMNOTES 3
|
|
|
- CPUState *cpu = env_cpu((CPUArchState *)env);
|
|
|
- TaskState *ts = (TaskState *)cpu->opaque;
|
|
|
- int i;
|
|
|
-
|
|
|
- info->notes = g_new0(struct memelfnote, NUMNOTES);
|
|
|
- if (info->notes == NULL)
|
|
|
- return (-ENOMEM);
|
|
|
- info->prstatus = g_malloc0(sizeof (*info->prstatus));
|
|
|
- if (info->prstatus == NULL)
|
|
|
- return (-ENOMEM);
|
|
|
- info->psinfo = g_malloc0(sizeof (*info->psinfo));
|
|
|
- if (info->prstatus == NULL)
|
|
|
- return (-ENOMEM);
|
|
|
-
|
|
|
- /*
|
|
|
- * First fill in status (and registers) of current thread
|
|
|
- * including process info & aux vector.
|
|
|
- */
|
|
|
- fill_prstatus(info->prstatus, ts, signr);
|
|
|
- elf_core_copy_regs(&info->prstatus->pr_reg, env);
|
|
|
- fill_note(&info->notes[0], "CORE", NT_PRSTATUS,
|
|
|
- sizeof (*info->prstatus), info->prstatus);
|
|
|
- fill_psinfo(info->psinfo, ts);
|
|
|
- fill_note(&info->notes[1], "CORE", NT_PRPSINFO,
|
|
|
- sizeof (*info->psinfo), info->psinfo);
|
|
|
- fill_auxv_note(&info->notes[2], ts);
|
|
|
- info->numnote = 3;
|
|
|
-
|
|
|
- info->notes_size = 0;
|
|
|
- for (i = 0; i < info->numnote; i++)
|
|
|
- info->notes_size += note_size(&info->notes[i]);
|
|
|
-
|
|
|
- /* read and fill status of all threads */
|
|
|
- WITH_QEMU_LOCK_GUARD(&qemu_cpu_list_lock) {
|
|
|
- CPU_FOREACH(cpu) {
|
|
|
- if (cpu == thread_cpu) {
|
|
|
- continue;
|
|
|
- }
|
|
|
- fill_thread_info(info, cpu_env(cpu));
|
|
|
- }
|
|
|
- }
|
|
|
+ CountAndSizeRegions *css = opaque;
|
|
|
|
|
|
- return (0);
|
|
|
+ css->count++;
|
|
|
+ css->size += vma_dump_size(start, end, flags);
|
|
|
+ return 0;
|
|
|
}
|
|
|
|
|
|
-static void free_note_info(struct elf_note_info *info)
|
|
|
+typedef struct {
|
|
|
+ struct elf_phdr *phdr;
|
|
|
+ off_t offset;
|
|
|
+} FillRegionPhdr;
|
|
|
+
|
|
|
+static int wmr_fill_region_phdr(void *opaque, target_ulong start,
|
|
|
+ target_ulong end, unsigned long flags)
|
|
|
{
|
|
|
- struct elf_thread_status *ets;
|
|
|
+ FillRegionPhdr *d = opaque;
|
|
|
+ struct elf_phdr *phdr = d->phdr;
|
|
|
|
|
|
- while (!QTAILQ_EMPTY(&info->thread_list)) {
|
|
|
- ets = QTAILQ_FIRST(&info->thread_list);
|
|
|
- QTAILQ_REMOVE(&info->thread_list, ets, ets_link);
|
|
|
- g_free(ets);
|
|
|
- }
|
|
|
+ phdr->p_type = PT_LOAD;
|
|
|
+ phdr->p_vaddr = start;
|
|
|
+ phdr->p_paddr = 0;
|
|
|
+ phdr->p_filesz = vma_dump_size(start, end, flags);
|
|
|
+ phdr->p_offset = d->offset;
|
|
|
+ d->offset += phdr->p_filesz;
|
|
|
+ phdr->p_memsz = end - start;
|
|
|
+ phdr->p_flags = (flags & PAGE_READ ? PF_R : 0)
|
|
|
+ | (flags & PAGE_WRITE_ORG ? PF_W : 0)
|
|
|
+ | (flags & PAGE_EXEC ? PF_X : 0);
|
|
|
+ phdr->p_align = ELF_EXEC_PAGESIZE;
|
|
|
|
|
|
- g_free(info->prstatus);
|
|
|
- g_free(info->psinfo);
|
|
|
- g_free(info->notes);
|
|
|
+ bswap_phdr(phdr, 1);
|
|
|
+ d->phdr = phdr + 1;
|
|
|
+ return 0;
|
|
|
}
|
|
|
|
|
|
-static int write_note_info(struct elf_note_info *info, int fd)
|
|
|
+static int wmr_write_region(void *opaque, target_ulong start,
|
|
|
+ target_ulong end, unsigned long flags)
|
|
|
{
|
|
|
- struct elf_thread_status *ets;
|
|
|
- int i, error = 0;
|
|
|
+ int fd = *(int *)opaque;
|
|
|
+ size_t size = vma_dump_size(start, end, flags);
|
|
|
|
|
|
- /* write prstatus, psinfo and auxv for current thread */
|
|
|
- for (i = 0; i < info->numnote; i++)
|
|
|
- if ((error = write_note(&info->notes[i], fd)) != 0)
|
|
|
- return (error);
|
|
|
-
|
|
|
- /* write prstatus for each thread */
|
|
|
- QTAILQ_FOREACH(ets, &info->thread_list, ets_link) {
|
|
|
- if ((error = write_note(&ets->notes[0], fd)) != 0)
|
|
|
- return (error);
|
|
|
+ if (!size) {
|
|
|
+ return 0;
|
|
|
}
|
|
|
-
|
|
|
- return (0);
|
|
|
+ return dump_write(fd, g2h_untagged(start), size);
|
|
|
}
|
|
|
|
|
|
/*
|
|
@@ -4654,151 +4405,125 @@ static int elf_core_dump(int signr, const CPUArchState *env)
|
|
|
{
|
|
|
const CPUState *cpu = env_cpu((CPUArchState *)env);
|
|
|
const TaskState *ts = (const TaskState *)cpu->opaque;
|
|
|
- struct vm_area_struct *vma = NULL;
|
|
|
- g_autofree char *corefile = NULL;
|
|
|
- struct elf_note_info info;
|
|
|
- struct elfhdr elf;
|
|
|
- struct elf_phdr phdr;
|
|
|
struct rlimit dumpsize;
|
|
|
- struct mm_struct *mm = NULL;
|
|
|
- off_t offset = 0, data_offset = 0;
|
|
|
- int segs = 0;
|
|
|
+ CountAndSizeRegions css;
|
|
|
+ off_t offset, note_offset, data_offset;
|
|
|
+ size_t note_size;
|
|
|
+ int cpus, ret;
|
|
|
int fd = -1;
|
|
|
-
|
|
|
- init_note_info(&info);
|
|
|
-
|
|
|
- errno = 0;
|
|
|
+ CPUState *cpu_iter;
|
|
|
|
|
|
if (prctl(PR_GET_DUMPABLE) == 0) {
|
|
|
return 0;
|
|
|
}
|
|
|
|
|
|
- if (getrlimit(RLIMIT_CORE, &dumpsize) == 0 && dumpsize.rlim_cur == 0) {
|
|
|
+ if (getrlimit(RLIMIT_CORE, &dumpsize) < 0 || dumpsize.rlim_cur == 0) {
|
|
|
return 0;
|
|
|
}
|
|
|
|
|
|
- corefile = core_dump_filename(ts);
|
|
|
+ cpu_list_lock();
|
|
|
+ mmap_lock();
|
|
|
|
|
|
- if ((fd = open(corefile, O_WRONLY | O_CREAT,
|
|
|
- S_IRUSR|S_IWUSR|S_IRGRP|S_IROTH)) < 0)
|
|
|
- return (-errno);
|
|
|
+ /* By unprotecting, we merge vmas that might be split. */
|
|
|
+ walk_memory_regions(NULL, wmr_page_unprotect_regions);
|
|
|
|
|
|
/*
|
|
|
* Walk through target process memory mappings and
|
|
|
- * set up structure containing this information. After
|
|
|
- * this point vma_xxx functions can be used.
|
|
|
+ * set up structure containing this information.
|
|
|
*/
|
|
|
- if ((mm = vma_init()) == NULL)
|
|
|
- goto out;
|
|
|
+ memset(&css, 0, sizeof(css));
|
|
|
+ walk_memory_regions(&css, wmr_count_and_size_regions);
|
|
|
|
|
|
- walk_memory_regions(mm, vma_walker);
|
|
|
- segs = vma_get_mapping_count(mm);
|
|
|
-
|
|
|
- /*
|
|
|
- * Construct valid coredump ELF header. We also
|
|
|
- * add one more segment for notes.
|
|
|
- */
|
|
|
- fill_elf_header(&elf, segs + 1, ELF_MACHINE, 0);
|
|
|
- if (dump_write(fd, &elf, sizeof (elf)) != 0)
|
|
|
- goto out;
|
|
|
+ cpus = 0;
|
|
|
+ CPU_FOREACH(cpu_iter) {
|
|
|
+ cpus++;
|
|
|
+ }
|
|
|
|
|
|
- /* fill in the in-memory version of notes */
|
|
|
- if (fill_note_info(&info, signr, env) < 0)
|
|
|
- goto out;
|
|
|
+ offset = sizeof(struct elfhdr);
|
|
|
+ offset += (css.count + 1) * sizeof(struct elf_phdr);
|
|
|
+ note_offset = offset;
|
|
|
|
|
|
- offset += sizeof (elf); /* elf header */
|
|
|
- offset += (segs + 1) * sizeof (struct elf_phdr); /* program headers */
|
|
|
+ offset += size_note("CORE", ts->info->auxv_len);
|
|
|
+ offset += size_note("CORE", sizeof(struct target_elf_prpsinfo));
|
|
|
+ offset += size_note("CORE", sizeof(struct target_elf_prstatus)) * cpus;
|
|
|
+ note_size = offset - note_offset;
|
|
|
+ data_offset = ROUND_UP(offset, ELF_EXEC_PAGESIZE);
|
|
|
|
|
|
- /* write out notes program header */
|
|
|
- fill_elf_note_phdr(&phdr, info.notes_size, offset);
|
|
|
+ /* Do not dump if the corefile size exceeds the limit. */
|
|
|
+ if (dumpsize.rlim_cur != RLIM_INFINITY
|
|
|
+ && dumpsize.rlim_cur < data_offset + css.size) {
|
|
|
+ errno = 0;
|
|
|
+ goto out;
|
|
|
+ }
|
|
|
|
|
|
- offset += info.notes_size;
|
|
|
- if (dump_write(fd, &phdr, sizeof (phdr)) != 0)
|
|
|
+ {
|
|
|
+ g_autofree char *corefile = core_dump_filename(ts);
|
|
|
+ fd = open(corefile, O_WRONLY | O_CREAT | O_TRUNC,
|
|
|
+ S_IRUSR | S_IWUSR | S_IRGRP | S_IROTH);
|
|
|
+ }
|
|
|
+ if (fd < 0) {
|
|
|
goto out;
|
|
|
+ }
|
|
|
|
|
|
/*
|
|
|
- * ELF specification wants data to start at page boundary so
|
|
|
- * we align it here.
|
|
|
+ * There is a fair amount of alignment padding within the notes
|
|
|
+ * as well as preceding the process memory.  Allocate a zeroed
|
|
|
+ * block to hold it all. Write all of the headers directly into
|
|
|
+ * this buffer and then write it out as a block.
|
|
|
*/
|
|
|
- data_offset = offset = roundup(offset, ELF_EXEC_PAGESIZE);
|
|
|
+ {
|
|
|
+ g_autofree void *header = g_malloc0(data_offset);
|
|
|
+ FillRegionPhdr frp;
|
|
|
+ void *hptr, *dptr;
|
|
|
+
|
|
|
+ /* Create elf file header. */
|
|
|
+ hptr = header;
|
|
|
+ fill_elf_header(hptr, css.count + 1, ELF_MACHINE, 0);
|
|
|
+ hptr += sizeof(struct elfhdr);
|
|
|
+
|
|
|
+ /* Create elf program headers. */
|
|
|
+ fill_elf_note_phdr(hptr, note_size, note_offset);
|
|
|
+ hptr += sizeof(struct elf_phdr);
|
|
|
+
|
|
|
+ frp.phdr = hptr;
|
|
|
+ frp.offset = data_offset;
|
|
|
+ walk_memory_regions(&frp, wmr_fill_region_phdr);
|
|
|
+ hptr = frp.phdr;
|
|
|
+
|
|
|
+ /* Create the notes. */
|
|
|
+ dptr = fill_note(&hptr, NT_AUXV, "CORE", ts->info->auxv_len);
|
|
|
+ fill_auxv_note(dptr, ts);
|
|
|
+
|
|
|
+ dptr = fill_note(&hptr, NT_PRPSINFO, "CORE",
|
|
|
+ sizeof(struct target_elf_prpsinfo));
|
|
|
+ fill_prpsinfo_note(dptr, ts);
|
|
|
+
|
|
|
+ CPU_FOREACH(cpu_iter) {
|
|
|
+ dptr = fill_note(&hptr, NT_PRSTATUS, "CORE",
|
|
|
+ sizeof(struct target_elf_prstatus));
|
|
|
+ fill_prstatus_note(dptr, ts, cpu_iter,
|
|
|
+ cpu_iter == cpu ? signr : 0);
|
|
|
+ }
|
|
|
|
|
|
- /*
|
|
|
- * Write program headers for memory regions mapped in
|
|
|
- * the target process.
|
|
|
- */
|
|
|
- for (vma = vma_first(mm); vma != NULL; vma = vma_next(vma)) {
|
|
|
- (void) memset(&phdr, 0, sizeof (phdr));
|
|
|
-
|
|
|
- phdr.p_type = PT_LOAD;
|
|
|
- phdr.p_offset = offset;
|
|
|
- phdr.p_vaddr = vma->vma_start;
|
|
|
- phdr.p_paddr = 0;
|
|
|
- phdr.p_filesz = vma_dump_size(vma);
|
|
|
- offset += phdr.p_filesz;
|
|
|
- phdr.p_memsz = vma->vma_end - vma->vma_start;
|
|
|
- phdr.p_flags = vma->vma_flags & PROT_READ ? PF_R : 0;
|
|
|
- if (vma->vma_flags & PROT_WRITE)
|
|
|
- phdr.p_flags |= PF_W;
|
|
|
- if (vma->vma_flags & PROT_EXEC)
|
|
|
- phdr.p_flags |= PF_X;
|
|
|
- phdr.p_align = ELF_EXEC_PAGESIZE;
|
|
|
-
|
|
|
- bswap_phdr(&phdr, 1);
|
|
|
- if (dump_write(fd, &phdr, sizeof(phdr)) != 0) {
|
|
|
+ if (dump_write(fd, header, data_offset) < 0) {
|
|
|
goto out;
|
|
|
}
|
|
|
}
|
|
|
|
|
|
/*
|
|
|
- * Next we write notes just after program headers. No
|
|
|
- * alignment needed here.
|
|
|
+ * Finally write process memory into the corefile as well.
|
|
|
*/
|
|
|
- if (write_note_info(&info, fd) < 0)
|
|
|
+ if (walk_memory_regions(&fd, wmr_write_region) < 0) {
|
|
|
goto out;
|
|
|
-
|
|
|
- /* align data to page boundary */
|
|
|
- if (lseek(fd, data_offset, SEEK_SET) != data_offset)
|
|
|
- goto out;
|
|
|
-
|
|
|
- /*
|
|
|
- * Finally we can dump process memory into corefile as well.
|
|
|
- */
|
|
|
- for (vma = vma_first(mm); vma != NULL; vma = vma_next(vma)) {
|
|
|
- abi_ulong addr;
|
|
|
- abi_ulong end;
|
|
|
-
|
|
|
- end = vma->vma_start + vma_dump_size(vma);
|
|
|
-
|
|
|
- for (addr = vma->vma_start; addr < end;
|
|
|
- addr += TARGET_PAGE_SIZE) {
|
|
|
- char page[TARGET_PAGE_SIZE];
|
|
|
- int error;
|
|
|
-
|
|
|
- /*
|
|
|
- * Read in page from target process memory and
|
|
|
- * write it to coredump file.
|
|
|
- */
|
|
|
- error = copy_from_user(page, addr, sizeof (page));
|
|
|
- if (error != 0) {
|
|
|
- (void) fprintf(stderr, "unable to dump " TARGET_ABI_FMT_lx "\n",
|
|
|
- addr);
|
|
|
- errno = -error;
|
|
|
- goto out;
|
|
|
- }
|
|
|
- if (dump_write(fd, page, TARGET_PAGE_SIZE) < 0)
|
|
|
- goto out;
|
|
|
- }
|
|
|
}
|
|
|
+ errno = 0;
|
|
|
|
|
|
out:
|
|
|
- free_note_info(&info);
|
|
|
- if (mm != NULL)
|
|
|
- vma_delete(mm);
|
|
|
- (void) close(fd);
|
|
|
-
|
|
|
- if (errno != 0)
|
|
|
- return (-errno);
|
|
|
- return (0);
|
|
|
+ ret = -errno;
|
|
|
+ mmap_unlock();
|
|
|
+ cpu_list_unlock();
|
|
|
+ close(fd);
|
|
|
+ return ret;
|
|
|
}
|
|
|
#endif /* USE_ELF_CORE_DUMP */
|
|
|
|