mmap-alloc: unfold qemu_ram_mmap()

Unfold parts of qemu_ram_mmap() for the sake of clarity, moving
declarations to the top and keeping architecture specifics inside the
ifdef/else blocks. The function's behaviour is unchanged.

Give ptr and ptr1 meaningful names:
ptr -> guardptr : pointer to the PROT_NONE guard region
ptr1 -> ptr : pointer to the mapped memory returned to caller
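
For orientation only (not part of this commit), a minimal sketch of the guard-mapping scheme these two pointers describe, written against plain POSIX mmap(2). The helper name and the power-of-two alignment assumption are illustrative, not QEMU's:

    #include <assert.h>
    #include <stdint.h>
    #include <sys/mman.h>
    #include <unistd.h>

    /* Sketch: reserve size + align bytes as an inaccessible PROT_NONE region
     * ("guardptr"), then map the aligned, usable region on top of part of it
     * with MAP_FIXED ("ptr").  The PROT_NONE leftovers act as guard pages. */
    static void *guarded_alloc_sketch(size_t size, size_t align)
    {
        size_t total = size + align;       /* extra room to find an aligned spot */
        void *guardptr = mmap(NULL, total, PROT_NONE,
                              MAP_PRIVATE | MAP_ANONYMOUS, -1, 0);
        uintptr_t aligned;
        size_t offset;
        void *ptr;

        if (guardptr == MAP_FAILED) {
            return MAP_FAILED;
        }

        assert(align >= (size_t)getpagesize());
        /* assumes align is a power of two */
        aligned = ((uintptr_t)guardptr + align - 1) & ~((uintptr_t)align - 1);
        offset = aligned - (uintptr_t)guardptr;

        /* Map the real, accessible memory inside the reservation. */
        ptr = mmap((char *)guardptr + offset, size, PROT_READ | PROT_WRITE,
                   MAP_FIXED | MAP_PRIVATE | MAP_ANONYMOUS, -1, 0);
        if (ptr == MAP_FAILED) {
            munmap(guardptr, total);
            return MAP_FAILED;
        }
        /* qemu_ram_mmap() additionally trims the unused head of the reservation
         * and everything past one trailing guard page; omitted here for brevity. */
        return ptr;
    }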

Backports commit 2044c3e7116eeac0449dcb4a4130cc8f8b9310da from qemu
Authored by Murilo Opsfelder Araujo on 2019-02-05 16:49:35 -05:00; committed by Lioncash
parent 0b7f1ff086
commit 22e3feb162


@@ -17,11 +17,19 @@
 void *qemu_ram_mmap(int fd, size_t size, size_t align, bool shared)
 {
+    int flags;
+    int guardfd;
+    size_t offset;
+    size_t total;
+    void *guardptr;
+    void *ptr;
+
     /*
      * Note: this always allocates at least one extra page of virtual address
      * space, even if size is already aligned.
      */
-    size_t total = size + align;
+    total = size + align;
+
 #if defined(__powerpc64__) && defined(__linux__)
     /* On ppc64 mappings in the same segment (aka slice) must share the same
      * page size. Since we will be re-allocating part of this segment
@@ -31,16 +39,21 @@ void *qemu_ram_mmap(int fd, size_t size, size_t align, bool shared)
      * We do this unless we are using the system page size, in which case
      * anonymous memory is OK.
      */
-    int anonfd = fd == -1 || qemu_fd_getpagesize(fd) == getpagesize() ? -1 : fd;
-    int flags = anonfd == -1 ? MAP_ANONYMOUS : MAP_NORESERVE;
-    void *ptr = mmap(0, total, PROT_NONE, flags | MAP_PRIVATE, anonfd, 0);
+    flags = MAP_PRIVATE;
+    if (fd == -1 || qemu_fd_getpagesize(fd) == getpagesize()) {
+        guardfd = -1;
+        flags |= MAP_ANONYMOUS;
+    } else {
+        guardfd = fd;
+        flags |= MAP_NORESERVE;
+    }
 #else
-    void *ptr = mmap(0, total, PROT_NONE, MAP_ANONYMOUS | MAP_PRIVATE, -1, 0);
+    guardfd = -1;
+    flags = MAP_PRIVATE | MAP_ANONYMOUS;
 #endif
-    size_t offset;
-    void *ptr1;
 
-    if (ptr == MAP_FAILED) {
+    guardptr = mmap(0, total, PROT_NONE, flags, guardfd, 0);
+
+    if (guardptr == MAP_FAILED) {
         return MAP_FAILED;
     }
@@ -49,19 +62,20 @@ void *qemu_ram_mmap(int fd, size_t size, size_t align, bool shared)
     /* Always align to host page size */
     assert(align >= getpagesize());
 
-    offset = QEMU_ALIGN_UP((uintptr_t)ptr, align) - (uintptr_t)ptr;
-    ptr1 = mmap(ptr + offset, size, PROT_READ | PROT_WRITE,
-                MAP_FIXED |
-                (fd == -1 ? MAP_ANONYMOUS : 0) |
-                (shared ? MAP_SHARED : MAP_PRIVATE),
-                fd, 0);
-    if (ptr1 == MAP_FAILED) {
-        munmap(ptr, total);
+    flags = MAP_FIXED;
+    flags |= fd == -1 ? MAP_ANONYMOUS : 0;
+    flags |= shared ? MAP_SHARED : MAP_PRIVATE;
+    offset = QEMU_ALIGN_UP((uintptr_t)guardptr, align) - (uintptr_t)guardptr;
+
+    ptr = mmap(guardptr + offset, size, PROT_READ | PROT_WRITE, flags, fd, 0);
+
+    if (ptr == MAP_FAILED) {
+        munmap(guardptr, total);
         return MAP_FAILED;
     }
 
     if (offset > 0) {
-        munmap(ptr, offset);
+        munmap(guardptr, offset);
     }
 
     /*
@@ -70,10 +84,10 @@
      */
     total -= offset;
     if (total > size + getpagesize()) {
-        munmap(ptr1 + size + getpagesize(), total - size - getpagesize());
+        munmap(ptr + size + getpagesize(), total - size - getpagesize());
     }
 
-    return ptr1;
+    return ptr;
 }
 
 void qemu_ram_munmap(void *ptr, size_t size)
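
As a usage note (again, not part of the commit), a hedged sketch of how a caller might exercise this pair of functions. The header path is assumed from QEMU's include layout, and the sizes are arbitrary:

    #include <stdbool.h>
    #include <sys/mman.h>
    #include "qemu/mmap-alloc.h"   /* assumed location of the prototypes */

    static void ram_alloc_example(void)
    {
        size_t size  = 8 * 1024 * 1024;   /* 8 MiB of guest RAM */
        size_t align = 2 * 1024 * 1024;   /* e.g. huge-page alignment */

        /* fd == -1 selects anonymous memory; shared == false keeps it private. */
        void *ram = qemu_ram_mmap(-1, size, align, false);
        if (ram == MAP_FAILED) {
            return;                       /* allocation failed */
        }

        /* ... use [ram, ram + size) ... */

        /* Release the mapping. */
        qemu_ram_munmap(ram, size);
    }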