/*
 * Support for RAM backed by mmaped host memory.
 *
 * Copyright (c) 2015 Red Hat, Inc.
 *
 * Authors:
 *  Michael S. Tsirkin <mst@redhat.com>
 *
 * This work is licensed under the terms of the GNU GPL, version 2 or
 * later. See the COPYING file in the top-level directory.
 */

#include "qemu/osdep.h"
#include "qemu/mmap-alloc.h"
#include "qemu/host-utils.h"

#include <sys/mman.h>
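
/*
 * Map "size" bytes of RAM, aligned to "align" bytes.  When fd is -1 the
 * memory is anonymous; otherwise it is backed by the given file descriptor.
 * "shared" selects MAP_SHARED vs. MAP_PRIVATE for the backing mapping.
 *
 * The alignment is obtained by reserving "size + align" bytes of PROT_NONE
 * address space and then mapping the real memory with MAP_FIXED at the first
 * "align"-aligned offset inside that reservation; the unused edges of the
 * reservation are unmapped again, except for one trailing PROT_NONE guard
 * page.  Returns MAP_FAILED on error.
 */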
void *qemu_ram_mmap(int fd, size_t size, size_t align, bool shared)
{
    /*
     * Note: this always allocates at least one extra page of virtual address
     * space, even if size is already aligned.
     */
    size_t total = size + align;
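    /*
     * For example, with size = 4 KiB and align = 2 MiB this reserves
     * 2 MiB + 4 KiB of address space, which is enough to guarantee that a
     * 2 MiB-aligned window of 4 KiB exists somewhere inside the reservation.
     */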
#if defined(__powerpc64__) && defined(__linux__)
    /* On ppc64 mappings in the same segment (aka slice) must share the same
     * page size.  Since we will be re-allocating part of this segment from
     * the supplied fd, we should make sure to use the same page size; to this
     * end we mmap the supplied fd.  In this case, we set MAP_NORESERVE to
     * avoid allocating backing store memory.
     * We do this unless we are using the system page size, in which case
     * anonymous memory is OK.
     */
    int anonfd = fd == -1 || qemu_fd_getpagesize(fd) == getpagesize() ? -1 : fd;
    int flags = anonfd == -1 ? MAP_ANONYMOUS : MAP_NORESERVE;
    void *ptr = mmap(0, total, PROT_NONE, flags | MAP_PRIVATE, anonfd, 0);
#else
    void *ptr = mmap(0, total, PROT_NONE, MAP_ANONYMOUS | MAP_PRIVATE, -1, 0);
#endif
    size_t offset;
    void *ptr1;

    if (ptr == MAP_FAILED) {
        return MAP_FAILED;
    }

    /* Make sure align is a power of 2 */
    assert(is_power_of_2(align));
    /* Always align to host page size */
    assert(align >= getpagesize());

    offset = QEMU_ALIGN_UP((uintptr_t)ptr, align) - (uintptr_t)ptr;
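    /*
     * "offset" is the distance from the start of the reservation to the
     * first "align"-aligned address inside it (0 if ptr is already aligned).
     * The MAP_FIXED mapping below replaces "size" bytes of the PROT_NONE
     * reservation at that aligned address with the actual RAM mapping.
     */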
    ptr1 = mmap(ptr + offset, size, PROT_READ | PROT_WRITE,
                MAP_FIXED |
                (fd == -1 ? MAP_ANONYMOUS : 0) |
                (shared ? MAP_SHARED : MAP_PRIVATE),
                fd, 0);
    if (ptr1 == MAP_FAILED) {
        munmap(ptr, total);
        return MAP_FAILED;
    }
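    /*
     * The layout is now: [ptr, ptr + offset) leading PROT_NONE slack,
     * [ptr1, ptr1 + size) usable RAM, and the tail of the reservation still
     * PROT_NONE.  The leading slack is not needed any more, so drop it.
     */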
    if (offset > 0) {
        munmap(ptr, offset);
    }

    /*
     * Leave a single PROT_NONE page allocated after the RAM block, to serve
     * as a guard page against potential buffer overflows.
     */
    total -= offset;
    if (total > size + getpagesize()) {
        munmap(ptr1 + size + getpagesize(), total - size - getpagesize());
    }
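
    /*
     * Exactly "size" bytes of usable RAM followed by one PROT_NONE guard
     * page remain mapped at ptr1; qemu_ram_munmap() relies on this layout
     * when it unmaps size + getpagesize() bytes.
     */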
    return ptr1;
}
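
/*
 * Release a block previously obtained from qemu_ram_mmap().  "size" must be
 * the same value that was passed to qemu_ram_mmap(), so that the trailing
 * guard page is unmapped together with the RAM block itself.
 */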
void qemu_ram_munmap(void *ptr, size_t size)
{
    if (ptr) {
        /* Unmap both the RAM block and the guard page */
        munmap(ptr, size + getpagesize());
    }
}
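
/*
 * Illustrative usage sketch: allocate an anonymous, private, 2 MiB-aligned
 * 16 MiB RAM block and release it again.  The sizes and the error handling
 * are placeholders only.
 *
 *     size_t sz = 16 * 1024 * 1024;
 *     void *block = qemu_ram_mmap(-1, sz, 2 * 1024 * 1024, false);
 *     if (block == MAP_FAILED) {
 *         // handle the allocation failure
 *     } else {
 *         // ... use [block, block + sz) as guest RAM ...
 *         qemu_ram_munmap(block, sz);
 *     }
 */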