memory: Don't use memcpy for ram_device regions

With a vfio assigned device we lay down a base MemoryRegion registered
as an IO region, giving us read & write accessors. If the region
supports mmap, we lay down a higher priority sub-region MemoryRegion
on top of the base layer initialized as a RAM device pointer to the
mmap. Finally, if we have any quirks for the device (i.e. address
ranges that need additional virtualization support), we put another IO
sub-region on top of the mmap MemoryRegion. When this is flattened,
we now potentially have sub-page mmap MemoryRegions exposed which
cannot be directly mapped through KVM.

This is as expected, but a subtle detail of this is that we end up
with two different access mechanisms through QEMU. If we disable the
mmap MemoryRegion, we make use of the IO MemoryRegion and service
accesses using pread and pwrite to the vfio device file descriptor.
If the mmap MemoryRegion is enabled and results in one of these
sub-page gaps, QEMU handles the access as RAM, using memcpy to the
mmap. Using either pread/pwrite or the mmap directly should be
correct, but using memcpy causes us problems. I expect that not only
does memcpy not necessarily honor the original width and alignment in
performing a copy, but it potentially also uses processor instructions
not intended for MMIO spaces. It turns out that this has been a
problem for Realtek NIC assignment, which has such a quirk that
creates a sub-page mmap MemoryRegion access.

To resolve this, we disable memory_access_is_direct() for ram_device
regions since QEMU assumes that it can use memcpy for those regions.
Instead we access through MemoryRegionOps, which replaces the memcpy
with simple de-references of standard sizes to the host memory.

With this patch we attempt to provide unrestricted access to the RAM
device, allowing byte through qword access as well as unaligned
access. The assumption here is that accesses initiated by the VM are
driven by a device specific driver, which knows the device
capabilities. If unaligned accesses are not supported by the device,
we don't want them to work in a VM by performing multiple aligned
accesses to compose the unaligned access. A down-side of this
philosophy is that the xp command from the monitor attempts to use
the largest available access width, unaware of the underlying
device. Using memcpy had this same restriction, but at least now an
operator can dump individual registers, even if blocks of device
memory may result in access widths beyond the capabilities of a
given device (RTL NICs only support up to dword).

Backports commit 1b16ded6a512809f99c133a97f19026fe612b2de from qemu
This commit is contained in:
Alex Williamson 2018-02-25 23:05:10 -05:00 committed by Lioncash
parent 5db45219c9
commit fe66c2e088
No known key found for this signature in database
GPG key ID: 4E3C3CC1031BA9C7
2 changed files with 77 additions and 2 deletions

View file

@ -1160,9 +1160,11 @@ void *qemu_map_ram_ptr(struct uc_struct *uc, RAMBlock *ram_block,
/*
 * Decide whether an access to @mr may be serviced directly via memcpy on
 * host RAM, rather than dispatched through MemoryRegionOps.
 *
 * ram_device regions are explicitly excluded on both the read and write
 * paths: their backing is device memory (e.g. a vfio mmap), where memcpy
 * may use widths/instructions unsuitable for MMIO.  Such regions are
 * accessed through ram_device_mem_ops instead.
 */
static inline bool memory_access_is_direct(MemoryRegion *mr, bool is_write)
{
    if (is_write) {
        return memory_region_is_ram(mr) &&
               !mr->readonly && !memory_region_is_ram_device(mr);
    } else {
        return (memory_region_is_ram(mr) && !memory_region_is_ram_device(mr)) ||
               memory_region_is_romd(mr);
    }
}

View file

@ -1012,6 +1012,77 @@ const MemoryRegionOps unassigned_mem_ops = {
{0,0,false,unassigned_mem_accepts}, {0,0,false,unassigned_mem_accepts},
}; };
/*
 * Read handler for ram_device regions: a plain dereference of the host
 * mapping at the guest's requested width, deliberately avoiding memcpy so
 * the original access size is preserved (important for MMIO-backed RAM).
 * @opaque is the MemoryRegion itself (set by memory_region_init_ram_device_ptr).
 * Unsupported sizes return all-ones, mimicking an unanswered bus read.
 */
static uint64_t memory_region_ram_device_read(struct uc_struct *uc,
                                              void *opaque, hwaddr addr,
                                              unsigned size)
{
    MemoryRegion *mr = opaque;
    void *host = mr->ram_block->host + addr;
    uint64_t data = (uint64_t)~0;

    switch (size) {
    case 1:
        data = *(uint8_t *)host;
        break;
    case 2:
        data = *(uint16_t *)host;
        break;
    case 4:
        data = *(uint32_t *)host;
        break;
    case 8:
        data = *(uint64_t *)host;
        break;
    }

    // Unicorn: commented out
    //trace_memory_region_ram_device_read(get_cpu_index(), mr, addr, data, size);

    return data;
}
/*
 * Write handler for ram_device regions: stores @data into the host mapping
 * with a single dereference of exactly @size bytes, never memcpy, so the
 * guest's access width reaches the device memory unaltered.
 * @opaque is the MemoryRegion itself; sizes other than 1/2/4/8 are
 * silently dropped (ops->valid already restricts callers to 1..8).
 */
static void memory_region_ram_device_write(struct uc_struct *uc,
                                           void *opaque, hwaddr addr,
                                           uint64_t data, unsigned size)
{
    MemoryRegion *mr = opaque;
    void *host = mr->ram_block->host + addr;

    // Unicorn: commented out
    //trace_memory_region_ram_device_write(get_cpu_index(), mr, addr, data, size);

    switch (size) {
    case 1:
        *(uint8_t *)host = (uint8_t)data;
        break;
    case 2:
        *(uint16_t *)host = (uint16_t)data;
        break;
    case 4:
        *(uint32_t *)host = (uint32_t)data;
        break;
    case 8:
        *(uint64_t *)host = data;
        break;
    }
}
/*
 * Ops vector installed on ram_device regions so accesses go through the
 * width-preserving handlers above instead of QEMU's direct-memcpy RAM path.
 * Positional initializer — fields appear to be (read, write, then two
 * unused callbacks, endianness, valid, impl); verify against this fork's
 * MemoryRegionOps declaration if reordering.
 */
static const MemoryRegionOps ram_device_mem_ops = {
    memory_region_ram_device_read,
    memory_region_ram_device_write,
    NULL,
    NULL,
    DEVICE_NATIVE_ENDIAN,
    // valid: guest may issue 1..8 byte accesses, unaligned allowed —
    // the device driver in the guest is trusted to know what widths
    // its hardware supports.
    {
        1, 8,
        true,
    },
    // impl: handlers implement every size 1..8 directly, so no
    // splitting/combining of accesses is performed by the core.
    {
        1, 8,
        true,
    },
};
bool memory_region_access_valid(MemoryRegion *mr, bool memory_region_access_valid(MemoryRegion *mr,
hwaddr addr, hwaddr addr,
unsigned size, unsigned size,
@ -1221,6 +1292,8 @@ void memory_region_init_ram_device_ptr(struct uc_struct *uc,
{ {
memory_region_init_ram_ptr(uc, mr, owner, name, size, ptr); memory_region_init_ram_ptr(uc, mr, owner, name, size, ptr);
mr->ram_device = true; mr->ram_device = true;
mr->ops = &ram_device_mem_ops;
mr->opaque = mr;
} }
void memory_region_init_alias(struct uc_struct *uc, MemoryRegion *mr, void memory_region_init_alias(struct uc_struct *uc, MemoryRegion *mr,