From 799bf1c3a52fece82be9a8fad98245f1dd1c72ab Mon Sep 17 00:00:00 2001
From: Peter Lieven
Date: Sun, 25 Feb 2018 19:24:46 -0500
Subject: [PATCH] exec: avoid realloc in phys_map_node_reserve

This is the first step in reducing the brk heap fragmentation created
by the map->nodes memory allocation. Since the introduction of RCU the
freeing of the PhysPageMaps is delayed, so that sometimes several
hundred are allocated at the same time.

Even worse, the memory for map->nodes is allocated and shortly
afterwards reallocated. Since the number of nodes each map grows to in
the end is the same for all PhysPageMaps, remember this value and at
least avoid the reallocation.

The large number of simultaneous allocations (about 450 x 70kB in my
configuration) has to be addressed later.

Backports commit 101420b886eec36990419bc9ed5b503622af8a0d from qemu
---
 include/uc_priv.h |  2 ++
 qemu/exec.c       | 32 ++++++++++++++++++--------------
 uc.c              |  2 ++
 3 files changed, 22 insertions(+), 14 deletions(-)

diff --git a/include/uc_priv.h b/include/uc_priv.h
index c7f85a61..f8b06743 100644
--- a/include/uc_priv.h
+++ b/include/uc_priv.h
@@ -182,6 +182,8 @@ struct uc_struct {
     MemoryRegion io_mem_unassigned;
     MemoryRegion io_mem_watch;
     RAMList ram_list;
+    // Renamed from "alloc_hint" in qemu.
+    unsigned phys_map_node_alloc_hint;
 
     // qemu/cpu-exec.c
     BounceBuffer bounce;
diff --git a/qemu/exec.c b/qemu/exec.c
index 6c6f3fc9..8b49d6b0 100644
--- a/qemu/exec.c
+++ b/qemu/exec.c
@@ -133,12 +133,13 @@ static void tcg_commit(MemoryListener *listener);
 
 #if !defined(CONFIG_USER_ONLY)
 
-static void phys_map_node_reserve(PhysPageMap *map, unsigned nodes)
+static void phys_map_node_reserve(struct uc_struct *uc, PhysPageMap *map, unsigned nodes)
 {
     if (map->nodes_nb + nodes > map->nodes_nb_alloc) {
-        map->nodes_nb_alloc = MAX(map->nodes_nb_alloc * 2, 16);
+        map->nodes_nb_alloc = MAX(map->nodes_nb_alloc, uc->phys_map_node_alloc_hint);
         map->nodes_nb_alloc = MAX(map->nodes_nb_alloc, map->nodes_nb + nodes);
         map->nodes = g_renew(Node, map->nodes, map->nodes_nb_alloc);
+        uc->phys_map_node_alloc_hint = map->nodes_nb_alloc;
     }
 }
 
@@ -188,12 +189,13 @@ static void phys_page_set_level(PhysPageMap *map, PhysPageEntry *lp,
     }
 }
 
-static void phys_page_set(AddressSpaceDispatch *d,
-                          hwaddr index, hwaddr nb,
-                          uint16_t leaf)
+static void phys_page_set(struct uc_struct *uc,
+                          AddressSpaceDispatch *d,
+                          hwaddr index, hwaddr nb,
+                          uint16_t leaf)
 {
     /* Wildly overreserve - it doesn't matter much. */
-    phys_map_node_reserve(&d->map, 3 * P_L2_LEVELS);
+    phys_map_node_reserve(uc, &d->map, 3 * P_L2_LEVELS);
 
     phys_page_set_level(&d->map, &d->phys_map, &index, &nb, leaf, P_L2_LEVELS - 1);
 }
@@ -916,7 +918,8 @@ static void phys_sections_free(PhysPageMap *map)
 }
 
 static void register_subpage(struct uc_struct* uc,
-                             AddressSpaceDispatch *d, MemoryRegionSection *section)
+                             AddressSpaceDispatch *d,
+                             MemoryRegionSection *section)
 {
     subpage_t *subpage;
     hwaddr base = section->offset_within_address_space
@@ -932,21 +935,22 @@ static void register_subpage(struct uc_struct* uc,
         subpage = subpage_init(d->as, base);
         subsection.address_space = d->as;
         subsection.mr = &subpage->iomem;
-        phys_page_set(d, base >> TARGET_PAGE_BITS, 1,
-                      phys_section_add(&d->map, &subsection));
+        phys_page_set(uc, d, base >> TARGET_PAGE_BITS, 1,
+                      phys_section_add(&d->map, &subsection));
     } else {
         subpage = container_of(existing->mr, subpage_t, iomem);
     }
     start = section->offset_within_address_space & ~TARGET_PAGE_MASK;
     end = start + int128_get64(section->size) - 1;
     subpage_register(subpage, start, end,
-        phys_section_add(&d->map, section));
+                     phys_section_add(&d->map, section));
     //g_free(subpage);
 }
 
-static void register_multipage(AddressSpaceDispatch *d,
-                               MemoryRegionSection *section)
+static void register_multipage(struct uc_struct *uc,
+                               AddressSpaceDispatch *d,
+                               MemoryRegionSection *section)
 {
     hwaddr start_addr = section->offset_within_address_space;
     uint16_t section_index = phys_section_add(&d->map, section);
@@ -954,7 +958,7 @@ static void register_multipage(AddressSpaceDispatch *d,
                                                     TARGET_PAGE_BITS));
 
     assert(num_pages);
-    phys_page_set(d, start_addr >> TARGET_PAGE_BITS, num_pages, section_index);
+    phys_page_set(uc, d, start_addr >> TARGET_PAGE_BITS, num_pages, section_index);
 }
 
 static void mem_add(MemoryListener *listener, MemoryRegionSection *section)
@@ -985,7 +989,7 @@ static void mem_add(MemoryListener *listener, MemoryRegionSection *section)
             register_subpage(as->uc, d, &now);
         } else {
             now.size = int128_and(now.size, int128_neg(page_size));
-            register_multipage(d, &now);
+            register_multipage(as->uc, d, &now);
         }
     }
 }
diff --git a/uc.c b/uc.c
index bbce3424..073c8696 100644
--- a/uc.c
+++ b/uc.c
@@ -162,6 +162,8 @@ uc_err uc_open(uc_arch arch, uc_mode mode, uc_engine **result)
         uc->address_spaces.tqh_first = NULL;
         uc->address_spaces.tqh_last = &uc->address_spaces.tqh_first;
 
+        uc->phys_map_node_alloc_hint = 16;
+
         switch(arch) {
             default:
                 break;
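
Editor's note, not part of the upstream patch: the standalone sketch below
illustrates the allocation-hint idea that phys_map_node_reserve now uses.
The names (node_t, page_map_t, map_reserve, alloc_hint) are hypothetical
stand-ins for Unicorn's Node/PhysPageMap/g_renew and
uc->phys_map_node_alloc_hint; the sketch assumes, as the commit message
does, that every map eventually grows to roughly the same size.

/*
 * Minimal sketch of the allocation-hint pattern, with hypothetical names;
 * not the actual Unicorn/QEMU code.
 */
#include <stdio.h>
#include <stdlib.h>

typedef struct {
    int dummy[64];              /* stand-in for QEMU's Node payload */
} node_t;

typedef struct {
    node_t *nodes;
    unsigned nodes_nb;          /* nodes currently in use */
    unsigned nodes_nb_alloc;    /* nodes currently allocated */
} page_map_t;

/* Shared hint: the final allocation size observed so far
 * (cf. uc->phys_map_node_alloc_hint in the patch, seeded with 16). */
static unsigned alloc_hint = 16;

#define MAX(a, b) ((a) > (b) ? (a) : (b))

static void map_reserve(page_map_t *map, unsigned nodes)
{
    if (map->nodes_nb + nodes > map->nodes_nb_alloc) {
        /* Start from the hint instead of doubling from scratch, then
         * make sure the current request still fits. */
        map->nodes_nb_alloc = MAX(map->nodes_nb_alloc, alloc_hint);
        map->nodes_nb_alloc = MAX(map->nodes_nb_alloc, map->nodes_nb + nodes);
        node_t *p = realloc(map->nodes, map->nodes_nb_alloc * sizeof(node_t));
        if (!p) {
            abort();
        }
        map->nodes = p;
        /* Remember the size we ended up with for the next map. */
        alloc_hint = map->nodes_nb_alloc;
    }
}

int main(void)
{
    /* First map: grows step by step and trains the hint. */
    page_map_t a = { 0 };
    for (int i = 0; i < 40; i++) {
        map_reserve(&a, 30);    /* "wildly overreserve" on each call */
        a.nodes_nb += 5;        /* pretend 5 nodes were actually consumed */
    }

    /* Second map: the very first reserve already allocates the final
     * size taken from the hint, so no further realloc happens. */
    page_map_t b = { 0 };
    map_reserve(&b, 30);
    printf("hint = %u, second map allocated %u nodes up front\n",
           alloc_hint, b.nodes_nb_alloc);

    free(a.nodes);
    free(b.nodes);
    return 0;
}

The trade-off matches the patch: the first map may realloc a little more
often than with the old doubling heuristic while it trains the hint, but
every later map gets its final size in a single allocation, which is what
avoids the realloc churn that fragments the brk heap when RCU keeps
hundreds of maps alive at the same time.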