aboutsummaryrefslogtreecommitdiff
path: root/exec.c
diff options
context:
space:
mode:
authorAvi Kivity <avi@redhat.com>2012-02-13 20:12:05 +0200
committerAvi Kivity <avi@redhat.com>2012-02-29 13:44:45 +0200
commitf7bf546118837a64d4b04dd650e3f64eaa25e623 (patch)
tree1d6a6cafa0e8bff0c965d5fe2b52a2766df86a53 /exec.c
parenta39184328611abfeb384f99d13d48b1c46d53bae (diff)
downloadqemu-f7bf546118837a64d4b04dd650e3f64eaa25e623.zip
qemu-f7bf546118837a64d4b04dd650e3f64eaa25e623.tar.gz
qemu-f7bf546118837a64d4b04dd650e3f64eaa25e623.tar.bz2
memory: switch phys_page_set() to a recursive implementation
Setting multiple pages at once requires backtracking to previous nodes; easiest to achieve via recursion. Signed-off-by: Avi Kivity <avi@redhat.com>
Diffstat (limited to 'exec.c')
-rw-r--r--exec.c67
1 file changed, 41 insertions, 26 deletions
diff --git a/exec.c b/exec.c
index 26e70c3..f4cd867 100644
--- a/exec.c
+++ b/exec.c
@@ -404,24 +404,30 @@ static inline PageDesc *page_find(tb_page_addr_t index)
#if !defined(CONFIG_USER_ONLY)
-static PhysPageEntry *phys_map_node_alloc(uint16_t *ptr)
+static void phys_map_node_reserve(unsigned nodes)
{
- unsigned i;
- uint16_t ret;
-
- /* Assign early to avoid the pointer being invalidated by g_renew() */
- *ptr = ret = phys_map_nodes_nb++;
- assert(ret != PHYS_MAP_NODE_NIL);
- if (ret == phys_map_nodes_nb_alloc) {
+ if (phys_map_nodes_nb + nodes > phys_map_nodes_nb_alloc) {
typedef PhysPageEntry Node[L2_SIZE];
phys_map_nodes_nb_alloc = MAX(phys_map_nodes_nb_alloc * 2, 16);
+ phys_map_nodes_nb_alloc = MAX(phys_map_nodes_nb_alloc,
+ phys_map_nodes_nb + nodes);
phys_map_nodes = g_renew(Node, phys_map_nodes,
phys_map_nodes_nb_alloc);
}
+}
+
+static uint16_t phys_map_node_alloc(void)
+{
+ unsigned i;
+ uint16_t ret;
+
+ ret = phys_map_nodes_nb++;
+ assert(ret != PHYS_MAP_NODE_NIL);
+ assert(ret != phys_map_nodes_nb_alloc);
for (i = 0; i < L2_SIZE; ++i) {
phys_map_nodes[ret][i].u.node = PHYS_MAP_NODE_NIL;
}
- return phys_map_nodes[ret];
+ return ret;
}
static void phys_map_nodes_reset(void)
@@ -429,29 +435,38 @@ static void phys_map_nodes_reset(void)
phys_map_nodes_nb = 0;
}
-static void phys_page_set(target_phys_addr_t index, uint16_t leaf)
-{
- PhysPageEntry *lp, *p;
- int i, j;
- lp = &phys_map;
+static void phys_page_set_level(PhysPageEntry *lp, target_phys_addr_t index,
+ uint16_t leaf, int level)
+{
+ PhysPageEntry *p;
+ int i;
- /* Level 1..N. */
- for (i = P_L2_LEVELS - 1; i >= 0; i--) {
- if (lp->u.node == PHYS_MAP_NODE_NIL) {
- p = phys_map_node_alloc(&lp->u.node);
- if (i == 0) {
- for (j = 0; j < L2_SIZE; j++) {
- p[j].u.leaf = phys_section_unassigned;
- }
+ if (lp->u.node == PHYS_MAP_NODE_NIL) {
+ lp->u.node = phys_map_node_alloc();
+ p = phys_map_nodes[lp->u.node];
+ if (level == 0) {
+ for (i = 0; i < L2_SIZE; i++) {
+ p[i].u.leaf = phys_section_unassigned;
}
- } else {
- p = phys_map_nodes[lp->u.node];
}
- lp = &p[(index >> (i * L2_BITS)) & (L2_SIZE - 1)];
+ } else {
+ p = phys_map_nodes[lp->u.node];
}
+ lp = &p[(index >> (level * L2_BITS)) & (L2_SIZE - 1)];
+
+ if (level == 0) {
+ lp->u.leaf = leaf;
+ } else {
+ phys_page_set_level(lp, index, leaf, level - 1);
+ }
+}
+
+static void phys_page_set(target_phys_addr_t index, uint16_t leaf)
+{
+ phys_map_node_reserve(P_L2_LEVELS);
- lp->u.leaf = leaf;
+ phys_page_set_level(&phys_map, index, leaf, P_L2_LEVELS - 1);
}
static MemoryRegionSection phys_page_find(target_phys_addr_t index)