author    Jeremy Kerr <jk@ozlabs.org>                    2015-05-12 18:12:20 +0800
committer Stewart Smith <stewart@linux.vnet.ibm.com>     2015-05-21 14:36:04 +1000
commit    5f4c0b3b86de905e7eac4acdb976254df145f1c1
tree      6e18381a8a278bdbe78b8ffe447352f7090f1463
parent    a653df93b2d22691ac1583584c8292e6e421464f
core: Move free-list locking to a separate per-region lock
Currently, we have a single lock for the entire mem_region system; this protects both the global region list and the allocations from each region. This means we can't allocate memory while traversing the global region list, as any malloc/realloc/free will try to acquire the mem_region lock again.

This change separates the locking into two parts: we keep mem_region_lock to protect the regions list, and introduce a per-region lock (free_list_lock) to protect allocations from each region's free_list.

This lets us remove the open-coded invocations of mem_alloc, where we'd avoided malloc() due to the locking issue above.

Signed-off-by: Jeremy Kerr <jk@ozlabs.org>
Signed-off-by: Stewart Smith <stewart@linux.vnet.ibm.com>
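
The effect of the split is easiest to see in the malloc wrappers changed below: an allocation from a single region now takes only that region's free_list_lock, leaving mem_region_lock free for list traversal. A minimal sketch of the new discipline, built from the lock/mem_alloc interfaces shown in this patch (the wrapper name itself is hypothetical):

/* Allocate from the skiboot heap: take only that region's
 * free_list_lock; the global mem_region_lock is never needed here. */
static void *heap_alloc_sketch(size_t bytes, const char *location)
{
	void *p;

	lock(&skiboot_heap.free_list_lock);
	p = mem_alloc(&skiboot_heap, bytes, __alignof__(long), location);
	unlock(&skiboot_heap.free_list_lock);
	return p;
}
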
Diffstat (limited to 'core')
-rw-r--r--  core/malloc.c                    12
-rw-r--r--  core/mem_region.c                39
-rw-r--r--  core/test/run-malloc-speed.c      2
-rw-r--r--  core/test/run-malloc.c           28
-rw-r--r--  core/test/run-mem_region.c        2
-rw-r--r--  core/test/run-mem_region_init.c   2
6 files changed, 44 insertions(+), 41 deletions(-)
diff --git a/core/malloc.c b/core/malloc.c
index 9e715f6..669cc59 100644
--- a/core/malloc.c
+++ b/core/malloc.c
@@ -25,9 +25,9 @@ void *__memalign(size_t blocksize, size_t bytes, const char *location)
{
void *p;
- lock(&mem_region_lock);
+ lock(&skiboot_heap.free_list_lock);
p = mem_alloc(&skiboot_heap, bytes, blocksize, location);
- unlock(&mem_region_lock);
+ unlock(&skiboot_heap.free_list_lock);
return p;
}
@@ -39,9 +39,9 @@ void *__malloc(size_t bytes, const char *location)
void __free(void *p, const char *location)
{
- lock(&mem_region_lock);
+ lock(&skiboot_heap.free_list_lock);
mem_free(&skiboot_heap, p, location);
- unlock(&mem_region_lock);
+ unlock(&skiboot_heap.free_list_lock);
}
void *__realloc(void *ptr, size_t size, const char *location)
@@ -56,7 +56,7 @@ void *__realloc(void *ptr, size_t size, const char *location)
if (!ptr)
return __malloc(size, location);
- lock(&mem_region_lock);
+ lock(&skiboot_heap.free_list_lock);
if (mem_resize(&skiboot_heap, ptr, size, location)) {
newptr = ptr;
} else {
@@ -70,7 +70,7 @@ void *__realloc(void *ptr, size_t size, const char *location)
mem_free(&skiboot_heap, ptr, location);
}
}
- unlock(&mem_region_lock);
+ unlock(&skiboot_heap.free_list_lock);
return newptr;
}
diff --git a/core/mem_region.c b/core/mem_region.c
index 5a496aa..405c2d4 100644
--- a/core/mem_region.c
+++ b/core/mem_region.c
@@ -30,6 +30,19 @@
#define POISON_MEM_REGION_WITH 0x99
#define POISON_MEM_REGION_LIMIT 1*1024*1024*1024
+/* Locking: The mem_region_lock protects the regions list from concurrent
+ * updates. Additions to, or removals from, the region list must be done
+ * with this lock held. This is typically done when we're establishing
+ * the memory & reserved regions.
+ *
+ * Each region has a lock (region->free_list_lock) to protect the free list
+ * from concurrent modification. This lock is used when we're allocating
+ * memory out of a specific region.
+ *
+ * If both locks are needed (eg, __local_alloc, where we need to find a region,
+ * then allocate from it), the mem_region_lock must be acquired before (and
+ * released after) the per-region lock.
+ */
struct lock mem_region_lock = LOCK_UNLOCKED;
static struct list_head regions = LIST_HEAD_INIT(regions);
@@ -542,9 +555,7 @@ static struct mem_region *new_region(const char *name,
{
struct mem_region *region;
- /* Avoid lock recursion, call mem_alloc directly. */
- region = mem_alloc(&skiboot_heap,
- sizeof(*region), __alignof__(*region), __location__);
+ region = malloc(sizeof(*region));
if (!region)
return NULL;
@@ -554,6 +565,7 @@ static struct mem_region *new_region(const char *name,
region->mem_node = mem_node;
region->type = type;
region->free_list.n.next = NULL;
+ init_lock(&region->free_list_lock);
return region;
}
@@ -629,8 +641,7 @@ static bool add_region(struct mem_region *region)
assert(r->start == region->start);
assert(r->len == region->len);
list_del_from(&regions, &r->list);
- /* We already hold mem_region lock */
- mem_free(&skiboot_heap, r, __location__);
+ free(r);
}
/* Finally, add in our own region. */
@@ -695,7 +706,9 @@ restart:
}
/* Second pass, match anything */
+ lock(&region->free_list_lock);
p = mem_alloc(region, size, align, location);
+ unlock(&region->free_list_lock);
if (p)
break;
}
@@ -801,10 +814,7 @@ void mem_region_init(void)
char *name;
len = strlen(names->prop + n) + 1;
-
- name = mem_alloc(&skiboot_heap, len,
- __alignof__(*name), __location__);
- memcpy(name, names->prop + n, len);
+ name = strdup(names->prop + n);
region = new_region(name,
dt_get_number(range, 2),
@@ -922,15 +932,8 @@ void mem_region_add_dt_reserved(void)
ranges_len += 2 * sizeof(uint64_t);
}
- /* Allocate property data with mem_alloc; malloc() acquires
- * mem_region_lock */
- names = mem_alloc(&skiboot_heap, names_len,
- __alignof__(*names), __location__);
- ranges = mem_alloc(&skiboot_heap, ranges_len,
- __alignof__(*ranges), __location__);
-
- name = names;
- range = ranges;
+ name = names = malloc(names_len);
+ range = ranges = malloc(ranges_len);
printf("Reserved regions:\n");
/* Second pass: populate property data */
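
The ordering rule stated in the new locking comment above (mem_region_lock acquired before, and released after, any per-region lock) is what the __local_alloc hunk follows. A hypothetical walk over the region list, assuming the ccan list helpers skiboot already uses and access to the static regions list:

/* Hold mem_region_lock across the traversal; take each region's
 * free_list_lock only around the allocation itself. */
static void *alloc_from_any_region_sketch(size_t size, const char *location)
{
	struct mem_region *region;
	void *p = NULL;

	lock(&mem_region_lock);
	list_for_each(&regions, region, list) {
		lock(&region->free_list_lock);
		p = mem_alloc(region, size, __alignof__(long), location);
		unlock(&region->free_list_lock);
		if (p)
			break;
	}
	unlock(&mem_region_lock);
	return p;
}
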
diff --git a/core/test/run-malloc-speed.c b/core/test/run-malloc-speed.c
index 18c0de9..713a74b 100644
--- a/core/test/run-malloc-speed.c
+++ b/core/test/run-malloc-speed.c
@@ -90,7 +90,7 @@ int main(void)
+ skiboot_heap.len);
}
assert(mem_check(&skiboot_heap));
- assert(mem_region_lock.lock_val == 0);
+ assert(skiboot_heap.free_list_lock.lock_val == 0);
free(region_start(&skiboot_heap));
real_free(p);
return 0;
diff --git a/core/test/run-malloc.c b/core/test/run-malloc.c
index bbd2a48..723cb10 100644
--- a/core/test/run-malloc.c
+++ b/core/test/run-malloc.c
@@ -92,45 +92,45 @@ int main(void)
assert(p);
assert(p > (char *)test_heap);
assert(p + (1ULL << i) <= (char *)test_heap + TEST_HEAP_SIZE);
- assert(!mem_region_lock.lock_val);
+ assert(!skiboot_heap.free_list_lock.lock_val);
free(p);
- assert(!mem_region_lock.lock_val);
+ assert(!skiboot_heap.free_list_lock.lock_val);
assert(heap_empty());
}
/* Realloc as malloc. */
- mem_region_lock.lock_val = 0;
+ skiboot_heap.free_list_lock.lock_val = 0;
p = realloc(NULL, 100);
assert(p);
- assert(!mem_region_lock.lock_val);
+ assert(!skiboot_heap.free_list_lock.lock_val);
/* Realloc as free. */
p = realloc(p, 0);
assert(!p);
- assert(!mem_region_lock.lock_val);
+ assert(!skiboot_heap.free_list_lock.lock_val);
assert(heap_empty());
/* Realloc longer. */
p = realloc(NULL, 100);
assert(p);
- assert(!mem_region_lock.lock_val);
+ assert(!skiboot_heap.free_list_lock.lock_val);
p2 = realloc(p, 200);
assert(p2 == p);
- assert(!mem_region_lock.lock_val);
+ assert(!skiboot_heap.free_list_lock.lock_val);
free(p);
- assert(!mem_region_lock.lock_val);
+ assert(!skiboot_heap.free_list_lock.lock_val);
assert(heap_empty());
/* Realloc shorter. */
- mem_region_lock.lock_val = 0;
+ skiboot_heap.free_list_lock.lock_val = 0;
p = realloc(NULL, 100);
- assert(!mem_region_lock.lock_val);
+ assert(!skiboot_heap.free_list_lock.lock_val);
assert(p);
p2 = realloc(p, 1);
- assert(!mem_region_lock.lock_val);
+ assert(!skiboot_heap.free_list_lock.lock_val);
assert(p2 == p);
free(p);
- assert(!mem_region_lock.lock_val);
+ assert(!skiboot_heap.free_list_lock.lock_val);
assert(heap_empty());
/* zalloc failure */
@@ -152,7 +152,7 @@ int main(void)
assert(p2[63] == 'c');
free(p2);
assert(heap_empty());
- assert(!mem_region_lock.lock_val);
+ assert(!skiboot_heap.free_list_lock.lock_val);
/* Realloc with failure to allocate new size */
p2 = malloc(TEST_HEAP_SIZE - sizeof(struct alloc_hdr)*2);
@@ -176,7 +176,7 @@ int main(void)
free(p);
free(p4);
assert(heap_empty());
- assert(!mem_region_lock.lock_val);
+ assert(!skiboot_heap.free_list_lock.lock_val);
real_free(test_heap);
return 0;
diff --git a/core/test/run-mem_region.c b/core/test/run-mem_region.c
index db1ca02..e27459b 100644
--- a/core/test/run-mem_region.c
+++ b/core/test/run-mem_region.c
@@ -244,7 +244,7 @@ int main(void)
list_del(&r->list);
mem_free(&skiboot_heap, r, __location__);
}
- assert(mem_region_lock.lock_val == 0);
+ assert(skiboot_heap.free_list_lock.lock_val == 0);
__free(test_heap, "");
return 0;
}
diff --git a/core/test/run-mem_region_init.c b/core/test/run-mem_region_init.c
index bb606ef..7624057 100644
--- a/core/test/run-mem_region_init.c
+++ b/core/test/run-mem_region_init.c
@@ -177,7 +177,7 @@ int main(void)
}
assert(mem_check(&skiboot_heap));
}
- assert(mem_region_lock.lock_val == 0);
+ assert(skiboot_heap.free_list_lock.lock_val == 0);
real_free(heap);
return 0;
}