author     Jeremy Kerr <jk@ozlabs.org>                 2015-05-12 18:12:20 +0800
committer  Stewart Smith <stewart@linux.vnet.ibm.com>  2015-05-13 14:01:35 +1000
commit     ceea843752331258a01a683bf203e9fce6dcd546 (patch)
tree       886ee046dc4403dfcb9c1f548e333190194415c8 /core/test
parent     f96dd8b3d82543ea733a7ab0e854b8331875d524 (diff)
core: Add asserts for region free-list locking
This change adds asserts to the mem_region calls that should have the
per-region lock held.

To keep the tests working, they need the lock_held_by_me() function. The
run-mem_region.c test has a bogus implementation of this, as it doesn't do
any locking at the moment. This will be addressed in a later change.

Signed-off-by: Jeremy Kerr <jk@ozlabs.org>
Signed-off-by: Stewart Smith <stewart@linux.vnet.ibm.com>
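As context for reading the diff below: the asserts themselves land in core/mem_region.c,
which is outside this core/test-limited view. The sketch that follows is only an
illustration of the pattern the commit message describes, not the actual skiboot source;
the mem_region/free_list_lock names are taken from the test changes further down, while
the mem_alloc_sketch() body and the minimal type definitions are hypothetical stand-ins.

#include <assert.h>
#include <stdbool.h>
#include <stddef.h>

/* Illustrative stand-ins for the skiboot types involved. */
struct lock { unsigned int lock_val; };
struct mem_region { struct lock free_list_lock; };

/* Provided by core/lock.c in the real tree; modelled here on the
 * test stubs added in this commit. */
bool lock_held_by_me(struct lock *l)
{
	return l->lock_val;
}

/* Shape of the new checks: a free-list operation asserts that the caller
 * already holds the per-region lock before touching the free list. */
void *mem_alloc_sketch(struct mem_region *region, size_t size)
{
	(void)size;
	assert(lock_held_by_me(&region->free_list_lock));
	/* ... allocation from the region's free list would happen here ... */
	return NULL;
}

int main(void)
{
	struct mem_region r = { .free_list_lock = { .lock_val = 1 } };
	mem_alloc_sketch(&r, 64);	/* assert passes: lock is "held" */
	return 0;
}

Callers are then expected to take the lock around the call, as the
run-mem_region_release_unused.c hunk below now does:
lock(&other->free_list_lock); mem_alloc(...); unlock(&other->free_list_lock);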
Diffstat (limited to 'core/test')
-rw-r--r--  core/test/run-malloc-speed.c                        5
-rw-r--r--  core/test/run-malloc.c                              5
-rw-r--r--  core/test/run-mem_region.c                          5
-rw-r--r--  core/test/run-mem_region_init.c                     5
-rw-r--r--  core/test/run-mem_region_release_unused.c           8
-rw-r--r--  core/test/run-mem_region_release_unused_noalloc.c   5
6 files changed, 33 insertions, 0 deletions
diff --git a/core/test/run-malloc-speed.c b/core/test/run-malloc-speed.c
index 713a74b..279216e 100644
--- a/core/test/run-malloc-speed.c
+++ b/core/test/run-malloc-speed.c
@@ -67,6 +67,11 @@ void unlock(struct lock *l)
l->lock_val = 0;
}
+bool lock_held_by_me(struct lock *l)
+{
+ return l->lock_val;
+}
+
#define TEST_HEAP_ORDER 27
#define TEST_HEAP_SIZE (1ULL << TEST_HEAP_ORDER)
diff --git a/core/test/run-malloc.c b/core/test/run-malloc.c
index 723cb10..4623108 100644
--- a/core/test/run-malloc.c
+++ b/core/test/run-malloc.c
@@ -69,6 +69,11 @@ void unlock(struct lock *l)
l->lock_val = 0;
}
+bool lock_held_by_me(struct lock *l)
+{
+ return l->lock_val;
+}
+
static bool heap_empty(void)
{
const struct alloc_hdr *h = region_start(&skiboot_heap);
diff --git a/core/test/run-mem_region.c b/core/test/run-mem_region.c
index e27459b..b98fe71 100644
--- a/core/test/run-mem_region.c
+++ b/core/test/run-mem_region.c
@@ -72,6 +72,11 @@ void unlock(struct lock *l)
l->lock_val--;
}
+bool lock_held_by_me(struct lock *l __attribute__((unused)))
+{
+ return true;
+}
+
#define TEST_HEAP_ORDER 12
#define TEST_HEAP_SIZE (1ULL << TEST_HEAP_ORDER)
diff --git a/core/test/run-mem_region_init.c b/core/test/run-mem_region_init.c
index 7624057..7ab23d2 100644
--- a/core/test/run-mem_region_init.c
+++ b/core/test/run-mem_region_init.c
@@ -75,6 +75,11 @@ void unlock(struct lock *l)
l->lock_val = 0;
}
+bool lock_held_by_me(struct lock *l)
+{
+ return l->lock_val;
+}
+
/* We actually need a lot of room for the bitmaps! */
#define TEST_HEAP_ORDER 27
#define TEST_HEAP_SIZE (1ULL << TEST_HEAP_ORDER)
diff --git a/core/test/run-mem_region_release_unused.c b/core/test/run-mem_region_release_unused.c
index b8acffd..980f4c4 100644
--- a/core/test/run-mem_region_release_unused.c
+++ b/core/test/run-mem_region_release_unused.c
@@ -70,6 +70,11 @@ void unlock(struct lock *l)
l->lock_val--;
}
+bool lock_held_by_me(struct lock *l)
+{
+ return l->lock_val;
+}
+
#define TEST_HEAP_ORDER 12
#define TEST_HEAP_SIZE (1ULL << TEST_HEAP_ORDER)
@@ -130,7 +135,10 @@ int main(void)
assert(mem_check(other));
/* Allocate 1k from other region. */
+ lock(&other->free_list_lock);
mem_alloc(other, 1024, 1, "1k");
+ unlock(&other->free_list_lock);
+
mem_region_release_unused();
assert(mem_check(&skiboot_heap));
diff --git a/core/test/run-mem_region_release_unused_noalloc.c b/core/test/run-mem_region_release_unused_noalloc.c
index 8dadddb..82ff89a 100644
--- a/core/test/run-mem_region_release_unused_noalloc.c
+++ b/core/test/run-mem_region_release_unused_noalloc.c
@@ -70,6 +70,11 @@ void unlock(struct lock *l)
l->lock_val--;
}
+bool lock_held_by_me(struct lock *l)
+{
+ return l->lock_val;
+}
+
#define TEST_HEAP_ORDER 12
#define TEST_HEAP_SIZE (1ULL << TEST_HEAP_ORDER)