aboutsummaryrefslogtreecommitdiff
path: root/core/test
diff options
context:
space:
mode:
Diffstat (limited to 'core/test')
-rw-r--r--core/test/Makefile.check29
-rw-r--r--core/test/run-device.c118
-rw-r--r--core/test/run-malloc-speed.c94
-rw-r--r--core/test/run-malloc.c144
-rw-r--r--core/test/run-mem_region.c250
-rw-r--r--core/test/run-mem_region_init.c179
-rw-r--r--core/test/run-mem_region_release_unused.c177
-rw-r--r--core/test/run-mem_region_release_unused_noalloc.c159
-rw-r--r--core/test/run-msg.c256
-rw-r--r--core/test/run-trace.c386
-rw-r--r--core/test/stubs.c43
11 files changed, 1835 insertions, 0 deletions
diff --git a/core/test/Makefile.check b/core/test/Makefile.check
new file mode 100644
index 0000000..37dac46
--- /dev/null
+++ b/core/test/Makefile.check
@@ -0,0 +1,29 @@
+# -*-Makefile-*-
+CORE_TEST := core/test/run-device core/test/run-mem_region core/test/run-malloc core/test/run-malloc-speed core/test/run-mem_region_init core/test/run-mem_region_release_unused core/test/run-mem_region_release_unused_noalloc core/test/run-trace core/test/run-msg
+
+check: $(CORE_TEST:%=%-check)
+
+$(CORE_TEST:%=%-check) : %-check: %
+ $(VALGRIND) $<
+
+core/test/stubs.o: core/test/stubs.c
+ $(HOSTCC) $(HOSTCFLAGS) -g -c -o $@ $<
+
+$(CORE_TEST) : core/test/stubs.o
+
+$(CORE_TEST) : % : %.c
+ $(HOSTCC) $(HOSTCFLAGS) -O0 -g -I include -I . -I libfdt -o $@ $< core/test/stubs.o
+
+$(CORE_TEST): % : %.d
+
+core/test/stubs.o: core/test/stubs.d
+
+core/test/%.d: core/test/%.c
+ $(HOSTCC) $(HOSTCFLAGS) -I include -I . -I libfdt -M $< > $@
+
+-include core/test/*.d
+
+clean: core-test-clean
+
+core-test-clean:
+ $(RM) -f core/test/*.[od] $(CORE_TEST)
diff --git a/core/test/run-device.c b/core/test/run-device.c
new file mode 100644
index 0000000..fa9e951
--- /dev/null
+++ b/core/test/run-device.c
@@ -0,0 +1,118 @@
+/* Copyright 2013-2014 IBM Corp.
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or
+ * implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+#include <skiboot.h>
+
+/* Override this for testing. */
+#define is_rodata(p) fake_is_rodata(p)
+
+char __rodata_start[16];
+#define __rodata_end (__rodata_start + sizeof(__rodata_start))
+
+static inline bool fake_is_rodata(const void *p)
+{
+ return ((char *)p >= __rodata_start && (char *)p < __rodata_end);
+}
+
+#define zalloc(bytes) calloc((bytes), 1)
+
+#include "../device.c"
+#include "../../ccan/list/list.c" /* For list_check */
+#include <assert.h>
+
+int main(void)
+{
+ struct dt_node *root, *c1, *c2, *gc1, *gc2, *gc3, *ggc1, *i;
+ const struct dt_property *p;
+ struct dt_property *p2;
+ unsigned int n;
+
+ root = dt_new_root("root");
+ assert(!list_top(&root->properties, struct dt_property, list));
+ c1 = dt_new(root, "c1");
+ assert(!list_top(&c1->properties, struct dt_property, list));
+ c2 = dt_new(root, "c2");
+ assert(!list_top(&c2->properties, struct dt_property, list));
+ gc1 = dt_new(c1, "gc1");
+ assert(!list_top(&gc1->properties, struct dt_property, list));
+ gc2 = dt_new(c1, "gc2");
+ assert(!list_top(&gc2->properties, struct dt_property, list));
+ gc3 = dt_new(c1, "gc3");
+ assert(!list_top(&gc3->properties, struct dt_property, list));
+ ggc1 = dt_new(gc1, "ggc1");
+ assert(!list_top(&ggc1->properties, struct dt_property, list));
+
+ for (n = 0, i = dt_first(root); i; i = dt_next(root, i), n++) {
+ assert(!list_top(&i->properties, struct dt_property, list));
+ dt_add_property_cells(i, "visited", 1);
+ }
+ assert(n == 6);
+
+ for (n = 0, i = dt_first(root); i; i = dt_next(root, i), n++) {
+ p = list_top(&i->properties, struct dt_property, list);
+ assert(strcmp(p->name, "visited") == 0);
+ assert(p->len == sizeof(u32));
+ assert(fdt32_to_cpu(*(u32 *)p->prop) == 1);
+ }
+ assert(n == 6);
+
+ dt_add_property_cells(c1, "some-property", 1, 2, 3);
+ p = dt_find_property(c1, "some-property");
+ assert(p);
+ assert(strcmp(p->name, "some-property") == 0);
+ assert(p->len == sizeof(u32) * 3);
+ assert(fdt32_to_cpu(*(u32 *)p->prop) == 1);
+ assert(fdt32_to_cpu(*((u32 *)p->prop + 1)) == 2);
+ assert(fdt32_to_cpu(*((u32 *)p->prop + 2)) == 3);
+
+ /* Test freeing a single node */
+ assert(!list_empty(&gc1->children));
+ dt_free(ggc1);
+ assert(list_empty(&gc1->children));
+
+ /* Test rodata logic. */
+ assert(!is_rodata("hello"));
+ assert(is_rodata(__rodata_start));
+ strcpy(__rodata_start, "name");
+ ggc1 = dt_new(root, __rodata_start);
+ assert(ggc1->name == __rodata_start);
+
+ /* Test string node. */
+ dt_add_property_string(ggc1, "somestring", "someval");
+ assert(dt_has_node_property(ggc1, "somestring", "someval"));
+ assert(!dt_has_node_property(ggc1, "somestrin", "someval"));
+ assert(!dt_has_node_property(ggc1, "somestring", "someva"));
+ assert(!dt_has_node_property(ggc1, "somestring", "somevale"));
+
+ /* Test resizing property. */
+ p = p2 = __dt_find_property(c1, "some-property");
+ assert(p);
+ n = p2->len;
+ while (p2 == p) {
+ n *= 2;
+ dt_resize_property(&p2, n);
+ }
+
+ assert(dt_find_property(c1, "some-property") == p2);
+ list_check(&c1->properties, "properties after resizing");
+
+ dt_del_property(c1, p2);
+ list_check(&c1->properties, "properties after delete");
+
+ /* No leaks for valgrind! */
+ dt_free(root);
+ return 0;
+}
diff --git a/core/test/run-malloc-speed.c b/core/test/run-malloc-speed.c
new file mode 100644
index 0000000..edc7589
--- /dev/null
+++ b/core/test/run-malloc-speed.c
@@ -0,0 +1,94 @@
+/* Copyright 2013-2014 IBM Corp.
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or
+ * implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+#include <config.h>
+
+#define BITS_PER_LONG (sizeof(long) * 8)
+/* Don't include this, it's PPC-specific */
+#define __CPU_H
+static unsigned int cpu_max_pir = 1;
+struct cpu_thread {
+ unsigned int chip_id;
+};
+
+#include <stdlib.h>
+
+/* Use these before we undefine them below. */
+static inline void *real_malloc(size_t size)
+{
+ return malloc(size);
+}
+
+static inline void real_free(void *p)
+{
+ return free(p);
+}
+
+#include <skiboot.h>
+
+/* We need mem_region to accept __location__ */
+#define is_rodata(p) true
+#include "../malloc.c"
+#include "../mem_region.c"
+#include "../device.c"
+
+#undef malloc
+#undef free
+#undef realloc
+
+#include <assert.h>
+#include <stdio.h>
+
+char __rodata_start[1], __rodata_end[1];
+struct dt_node *dt_root;
+
+void lock(struct lock *l)
+{
+ assert(!l->lock_val);
+ l->lock_val = 1;
+}
+
+void unlock(struct lock *l)
+{
+ assert(l->lock_val);
+ l->lock_val = 0;
+}
+
+#define TEST_HEAP_ORDER 27
+#define TEST_HEAP_SIZE (1ULL << TEST_HEAP_ORDER)
+
+#define NUM_ALLOCS 4096
+
+int main(void)
+{
+ uint64_t i, len;
+ void *p[NUM_ALLOCS];
+
+ /* Use malloc for the heap, so valgrind can find issues. */
+ skiboot_heap.start = (unsigned long)real_malloc(skiboot_heap.len);
+
+ len = skiboot_heap.len / NUM_ALLOCS - sizeof(struct alloc_hdr);
+ for (i = 0; i < NUM_ALLOCS; i++) {
+ p[i] = __malloc(len, __location__);
+ assert(p[i] > region_start(&skiboot_heap));
+ assert(p[i] + len <= region_start(&skiboot_heap)
+ + skiboot_heap.len);
+ }
+ assert(mem_check(&skiboot_heap));
+ assert(mem_region_lock.lock_val == 0);
+ free(region_start(&skiboot_heap));
+ return 0;
+}
diff --git a/core/test/run-malloc.c b/core/test/run-malloc.c
new file mode 100644
index 0000000..226ce75
--- /dev/null
+++ b/core/test/run-malloc.c
@@ -0,0 +1,144 @@
+/* Copyright 2013-2014 IBM Corp.
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or
+ * implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+#include <config.h>
+
+#define BITS_PER_LONG (sizeof(long) * 8)
+
+/* Don't include this, it's PPC-specific */
+#define __CPU_H
+static unsigned int cpu_max_pir = 1;
+struct cpu_thread {
+ unsigned int chip_id;
+};
+
+#include <skiboot.h>
+
+#define is_rodata(p) true
+
+#include "../mem_region.c"
+#include "../malloc.c"
+#include "../device.c"
+
+#include "mem_region-malloc.h"
+
+#define TEST_HEAP_ORDER 12
+#define TEST_HEAP_SIZE (1ULL << TEST_HEAP_ORDER)
+
+struct dt_node *dt_root;
+
+void lock(struct lock *l)
+{
+ assert(!l->lock_val);
+ l->lock_val = 1;
+}
+
+void unlock(struct lock *l)
+{
+ assert(l->lock_val);
+ l->lock_val = 0;
+}
+
+static bool heap_empty(void)
+{
+ const struct alloc_hdr *h = region_start(&skiboot_heap);
+ return h->num_longs == skiboot_heap.len / sizeof(long);
+}
+
+int main(void)
+{
+ char test_heap[TEST_HEAP_SIZE], *p, *p2, *p3, *p4;
+ size_t i;
+
+ /* Use malloc for the heap, so valgrind can find issues. */
+ skiboot_heap.start = (unsigned long)test_heap;
+ skiboot_heap.len = TEST_HEAP_SIZE;
+
+ /* Allocations of various sizes. */
+ for (i = 0; i < TEST_HEAP_ORDER; i++) {
+ p = malloc(1ULL << i);
+ assert(p);
+ assert(p > (char *)test_heap);
+ assert(p + (1ULL << i) <= (char *)test_heap + TEST_HEAP_SIZE);
+ assert(!mem_region_lock.lock_val);
+ free(p);
+ assert(!mem_region_lock.lock_val);
+ assert(heap_empty());
+ }
+
+ /* Realloc as malloc. */
+ mem_region_lock.lock_val = 0;
+ p = realloc(NULL, 100);
+ assert(p);
+ assert(!mem_region_lock.lock_val);
+
+ /* Realloc as free. */
+ p = realloc(p, 0);
+ assert(!p);
+ assert(!mem_region_lock.lock_val);
+ assert(heap_empty());
+
+ /* Realloc longer. */
+ p = realloc(NULL, 100);
+ assert(p);
+ assert(!mem_region_lock.lock_val);
+ p2 = realloc(p, 200);
+ assert(p2 == p);
+ assert(!mem_region_lock.lock_val);
+ free(p);
+ assert(!mem_region_lock.lock_val);
+ assert(heap_empty());
+
+ /* Realloc shorter. */
+ mem_region_lock.lock_val = 0;
+ p = realloc(NULL, 100);
+ assert(!mem_region_lock.lock_val);
+ assert(p);
+ p2 = realloc(p, 1);
+ assert(!mem_region_lock.lock_val);
+ assert(p2 == p);
+ free(p);
+ assert(!mem_region_lock.lock_val);
+ assert(heap_empty());
+
+ /* Realloc with move. */
+ p2 = malloc(TEST_HEAP_SIZE - 64 - sizeof(struct alloc_hdr)*2);
+ assert(p2);
+ p = malloc(64);
+ assert(p);
+ free(p2);
+
+ p2 = realloc(p, 128);
+ assert(p2 != p);
+ free(p2);
+ assert(heap_empty());
+ assert(!mem_region_lock.lock_val);
+
+ /* Reproduce bug BZ109128/SW257364 */
+ p = malloc(100);
+ p2 = malloc(100);
+ p3 = malloc(100);
+ p4 = malloc(100);
+ free(p2);
+ realloc(p,216);
+ free(p3);
+ free(p);
+ free(p4);
+ assert(heap_empty());
+ assert(!mem_region_lock.lock_val);
+
+ return 0;
+}
diff --git a/core/test/run-mem_region.c b/core/test/run-mem_region.c
new file mode 100644
index 0000000..f0ad2c2
--- /dev/null
+++ b/core/test/run-mem_region.c
@@ -0,0 +1,250 @@
+/* Copyright 2013-2014 IBM Corp.
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or
+ * implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+#include <config.h>
+
+#define BITS_PER_LONG (sizeof(long) * 8)
+/* Don't include this, it's PPC-specific */
+#define __CPU_H
+static unsigned int cpu_max_pir = 1;
+struct cpu_thread {
+ unsigned int chip_id;
+};
+
+#include <stdlib.h>
+#include <string.h>
+
+/* Use these before we override definitions below. */
+static void *__malloc(size_t size, const char *location __attribute__((unused)))
+{
+ return malloc(size);
+}
+
+static void *__realloc(void *ptr, size_t size, const char *location __attribute__((unused)))
+{
+ return realloc(ptr, size);
+}
+
+static inline void __free(void *p, const char *location __attribute__((unused)))
+{
+ return free(p);
+}
+
+static void *__zalloc(size_t size, const char *location __attribute__((unused)))
+{
+ void *ptr = malloc(size);
+ memset(ptr, 0, size);
+ return ptr;
+}
+
+#include <skiboot.h>
+
+#define is_rodata(p) true
+
+#include "../mem_region.c"
+#include "../device.c"
+
+#include <assert.h>
+#include <stdio.h>
+
+struct dt_node *dt_root;
+
+void lock(struct lock *l)
+{
+ l->lock_val++;
+}
+
+void unlock(struct lock *l)
+{
+ l->lock_val--;
+}
+
+#define TEST_HEAP_ORDER 12
+#define TEST_HEAP_SIZE (1ULL << TEST_HEAP_ORDER)
+
+static bool heap_empty(void)
+{
+ const struct alloc_hdr *h = region_start(&skiboot_heap);
+ return h->num_longs == skiboot_heap.len / sizeof(long);
+}
+
+int main(void)
+{
+ char *test_heap;
+ void *p, *ptrs[100];
+ size_t i;
+ struct mem_region *r;
+
+ /* Use malloc for the heap, so valgrind can find issues. */
+ test_heap = __malloc(TEST_HEAP_SIZE, __location__);
+ skiboot_heap.start = (unsigned long)test_heap;
+ skiboot_heap.len = TEST_HEAP_SIZE;
+
+ /* Allocations of various sizes. */
+ for (i = 0; i < TEST_HEAP_ORDER; i++) {
+ p = mem_alloc(&skiboot_heap, 1ULL << i, 1, "here");
+ assert(p);
+ assert(mem_check(&skiboot_heap));
+ assert(!strcmp(((struct alloc_hdr *)p)[-1].location, "here"));
+ assert(p > (void *)test_heap);
+ assert(p + (1ULL << i) <= (void *)test_heap + TEST_HEAP_SIZE);
+ assert(mem_size(&skiboot_heap, p) >= 1ULL << i);
+ mem_free(&skiboot_heap, p, "freed");
+ assert(heap_empty());
+ assert(mem_check(&skiboot_heap));
+ assert(!strcmp(((struct alloc_hdr *)p)[-1].location, "freed"));
+ }
+ p = mem_alloc(&skiboot_heap, 1ULL << i, 1, "here");
+ assert(!p);
+ mem_free(&skiboot_heap, p, "freed");
+ assert(heap_empty());
+ assert(mem_check(&skiboot_heap));
+
+ /* Allocations of various alignments: use small alloc first. */
+ ptrs[0] = mem_alloc(&skiboot_heap, 1, 1, "small");
+ for (i = 0; ; i++) {
+ p = mem_alloc(&skiboot_heap, 1, 1ULL << i, "here");
+ assert(mem_check(&skiboot_heap));
+ /* We will eventually fail... */
+ if (!p) {
+ assert(i >= TEST_HEAP_ORDER);
+ break;
+ }
+ assert(p);
+ assert((long)p % (1ULL << i) == 0);
+ assert(p > (void *)test_heap);
+ assert(p + 1 <= (void *)test_heap + TEST_HEAP_SIZE);
+ mem_free(&skiboot_heap, p, "freed");
+ assert(mem_check(&skiboot_heap));
+ }
+ mem_free(&skiboot_heap, ptrs[0], "small freed");
+ assert(heap_empty());
+ assert(mem_check(&skiboot_heap));
+
+ /* Many little allocations, freed in reverse order. */
+ for (i = 0; i < 100; i++) {
+ ptrs[i] = mem_alloc(&skiboot_heap, sizeof(long), 1, "here");
+ assert(ptrs[i]);
+ assert(ptrs[i] > (void *)test_heap);
+ assert(ptrs[i] + sizeof(long)
+ <= (void *)test_heap + TEST_HEAP_SIZE);
+ assert(mem_check(&skiboot_heap));
+ }
+ for (i = 0; i < 100; i++)
+ mem_free(&skiboot_heap, ptrs[100 - 1 - i], "freed");
+
+ assert(heap_empty());
+ assert(mem_check(&skiboot_heap));
+
+ /* Check the prev_free gets updated properly. */
+ ptrs[0] = mem_alloc(&skiboot_heap, sizeof(long), 1, "ptrs[0]");
+ ptrs[1] = mem_alloc(&skiboot_heap, sizeof(long), 1, "ptrs[1]");
+ assert(ptrs[1] > ptrs[0]);
+ mem_free(&skiboot_heap, ptrs[0], "ptrs[0] free");
+ assert(mem_check(&skiboot_heap));
+ ptrs[0] = mem_alloc(&skiboot_heap, sizeof(long), 1, "ptrs[0] again");
+ assert(mem_check(&skiboot_heap));
+ mem_free(&skiboot_heap, ptrs[1], "ptrs[1] free");
+ mem_free(&skiboot_heap, ptrs[0], "ptrs[0] free");
+ assert(mem_check(&skiboot_heap));
+ assert(heap_empty());
+
+#if 0
+ printf("Heap map:\n");
+ for (i = 0; i < TEST_HEAP_SIZE / sizeof(long); i++) {
+ printf("%u", test_bit(skiboot_heap.bitmap, i));
+ if (i % 64 == 63)
+ printf("\n");
+ else if (i % 8 == 7)
+ printf(" ");
+ }
+#endif
+
+ /* Simple enlargement, then free */
+ p = mem_alloc(&skiboot_heap, 1, 1, "one byte");
+ assert(p);
+ assert(mem_resize(&skiboot_heap, p, 100, "hundred bytes"));
+ assert(mem_size(&skiboot_heap, p) >= 100);
+ assert(mem_check(&skiboot_heap));
+ assert(!strcmp(((struct alloc_hdr *)p)[-1].location, "hundred bytes"));
+ mem_free(&skiboot_heap, p, "freed");
+
+ /* Simple shrink, then free */
+ p = mem_alloc(&skiboot_heap, 100, 1, "100 bytes");
+ assert(p);
+ assert(mem_resize(&skiboot_heap, p, 1, "1 byte"));
+ assert(mem_size(&skiboot_heap, p) < 100);
+ assert(mem_check(&skiboot_heap));
+ assert(!strcmp(((struct alloc_hdr *)p)[-1].location, "1 byte"));
+ mem_free(&skiboot_heap, p, "freed");
+
+ /* Lots of resizing (enlarge). */
+ p = mem_alloc(&skiboot_heap, 1, 1, "one byte");
+ assert(p);
+ for (i = 1; i <= TEST_HEAP_SIZE - sizeof(struct alloc_hdr); i++) {
+ assert(mem_resize(&skiboot_heap, p, i, "enlarge"));
+ assert(mem_size(&skiboot_heap, p) >= i);
+ assert(mem_check(&skiboot_heap));
+ }
+
+ /* Can't make it larger though. */
+ assert(!mem_resize(&skiboot_heap, p, i, "enlarge"));
+
+ for (i = TEST_HEAP_SIZE - sizeof(struct alloc_hdr); i > 0; i--) {
+ assert(mem_resize(&skiboot_heap, p, i, "shrink"));
+ assert(mem_check(&skiboot_heap));
+ }
+
+ mem_free(&skiboot_heap, p, "freed");
+ assert(mem_check(&skiboot_heap));
+
+ /* Test splitting of a region. */
+ r = new_region("base", (unsigned long)test_heap,
+ TEST_HEAP_SIZE, NULL, REGION_SKIBOOT_HEAP);
+ assert(add_region(r));
+ r = new_region("splitter", (unsigned long)test_heap + TEST_HEAP_SIZE/4,
+ TEST_HEAP_SIZE/2, NULL, REGION_RESERVED);
+ assert(add_region(r));
+ /* Now we should have *three* regions. */
+ i = 0;
+ list_for_each(&regions, r, list) {
+ if (region_start(r) == test_heap) {
+ assert(r->len == TEST_HEAP_SIZE/4);
+ assert(strcmp(r->name, "base") == 0);
+ assert(r->type == REGION_SKIBOOT_HEAP);
+ } else if (region_start(r) == test_heap + TEST_HEAP_SIZE / 4) {
+ assert(r->len == TEST_HEAP_SIZE/2);
+ assert(strcmp(r->name, "splitter") == 0);
+ assert(r->type == REGION_RESERVED);
+ assert(!r->free_list.n.next);
+ } else if (region_start(r) == test_heap + TEST_HEAP_SIZE/4*3) {
+ assert(r->len == TEST_HEAP_SIZE/4);
+ assert(strcmp(r->name, "base") == 0);
+ assert(r->type == REGION_SKIBOOT_HEAP);
+ } else
+ abort();
+ assert(mem_check(r));
+ i++;
+ }
+ assert(i == 3);
+ while ((r = list_pop(&regions, struct mem_region, list)) != NULL) {
+ list_del(&r->list);
+ mem_free(&skiboot_heap, r, __location__);
+ }
+ assert(mem_region_lock.lock_val == 0);
+ __free(test_heap, "");
+ return 0;
+}
diff --git a/core/test/run-mem_region_init.c b/core/test/run-mem_region_init.c
new file mode 100644
index 0000000..a24cc7b
--- /dev/null
+++ b/core/test/run-mem_region_init.c
@@ -0,0 +1,179 @@
+/* Copyright 2013-2014 IBM Corp.
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or
+ * implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+#include <config.h>
+
+#define BITS_PER_LONG (sizeof(long) * 8)
+/* Don't include this, it's PPC-specific */
+#define __CPU_H
+static unsigned int cpu_max_pir = 1;
+struct cpu_thread {
+ unsigned int chip_id;
+};
+
+#include <stdlib.h>
+
+/* Use these before we undefine them below. */
+static inline void *real_malloc(size_t size)
+{
+ return malloc(size);
+}
+
+static inline void real_free(void *p)
+{
+ return free(p);
+}
+
+#include "../malloc.c"
+
+#include <skiboot.h>
+/* We need mem_region to accept __location__ */
+#define is_rodata(p) true
+#include "../mem_region.c"
+
+/* But we need device tree to make copies of names. */
+#undef is_rodata
+#define is_rodata(p) false
+
+static inline char *skiboot_strdup(const char *str)
+{
+ char *ret = __malloc(strlen(str) + 1, "");
+ return memcpy(ret, str, strlen(str) + 1);
+}
+#undef strdup
+#define strdup skiboot_strdup
+
+#include "../device.c"
+
+#include <skiboot.h>
+
+#include <assert.h>
+#include <stdio.h>
+
+void lock(struct lock *l)
+{
+ assert(!l->lock_val);
+ l->lock_val = 1;
+}
+
+void unlock(struct lock *l)
+{
+ assert(l->lock_val);
+ l->lock_val = 0;
+}
+
+/* We actually need a lot of room for the bitmaps! */
+#define TEST_HEAP_ORDER 27
+#define TEST_HEAP_SIZE (1ULL << TEST_HEAP_ORDER)
+
+static void add_mem_node(uint64_t start, uint64_t len)
+{
+ struct dt_node *mem;
+ u64 reg[2];
+ char name[sizeof("memory@") + STR_MAX_CHARS(reg[0])];
+
+ /* reg contains start and length */
+ reg[0] = cpu_to_be64(start);
+ reg[1] = cpu_to_be64(len);
+
+ sprintf(name, "memory@%llx", (unsigned long long)start);
+
+ mem = dt_new(dt_root, name);
+ assert(mem);
+ dt_add_property_string(mem, "device_type", "memory");
+ dt_add_property(mem, "reg", reg, sizeof(reg));
+}
+
+void add_chip_dev_associativity(struct dt_node *dev __attribute__((unused)))
+{
+}
+
+int main(void)
+{
+ uint64_t end;
+ int builtins;
+ struct mem_region *r;
+ char *heap = real_malloc(TEST_HEAP_SIZE);
+
+ /* Use malloc for the heap, so valgrind can find issues. */
+ skiboot_heap.start = (unsigned long)heap;
+ skiboot_heap.len = TEST_HEAP_SIZE;
+ skiboot_os_reserve.len = (unsigned long)heap;
+
+ dt_root = dt_new_root("");
+ dt_add_property_cells(dt_root, "#address-cells", 2);
+ dt_add_property_cells(dt_root, "#size-cells", 2);
+
+ /* Make sure we overlap the heap, at least. */
+ add_mem_node(0, 0x100000000ULL);
+ add_mem_node(0x100000000ULL, 0x100000000ULL);
+ end = 0x200000000ULL;
+
+ /* Now convert. */
+ mem_region_init();
+ assert(mem_check(&skiboot_heap));
+
+ builtins = 0;
+ list_for_each(&regions, r, list) {
+ /* Regions must not overlap. */
+ struct mem_region *r2, *pre = NULL, *post = NULL;
+ list_for_each(&regions, r2, list) {
+ if (r == r2)
+ continue;
+ assert(!overlaps(r, r2));
+ }
+
+ /* But should have exact neighbours. */
+ list_for_each(&regions, r2, list) {
+ if (r == r2)
+ continue;
+ if (r2->start == r->start + r->len)
+ post = r2;
+ if (r2->start + r2->len == r->start)
+ pre = r2;
+ }
+ assert(r->start == 0 || pre);
+ assert(r->start + r->len == end || post);
+
+ if (r == &skiboot_code_and_text ||
+ r == &skiboot_heap ||
+ r == &skiboot_after_heap ||
+ r == &skiboot_cpu_stacks ||
+ r == &skiboot_os_reserve)
+ builtins++;
+ else
+ assert(r->type == REGION_SKIBOOT_HEAP);
+ assert(mem_check(r));
+ }
+ assert(builtins == 5);
+
+ dt_free(dt_root);
+
+ while ((r = list_pop(&regions, struct mem_region, list)) != NULL) {
+ list_del(&r->list);
+ if (r != &skiboot_code_and_text &&
+ r != &skiboot_heap &&
+ r != &skiboot_after_heap &&
+ r != &skiboot_os_reserve &&
+ r != &skiboot_cpu_stacks) {
+ free(r);
+ }
+ assert(mem_check(&skiboot_heap));
+ }
+ assert(mem_region_lock.lock_val == 0);
+ real_free(heap);
+ return 0;
+}
diff --git a/core/test/run-mem_region_release_unused.c b/core/test/run-mem_region_release_unused.c
new file mode 100644
index 0000000..e73cf25
--- /dev/null
+++ b/core/test/run-mem_region_release_unused.c
@@ -0,0 +1,177 @@
+/* Copyright 2013-2014 IBM Corp.
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or
+ * implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+#include <config.h>
+
+#define BITS_PER_LONG (sizeof(long) * 8)
+/* Don't include this, it's PPC-specific */
+#define __CPU_H
+static unsigned int cpu_max_pir = 1;
+struct cpu_thread {
+ unsigned int chip_id;
+};
+
+#include <stdlib.h>
+
+static void *__malloc(size_t size, const char *location __attribute__((unused)))
+{
+ return malloc(size);
+}
+
+static void *__realloc(void *ptr, size_t size, const char *location __attribute__((unused)))
+{
+ return realloc(ptr, size);
+}
+
+static void *__zalloc(size_t size, const char *location __attribute__((unused)))
+{
+ return calloc(size, 1);
+}
+
+static inline void __free(void *p, const char *location __attribute__((unused)))
+{
+ return free(p);
+}
+
+#include <skiboot.h>
+
+/* We need mem_region to accept __location__ */
+#define is_rodata(p) true
+#include "../mem_region.c"
+
+/* But we need device tree to make copies of names. */
+#undef is_rodata
+#define is_rodata(p) false
+
+#include "../device.c"
+#include <assert.h>
+#include <stdio.h>
+
+void lock(struct lock *l)
+{
+ l->lock_val++;
+}
+
+void unlock(struct lock *l)
+{
+ l->lock_val--;
+}
+
+#define TEST_HEAP_ORDER 12
+#define TEST_HEAP_SIZE (1ULL << TEST_HEAP_ORDER)
+
+static void add_mem_node(uint64_t start, uint64_t len)
+{
+ struct dt_node *mem;
+ u64 reg[2];
+ char name[sizeof("memory@") + STR_MAX_CHARS(reg[0])];
+
+ /* reg contains start and length */
+ reg[0] = cpu_to_be64(start);
+ reg[1] = cpu_to_be64(len);
+
+ sprintf(name, "memory@%llx", (long long)start);
+
+ mem = dt_new(dt_root, name);
+ dt_add_property_string(mem, "device_type", "memory");
+ dt_add_property(mem, "reg", reg, sizeof(reg));
+}
+
+void add_chip_dev_associativity(struct dt_node *dev __attribute__((unused)))
+{
+}
+
+int main(void)
+{
+ uint64_t i;
+ struct mem_region *r, *other = NULL;
+ void *other_mem;
+ const char *last;
+
+ /* Use malloc for the heap, so valgrind can find issues. */
+ skiboot_heap.start = (unsigned long)malloc(TEST_HEAP_SIZE);
+ skiboot_heap.len = TEST_HEAP_SIZE;
+ skiboot_os_reserve.len = skiboot_heap.start;
+
+ dt_root = dt_new_root("");
+ dt_add_property_cells(dt_root, "#address-cells", 2);
+ dt_add_property_cells(dt_root, "#size-cells", 2);
+
+ other_mem = malloc(1024*1024);
+ add_mem_node((unsigned long)other_mem, 1024*1024);
+
+ /* Now convert. */
+ mem_region_init();
+
+ /* Find our node to allocate from */
+ list_for_each(&regions, r, list) {
+ if (region_start(r) == other_mem)
+ other = r;
+ }
+ /* This could happen if skiboot addresses clashed with our alloc. */
+ assert(other);
+ assert(mem_check(other));
+
+ /* Allocate 1k from other region. */
+ mem_alloc(other, 1024, 1, "1k");
+ mem_region_release_unused();
+
+ assert(mem_check(&skiboot_heap));
+
+ /* Now we expect it to be split. */
+ i = 0;
+ list_for_each(&regions, r, list) {
+ assert(mem_check(r));
+ i++;
+ if (r == &skiboot_os_reserve)
+ continue;
+ if (r == &skiboot_code_and_text)
+ continue;
+ if (r == &skiboot_heap)
+ continue;
+ if (r == &skiboot_after_heap)
+ continue;
+ if (r == &skiboot_cpu_stacks)
+ continue;
+ if (r == other) {
+ assert(r->type == REGION_SKIBOOT_HEAP);
+ assert(r->len < 1024 * 1024);
+ } else {
+ assert(r->type == REGION_OS);
+ assert(r->start == other->start + other->len);
+ assert(r->start + r->len == other->start + 1024*1024);
+ }
+ }
+ assert(i == 7);
+
+ last = NULL;
+ list_for_each(&regions, r, list) {
+ if (last != r->name &&
+ strncmp(r->name, NODE_REGION_PREFIX,
+ strlen(NODE_REGION_PREFIX)) == 0) {
+ /* It's safe to cast away const as this is
+ * only going to happen in test code */
+ free((void*)r->name);
+ break;
+ }
+ last = r->name;
+ }
+
+ dt_free(dt_root);
+ free((void *)(long)skiboot_heap.start);
+ free(other_mem);
+ return 0;
+}
diff --git a/core/test/run-mem_region_release_unused_noalloc.c b/core/test/run-mem_region_release_unused_noalloc.c
new file mode 100644
index 0000000..818e272
--- /dev/null
+++ b/core/test/run-mem_region_release_unused_noalloc.c
@@ -0,0 +1,159 @@
+/* Copyright 2013-2014 IBM Corp.
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or
+ * implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+#include <config.h>
+
+#define BITS_PER_LONG (sizeof(long) * 8)
+/* Don't include this, it's PPC-specific */
+#define __CPU_H
+static unsigned int cpu_max_pir = 1;
+struct cpu_thread {
+ unsigned int chip_id;
+};
+
+#include <stdlib.h>
+
+static void *__malloc(size_t size, const char *location __attribute__((unused)))
+{
+ return malloc(size);
+}
+
+static void *__realloc(void *ptr, size_t size, const char *location __attribute__((unused)))
+{
+ return realloc(ptr, size);
+}
+
+static void *__zalloc(size_t size, const char *location __attribute__((unused)))
+{
+ return calloc(size, 1);
+}
+
+static inline void __free(void *p, const char *location __attribute__((unused)))
+{
+ return free(p);
+}
+
+#include <skiboot.h>
+
+/* We need mem_region to accept __location__ */
+#define is_rodata(p) true
+#include "../mem_region.c"
+
+/* But we need device tree to make copies of names. */
+#undef is_rodata
+#define is_rodata(p) false
+
+#include "../device.c"
+#include <assert.h>
+#include <stdio.h>
+
+void lock(struct lock *l)
+{
+ l->lock_val++;
+}
+
+void unlock(struct lock *l)
+{
+ l->lock_val--;
+}
+
+#define TEST_HEAP_ORDER 12
+#define TEST_HEAP_SIZE (1ULL << TEST_HEAP_ORDER)
+
+static void add_mem_node(uint64_t start, uint64_t len)
+{
+ struct dt_node *mem;
+ u64 reg[2];
+ char name[sizeof("memory@") + STR_MAX_CHARS(reg[0])];
+
+ /* reg contains start and length */
+ reg[0] = cpu_to_be64(start);
+ reg[1] = cpu_to_be64(len);
+
+ sprintf(name, "memory@%llx", (long long)start);
+
+ mem = dt_new(dt_root, name);
+ dt_add_property_string(mem, "device_type", "memory");
+ dt_add_property(mem, "reg", reg, sizeof(reg));
+}
+
+void add_chip_dev_associativity(struct dt_node *dev __attribute__((unused)))
+{
+}
+
+int main(void)
+{
+ uint64_t i;
+ struct mem_region *r;
+ const char *last;
+
+ /* Use malloc for the heap, so valgrind can find issues. */
+ skiboot_heap.start = (unsigned long)malloc(TEST_HEAP_SIZE);
+ skiboot_heap.len = TEST_HEAP_SIZE;
+ skiboot_os_reserve.len = skiboot_heap.start;
+
+ dt_root = dt_new_root("");
+ dt_add_property_cells(dt_root, "#address-cells", 2);
+ dt_add_property_cells(dt_root, "#size-cells", 2);
+
+ add_mem_node(0, 0x100000000ULL);
+ add_mem_node(0x100000000ULL, 0x100000000ULL);
+
+ mem_region_init();
+
+ mem_region_release_unused();
+
+ assert(mem_check(&skiboot_heap));
+
+ /* Now we expect it to be split. */
+ i = 0;
+ list_for_each(&regions, r, list) {
+ assert(mem_check(r));
+ i++;
+ if (r == &skiboot_os_reserve)
+ continue;
+ if (r == &skiboot_code_and_text)
+ continue;
+ if (r == &skiboot_heap)
+ continue;
+ if (r == &skiboot_after_heap)
+ continue;
+ if (r == &skiboot_cpu_stacks)
+ continue;
+
+ /* the memory nodes should all be available to the OS now */
+ assert(r->type == REGION_OS);
+ }
+ assert(i == 9);
+
+ last = NULL;
+ list_for_each(&regions, r, list) {
+ if (last != r->name &&
+ strncmp(r->name, NODE_REGION_PREFIX,
+ strlen(NODE_REGION_PREFIX)) == 0) {
+ /* It's safe to cast away the const as
+ * this never happens at runtime,
+ * only in test and only for valgrind
+ */
+ free((void*)r->name);
+ }
+ last = r->name;
+ }
+
+ dt_free(dt_root);
+ free((void *)(long)skiboot_heap.start);
+ return 0;
+}
diff --git a/core/test/run-msg.c b/core/test/run-msg.c
new file mode 100644
index 0000000..cd36408
--- /dev/null
+++ b/core/test/run-msg.c
@@ -0,0 +1,256 @@
+/* Copyright 2013-2014 IBM Corp.
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or
+ * implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+#include <skiboot.h>
+#include <inttypes.h>
+#include <assert.h>
+
/* When set, the next zalloc() calls report allocation failure. */
static bool zalloc_should_fail = false;

/* Test replacement for skiboot's zalloc(): zero-initialised allocation
 * that can be forced to fail so opal-msg.c's ENOMEM path is exercised. */
static void *zalloc(size_t size)
{
	if (!zalloc_should_fail)
		return calloc(size, 1);

	errno = ENOMEM;
	return NULL;
}
+
+#include "../opal-msg.c"
+
+void lock(struct lock *l)
+{
+ assert(!l->lock_val);
+ l->lock_val = 1;
+}
+
+void unlock(struct lock *l)
+{
+ assert(l->lock_val);
+ l->lock_val = 0;
+}
+
/* Stub: the test has no OPAL event word to update, so ignore the call. */
void opal_update_pending_evt(uint64_t evt_mask, uint64_t evt_values)
{
	(void)evt_values;
	(void)evt_mask;
}
+
/* Sentinel handed to opal_queue_msg() as callback data; callback()
 * verifies it arrives intact. */
static long magic = 8097883813087437089UL;

static void callback(void *data)
{
	const uint64_t *payload = data;

	assert(*payload == magic);
}
+
+static size_t list_count(struct list_head *list)
+{
+ size_t count = 0;
+ struct opal_msg_entry *dummy;
+
+ list_for_each(list, dummy, link)
+ count++;
+ return count;
+}
+
+int main(void)
+{
+ struct opal_msg_entry* entry;
+ int free_size = OPAL_MAX_MSGS;
+ int nfree = free_size;
+ int npending = 0;
+ int r;
+ static struct opal_msg m;
+ uint64_t *m_ptr = (uint64_t *)&m;
+
+ opal_init_msg();
+
+ assert(list_count(&msg_pending_list) == npending);
+ assert(list_count(&msg_free_list) == nfree);
+
+ /* Callback. */
+ r = opal_queue_msg(0, &magic, callback, (u64)0, (u64)1, (u64)2);
+ assert(r == 0);
+
+ assert(list_count(&msg_pending_list) == ++npending);
+ assert(list_count(&msg_free_list) == --nfree);
+
+ r = opal_get_msg(m_ptr, sizeof(m));
+ assert(r == 0);
+
+ assert(m.params[0] == 0);
+ assert(m.params[1] == 1);
+ assert(m.params[2] == 2);
+
+ assert(list_count(&msg_pending_list) == --npending);
+ assert(list_count(&msg_free_list) == ++nfree);
+
+ /* No params. */
+ r = opal_queue_msg(0, NULL, NULL);
+ assert(r == 0);
+
+ assert(list_count(&msg_pending_list) == ++npending);
+ assert(list_count(&msg_free_list) == --nfree);
+
+ r = opal_get_msg(m_ptr, sizeof(m));
+ assert(r == 0);
+
+ assert(list_count(&msg_pending_list) == --npending);
+ assert(list_count(&msg_free_list) == ++nfree);
+
+ /* > 8 params (ARRAY_SIZE(entry->msg.params) */
+ r = opal_queue_msg(0, NULL, NULL, 0, 1, 2, 3, 4, 5, 6, 7, 0xBADDA7A);
+ assert(r == 0);
+
+ assert(list_count(&msg_pending_list) == ++npending);
+ assert(list_count(&msg_free_list) == --nfree);
+
+ r = opal_get_msg(m_ptr, sizeof(m));
+ assert(r == 0);
+
+ assert(list_count(&msg_pending_list) == --npending);
+ assert(list_count(&msg_free_list) == ++nfree);
+
+ assert(m.params[0] == 0);
+ assert(m.params[1] == 1);
+ assert(m.params[2] == 2);
+ assert(m.params[3] == 3);
+ assert(m.params[4] == 4);
+ assert(m.params[5] == 5);
+ assert(m.params[6] == 6);
+ assert(m.params[7] == 7);
+
+ /* 8 params (ARRAY_SIZE(entry->msg.params) */
+ r = opal_queue_msg(0, NULL, NULL, 0, 10, 20, 30, 40, 50, 60, 70);
+ assert(r == 0);
+
+ assert(list_count(&msg_pending_list) == ++npending);
+ assert(list_count(&msg_free_list) == --nfree);
+
+ r = opal_get_msg(m_ptr, sizeof(m));
+ assert(r == 0);
+
+ assert(list_count(&msg_pending_list) == --npending);
+ assert(list_count(&msg_free_list) == ++nfree);
+
+ assert(m.params[0] == 0);
+ assert(m.params[1] == 10);
+ assert(m.params[2] == 20);
+ assert(m.params[3] == 30);
+ assert(m.params[4] == 40);
+ assert(m.params[5] == 50);
+ assert(m.params[6] == 60);
+ assert(m.params[7] == 70);
+
+ /* Full list (no free nodes in pending). */
+ while (nfree > 0) {
+ r = opal_queue_msg(OPAL_MSG_ASYNC_COMP, NULL, NULL);
+ assert(r == 0);
+ assert(list_count(&msg_pending_list) == ++npending);
+ assert(list_count(&msg_free_list) == --nfree);
+ }
+ assert(list_count(&msg_free_list) == 0);
+ assert(nfree == 0);
+ assert(npending == OPAL_MAX_MSGS);
+
+ r = opal_queue_msg(OPAL_MSG_ASYNC_COMP, NULL, NULL);
+ assert(r == 0);
+
+ assert(list_count(&msg_pending_list) == OPAL_MAX_MSGS+1);
+ assert(list_count(&msg_pending_list) == ++npending);
+ assert(list_count(&msg_free_list) == nfree);
+
+ /* Make zalloc fail to test error handling. */
+ zalloc_should_fail = true;
+ r = opal_queue_msg(OPAL_MSG_ASYNC_COMP, NULL, NULL);
+ assert(r == OPAL_RESOURCE);
+
+ assert(list_count(&msg_pending_list) == OPAL_MAX_MSGS+1);
+ assert(list_count(&msg_pending_list) == npending);
+ assert(list_count(&msg_free_list) == nfree);
+
+ /* Empty list (no nodes). */
+ while(!list_empty(&msg_pending_list)) {
+ r = opal_get_msg(m_ptr, sizeof(m));
+ assert(r == 0);
+ npending--;
+ nfree++;
+ }
+ assert(list_count(&msg_pending_list) == npending);
+ assert(list_count(&msg_free_list) == nfree);
+ assert(npending == 0);
+ assert(nfree == OPAL_MAX_MSGS+1);
+
+ r = opal_queue_msg(OPAL_MSG_ASYNC_COMP, NULL, NULL);
+ assert(r == 0);
+
+ assert(list_count(&msg_pending_list) == ++npending);
+ assert(list_count(&msg_free_list) == --nfree);
+
+ /* Request invalid size. */
+ r = opal_get_msg(m_ptr, sizeof(m) - 1);
+ assert(r == OPAL_PARAMETER);
+
+ /* Pass null buffer. */
+ r = opal_get_msg(NULL, sizeof(m));
+ assert(r == OPAL_PARAMETER);
+
+ /* Get msg when none are pending. */
+ r = opal_get_msg(m_ptr, sizeof(m));
+ assert(r == 0);
+
+ r = opal_get_msg(m_ptr, sizeof(m));
+ assert(r == OPAL_RESOURCE);
+
+#define test_queue_num(type, val) \
+ r = opal_queue_msg(0, NULL, NULL, \
+ (type)val, (type)val, (type)val, (type)val, \
+ (type)val, (type)val, (type)val, (type)val); \
+ assert(r == 0); \
+ opal_get_msg(m_ptr, sizeof(m)); \
+ assert(r == OPAL_SUCCESS); \
+ assert(m.params[0] == (type)val); \
+ assert(m.params[1] == (type)val); \
+ assert(m.params[2] == (type)val); \
+ assert(m.params[3] == (type)val); \
+ assert(m.params[4] == (type)val); \
+ assert(m.params[5] == (type)val); \
+ assert(m.params[6] == (type)val); \
+ assert(m.params[7] == (type)val)
+
+ /* Test types of various widths */
+ test_queue_num(u64, -1);
+ test_queue_num(s64, -1);
+ test_queue_num(u32, -1);
+ test_queue_num(s32, -1);
+ test_queue_num(u16, -1);
+ test_queue_num(s16, -1);
+ test_queue_num(u8, -1);
+ test_queue_num(s8, -1);
+
+ /* Clean up the list to keep valgrind happy. */
+ while(!list_empty(&msg_free_list)) {
+ entry = list_pop(&msg_free_list, struct opal_msg_entry, link);
+ assert(entry);
+ free(entry);
+ }
+
+ while(!list_empty(&msg_pending_list)) {
+ entry = list_pop(&msg_pending_list, struct opal_msg_entry, link);
+ assert(entry);
+ free(entry);
+ }
+
+ return 0;
+}
diff --git a/core/test/run-trace.c b/core/test/run-trace.c
new file mode 100644
index 0000000..7dabebd
--- /dev/null
+++ b/core/test/run-trace.c
@@ -0,0 +1,386 @@
+/* Copyright 2013-2014 IBM Corp.
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or
+ * implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+#include <config.h>
+#include <stdlib.h>
+#include <assert.h>
+#include <sched.h>
+#include <stdlib.h>
+#include <stdint.h>
+#include <unistd.h>
+#include <stdio.h>
+#include <stdbool.h>
+#include <sys/types.h>
+#include <sys/wait.h>
+
+/* Don't include these: PPC-specific */
+#define __CPU_H
+#define __TIME_H
+#define __PROCESSOR_H
+
#if defined(__i386__) || defined(__x86_64__)
/* This is more than a lwsync, but it'll work */
static void full_barrier(void)
{
	asm volatile("mfence" : : : "memory");
}
/* Map the PPC barrier names the trace code uses onto the x86 fence. */
#define lwsync full_barrier
#define sync full_barrier
#else
#error "Define sync & lwsync for this arch"
#endif

/* Host-side replacement for skiboot's zeroing allocator. */
#define zalloc(size) calloc((size), 1)
+
/* Minimal stand-in for skiboot's struct cpu_thread: just the fields the
 * trace code touches. */
struct cpu_thread {
	uint32_t pir;
	uint32_t chip_id;
	struct trace_info *trace;	/* per-CPU trace buffer */
	int server_no;
	bool is_secondary;		/* secondary thread of 'primary' */
	struct cpu_thread *primary;
};
static struct cpu_thread *this_cpu(void);

/* Number of fake CPUs (and forked writer children in test_parallel). */
#define CPUS 4

static struct cpu_thread fake_cpus[CPUS];
+
+static inline struct cpu_thread *next_cpu(struct cpu_thread *cpu)
+{
+ if (cpu == NULL)
+ return &fake_cpus[0];
+ cpu++;
+ if (cpu == &fake_cpus[CPUS])
+ return NULL;
+ return cpu;
+}
+
+#define first_cpu() next_cpu(NULL)
+
+#define for_each_cpu(cpu) \
+ for (cpu = first_cpu(); cpu; cpu = next_cpu(cpu))
+
/* Fake timebase: tests assign 'timestamp' directly; mftb() reports it. */
static unsigned long timestamp;

static unsigned long mftb(void)
{
	return timestamp;
}
+
/* Test stand-in for skiboot's per-chip allocator: ignore the chip id and
 * hand back suitably aligned host memory, or NULL on failure. */
static void *local_alloc(unsigned int chip_id,
			 size_t size, size_t align)
{
	void *mem = NULL;

	(void)chip_id;
	if (posix_memalign(&mem, align, size) != 0)
		mem = NULL;
	return mem;
}
+
+struct dt_node;
+extern struct dt_node *opal_node;
+
+#include "../trace.c"
+
+#define rmb() lwsync()
+
+#include "../external/trace.c"
+#include "../device.c"
+
+char __rodata_start[1], __rodata_end[1];
+struct dt_node *opal_node;
+struct debug_descriptor debug_descriptor = {
+ .trace_mask = -1
+};
+
+void lock(struct lock *l)
+{
+ assert(!l->lock_val);
+ l->lock_val = 1;
+}
+
+void unlock(struct lock *l)
+{
+ assert(l->lock_val);
+ l->lock_val = 0;
+}
+
/* The "current" CPU; each forked child in test_parallel() points this at
 * its own fake CPU before writing trace entries. */
struct cpu_thread *my_fake_cpu;
/* Test override of skiboot's this_cpu(). */
static struct cpu_thread *this_cpu(void)
{
	return my_fake_cpu;
}
+
+#include <sys/mman.h>
+#define PER_CHILD_TRACES (1024*1024)
+
/* Child body for test_parallel(): emit PER_CHILD_TRACES minimal records
 * into this CPU's buffer, timestamped so the parent can attribute each
 * record to its writer, then a sentinel record (type 0x70) and exit. */
static void write_trace_entries(int id)
{
	void exit(int);	/* local prototype; stdlib.h isn't included here */
	unsigned int i;
	union trace trace;

	timestamp = id;
	for (i = 0; i < PER_CHILD_TRACES; i++) {
		/* Timestamps interleave across children: i*CPUS + id. */
		timestamp = i * CPUS + id;
		assert(sizeof(trace.hdr) % 8 == 0);
		/* First child never repeats, second repeats once, etc. */
		trace_add(&trace, 3 + ((i / (id + 1)) % 0x40),
			  sizeof(trace.hdr));
	}

	/* Final entry has special type, so parent knows it's over. */
	trace_add(&trace, 0x70, sizeof(trace.hdr));
	exit(0);
}
+
+static bool all_done(const bool done[])
+{
+ unsigned int i;
+
+ for (i = 0; i < CPUS; i++)
+ if (!done[i])
+ return false;
+ return true;
+}
+
+static void test_parallel(void)
+{
+ void *p;
+ unsigned int i, counts[CPUS] = { 0 }, overflows[CPUS] = { 0 };
+ unsigned int repeats[CPUS] = { 0 }, num_overflows[CPUS] = { 0 };
+ bool done[CPUS] = { false };
+ size_t len = sizeof(struct trace_info) + TBUF_SZ + sizeof(union trace);
+ int last = 0;
+
+ /* Use a shared mmap to test actual parallel buffers. */
+ i = (CPUS*len + getpagesize()-1)&~(getpagesize()-1);
+ p = mmap(NULL, i, PROT_READ|PROT_WRITE,
+ MAP_ANONYMOUS|MAP_SHARED, -1, 0);
+
+ for (i = 0; i < CPUS; i++) {
+ fake_cpus[i].trace = p + i * len;
+ fake_cpus[i].trace->tb.mask = TBUF_SZ - 1;
+ fake_cpus[i].trace->tb.max_size = sizeof(union trace);
+ fake_cpus[i].is_secondary = false;
+ }
+
+ for (i = 0; i < CPUS; i++) {
+ if (!fork()) {
+ /* Child. */
+ my_fake_cpu = &fake_cpus[i];
+ write_trace_entries(i);
+ }
+ }
+
+ while (!all_done(done)) {
+ union trace t;
+
+ for (i = 0; i < CPUS; i++) {
+ if (trace_get(&t, &fake_cpus[(i+last) % CPUS].trace->tb))
+ break;
+ }
+
+ if (i == CPUS) {
+ sched_yield();
+ continue;
+ }
+ i = (i + last) % CPUS;
+ last = i;
+
+ assert(t.hdr.cpu < CPUS);
+ assert(!done[t.hdr.cpu]);
+
+ if (t.hdr.type == TRACE_OVERFLOW) {
+ /* Conveniently, each record is 16 bytes here. */
+ assert(t.overflow.bytes_missed % 16 == 0);
+ overflows[i] += t.overflow.bytes_missed / 16;
+ num_overflows[i]++;
+ continue;
+ }
+
+ assert(t.hdr.timestamp % CPUS == t.hdr.cpu);
+ if (t.hdr.type == TRACE_REPEAT) {
+ assert(t.hdr.len_div_8 * 8 == sizeof(t.repeat));
+ assert(t.repeat.num != 0);
+ assert(t.repeat.num <= t.hdr.cpu);
+ repeats[t.hdr.cpu] += t.repeat.num;
+ } else if (t.hdr.type == 0x70) {
+ done[t.hdr.cpu] = true;
+ } else {
+ counts[t.hdr.cpu]++;
+ }
+ }
+
+ /* Gather children. */
+ for (i = 0; i < CPUS; i++) {
+ int status;
+ wait(&status);
+ }
+
+ for (i = 0; i < CPUS; i++) {
+ printf("Child %i: %u produced, %u overflows, %llu total\n", i,
+ counts[i], overflows[i],
+ (long long)fake_cpus[i].trace->tb.end);
+ assert(counts[i] + repeats[i] <= PER_CHILD_TRACES);
+ }
+ /* Child 0 never repeats. */
+ assert(repeats[0] == 0);
+ assert(counts[0] + overflows[0] == PER_CHILD_TRACES);
+
+ /*
+ * FIXME: Other children have some fuzz, since overflows may
+ * include repeat record we already read. And odd-numbered
+ * overflows may include more repeat records than normal
+ * records (they alternate).
+ */
+}
+
/* Single-threaded trace buffer tests — empty buffer, wrap and overflow,
 * odd-length records, repeat coalescing, read-while-write — followed by
 * the forked parallel test. */
int main(void)
{
	union trace minimal;
	union trace large;
	union trace trace;
	unsigned int i, j;

	opal_node = dt_new_root("opal");
	/* Odd-numbered CPUs are secondary threads of the preceding even. */
	for (i = 0; i < CPUS; i++) {
		fake_cpus[i].server_no = i;
		fake_cpus[i].is_secondary = (i & 0x1);
		fake_cpus[i].primary = &fake_cpus[i & ~0x1];
	}
	init_trace_buffers();
	my_fake_cpu = &fake_cpus[0];

	/* Freshly initialised buffers must read back empty. */
	for (i = 0; i < CPUS; i++) {
		assert(trace_empty(&fake_cpus[i].trace->tb));
		assert(!trace_get(&trace, &fake_cpus[i].trace->tb));
	}

	/* Add and read back one minimal-sized record. */
	assert(sizeof(trace.hdr) % 8 == 0);
	timestamp = 1;
	trace_add(&minimal, 100, sizeof(trace.hdr));
	assert(trace_get(&trace, &my_fake_cpu->trace->tb));
	assert(trace.hdr.len_div_8 == minimal.hdr.len_div_8);
	assert(trace.hdr.timestamp == timestamp);

	/* Make it wrap once. */
	for (i = 0; i < TBUF_SZ / (minimal.hdr.len_div_8 * 8) + 1; i++) {
		timestamp = i;
		/* Alternate types so records don't coalesce into repeats. */
		trace_add(&minimal, 99 + (i%2), sizeof(trace.hdr));
	}

	assert(trace_get(&trace, &my_fake_cpu->trace->tb));
	/* First one must be overflow marker. */
	assert(trace.hdr.type == TRACE_OVERFLOW);
	assert(trace.hdr.len_div_8 * 8 == sizeof(trace.overflow));
	assert(trace.overflow.bytes_missed == minimal.hdr.len_div_8 * 8);

	/* The rest of the buffer reads back in order. */
	for (i = 0; i < TBUF_SZ / (minimal.hdr.len_div_8 * 8); i++) {
		assert(trace_get(&trace, &my_fake_cpu->trace->tb));
		assert(trace.hdr.len_div_8 == minimal.hdr.len_div_8);
		assert(trace.hdr.timestamp == i+1);
		assert(trace.hdr.type == 99 + ((i+1)%2));
	}
	assert(!trace_get(&trace, &my_fake_cpu->trace->tb));

	/* Now put in some weird-length ones, to test overlap.
	 * Last power of 2, minus 8. */
	for (j = 0; (1 << j) < sizeof(large); j++);
	for (i = 0; i < TBUF_SZ; i++) {
		timestamp = i;
		trace_add(&large, 100 + (i%2), (1 << (j-1)));
	}
	assert(trace_get(&trace, &my_fake_cpu->trace->tb));
	assert(trace.hdr.type == TRACE_OVERFLOW);
	assert(trace_get(&trace, &my_fake_cpu->trace->tb));
	assert(trace.hdr.len_div_8 == large.hdr.len_div_8);
	i = trace.hdr.timestamp;
	while (trace_get(&trace, &my_fake_cpu->trace->tb))
		assert(trace.hdr.timestamp == ++i);

	/* Test repeats. */
	for (i = 0; i < 65538; i++) {
		timestamp = i;
		trace_add(&minimal, 100, sizeof(trace.hdr));
	}
	timestamp = i;
	trace_add(&minimal, 101, sizeof(trace.hdr));
	timestamp = i+1;
	trace_add(&minimal, 101, sizeof(trace.hdr));

	/* 65538 identical records coalesce into one real record, a repeat
	 * saturated at 65535, one more real record and a repeat of 1 —
	 * the repeat count appears to saturate at 16 bits. */
	assert(trace_get(&trace, &my_fake_cpu->trace->tb));
	assert(trace.hdr.timestamp == 0);
	assert(trace.hdr.len_div_8 == minimal.hdr.len_div_8);
	assert(trace.hdr.type == 100);
	assert(trace_get(&trace, &my_fake_cpu->trace->tb));
	assert(trace.hdr.type == TRACE_REPEAT);
	assert(trace.hdr.len_div_8 * 8 == sizeof(trace.repeat));
	assert(trace.repeat.num == 65535);
	assert(trace.repeat.timestamp == 65535);
	assert(trace_get(&trace, &my_fake_cpu->trace->tb));
	assert(trace.hdr.timestamp == 65536);
	assert(trace.hdr.len_div_8 == minimal.hdr.len_div_8);
	assert(trace.hdr.type == 100);
	assert(trace_get(&trace, &my_fake_cpu->trace->tb));
	assert(trace.hdr.type == TRACE_REPEAT);
	assert(trace.hdr.len_div_8 * 8 == sizeof(trace.repeat));
	assert(trace.repeat.num == 1);
	assert(trace.repeat.timestamp == 65537);

	/* The two type-101 records likewise become real + repeat-of-1. */
	assert(trace_get(&trace, &my_fake_cpu->trace->tb));
	assert(trace.hdr.timestamp == 65538);
	assert(trace.hdr.len_div_8 == minimal.hdr.len_div_8);
	assert(trace.hdr.type == 101);
	assert(trace_get(&trace, &my_fake_cpu->trace->tb));
	assert(trace.hdr.type == TRACE_REPEAT);
	assert(trace.hdr.len_div_8 * 8 == sizeof(trace.repeat));
	assert(trace.repeat.num == 1);
	assert(trace.repeat.timestamp == 65539);

	/* Now, test adding repeat while we're reading... */
	timestamp = 0;
	trace_add(&minimal, 100, sizeof(trace.hdr));
	assert(trace_get(&trace, &my_fake_cpu->trace->tb));
	assert(trace.hdr.timestamp == 0);
	assert(trace.hdr.len_div_8 == minimal.hdr.len_div_8);
	assert(trace.hdr.type == 100);

	for (i = 1; i < TBUF_SZ; i++) {
		timestamp = i;
		trace_add(&minimal, 100, sizeof(trace.hdr));
		assert(trace_get(&trace, &my_fake_cpu->trace->tb));
		/* The repeat counter restarts every 65536 entries. */
		if (i % 65536 == 0) {
			assert(trace.hdr.type == 100);
			assert(trace.hdr.len_div_8 == minimal.hdr.len_div_8);
		} else {
			assert(trace.hdr.type == TRACE_REPEAT);
			assert(trace.hdr.len_div_8 * 8 == sizeof(trace.repeat));
			assert(trace.repeat.num == 1);
		}
		assert(trace.repeat.timestamp == i);
		assert(!trace_get(&trace, &my_fake_cpu->trace->tb));
	}

	/* Secondaries presumably share their primary's buffer (set up by
	 * init_trace_buffers — confirm in trace.c), so free once each. */
	for (i = 0; i < CPUS; i++)
		if (!fake_cpus[i].is_secondary)
			free(fake_cpus[i].trace);

	test_parallel();

	return 0;
}
diff --git a/core/test/stubs.c b/core/test/stubs.c
new file mode 100644
index 0000000..3233455
--- /dev/null
+++ b/core/test/stubs.c
@@ -0,0 +1,43 @@
+/* Copyright 2013-2014 IBM Corp.
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or
+ * implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+/* Add any stub functions required for linking here. */
+#include <stdlib.h>
+
/* Target of every STUB alias: if a test actually calls a stubbed symbol,
 * that's a test bug — die loudly rather than misbehave. */
static void stub_function(void)
{
	abort();
}
+
/* Declare fnname as a weak alias of stub_function: satisfies the linker
 * for symbols the tests reference but never call. */
#define STUB(fnname) \
	void fnname(void) __attribute__((weak, alias ("stub_function")))

STUB(fdt_begin_node);
STUB(fdt_property);
STUB(fdt_end_node);
STUB(fdt_create);
STUB(fdt_add_reservemap_entry);
STUB(fdt_finish_reservemap);
STUB(fdt_strerror);
STUB(fdt_check_header);
STUB(_fdt_check_node_offset);
STUB(fdt_next_tag);
STUB(fdt_string);
STUB(fdt_get_name);
STUB(dt_first);
STUB(dt_next);
STUB(dt_has_node_property);
STUB(dt_get_address);
STUB(add_chip_dev_associativity);