aboutsummaryrefslogtreecommitdiff
path: root/core/mem_region.c
diff options
context:
space:
mode:
authorJeremy Kerr <jk@ozlabs.org>2015-08-17 14:49:53 +0800
committerStewart Smith <stewart@linux.vnet.ibm.com>2015-08-17 17:04:14 +1000
commit58033e44b927c80257a72037aeed2cab682e0c85 (patch)
treef58166f8464d0d0aac22cff343d6ea4315493fdf /core/mem_region.c
parent44088be051802fcdda2e04a2667cd3ff8b71b75a (diff)
downloadskiboot-58033e44b927c80257a72037aeed2cab682e0c85.zip
skiboot-58033e44b927c80257a72037aeed2cab682e0c85.tar.gz
skiboot-58033e44b927c80257a72037aeed2cab682e0c85.tar.bz2
core/mem_region: Add mem_range_is_reserved()
This change adds a function to check whether a range of memory is
covered by one or more reservations.

Signed-off-by: Jeremy Kerr <jk@ozlabs.org>
Signed-off-by: Stewart Smith <stewart@linux.vnet.ibm.com>
Diffstat (limited to 'core/mem_region.c')
-rw-r--r--core/mem_region.c43
1 file changed, 43 insertions, 0 deletions
diff --git a/core/mem_region.c b/core/mem_region.c
index b85b1e3..3ed8006 100644
--- a/core/mem_region.c
+++ b/core/mem_region.c
@@ -789,6 +789,49 @@ struct mem_region *find_mem_region(const char *name)
return NULL;
}
+/* mem_range_is_reserved() - check whether the address range
+ * [start, start + size) is fully covered by reserved memory regions.
+ *
+ * @start: physical address of the first byte of the range
+ * @size: length of the range in bytes. A zero-length range is
+ *        trivially covered: end == start, so the 'start >= end'
+ *        check succeeds on the first pass and we return true.
+ *
+ * Returns true if every byte in the range lies within some reserved
+ * region (possibly spanning several regions), false if any gap exists.
+ *
+ * NOTE(review): 'start + size' is computed without an overflow check;
+ * presumably callers never pass a range that wraps the 64-bit address
+ * space — confirm at call sites.
+ */
+bool mem_range_is_reserved(uint64_t start, uint64_t size)
+{
+ uint64_t end = start + size;
+ struct mem_region *region;
+
+ /* We may have the range covered by a number of regions, which could
+ * appear in any order. So, we look for a region that covers the
+ * start address, and bump start up to the end of that region.
+ *
+ * We repeat until we've either bumped past the end of the range,
+ * or we didn't find a matching region.
+ *
+ * This has a worst-case of O(n^2), but n is well bounded by the
+ * small number of reservations.
+ */
+ for (;;) {
+ bool found = false;
+
+ list_for_each(&regions, region, list) {
+ if (!region_is_reserved(region))
+ continue;
+
+ /* does this region overlap the start address, and
+ * have a non-zero size? */
+ if (region->start <= start &&
+ region->start + region->len > start &&
+ region->len) {
+ start = region->start + region->len;
+ found = true;
+ }
+ }
+
+ /* 'end' is the first byte outside of the range */
+ if (start >= end)
+ return true;
+
+ /* No reserved region covers the current 'start': there is an
+ * uncovered gap, so the range is not fully reserved. */
+ if (!found)
+ break;
+ }
+
+ return false;
+}
+
void adjust_cpu_stacks_alloc(void)
{
/* CPU stacks start at 0, then when we know max possible PIR,