path: root/lib/lmb.c
author     Simon Goldschmidt <simon.k.r.goldschmidt@gmail.com>	2019-01-14 22:38:16 +0100
committer  Tom Rini <trini@konsulko.com>	2019-01-16 16:37:00 -0500
commit     0f7c51a676ca73f7950a7e4f9d8454e57324270c (patch)
tree       e704b8212a3cede9a80aacd9387dbd812071b214 /lib/lmb.c
parent     d67f33cf4ee72fd9bc64d68cb51a77798b65cf3a (diff)
lib: lmb: reserving overlapping regions should fail
lmb_add_region() handles overlapping regions incorrectly: instead of
merging or rejecting a new reserved region that overlaps an existing
one, it just adds the new region.

Since the same function is used internally by lmb_alloc(), change
lmb_add_region() to reject overlapping regions.

Also, to keep the reserved memory map correct after 'free', reserved
entries created by allocating memory must not round their size up to a
multiple of the alignment, but must record the original size. This
ensures the reserved region is completely removed when the caller calls
'lmb_free', as that takes the same size as was passed to 'lmb_alloc'
etc.

Add a test to assert this.

Reviewed-by: Simon Glass <sjg@chromium.org>
Signed-off-by: Simon Goldschmidt <simon.k.r.goldschmidt@gmail.com>
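As an illustration only (this is not the test added by the commit), a caller-level
sketch of the new behaviour might look like the following. It assumes U-Boot's
<lmb.h> declarations (lmb_init, lmb_add, lmb_reserve, lmb_alloc, lmb_free,
lmb_is_reserved); the base addresses, sizes and exact return-value conventions
are illustrative and should be checked against include/lmb.h:

	#include <common.h>
	#include <lmb.h>

	/* Illustrative sketch only: addresses/sizes are made up. */
	static void lmb_overlap_demo(void)
	{
		struct lmb lmb;
		phys_addr_t base = 0x40000000;
		phys_size_t ram  = 0x10000000;	/* 256 MiB of "RAM" */
		phys_addr_t addr;

		lmb_init(&lmb);
		lmb_add(&lmb, base, ram);

		/* First reservation succeeds. */
		if (lmb_reserve(&lmb, base + 0x1000, 0x1000) < 0)
			return;

		/* A second, overlapping reservation is now rejected instead
		 * of being silently added as a duplicate region. */
		if (lmb_reserve(&lmb, base + 0x1800, 0x1000) >= 0)
			printf("unexpected: overlapping reserve succeeded\n");

		/* An allocation now reserves exactly 'size' bytes (not the
		 * size rounded up to the alignment), so freeing with the
		 * same size removes the reservation completely. */
		addr = lmb_alloc(&lmb, 5, 0x1000);
		if (addr) {
			lmb_free(&lmb, addr, 5);
			if (lmb_is_reserved(&lmb, addr))
				printf("unexpected: stale reservation left\n");
		}
	}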
Diffstat (limited to 'lib/lmb.c')
-rw-r--r--	lib/lmb.c	11
1 file changed, 4 insertions, 7 deletions
diff --git a/lib/lmb.c b/lib/lmb.c
index 6d3dcf4..cd297f8 100644
--- a/lib/lmb.c
+++ b/lib/lmb.c
@@ -131,6 +131,9 @@ static long lmb_add_region(struct lmb_region *rgn, phys_addr_t base, phys_size_t
 			rgn->region[i].size += size;
 			coalesced++;
 			break;
+		} else if (lmb_addrs_overlap(base, size, rgnbase, rgnsize)) {
+			/* regions overlap */
+			return -1;
 		}
 	}
@@ -269,11 +272,6 @@ static phys_addr_t lmb_align_down(phys_addr_t addr, phys_size_t size)
 	return addr & ~(size - 1);
 }
 
-static phys_addr_t lmb_align_up(phys_addr_t addr, ulong size)
-{
-	return (addr + (size - 1)) & ~(size - 1);
-}
-
 phys_addr_t __lmb_alloc_base(struct lmb *lmb, phys_size_t size, ulong align, phys_addr_t max_addr)
 {
 	long i, j;
@@ -302,8 +300,7 @@ phys_addr_t __lmb_alloc_base(struct lmb *lmb, phys_size_t size, ulong align, phy
 		if (j < 0) {
 			/* This area isn't reserved, take it */
 			if (lmb_add_region(&lmb->reserved, base,
-					   lmb_align_up(size,
-							align)) < 0)
+					   size) < 0)
 				return 0;
 			return base;
 		}
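For context, the lmb_addrs_overlap() helper used by the new check treats two
ranges as overlapping when neither ends before the other begins. A stand-alone
sketch of such a predicate, written from that description rather than copied
from lib/lmb.c (the typedefs here are simplified stand-ins for the
architecture-defined phys_addr_t/phys_size_t):

	#include <stdio.h>

	/* Simplified stand-ins so the sketch builds on its own. */
	typedef unsigned long long phys_addr_t;
	typedef unsigned long long phys_size_t;

	/* Two ranges [base, base + size) overlap iff each one starts no
	 * later than the last byte of the other. */
	static int addrs_overlap(phys_addr_t base1, phys_size_t size1,
				 phys_addr_t base2, phys_size_t size2)
	{
		phys_addr_t end1 = base1 + size1 - 1;
		phys_addr_t end2 = base2 + size2 - 1;

		return base1 <= end2 && base2 <= end1;
	}

	int main(void)
	{
		/* Overlapping: [0x1000, 0x2000) vs [0x1800, 0x2800) */
		printf("%d\n", addrs_overlap(0x1000, 0x1000, 0x1800, 0x1000)); /* 1 */
		/* Adjacent, not overlapping: [0x1000, 0x2000) vs [0x2000, 0x3000) */
		printf("%d\n", addrs_overlap(0x1000, 0x1000, 0x2000, 0x1000)); /* 0 */
		return 0;
	}

With this patch, a reservation for which that predicate is true against an
existing (non-coalescable) region makes lmb_add_region() return -1 instead of
appending a duplicate entry.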