author	Patrick Delaunay <patrick.delaunay@foss.st.com>	2021-05-07 14:50:29 +0200
committer	Tom Rini <trini@konsulko.com>	2021-06-07 10:48:40 -0400
commit	59c0ea5df33fc4d9b62226d29e3b5c61d639303f (patch)
tree	0e9a9f4f5036d537fa01ee598f49045ad50661ab /lib
parent	e3b64beda5dd1a6b6bedfd1fe0e50be1ddea7044 (diff)
lmb: Add support of flags for no-map properties
Add "flags" in lmb_property to save the "no-map" property of reserved region and a new function lmb_reserve_flags() to check this flag. The default allocation use flags = LMB_NONE. The adjacent reserved memory region are merged only when they have the same flags value. This patch is partially based on flags support done in Linux kernel mm/memblock .c (previously lmb.c); it is why LMB_NOMAP = 0x4, it is aligned with MEMBLOCK_NOMAP value. Signed-off-by: Patrick Delaunay <patrick.delaunay@foss.st.com> Reviewed-by: Simon Glass <sjg@chromium.org>
Diffstat (limited to 'lib')
-rw-r--r--	lib/lmb.c	52
1 file changed, 43 insertions(+), 9 deletions(-)
diff --git a/lib/lmb.c b/lib/lmb.c
index c08c4d9..69700bf 100644
--- a/lib/lmb.c
+++ b/lib/lmb.c
@@ -25,6 +25,8 @@ void lmb_dump_all_force(struct lmb *lmb)
(unsigned long long)lmb->memory.region[i].base);
printf(" .size = 0x%llx\n",
(unsigned long long)lmb->memory.region[i].size);
+ printf(" .flags = 0x%x\n",
+ lmb->memory.region[i].flags);
}
printf("\n reserved.cnt = 0x%lx\n", lmb->reserved.cnt);
@@ -33,6 +35,8 @@ void lmb_dump_all_force(struct lmb *lmb)
(unsigned long long)lmb->reserved.region[i].base);
printf(" .size = 0x%llx\n",
(unsigned long long)lmb->reserved.region[i].size);
+ printf(" .flags = 0x%x\n",
+ lmb->reserved.region[i].flags);
}
}
@@ -81,6 +85,7 @@ static void lmb_remove_region(struct lmb_region *rgn, unsigned long r)
for (i = r; i < rgn->cnt - 1; i++) {
rgn->region[i].base = rgn->region[i + 1].base;
rgn->region[i].size = rgn->region[i + 1].size;
+ rgn->region[i].flags = rgn->region[i + 1].flags;
}
rgn->cnt--;
}
@@ -144,7 +149,8 @@ void lmb_init_and_reserve_range(struct lmb *lmb, phys_addr_t base,
}
/* This routine is called with relocation disabled. */
-static long lmb_add_region(struct lmb_region *rgn, phys_addr_t base, phys_size_t size)
+static long lmb_add_region_flags(struct lmb_region *rgn, phys_addr_t base,
+ phys_size_t size, enum lmb_flags flags)
{
unsigned long coalesced = 0;
long adjacent, i;
@@ -152,6 +158,7 @@ static long lmb_add_region(struct lmb_region *rgn, phys_addr_t base, phys_size_t
if (rgn->cnt == 0) {
rgn->region[0].base = base;
rgn->region[0].size = size;
+ rgn->region[0].flags = flags;
rgn->cnt = 1;
return 0;
}
@@ -160,18 +167,27 @@ static long lmb_add_region(struct lmb_region *rgn, phys_addr_t base, phys_size_t
for (i = 0; i < rgn->cnt; i++) {
phys_addr_t rgnbase = rgn->region[i].base;
phys_size_t rgnsize = rgn->region[i].size;
+ phys_size_t rgnflags = rgn->region[i].flags;
- if ((rgnbase == base) && (rgnsize == size))
- /* Already have this region, so we're done */
- return 0;
+ if (rgnbase == base && rgnsize == size) {
+ if (flags == rgnflags)
+ /* Already have this region, so we're done */
+ return 0;
+ else
+ return -1; /* regions with new flags */
+ }
adjacent = lmb_addrs_adjacent(base, size, rgnbase, rgnsize);
if (adjacent > 0) {
+ if (flags != rgnflags)
+ break;
rgn->region[i].base -= size;
rgn->region[i].size += size;
coalesced++;
break;
} else if (adjacent < 0) {
+ if (flags != rgnflags)
+ break;
rgn->region[i].size += size;
coalesced++;
break;
@@ -182,8 +198,10 @@ static long lmb_add_region(struct lmb_region *rgn, phys_addr_t base, phys_size_t
}
if ((i < rgn->cnt - 1) && lmb_regions_adjacent(rgn, i, i + 1)) {
- lmb_coalesce_regions(rgn, i, i + 1);
- coalesced++;
+ if (rgn->region[i].flags == rgn->region[i + 1].flags) {
+ lmb_coalesce_regions(rgn, i, i + 1);
+ coalesced++;
+ }
}
if (coalesced)
@@ -196,9 +214,11 @@ static long lmb_add_region(struct lmb_region *rgn, phys_addr_t base, phys_size_t
if (base < rgn->region[i].base) {
rgn->region[i + 1].base = rgn->region[i].base;
rgn->region[i + 1].size = rgn->region[i].size;
+ rgn->region[i + 1].flags = rgn->region[i].flags;
} else {
rgn->region[i + 1].base = base;
rgn->region[i + 1].size = size;
+ rgn->region[i + 1].flags = flags;
break;
}
}
@@ -206,6 +226,7 @@ static long lmb_add_region(struct lmb_region *rgn, phys_addr_t base, phys_size_t
if (base < rgn->region[0].base) {
rgn->region[0].base = base;
rgn->region[0].size = size;
+ rgn->region[0].flags = flags;
}
rgn->cnt++;
@@ -213,6 +234,12 @@ static long lmb_add_region(struct lmb_region *rgn, phys_addr_t base, phys_size_t
return 0;
}
+static long lmb_add_region(struct lmb_region *rgn, phys_addr_t base,
+ phys_size_t size)
+{
+ return lmb_add_region_flags(rgn, base, size, LMB_NONE);
+}
+
/* This routine may be called with relocation disabled. */
long lmb_add(struct lmb *lmb, phys_addr_t base, phys_size_t size)
{
@@ -267,14 +294,21 @@ long lmb_free(struct lmb *lmb, phys_addr_t base, phys_size_t size)
* beginning of the hole and add the region after hole.
*/
rgn->region[i].size = base - rgn->region[i].base;
- return lmb_add_region(rgn, end + 1, rgnend - end);
+ return lmb_add_region_flags(rgn, end + 1, rgnend - end,
+ rgn->region[i].flags);
}
-long lmb_reserve(struct lmb *lmb, phys_addr_t base, phys_size_t size)
+long lmb_reserve_flags(struct lmb *lmb, phys_addr_t base, phys_size_t size,
+ enum lmb_flags flags)
{
struct lmb_region *_rgn = &(lmb->reserved);
- return lmb_add_region(_rgn, base, size);
+ return lmb_add_region_flags(_rgn, base, size, flags);
+}
+
+long lmb_reserve(struct lmb *lmb, phys_addr_t base, phys_size_t size)
+{
+ return lmb_reserve_flags(lmb, base, size, LMB_NONE);
}
static long lmb_overlaps_region(struct lmb_region *rgn, phys_addr_t base,