Diffstat (limited to 'lib/lmb.c')
-rw-r--r--	lib/lmb.c	184
1 file changed, 98 insertions(+), 86 deletions(-)
diff --git a/lib/lmb.c b/lib/lmb.c
index bb6f232f6bc..e5a0677e3f9 100644
--- a/lib/lmb.c
+++ b/lib/lmb.c
@@ -317,8 +317,34 @@ static long _lmb_free(struct alist *lmb_rgn_lst, phys_addr_t base,
rgn[i].flags);
}
-static long lmb_overlaps_region(struct alist *lmb_rgn_lst, phys_addr_t base,
- phys_size_t size)
+/**
+ * lmb_overlap_checks() - check whether a region can be allocated or reserved
+ * @lmb_rgn_lst: list of LMB regions
+ * @base: base address of region to be checked
+ * @size: size of region to be checked
+ * @flags: flag of the region to be checked (only for reservation requests)
+ * @alloc: true to check an allocation request, false to check a reservation
+ *
+ * Check if the region passed to the function overlaps with any one of
+ * the regions of the passed lmb region list.
+ *
+ * If the @alloc flag is set to true, this check stops as soon as an
+ * overlapping region is found. The function can also be called to
+ * check if a reservation request can be satisfied, by setting
+ * @alloc to false. In that case, the function iterates through all
+ * the regions in the passed list to ensure that the requested region
+ * does not overlap with any existing region. An overlap is allowed
+ * only when the flags of both the requested region and the existing
+ * region are LMB_NONE.
+ *
+ * Return: index of the overlapping region, -1 if no overlap is found
+ *
+ * When the function is called for a reservation request check, -1 is
+ * also returned when only allowed overlaps exist, i.e. both the
+ * requested region and the overlapping existing regions have the
+ * LMB_NONE flag.
+ */
+static long lmb_overlap_checks(struct alist *lmb_rgn_lst, phys_addr_t base,
+ phys_size_t size, u32 flags, bool alloc)
{
unsigned long i;
struct lmb_region *rgn = lmb_rgn_lst->data;
@@ -326,9 +352,12 @@ static long lmb_overlaps_region(struct alist *lmb_rgn_lst, phys_addr_t base,
for (i = 0; i < lmb_rgn_lst->count; i++) {
phys_addr_t rgnbase = rgn[i].base;
phys_size_t rgnsize = rgn[i].size;
+ u32 rgnflags = rgn[i].flags;
- if (lmb_addrs_overlap(base, size, rgnbase, rgnsize))
- break;
+ if (lmb_addrs_overlap(base, size, rgnbase, rgnsize)) {
+ if (alloc || flags != LMB_NONE || flags != rgnflags)
+ break;
+ }
}
return (i < lmb_rgn_lst->count) ? i : -1;
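
Both former call sites are now funnelled through this one helper, with the
@alloc flag selecting the behaviour. A minimal sketch of the two usage
patterns, assuming the static helpers and the global lmb instance from this
file (base, size and flags stand in for caller-supplied values):

	long rgn;

	/*
	 * Allocation-style check (alloc == true): stop at the first
	 * overlapping region; the flags argument is not consulted.
	 */
	rgn = lmb_overlap_checks(&lmb.used_mem, base, size,
				 LMB_NOOVERWRITE, true);
	if (rgn < 0) {
		/* nothing reserved in [base, base + size) -- range is free */
	}

	/*
	 * Reservation-style check (alloc == false): walk every region;
	 * an overlap is tolerated only when both the request and the
	 * existing region carry LMB_NONE.
	 */
	if (lmb_overlap_checks(&lmb.used_mem, base, size, flags, false) != -1)
		return -EEXIST;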
@@ -390,7 +419,8 @@ phys_addr_t io_lmb_alloc(struct lmb *io_lmb, phys_size_t size, ulong align)
base = ALIGN_DOWN(lmbbase + lmbsize - size, align);
while (base && lmbbase <= base) {
- rgn = lmb_overlaps_region(&io_lmb->used_mem, base, size);
+ rgn = lmb_overlap_checks(&io_lmb->used_mem, base, size,
+ LMB_NOOVERWRITE, true);
if (rgn < 0) {
/* This area isn't reserved, take it */
if (lmb_add_region_flags(&io_lmb->used_mem, base,
@@ -488,6 +518,21 @@ void lmb_dump_all(void)
#endif
}
+static long lmb_reserve(phys_addr_t base, phys_size_t size, u32 flags)
+{
+ long ret = 0;
+ struct alist *lmb_rgn_lst = &lmb.used_mem;
+
+ if (lmb_overlap_checks(lmb_rgn_lst, base, size, flags, false) != -1)
+ return -EEXIST;
+
+ ret = lmb_add_region_flags(lmb_rgn_lst, base, size, flags);
+ if (ret)
+ return ret;
+
+ return lmb_map_update_notify(base, size, LMB_MAP_OP_RESERVE, flags);
+}
+
static void lmb_reserve_uboot_region(void)
{
int bank;
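
With lmb_reserve() now static, external code that used to reserve a fixed
range directly would go through the unified allocator instead. A hedged
sketch, using the lmb_alloc_mem() prototype added further down in this patch
(the address and SZ_1M size, from linux/sizes.h, are illustrative):

	phys_addr_t addr = 0x84000000;	/* illustrative address */
	int ret;

	/* reserve [addr, addr + SZ_1M) or fail, e.g. -EEXIST on a conflict */
	ret = lmb_alloc_mem(LMB_MEM_ALLOC_ADDR, 0, &addr, SZ_1M, LMB_NONE);
	if (ret)
		return ret;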
@@ -557,40 +602,7 @@ static __maybe_unused void lmb_reserve_common_spl(void)
}
}
-/**
- * lmb_can_reserve_region() - check if the region can be reserved
- * @base: base address of region to be reserved
- * @size: size of region to be reserved
- * @flags: flag of the region to be reserved
- *
- * Go through all the reserved regions and ensure that the requested
- * region does not overlap with any existing regions. An overlap is
- * allowed only when the flag of the request region and the existing
- * region is LMB_NONE.
- *
- * Return: true if region can be reserved, false otherwise
- */
-static bool lmb_can_reserve_region(phys_addr_t base, phys_size_t size,
- u32 flags)
-{
- uint i;
- struct lmb_region *lmb_reserved = lmb.used_mem.data;
-
- for (i = 0; i < lmb.used_mem.count; i++) {
- u32 rgnflags = lmb_reserved[i].flags;
- phys_addr_t rgnbase = lmb_reserved[i].base;
- phys_size_t rgnsize = lmb_reserved[i].size;
-
- if (lmb_addrs_overlap(base, size, rgnbase, rgnsize)) {
- if (flags != LMB_NONE || flags != rgnflags)
- return false;
- }
- }
-
- return true;
-}
-
-void lmb_add_memory(void)
+static void lmb_add_memory(void)
{
int i;
phys_addr_t bank_end;
@@ -640,8 +652,7 @@ long lmb_add(phys_addr_t base, phys_size_t size)
return lmb_map_update_notify(base, size, LMB_MAP_OP_ADD, LMB_NONE);
}
-long lmb_free_flags(phys_addr_t base, phys_size_t size,
- uint flags)
+long lmb_free(phys_addr_t base, phys_size_t size, u32 flags)
{
long ret;
@@ -652,36 +663,18 @@ long lmb_free_flags(phys_addr_t base, phys_size_t size,
return lmb_map_update_notify(base, size, LMB_MAP_OP_FREE, flags);
}
-long lmb_free(phys_addr_t base, phys_size_t size)
-{
- return lmb_free_flags(base, size, LMB_NONE);
-}
-
-long lmb_reserve(phys_addr_t base, phys_size_t size, u32 flags)
-{
- long ret = 0;
- struct alist *lmb_rgn_lst = &lmb.used_mem;
-
- if (!lmb_can_reserve_region(base, size, flags))
- return -EEXIST;
-
- ret = lmb_add_region_flags(lmb_rgn_lst, base, size, flags);
- if (ret)
- return ret;
-
- return lmb_map_update_notify(base, size, LMB_MAP_OP_RESERVE, flags);
-}
-
-static phys_addr_t _lmb_alloc_base(phys_size_t size, ulong align,
- phys_addr_t max_addr, u32 flags)
+static int _lmb_alloc_base(phys_size_t size, ulong align,
+ phys_addr_t *addr, u32 flags)
{
int ret;
long i, rgn;
+ phys_addr_t max_addr;
phys_addr_t base = 0;
phys_addr_t res_base;
struct lmb_region *lmb_used = lmb.used_mem.data;
struct lmb_region *lmb_memory = lmb.available_mem.data;
+ max_addr = *addr;
for (i = lmb.available_mem.count - 1; i >= 0; i--) {
phys_addr_t lmbbase = lmb_memory[i].base;
phys_size_t lmbsize = lmb_memory[i].size;
@@ -702,7 +695,8 @@ static phys_addr_t _lmb_alloc_base(phys_size_t size, ulong align,
}
while (base && lmbbase <= base) {
- rgn = lmb_overlaps_region(&lmb.used_mem, base, size);
+ rgn = lmb_overlap_checks(&lmb.used_mem, base, size,
+ LMB_NOOVERWRITE, true);
if (rgn < 0) {
/* This area isn't reserved, take it */
if (lmb_add_region_flags(&lmb.used_mem, base,
@@ -714,8 +708,8 @@ static phys_addr_t _lmb_alloc_base(phys_size_t size, ulong align,
flags);
if (ret)
return ret;
-
- return base;
+ *addr = base;
+ return 0;
}
res_base = lmb_used[rgn].base;
@@ -728,27 +722,17 @@ static phys_addr_t _lmb_alloc_base(phys_size_t size, ulong align,
log_debug("%s: Failed to allocate 0x%lx bytes below 0x%lx\n",
__func__, (ulong)size, (ulong)max_addr);
- return 0;
-}
-
-phys_addr_t lmb_alloc(phys_size_t size, ulong align)
-{
- return _lmb_alloc_base(size, align, LMB_ALLOC_ANYWHERE, LMB_NONE);
-}
-
-phys_addr_t lmb_alloc_base(phys_size_t size, ulong align, phys_addr_t max_addr,
- uint flags)
-{
- return _lmb_alloc_base(size, align, max_addr, flags);
+ return -1;
}
-int lmb_alloc_addr(phys_addr_t base, phys_size_t size, u32 flags)
+static int _lmb_alloc_addr(phys_addr_t base, phys_size_t size, u32 flags)
{
long rgn;
struct lmb_region *lmb_memory = lmb.available_mem.data;
/* Check if the requested address is in one of the memory regions */
- rgn = lmb_overlaps_region(&lmb.available_mem, base, size);
+ rgn = lmb_overlap_checks(&lmb.available_mem, base, size,
+ LMB_NOOVERWRITE, true);
if (rgn >= 0) {
/*
* Check if the requested end address is in the same memory
@@ -756,14 +740,41 @@ int lmb_alloc_addr(phys_addr_t base, phys_size_t size, u32 flags)
*/
if (lmb_addrs_overlap(lmb_memory[rgn].base,
lmb_memory[rgn].size,
- base + size - 1, 1)) {
+ base + size - 1, 1))
/* ok, reserve the memory */
- if (!lmb_reserve(base, size, flags))
- return 0;
- }
+ return lmb_reserve(base, size, flags);
}
- return -1;
+ return -EINVAL;
+}
+
+int lmb_alloc_mem(enum lmb_mem_type type, u64 align, phys_addr_t *addr,
+ phys_size_t size, u32 flags)
+{
+ int ret = -1;
+
+ if (!size)
+ return 0;
+
+ if (!addr)
+ return -EINVAL;
+
+ switch (type) {
+ case LMB_MEM_ALLOC_ANY:
+ *addr = LMB_ALLOC_ANYWHERE;
+ fallthrough;
+ case LMB_MEM_ALLOC_MAX:
+ ret = _lmb_alloc_base(size, align, addr, flags);
+ break;
+ case LMB_MEM_ALLOC_ADDR:
+ ret = _lmb_alloc_addr(*addr, size, flags);
+ break;
+ default:
+ log_debug("%s: Invalid memory allocation type requested %d\n",
+ __func__, type);
+ }
+
+ return ret;
}
/* Return number of bytes from a given address that are free */
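
Taken together, the three lmb_mem_type modes replace the removed lmb_alloc(),
lmb_alloc_base() and lmb_alloc_addr() wrappers, with @addr doubling as input
(ceiling or fixed address) and output (allocated base). A usage sketch,
assuming the enum and prototype this patch exports via lmb.h and SZ_*
constants from linux/sizes.h:

	phys_addr_t addr;
	int ret;

	/* like the old lmb_alloc(): anywhere; addr is output only */
	ret = lmb_alloc_mem(LMB_MEM_ALLOC_ANY, SZ_4K, &addr, SZ_1M, LMB_NONE);

	/*
	 * like the old lmb_alloc_base(): addr carries the ceiling in and
	 * the allocated base out
	 */
	addr = SZ_256M;
	ret = lmb_alloc_mem(LMB_MEM_ALLOC_MAX, SZ_4K, &addr, SZ_1M, LMB_NONE);

	/* like the old lmb_alloc_addr(): fixed address, align is unused */
	addr = 0x84000000;	/* illustrative address */
	ret = lmb_alloc_mem(LMB_MEM_ALLOC_ADDR, 0, &addr, SZ_1M, LMB_NONE);

	/* lmb_free() now takes the flags the region was reserved with */
	if (!ret)
		lmb_free(addr, SZ_1M, LMB_NONE);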
@@ -775,7 +786,8 @@ phys_size_t lmb_get_free_size(phys_addr_t addr)
struct lmb_region *lmb_memory = lmb.available_mem.data;
/* check if the requested address is in the memory regions */
- rgn = lmb_overlaps_region(&lmb.available_mem, addr, 1);
+ rgn = lmb_overlap_checks(&lmb.available_mem, addr, 1, LMB_NOOVERWRITE,
+ true);
if (rgn >= 0) {
for (i = 0; i < lmb.used_mem.count; i++) {
if (addr < lmb_used[i].base) {