path: root/lib/lmb.c
Diffstat (limited to 'lib/lmb.c')
-rw-r--r--  lib/lmb.c  146
1 file changed, 87 insertions(+), 59 deletions(-)
diff --git a/lib/lmb.c b/lib/lmb.c
index 93fc1bea07c..bb6f232f6bc 100644
--- a/lib/lmb.c
+++ b/lib/lmb.c
@@ -23,6 +23,9 @@
DECLARE_GLOBAL_DATA_PTR;
+#define LMB_RGN_OVERLAP 1
+#define LMB_RGN_ADJACENT 2
+
/*
* The following low level LMB functions must not access the global LMB memory
* map since they are also used to manage IOVA memory maps in iommu drivers like
@@ -49,8 +52,22 @@ static long lmb_addrs_adjacent(phys_addr_t base1, phys_size_t size1,
return 0;
}
-static long lmb_regions_overlap(struct alist *lmb_rgn_lst, unsigned long r1,
- unsigned long r2)
+/**
+ * lmb_regions_check() - Check if the regions overlap, or are adjacent
+ * @lmb_rgn_lst: List of LMB regions
+ * @r1: First region to check
+ * @r2: Second region to check
+ *
+ * Check whether the two regions r1 and r2 overlap or are adjacent to
+ * each other, provided that their flags match.
+ *
+ * Return:
+ * * %LMB_RGN_OVERLAP - Regions overlap
+ * * %LMB_RGN_ADJACENT - Regions adjacent to each other
+ * * 0 - Neither of the above, or flags mismatch
+ */
+static long lmb_regions_check(struct alist *lmb_rgn_lst, unsigned long r1,
+ unsigned long r2)
{
struct lmb_region *rgn = lmb_rgn_lst->data;
phys_addr_t base1 = rgn[r1].base;
@@ -58,19 +75,15 @@ static long lmb_regions_overlap(struct alist *lmb_rgn_lst, unsigned long r1,
phys_addr_t base2 = rgn[r2].base;
phys_size_t size2 = rgn[r2].size;
- return lmb_addrs_overlap(base1, size1, base2, size2);
-}
+ if (rgn[r1].flags != rgn[r2].flags)
+ return 0;
-static long lmb_regions_adjacent(struct alist *lmb_rgn_lst, unsigned long r1,
- unsigned long r2)
-{
- struct lmb_region *rgn = lmb_rgn_lst->data;
- phys_addr_t base1 = rgn[r1].base;
- phys_size_t size1 = rgn[r1].size;
- phys_addr_t base2 = rgn[r2].base;
- phys_size_t size2 = rgn[r2].size;
+ if (lmb_addrs_overlap(base1, size1, base2, size2))
+ return LMB_RGN_OVERLAP;
+ else if (lmb_addrs_adjacent(base1, size1, base2, size2))
+ return LMB_RGN_ADJACENT;
- return lmb_addrs_adjacent(base1, size1, base2, size2);
+ return 0;
}
static void lmb_remove_region(struct alist *lmb_rgn_lst, unsigned long r)
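For readers skimming the hunk above: the two former helpers are folded into a single lmb_regions_check() that first compares flags and then classifies the pair as overlapping, adjacent, or neither. A standalone sketch of that classification (simplified types, a made-up main(), and boolean adjacency; not the U-Boot source, which operates on an alist of struct lmb_region):

#include <stdint.h>
#include <stdio.h>

typedef uint64_t phys_addr_t;
typedef uint64_t phys_size_t;

#define LMB_RGN_OVERLAP		1
#define LMB_RGN_ADJACENT	2

struct rgn {
	phys_addr_t base;
	phys_size_t size;
	uint32_t flags;
};

/* True if the two ranges share at least one byte */
static int addrs_overlap(phys_addr_t b1, phys_size_t s1,
			 phys_addr_t b2, phys_size_t s2)
{
	return b1 < b2 + s2 && b2 < b1 + s1;
}

/* True if one range ends exactly where the other one starts */
static int addrs_adjacent(phys_addr_t b1, phys_size_t s1,
			  phys_addr_t b2, phys_size_t s2)
{
	return b1 + s1 == b2 || b2 + s2 == b1;
}

static long regions_check(const struct rgn *r1, const struct rgn *r2)
{
	if (r1->flags != r2->flags)
		return 0;	/* regions with different flags are never merged */
	if (addrs_overlap(r1->base, r1->size, r2->base, r2->size))
		return LMB_RGN_OVERLAP;
	if (addrs_adjacent(r1->base, r1->size, r2->base, r2->size))
		return LMB_RGN_ADJACENT;
	return 0;
}

int main(void)
{
	struct rgn a = { 0x1000, 0x1000, 0 };	/* [0x1000, 0x2000) */
	struct rgn b = { 0x2000, 0x1000, 0 };	/* [0x2000, 0x3000) */
	struct rgn c = { 0x1800, 0x1000, 0 };	/* [0x1800, 0x2800) */

	printf("a,b: %ld (adjacent)\n", regions_check(&a, &b));
	printf("a,c: %ld (overlap)\n", regions_check(&a, &c));
	return 0;
}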
@@ -96,25 +109,6 @@ static void lmb_coalesce_regions(struct alist *lmb_rgn_lst, unsigned long r1,
lmb_remove_region(lmb_rgn_lst, r2);
}
-/*Assumption : base addr of region 1 < base addr of region 2*/
-static void lmb_fix_over_lap_regions(struct alist *lmb_rgn_lst,
- unsigned long r1, unsigned long r2)
-{
- struct lmb_region *rgn = lmb_rgn_lst->data;
-
- phys_addr_t base1 = rgn[r1].base;
- phys_size_t size1 = rgn[r1].size;
- phys_addr_t base2 = rgn[r2].base;
- phys_size_t size2 = rgn[r2].size;
-
- if (base1 + size1 > base2 + size2) {
- printf("This will not be a case any time\n");
- return;
- }
- rgn[r1].size = base2 + size2 - base1;
- lmb_remove_region(lmb_rgn_lst, r2);
-}
-
static long lmb_resize_regions(struct alist *lmb_rgn_lst,
unsigned long idx_start,
phys_addr_t base, phys_size_t size)
@@ -209,14 +203,11 @@ static long lmb_add_region_flags(struct alist *lmb_rgn_lst, phys_addr_t base,
break;
} else if (ret < 0) {
if (flags != rgnflags)
- break;
+ continue;
rgn[i].size += size;
coalesced++;
break;
} else if (lmb_addrs_overlap(base, size, rgnbase, rgnsize)) {
- if (flags != LMB_NONE)
- return -EEXIST;
-
ret = lmb_resize_regions(lmb_rgn_lst, i, base, size);
if (ret < 0)
return -1;
@@ -229,16 +220,21 @@ static long lmb_add_region_flags(struct alist *lmb_rgn_lst, phys_addr_t base,
}
if (lmb_rgn_lst->count && i < lmb_rgn_lst->count - 1) {
- rgn = lmb_rgn_lst->data;
- if (rgn[i].flags == rgn[i + 1].flags) {
- if (lmb_regions_adjacent(lmb_rgn_lst, i, i + 1)) {
- lmb_coalesce_regions(lmb_rgn_lst, i, i + 1);
- coalesced++;
- } else if (lmb_regions_overlap(lmb_rgn_lst, i, i + 1)) {
- /* fix overlapping area */
- lmb_fix_over_lap_regions(lmb_rgn_lst, i, i + 1);
- coalesced++;
- }
+ ret = lmb_regions_check(lmb_rgn_lst, i, i + 1);
+ if (ret == LMB_RGN_ADJACENT) {
+ lmb_coalesce_regions(lmb_rgn_lst, i, i + 1);
+ coalesced++;
+ } else if (ret == LMB_RGN_OVERLAP) {
+ /* fix overlapping areas */
+ phys_addr_t rgnbase = rgn[i].base;
+ phys_size_t rgnsize = rgn[i].size;
+
+ ret = lmb_resize_regions(lmb_rgn_lst, i,
+ rgnbase, rgnsize);
+ if (ret < 0)
+ return -1;
+
+ coalesced++;
}
}
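With lmb_fix_over_lap_regions() gone, the adjacent case is still handled by lmb_coalesce_regions(), while the overlapping case is routed through lmb_resize_regions(), which grows entry i to cover the union and drops the entries it swallowed. A minimal standalone illustration of the coalesce step only (plain array with made-up values instead of the alist container used by the real code):

#include <stdint.h>
#include <stdio.h>
#include <string.h>

struct rgn {
	uint64_t base;
	uint64_t size;
};

/* LMB_RGN_ADJACENT case: fold region i+1 into region i */
static void coalesce(struct rgn *r, unsigned int *count, unsigned int i)
{
	r[i].size += r[i + 1].size;
	memmove(&r[i + 1], &r[i + 2], (*count - i - 2) * sizeof(*r));
	(*count)--;
}

int main(void)
{
	/* Two back-to-back regions: [0x1000, 0x2000) and [0x2000, 0x3000) */
	struct rgn r[] = { { 0x1000, 0x1000 }, { 0x2000, 0x1000 } };
	unsigned int count = 2;

	coalesce(r, &count, 0);

	/* Prints: 1 region(s), base 0x1000 size 0x2000 */
	printf("%u region(s), base 0x%llx size 0x%llx\n", count,
	       (unsigned long long)r[0].base, (unsigned long long)r[0].size);
	return 0;
}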
@@ -561,6 +557,39 @@ static __maybe_unused void lmb_reserve_common_spl(void)
}
}
+/**
+ * lmb_can_reserve_region() - check if the region can be reserved
+ * @base: base address of region to be reserved
+ * @size: size of region to be reserved
+ * @flags: flags of the region to be reserved
+ *
+ * Go through all the reserved regions and ensure that the requested
+ * region does not overlap with any existing region. An overlap is
+ * allowed only when the flags of both the requested region and the
+ * existing region are LMB_NONE.
+ *
+ * Return: true if region can be reserved, false otherwise
+ */
+static bool lmb_can_reserve_region(phys_addr_t base, phys_size_t size,
+ u32 flags)
+{
+ uint i;
+ struct lmb_region *lmb_reserved = lmb.used_mem.data;
+
+ for (i = 0; i < lmb.used_mem.count; i++) {
+ u32 rgnflags = lmb_reserved[i].flags;
+ phys_addr_t rgnbase = lmb_reserved[i].base;
+ phys_size_t rgnsize = lmb_reserved[i].size;
+
+ if (lmb_addrs_overlap(base, size, rgnbase, rgnsize)) {
+ if (flags != LMB_NONE || flags != rgnflags)
+ return false;
+ }
+ }
+
+ return true;
+}
+
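The rule encoded above boils down to: an overlap with an existing reservation is tolerated only when both sides carry LMB_NONE; any other flag on either side rejects the request. Restated as a standalone predicate (the numeric flag values are hypothetical stand-ins for illustration):

#include <stdbool.h>
#include <stdint.h>

/* Stand-in flag values, for illustration only */
#define LMB_NONE	0U
#define LMB_NOMAP	(1U << 1)

/*
 * Overlap with an existing reservation is tolerated only when both
 * the requested and the existing region carry LMB_NONE; any other
 * combination makes the reservation fail.
 */
bool overlap_allowed(uint32_t req_flags, uint32_t rgn_flags)
{
	return req_flags == LMB_NONE && rgn_flags == LMB_NONE;
}

Two overlapping LMB_NONE reservations are therefore still accepted and merged by lmb_add_region_flags(), whereas for example an LMB_NOMAP request over an already reserved range makes lmb_reserve() fail with -EEXIST, as shown in the next hunk.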
void lmb_add_memory(void)
{
int i;
@@ -633,6 +662,9 @@ long lmb_reserve(phys_addr_t base, phys_size_t size, u32 flags)
long ret = 0;
struct alist *lmb_rgn_lst = &lmb.used_mem;
+ if (!lmb_can_reserve_region(base, size, flags))
+ return -EEXIST;
+
ret = lmb_add_region_flags(lmb_rgn_lst, base, size, flags);
if (ret)
return ret;
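Caller-side, the only visible change is the new -EEXIST error path. A minimal sketch of a caller checking it (hypothetical function name, address, size and flag choice; assumes U-Boot's <lmb.h>, <linux/errno.h> and <stdio.h>):

#include <lmb.h>
#include <linux/errno.h>
#include <stdio.h>

/* Hypothetical caller; base, size and the flag choice are made up */
int reserve_fw_region(void)
{
	phys_addr_t base = 0x80000000;
	phys_size_t size = 0x100000;
	long ret;

	ret = lmb_reserve(base, size, LMB_NOMAP);
	if (ret == -EEXIST) {
		/*
		 * The range collides with an existing reservation and at
		 * least one of the two regions is not LMB_NONE, so the
		 * memory map was left untouched.
		 */
		printf("lmb: 0x%llx already reserved\n",
		       (unsigned long long)base);
		return ret;
	}

	return ret < 0 ? -1 : 0;
}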
@@ -692,29 +724,25 @@ static phys_addr_t _lmb_alloc_base(phys_size_t size, ulong align,
base = ALIGN_DOWN(res_base - size, align);
}
}
+
+ log_debug("%s: Failed to allocate 0x%lx bytes below 0x%lx\n",
+ __func__, (ulong)size, (ulong)max_addr);
+
return 0;
}
phys_addr_t lmb_alloc(phys_size_t size, ulong align)
{
- return lmb_alloc_base(size, align, LMB_ALLOC_ANYWHERE, LMB_NONE);
+ return _lmb_alloc_base(size, align, LMB_ALLOC_ANYWHERE, LMB_NONE);
}
phys_addr_t lmb_alloc_base(phys_size_t size, ulong align, phys_addr_t max_addr,
uint flags)
{
- phys_addr_t alloc;
-
- alloc = _lmb_alloc_base(size, align, max_addr, flags);
-
- if (alloc == 0)
- printf("ERROR: Failed to allocate 0x%lx bytes below 0x%lx.\n",
- (ulong)size, (ulong)max_addr);
-
- return alloc;
+ return _lmb_alloc_base(size, align, max_addr, flags);
}
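Because the failure printf() has been demoted to a log_debug() inside _lmb_alloc_base(), callers of lmb_alloc_base() that still want a loud diagnostic must print it themselves. A minimal caller sketch (hypothetical function name and limit; assumes <lmb.h>, <stdio.h> and U-Boot's usual ulong type):

#include <lmb.h>
#include <stdio.h>

/* Hypothetical caller: grab a buffer below a made-up DMA limit */
phys_addr_t alloc_dma_buffer(phys_size_t size)
{
	phys_addr_t limit = 0x40000000;	/* illustrative limit */
	phys_addr_t addr;

	addr = lmb_alloc_base(size, 0x1000, limit, LMB_NONE);
	if (!addr)
		/*
		 * LMB itself only emits a log_debug() now, so report the
		 * failure here if the caller considers it fatal.
		 */
		printf("ERROR: no LMB memory for 0x%lx bytes below 0x%lx\n",
		       (ulong)size, (ulong)limit);

	return addr;
}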
-phys_addr_t lmb_alloc_addr(phys_addr_t base, phys_size_t size, u32 flags)
+int lmb_alloc_addr(phys_addr_t base, phys_size_t size, u32 flags)
{
long rgn;
struct lmb_region *lmb_memory = lmb.available_mem.data;
@@ -731,11 +759,11 @@ phys_addr_t lmb_alloc_addr(phys_addr_t base, phys_size_t size, u32 flags)
base + size - 1, 1)) {
/* ok, reserve the memory */
if (!lmb_reserve(base, size, flags))
- return base;
+ return 0;
}
}
- return 0;
+ return -1;
}
/* Return number of bytes from a given address that are free */
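Finally, since lmb_alloc_addr() now reports success as 0 rather than echoing the requested base address back, callers switch from comparing the returned address to checking the integer status. A hypothetical caller sketch (made-up function name; assumes <lmb.h>):

#include <lmb.h>

/* Hypothetical caller claiming a fixed load area */
int claim_load_area(phys_addr_t load_addr, phys_size_t len)
{
	/*
	 * Previously success was signalled by getting load_addr back and
	 * failure by 0; now 0 means the range was reserved and a negative
	 * value means it could not be.
	 */
	if (lmb_alloc_addr(load_addr, len, LMB_NONE))
		return -1;

	return 0;
}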