| author | Sughosh Ganu <sughosh.ganu@linaro.org> | 2024-08-26 17:29:18 +0530 |
| committer | Tom Rini <trini@konsulko.com> | 2024-09-03 14:08:50 -0600 |
| commit | ed17a33fed296a87219b0ff702045ce488bc3771 (patch) | |
| tree | e1ac01002f7dcd0e1c1adbf5139234038ea58f8f /lib | |
| parent | a368850ae2551a4fcc5f9a2e9e8e90c056d4fe73 (diff) | |
lmb: make LMB memory map persistent and global
The current LMB APIs for allocating and reserving memory use a
per-caller memory view, so memory allocated by one caller can be
overwritten by another caller. Make these allocations and reservations
persistent using the alloced list data structure.
Two alloced lists are declared -- one for the available (free) memory,
and one for the used memory. Once full, a list can be extended at
runtime.
[sjg: Use a stack to store pointer of lmb struct when running lmb tests]
Signed-off-by: Sughosh Ganu <sughosh.ganu@linaro.org>
Signed-off-by: Simon Glass <sjg@chromium.org>
[sjg: Optimise the logic to add a region in lmb_add_region_flags()]
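The practical effect on callers is easiest to see side by side. The sketch below is illustrative only -- the `example_reserve()` wrapper and the base/size values are hypothetical, and it builds only inside the U-Boot tree -- but the `lmb_add()`/`lmb_reserve()` signatures match those introduced by this patch:

```c
#include <lmb.h>	/* U-Boot LMB API; in-tree build assumed */

/* Hypothetical call site; the addresses are placeholders. */
void example_reserve(void)
{
	phys_addr_t base = 0x80000000;
	phys_size_t size = 0x10000000;

	/*
	 * Before this patch, each caller carried its own map:
	 *
	 *	struct lmb lmb;
	 *
	 *	lmb_init(&lmb);
	 *	lmb_add(&lmb, base, size);
	 *	lmb_reserve(&lmb, base, 0x100000);
	 *
	 * and nothing stopped another caller, holding its own struct
	 * lmb, from handing out the same range again.
	 */

	/* After this patch, all callers share one persistent map: */
	lmb_add(base, size);
	lmb_reserve(base, 0x100000);
}
```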
Diffstat (limited to 'lib')

| mode | file | lines changed |
|---|---|---|
| -rw-r--r-- | lib/efi_loader/efi_dt_fixup.c | 2 |
| -rw-r--r-- | lib/efi_loader/efi_helper.c | 2 |
| -rw-r--r-- | lib/lmb.c | 446 |

3 files changed, 266 insertions(+), 184 deletions(-)
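The key mechanism in `lmb_add_region_flags()` below is that a full region list is grown in place via `alist_expand_by()` rather than failing, as the old fixed `CONFIG_LMB_MAX_REGIONS` arrays did. The following standalone model -- plain C with `realloc()`, not U-Boot's actual `alist` implementation -- sketches that doubling behaviour under that assumption:

```c
#include <stdlib.h>

/* Mirrors the three fields of struct lmb_region in the patch */
struct region {
	unsigned long long base;
	unsigned long long size;
	unsigned int flags;
};

/* Simplified stand-in for U-Boot's alist: count in use, alloc = capacity */
struct region_list {
	struct region *data;
	unsigned int count;
	unsigned int alloc;
};

/* Grow-on-demand append, analogous to the alist_full()/alist_expand_by()
 * path in lmb_add_region_flags(); returns -1 on failure like the patch. */
static int region_list_add(struct region_list *lst, struct region rgn)
{
	if (lst->count == lst->alloc) {
		/* Double the capacity, starting from 4 entries
		 * (LMB_ALIST_INITIAL_SIZE in the patch) */
		unsigned int grown = lst->alloc ? lst->alloc * 2 : 4;
		struct region *p = realloc(lst->data, grown * sizeof(*p));

		if (!p)
			return -1;
		lst->data = p;
		lst->alloc = grown;
	}
	lst->data[lst->count++] = rgn;

	return 0;
}
```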
```diff
diff --git a/lib/efi_loader/efi_dt_fixup.c b/lib/efi_loader/efi_dt_fixup.c
index 9886e6897cd..9d017804eea 100644
--- a/lib/efi_loader/efi_dt_fixup.c
+++ b/lib/efi_loader/efi_dt_fixup.c
@@ -172,7 +172,7 @@ efi_dt_fixup(struct efi_dt_fixup_protocol *this, void *dtb,
 	}
 
 	fdt_set_totalsize(dtb, *buffer_size);
-	if (image_setup_libfdt(&img, dtb, NULL)) {
+	if (image_setup_libfdt(&img, dtb, false)) {
 		log_err("failed to process device tree\n");
 		ret = EFI_INVALID_PARAMETER;
 		goto out;
diff --git a/lib/efi_loader/efi_helper.c b/lib/efi_loader/efi_helper.c
index 65d2116381a..9907b2b23cb 100644
--- a/lib/efi_loader/efi_helper.c
+++ b/lib/efi_loader/efi_helper.c
@@ -513,7 +513,7 @@ efi_status_t efi_install_fdt(void *fdt)
 		return EFI_OUT_OF_RESOURCES;
 	}
 
-	if (image_setup_libfdt(&img, fdt, NULL)) {
+	if (image_setup_libfdt(&img, fdt, false)) {
 		log_err("ERROR: failed to process device tree\n");
 		return EFI_LOAD_ERROR;
 	}
diff --git a/lib/lmb.c b/lib/lmb.c
index 4d39c0d1f90..0aa2d8bd3b1 100644
--- a/lib/lmb.c
+++ b/lib/lmb.c
@@ -6,6 +6,7 @@
  * Copyright (C) 2001 Peter Bergner.
  */
 
+#include <alist.h>
 #include <efi_loader.h>
 #include <image.h>
 #include <mapmem.h>
@@ -15,41 +16,46 @@
 #include <asm/global_data.h>
 #include <asm/sections.h>
+#include <linux/kernel.h>
 
 DECLARE_GLOBAL_DATA_PTR;
 
 #define LMB_ALLOC_ANYWHERE 0
+#define LMB_ALIST_INITIAL_SIZE 4
 
-static void lmb_dump_region(struct lmb_region *rgn, char *name)
+static struct lmb lmb;
+
+static void lmb_dump_region(struct alist *lmb_rgn_lst, char *name)
 {
+	struct lmb_region *rgn = lmb_rgn_lst->data;
 	unsigned long long base, size, end;
 	enum lmb_flags flags;
 	int i;
 
-	printf(" %s.cnt = 0x%lx / max = 0x%lx\n", name, rgn->cnt, rgn->max);
+	printf(" %s.count = 0x%x\n", name, lmb_rgn_lst->count);
 
-	for (i = 0; i < rgn->cnt; i++) {
-		base = rgn->region[i].base;
-		size = rgn->region[i].size;
+	for (i = 0; i < lmb_rgn_lst->count; i++) {
+		base = rgn[i].base;
+		size = rgn[i].size;
 		end = base + size - 1;
-		flags = rgn->region[i].flags;
+		flags = rgn[i].flags;
 
 		printf(" %s[%d]\t[0x%llx-0x%llx], 0x%08llx bytes flags: %x\n",
 		       name, i, base, end, size, flags);
 	}
 }
 
-void lmb_dump_all_force(struct lmb *lmb)
+void lmb_dump_all_force(void)
 {
 	printf("lmb_dump_all:\n");
-	lmb_dump_region(&lmb->memory, "memory");
-	lmb_dump_region(&lmb->reserved, "reserved");
+	lmb_dump_region(&lmb.free_mem, "memory");
+	lmb_dump_region(&lmb.used_mem, "reserved");
 }
 
-void lmb_dump_all(struct lmb *lmb)
+void lmb_dump_all(void)
 {
 #ifdef DEBUG
-	lmb_dump_all_force(lmb);
+	lmb_dump_all_force();
 #endif
 }
@@ -73,79 +79,74 @@ static long lmb_addrs_adjacent(phys_addr_t base1, phys_size_t size1,
 	return 0;
 }
 
-static long lmb_regions_overlap(struct lmb_region *rgn, unsigned long r1,
+static long lmb_regions_overlap(struct alist *lmb_rgn_lst, unsigned long r1,
 				unsigned long r2)
 {
-	phys_addr_t base1 = rgn->region[r1].base;
-	phys_size_t size1 = rgn->region[r1].size;
-	phys_addr_t base2 = rgn->region[r2].base;
-	phys_size_t size2 = rgn->region[r2].size;
+	struct lmb_region *rgn = lmb_rgn_lst->data;
+
+	phys_addr_t base1 = rgn[r1].base;
+	phys_size_t size1 = rgn[r1].size;
+	phys_addr_t base2 = rgn[r2].base;
+	phys_size_t size2 = rgn[r2].size;
 
 	return lmb_addrs_overlap(base1, size1, base2, size2);
 }
-static long lmb_regions_adjacent(struct lmb_region *rgn, unsigned long r1,
+
+static long lmb_regions_adjacent(struct alist *lmb_rgn_lst, unsigned long r1,
 				 unsigned long r2)
 {
-	phys_addr_t base1 = rgn->region[r1].base;
-	phys_size_t size1 = rgn->region[r1].size;
-	phys_addr_t base2 = rgn->region[r2].base;
-	phys_size_t size2 = rgn->region[r2].size;
+	struct lmb_region *rgn = lmb_rgn_lst->data;
+
+	phys_addr_t base1 = rgn[r1].base;
+	phys_size_t size1 = rgn[r1].size;
+	phys_addr_t base2 = rgn[r2].base;
+	phys_size_t size2 = rgn[r2].size;
 
 	return lmb_addrs_adjacent(base1, size1, base2, size2);
 }
 
-static void lmb_remove_region(struct lmb_region *rgn, unsigned long r)
+static void lmb_remove_region(struct alist *lmb_rgn_lst, unsigned long r)
 {
 	unsigned long i;
+	struct lmb_region *rgn = lmb_rgn_lst->data;
 
-	for (i = r; i < rgn->cnt - 1; i++) {
-		rgn->region[i].base = rgn->region[i + 1].base;
-		rgn->region[i].size = rgn->region[i + 1].size;
-		rgn->region[i].flags = rgn->region[i + 1].flags;
+	for (i = r; i < lmb_rgn_lst->count - 1; i++) {
+		rgn[i].base = rgn[i + 1].base;
+		rgn[i].size = rgn[i + 1].size;
+		rgn[i].flags = rgn[i + 1].flags;
 	}
-	rgn->cnt--;
+	lmb_rgn_lst->count--;
 }
 
 /* Assumption: base addr of region 1 < base addr of region 2 */
-static void lmb_coalesce_regions(struct lmb_region *rgn, unsigned long r1,
+static void lmb_coalesce_regions(struct alist *lmb_rgn_lst, unsigned long r1,
 				 unsigned long r2)
 {
-	rgn->region[r1].size += rgn->region[r2].size;
-	lmb_remove_region(rgn, r2);
+	struct lmb_region *rgn = lmb_rgn_lst->data;
+
+	rgn[r1].size += rgn[r2].size;
+	lmb_remove_region(lmb_rgn_lst, r2);
 }
 
 /*Assumption : base addr of region 1 < base addr of region 2*/
-static void lmb_fix_over_lap_regions(struct lmb_region *rgn, unsigned long r1,
-				     unsigned long r2)
+static void lmb_fix_over_lap_regions(struct alist *lmb_rgn_lst,
+				     unsigned long r1, unsigned long r2)
 {
-	phys_addr_t base1 = rgn->region[r1].base;
-	phys_size_t size1 = rgn->region[r1].size;
-	phys_addr_t base2 = rgn->region[r2].base;
-	phys_size_t size2 = rgn->region[r2].size;
+	struct lmb_region *rgn = lmb_rgn_lst->data;
+
+	phys_addr_t base1 = rgn[r1].base;
+	phys_size_t size1 = rgn[r1].size;
+	phys_addr_t base2 = rgn[r2].base;
+	phys_size_t size2 = rgn[r2].size;
 
 	if (base1 + size1 > base2 + size2) {
 		printf("This will not be a case any time\n");
 		return;
 	}
-	rgn->region[r1].size = base2 + size2 - base1;
-	lmb_remove_region(rgn, r2);
+	rgn[r1].size = base2 + size2 - base1;
+	lmb_remove_region(lmb_rgn_lst, r2);
 }
 
-void lmb_init(struct lmb *lmb)
-{
-#if IS_ENABLED(CONFIG_LMB_USE_MAX_REGIONS)
-	lmb->memory.max = CONFIG_LMB_MAX_REGIONS;
-	lmb->reserved.max = CONFIG_LMB_MAX_REGIONS;
-#else
-	lmb->memory.max = CONFIG_LMB_MEMORY_REGIONS;
-	lmb->reserved.max = CONFIG_LMB_RESERVED_REGIONS;
-	lmb->memory.region = lmb->memory_regions;
-	lmb->reserved.region = lmb->reserved_regions;
-#endif
-	lmb->memory.cnt = 0;
-	lmb->reserved.cnt = 0;
-}
-
-void arch_lmb_reserve_generic(struct lmb *lmb, ulong sp, ulong end, ulong align)
+void arch_lmb_reserve_generic(ulong sp, ulong end, ulong align)
 {
 	ulong bank_end;
 	int bank;
@@ -171,10 +172,10 @@ void arch_lmb_reserve_generic(struct lmb *lmb, ulong sp, ulong end, ulong align)
 		if (bank_end > end)
 			bank_end = end - 1;
 
-		lmb_reserve(lmb, sp, bank_end - sp + 1);
+		lmb_reserve(sp, bank_end - sp + 1);
 
 		if (gd->flags & GD_FLG_SKIP_RELOC)
-			lmb_reserve(lmb, (phys_addr_t)(uintptr_t)_start, gd->mon_len);
+			lmb_reserve((phys_addr_t)(uintptr_t)_start, gd->mon_len);
 
 		break;
 	}
@@ -186,10 +187,9 @@ void arch_lmb_reserve_generic(struct lmb *lmb, ulong sp, ulong end, ulong align)
  * Add reservations for all EFI memory areas that are not
  * EFI_CONVENTIONAL_MEMORY.
  *
- * @lmb: lmb environment
  * Return: 0 on success, 1 on failure
  */
-static __maybe_unused int efi_lmb_reserve(struct lmb *lmb)
+static __maybe_unused int efi_lmb_reserve(void)
 {
 	struct efi_mem_desc *memmap = NULL, *map;
 	efi_uintn_t i, map_size = 0;
@@ -201,8 +201,7 @@ static __maybe_unused int efi_lmb_reserve(struct lmb *lmb)
 	for (i = 0, map = memmap; i < map_size / sizeof(*map); ++map, ++i) {
 		if (map->type != EFI_CONVENTIONAL_MEMORY) {
-			lmb_reserve_flags(lmb,
-					  map_to_sysmem((void *)(uintptr_t)
+			lmb_reserve_flags(map_to_sysmem((void *)(uintptr_t)
 					  map->physical_start),
 					  map->num_pages * EFI_PAGE_SIZE,
 					  map->type == EFI_RESERVED_MEMORY_TYPE
@@ -214,64 +213,69 @@ static __maybe_unused int efi_lmb_reserve(struct lmb *lmb)
 	return 0;
 }
 
-static void lmb_reserve_common(struct lmb *lmb, void *fdt_blob)
+static void lmb_reserve_common(void *fdt_blob)
 {
-	arch_lmb_reserve(lmb);
-	board_lmb_reserve(lmb);
+	arch_lmb_reserve();
+	board_lmb_reserve();
 
 	if (CONFIG_IS_ENABLED(OF_LIBFDT) && fdt_blob)
-		boot_fdt_add_mem_rsv_regions(lmb, fdt_blob);
+		boot_fdt_add_mem_rsv_regions(fdt_blob);
 
 	if (CONFIG_IS_ENABLED(EFI_LOADER))
-		efi_lmb_reserve(lmb);
+		efi_lmb_reserve();
 }
 
 /* Initialize the struct, add memory and call arch/board reserve functions */
-void lmb_init_and_reserve(struct lmb *lmb, struct bd_info *bd, void *fdt_blob)
+void lmb_init_and_reserve(struct bd_info *bd, void *fdt_blob)
 {
 	int i;
 
-	lmb_init(lmb);
-
 	for (i = 0; i < CONFIG_NR_DRAM_BANKS; i++) {
-		if (bd->bi_dram[i].size) {
-			lmb_add(lmb, bd->bi_dram[i].start,
-				bd->bi_dram[i].size);
-		}
+		if (bd->bi_dram[i].size)
+			lmb_add(bd->bi_dram[i].start, bd->bi_dram[i].size);
 	}
 
-	lmb_reserve_common(lmb, fdt_blob);
+	lmb_reserve_common(fdt_blob);
 }
 
 /* Initialize the struct, add memory and call arch/board reserve functions */
-void lmb_init_and_reserve_range(struct lmb *lmb, phys_addr_t base,
-				phys_size_t size, void *fdt_blob)
+void lmb_init_and_reserve_range(phys_addr_t base, phys_size_t size,
+				void *fdt_blob)
 {
-	lmb_init(lmb);
-	lmb_add(lmb, base, size);
-	lmb_reserve_common(lmb, fdt_blob);
+	lmb_add(base, size);
+	lmb_reserve_common(fdt_blob);
 }
 
-/* This routine called with relocation disabled. */
-static long lmb_add_region_flags(struct lmb_region *rgn, phys_addr_t base,
+/**
+ * lmb_add_region_flags() - Add an lmb region to the given list
+ * @lmb_rgn_lst: LMB list to which region is to be added(free/used)
+ * @base: Start address of the region
+ * @size: Size of the region to be added
+ * @flags: Attributes of the LMB region
+ *
+ * Add a region of memory to the list. If the region does not exist, add
+ * it to the list. Depending on the attributes of the region to be added,
+ * the function might resize an already existing region or coalesce two
+ * adjacent regions.
+ *
+ *
+ * Returns: 0 if the region addition successful, -1 on failure
+ */
+static long lmb_add_region_flags(struct alist *lmb_rgn_lst, phys_addr_t base,
 				 phys_size_t size, enum lmb_flags flags)
 {
 	unsigned long coalesced = 0;
 	long adjacent, i;
+	struct lmb_region *rgn = lmb_rgn_lst->data;
 
-	if (rgn->cnt == 0) {
-		rgn->region[0].base = base;
-		rgn->region[0].size = size;
-		rgn->region[0].flags = flags;
-		rgn->cnt = 1;
-		return 0;
-	}
+	if (alist_err(lmb_rgn_lst))
+		return -1;
 
 	/* First try and coalesce this LMB with another. */
-	for (i = 0; i < rgn->cnt; i++) {
-		phys_addr_t rgnbase = rgn->region[i].base;
-		phys_size_t rgnsize = rgn->region[i].size;
-		phys_size_t rgnflags = rgn->region[i].flags;
+	for (i = 0; i < lmb_rgn_lst->count; i++) {
+		phys_addr_t rgnbase = rgn[i].base;
+		phys_size_t rgnsize = rgn[i].size;
+		phys_size_t rgnflags = rgn[i].flags;
 		phys_addr_t end = base + size - 1;
 		phys_addr_t rgnend = rgnbase + rgnsize - 1;
+
 		if (rgnbase <= base && end <= rgnend) {
@@ -286,14 +290,14 @@ static long lmb_add_region_flags(struct lmb_region *rgn, phys_addr_t base,
 		if (adjacent > 0) {
 			if (flags != rgnflags)
 				break;
-			rgn->region[i].base -= size;
-			rgn->region[i].size += size;
+			rgn[i].base -= size;
+			rgn[i].size += size;
 			coalesced++;
 			break;
 		} else if (adjacent < 0) {
 			if (flags != rgnflags)
 				break;
-			rgn->region[i].size += size;
+			rgn[i].size += size;
 			coalesced++;
 			break;
 		} else if (lmb_addrs_overlap(base, size, rgnbase, rgnsize)) {
@@ -302,99 +306,98 @@ static long lmb_add_region_flags(struct lmb_region *rgn, phys_addr_t base,
 		}
 	}
 
-	if (i < rgn->cnt - 1 && rgn->region[i].flags == rgn->region[i + 1].flags) {
-		if (lmb_regions_adjacent(rgn, i, i + 1)) {
-			lmb_coalesce_regions(rgn, i, i + 1);
-			coalesced++;
-		} else if (lmb_regions_overlap(rgn, i, i + 1)) {
-			/* fix overlapping area */
-			lmb_fix_over_lap_regions(rgn, i, i + 1);
-			coalesced++;
+	if (lmb_rgn_lst->count && i < lmb_rgn_lst->count - 1) {
+		rgn = lmb_rgn_lst->data;
+		if (rgn[i].flags == rgn[i + 1].flags) {
+			if (lmb_regions_adjacent(lmb_rgn_lst, i, i + 1)) {
+				lmb_coalesce_regions(lmb_rgn_lst, i, i + 1);
+				coalesced++;
+			} else if (lmb_regions_overlap(lmb_rgn_lst, i, i + 1)) {
+				/* fix overlapping area */
+				lmb_fix_over_lap_regions(lmb_rgn_lst, i, i + 1);
+				coalesced++;
+			}
 		}
 	}
 
 	if (coalesced)
 		return coalesced;
-	if (rgn->cnt >= rgn->max)
+
+	if (alist_full(lmb_rgn_lst) &&
+	    !alist_expand_by(lmb_rgn_lst, lmb_rgn_lst->alloc))
 		return -1;
+	rgn = lmb_rgn_lst->data;
 
 	/* Couldn't coalesce the LMB, so add it to the sorted table. */
-	for (i = rgn->cnt-1; i >= 0; i--) {
-		if (base < rgn->region[i].base) {
-			rgn->region[i + 1].base = rgn->region[i].base;
-			rgn->region[i + 1].size = rgn->region[i].size;
-			rgn->region[i + 1].flags = rgn->region[i].flags;
+	for (i = lmb_rgn_lst->count; i >= 0; i--) {
+		if (i && base < rgn[i - 1].base) {
+			rgn[i] = rgn[i - 1];
 		} else {
-			rgn->region[i + 1].base = base;
-			rgn->region[i + 1].size = size;
-			rgn->region[i + 1].flags = flags;
+			rgn[i].base = base;
+			rgn[i].size = size;
+			rgn[i].flags = flags;
 			break;
 		}
 	}
 
-	if (base < rgn->region[0].base) {
-		rgn->region[0].base = base;
-		rgn->region[0].size = size;
-		rgn->region[0].flags = flags;
-	}
-
-	rgn->cnt++;
+	lmb_rgn_lst->count++;
 
 	return 0;
 }
 
-static long lmb_add_region(struct lmb_region *rgn, phys_addr_t base,
+static long lmb_add_region(struct alist *lmb_rgn_lst, phys_addr_t base,
 			   phys_size_t size)
 {
-	return lmb_add_region_flags(rgn, base, size, LMB_NONE);
+	return lmb_add_region_flags(lmb_rgn_lst, base, size, LMB_NONE);
 }
 
 /* This routine may be called with relocation disabled. */
-long lmb_add(struct lmb *lmb, phys_addr_t base, phys_size_t size)
+long lmb_add(phys_addr_t base, phys_size_t size)
 {
-	struct lmb_region *_rgn = &(lmb->memory);
+	struct alist *lmb_rgn_lst = &lmb.free_mem;
 
-	return lmb_add_region(_rgn, base, size);
+	return lmb_add_region(lmb_rgn_lst, base, size);
 }
 
-long lmb_free(struct lmb *lmb, phys_addr_t base, phys_size_t size)
+long lmb_free(phys_addr_t base, phys_size_t size)
 {
-	struct lmb_region *rgn = &(lmb->reserved);
+	struct lmb_region *rgn;
+	struct alist *lmb_rgn_lst = &lmb.used_mem;
 	phys_addr_t rgnbegin, rgnend;
 	phys_addr_t end = base + size - 1;
 	int i;
 
 	rgnbegin = rgnend = 0; /* supress gcc warnings */
-
+	rgn = lmb_rgn_lst->data;
 	/* Find the region where (base, size) belongs to */
-	for (i = 0; i < rgn->cnt; i++) {
-		rgnbegin = rgn->region[i].base;
-		rgnend = rgnbegin + rgn->region[i].size - 1;
+	for (i = 0; i < lmb_rgn_lst->count; i++) {
+		rgnbegin = rgn[i].base;
+		rgnend = rgnbegin + rgn[i].size - 1;
 
 		if ((rgnbegin <= base) && (end <= rgnend))
 			break;
 	}
 
 	/* Didn't find the region */
-	if (i == rgn->cnt)
+	if (i == lmb_rgn_lst->count)
 		return -1;
 
 	/* Check to see if we are removing entire region */
 	if ((rgnbegin == base) && (rgnend == end)) {
-		lmb_remove_region(rgn, i);
+		lmb_remove_region(lmb_rgn_lst, i);
 		return 0;
 	}
 
 	/* Check to see if region is matching at the front */
 	if (rgnbegin == base) {
-		rgn->region[i].base = end + 1;
-		rgn->region[i].size -= size;
+		rgn[i].base = end + 1;
+		rgn[i].size -= size;
 		return 0;
 	}
 
 	/* Check to see if the region is matching at the end */
 	if (rgnend == end) {
-		rgn->region[i].size -= size;
+		rgn[i].size -= size;
 		return 0;
 	}
@@ -402,37 +405,37 @@ long lmb_free(struct lmb *lmb, phys_addr_t base, phys_size_t size)
 	 * We need to split the entry - adjust the current one to the
 	 * beginging of the hole and add the region after hole.
 	 */
-	rgn->region[i].size = base - rgn->region[i].base;
-	return lmb_add_region_flags(rgn, end + 1, rgnend - end,
-				    rgn->region[i].flags);
+	rgn[i].size = base - rgn[i].base;
+	return lmb_add_region_flags(lmb_rgn_lst, end + 1, rgnend - end,
+				    rgn[i].flags);
 }
 
-long lmb_reserve_flags(struct lmb *lmb, phys_addr_t base, phys_size_t size,
-		       enum lmb_flags flags)
+long lmb_reserve_flags(phys_addr_t base, phys_size_t size, enum lmb_flags flags)
 {
-	struct lmb_region *_rgn = &(lmb->reserved);
+	struct alist *lmb_rgn_lst = &lmb.used_mem;
 
-	return lmb_add_region_flags(_rgn, base, size, flags);
+	return lmb_add_region_flags(lmb_rgn_lst, base, size, flags);
 }
 
-long lmb_reserve(struct lmb *lmb, phys_addr_t base, phys_size_t size)
+long lmb_reserve(phys_addr_t base, phys_size_t size)
 {
-	return lmb_reserve_flags(lmb, base, size, LMB_NONE);
+	return lmb_reserve_flags(base, size, LMB_NONE);
 }
 
-static long lmb_overlaps_region(struct lmb_region *rgn, phys_addr_t base,
+static long lmb_overlaps_region(struct alist *lmb_rgn_lst, phys_addr_t base,
 				phys_size_t size)
 {
 	unsigned long i;
+	struct lmb_region *rgn = lmb_rgn_lst->data;
 
-	for (i = 0; i < rgn->cnt; i++) {
-		phys_addr_t rgnbase = rgn->region[i].base;
-		phys_size_t rgnsize = rgn->region[i].size;
+	for (i = 0; i < lmb_rgn_lst->count; i++) {
+		phys_addr_t rgnbase = rgn[i].base;
+		phys_size_t rgnsize = rgn[i].size;
+
 		if (lmb_addrs_overlap(base, size, rgnbase, rgnsize))
 			break;
 	}
 
-	return (i < rgn->cnt) ? i : -1;
+	return (i < lmb_rgn_lst->count) ? i : -1;
 }
 
 static phys_addr_t lmb_align_down(phys_addr_t addr, phys_size_t size)
@@ -440,16 +443,18 @@ static phys_addr_t lmb_align_down(phys_addr_t addr, phys_size_t size)
 	return addr & ~(size - 1);
 }
 
-static phys_addr_t __lmb_alloc_base(struct lmb *lmb, phys_size_t size,
-				    ulong align, phys_addr_t max_addr)
+static phys_addr_t __lmb_alloc_base(phys_size_t size, ulong align,
+				    phys_addr_t max_addr)
 {
 	long i, rgn;
 	phys_addr_t base = 0;
 	phys_addr_t res_base;
+	struct lmb_region *lmb_used = lmb.used_mem.data;
+	struct lmb_region *lmb_memory = lmb.free_mem.data;
 
-	for (i = lmb->memory.cnt - 1; i >= 0; i--) {
-		phys_addr_t lmbbase = lmb->memory.region[i].base;
-		phys_size_t lmbsize = lmb->memory.region[i].size;
+	for (i = lmb.free_mem.count - 1; i >= 0; i--) {
+		phys_addr_t lmbbase = lmb_memory[i].base;
+		phys_size_t lmbsize = lmb_memory[i].size;
 
 		if (lmbsize < size)
 			continue;
@@ -465,15 +470,16 @@ static phys_addr_t __lmb_alloc_base(struct lmb *lmb, phys_size_t size,
 			continue;
 
 		while (base && lmbbase <= base) {
-			rgn = lmb_overlaps_region(&lmb->reserved, base, size);
+			rgn = lmb_overlaps_region(&lmb.used_mem, base, size);
 			if (rgn < 0) {
 				/* This area isn't reserved, take it */
-				if (lmb_add_region(&lmb->reserved, base,
+				if (lmb_add_region(&lmb.used_mem, base,
 						   size) < 0)
 					return 0;
 				return base;
 			}
-			res_base = lmb->reserved.region[rgn].base;
+
+			res_base = lmb_used[rgn].base;
 			if (res_base < size)
 				break;
 			base = lmb_align_down(res_base - size, align);
@@ -482,16 +488,16 @@ static phys_addr_t __lmb_alloc_base(struct lmb *lmb, phys_size_t size,
 	return 0;
 }
 
-phys_addr_t lmb_alloc(struct lmb *lmb, phys_size_t size, ulong align)
+phys_addr_t lmb_alloc(phys_size_t size, ulong align)
 {
-	return lmb_alloc_base(lmb, size, align, LMB_ALLOC_ANYWHERE);
+	return lmb_alloc_base(size, align, LMB_ALLOC_ANYWHERE);
 }
 
-phys_addr_t lmb_alloc_base(struct lmb *lmb, phys_size_t size, ulong align, phys_addr_t max_addr)
+phys_addr_t lmb_alloc_base(phys_size_t size, ulong align, phys_addr_t max_addr)
 {
 	phys_addr_t alloc;
 
-	alloc = __lmb_alloc_base(lmb, size, align, max_addr);
+	alloc = __lmb_alloc_base(size, align, max_addr);
 
 	if (alloc == 0)
 		printf("ERROR: Failed to allocate 0x%lx bytes below 0x%lx.\n",
@@ -504,22 +510,23 @@ phys_addr_t lmb_alloc_base(struct lmb *lmb, phys_size_t size, ulong align, phys_
  * Try to allocate a specific address range: must be in defined memory but not
  * reserved
  */
-phys_addr_t lmb_alloc_addr(struct lmb *lmb, phys_addr_t base, phys_size_t size)
+phys_addr_t lmb_alloc_addr(phys_addr_t base, phys_size_t size)
 {
 	long rgn;
+	struct lmb_region *lmb_memory = lmb.free_mem.data;
 
 	/* Check if the requested address is in one of the memory regions */
-	rgn = lmb_overlaps_region(&lmb->memory, base, size);
+	rgn = lmb_overlaps_region(&lmb.free_mem, base, size);
 	if (rgn >= 0) {
 		/*
 		 * Check if the requested end address is in the same memory
 		 * region we found.
 		 */
-		if (lmb_addrs_overlap(lmb->memory.region[rgn].base,
-				      lmb->memory.region[rgn].size,
+		if (lmb_addrs_overlap(lmb_memory[rgn].base,
+				      lmb_memory[rgn].size,
 				      base + size - 1, 1)) {
 			/* ok, reserve the memory */
-			if (lmb_reserve(lmb, base, size) >= 0)
+			if (lmb_reserve(base, size) >= 0)
 				return base;
 		}
 	}
@@ -527,51 +534,126 @@ phys_addr_t lmb_alloc_addr(struct lmb *lmb, phys_addr_t base, phys_size_t size)
 }
 
 /* Return number of bytes from a given address that are free */
-phys_size_t lmb_get_free_size(struct lmb *lmb, phys_addr_t addr)
+phys_size_t lmb_get_free_size(phys_addr_t addr)
 {
 	int i;
 	long rgn;
+	struct lmb_region *lmb_used = lmb.used_mem.data;
+	struct lmb_region *lmb_memory = lmb.free_mem.data;
 
 	/* check if the requested address is in the memory regions */
-	rgn = lmb_overlaps_region(&lmb->memory, addr, 1);
+	rgn = lmb_overlaps_region(&lmb.free_mem, addr, 1);
 	if (rgn >= 0) {
-		for (i = 0; i < lmb->reserved.cnt; i++) {
-			if (addr < lmb->reserved.region[i].base) {
+		for (i = 0; i < lmb.used_mem.count; i++) {
+			if (addr < lmb_used[i].base) {
 				/* first reserved range > requested address */
-				return lmb->reserved.region[i].base - addr;
+				return lmb_used[i].base - addr;
 			}
-			if (lmb->reserved.region[i].base +
-			    lmb->reserved.region[i].size > addr) {
+			if (lmb_used[i].base +
+			    lmb_used[i].size > addr) {
 				/* requested addr is in this reserved range */
 				return 0;
 			}
 		}
 		/* if we come here: no reserved ranges above requested addr */
-		return lmb->memory.region[lmb->memory.cnt - 1].base +
-		       lmb->memory.region[lmb->memory.cnt - 1].size - addr;
+		return lmb_memory[lmb.free_mem.count - 1].base +
+		       lmb_memory[lmb.free_mem.count - 1].size - addr;
 	}
 	return 0;
 }
 
-int lmb_is_reserved_flags(struct lmb *lmb, phys_addr_t addr, int flags)
+int lmb_is_reserved_flags(phys_addr_t addr, int flags)
 {
 	int i;
+	struct lmb_region *lmb_used = lmb.used_mem.data;
 
-	for (i = 0; i < lmb->reserved.cnt; i++) {
-		phys_addr_t upper = lmb->reserved.region[i].base +
-			lmb->reserved.region[i].size - 1;
-		if ((addr >= lmb->reserved.region[i].base) && (addr <= upper))
-			return (lmb->reserved.region[i].flags & flags) == flags;
+	for (i = 0; i < lmb.used_mem.count; i++) {
+		phys_addr_t upper = lmb_used[i].base +
+			lmb_used[i].size - 1;
+		if (addr >= lmb_used[i].base && addr <= upper)
+			return (lmb_used[i].flags & flags) == flags;
 	}
 	return 0;
 }
 
-__weak void board_lmb_reserve(struct lmb *lmb)
+__weak void board_lmb_reserve(void)
 {
 	/* please define platform specific board_lmb_reserve() */
 }
 
-__weak void arch_lmb_reserve(struct lmb *lmb)
+__weak void arch_lmb_reserve(void)
 {
 	/* please define platform specific arch_lmb_reserve() */
 }
+
+static int lmb_setup(void)
+{
+	bool ret;
+
+	ret = alist_init(&lmb.free_mem, sizeof(struct lmb_region),
+			 (uint)LMB_ALIST_INITIAL_SIZE);
+	if (!ret) {
+		log_debug("Unable to initialise the list for LMB free memory\n");
+		return -ENOMEM;
+	}
+
+	ret = alist_init(&lmb.used_mem, sizeof(struct lmb_region),
+			 (uint)LMB_ALIST_INITIAL_SIZE);
+	if (!ret) {
+		log_debug("Unable to initialise the list for LMB used memory\n");
+		return -ENOMEM;
+	}
+
+	return 0;
+}
+
+/**
+ * lmb_init() - Initialise the LMB module
+ *
+ * Initialise the LMB lists needed for keeping the memory map. There
+ * are two lists, in form of alloced list data structure. One for the
+ * available memory, and one for the used memory. Initialise the two
+ * lists as part of board init. Add memory to the available memory
+ * list and reserve common areas by adding them to the used memory
+ * list.
+ *
+ * Return: 0 on success, -ve on error
+ */
+int lmb_init(void)
+{
+	int ret;
+
+	ret = lmb_setup();
+	if (ret) {
+		log_info("Unable to init LMB\n");
+		return ret;
+	}
+
+	return 0;
+}
+
+#if CONFIG_IS_ENABLED(UNIT_TEST)
+struct lmb *lmb_get(void)
+{
+	return &lmb;
+}
+
+int lmb_push(struct lmb *store)
+{
+	int ret;
+
+	*store = lmb;
+	ret = lmb_setup();
+	if (ret)
+		return ret;
+
+	return 0;
+}
+
+void lmb_pop(struct lmb *store)
+{
+	alist_uninit(&lmb.free_mem);
+	alist_uninit(&lmb.used_mem);
	lmb = *store;
+}
+#endif /* UNIT_TEST */
```
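The `lmb_push()`/`lmb_pop()` pair at the end of the diff is what the `[sjg: Use a stack to store pointer of lmb struct when running lmb tests]` note refers to: a test can save the live map, run against fresh lists, then restore. The sketch below shows how a test might use it; the test function itself is hypothetical, written in the style of U-Boot's `ut_assert` helpers and assuming an in-tree build:

```c
#include <lmb.h>
#include <test/ut.h>	/* U-Boot unit-test helpers */

/* Hypothetical test: exercise the allocator on a scratch map without
 * disturbing the global, persistent one. */
static int lib_test_lmb_scratch(struct unit_test_state *uts)
{
	struct lmb store;
	phys_addr_t addr;

	/* Save the live map; lmb_push() re-runs lmb_setup(), so the
	 * global free/used lists start out empty */
	ut_assertok(lmb_push(&store));

	/* Work entirely on the scratch map */
	lmb_add(0x40000000, 0x10000000);
	addr = lmb_alloc(0x1000, 0x1000);
	ut_assert(addr != 0);

	/* Free the scratch lists and restore the saved map */
	lmb_pop(&store);

	return 0;
}
```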