Diffstat (limited to 'lib')
-rw-r--r--	lib/Kconfig                   |  45
-rw-r--r--	lib/Makefile                  |   2
-rw-r--r--	lib/asm-offsets.c             |   2
-rw-r--r--	lib/crypto/Kconfig            |   2
-rw-r--r--	lib/efi_loader/Kconfig        |   1
-rw-r--r--	lib/efi_loader/efi_dt_fixup.c |   2
-rw-r--r--	lib/efi_loader/efi_helper.c   |   7
-rw-r--r--	lib/elf.c                     |   6
-rw-r--r--	lib/fwu_updates/fwu_mtd.c     |   5
-rw-r--r--	lib/lmb.c                     | 693
10 files changed, 471 insertions, 294 deletions
diff --git a/lib/Kconfig b/lib/Kconfig
index 2059219a120..5f282ecb543 100644
--- a/lib/Kconfig
+++ b/lib/Kconfig
@@ -403,7 +403,7 @@ config TRACE_EARLY_CALL_DEPTH_LIMIT
 config TRACE_EARLY_ADDR
 	hex "Address of early trace buffer in U-Boot"
 	depends on TRACE_EARLY
-	default 0x00100000
+	default 0x00200000
 	help
 	  Sets the address of the early trace buffer in U-Boot. This memory
 	  must be accessible before relocation.
@@ -1102,42 +1102,19 @@ config LMB
 	bool "Enable the logical memory blocks library (lmb)"
 	default y if ARC || ARM || M68K || MICROBLAZE || MIPS || \
 		     NIOS2 || PPC || RISCV || SANDBOX || SH || X86 || XTENSA
+	select ARCH_MISC_INIT if PPC
 	help
-	  Support the library logical memory blocks.
-
-config LMB_USE_MAX_REGIONS
-	bool "Use a common number of memory and reserved regions in lmb lib"
-	default y
-	help
-	  Define the number of supported memory regions in the library logical
-	  memory blocks.
-	  This feature allow to reduce the lmb library size by using compiler
-	  optimization when LMB_MEMORY_REGIONS == LMB_RESERVED_REGIONS.
-
-config LMB_MAX_REGIONS
-	int "Number of memory and reserved regions in lmb lib"
-	depends on LMB_USE_MAX_REGIONS
-	default 16
-	help
-	  Define the number of supported regions, memory and reserved, in the
-	  library logical memory blocks.
-
-config LMB_MEMORY_REGIONS
-	int "Number of memory regions in lmb lib"
-	depends on !LMB_USE_MAX_REGIONS
-	default 8
-	help
-	  Define the number of supported memory regions in the library logical
-	  memory blocks.
-	  The minimal value is CONFIG_NR_DRAM_BANKS.
+	  Support the library logical memory blocks. This will require
+	  a malloc() implementation for defining the data structures
+	  needed for maintaining the LMB memory map.
 
-config LMB_RESERVED_REGIONS
-	int "Number of reserved regions in lmb lib"
-	depends on !LMB_USE_MAX_REGIONS
-	default 8
+config SPL_LMB
+	bool "Enable LMB module for SPL"
+	depends on SPL && SPL_FRAMEWORK && SPL_SYS_MALLOC
 	help
-	  Define the number of supported reserved regions in the library logical
-	  memory blocks.
+	  Enable support for Logical Memory Block library routines in
+	  SPL. This will require a malloc() implementation for defining
+	  the data structures needed for maintaining the LMB memory map.
 
 config PHANDLE_CHECK_SEQ
 	bool "Enable phandle check while getting sequence number"
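
The Kconfig hunk above drops the build-time region-count tuning entirely: the map now grows from malloc(), and the lmb struct is no longer owned by the caller. A rough before/after sketch of the effect on calling code, assuming the new API introduced in the lib/lmb.c hunks below (address and size values illustrative):

	/* Before this series: the caller declared and sized the map */
	struct lmb lmb;
	lmb_init_and_reserve(&lmb, gd->bd, (void *)gd->fdt_blob);
	addr = lmb_alloc(&lmb, SZ_1M, SZ_4K);

	/* After: one library-private map, set up once via lmb_init() */
	addr = lmb_alloc(SZ_1M, SZ_4K);
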
diff --git a/lib/Makefile b/lib/Makefile
index 81b503ab526..d300249f57c 100644
--- a/lib/Makefile
+++ b/lib/Makefile
@@ -118,7 +118,7 @@ obj-$(CONFIG_$(SPL_TPL_)OF_LIBFDT) += fdtdec.o fdtdec_common.o
 obj-y += hang.o
 obj-y += linux_compat.o
 obj-y += linux_string.o
-obj-$(CONFIG_LMB) += lmb.o
+obj-$(CONFIG_$(SPL_TPL_)LMB) += lmb.o
 obj-y += membuff.o
 obj-$(CONFIG_REGEX) += slre.o
 obj-y += string.o
diff --git a/lib/asm-offsets.c b/lib/asm-offsets.c
index 4e2dbda9a71..b6bbcbf76ca 100644
--- a/lib/asm-offsets.c
+++ b/lib/asm-offsets.c
@@ -44,7 +44,9 @@ int main(void)
 	DEFINE(GD_NEW_GD, offsetof(struct global_data, new_gd));
 
+#if CONFIG_IS_ENABLED(ENV_SUPPORT)
 	DEFINE(GD_ENV_ADDR, offsetof(struct global_data, env_addr));
+#endif
 
 	return 0;
 }
diff --git a/lib/crypto/Kconfig b/lib/crypto/Kconfig
index 6e0656ad1c5..742f6d9d4ed 100644
--- a/lib/crypto/Kconfig
+++ b/lib/crypto/Kconfig
@@ -1,6 +1,6 @@
 menuconfig ASYMMETRIC_KEY_TYPE
 	bool "Asymmetric (public-key cryptographic) key Support"
-	depends on FIT_SIGNATURE
+	depends on FIT_SIGNATURE || RSA_VERIFY_WITH_PKEY
 	help
 	  This option provides support for a key type that holds the data for
 	  the asymmetric keys used for public key cryptographic operations such
diff --git a/lib/efi_loader/Kconfig b/lib/efi_loader/Kconfig
index 1179c31bb13..6ffefa9103f 100644
--- a/lib/efi_loader/Kconfig
+++ b/lib/efi_loader/Kconfig
@@ -364,7 +364,6 @@ endif
 
 config EFI_LOADER_BOUNCE_BUFFER
 	bool "EFI Applications use bounce buffers for DMA operations"
-	depends on ARM64
 	help
 	  Some hardware does not support DMA to full 64bit addresses. For this
 	  hardware we can create a bounce buffer so that payloads don't have to
diff --git a/lib/efi_loader/efi_dt_fixup.c b/lib/efi_loader/efi_dt_fixup.c
index 9886e6897cd..9d017804eea 100644
--- a/lib/efi_loader/efi_dt_fixup.c
+++ b/lib/efi_loader/efi_dt_fixup.c
@@ -172,7 +172,7 @@ efi_dt_fixup(struct efi_dt_fixup_protocol *this, void *dtb,
 	}
 
 	fdt_set_totalsize(dtb, *buffer_size);
-	if (image_setup_libfdt(&img, dtb, NULL)) {
+	if (image_setup_libfdt(&img, dtb, false)) {
 		log_err("failed to process device tree\n");
 		ret = EFI_INVALID_PARAMETER;
 		goto out;
diff --git a/lib/efi_loader/efi_helper.c b/lib/efi_loader/efi_helper.c
index 65d2116381a..96f847652ec 100644
--- a/lib/efi_loader/efi_helper.c
+++ b/lib/efi_loader/efi_helper.c
@@ -74,6 +74,7 @@ out:
  */
 struct efi_device_path *efi_get_dp_from_boot(const efi_guid_t *guid)
 {
+	struct efi_device_path *file_path = NULL;
 	struct efi_load_option lo;
 	void *var_value;
 	efi_uintn_t size;
@@ -92,11 +93,11 @@ struct efi_device_path *efi_get_dp_from_boot(const efi_guid_t *guid)
 	if (ret != EFI_SUCCESS)
 		goto err;
 
-	return efi_dp_from_lo(&lo, guid);
+	file_path = efi_dp_from_lo(&lo, guid);
 
 err:
 	free(var_value);
-	return NULL;
+	return file_path;
 }
 
 /**
@@ -513,7 +514,7 @@ efi_status_t efi_install_fdt(void *fdt)
 		return EFI_OUT_OF_RESOURCES;
 	}
 
-	if (image_setup_libfdt(&img, fdt, NULL)) {
+	if (image_setup_libfdt(&img, fdt, false)) {
 		log_err("ERROR: failed to process device tree\n");
 		return EFI_LOAD_ERROR;
 	}
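
The efi_get_dp_from_boot() hunk above is also a leak fix: the old success path returned before free(var_value), and every path through err: returned NULL. The rewrite funnels both outcomes through one exit. A minimal sketch of the same single-exit pattern (get_buffer() and parse() are hypothetical stand-ins):

	struct thing *get_thing(void)
	{
		struct thing *result = NULL;
		void *buf;

		buf = get_buffer();	/* hypothetical allocator */
		if (!buf)
			goto err;

		result = parse(buf);	/* hypothetical; may return NULL */
	err:
		free(buf);		/* runs on success and failure alike */
		return result;
	}
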
diff --git a/lib/elf.c b/lib/elf.c
index 28ec87b8e48..e767a42a3b3 100644
--- a/lib/elf.c
+++ b/lib/elf.c
@@ -86,7 +86,7 @@ unsigned long load_elf64_image_phdr(unsigned long addr)
 	phdr = (Elf64_Phdr *)(addr + (ulong)ehdr->e_phoff);
 
 	/* Load each program header */
-	for (i = 0; i < ehdr->e_phnum; ++i) {
+	for (i = 0; i < ehdr->e_phnum; ++i, ++phdr) {
 		void *dst = (void *)(ulong)phdr->p_paddr;
 		void *src = (void *)addr + phdr->p_offset;
 
@@ -103,7 +103,6 @@ unsigned long load_elf64_image_phdr(unsigned long addr)
 			       phdr->p_memsz - phdr->p_filesz);
 		flush_cache(rounddown((unsigned long)dst, ARCH_DMA_MINALIGN),
 			    roundup(phdr->p_memsz, ARCH_DMA_MINALIGN));
-		++phdr;
 	}
 
 	if (ehdr->e_machine == EM_PPC64 && (ehdr->e_flags &
@@ -205,7 +204,7 @@ unsigned long load_elf_image_phdr(unsigned long addr)
 	phdr = (Elf32_Phdr *)(addr + ehdr->e_phoff);
 
 	/* Load each program header */
-	for (i = 0; i < ehdr->e_phnum; ++i) {
+	for (i = 0; i < ehdr->e_phnum; ++i, ++phdr) {
 		void *dst = (void *)(uintptr_t)phdr->p_paddr;
 		void *src = (void *)addr + phdr->p_offset;
 
@@ -222,7 +221,6 @@ unsigned long load_elf_image_phdr(unsigned long addr)
 			       phdr->p_memsz - phdr->p_filesz);
 		flush_cache(rounddown((unsigned long)dst, ARCH_DMA_MINALIGN),
 			    roundup(phdr->p_memsz, ARCH_DMA_MINALIGN));
-		++phdr;
 	}
 
 	return ehdr->e_entry;
diff --git a/lib/fwu_updates/fwu_mtd.c b/lib/fwu_updates/fwu_mtd.c
index ccaba3f3115..c14203b9dd3 100644
--- a/lib/fwu_updates/fwu_mtd.c
+++ b/lib/fwu_updates/fwu_mtd.c
@@ -60,10 +60,7 @@ int fwu_mtd_get_alt_num(efi_guid_t *image_id, u8 *alt_num,
 	if (ret)
 		return -ENOENT;
 
-	nalt = 0;
-	list_for_each_entry(dfu, &dfu_list, list)
-		nalt++;
-
+	nalt = list_count_nodes(&dfu_list);
 	if (!nalt) {
 		log_warning("No entities in dfu_alt_info\n");
 		dfu_free_entities();
diff --git a/lib/lmb.c b/lib/lmb.c
index 44f98205310..3ed570fb29b 100644
--- a/lib/lmb.c
+++ b/lib/lmb.c
@@ -6,50 +6,72 @@
  * Copyright (C) 2001 Peter Bergner.
  */
 
+#include <alist.h>
 #include <efi_loader.h>
 #include <image.h>
 #include <mapmem.h>
 #include <lmb.h>
 #include <log.h>
 #include <malloc.h>
+#include <spl.h>
 #include <asm/global_data.h>
 #include <asm/sections.h>
+#include <linux/kernel.h>
+#include <linux/sizes.h>
 
 DECLARE_GLOBAL_DATA_PTR;
 
 #define LMB_ALLOC_ANYWHERE	0
+#define LMB_ALIST_INITIAL_SIZE	4
 
-static void lmb_dump_region(struct lmb_region *rgn, char *name)
+static struct lmb lmb;
+
+static void lmb_print_region_flags(enum lmb_flags flags)
+{
+	u64 bitpos;
+	const char *flag_str[] = { "none", "no-map", "no-overwrite" };
+
+	do {
+		bitpos = flags ? fls(flags) - 1 : 0;
+		printf("%s", flag_str[bitpos]);
+		flags &= ~(1ull << bitpos);
+		puts(flags ? ", " : "\n");
+	} while (flags);
+}
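
lmb_print_region_flags() above prints flag names from the highest set bit down, one fls() probe per iteration. A standalone mirror of that loop, runnable outside U-Boot (fls() replaced by a GCC/Clang builtin; flag bit positions assumed to match the flag_str[] indices):

	#include <stdio.h>

	static const char *const flag_str[] = { "none", "no-map", "no-overwrite" };

	/* 1-based index of the highest set bit, like U-Boot's fls() */
	static int fls_ull(unsigned long long x)
	{
		return x ? 64 - __builtin_clzll(x) : 0;
	}

	static void print_flags(unsigned long long flags)
	{
		int bitpos;

		do {
			bitpos = flags ? fls_ull(flags) - 1 : 0;
			printf("%s", flag_str[bitpos]);
			flags &= ~(1ull << bitpos);
			fputs(flags ? ", " : "\n", stdout);
		} while (flags);
	}

	int main(void)
	{
		print_flags(0);	/* prints "none" */
		print_flags(6);	/* prints "no-overwrite, no-map" */
		return 0;
	}
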
", " : "\n"); + } while (flags); +} + +static void lmb_dump_region(struct alist *lmb_rgn_lst, char *name) { + struct lmb_region *rgn = lmb_rgn_lst->data; unsigned long long base, size, end; enum lmb_flags flags; int i; - printf(" %s.cnt = 0x%lx / max = 0x%lx\n", name, rgn->cnt, rgn->max); + printf(" %s.count = 0x%x\n", name, lmb_rgn_lst->count); - for (i = 0; i < rgn->cnt; i++) { - base = rgn->region[i].base; - size = rgn->region[i].size; + for (i = 0; i < lmb_rgn_lst->count; i++) { + base = rgn[i].base; + size = rgn[i].size; end = base + size - 1; - flags = rgn->region[i].flags; + flags = rgn[i].flags; - printf(" %s[%d]\t[0x%llx-0x%llx], 0x%08llx bytes flags: %x\n", - name, i, base, end, size, flags); + printf(" %s[%d]\t[0x%llx-0x%llx], 0x%08llx bytes flags: ", + name, i, base, end, size); + lmb_print_region_flags(flags); } } -void lmb_dump_all_force(struct lmb *lmb) +void lmb_dump_all_force(void) { printf("lmb_dump_all:\n"); - lmb_dump_region(&lmb->memory, "memory"); - lmb_dump_region(&lmb->reserved, "reserved"); + lmb_dump_region(&lmb.free_mem, "memory"); + lmb_dump_region(&lmb.used_mem, "reserved"); } -void lmb_dump_all(struct lmb *lmb) +void lmb_dump_all(void) { #ifdef DEBUG - lmb_dump_all_force(lmb); + lmb_dump_all_force(); #endif } @@ -73,111 +95,71 @@ static long lmb_addrs_adjacent(phys_addr_t base1, phys_size_t size1, return 0; } -static long lmb_regions_overlap(struct lmb_region *rgn, unsigned long r1, +static long lmb_regions_overlap(struct alist *lmb_rgn_lst, unsigned long r1, unsigned long r2) { - phys_addr_t base1 = rgn->region[r1].base; - phys_size_t size1 = rgn->region[r1].size; - phys_addr_t base2 = rgn->region[r2].base; - phys_size_t size2 = rgn->region[r2].size; + struct lmb_region *rgn = lmb_rgn_lst->data; + + phys_addr_t base1 = rgn[r1].base; + phys_size_t size1 = rgn[r1].size; + phys_addr_t base2 = rgn[r2].base; + phys_size_t size2 = rgn[r2].size; return lmb_addrs_overlap(base1, size1, base2, size2); } -static long lmb_regions_adjacent(struct lmb_region *rgn, unsigned long r1, + +static long lmb_regions_adjacent(struct alist *lmb_rgn_lst, unsigned long r1, unsigned long r2) { - phys_addr_t base1 = rgn->region[r1].base; - phys_size_t size1 = rgn->region[r1].size; - phys_addr_t base2 = rgn->region[r2].base; - phys_size_t size2 = rgn->region[r2].size; + struct lmb_region *rgn = lmb_rgn_lst->data; + + phys_addr_t base1 = rgn[r1].base; + phys_size_t size1 = rgn[r1].size; + phys_addr_t base2 = rgn[r2].base; + phys_size_t size2 = rgn[r2].size; return lmb_addrs_adjacent(base1, size1, base2, size2); } -static void lmb_remove_region(struct lmb_region *rgn, unsigned long r) +static void lmb_remove_region(struct alist *lmb_rgn_lst, unsigned long r) { unsigned long i; + struct lmb_region *rgn = lmb_rgn_lst->data; - for (i = r; i < rgn->cnt - 1; i++) { - rgn->region[i].base = rgn->region[i + 1].base; - rgn->region[i].size = rgn->region[i + 1].size; - rgn->region[i].flags = rgn->region[i + 1].flags; + for (i = r; i < lmb_rgn_lst->count - 1; i++) { + rgn[i].base = rgn[i + 1].base; + rgn[i].size = rgn[i + 1].size; + rgn[i].flags = rgn[i + 1].flags; } - rgn->cnt--; + lmb_rgn_lst->count--; } /* Assumption: base addr of region 1 < base addr of region 2 */ -static void lmb_coalesce_regions(struct lmb_region *rgn, unsigned long r1, +static void lmb_coalesce_regions(struct alist *lmb_rgn_lst, unsigned long r1, unsigned long r2) { - rgn->region[r1].size += rgn->region[r2].size; - lmb_remove_region(rgn, r2); + struct lmb_region *rgn = lmb_rgn_lst->data; + + rgn[r1].size += rgn[r2].size; + 
 
 /*Assumption : base addr of region 1 < base addr of region 2*/
-static void lmb_fix_over_lap_regions(struct lmb_region *rgn, unsigned long r1,
-				     unsigned long r2)
+static void lmb_fix_over_lap_regions(struct alist *lmb_rgn_lst,
+				     unsigned long r1, unsigned long r2)
 {
-	phys_addr_t base1 = rgn->region[r1].base;
-	phys_size_t size1 = rgn->region[r1].size;
-	phys_addr_t base2 = rgn->region[r2].base;
-	phys_size_t size2 = rgn->region[r2].size;
+	struct lmb_region *rgn = lmb_rgn_lst->data;
+
+	phys_addr_t base1 = rgn[r1].base;
+	phys_size_t size1 = rgn[r1].size;
+	phys_addr_t base2 = rgn[r2].base;
+	phys_size_t size2 = rgn[r2].size;
 
 	if (base1 + size1 > base2 + size2) {
 		printf("This will not be a case any time\n");
 		return;
 	}
-	rgn->region[r1].size = base2 + size2 - base1;
-	lmb_remove_region(rgn, r2);
-}
-
-void lmb_init(struct lmb *lmb)
-{
-#if IS_ENABLED(CONFIG_LMB_USE_MAX_REGIONS)
-	lmb->memory.max = CONFIG_LMB_MAX_REGIONS;
-	lmb->reserved.max = CONFIG_LMB_MAX_REGIONS;
-#else
-	lmb->memory.max = CONFIG_LMB_MEMORY_REGIONS;
-	lmb->reserved.max = CONFIG_LMB_RESERVED_REGIONS;
-	lmb->memory.region = lmb->memory_regions;
-	lmb->reserved.region = lmb->reserved_regions;
-#endif
-	lmb->memory.cnt = 0;
-	lmb->reserved.cnt = 0;
-}
-
-void arch_lmb_reserve_generic(struct lmb *lmb, ulong sp, ulong end, ulong align)
-{
-	ulong bank_end;
-	int bank;
-
-	/*
-	 * Reserve memory from aligned address below the bottom of U-Boot stack
-	 * until end of U-Boot area using LMB to prevent U-Boot from overwriting
-	 * that memory.
-	 */
-	debug("## Current stack ends at 0x%08lx ", sp);
-
-	/* adjust sp by 4K to be safe */
-	sp -= align;
-	for (bank = 0; bank < CONFIG_NR_DRAM_BANKS; bank++) {
-		if (!gd->bd->bi_dram[bank].size ||
-		    sp < gd->bd->bi_dram[bank].start)
-			continue;
-		/* Watch out for RAM at end of address space! */
-		bank_end = gd->bd->bi_dram[bank].start +
-			gd->bd->bi_dram[bank].size - 1;
-		if (sp > bank_end)
-			continue;
-		if (bank_end > end)
-			bank_end = end - 1;
-
-		lmb_reserve(lmb, sp, bank_end - sp + 1);
-
-		if (gd->flags & GD_FLG_SKIP_RELOC)
-			lmb_reserve(lmb, (phys_addr_t)(uintptr_t)_start, gd->mon_len);
-
-		break;
-	}
+	rgn[r1].size = base2 + size2 - base1;
+	lmb_remove_region(lmb_rgn_lst, r2);
 }
 
 /**
@@ -186,10 +168,9 @@ void arch_lmb_reserve_generic(struct lmb *lmb, ulong sp, ulong end, ulong align)
  * Add reservations for all EFI memory areas that are not
  * EFI_CONVENTIONAL_MEMORY.
  *
- * @lmb:	lmb environment
  * Return:	0 on success, 1 on failure
  */
-static __maybe_unused int efi_lmb_reserve(struct lmb *lmb)
+static __maybe_unused int efi_lmb_reserve(void)
 {
 	struct efi_mem_desc *memmap = NULL, *map;
 	efi_uintn_t i, map_size = 0;
@@ -201,8 +182,7 @@ static __maybe_unused int efi_lmb_reserve(struct lmb *lmb)
 
 	for (i = 0, map = memmap; i < map_size / sizeof(*map); ++map, ++i) {
 		if (map->type != EFI_CONVENTIONAL_MEMORY) {
-			lmb_reserve_flags(lmb,
-					  map_to_sysmem((void *)(uintptr_t)
+			lmb_reserve_flags(map_to_sysmem((void *)(uintptr_t)
 							map->physical_start),
 					  map->num_pages * EFI_PAGE_SIZE,
 					  map->type == EFI_RESERVED_MEMORY_TYPE
@@ -214,64 +194,199 @@ static __maybe_unused int efi_lmb_reserve(struct lmb *lmb)
 	return 0;
 }
 
-static void lmb_reserve_common(struct lmb *lmb, void *fdt_blob)
+static void lmb_reserve_uboot_region(void)
+{
+	int bank;
+	ulong end, bank_end;
+	phys_addr_t rsv_start;
+
+	rsv_start = gd->start_addr_sp - CONFIG_STACK_SIZE;
+	end = gd->ram_top;
+
+	/*
+	 * Reserve memory from aligned address below the bottom of U-Boot stack
+	 * until end of RAM area to prevent LMB from overwriting that memory.
+	 */
+	debug("## Current stack ends at 0x%08lx ", (ulong)rsv_start);
+
+	/* adjust sp by 16K to be safe */
+	rsv_start -= SZ_16K;
+	for (bank = 0; bank < CONFIG_NR_DRAM_BANKS; bank++) {
+		if (!gd->bd->bi_dram[bank].size ||
+		    rsv_start < gd->bd->bi_dram[bank].start)
+			continue;
+		/* Watch out for RAM at end of address space! */
+		bank_end = gd->bd->bi_dram[bank].start +
+			gd->bd->bi_dram[bank].size - 1;
+		if (rsv_start > bank_end)
+			continue;
+		if (bank_end > end)
+			bank_end = end - 1;
+
+		lmb_reserve_flags(rsv_start, bank_end - rsv_start + 1,
+				  LMB_NOOVERWRITE);
+
+		if (gd->flags & GD_FLG_SKIP_RELOC)
+			lmb_reserve_flags((phys_addr_t)(uintptr_t)_start,
+					  gd->mon_len, LMB_NOOVERWRITE);
+
+		break;
+	}
+}
+
+static void lmb_reserve_common(void *fdt_blob)
 {
-	arch_lmb_reserve(lmb);
-	board_lmb_reserve(lmb);
+	lmb_reserve_uboot_region();
 
 	if (CONFIG_IS_ENABLED(OF_LIBFDT) && fdt_blob)
-		boot_fdt_add_mem_rsv_regions(lmb, fdt_blob);
+		boot_fdt_add_mem_rsv_regions(fdt_blob);
 
 	if (CONFIG_IS_ENABLED(EFI_LOADER))
-		efi_lmb_reserve(lmb);
+		efi_lmb_reserve();
+}
+
+static __maybe_unused void lmb_reserve_common_spl(void)
+{
+	phys_addr_t rsv_start;
+	phys_size_t rsv_size;
+
+	/*
+	 * Assume a SPL stack of 16KB. This must be
+	 * more than enough for the SPL stage.
+	 */
+	if (IS_ENABLED(CONFIG_SPL_STACK_R_ADDR)) {
+		rsv_start = gd->start_addr_sp - 16384;
+		rsv_size = 16384;
+		lmb_reserve_flags(rsv_start, rsv_size, LMB_NOOVERWRITE);
+	}
+
+	if (IS_ENABLED(CONFIG_SPL_SEPARATE_BSS)) {
+		/* Reserve the bss region */
+		rsv_start = (phys_addr_t)(uintptr_t)__bss_start;
+		rsv_size = (phys_addr_t)(uintptr_t)__bss_end -
+			(phys_addr_t)(uintptr_t)__bss_start;
+		lmb_reserve_flags(rsv_start, rsv_size, LMB_NOOVERWRITE);
+	}
 }
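
lmb_reserve_uboot_region() above fences off everything from just below the stack to the top of usable RAM. A worked trace with illustrative values (gd->start_addr_sp = 0x8ff00000, CONFIG_STACK_SIZE = 0x100000, one bank at 0x80000000 of 256 MiB, gd->ram_top = 0x90000000):

	rsv_start = 0x8ff00000 - 0x100000;	/* 0x8fe00000, below the stack */
	rsv_start -= SZ_16K;			/* 0x8fdfc000, safety margin */
	bank_end = 0x80000000 + 0x10000000 - 1;	/* 0x8fffffff, within ram_top */
	/* reserves [0x8fdfc000, 0x8fffffff] as LMB_NOOVERWRITE */
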
 
-/* Initialize the struct, add memory and call arch/board reserve functions */
-void lmb_init_and_reserve(struct lmb *lmb, struct bd_info *bd, void *fdt_blob)
+/**
+ * lmb_add_memory() - Add memory range for LMB allocations
+ *
+ * Add the entire available memory range to the pool of memory that
+ * can be used by the LMB module for allocations.
+ *
+ * Return: None
+ */
+void lmb_add_memory(void)
 {
 	int i;
+	phys_size_t size;
+	phys_addr_t rgn_top;
+	u64 ram_top = gd->ram_top;
+	struct bd_info *bd = gd->bd;
 
-	lmb_init(lmb);
+	/* Assume a 4GB ram_top if not defined */
+	if (!ram_top)
+		ram_top = 0x100000000ULL;
 
 	for (i = 0; i < CONFIG_NR_DRAM_BANKS; i++) {
-		if (bd->bi_dram[i].size) {
-			lmb_add(lmb, bd->bi_dram[i].start,
-				bd->bi_dram[i].size);
+		size = bd->bi_dram[i].size;
+		if (size) {
+			if (bd->bi_dram[i].start > ram_top)
+				continue;
+
+			rgn_top = bd->bi_dram[i].start +
+				bd->bi_dram[i].size;
+
+			if (rgn_top > ram_top)
+				size -= rgn_top - ram_top;
+
+			lmb_add(bd->bi_dram[i].start, size);
 		}
 	}
-
-	lmb_reserve_common(lmb, fdt_blob);
 }
 
-/* Initialize the struct, add memory and call arch/board reserve functions */
-void lmb_init_and_reserve_range(struct lmb *lmb, phys_addr_t base,
-				phys_size_t size, void *fdt_blob)
+static long lmb_resize_regions(struct alist *lmb_rgn_lst,
+			       unsigned long idx_start,
+			       phys_addr_t base, phys_size_t size)
 {
-	lmb_init(lmb);
-	lmb_add(lmb, base, size);
-	lmb_reserve_common(lmb, fdt_blob);
+	phys_size_t rgnsize;
+	unsigned long rgn_cnt, idx, idx_end;
+	phys_addr_t rgnbase, rgnend;
+	phys_addr_t mergebase, mergeend;
+	struct lmb_region *rgn = lmb_rgn_lst->data;
+
+	rgn_cnt = 0;
+	idx = idx_start;
+	idx_end = idx_start;
+
+	/*
+	 * First thing to do is to identify how many regions
+	 * the requested region overlaps.
+	 * If the flags match, combine all these overlapping
+	 * regions into a single region, and remove the merged
+	 * regions.
+	 */
+	while (idx <= lmb_rgn_lst->count - 1) {
+		rgnbase = rgn[idx].base;
+		rgnsize = rgn[idx].size;
+
+		if (lmb_addrs_overlap(base, size, rgnbase,
+				      rgnsize)) {
+			if (rgn[idx].flags != LMB_NONE)
+				return -1;
+			rgn_cnt++;
+			idx_end = idx;
+		}
+		idx++;
+	}
+
+	/* The merged region's base and size */
+	rgnbase = rgn[idx_start].base;
+	mergebase = min(base, rgnbase);
+	rgnend = rgn[idx_end].base + rgn[idx_end].size;
+	mergeend = max(rgnend, (base + size));
+
+	rgn[idx_start].base = mergebase;
+	rgn[idx_start].size = mergeend - mergebase;
+
+	/* Now remove the merged regions */
+	while (--rgn_cnt)
+		lmb_remove_region(lmb_rgn_lst, idx_start + 1);
+
+	return 0;
 }
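
lmb_resize_regions() above merges every LMB_NONE entry the incoming range overlaps into the first one. A worked example with illustrative values, for a list holding [0x1000, 0x1fff] and [0x3000, 0x3fff] when a request for [0x1800, 0x37ff] arrives:

	/* both entries overlap the request -> rgn_cnt = 2, idx_end = 1 */
	mergebase = min(0x1800, 0x1000);			/* 0x1000 */
	mergeend  = max(0x3000 + 0x1000, 0x1800 + 0x2000);	/* 0x4000 */
	/* entry 0 becomes [0x1000, 0x3fff]; entry 1 is removed; any
	   overlapped entry with flags != LMB_NONE aborts with -1 instead */
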
 
-/* This routine called with relocation disabled. */
-static long lmb_add_region_flags(struct lmb_region *rgn, phys_addr_t base,
+/**
+ * lmb_add_region_flags() - Add an lmb region to the given list
+ * @lmb_rgn_lst: LMB list to which region is to be added(free/used)
+ * @base: Start address of the region
+ * @size: Size of the region to be added
+ * @flags: Attributes of the LMB region
+ *
+ * Add a region of memory to the list. If the region does not exist, add
+ * it to the list. Depending on the attributes of the region to be added,
+ * the function might resize an already existing region or coalesce two
+ * adjacent regions.
+ *
+ *
+ * Returns: 0 if the region addition successful, -1 on failure
+ */
+static long lmb_add_region_flags(struct alist *lmb_rgn_lst, phys_addr_t base,
 				 phys_size_t size, enum lmb_flags flags)
 {
 	unsigned long coalesced = 0;
-	long adjacent, i;
+	long ret, i;
+	struct lmb_region *rgn = lmb_rgn_lst->data;
 
-	if (rgn->cnt == 0) {
-		rgn->region[0].base = base;
-		rgn->region[0].size = size;
-		rgn->region[0].flags = flags;
-		rgn->cnt = 1;
-		return 0;
-	}
+	if (alist_err(lmb_rgn_lst))
+		return -1;
 
 	/* First try and coalesce this LMB with another. */
-	for (i = 0; i < rgn->cnt; i++) {
-		phys_addr_t rgnbase = rgn->region[i].base;
-		phys_size_t rgnsize = rgn->region[i].size;
-		phys_size_t rgnflags = rgn->region[i].flags;
+	for (i = 0; i < lmb_rgn_lst->count; i++) {
+		phys_addr_t rgnbase = rgn[i].base;
+		phys_size_t rgnsize = rgn[i].size;
+		phys_size_t rgnflags = rgn[i].flags;
 		phys_addr_t end = base + size - 1;
 		phys_addr_t rgnend = rgnbase + rgnsize - 1;
 		if (rgnbase <= base && end <= rgnend) {
@@ -282,119 +397,127 @@ static long lmb_add_region_flags(struct lmb_region *rgn, phys_addr_t base,
 			return -1; /* regions with new flags */
 		}
 
-		adjacent = lmb_addrs_adjacent(base, size, rgnbase, rgnsize);
-		if (adjacent > 0) {
+		ret = lmb_addrs_adjacent(base, size, rgnbase, rgnsize);
+		if (ret > 0) {
 			if (flags != rgnflags)
 				break;
-			rgn->region[i].base -= size;
-			rgn->region[i].size += size;
+			rgn[i].base -= size;
+			rgn[i].size += size;
 			coalesced++;
 			break;
-		} else if (adjacent < 0) {
+		} else if (ret < 0) {
 			if (flags != rgnflags)
 				break;
-			rgn->region[i].size += size;
+			rgn[i].size += size;
 			coalesced++;
 			break;
 		} else if (lmb_addrs_overlap(base, size, rgnbase, rgnsize)) {
-			/* regions overlap */
-			return -1;
+			if (flags == LMB_NONE) {
+				ret = lmb_resize_regions(lmb_rgn_lst, i, base,
+							 size);
+				if (ret < 0)
+					return -1;
+
+				coalesced++;
+				break;
+			} else {
+				return -1;
+			}
 		}
 	}
 
-	if (i < rgn->cnt - 1 && rgn->region[i].flags == rgn->region[i + 1].flags) {
-		if (lmb_regions_adjacent(rgn, i, i + 1)) {
-			lmb_coalesce_regions(rgn, i, i + 1);
-			coalesced++;
-		} else if (lmb_regions_overlap(rgn, i, i + 1)) {
-			/* fix overlapping area */
-			lmb_fix_over_lap_regions(rgn, i, i + 1);
-			coalesced++;
+	if (lmb_rgn_lst->count && i < lmb_rgn_lst->count - 1) {
+		rgn = lmb_rgn_lst->data;
+		if (rgn[i].flags == rgn[i + 1].flags) {
+			if (lmb_regions_adjacent(lmb_rgn_lst, i, i + 1)) {
+				lmb_coalesce_regions(lmb_rgn_lst, i, i + 1);
+				coalesced++;
+			} else if (lmb_regions_overlap(lmb_rgn_lst, i, i + 1)) {
+				/* fix overlapping area */
+				lmb_fix_over_lap_regions(lmb_rgn_lst, i, i + 1);
+				coalesced++;
+			}
 		}
 	}
 
 	if (coalesced)
 		return coalesced;
-	if (rgn->cnt >= rgn->max)
+
+	if (alist_full(lmb_rgn_lst) &&
+	    !alist_expand_by(lmb_rgn_lst, lmb_rgn_lst->alloc))
 		return -1;
+	rgn = lmb_rgn_lst->data;
 
 	/* Couldn't coalesce the LMB, so add it to the sorted table. */
-	for (i = rgn->cnt-1; i >= 0; i--) {
-		if (base < rgn->region[i].base) {
-			rgn->region[i + 1].base = rgn->region[i].base;
-			rgn->region[i + 1].size = rgn->region[i].size;
-			rgn->region[i + 1].flags = rgn->region[i].flags;
+	for (i = lmb_rgn_lst->count; i >= 0; i--) {
+		if (i && base < rgn[i - 1].base) {
+			rgn[i] = rgn[i - 1];
 		} else {
-			rgn->region[i + 1].base = base;
-			rgn->region[i + 1].size = size;
-			rgn->region[i + 1].flags = flags;
+			rgn[i].base = base;
+			rgn[i].size = size;
+			rgn[i].flags = flags;
 			break;
 		}
 	}
 
-	if (base < rgn->region[0].base) {
-		rgn->region[0].base = base;
-		rgn->region[0].size = size;
-		rgn->region[0].flags = flags;
-	}
-
-	rgn->cnt++;
+	lmb_rgn_lst->count++;
 
 	return 0;
 }
 
-static long lmb_add_region(struct lmb_region *rgn, phys_addr_t base,
+static long lmb_add_region(struct alist *lmb_rgn_lst, phys_addr_t base,
 			   phys_size_t size)
 {
-	return lmb_add_region_flags(rgn, base, size, LMB_NONE);
+	return lmb_add_region_flags(lmb_rgn_lst, base, size, LMB_NONE);
 }
 
 /* This routine may be called with relocation disabled. */
-long lmb_add(struct lmb *lmb, phys_addr_t base, phys_size_t size)
+long lmb_add(phys_addr_t base, phys_size_t size)
 {
-	struct lmb_region *_rgn = &(lmb->memory);
+	struct alist *lmb_rgn_lst = &lmb.free_mem;
 
-	return lmb_add_region(_rgn, base, size);
+	return lmb_add_region(lmb_rgn_lst, base, size);
 }
 
-long lmb_free(struct lmb *lmb, phys_addr_t base, phys_size_t size)
+long lmb_free(phys_addr_t base, phys_size_t size)
 {
-	struct lmb_region *rgn = &(lmb->reserved);
+	struct lmb_region *rgn;
+	struct alist *lmb_rgn_lst = &lmb.used_mem;
 	phys_addr_t rgnbegin, rgnend;
 	phys_addr_t end = base + size - 1;
 	int i;
 
 	rgnbegin = rgnend = 0; /* supress gcc warnings */
-
+	rgn = lmb_rgn_lst->data;
 	/* Find the region where (base, size) belongs to */
-	for (i = 0; i < rgn->cnt; i++) {
-		rgnbegin = rgn->region[i].base;
-		rgnend = rgnbegin + rgn->region[i].size - 1;
+	for (i = 0; i < lmb_rgn_lst->count; i++) {
+		rgnbegin = rgn[i].base;
+		rgnend = rgnbegin + rgn[i].size - 1;
 
 		if ((rgnbegin <= base) && (end <= rgnend))
 			break;
 	}
 
 	/* Didn't find the region */
-	if (i == rgn->cnt)
+	if (i == lmb_rgn_lst->count)
 		return -1;
 
 	/* Check to see if we are removing entire region */
 	if ((rgnbegin == base) && (rgnend == end)) {
-		lmb_remove_region(rgn, i);
+		lmb_remove_region(lmb_rgn_lst, i);
 		return 0;
 	}
 
 	/* Check to see if region is matching at the front */
 	if (rgnbegin == base) {
-		rgn->region[i].base = end + 1;
-		rgn->region[i].size -= size;
+		rgn[i].base = end + 1;
+		rgn[i].size -= size;
 		return 0;
 	}
 
 	/* Check to see if the region is matching at the end */
 	if (rgnend == end) {
-		rgn->region[i].size -= size;
+		rgn[i].size -= size;
 		return 0;
 	}
 
@@ -402,55 +525,37 @@ long lmb_free(struct lmb *lmb, phys_addr_t base, phys_size_t size)
 	 * We need to split the entry - adjust the current one to the
 	 * beginging of the hole and add the region after hole.
 	 */
-	rgn->region[i].size = base - rgn->region[i].base;
-	return lmb_add_region_flags(rgn, end + 1, rgnend - end,
-				    rgn->region[i].flags);
+	rgn[i].size = base - rgn[i].base;
+	return lmb_add_region_flags(lmb_rgn_lst, end + 1, rgnend - end,
+				    rgn[i].flags);
 }
 
-long lmb_reserve_flags(struct lmb *lmb, phys_addr_t base, phys_size_t size,
-		       enum lmb_flags flags)
+long lmb_reserve_flags(phys_addr_t base, phys_size_t size, enum lmb_flags flags)
 {
-	struct lmb_region *_rgn = &(lmb->reserved);
+	struct alist *lmb_rgn_lst = &lmb.used_mem;
 
-	return lmb_add_region_flags(_rgn, base, size, flags);
+	return lmb_add_region_flags(lmb_rgn_lst, base, size, flags);
 }
 
-long lmb_reserve(struct lmb *lmb, phys_addr_t base, phys_size_t size)
+long lmb_reserve(phys_addr_t base, phys_size_t size)
 {
-	return lmb_reserve_flags(lmb, base, size, LMB_NONE);
+	return lmb_reserve_flags(base, size, LMB_NONE);
 }
 
-static long lmb_overlaps_region(struct lmb_region *rgn, phys_addr_t base,
+static long lmb_overlaps_region(struct alist *lmb_rgn_lst, phys_addr_t base,
 				phys_size_t size)
 {
 	unsigned long i;
+	struct lmb_region *rgn = lmb_rgn_lst->data;
 
-	for (i = 0; i < rgn->cnt; i++) {
-		phys_addr_t rgnbase = rgn->region[i].base;
-		phys_size_t rgnsize = rgn->region[i].size;
+	for (i = 0; i < lmb_rgn_lst->count; i++) {
+		phys_addr_t rgnbase = rgn[i].base;
+		phys_size_t rgnsize = rgn[i].size;
 		if (lmb_addrs_overlap(base, size, rgnbase, rgnsize))
 			break;
 	}
 
-	return (i < rgn->cnt) ? i : -1;
-}
-
-phys_addr_t lmb_alloc(struct lmb *lmb, phys_size_t size, ulong align)
-{
-	return lmb_alloc_base(lmb, size, align, LMB_ALLOC_ANYWHERE);
-}
-
-phys_addr_t lmb_alloc_base(struct lmb *lmb, phys_size_t size, ulong align, phys_addr_t max_addr)
-{
-	phys_addr_t alloc;
-
-	alloc = __lmb_alloc_base(lmb, size, align, max_addr);
-
-	if (alloc == 0)
-		printf("ERROR: Failed to allocate 0x%lx bytes below 0x%lx.\n",
-		       (ulong)size, (ulong)max_addr);
-
-	return alloc;
+	return (i < lmb_rgn_lst->count) ? i : -1;
 }
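
lmb_free() above handles a partial free by splitting: the reservation keeps its front part and the tail is re-added with the original flags. A sketch with illustrative values:

	lmb_reserve(0x1000, 0x4000);	/* used list: [0x1000, 0x4fff] */
	lmb_free(0x2000, 0x1000);	/* free the middle of it */
	/* used list now: [0x1000, 0x1fff] and [0x3000, 0x4fff] */
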
 
 static phys_addr_t lmb_align_down(phys_addr_t addr, phys_size_t size)
@@ -458,15 +563,18 @@ static phys_addr_t lmb_align_down(phys_addr_t addr, phys_size_t size)
 	return addr & ~(size - 1);
 }
 
-phys_addr_t __lmb_alloc_base(struct lmb *lmb, phys_size_t size, ulong align, phys_addr_t max_addr)
+static phys_addr_t __lmb_alloc_base(phys_size_t size, ulong align,
+				    phys_addr_t max_addr, enum lmb_flags flags)
 {
 	long i, rgn;
 	phys_addr_t base = 0;
 	phys_addr_t res_base;
+	struct lmb_region *lmb_used = lmb.used_mem.data;
+	struct lmb_region *lmb_memory = lmb.free_mem.data;
 
-	for (i = lmb->memory.cnt - 1; i >= 0; i--) {
-		phys_addr_t lmbbase = lmb->memory.region[i].base;
-		phys_size_t lmbsize = lmb->memory.region[i].size;
+	for (i = lmb.free_mem.count - 1; i >= 0; i--) {
+		phys_addr_t lmbbase = lmb_memory[i].base;
+		phys_size_t lmbsize = lmb_memory[i].size;
 
 		if (lmbsize < size)
 			continue;
@@ -482,15 +590,16 @@ phys_addr_t __lmb_alloc_base(struct lmb *lmb, phys_size_t size, ulong align, phy
 			continue;
 
 		while (base && lmbbase <= base) {
-			rgn = lmb_overlaps_region(&lmb->reserved, base, size);
+			rgn = lmb_overlaps_region(&lmb.used_mem, base, size);
 			if (rgn < 0) {
 				/* This area isn't reserved, take it */
-				if (lmb_add_region(&lmb->reserved, base,
-						   size) < 0)
+				if (lmb_add_region_flags(&lmb.used_mem, base,
+							 size, flags) < 0)
 					return 0;
 				return base;
 			}
-			res_base = lmb->reserved.region[rgn].base;
+
+			res_base = lmb_used[rgn].base;
 			if (res_base < size)
 				break;
 			base = lmb_align_down(res_base - size, align);
@@ -499,83 +608,177 @@ phys_addr_t __lmb_alloc_base(struct lmb *lmb, phys_size_t size, ulong align, phy
 	return 0;
 }
 
-/*
- * Try to allocate a specific address range: must be in defined memory but not
- * reserved
- */
-phys_addr_t lmb_alloc_addr(struct lmb *lmb, phys_addr_t base, phys_size_t size)
+phys_addr_t lmb_alloc(phys_size_t size, ulong align)
+{
+	return lmb_alloc_base(size, align, LMB_ALLOC_ANYWHERE);
+}
+
+phys_addr_t lmb_alloc_base(phys_size_t size, ulong align, phys_addr_t max_addr)
+{
+	phys_addr_t alloc;
+
+	alloc = __lmb_alloc_base(size, align, max_addr, LMB_NONE);
+
+	if (alloc == 0)
+		printf("ERROR: Failed to allocate 0x%lx bytes below 0x%lx.\n",
+		       (ulong)size, (ulong)max_addr);
+
+	return alloc;
+}
+
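
__lmb_alloc_base() above searches each memory bank top-down, stepping below any reservation it collides with. A worked trace with illustrative values (one bank [0x80000000, +1 MiB), an existing reservation [0x800f0000, 0x800fffff], size = align = 0x10000, max_addr = LMB_ALLOC_ANYWHERE):

	/* candidate 1: align_down(0x80100000 - 0x10000) = 0x800f0000
	 *   -> overlaps the reservation, retry below its base
	 * candidate 2: align_down(0x800f0000 - 0x10000) = 0x800e0000
	 *   -> no overlap: added to the used list and returned */
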
+static phys_addr_t __lmb_alloc_addr(phys_addr_t base, phys_size_t size,
+				    enum lmb_flags flags)
 {
 	long rgn;
+	struct lmb_region *lmb_memory = lmb.free_mem.data;
 
 	/* Check if the requested address is in one of the memory regions */
-	rgn = lmb_overlaps_region(&lmb->memory, base, size);
+	rgn = lmb_overlaps_region(&lmb.free_mem, base, size);
 	if (rgn >= 0) {
 		/*
 		 * Check if the requested end address is in the same memory
 		 * region we found.
 		 */
-		if (lmb_addrs_overlap(lmb->memory.region[rgn].base,
-				      lmb->memory.region[rgn].size,
+		if (lmb_addrs_overlap(lmb_memory[rgn].base,
+				      lmb_memory[rgn].size,
 				      base + size - 1, 1)) {
 			/* ok, reserve the memory */
-			if (lmb_reserve(lmb, base, size) >= 0)
+			if (lmb_reserve_flags(base, size, flags) >= 0)
 				return base;
 		}
 	}
+
 	return 0;
 }
 
+/*
+ * Try to allocate a specific address range: must be in defined memory but not
+ * reserved
+ */
+phys_addr_t lmb_alloc_addr(phys_addr_t base, phys_size_t size)
+{
+	return __lmb_alloc_addr(base, size, LMB_NONE);
+}
+
 /* Return number of bytes from a given address that are free */
-phys_size_t lmb_get_free_size(struct lmb *lmb, phys_addr_t addr)
+phys_size_t lmb_get_free_size(phys_addr_t addr)
 {
 	int i;
 	long rgn;
+	struct lmb_region *lmb_used = lmb.used_mem.data;
+	struct lmb_region *lmb_memory = lmb.free_mem.data;
 
 	/* check if the requested address is in the memory regions */
-	rgn = lmb_overlaps_region(&lmb->memory, addr, 1);
+	rgn = lmb_overlaps_region(&lmb.free_mem, addr, 1);
 	if (rgn >= 0) {
-		for (i = 0; i < lmb->reserved.cnt; i++) {
-			if (addr < lmb->reserved.region[i].base) {
+		for (i = 0; i < lmb.used_mem.count; i++) {
+			if (addr < lmb_used[i].base) {
 				/* first reserved range > requested address */
-				return lmb->reserved.region[i].base - addr;
+				return lmb_used[i].base - addr;
 			}
-			if (lmb->reserved.region[i].base +
-			    lmb->reserved.region[i].size > addr) {
+			if (lmb_used[i].base +
+			    lmb_used[i].size > addr) {
 				/* requested addr is in this reserved range */
 				return 0;
 			}
 		}
 		/* if we come here: no reserved ranges above requested addr */
-		return lmb->memory.region[lmb->memory.cnt - 1].base +
-		       lmb->memory.region[lmb->memory.cnt - 1].size - addr;
+		return lmb_memory[lmb.free_mem.count - 1].base +
+		       lmb_memory[lmb.free_mem.count - 1].size - addr;
 	}
 	return 0;
 }
 
-int lmb_is_reserved_flags(struct lmb *lmb, phys_addr_t addr, int flags)
+int lmb_is_reserved_flags(phys_addr_t addr, int flags)
 {
 	int i;
+	struct lmb_region *lmb_used = lmb.used_mem.data;
 
-	for (i = 0; i < lmb->reserved.cnt; i++) {
-		phys_addr_t upper = lmb->reserved.region[i].base +
-			lmb->reserved.region[i].size - 1;
-		if ((addr >= lmb->reserved.region[i].base) && (addr <= upper))
-			return (lmb->reserved.region[i].flags & flags) == flags;
+	for (i = 0; i < lmb.used_mem.count; i++) {
+		phys_addr_t upper = lmb_used[i].base +
+			lmb_used[i].size - 1;
+		if (addr >= lmb_used[i].base && addr <= upper)
+			return (lmb_used[i].flags & flags) == flags;
 	}
 	return 0;
 }
 
-int lmb_is_reserved(struct lmb *lmb, phys_addr_t addr)
+static int lmb_setup(void)
 {
-	return lmb_is_reserved_flags(lmb, addr, LMB_NONE);
+	bool ret;
+
+	ret = alist_init(&lmb.free_mem, sizeof(struct lmb_region),
+			 (uint)LMB_ALIST_INITIAL_SIZE);
+	if (!ret) {
+		log_debug("Unable to initialise the list for LMB free memory\n");
+		return -ENOMEM;
+	}
+
+	ret = alist_init(&lmb.used_mem, sizeof(struct lmb_region),
+			 (uint)LMB_ALIST_INITIAL_SIZE);
+	if (!ret) {
+		log_debug("Unable to initialise the list for LMB used memory\n");
+		return -ENOMEM;
+	}
+
+	return 0;
 }
 
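The query helpers above can be exercised as follows, with illustrative values (memory [0x80000000, +1 MiB) and an LMB_NOOVERWRITE reservation at [0x80080000, 0x8008ffff]):

	lmb_get_free_size(0x80000000);	/* 0x80000: free up to the reservation */
	lmb_get_free_size(0x80085000);	/* 0: inside the reservation */
	lmb_is_reserved_flags(0x80085000, LMB_NOOVERWRITE);	/* 1 */
	lmb_is_reserved_flags(0x80085000, LMB_NOMAP);		/* 0: flag not set */
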
-__weak void board_lmb_reserve(struct lmb *lmb)
+/**
+ * lmb_init() - Initialise the LMB module
+ *
+ * Initialise the LMB lists needed for keeping the memory map. There
+ * are two lists, in form of alloced list data structure. One for the
+ * available memory, and one for the used memory. Initialise the two
+ * lists as part of board init. Add memory to the available memory
+ * list and reserve common areas by adding them to the used memory
+ * list.
+ *
+ * Return: 0 on success, -ve on error
+ */
+int lmb_init(void)
 {
-	/* please define platform specific board_lmb_reserve() */
+	int ret;
+
+	ret = lmb_setup();
+	if (ret) {
+		log_info("Unable to init LMB\n");
+		return ret;
+	}
+
+	lmb_add_memory();
+
+	/* Reserve the U-Boot image region once U-Boot has relocated */
+	if (spl_phase() == PHASE_SPL)
+		lmb_reserve_common_spl();
+	else if (spl_phase() == PHASE_BOARD_R)
+		lmb_reserve_common((void *)gd->fdt_blob);
+
+	return 0;
+}
+
+#if CONFIG_IS_ENABLED(UNIT_TEST)
+struct lmb *lmb_get(void)
+{
+	return &lmb;
+}
+
+int lmb_push(struct lmb *store)
+{
+	int ret;
+
+	*store = lmb;
+	ret = lmb_setup();
+	if (ret)
+		return ret;
+
+	return 0;
 }
 
-__weak void arch_lmb_reserve(struct lmb *lmb)
+void lmb_pop(struct lmb *store)
 {
-	/* please define platform specific arch_lmb_reserve() */
+	alist_uninit(&lmb.free_mem);
+	alist_uninit(&lmb.used_mem);
+	lmb = *store;
 }
+#endif /* UNIT_TEST */
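
The UNIT_TEST accessors at the end let a test swap in a scratch memory map and restore the real one afterwards. A sketch of the intended pattern (ram_base/ram_size and the assertion are illustrative):

	struct lmb store;

	if (lmb_push(&store))		/* save global map, start a fresh one */
		return -ENOMEM;

	lmb_add(ram_base, ram_size);	/* build a private map for the test */
	assert(lmb_alloc(0x1000, 0x1000) != 0);

	lmb_pop(&store);		/* drop the test map, restore the original */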