-rw-r--r--  arch/arc/lib/cache.c | 14
-rw-r--r--  arch/arm/lib/stack.c | 14
-rw-r--r--  arch/arm/mach-apple/board.c | 17
-rw-r--r--  arch/arm/mach-snapdragon/board.c | 17
-rw-r--r--  arch/arm/mach-stm32mp/dram_init.c | 33
-rw-r--r--  arch/arm/mach-stm32mp/include/mach/stm32mp.h | 11
-rw-r--r--  arch/arm/mach-stm32mp/stm32mp1/cpu.c | 7
-rw-r--r--  arch/arm/mach-stm32mp/stm32mp1/spl.c | 17
-rw-r--r--  arch/m68k/lib/bootm.c | 20
-rw-r--r--  arch/microblaze/lib/bootm.c | 14
-rw-r--r--  arch/mips/lib/bootm.c | 22
-rw-r--r--  arch/nios2/lib/bootm.c | 13
-rw-r--r--  arch/powerpc/cpu/mpc85xx/cpu_init.c | 16
-rw-r--r--  arch/powerpc/cpu/mpc85xx/mp.c | 4
-rw-r--r--  arch/powerpc/include/asm/mp.h | 4
-rw-r--r--  arch/powerpc/lib/Makefile | 1
-rw-r--r--  arch/powerpc/lib/bootm.c | 55
-rw-r--r--  arch/powerpc/lib/misc.c | 62
-rw-r--r--  arch/riscv/lib/bootm.c | 13
-rw-r--r--  arch/sandbox/cpu/spl.c | 13
-rw-r--r--  arch/sandbox/dts/test.dts | 2
-rw-r--r--  arch/sandbox/include/asm/test.h | 4
-rw-r--r--  arch/sh/lib/bootm.c | 13
-rw-r--r--  arch/x86/lib/bootm.c | 18
-rw-r--r--  arch/xtensa/lib/bootm.c | 13
-rw-r--r--  board/xilinx/common/board.c | 33
-rw-r--r--  boot/bootm.c | 38
-rw-r--r--  boot/bootm_os.c | 5
-rw-r--r--  boot/image-board.c | 36
-rw-r--r--  boot/image-fdt.c | 35
-rw-r--r--  cmd/bdinfo.c | 5
-rw-r--r--  cmd/booti.c | 2
-rw-r--r--  cmd/bootz.c | 2
-rw-r--r--  cmd/elf.c | 2
-rw-r--r--  cmd/load.c | 7
-rw-r--r--  common/board_r.c | 10
-rw-r--r--  common/spl/spl.c | 9
-rw-r--r--  configs/a3y17lte_defconfig | 1
-rw-r--r--  configs/a5y17lte_defconfig | 1
-rw-r--r--  configs/a7y17lte_defconfig | 1
-rw-r--r--  configs/apple_m1_defconfig | 1
-rw-r--r--  configs/mt7981_emmc_rfb_defconfig | 1
-rw-r--r--  configs/mt7981_rfb_defconfig | 1
-rw-r--r--  configs/mt7981_sd_rfb_defconfig | 1
-rw-r--r--  configs/mt7986_rfb_defconfig | 1
-rw-r--r--  configs/mt7986a_bpir3_emmc_defconfig | 1
-rw-r--r--  configs/mt7986a_bpir3_sd_defconfig | 1
-rw-r--r--  configs/mt7988_rfb_defconfig | 1
-rw-r--r--  configs/mt7988_sd_rfb_defconfig | 1
-rw-r--r--  configs/qcom_defconfig | 1
-rw-r--r--  configs/sandbox_noinst_defconfig | 1
-rw-r--r--  configs/sandbox_spl_defconfig | 3
-rw-r--r--  configs/stm32mp13_defconfig | 3
-rw-r--r--  configs/stm32mp15_basic_defconfig | 3
-rw-r--r--  configs/stm32mp15_defconfig | 3
-rw-r--r--  configs/stm32mp15_trusted_defconfig | 3
-rw-r--r--  configs/stm32mp25_defconfig | 3
-rw-r--r--  configs/th1520_lpi4a_defconfig | 1
-rw-r--r--  doc/arch/sandbox/sandbox.rst | 5
-rw-r--r--  drivers/iommu/apple_dart.c | 8
-rw-r--r--  drivers/iommu/sandbox_iommu.c | 35
-rw-r--r--  fs/fs.c | 10
-rw-r--r--  include/alist.h | 11
-rw-r--r--  include/image.h | 28
-rw-r--r--  include/lmb.h | 129
-rw-r--r--  lib/Kconfig | 45
-rw-r--r--  lib/Makefile | 2
-rw-r--r--  lib/efi_loader/efi_dt_fixup.c | 2
-rw-r--r--  lib/efi_loader/efi_helper.c | 2
-rw-r--r--  lib/lmb.c | 693
-rw-r--r--  net/tftp.c | 39
-rw-r--r--  net/wget.c | 9
-rw-r--r--  test/cmd/bdinfo.c | 41
-rw-r--r--  test/lib/lmb.c | 542
74 files changed, 1044 insertions, 1191 deletions
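
Taken together, these hunks remove the caller-owned `struct lmb` handle: the LMB region lists become persistent global state, initialised once via lmb_init() (from the board_r init sequence in U-Boot proper, or from SPL's board_init_r()), and every allocation, reservation and free call drops its first argument. A minimal caller-side sketch of the before/after, based on the board_late_init() hunks below (the variable name and sizes are taken from the mach-apple/mach-snapdragon changes):

	/* Before: each caller created and seeded its own short-lived LMB copy */
	struct lmb lmb;

	lmb_init_and_reserve(&lmb, gd->bd, (void *)gd->fdt_blob);
	status |= env_set_hex("kernel_addr_r", lmb_alloc(&lmb, SZ_128M, SZ_2M));

	/* After: LMB state is global and persistent; callers simply allocate */
	status |= env_set_hex("kernel_addr_r", lmb_alloc(SZ_128M, SZ_2M));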
diff --git a/arch/arc/lib/cache.c b/arch/arc/lib/cache.c
index 22e748868a7..5169fc627fa 100644
--- a/arch/arc/lib/cache.c
+++ b/arch/arc/lib/cache.c
@@ -10,7 +10,6 @@
#include <linux/compiler.h>
#include <linux/kernel.h>
#include <linux/log2.h>
-#include <lmb.h>
#include <asm/arcregs.h>
#include <asm/arc-bcr.h>
#include <asm/cache.h>
@@ -820,16 +819,3 @@ void sync_n_cleanup_cache_all(void)
__ic_entire_invalidate();
}
-
-static ulong get_sp(void)
-{
- ulong ret;
-
- asm("mov %0, sp" : "=r"(ret) : );
- return ret;
-}
-
-void arch_lmb_reserve(struct lmb *lmb)
-{
- arch_lmb_reserve_generic(lmb, get_sp(), gd->ram_top, 4096);
-}
diff --git a/arch/arm/lib/stack.c b/arch/arm/lib/stack.c
index ea1b937add7..2b21ec0734e 100644
--- a/arch/arm/lib/stack.c
+++ b/arch/arm/lib/stack.c
@@ -11,7 +11,6 @@
* Marius Groeger <mgroeger@sysgo.de>
*/
#include <init.h>
-#include <lmb.h>
#include <asm/global_data.h>
DECLARE_GLOBAL_DATA_PTR;
@@ -33,16 +32,3 @@ int arch_reserve_stacks(void)
return 0;
}
-
-static ulong get_sp(void)
-{
- ulong ret;
-
- asm("mov %0, sp" : "=r"(ret) : );
- return ret;
-}
-
-void arch_lmb_reserve(struct lmb *lmb)
-{
- arch_lmb_reserve_generic(lmb, get_sp(), gd->ram_top, 16384);
-}
diff --git a/arch/arm/mach-apple/board.c b/arch/arm/mach-apple/board.c
index 8bace3005eb..0b6d290b8ac 100644
--- a/arch/arm/mach-apple/board.c
+++ b/arch/arm/mach-apple/board.c
@@ -773,23 +773,20 @@ u64 get_page_table_size(void)
int board_late_init(void)
{
- struct lmb lmb;
u32 status = 0;
- lmb_init_and_reserve(&lmb, gd->bd, (void *)gd->fdt_blob);
-
/* somewhat based on the Linux Kernel boot requirements:
* align by 2M and maximal FDT size 2M
*/
- status |= env_set_hex("loadaddr", lmb_alloc(&lmb, SZ_1G, SZ_2M));
- status |= env_set_hex("fdt_addr_r", lmb_alloc(&lmb, SZ_2M, SZ_2M));
- status |= env_set_hex("kernel_addr_r", lmb_alloc(&lmb, SZ_128M, SZ_2M));
- status |= env_set_hex("ramdisk_addr_r", lmb_alloc(&lmb, SZ_1G, SZ_2M));
+ status |= env_set_hex("loadaddr", lmb_alloc(SZ_1G, SZ_2M));
+ status |= env_set_hex("fdt_addr_r", lmb_alloc(SZ_2M, SZ_2M));
+ status |= env_set_hex("kernel_addr_r", lmb_alloc(SZ_128M, SZ_2M));
+ status |= env_set_hex("ramdisk_addr_r", lmb_alloc(SZ_1G, SZ_2M));
status |= env_set_hex("kernel_comp_addr_r",
- lmb_alloc(&lmb, KERNEL_COMP_SIZE, SZ_2M));
+ lmb_alloc(KERNEL_COMP_SIZE, SZ_2M));
status |= env_set_hex("kernel_comp_size", KERNEL_COMP_SIZE);
- status |= env_set_hex("scriptaddr", lmb_alloc(&lmb, SZ_4M, SZ_2M));
- status |= env_set_hex("pxefile_addr_r", lmb_alloc(&lmb, SZ_4M, SZ_2M));
+ status |= env_set_hex("scriptaddr", lmb_alloc(SZ_4M, SZ_2M));
+ status |= env_set_hex("pxefile_addr_r", lmb_alloc(SZ_4M, SZ_2M));
if (status)
log_warning("late_init: Failed to set run time variables\n");
diff --git a/arch/arm/mach-snapdragon/board.c b/arch/arm/mach-snapdragon/board.c
index b439a19ec7e..22a7d2a637d 100644
--- a/arch/arm/mach-snapdragon/board.c
+++ b/arch/arm/mach-snapdragon/board.c
@@ -275,24 +275,21 @@ void __weak qcom_late_init(void)
#define KERNEL_COMP_SIZE SZ_64M
-#define addr_alloc(lmb, size) lmb_alloc(lmb, size, SZ_2M)
+#define addr_alloc(size) lmb_alloc(size, SZ_2M)
/* Stolen from arch/arm/mach-apple/board.c */
int board_late_init(void)
{
- struct lmb lmb;
u32 status = 0;
- lmb_init_and_reserve(&lmb, gd->bd, (void *)gd->fdt_blob);
-
/* We need to be fairly conservative here as we support boards with just 1G of TOTAL RAM */
- status |= env_set_hex("kernel_addr_r", addr_alloc(&lmb, SZ_128M));
- status |= env_set_hex("ramdisk_addr_r", addr_alloc(&lmb, SZ_128M));
- status |= env_set_hex("kernel_comp_addr_r", addr_alloc(&lmb, KERNEL_COMP_SIZE));
+ status |= env_set_hex("kernel_addr_r", addr_alloc(SZ_128M));
+ status |= env_set_hex("ramdisk_addr_r", addr_alloc(SZ_128M));
+ status |= env_set_hex("kernel_comp_addr_r", addr_alloc(KERNEL_COMP_SIZE));
status |= env_set_hex("kernel_comp_size", KERNEL_COMP_SIZE);
- status |= env_set_hex("scriptaddr", addr_alloc(&lmb, SZ_4M));
- status |= env_set_hex("pxefile_addr_r", addr_alloc(&lmb, SZ_4M));
- status |= env_set_hex("fdt_addr_r", addr_alloc(&lmb, SZ_2M));
+ status |= env_set_hex("scriptaddr", addr_alloc(SZ_4M));
+ status |= env_set_hex("pxefile_addr_r", addr_alloc(SZ_4M));
+ status |= env_set_hex("fdt_addr_r", addr_alloc(SZ_2M));
if (status)
log_warning("%s: Failed to set run time variables\n", __func__);
diff --git a/arch/arm/mach-stm32mp/dram_init.c b/arch/arm/mach-stm32mp/dram_init.c
index 6024959b97e..198785353f1 100644
--- a/arch/arm/mach-stm32mp/dram_init.c
+++ b/arch/arm/mach-stm32mp/dram_init.c
@@ -14,9 +14,26 @@
#include <ram.h>
#include <asm/global_data.h>
#include <asm/system.h>
+#include <mach/stm32mp.h>
DECLARE_GLOBAL_DATA_PTR;
+int optee_get_reserved_memory(u32 *start, u32 *size)
+{
+ fdt_addr_t fdt_mem_size;
+ fdt_addr_t fdt_start;
+ ofnode node;
+
+ node = ofnode_path("/reserved-memory/optee");
+ if (!ofnode_valid(node))
+ return -ENOENT;
+
+ fdt_start = ofnode_get_addr_size(node, "reg", &fdt_mem_size);
+ *start = fdt_start;
+ *size = fdt_mem_size;
+ return (fdt_start < 0) ? fdt_start : 0;
+}
+
int dram_init(void)
{
struct ram_info ram;
@@ -45,9 +62,10 @@ int dram_init(void)
phys_addr_t board_get_usable_ram_top(phys_size_t total_size)
{
+ int ret;
phys_size_t size;
phys_addr_t reg;
- struct lmb lmb;
+ u32 optee_start, optee_size;
if (!total_size)
return gd->ram_top;
@@ -57,17 +75,10 @@ phys_addr_t board_get_usable_ram_top(phys_size_t total_size)
* if the effective available memory is bigger
*/
gd->ram_top = clamp_val(gd->ram_top, 0, SZ_4G - 1);
+ size = ALIGN(SZ_8M + CONFIG_SYS_MALLOC_LEN + total_size, MMU_SECTION_SIZE);
- /* found enough not-reserved memory to relocated U-Boot */
- lmb_init(&lmb);
- lmb_add(&lmb, gd->ram_base, gd->ram_top - gd->ram_base);
- boot_fdt_add_mem_rsv_regions(&lmb, (void *)gd->fdt_blob);
- /* add 8M for reserved memory for display, fdt, gd,... */
- size = ALIGN(SZ_8M + CONFIG_SYS_MALLOC_LEN + total_size, MMU_SECTION_SIZE),
- reg = lmb_alloc(&lmb, size, MMU_SECTION_SIZE);
-
- if (!reg)
- reg = gd->ram_top - size;
+ ret = optee_get_reserved_memory(&optee_start, &optee_size);
+ reg = (!ret ? optee_start : gd->ram_top) - size;
/* before relocation, mark the U-Boot memory as cacheable by default */
if (!(gd->flags & GD_FLG_RELOC))
diff --git a/arch/arm/mach-stm32mp/include/mach/stm32mp.h b/arch/arm/mach-stm32mp/include/mach/stm32mp.h
new file mode 100644
index 00000000000..506a42559b8
--- /dev/null
+++ b/arch/arm/mach-stm32mp/include/mach/stm32mp.h
@@ -0,0 +1,11 @@
+/* SPDX-License-Identifier: GPL-2.0+ OR BSD-3-Clause */
+/*
+ * Copyright (C) 2024, STMicroelectronics - All Rights Reserved
+ */
+
+#ifndef __MACH_STM32MP_H_
+#define __MACH_STM32MP_H_
+
+int optee_get_reserved_memory(u32 *start, u32 *size);
+
+#endif
diff --git a/arch/arm/mach-stm32mp/stm32mp1/cpu.c b/arch/arm/mach-stm32mp/stm32mp1/cpu.c
index 478c3efae73..64480da9f8d 100644
--- a/arch/arm/mach-stm32mp/stm32mp1/cpu.c
+++ b/arch/arm/mach-stm32mp/stm32mp1/cpu.c
@@ -30,8 +30,6 @@
*/
u8 early_tlb[PGTABLE_SIZE] __section(".data") __aligned(0x4000);
-struct lmb lmb;
-
u32 get_bootmode(void)
{
/* read bootmode from TAMP backup register */
@@ -80,7 +78,7 @@ void dram_bank_mmu_setup(int bank)
i < (start >> MMU_SECTION_SHIFT) + (size >> MMU_SECTION_SHIFT);
i++) {
option = DCACHE_DEFAULT_OPTION;
- if (use_lmb && lmb_is_reserved_flags(&lmb, i << MMU_SECTION_SHIFT, LMB_NOMAP))
+ if (use_lmb && lmb_is_reserved_flags(i << MMU_SECTION_SHIFT, LMB_NOMAP))
option = 0; /* INVALID ENTRY in TLB */
set_section_dcache(i, option);
}
@@ -143,9 +141,6 @@ int mach_cpu_init(void)
void enable_caches(void)
{
- /* parse device tree when data cache is still activated */
- lmb_init_and_reserve(&lmb, gd->bd, (void *)gd->fdt_blob);
-
/* I-cache is already enabled in start.S: icache_enable() not needed */
/* deactivate the data cache, early enabled in arch_cpu_init() */
diff --git a/arch/arm/mach-stm32mp/stm32mp1/spl.c b/arch/arm/mach-stm32mp/stm32mp1/spl.c
index 6eae5c2f557..9c4fafbf478 100644
--- a/arch/arm/mach-stm32mp/stm32mp1/spl.c
+++ b/arch/arm/mach-stm32mp/stm32mp1/spl.c
@@ -18,6 +18,7 @@
#include <asm/io.h>
#include <asm/arch/sys_proto.h>
#include <mach/tzc.h>
+#include <mach/stm32mp.h>
#include <linux/libfdt.h>
u32 spl_boot_device(void)
@@ -110,22 +111,6 @@ uint32_t stm32mp_get_dram_size(void)
return ram.size;
}
-static int optee_get_reserved_memory(uint32_t *start, uint32_t *size)
-{
- fdt_addr_t fdt_mem_size;
- fdt_addr_t fdt_start;
- ofnode node;
-
- node = ofnode_path("/reserved-memory/optee");
- if (!ofnode_valid(node))
- return -ENOENT;
-
- fdt_start = ofnode_get_addr_size(node, "reg", &fdt_mem_size);
- *start = fdt_start;
- *size = fdt_mem_size;
- return (fdt_start < 0) ? fdt_start : 0;
-}
-
#define CFG_SHMEM_SIZE 0x200000
#define STM32_TZC_NSID_ALL 0xffff
#define STM32_TZC_FILTER_ALL 3
diff --git a/arch/m68k/lib/bootm.c b/arch/m68k/lib/bootm.c
index f2d02e43765..3dcff8076e3 100644
--- a/arch/m68k/lib/bootm.c
+++ b/arch/m68k/lib/bootm.c
@@ -9,7 +9,6 @@
#include <command.h>
#include <env.h>
#include <image.h>
-#include <lmb.h>
#include <log.h>
#include <asm/global_data.h>
#include <u-boot/zlib.h>
@@ -27,21 +26,14 @@ DECLARE_GLOBAL_DATA_PTR;
#define LINUX_MAX_ENVS 256
#define LINUX_MAX_ARGS 256
-static ulong get_sp (void);
static void set_clocks_in_mhz (struct bd_info *kbd);
-void arch_lmb_reserve(struct lmb *lmb)
-{
- arch_lmb_reserve_generic(lmb, get_sp(), gd->ram_top, 1024);
-}
-
int do_bootm_linux(int flag, struct bootm_info *bmi)
{
struct bootm_headers *images = bmi->images;
int ret;
struct bd_info *kbd;
void (*kernel) (struct bd_info *, ulong, ulong, ulong, ulong);
- struct lmb *lmb = &images->lmb;
/*
* allow the PREP bootm subcommand, it is required for bootm to work
@@ -53,7 +45,7 @@ int do_bootm_linux(int flag, struct bootm_info *bmi)
return 1;
/* allocate space for kernel copy of board info */
- ret = boot_get_kbd (lmb, &kbd);
+ ret = boot_get_kbd(&kbd);
if (ret) {
puts("ERROR with allocation of kernel bd\n");
goto error;
@@ -89,16 +81,6 @@ error:
return 1;
}
-static ulong get_sp (void)
-{
- ulong sp;
-
- asm("movel %%a7, %%d0\n"
- "movel %%d0, %0\n": "=d"(sp): :"%d0");
-
- return sp;
-}
-
static void set_clocks_in_mhz (struct bd_info *kbd)
{
char *s;
diff --git a/arch/microblaze/lib/bootm.c b/arch/microblaze/lib/bootm.c
index cbe9d85aa91..4879a41aab3 100644
--- a/arch/microblaze/lib/bootm.c
+++ b/arch/microblaze/lib/bootm.c
@@ -15,7 +15,6 @@
#include <fdt_support.h>
#include <hang.h>
#include <image.h>
-#include <lmb.h>
#include <log.h>
#include <asm/cache.h>
#include <asm/global_data.h>
@@ -24,19 +23,6 @@
DECLARE_GLOBAL_DATA_PTR;
-static ulong get_sp(void)
-{
- ulong ret;
-
- asm("addik %0, r1, 0" : "=r"(ret) : );
- return ret;
-}
-
-void arch_lmb_reserve(struct lmb *lmb)
-{
- arch_lmb_reserve_generic(lmb, get_sp(), gd->ram_top, 4096);
-}
-
static void boot_jump_linux(struct bootm_headers *images, int flag)
{
void (*thekernel)(char *cmdline, ulong rd, ulong dt);
diff --git a/arch/mips/lib/bootm.c b/arch/mips/lib/bootm.c
index adb6b6cc229..87195100023 100644
--- a/arch/mips/lib/bootm.c
+++ b/arch/mips/lib/bootm.c
@@ -9,7 +9,6 @@
#include <env.h>
#include <image.h>
#include <fdt_support.h>
-#include <lmb.h>
#include <log.h>
#include <asm/addrspace.h>
#include <asm/global_data.h>
@@ -28,20 +27,6 @@ static char **linux_env;
static char *linux_env_p;
static int linux_env_idx;
-static ulong arch_get_sp(void)
-{
- ulong ret;
-
- __asm__ __volatile__("move %0, $sp" : "=r"(ret) : );
-
- return ret;
-}
-
-void arch_lmb_reserve(struct lmb *lmb)
-{
- arch_lmb_reserve_generic(lmb, arch_get_sp(), gd->ram_top, 4096);
-}
-
static void linux_cmdline_init(void)
{
linux_argc = 1;
@@ -225,9 +210,8 @@ static int boot_reloc_fdt(struct bootm_headers *images)
}
#if CONFIG_IS_ENABLED(MIPS_BOOT_FDT) && CONFIG_IS_ENABLED(OF_LIBFDT)
- boot_fdt_add_mem_rsv_regions(&images->lmb, images->ft_addr);
- return boot_relocate_fdt(&images->lmb, &images->ft_addr,
- &images->ft_len);
+ boot_fdt_add_mem_rsv_regions(images->ft_addr);
+ return boot_relocate_fdt(&images->ft_addr, &images->ft_len);
#else
return 0;
#endif
@@ -248,7 +232,7 @@ static int boot_setup_fdt(struct bootm_headers *images)
images->initrd_start = virt_to_phys((void *)images->initrd_start);
images->initrd_end = virt_to_phys((void *)images->initrd_end);
- return image_setup_libfdt(images, images->ft_addr, &images->lmb);
+ return image_setup_libfdt(images, images->ft_addr, true);
}
static void boot_prep_linux(struct bootm_headers *images)
diff --git a/arch/nios2/lib/bootm.c b/arch/nios2/lib/bootm.c
index ce939ff5e15..71319839ba2 100644
--- a/arch/nios2/lib/bootm.c
+++ b/arch/nios2/lib/bootm.c
@@ -64,16 +64,3 @@ int do_bootm_linux(int flag, struct bootm_info *bmi)
return 1;
}
-
-static ulong get_sp(void)
-{
- ulong ret;
-
- asm("mov %0, sp" : "=r"(ret) : );
- return ret;
-}
-
-void arch_lmb_reserve(struct lmb *lmb)
-{
- arch_lmb_reserve_generic(lmb, get_sp(), gd->ram_top, 4096);
-}
diff --git a/arch/powerpc/cpu/mpc85xx/cpu_init.c b/arch/powerpc/cpu/mpc85xx/cpu_init.c
index a7b805bc674..739d14f8002 100644
--- a/arch/powerpc/cpu/mpc85xx/cpu_init.c
+++ b/arch/powerpc/cpu/mpc85xx/cpu_init.c
@@ -940,22 +940,6 @@ int cpu_init_r(void)
return 0;
}
-#ifdef CONFIG_ARCH_MISC_INIT
-int arch_misc_init(void)
-{
- if (IS_ENABLED(CONFIG_FSL_CAAM)) {
- struct udevice *dev;
- int ret;
-
- ret = uclass_get_device_by_driver(UCLASS_MISC, DM_DRIVER_GET(caam_jr), &dev);
- if (ret)
- printf("Failed to initialize caam_jr: %d\n", ret);
- }
-
- return 0;
-}
-#endif
-
void arch_preboot_os(void)
{
u32 msr;
diff --git a/arch/powerpc/cpu/mpc85xx/mp.c b/arch/powerpc/cpu/mpc85xx/mp.c
index 03f801ebbb7..bed465cb2cb 100644
--- a/arch/powerpc/cpu/mpc85xx/mp.c
+++ b/arch/powerpc/cpu/mpc85xx/mp.c
@@ -408,11 +408,11 @@ static void plat_mp_up(unsigned long bootpg, unsigned int pagesize)
}
#endif
-void cpu_mp_lmb_reserve(struct lmb *lmb)
+void cpu_mp_lmb_reserve(void)
{
u32 bootpg = determine_mp_bootpg(NULL);
- lmb_reserve(lmb, bootpg, 4096);
+ lmb_reserve(bootpg, 4096);
}
void setup_mp(void)
diff --git a/arch/powerpc/include/asm/mp.h b/arch/powerpc/include/asm/mp.h
index 8dacd2781d4..b3f59be8406 100644
--- a/arch/powerpc/include/asm/mp.h
+++ b/arch/powerpc/include/asm/mp.h
@@ -6,10 +6,8 @@
#ifndef _ASM_MP_H_
#define _ASM_MP_H_
-#include <lmb.h>
-
void setup_mp(void);
-void cpu_mp_lmb_reserve(struct lmb *lmb);
+void cpu_mp_lmb_reserve(void);
u32 determine_mp_bootpg(unsigned int *pagesize);
int is_core_disabled(int nr);
diff --git a/arch/powerpc/lib/Makefile b/arch/powerpc/lib/Makefile
index bb819dcbb6c..ecc2aba8f3c 100644
--- a/arch/powerpc/lib/Makefile
+++ b/arch/powerpc/lib/Makefile
@@ -39,6 +39,7 @@ obj-y += cache.o
obj-y += extable.o
obj-y += interrupts.o
obj-$(CONFIG_CMD_KGDB) += kgdb.o
+obj-y += misc.o
obj-y += stack.o
obj-y += time.o
obj-y += traps.o
diff --git a/arch/powerpc/lib/bootm.c b/arch/powerpc/lib/bootm.c
index 61e08728dd4..dc44bf3ab3a 100644
--- a/arch/powerpc/lib/bootm.c
+++ b/arch/powerpc/lib/bootm.c
@@ -12,7 +12,6 @@
#include <cpu_func.h>
#include <env.h>
#include <init.h>
-#include <lmb.h>
#include <log.h>
#include <watchdog.h>
#include <command.h>
@@ -37,14 +36,9 @@
DECLARE_GLOBAL_DATA_PTR;
-static ulong get_sp (void);
extern void ft_fixup_num_cores(void *blob);
static void set_clocks_in_mhz (struct bd_info *kbd);
-#ifndef CFG_SYS_LINUX_LOWMEM_MAX_SIZE
-#define CFG_SYS_LINUX_LOWMEM_MAX_SIZE (768*1024*1024)
-#endif
-
static void boot_jump_linux(struct bootm_headers *images)
{
void (*kernel)(struct bd_info *, ulong r4, ulong r5, ulong r6,
@@ -116,41 +110,6 @@ static void boot_jump_linux(struct bootm_headers *images)
return;
}
-void arch_lmb_reserve(struct lmb *lmb)
-{
- phys_size_t bootm_size;
- ulong size, bootmap_base;
-
- bootmap_base = env_get_bootm_low();
- bootm_size = env_get_bootm_size();
-
-#ifdef DEBUG
- if (((u64)bootmap_base + bootm_size) >
- (CFG_SYS_SDRAM_BASE + (u64)gd->ram_size))
- puts("WARNING: bootm_low + bootm_size exceed total memory\n");
- if ((bootmap_base + bootm_size) > get_effective_memsize())
- puts("WARNING: bootm_low + bootm_size exceed eff. memory\n");
-#endif
-
- size = min(bootm_size, get_effective_memsize());
- size = min(size, (ulong)CFG_SYS_LINUX_LOWMEM_MAX_SIZE);
-
- if (size < bootm_size) {
- ulong base = bootmap_base + size;
- printf("WARNING: adjusting available memory from 0x%lx to 0x%llx\n",
- size, (unsigned long long)bootm_size);
- lmb_reserve(lmb, base, bootm_size - size);
- }
-
- arch_lmb_reserve_generic(lmb, get_sp(), gd->ram_top, 4096);
-
-#ifdef CONFIG_MP
- cpu_mp_lmb_reserve(lmb);
-#endif
-
- return;
-}
-
static void boot_prep_linux(struct bootm_headers *images)
{
#ifdef CONFIG_MP
@@ -166,7 +125,6 @@ static void boot_prep_linux(struct bootm_headers *images)
static int boot_cmdline_linux(struct bootm_headers *images)
{
ulong of_size = images->ft_len;
- struct lmb *lmb = &images->lmb;
ulong *cmd_start = &images->cmdline_start;
ulong *cmd_end = &images->cmdline_end;
@@ -174,7 +132,7 @@ static int boot_cmdline_linux(struct bootm_headers *images)
if (!of_size) {
/* allocate space and init command line */
- ret = boot_get_cmdline (lmb, cmd_start, cmd_end);
+ ret = boot_get_cmdline(cmd_start, cmd_end);
if (ret) {
puts("ERROR with allocation of cmdline\n");
return ret;
@@ -187,14 +145,13 @@ static int boot_cmdline_linux(struct bootm_headers *images)
static int boot_bd_t_linux(struct bootm_headers *images)
{
ulong of_size = images->ft_len;
- struct lmb *lmb = &images->lmb;
struct bd_info **kbd = &images->kbd;
int ret = 0;
if (!of_size) {
/* allocate space for kernel copy of board info */
- ret = boot_get_kbd (lmb, kbd);
+ ret = boot_get_kbd(kbd);
if (ret) {
puts("ERROR with allocation of kernel bd\n");
return ret;
@@ -252,14 +209,6 @@ int do_bootm_linux(int flag, struct bootm_info *bmi)
return 0;
}
-static ulong get_sp (void)
-{
- ulong sp;
-
- asm( "mr %0,1": "=r"(sp) : );
- return sp;
-}
-
static void set_clocks_in_mhz (struct bd_info *kbd)
{
char *s;
diff --git a/arch/powerpc/lib/misc.c b/arch/powerpc/lib/misc.c
new file mode 100644
index 00000000000..4cd23b3406d
--- /dev/null
+++ b/arch/powerpc/lib/misc.c
@@ -0,0 +1,62 @@
+// SPDX-License-Identifier: GPL-2.0+
+/*
+ * (C) Copyright 2024 Linaro Ltd.
+ */
+
+#include <image.h>
+#include <init.h>
+#include <lmb.h>
+
+#include <asm/mp.h>
+#include <dm/device.h>
+#include <dm/uclass.h>
+
+#ifndef CFG_SYS_LINUX_LOWMEM_MAX_SIZE
+#define CFG_SYS_LINUX_LOWMEM_MAX_SIZE (768 * 1024 * 1024)
+#endif
+
+int arch_misc_init(void)
+{
+ if (CONFIG_IS_ENABLED(CMD_BOOTM)) {
+ phys_size_t bootm_size;
+ ulong size, bootmap_base;
+
+ bootmap_base = env_get_bootm_low();
+ bootm_size = env_get_bootm_size();
+
+#ifdef DEBUG
+ if (((u64)bootmap_base + bootm_size) >
+ (CFG_SYS_SDRAM_BASE + (u64)gd->ram_size))
+ puts("WARNING: bootm_low + bootm_size exceed total memory\n");
+ if ((bootmap_base + bootm_size) > get_effective_memsize())
+ puts("WARNING: bootm_low + bootm_size exceed eff. memory\n");
+#endif
+
+ size = min(bootm_size, get_effective_memsize());
+ size = min(size, (ulong)CFG_SYS_LINUX_LOWMEM_MAX_SIZE);
+
+ if (size < bootm_size) {
+ ulong base = bootmap_base + size;
+
+ printf("WARNING: adjusting available memory from 0x%lx to 0x%llx\n",
+ size, (unsigned long long)bootm_size);
+ lmb_reserve(base, bootm_size - size);
+ }
+
+#ifdef CONFIG_MP
+ cpu_mp_lmb_reserve();
+#endif
+ }
+
+ if (IS_ENABLED(CONFIG_FSL_CAAM)) {
+ struct udevice *dev;
+ int ret;
+
+ ret = uclass_get_device_by_driver(UCLASS_MISC,
+ DM_DRIVER_GET(caam_jr), &dev);
+ if (ret)
+ printf("Failed to initialize caam_jr: %d\n", ret);
+ }
+
+ return 0;
+}
diff --git a/arch/riscv/lib/bootm.c b/arch/riscv/lib/bootm.c
index 13cbaaba682..82502972eec 100644
--- a/arch/riscv/lib/bootm.c
+++ b/arch/riscv/lib/bootm.c
@@ -133,16 +133,3 @@ int do_bootm_vxworks(int flag, struct bootm_info *bmi)
{
return do_bootm_linux(flag, bmi);
}
-
-static ulong get_sp(void)
-{
- ulong ret;
-
- asm("mv %0, sp" : "=r"(ret) : );
- return ret;
-}
-
-void arch_lmb_reserve(struct lmb *lmb)
-{
- arch_lmb_reserve_generic(lmb, get_sp(), gd->ram_top, 4096);
-}
diff --git a/arch/sandbox/cpu/spl.c b/arch/sandbox/cpu/spl.c
index cc46965b73b..1c33a520c64 100644
--- a/arch/sandbox/cpu/spl.c
+++ b/arch/sandbox/cpu/spl.c
@@ -127,6 +127,15 @@ static int load_from_image(struct spl_image_info *spl_image,
}
SPL_LOAD_IMAGE_METHOD("sandbox_image", 7, BOOT_DEVICE_BOARD, load_from_image);
+int dram_init_banksize(void)
+{
+ /* These are necessary so TFTP can use LMBs to check its load address */
+ gd->bd->bi_dram[0].start = gd->ram_base;
+ gd->bd->bi_dram[0].size = get_effective_memsize();
+
+ return 0;
+}
+
void spl_board_init(void)
{
struct sandbox_state *state = state_get_current();
@@ -134,10 +143,6 @@ void spl_board_init(void)
if (!CONFIG_IS_ENABLED(UNIT_TEST))
return;
- /* These are necessary so TFTP can use LMBs to check its load address */
- gd->bd->bi_dram[0].start = gd->ram_base;
- gd->bd->bi_dram[0].size = get_effective_memsize();
-
if (state->run_unittests) {
struct unit_test *tests = UNIT_TEST_ALL_START();
const int count = UNIT_TEST_ALL_COUNT();
diff --git a/arch/sandbox/dts/test.dts b/arch/sandbox/dts/test.dts
index 5fb5eac862e..8412506c17a 100644
--- a/arch/sandbox/dts/test.dts
+++ b/arch/sandbox/dts/test.dts
@@ -78,7 +78,7 @@
event_log: tcg_event_log {
no-map;
- reg = <(CFG_SYS_SDRAM_SIZE - 0x2000) 0x2000>;
+ reg = <(CFG_SYS_SDRAM_BASE + 0x100000) 0x2000>;
};
};
diff --git a/arch/sandbox/include/asm/test.h b/arch/sandbox/include/asm/test.h
index 17159f8d674..0e8d19ce232 100644
--- a/arch/sandbox/include/asm/test.h
+++ b/arch/sandbox/include/asm/test.h
@@ -49,6 +49,10 @@ struct unit_test_state;
#define PCI_EA_BAR2_MAGIC 0x72727272
#define PCI_EA_BAR4_MAGIC 0x74747474
+/* Used by the sandbox iommu driver */
+#define SANDBOX_IOMMU_DVA_ADDR 0x89abc000
+#define SANDBOX_IOMMU_PAGE_SIZE SZ_4K
+
enum {
SANDBOX_IRQN_PEND = 1, /* Interrupt number for 'pending' test */
};
diff --git a/arch/sh/lib/bootm.c b/arch/sh/lib/bootm.c
index e298d766b52..bb0f59e0aa2 100644
--- a/arch/sh/lib/bootm.c
+++ b/arch/sh/lib/bootm.c
@@ -101,16 +101,3 @@ int do_bootm_linux(int flag, struct bootm_info *bmi)
/* does not return */
return 1;
}
-
-static ulong get_sp(void)
-{
- ulong ret;
-
- asm("mov r15, %0" : "=r"(ret) : );
- return ret;
-}
-
-void arch_lmb_reserve(struct lmb *lmb)
-{
- arch_lmb_reserve_generic(lmb, get_sp(), gd->ram_top, 4096);
-}
diff --git a/arch/x86/lib/bootm.c b/arch/x86/lib/bootm.c
index 2c889bcd33c..55f581836df 100644
--- a/arch/x86/lib/bootm.c
+++ b/arch/x86/lib/bootm.c
@@ -253,21 +253,3 @@ int do_bootm_linux(int flag, struct bootm_info *bmi)
return boot_jump_linux(images);
}
-
-static ulong get_sp(void)
-{
- ulong ret;
-
-#if CONFIG_IS_ENABLED(X86_64)
- asm("mov %%rsp, %0" : "=r"(ret) : );
-#else
- asm("mov %%esp, %0" : "=r"(ret) : );
-#endif
-
- return ret;
-}
-
-void arch_lmb_reserve(struct lmb *lmb)
-{
- arch_lmb_reserve_generic(lmb, get_sp(), gd->ram_top, 4096);
-}
diff --git a/arch/xtensa/lib/bootm.c b/arch/xtensa/lib/bootm.c
index 1de06b7fb53..2958f207397 100644
--- a/arch/xtensa/lib/bootm.c
+++ b/arch/xtensa/lib/bootm.c
@@ -197,16 +197,3 @@ int do_bootm_linux(int flag, struct bootm_info *bmi)
return 1;
}
-
-static ulong get_sp(void)
-{
- ulong ret;
-
- asm("mov %0, a1" : "=r"(ret) : );
- return ret;
-}
-
-void arch_lmb_reserve(struct lmb *lmb)
-{
- arch_lmb_reserve_generic(lmb, get_sp(), gd->ram_top, 4096);
-}
diff --git a/board/xilinx/common/board.c b/board/xilinx/common/board.c
index 0b43407b9e9..3440402ab46 100644
--- a/board/xilinx/common/board.c
+++ b/board/xilinx/common/board.c
@@ -12,7 +12,6 @@
#include <image.h>
#include <init.h>
#include <jffs2/load_kernel.h>
-#include <lmb.h>
#include <log.h>
#include <asm/global_data.h>
#include <asm/sections.h>
@@ -665,38 +664,6 @@ int embedded_dtb_select(void)
}
#endif
-#if defined(CONFIG_LMB)
-
-#ifndef MMU_SECTION_SIZE
-#define MMU_SECTION_SIZE (1 * 1024 * 1024)
-#endif
-
-phys_addr_t board_get_usable_ram_top(phys_size_t total_size)
-{
- phys_size_t size;
- phys_addr_t reg;
- struct lmb lmb;
-
- if (!total_size)
- return gd->ram_top;
-
- if (!IS_ALIGNED((ulong)gd->fdt_blob, 0x8))
- panic("Not 64bit aligned DT location: %p\n", gd->fdt_blob);
-
- /* found enough not-reserved memory to relocated U-Boot */
- lmb_init(&lmb);
- lmb_add(&lmb, gd->ram_base, gd->ram_size);
- boot_fdt_add_mem_rsv_regions(&lmb, (void *)gd->fdt_blob);
- size = ALIGN(CONFIG_SYS_MALLOC_LEN + total_size, MMU_SECTION_SIZE);
- reg = lmb_alloc(&lmb, size, MMU_SECTION_SIZE);
-
- if (!reg)
- reg = gd->ram_top - size;
-
- return reg + size;
-}
-#endif
-
#ifdef CONFIG_OF_BOARD_SETUP
#define MAX_RAND_SIZE 8
int ft_board_setup(void *blob, struct bd_info *bd)
diff --git a/boot/bootm.c b/boot/bootm.c
index 480f8e6a0e6..a61bbcfb45c 100644
--- a/boot/bootm.c
+++ b/boot/bootm.c
@@ -239,30 +239,11 @@ static int boot_get_kernel(const char *addr_fit, struct bootm_headers *images,
return 0;
}
-#ifdef CONFIG_LMB
-static void boot_start_lmb(struct bootm_headers *images)
-{
- phys_addr_t mem_start;
- phys_size_t mem_size;
-
- mem_start = env_get_bootm_low();
- mem_size = env_get_bootm_size();
-
- lmb_init_and_reserve_range(&images->lmb, mem_start,
- mem_size, NULL);
-}
-#else
-#define lmb_reserve(lmb, base, size)
-static inline void boot_start_lmb(struct bootm_headers *images) { }
-#endif
-
static int bootm_start(void)
{
memset((void *)&images, 0, sizeof(images));
images.verify = env_get_yesno("verify");
- boot_start_lmb(&images);
-
bootstage_mark_name(BOOTSTAGE_ID_BOOTM_START, "bootm_start");
images.state = BOOTM_STATE_START;
@@ -640,7 +621,7 @@ static int bootm_load_os(struct bootm_headers *images, int boot_progress)
if (os.type == IH_TYPE_KERNEL_NOLOAD && os.comp != IH_COMP_NONE) {
ulong req_size = ALIGN(image_len * 4, SZ_1M);
- load = lmb_alloc(&images->lmb, req_size, SZ_2M);
+ load = lmb_alloc(req_size, SZ_2M);
if (!load)
return 1;
os.load = load;
@@ -714,8 +695,9 @@ static int bootm_load_os(struct bootm_headers *images, int boot_progress)
images->os.end = relocated_addr + image_size;
}
- lmb_reserve(&images->lmb, images->os.load, (load_end -
- images->os.load));
+ if (CONFIG_IS_ENABLED(LMB))
+ lmb_reserve(images->os.load, (load_end - images->os.load));
+
return 0;
}
@@ -1029,19 +1011,19 @@ int bootm_run_states(struct bootm_info *bmi, int states)
if (!ret && (states & BOOTM_STATE_RAMDISK)) {
ulong rd_len = images->rd_end - images->rd_start;
- ret = boot_ramdisk_high(&images->lmb, images->rd_start,
- rd_len, &images->initrd_start, &images->initrd_end);
+ ret = boot_ramdisk_high(images->rd_start, rd_len,
+ &images->initrd_start,
+ &images->initrd_end);
if (!ret) {
env_set_hex("initrd_start", images->initrd_start);
env_set_hex("initrd_end", images->initrd_end);
}
}
#endif
-#if CONFIG_IS_ENABLED(OF_LIBFDT) && defined(CONFIG_LMB)
+#if CONFIG_IS_ENABLED(OF_LIBFDT) && CONFIG_IS_ENABLED(LMB)
if (!ret && (states & BOOTM_STATE_FDT)) {
- boot_fdt_add_mem_rsv_regions(&images->lmb, images->ft_addr);
- ret = boot_relocate_fdt(&images->lmb, &images->ft_addr,
- &images->ft_len);
+ boot_fdt_add_mem_rsv_regions(images->ft_addr);
+ ret = boot_relocate_fdt(&images->ft_addr, &images->ft_len);
}
#endif
diff --git a/boot/bootm_os.c b/boot/bootm_os.c
index 6a6621706f7..e9522cd3299 100644
--- a/boot/bootm_os.c
+++ b/boot/bootm_os.c
@@ -260,12 +260,11 @@ static void do_bootvx_fdt(struct bootm_headers *images)
char *bootline;
ulong of_size = images->ft_len;
char **of_flat_tree = &images->ft_addr;
- struct lmb *lmb = &images->lmb;
if (*of_flat_tree) {
- boot_fdt_add_mem_rsv_regions(lmb, *of_flat_tree);
+ boot_fdt_add_mem_rsv_regions(*of_flat_tree);
- ret = boot_relocate_fdt(lmb, of_flat_tree, &of_size);
+ ret = boot_relocate_fdt(of_flat_tree, &of_size);
if (ret)
return;
diff --git a/boot/image-board.c b/boot/image-board.c
index eca1b1d2bff..1757e5816d8 100644
--- a/boot/image-board.c
+++ b/boot/image-board.c
@@ -517,7 +517,6 @@ int boot_get_ramdisk(char const *select, struct bootm_headers *images,
/**
* boot_ramdisk_high - relocate init ramdisk
- * @lmb: pointer to lmb handle, will be used for memory mgmt
* @rd_data: ramdisk data start address
* @rd_len: ramdisk data length
* @initrd_start: pointer to a ulong variable, will hold final init ramdisk
@@ -536,8 +535,8 @@ int boot_get_ramdisk(char const *select, struct bootm_headers *images,
* 0 - success
* -1 - failure
*/
-int boot_ramdisk_high(struct lmb *lmb, ulong rd_data, ulong rd_len,
- ulong *initrd_start, ulong *initrd_end)
+int boot_ramdisk_high(ulong rd_data, ulong rd_len, ulong *initrd_start,
+ ulong *initrd_end)
{
char *s;
phys_addr_t initrd_high;
@@ -563,13 +562,14 @@ int boot_ramdisk_high(struct lmb *lmb, ulong rd_data, ulong rd_len,
debug(" in-place initrd\n");
*initrd_start = rd_data;
*initrd_end = rd_data + rd_len;
- lmb_reserve(lmb, rd_data, rd_len);
+ lmb_reserve(rd_data, rd_len);
} else {
if (initrd_high)
- *initrd_start = (ulong)lmb_alloc_base(lmb,
- rd_len, 0x1000, initrd_high);
+ *initrd_start = (ulong)lmb_alloc_base(rd_len,
+ 0x1000,
+ initrd_high);
else
- *initrd_start = (ulong)lmb_alloc(lmb, rd_len,
+ *initrd_start = (ulong)lmb_alloc(rd_len,
0x1000);
if (*initrd_start == 0) {
@@ -802,7 +802,6 @@ int boot_get_loadable(struct bootm_headers *images)
/**
* boot_get_cmdline - allocate and initialize kernel cmdline
- * @lmb: pointer to lmb handle, will be used for memory mgmt
* @cmd_start: pointer to a ulong variable, will hold cmdline start
* @cmd_end: pointer to a ulong variable, will hold cmdline end
*
@@ -815,7 +814,7 @@ int boot_get_loadable(struct bootm_headers *images)
* 0 - success
* -1 - failure
*/
-int boot_get_cmdline(struct lmb *lmb, ulong *cmd_start, ulong *cmd_end)
+int boot_get_cmdline(ulong *cmd_start, ulong *cmd_end)
{
int barg;
char *cmdline;
@@ -829,7 +828,7 @@ int boot_get_cmdline(struct lmb *lmb, ulong *cmd_start, ulong *cmd_end)
return 0;
barg = IF_ENABLED_INT(CONFIG_SYS_BOOT_GET_CMDLINE, CONFIG_SYS_BARGSIZE);
- cmdline = (char *)(ulong)lmb_alloc_base(lmb, barg, 0xf,
+ cmdline = (char *)(ulong)lmb_alloc_base(barg, 0xf,
env_get_bootm_mapsize() + env_get_bootm_low());
if (!cmdline)
return -1;
@@ -850,7 +849,6 @@ int boot_get_cmdline(struct lmb *lmb, ulong *cmd_start, ulong *cmd_end)
/**
* boot_get_kbd - allocate and initialize kernel copy of board info
- * @lmb: pointer to lmb handle, will be used for memory mgmt
* @kbd: double pointer to board info data
*
* boot_get_kbd() allocates space for kernel copy of board info data below
@@ -861,10 +859,9 @@ int boot_get_cmdline(struct lmb *lmb, ulong *cmd_start, ulong *cmd_end)
* 0 - success
* -1 - failure
*/
-int boot_get_kbd(struct lmb *lmb, struct bd_info **kbd)
+int boot_get_kbd(struct bd_info **kbd)
{
- *kbd = (struct bd_info *)(ulong)lmb_alloc_base(lmb,
- sizeof(struct bd_info),
+ *kbd = (struct bd_info *)(ulong)lmb_alloc_base(sizeof(struct bd_info),
0xf,
env_get_bootm_mapsize() +
env_get_bootm_low());
@@ -885,17 +882,16 @@ int image_setup_linux(struct bootm_headers *images)
{
ulong of_size = images->ft_len;
char **of_flat_tree = &images->ft_addr;
- struct lmb *lmb = images_lmb(images);
int ret;
/* This function cannot be called without lmb support */
- if (!IS_ENABLED(CONFIG_LMB))
+ if (!CONFIG_IS_ENABLED(LMB))
return -EFAULT;
if (CONFIG_IS_ENABLED(OF_LIBFDT))
- boot_fdt_add_mem_rsv_regions(lmb, *of_flat_tree);
+ boot_fdt_add_mem_rsv_regions(*of_flat_tree);
if (IS_ENABLED(CONFIG_SYS_BOOT_GET_CMDLINE)) {
- ret = boot_get_cmdline(lmb, &images->cmdline_start,
+ ret = boot_get_cmdline(&images->cmdline_start,
&images->cmdline_end);
if (ret) {
puts("ERROR with allocation of cmdline\n");
@@ -904,13 +900,13 @@ int image_setup_linux(struct bootm_headers *images)
}
if (CONFIG_IS_ENABLED(OF_LIBFDT)) {
- ret = boot_relocate_fdt(lmb, of_flat_tree, &of_size);
+ ret = boot_relocate_fdt(of_flat_tree, &of_size);
if (ret)
return ret;
}
if (CONFIG_IS_ENABLED(OF_LIBFDT) && of_size) {
- ret = image_setup_libfdt(images, *of_flat_tree, lmb);
+ ret = image_setup_libfdt(images, *of_flat_tree, true);
if (ret)
return ret;
}
diff --git a/boot/image-fdt.c b/boot/image-fdt.c
index 8332792b8e8..8eda521693d 100644
--- a/boot/image-fdt.c
+++ b/boot/image-fdt.c
@@ -68,12 +68,11 @@ static const struct legacy_img_hdr *image_get_fdt(ulong fdt_addr)
}
#endif
-static void boot_fdt_reserve_region(struct lmb *lmb, uint64_t addr,
- uint64_t size, enum lmb_flags flags)
+static void boot_fdt_reserve_region(u64 addr, u64 size, enum lmb_flags flags)
{
long ret;
- ret = lmb_reserve_flags(lmb, addr, size, flags);
+ ret = lmb_reserve_flags(addr, size, flags);
if (ret >= 0) {
debug(" reserving fdt memory region: addr=%llx size=%llx flags=%x\n",
(unsigned long long)addr,
@@ -89,14 +88,13 @@ static void boot_fdt_reserve_region(struct lmb *lmb, uint64_t addr,
/**
* boot_fdt_add_mem_rsv_regions - Mark the memreserve and reserved-memory
* sections as unusable
- * @lmb: pointer to lmb handle, will be used for memory mgmt
* @fdt_blob: pointer to fdt blob base address
*
 * Adds the memreserve and reserved-memory regions in the dtb to the lmb block.
* Adding the memreserve regions prevents u-boot from using them to store the
* initrd or the fdt blob.
*/
-void boot_fdt_add_mem_rsv_regions(struct lmb *lmb, void *fdt_blob)
+void boot_fdt_add_mem_rsv_regions(void *fdt_blob)
{
uint64_t addr, size;
int i, total, ret;
@@ -112,7 +110,7 @@ void boot_fdt_add_mem_rsv_regions(struct lmb *lmb, void *fdt_blob)
for (i = 0; i < total; i++) {
if (fdt_get_mem_rsv(fdt_blob, i, &addr, &size) != 0)
continue;
- boot_fdt_reserve_region(lmb, addr, size, LMB_NONE);
+ boot_fdt_reserve_region(addr, size, LMB_NONE);
}
/* process reserved-memory */
@@ -130,7 +128,7 @@ void boot_fdt_add_mem_rsv_regions(struct lmb *lmb, void *fdt_blob)
flags = LMB_NOMAP;
addr = res.start;
size = res.end - res.start + 1;
- boot_fdt_reserve_region(lmb, addr, size, flags);
+ boot_fdt_reserve_region(addr, size, flags);
}
subnode = fdt_next_subnode(fdt_blob, subnode);
@@ -140,7 +138,6 @@ void boot_fdt_add_mem_rsv_regions(struct lmb *lmb, void *fdt_blob)
/**
* boot_relocate_fdt - relocate flat device tree
- * @lmb: pointer to lmb handle, will be used for memory mgmt
* @of_flat_tree: pointer to a char* variable, will hold fdt start address
* @of_size: pointer to a ulong variable, will hold fdt length
*
@@ -155,7 +152,7 @@ void boot_fdt_add_mem_rsv_regions(struct lmb *lmb, void *fdt_blob)
* 0 - success
* 1 - failure
*/
-int boot_relocate_fdt(struct lmb *lmb, char **of_flat_tree, ulong *of_size)
+int boot_relocate_fdt(char **of_flat_tree, ulong *of_size)
{
u64 start, size, usable, addr, low, mapsize;
void *fdt_blob = *of_flat_tree;
@@ -187,18 +184,17 @@ int boot_relocate_fdt(struct lmb *lmb, char **of_flat_tree, ulong *of_size)
if (desired_addr == ~0UL) {
/* All ones means use fdt in place */
of_start = fdt_blob;
- lmb_reserve(lmb, map_to_sysmem(of_start), of_len);
+ lmb_reserve(map_to_sysmem(of_start), of_len);
disable_relocation = 1;
} else if (desired_addr) {
- addr = lmb_alloc_base(lmb, of_len, 0x1000,
- desired_addr);
+ addr = lmb_alloc_base(of_len, 0x1000, desired_addr);
of_start = map_sysmem(addr, of_len);
if (of_start == NULL) {
puts("Failed using fdt_high value for Device Tree");
goto error;
}
} else {
- addr = lmb_alloc(lmb, of_len, 0x1000);
+ addr = lmb_alloc(of_len, 0x1000);
of_start = map_sysmem(addr, of_len);
}
} else {
@@ -220,7 +216,7 @@ int boot_relocate_fdt(struct lmb *lmb, char **of_flat_tree, ulong *of_size)
* for LMB allocation.
*/
usable = min(start + size, low + mapsize);
- addr = lmb_alloc_base(lmb, of_len, 0x1000, usable);
+ addr = lmb_alloc_base(of_len, 0x1000, usable);
of_start = map_sysmem(addr, of_len);
/* Allocation succeeded, use this block. */
if (of_start != NULL)
@@ -569,8 +565,7 @@ __weak int arch_fixup_fdt(void *blob)
return 0;
}
-int image_setup_libfdt(struct bootm_headers *images, void *blob,
- struct lmb *lmb)
+int image_setup_libfdt(struct bootm_headers *images, void *blob, bool lmb)
{
ulong *initrd_start = &images->initrd_start;
ulong *initrd_end = &images->initrd_end;
@@ -670,8 +665,8 @@ int image_setup_libfdt(struct bootm_headers *images, void *blob,
}
/* Delete the old LMB reservation */
- if (lmb)
- lmb_free(lmb, map_to_sysmem(blob), fdt_totalsize(blob));
+ if (CONFIG_IS_ENABLED(LMB) && lmb)
+ lmb_free(map_to_sysmem(blob), fdt_totalsize(blob));
ret = fdt_shrink_to_minimum(blob, 0);
if (ret < 0)
@@ -679,8 +674,8 @@ int image_setup_libfdt(struct bootm_headers *images, void *blob,
of_size = ret;
/* Create a new LMB reservation */
- if (lmb)
- lmb_reserve(lmb, map_to_sysmem(blob), of_size);
+ if (CONFIG_IS_ENABLED(LMB) && lmb)
+ lmb_reserve(map_to_sysmem(blob), of_size);
#if defined(CONFIG_ARCH_KEYSTONE)
if (IS_ENABLED(CONFIG_OF_BOARD_SETUP))
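
One consequence of dropping the handle shows up in image_setup_libfdt(): its last parameter is now a plain flag rather than an LMB pointer. When the flag is true (and LMB support is enabled) the function frees the old reservation covering the device tree blob and re-reserves it after fdt_shrink_to_minimum(); when false, the LMB bookkeeping is skipped. A sketch of the two call styles used in this series:

	/* bootm/booti path: keep the blob's LMB reservation in sync */
	ret = image_setup_libfdt(images, images->ft_addr, true);

	/* bootelf path (cmd/elf.c): no LMB bookkeeping wanted */
	if (image_setup_libfdt(&img, (void *)fdt_addr, false))
		return 1;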
diff --git a/cmd/bdinfo.c b/cmd/bdinfo.c
index 59fbaea498b..f6e534dd5bb 100644
--- a/cmd/bdinfo.c
+++ b/cmd/bdinfo.c
@@ -160,10 +160,7 @@ static int bdinfo_print_all(struct bd_info *bd)
bdinfo_print_num_l("multi_dtb_fit", (ulong)gd->multi_dtb_fit);
#endif
if (IS_ENABLED(CONFIG_LMB) && gd->fdt_blob) {
- struct lmb lmb;
-
- lmb_init_and_reserve(&lmb, gd->bd, (void *)gd->fdt_blob);
- lmb_dump_all_force(&lmb);
+ lmb_dump_all_force();
if (IS_ENABLED(CONFIG_OF_REAL))
printf("devicetree = %s\n", fdtdec_get_srcname());
}
diff --git a/cmd/booti.c b/cmd/booti.c
index 62b19e83436..6018cbacf0a 100644
--- a/cmd/booti.c
+++ b/cmd/booti.c
@@ -87,7 +87,7 @@ static int booti_start(struct bootm_info *bmi)
images->os.start = relocated_addr;
images->os.end = relocated_addr + image_size;
- lmb_reserve(&images->lmb, images->ep, le32_to_cpu(image_size));
+ lmb_reserve(images->ep, le32_to_cpu(image_size));
/*
* Handle the BOOTM_STATE_FINDOTHER state ourselves as we do not
diff --git a/cmd/bootz.c b/cmd/bootz.c
index 55837a7599b..787203f5bd7 100644
--- a/cmd/bootz.c
+++ b/cmd/bootz.c
@@ -56,7 +56,7 @@ static int bootz_start(struct cmd_tbl *cmdtp, int flag, int argc,
if (ret != 0)
return 1;
- lmb_reserve(&images->lmb, images->ep, zi_end - zi_start);
+ lmb_reserve(images->ep, zi_end - zi_start);
/*
* Handle the BOOTM_STATE_FINDOTHER state ourselves as we do not
diff --git a/cmd/elf.c b/cmd/elf.c
index 673c6c30511..f07e344a596 100644
--- a/cmd/elf.c
+++ b/cmd/elf.c
@@ -70,7 +70,7 @@ int do_bootelf(struct cmd_tbl *cmdtp, int flag, int argc, char *const argv[])
fdt_set_totalsize((void *)fdt_addr,
fdt_totalsize(fdt_addr) + CONFIG_SYS_FDT_PAD);
- if (image_setup_libfdt(&img, (void *)fdt_addr, NULL))
+ if (image_setup_libfdt(&img, (void *)fdt_addr, false))
return 1;
}
#endif
diff --git a/cmd/load.c b/cmd/load.c
index d773a25d70c..20d802502ae 100644
--- a/cmd/load.c
+++ b/cmd/load.c
@@ -141,7 +141,6 @@ static int do_load_serial(struct cmd_tbl *cmdtp, int flag, int argc,
static ulong load_serial(long offset)
{
- struct lmb lmb;
char record[SREC_MAXRECLEN + 1]; /* buffer for one S-Record */
char binbuf[SREC_MAXBINLEN]; /* buffer for binary data */
int binlen; /* no. of data bytes in S-Rec. */
@@ -154,8 +153,6 @@ static ulong load_serial(long offset)
int line_count = 0;
long ret;
- lmb_init_and_reserve(&lmb, gd->bd, (void *)gd->fdt_blob);
-
while (read_record(record, SREC_MAXRECLEN + 1) >= 0) {
type = srec_decode(record, &binlen, &addr, binbuf);
@@ -182,7 +179,7 @@ static ulong load_serial(long offset)
{
void *dst;
- ret = lmb_reserve(&lmb, store_addr, binlen);
+ ret = lmb_reserve(store_addr, binlen);
if (ret) {
printf("\nCannot overwrite reserved area (%08lx..%08lx)\n",
store_addr, store_addr + binlen);
@@ -191,7 +188,7 @@ static ulong load_serial(long offset)
dst = map_sysmem(store_addr, binlen);
memcpy(dst, binbuf, binlen);
unmap_sysmem(dst);
- lmb_free(&lmb, store_addr, binlen);
+ lmb_free(store_addr, binlen);
}
if ((store_addr) < start_addr)
start_addr = store_addr;
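
The S-record loader keeps its overwrite guard, but now runs it against the global LMB state: each record's destination is briefly reserved, the data is copied only if the reservation succeeds, and the reservation is released again so nothing stays claimed after the transfer. Sketched contiguously (the error return is illustrative; the hunk above only shows the message):

	ret = lmb_reserve(store_addr, binlen);
	if (ret) {
		printf("\nCannot overwrite reserved area (%08lx..%08lx)\n",
		       store_addr, store_addr + binlen);
		return ret;			/* assumed: abort the transfer */
	}
	dst = map_sysmem(store_addr, binlen);
	memcpy(dst, binbuf, binlen);
	unmap_sysmem(dst);
	lmb_free(store_addr, binlen);		/* drop the temporary reservation */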
diff --git a/common/board_r.c b/common/board_r.c
index e88b7ea4d8a..4faaa202421 100644
--- a/common/board_r.c
+++ b/common/board_r.c
@@ -22,6 +22,7 @@
#include <hang.h>
#include <image.h>
#include <irq_func.h>
+#include <lmb.h>
#include <log.h>
#include <net.h>
#include <asm/cache.h>
@@ -510,6 +511,14 @@ int initr_mem(void)
}
#endif
+static int initr_lmb(void)
+{
+ if (CONFIG_IS_ENABLED(LMB))
+ return lmb_init();
+ else
+ return 0;
+}
+
static int dm_announce(void)
{
int device_count;
@@ -612,6 +621,7 @@ static init_fnc_t init_sequence_r[] = {
#ifdef CONFIG_CLOCKS
set_cpu_clk_info, /* Setup clock information */
#endif
+ initr_lmb,
#ifdef CONFIG_EFI_LOADER
efi_memory_init,
#endif
diff --git a/common/spl/spl.c b/common/spl/spl.c
index 5886d2c5121..c13b2b8f714 100644
--- a/common/spl/spl.c
+++ b/common/spl/spl.c
@@ -711,9 +711,6 @@ void board_init_r(gd_t *dummy1, ulong dummy2)
if (CONFIG_IS_ENABLED(SOC_INIT))
spl_soc_init();
- if (CONFIG_IS_ENABLED(BOARD_INIT))
- spl_board_init();
-
if (IS_ENABLED(CONFIG_SPL_WATCHDOG) && CONFIG_IS_ENABLED(WDT))
initr_watchdog();
@@ -721,6 +718,9 @@ void board_init_r(gd_t *dummy1, ulong dummy2)
IS_ENABLED(CONFIG_SPL_ATF) || IS_ENABLED(CONFIG_SPL_NET))
dram_init_banksize();
+ if (IS_ENABLED(CONFIG_SPL_LMB))
+ lmb_init();
+
if (CONFIG_IS_ENABLED(PCI) && !(gd->flags & GD_FLG_DM_DEAD)) {
ret = pci_init();
if (ret)
@@ -728,6 +728,9 @@ void board_init_r(gd_t *dummy1, ulong dummy2)
/* Don't fail. We still can try other boot methods. */
}
+ if (CONFIG_IS_ENABLED(BOARD_INIT))
+ spl_board_init();
+
bootcount_inc();
/* Dump driver model states to aid analysis */
diff --git a/configs/a3y17lte_defconfig b/configs/a3y17lte_defconfig
index 5c15d51fdc3..b012b985a30 100644
--- a/configs/a3y17lte_defconfig
+++ b/configs/a3y17lte_defconfig
@@ -23,4 +23,3 @@ CONFIG_HUSH_PARSER=y
CONFIG_CMD_GPIO=y
CONFIG_CMD_I2C=y
CONFIG_DM_I2C_GPIO=y
-CONFIG_LMB_MAX_REGIONS=64
diff --git a/configs/a5y17lte_defconfig b/configs/a5y17lte_defconfig
index 7c9b6b25117..25a7d5bc984 100644
--- a/configs/a5y17lte_defconfig
+++ b/configs/a5y17lte_defconfig
@@ -23,4 +23,3 @@ CONFIG_HUSH_PARSER=y
CONFIG_CMD_GPIO=y
CONFIG_CMD_I2C=y
CONFIG_DM_I2C_GPIO=y
-CONFIG_LMB_MAX_REGIONS=64
diff --git a/configs/a7y17lte_defconfig b/configs/a7y17lte_defconfig
index c7297f7d75c..c87379ab391 100644
--- a/configs/a7y17lte_defconfig
+++ b/configs/a7y17lte_defconfig
@@ -23,4 +23,3 @@ CONFIG_HUSH_PARSER=y
CONFIG_CMD_GPIO=y
CONFIG_CMD_I2C=y
CONFIG_DM_I2C_GPIO=y
-CONFIG_LMB_MAX_REGIONS=64
diff --git a/configs/apple_m1_defconfig b/configs/apple_m1_defconfig
index 20d2cff93f7..dca6e0ca8ba 100644
--- a/configs/apple_m1_defconfig
+++ b/configs/apple_m1_defconfig
@@ -26,4 +26,3 @@ CONFIG_SYS_WHITE_ON_BLACK=y
CONFIG_NO_FB_CLEAR=y
CONFIG_VIDEO_SIMPLE=y
# CONFIG_SMBIOS is not set
-CONFIG_LMB_MAX_REGIONS=64
diff --git a/configs/mt7981_emmc_rfb_defconfig b/configs/mt7981_emmc_rfb_defconfig
index 76ee2aa2d66..d3e833905f3 100644
--- a/configs/mt7981_emmc_rfb_defconfig
+++ b/configs/mt7981_emmc_rfb_defconfig
@@ -62,4 +62,3 @@ CONFIG_MTK_SERIAL=y
CONFIG_FAT_WRITE=y
CONFIG_HEXDUMP=y
# CONFIG_EFI_LOADER is not set
-CONFIG_LMB_MAX_REGIONS=64
diff --git a/configs/mt7981_rfb_defconfig b/configs/mt7981_rfb_defconfig
index 3989c79d2b1..4bc2173f13d 100644
--- a/configs/mt7981_rfb_defconfig
+++ b/configs/mt7981_rfb_defconfig
@@ -65,4 +65,3 @@ CONFIG_DM_SPI=y
CONFIG_MTK_SPIM=y
CONFIG_HEXDUMP=y
# CONFIG_EFI_LOADER is not set
-CONFIG_LMB_MAX_REGIONS=64
diff --git a/configs/mt7981_sd_rfb_defconfig b/configs/mt7981_sd_rfb_defconfig
index 9b332455279..8721b4074ac 100644
--- a/configs/mt7981_sd_rfb_defconfig
+++ b/configs/mt7981_sd_rfb_defconfig
@@ -62,4 +62,3 @@ CONFIG_MTK_SERIAL=y
CONFIG_FAT_WRITE=y
CONFIG_HEXDUMP=y
# CONFIG_EFI_LOADER is not set
-CONFIG_LMB_MAX_REGIONS=64
diff --git a/configs/mt7986_rfb_defconfig b/configs/mt7986_rfb_defconfig
index 4d0cc85d0e5..15c31de236f 100644
--- a/configs/mt7986_rfb_defconfig
+++ b/configs/mt7986_rfb_defconfig
@@ -65,4 +65,3 @@ CONFIG_DM_SPI=y
CONFIG_MTK_SPIM=y
CONFIG_HEXDUMP=y
# CONFIG_EFI_LOADER is not set
-CONFIG_LMB_MAX_REGIONS=64
diff --git a/configs/mt7986a_bpir3_emmc_defconfig b/configs/mt7986a_bpir3_emmc_defconfig
index 3c296ab803e..56921f36057 100644
--- a/configs/mt7986a_bpir3_emmc_defconfig
+++ b/configs/mt7986a_bpir3_emmc_defconfig
@@ -62,4 +62,3 @@ CONFIG_MTK_SERIAL=y
CONFIG_FAT_WRITE=y
CONFIG_HEXDUMP=y
# CONFIG_EFI_LOADER is not set
-CONFIG_LMB_MAX_REGIONS=64
diff --git a/configs/mt7986a_bpir3_sd_defconfig b/configs/mt7986a_bpir3_sd_defconfig
index f644070f4e1..4ed06b72d52 100644
--- a/configs/mt7986a_bpir3_sd_defconfig
+++ b/configs/mt7986a_bpir3_sd_defconfig
@@ -62,4 +62,3 @@ CONFIG_MTK_SERIAL=y
CONFIG_FAT_WRITE=y
CONFIG_HEXDUMP=y
# CONFIG_EFI_LOADER is not set
-CONFIG_LMB_MAX_REGIONS=64
diff --git a/configs/mt7988_rfb_defconfig b/configs/mt7988_rfb_defconfig
index d0ed2cc1c91..f7ceaceb303 100644
--- a/configs/mt7988_rfb_defconfig
+++ b/configs/mt7988_rfb_defconfig
@@ -81,4 +81,3 @@ CONFIG_MTK_SPIM=y
CONFIG_LZO=y
CONFIG_HEXDUMP=y
# CONFIG_EFI_LOADER is not set
-CONFIG_LMB_MAX_REGIONS=64
diff --git a/configs/mt7988_sd_rfb_defconfig b/configs/mt7988_sd_rfb_defconfig
index 5631eaa338f..808c8b90116 100644
--- a/configs/mt7988_sd_rfb_defconfig
+++ b/configs/mt7988_sd_rfb_defconfig
@@ -69,4 +69,3 @@ CONFIG_MTK_SPIM=y
CONFIG_LZO=y
CONFIG_HEXDUMP=y
# CONFIG_EFI_LOADER is not set
-CONFIG_LMB_MAX_REGIONS=64
diff --git a/configs/qcom_defconfig b/configs/qcom_defconfig
index 8852e83a52b..edbb624baf0 100644
--- a/configs/qcom_defconfig
+++ b/configs/qcom_defconfig
@@ -117,4 +117,3 @@ CONFIG_SYS_WHITE_ON_BLACK=y
CONFIG_NO_FB_CLEAR=y
CONFIG_VIDEO_SIMPLE=y
CONFIG_HEXDUMP=y
-CONFIG_LMB_MAX_REGIONS=64
diff --git a/configs/sandbox_noinst_defconfig b/configs/sandbox_noinst_defconfig
index bfd95e2398f..eb0f064ad28 100644
--- a/configs/sandbox_noinst_defconfig
+++ b/configs/sandbox_noinst_defconfig
@@ -286,3 +286,4 @@ CONFIG_UNIT_TEST=y
CONFIG_SPL_UNIT_TEST=y
CONFIG_UT_TIME=y
CONFIG_UT_DM=y
+CONFIG_SPL_LMB=y
diff --git a/configs/sandbox_spl_defconfig b/configs/sandbox_spl_defconfig
index f7b92dc8445..bc6a430fa35 100644
--- a/configs/sandbox_spl_defconfig
+++ b/configs/sandbox_spl_defconfig
@@ -1,4 +1,4 @@
-CONFIG_TEXT_BASE=0x200000
+CONFIG_TEXT_BASE=0x400000
CONFIG_SPL_GPIO=y
CONFIG_SPL_LIBCOMMON_SUPPORT=y
CONFIG_SPL_LIBGENERIC_SUPPORT=y
@@ -253,3 +253,4 @@ CONFIG_UNIT_TEST=y
CONFIG_SPL_UNIT_TEST=y
CONFIG_UT_TIME=y
CONFIG_UT_DM=y
+CONFIG_SPL_LMB=y
diff --git a/configs/stm32mp13_defconfig b/configs/stm32mp13_defconfig
index caaabf39ef3..9aa3560c7e3 100644
--- a/configs/stm32mp13_defconfig
+++ b/configs/stm32mp13_defconfig
@@ -103,6 +103,3 @@ CONFIG_USB_GADGET_VENDOR_NUM=0x0483
CONFIG_USB_GADGET_PRODUCT_NUM=0x5720
CONFIG_USB_GADGET_DWC2_OTG=y
CONFIG_ERRNO_STR=y
-# CONFIG_LMB_USE_MAX_REGIONS is not set
-CONFIG_LMB_MEMORY_REGIONS=2
-CONFIG_LMB_RESERVED_REGIONS=16
diff --git a/configs/stm32mp15_basic_defconfig b/configs/stm32mp15_basic_defconfig
index 2e22bf86000..806935f3892 100644
--- a/configs/stm32mp15_basic_defconfig
+++ b/configs/stm32mp15_basic_defconfig
@@ -190,6 +190,3 @@ CONFIG_WDT=y
CONFIG_WDT_STM32MP=y
# CONFIG_BINMAN_FDT is not set
CONFIG_ERRNO_STR=y
-# CONFIG_LMB_USE_MAX_REGIONS is not set
-CONFIG_LMB_MEMORY_REGIONS=2
-CONFIG_LMB_RESERVED_REGIONS=16
diff --git a/configs/stm32mp15_defconfig b/configs/stm32mp15_defconfig
index ffe7512650e..5f050ee0d05 100644
--- a/configs/stm32mp15_defconfig
+++ b/configs/stm32mp15_defconfig
@@ -166,6 +166,3 @@ CONFIG_WDT=y
CONFIG_WDT_STM32MP=y
# CONFIG_BINMAN_FDT is not set
CONFIG_ERRNO_STR=y
-# CONFIG_LMB_USE_MAX_REGIONS is not set
-CONFIG_LMB_MEMORY_REGIONS=2
-CONFIG_LMB_RESERVED_REGIONS=16
diff --git a/configs/stm32mp15_trusted_defconfig b/configs/stm32mp15_trusted_defconfig
index 74deaaba2e4..3c591d74af3 100644
--- a/configs/stm32mp15_trusted_defconfig
+++ b/configs/stm32mp15_trusted_defconfig
@@ -166,6 +166,3 @@ CONFIG_WDT=y
CONFIG_WDT_STM32MP=y
# CONFIG_BINMAN_FDT is not set
CONFIG_ERRNO_STR=y
-# CONFIG_LMB_USE_MAX_REGIONS is not set
-CONFIG_LMB_MEMORY_REGIONS=2
-CONFIG_LMB_RESERVED_REGIONS=16
diff --git a/configs/stm32mp25_defconfig b/configs/stm32mp25_defconfig
index 87038cc773a..f5623a19bb2 100644
--- a/configs/stm32mp25_defconfig
+++ b/configs/stm32mp25_defconfig
@@ -49,6 +49,3 @@ CONFIG_WDT_STM32MP=y
CONFIG_WDT_ARM_SMC=y
CONFIG_ERRNO_STR=y
# CONFIG_EFI_LOADER is not set
-# CONFIG_LMB_USE_MAX_REGIONS is not set
-CONFIG_LMB_MEMORY_REGIONS=2
-CONFIG_LMB_RESERVED_REGIONS=32
diff --git a/configs/th1520_lpi4a_defconfig b/configs/th1520_lpi4a_defconfig
index 49ff92f6de3..db80e33870b 100644
--- a/configs/th1520_lpi4a_defconfig
+++ b/configs/th1520_lpi4a_defconfig
@@ -79,4 +79,3 @@ CONFIG_BZIP2=y
CONFIG_ZSTD=y
CONFIG_LIB_RATIONAL=y
# CONFIG_EFI_LOADER is not set
-# CONFIG_LMB_USE_MAX_REGIONS is not set
diff --git a/doc/arch/sandbox/sandbox.rst b/doc/arch/sandbox/sandbox.rst
index 5f8db126657..49675517246 100644
--- a/doc/arch/sandbox/sandbox.rst
+++ b/doc/arch/sandbox/sandbox.rst
@@ -662,7 +662,8 @@ Addr Config Usage
b000 CONFIG_BLOBLIST_ADDR Blob list
10000 CFG_MALLOC_F_ADDR Early memory allocation
f0000 CONFIG_PRE_CON_BUF_ADDR Pre-console buffer
- 100000 CONFIG_TRACE_EARLY_ADDR Early trace buffer (if enabled). Also used
+ 100000 TCG Event log TCG Event Log
+ 200000 CONFIG_TRACE_EARLY_ADDR Early trace buffer (if enabled). Also used
as the SPL load buffer in spl_test_load().
- 200000 CONFIG_TEXT_BASE Load buffer for U-Boot (sandbox_spl only)
+ 400000 CONFIG_TEXT_BASE Load buffer for U-Boot (sandbox_spl only)
======= ======================== ===============================
diff --git a/drivers/iommu/apple_dart.c b/drivers/iommu/apple_dart.c
index 9327dea1e3b..611ac7cd6de 100644
--- a/drivers/iommu/apple_dart.c
+++ b/drivers/iommu/apple_dart.c
@@ -70,7 +70,6 @@
struct apple_dart_priv {
void *base;
- struct lmb lmb;
u64 *l1, *l2;
int bypass, shift;
@@ -124,7 +123,7 @@ static dma_addr_t apple_dart_map(struct udevice *dev, void *addr, size_t size)
off = (phys_addr_t)addr - paddr;
psize = ALIGN(size + off, DART_PAGE_SIZE);
- dva = lmb_alloc(&priv->lmb, psize, DART_PAGE_SIZE);
+ dva = lmb_alloc(psize, DART_PAGE_SIZE);
idx = dva / DART_PAGE_SIZE;
for (i = 0; i < psize / DART_PAGE_SIZE; i++) {
@@ -160,7 +159,7 @@ static void apple_dart_unmap(struct udevice *dev, dma_addr_t addr, size_t size)
(unsigned long)&priv->l2[idx + i]);
priv->flush_tlb(priv);
- lmb_free(&priv->lmb, dva, psize);
+ lmb_free(dva, psize);
}
static struct iommu_ops apple_dart_ops = {
@@ -213,8 +212,7 @@ static int apple_dart_probe(struct udevice *dev)
priv->dvabase = DART_PAGE_SIZE;
priv->dvaend = SZ_4G - DART_PAGE_SIZE;
- lmb_init(&priv->lmb);
- lmb_add(&priv->lmb, priv->dvabase, priv->dvaend - priv->dvabase);
+ lmb_add(priv->dvabase, priv->dvaend - priv->dvabase);
/* Disable translations. */
for (sid = 0; sid < priv->nsid; sid++)
diff --git a/drivers/iommu/sandbox_iommu.c b/drivers/iommu/sandbox_iommu.c
index e37976f86f0..c5eefec2185 100644
--- a/drivers/iommu/sandbox_iommu.c
+++ b/drivers/iommu/sandbox_iommu.c
@@ -5,28 +5,20 @@
#include <dm.h>
#include <iommu.h>
-#include <lmb.h>
#include <asm/io.h>
+#include <asm/test.h>
#include <linux/sizes.h>
-#define IOMMU_PAGE_SIZE SZ_4K
-
-struct sandbox_iommu_priv {
- struct lmb lmb;
-};
-
static dma_addr_t sandbox_iommu_map(struct udevice *dev, void *addr,
size_t size)
{
- struct sandbox_iommu_priv *priv = dev_get_priv(dev);
phys_addr_t paddr, dva;
phys_size_t psize, off;
- paddr = ALIGN_DOWN(virt_to_phys(addr), IOMMU_PAGE_SIZE);
+ paddr = ALIGN_DOWN(virt_to_phys(addr), SANDBOX_IOMMU_PAGE_SIZE);
off = virt_to_phys(addr) - paddr;
- psize = ALIGN(size + off, IOMMU_PAGE_SIZE);
-
- dva = lmb_alloc(&priv->lmb, psize, IOMMU_PAGE_SIZE);
+ psize = ALIGN(size + off, SANDBOX_IOMMU_PAGE_SIZE);
+ dva = (phys_addr_t)SANDBOX_IOMMU_DVA_ADDR;
return dva + off;
}
@@ -34,15 +26,12 @@ static dma_addr_t sandbox_iommu_map(struct udevice *dev, void *addr,
static void sandbox_iommu_unmap(struct udevice *dev, dma_addr_t addr,
size_t size)
{
- struct sandbox_iommu_priv *priv = dev_get_priv(dev);
phys_addr_t dva;
phys_size_t psize;
- dva = ALIGN_DOWN(addr, IOMMU_PAGE_SIZE);
+ dva = ALIGN_DOWN(addr, SANDBOX_IOMMU_PAGE_SIZE);
psize = size + (addr - dva);
- psize = ALIGN(psize, IOMMU_PAGE_SIZE);
-
- lmb_free(&priv->lmb, dva, psize);
+ psize = ALIGN(psize, SANDBOX_IOMMU_PAGE_SIZE);
}
static struct iommu_ops sandbox_iommu_ops = {
@@ -50,16 +39,6 @@ static struct iommu_ops sandbox_iommu_ops = {
.unmap = sandbox_iommu_unmap,
};
-static int sandbox_iommu_probe(struct udevice *dev)
-{
- struct sandbox_iommu_priv *priv = dev_get_priv(dev);
-
- lmb_init(&priv->lmb);
- lmb_add(&priv->lmb, 0x89abc000, SZ_16K);
-
- return 0;
-}
-
static const struct udevice_id sandbox_iommu_ids[] = {
{ .compatible = "sandbox,iommu" },
{ /* sentinel */ }
@@ -69,7 +48,5 @@ U_BOOT_DRIVER(sandbox_iommu) = {
.name = "sandbox_iommu",
.id = UCLASS_IOMMU,
.of_match = sandbox_iommu_ids,
- .priv_auto = sizeof(struct sandbox_iommu_priv),
.ops = &sandbox_iommu_ops,
- .probe = sandbox_iommu_probe,
};
diff --git a/fs/fs.c b/fs/fs.c
index 0c47943f333..4bc28d1dffb 100644
--- a/fs/fs.c
+++ b/fs/fs.c
@@ -526,12 +526,11 @@ int fs_size(const char *filename, loff_t *size)
return ret;
}
-#ifdef CONFIG_LMB
+#if CONFIG_IS_ENABLED(LMB)
/* Check if a file may be read to the given address */
static int fs_read_lmb_check(const char *filename, ulong addr, loff_t offset,
loff_t len, struct fstype_info *info)
{
- struct lmb lmb;
int ret;
loff_t size;
loff_t read_len;
@@ -550,10 +549,9 @@ static int fs_read_lmb_check(const char *filename, ulong addr, loff_t offset,
if (len && len < read_len)
read_len = len;
- lmb_init_and_reserve(&lmb, gd->bd, (void *)gd->fdt_blob);
- lmb_dump_all(&lmb);
+ lmb_dump_all();
- if (lmb_alloc_addr(&lmb, addr, read_len) == addr)
+ if (lmb_alloc_addr(addr, read_len) == addr)
return 0;
log_err("** Reading file would overwrite reserved memory **\n");
@@ -568,7 +566,7 @@ static int _fs_read(const char *filename, ulong addr, loff_t offset, loff_t len,
void *buf;
int ret;
-#ifdef CONFIG_LMB
+#if CONFIG_IS_ENABLED(LMB)
if (do_lmb_check) {
ret = fs_read_lmb_check(filename, addr, offset, len, info);
if (ret)
diff --git a/include/alist.h b/include/alist.h
index 586a1efa5c8..68d268f01af 100644
--- a/include/alist.h
+++ b/include/alist.h
@@ -83,6 +83,17 @@ static inline bool alist_err(struct alist *lst)
}
/**
+ * alist_full() - Check if the alist is full
+ *
+ * @lst: List to check
+ * Return: true if full, false otherwise
+ */
+static inline bool alist_full(struct alist *lst)
+{
+ return lst->count == lst->alloc;
+}
+
+/**
* alist_get_ptr() - Get the value of a pointer
*
* @lst: alist to check
diff --git a/include/image.h b/include/image.h
index 2ab17075287..c52fced9b40 100644
--- a/include/image.h
+++ b/include/image.h
@@ -20,7 +20,6 @@
#include <stdbool.h>
/* Define this to avoid #ifdefs later on */
-struct lmb;
struct fdt_region;
#ifdef USE_HOSTCC
@@ -412,18 +411,8 @@ struct bootm_headers {
#define BOOTM_STATE_PRE_LOAD 0x00000800
#define BOOTM_STATE_MEASURE 0x00001000
int state;
-
-#if defined(CONFIG_LMB) && !defined(USE_HOSTCC)
- struct lmb lmb; /* for memory mgmt */
-#endif
};
-#ifdef CONFIG_LMB
-#define images_lmb(_images) (&(_images)->lmb)
-#else
-#define images_lmb(_images) NULL
-#endif
-
extern struct bootm_headers images;
/*
@@ -835,13 +824,13 @@ int boot_get_fdt(void *buf, const char *select, uint arch,
struct bootm_headers *images, char **of_flat_tree,
ulong *of_size);
-void boot_fdt_add_mem_rsv_regions(struct lmb *lmb, void *fdt_blob);
-int boot_relocate_fdt(struct lmb *lmb, char **of_flat_tree, ulong *of_size);
+void boot_fdt_add_mem_rsv_regions(void *fdt_blob);
+int boot_relocate_fdt(char **of_flat_tree, ulong *of_size);
-int boot_ramdisk_high(struct lmb *lmb, ulong rd_data, ulong rd_len,
- ulong *initrd_start, ulong *initrd_end);
-int boot_get_cmdline(struct lmb *lmb, ulong *cmd_start, ulong *cmd_end);
-int boot_get_kbd(struct lmb *lmb, struct bd_info **kbd);
+int boot_ramdisk_high(ulong rd_data, ulong rd_len, ulong *initrd_start,
+ ulong *initrd_end);
+int boot_get_cmdline(ulong *cmd_start, ulong *cmd_end);
+int boot_get_kbd(struct bd_info **kbd);
/*******************************************************************/
/* Legacy format specific code (prefixed with image_) */
@@ -1029,11 +1018,10 @@ int image_decomp(int comp, ulong load, ulong image_start, int type,
*
* @images: Images information
* @blob: FDT to update
- * @lmb: Points to logical memory block structure
+ * @lmb: Flag indicating use of lmb for reserving FDT memory region
* Return: 0 if ok, <0 on failure
*/
-int image_setup_libfdt(struct bootm_headers *images, void *blob,
- struct lmb *lmb);
+int image_setup_libfdt(struct bootm_headers *images, void *blob, bool lmb);
/**
* Set up the FDT to use for booting a kernel
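
image_setup_libfdt() keeps its shape, but the third argument is now a plain flag: callers that also want the FDT region reserved in the LMB map pass true, while callers that only need the fixups skip the reservation (the EFI call sites further down pass false). A minimal hedged sketch, on the assumption that the regular bootm path is the one passing true:

#include <image.h>

/*
 * Illustrative wrapper (not part of this patch): fix up a devicetree blob
 * for booting and, assuming this is the regular bootm path, also reserve
 * the FDT region via LMB by passing lmb=true.
 */
static int fixup_fdt_for_boot(struct bootm_headers *imgs, void *blob)
{
	return image_setup_libfdt(imgs, blob, true);
}
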
diff --git a/include/lmb.h b/include/lmb.h
index 231b68b27d9..fc2daaa7bfc 100644
--- a/include/lmb.h
+++ b/include/lmb.h
@@ -3,8 +3,10 @@
#define _LINUX_LMB_H
#ifdef __KERNEL__
+#include <alist.h>
#include <asm/types.h>
#include <asm/u-boot.h>
+#include <linux/bitops.h>
/*
* Logical memory blocks.
@@ -18,115 +20,75 @@
* @LMB_NOMAP: don't add to mmu configuration
*/
enum lmb_flags {
- LMB_NONE = 0x0,
- LMB_NOMAP = 0x4,
+ LMB_NONE = 0,
+ LMB_NOMAP = BIT(1),
+ LMB_NOOVERWRITE = BIT(2),
};
/**
- * struct lmb_property - Description of one region.
+ * struct lmb_region - Description of one region.
*
* @base: Base address of the region.
* @size: Size of the region
* @flags: memory region attributes
*/
-struct lmb_property {
+struct lmb_region {
phys_addr_t base;
phys_size_t size;
enum lmb_flags flags;
};
-/*
- * For regions size management, see LMB configuration in KConfig
- * all the #if test are done with CONFIG_LMB_USE_MAX_REGIONS (boolean)
- *
- * case 1. CONFIG_LMB_USE_MAX_REGIONS is defined (legacy mode)
- * => CONFIG_LMB_MAX_REGIONS is used to configure the region size,
- * directly in the array lmb_region.region[], with the same
- * configuration for memory and reserved regions.
+/**
+ * struct lmb - The LMB structure
*
- * case 2. CONFIG_LMB_USE_MAX_REGIONS is not defined, the size of each
- * region is configurated *independently* with
- * => CONFIG_LMB_MEMORY_REGIONS: struct lmb.memory_regions
- * => CONFIG_LMB_RESERVED_REGIONS: struct lmb.reserved_regions
- * lmb_region.region is only a pointer to the correct buffer,
- * initialized in lmb_init(). This configuration is useful to manage
- * more reserved memory regions with CONFIG_LMB_RESERVED_REGIONS.
+ * @free_mem: List of free memory regions
+ * @used_mem: List of used/reserved memory regions
*/
+struct lmb {
+ struct alist free_mem;
+ struct alist used_mem;
+};
/**
- * struct lmb_region - Description of a set of region.
+ * lmb_init() - Initialise the LMB module
+ *
+ * Initialise the LMB lists needed for keeping the memory map. There
+ * are two lists, in the form of alloced list (alist) data structures:
+ * one for the available memory, and one for the used memory. Initialise
+ * the two lists as part of board init. Add memory to the available
+ * memory list and reserve common areas by adding them to the used
+ * memory list.
*
- * @cnt: Number of regions.
- * @max: Size of the region array, max value of cnt.
- * @region: Array of the region properties
+ * Return: 0 on success, -ve on error
*/
-struct lmb_region {
- unsigned long cnt;
- unsigned long max;
-#if IS_ENABLED(CONFIG_LMB_USE_MAX_REGIONS)
- struct lmb_property region[CONFIG_LMB_MAX_REGIONS];
-#else
- struct lmb_property *region;
-#endif
-};
+int lmb_init(void);
/**
- * struct lmb - Logical memory block handle.
+ * lmb_add_memory() - Add memory range for LMB allocations
*
- * Clients provide storage for Logical memory block (lmb) handles.
- * The content of the structure is managed by the lmb library.
- * A lmb struct is initialized by lmb_init() functions.
- * The lmb struct is passed to all other lmb APIs.
+ * Add the entire available memory range to the pool of memory that
+ * can be used by the LMB module for allocations.
*
- * @memory: Description of memory regions.
- * @reserved: Description of reserved regions.
- * @memory_regions: Array of the memory regions (statically allocated)
- * @reserved_regions: Array of the reserved regions (statically allocated)
+ * Return: None
*/
-struct lmb {
- struct lmb_region memory;
- struct lmb_region reserved;
-#if !IS_ENABLED(CONFIG_LMB_USE_MAX_REGIONS)
- struct lmb_property memory_regions[CONFIG_LMB_MEMORY_REGIONS];
- struct lmb_property reserved_regions[CONFIG_LMB_RESERVED_REGIONS];
-#endif
-};
+void lmb_add_memory(void);
-void lmb_init(struct lmb *lmb);
-void lmb_init_and_reserve(struct lmb *lmb, struct bd_info *bd, void *fdt_blob);
-void lmb_init_and_reserve_range(struct lmb *lmb, phys_addr_t base,
- phys_size_t size, void *fdt_blob);
-long lmb_add(struct lmb *lmb, phys_addr_t base, phys_size_t size);
-long lmb_reserve(struct lmb *lmb, phys_addr_t base, phys_size_t size);
+long lmb_add(phys_addr_t base, phys_size_t size);
+long lmb_reserve(phys_addr_t base, phys_size_t size);
/**
* lmb_reserve_flags - Reserve one region with a specific flags bitfield.
*
- * @lmb: the logical memory block struct
* @base: base address of the memory region
* @size: size of the memory region
* @flags: flags for the memory region
* Return: 0 if OK, > 0 for coalesced region or a negative error code.
*/
-long lmb_reserve_flags(struct lmb *lmb, phys_addr_t base,
- phys_size_t size, enum lmb_flags flags);
-phys_addr_t lmb_alloc(struct lmb *lmb, phys_size_t size, ulong align);
-phys_addr_t lmb_alloc_base(struct lmb *lmb, phys_size_t size, ulong align,
- phys_addr_t max_addr);
-phys_addr_t __lmb_alloc_base(struct lmb *lmb, phys_size_t size, ulong align,
- phys_addr_t max_addr);
-phys_addr_t lmb_alloc_addr(struct lmb *lmb, phys_addr_t base, phys_size_t size);
-phys_size_t lmb_get_free_size(struct lmb *lmb, phys_addr_t addr);
-
-/**
- * lmb_is_reserved() - test if address is in reserved region
- *
- * The function checks if a reserved region comprising @addr exists.
- *
- * @lmb: the logical memory block struct
- * @addr: address to be tested
- * Return: 1 if reservation exists, 0 otherwise
- */
-int lmb_is_reserved(struct lmb *lmb, phys_addr_t addr);
+long lmb_reserve_flags(phys_addr_t base, phys_size_t size,
+ enum lmb_flags flags);
+phys_addr_t lmb_alloc(phys_size_t size, ulong align);
+phys_addr_t lmb_alloc_base(phys_size_t size, ulong align, phys_addr_t max_addr);
+phys_addr_t lmb_alloc_addr(phys_addr_t base, phys_size_t size);
+phys_size_t lmb_get_free_size(phys_addr_t addr);
/**
* lmb_is_reserved_flags() - test if address is in reserved region with flag bits set
@@ -134,21 +96,20 @@ int lmb_is_reserved(struct lmb *lmb, phys_addr_t addr);
* The function checks if a reserved region comprising @addr exists which has
* all flag bits set which are set in @flags.
*
- * @lmb: the logical memory block struct
* @addr: address to be tested
* @flags: bitmap with bits to be tested
* Return: 1 if matching reservation exists, 0 otherwise
*/
-int lmb_is_reserved_flags(struct lmb *lmb, phys_addr_t addr, int flags);
+int lmb_is_reserved_flags(phys_addr_t addr, int flags);
-long lmb_free(struct lmb *lmb, phys_addr_t base, phys_size_t size);
+long lmb_free(phys_addr_t base, phys_size_t size);
-void lmb_dump_all(struct lmb *lmb);
-void lmb_dump_all_force(struct lmb *lmb);
+void lmb_dump_all(void);
+void lmb_dump_all_force(void);
-void board_lmb_reserve(struct lmb *lmb);
-void arch_lmb_reserve(struct lmb *lmb);
-void arch_lmb_reserve_generic(struct lmb *lmb, ulong sp, ulong end, ulong align);
+struct lmb *lmb_get(void);
+int lmb_push(struct lmb *store);
+void lmb_pop(struct lmb *store);
#endif /* __KERNEL__ */
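
With the per-caller struct lmb handle removed, every routine above operates on the single library-private instance, so callers just include <lmb.h> and invoke the functions directly once board init has run lmb_init()/lmb_add_memory(). A short sketch of the new calling convention (the 2 GiB cap and buffer size are illustrative only):

#include <lmb.h>
#include <linux/sizes.h>

/* Reserve a scratch buffer below 2 GiB and release it again */
static int lmb_usage_sketch(void)
{
	phys_addr_t buf;

	/* 1 MiB, 4 KiB aligned, allocated from below 0x80000000 */
	buf = lmb_alloc_base(SZ_1M, SZ_4K, 0x80000000);
	if (!buf)
		return -1;

	/* ... use the buffer ... */

	return lmb_free(buf, SZ_1M) < 0 ? -1 : 0;
}
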
diff --git a/lib/Kconfig b/lib/Kconfig
index 2059219a120..5f282ecb543 100644
--- a/lib/Kconfig
+++ b/lib/Kconfig
@@ -403,7 +403,7 @@ config TRACE_EARLY_CALL_DEPTH_LIMIT
config TRACE_EARLY_ADDR
hex "Address of early trace buffer in U-Boot"
depends on TRACE_EARLY
- default 0x00100000
+ default 0x00200000
help
Sets the address of the early trace buffer in U-Boot. This memory
must be accessible before relocation.
@@ -1102,42 +1102,19 @@ config LMB
bool "Enable the logical memory blocks library (lmb)"
default y if ARC || ARM || M68K || MICROBLAZE || MIPS || \
NIOS2 || PPC || RISCV || SANDBOX || SH || X86 || XTENSA
+ select ARCH_MISC_INIT if PPC
help
- Support the library logical memory blocks.
-
-config LMB_USE_MAX_REGIONS
- bool "Use a common number of memory and reserved regions in lmb lib"
- default y
- help
- Define the number of supported memory regions in the library logical
- memory blocks.
- This feature allow to reduce the lmb library size by using compiler
- optimization when LMB_MEMORY_REGIONS == LMB_RESERVED_REGIONS.
-
-config LMB_MAX_REGIONS
- int "Number of memory and reserved regions in lmb lib"
- depends on LMB_USE_MAX_REGIONS
- default 16
- help
- Define the number of supported regions, memory and reserved, in the
- library logical memory blocks.
-
-config LMB_MEMORY_REGIONS
- int "Number of memory regions in lmb lib"
- depends on !LMB_USE_MAX_REGIONS
- default 8
- help
- Define the number of supported memory regions in the library logical
- memory blocks.
- The minimal value is CONFIG_NR_DRAM_BANKS.
+	  Support the logical memory blocks (LMB) library. This will
+	  require a malloc() implementation for defining the data
+	  structures needed for maintaining the LMB memory map.
-config LMB_RESERVED_REGIONS
- int "Number of reserved regions in lmb lib"
- depends on !LMB_USE_MAX_REGIONS
- default 8
+config SPL_LMB
+ bool "Enable LMB module for SPL"
+ depends on SPL && SPL_FRAMEWORK && SPL_SYS_MALLOC
help
- Define the number of supported reserved regions in the library logical
- memory blocks.
+ Enable support for Logical Memory Block library routines in
+ SPL. This will require a malloc() implementation for defining
+ the data structures needed for maintaining the LMB memory map.
config PHANDLE_CHECK_SEQ
bool "Enable phandle check while getting sequence number"
diff --git a/lib/Makefile b/lib/Makefile
index 81b503ab526..d300249f57c 100644
--- a/lib/Makefile
+++ b/lib/Makefile
@@ -118,7 +118,7 @@ obj-$(CONFIG_$(SPL_TPL_)OF_LIBFDT) += fdtdec.o fdtdec_common.o
obj-y += hang.o
obj-y += linux_compat.o
obj-y += linux_string.o
-obj-$(CONFIG_LMB) += lmb.o
+obj-$(CONFIG_$(SPL_TPL_)LMB) += lmb.o
obj-y += membuff.o
obj-$(CONFIG_REGEX) += slre.o
obj-y += string.o
diff --git a/lib/efi_loader/efi_dt_fixup.c b/lib/efi_loader/efi_dt_fixup.c
index 9886e6897cd..9d017804eea 100644
--- a/lib/efi_loader/efi_dt_fixup.c
+++ b/lib/efi_loader/efi_dt_fixup.c
@@ -172,7 +172,7 @@ efi_dt_fixup(struct efi_dt_fixup_protocol *this, void *dtb,
}
fdt_set_totalsize(dtb, *buffer_size);
- if (image_setup_libfdt(&img, dtb, NULL)) {
+ if (image_setup_libfdt(&img, dtb, false)) {
log_err("failed to process device tree\n");
ret = EFI_INVALID_PARAMETER;
goto out;
diff --git a/lib/efi_loader/efi_helper.c b/lib/efi_loader/efi_helper.c
index 916f43da26c..96f847652ec 100644
--- a/lib/efi_loader/efi_helper.c
+++ b/lib/efi_loader/efi_helper.c
@@ -514,7 +514,7 @@ efi_status_t efi_install_fdt(void *fdt)
return EFI_OUT_OF_RESOURCES;
}
- if (image_setup_libfdt(&img, fdt, NULL)) {
+ if (image_setup_libfdt(&img, fdt, false)) {
log_err("ERROR: failed to process device tree\n");
return EFI_LOAD_ERROR;
}
diff --git a/lib/lmb.c b/lib/lmb.c
index 44f98205310..3ed570fb29b 100644
--- a/lib/lmb.c
+++ b/lib/lmb.c
@@ -6,50 +6,72 @@
* Copyright (C) 2001 Peter Bergner.
*/
+#include <alist.h>
#include <efi_loader.h>
#include <image.h>
#include <mapmem.h>
#include <lmb.h>
#include <log.h>
#include <malloc.h>
+#include <spl.h>
#include <asm/global_data.h>
#include <asm/sections.h>
+#include <linux/kernel.h>
+#include <linux/sizes.h>
DECLARE_GLOBAL_DATA_PTR;
#define LMB_ALLOC_ANYWHERE 0
+#define LMB_ALIST_INITIAL_SIZE 4
-static void lmb_dump_region(struct lmb_region *rgn, char *name)
+static struct lmb lmb;
+
+static void lmb_print_region_flags(enum lmb_flags flags)
+{
+ u64 bitpos;
+ const char *flag_str[] = { "none", "no-map", "no-overwrite" };
+
+ do {
+ bitpos = flags ? fls(flags) - 1 : 0;
+ printf("%s", flag_str[bitpos]);
+ flags &= ~(1ull << bitpos);
+ puts(flags ? ", " : "\n");
+ } while (flags);
+}
+
+static void lmb_dump_region(struct alist *lmb_rgn_lst, char *name)
{
+ struct lmb_region *rgn = lmb_rgn_lst->data;
unsigned long long base, size, end;
enum lmb_flags flags;
int i;
- printf(" %s.cnt = 0x%lx / max = 0x%lx\n", name, rgn->cnt, rgn->max);
+ printf(" %s.count = 0x%x\n", name, lmb_rgn_lst->count);
- for (i = 0; i < rgn->cnt; i++) {
- base = rgn->region[i].base;
- size = rgn->region[i].size;
+ for (i = 0; i < lmb_rgn_lst->count; i++) {
+ base = rgn[i].base;
+ size = rgn[i].size;
end = base + size - 1;
- flags = rgn->region[i].flags;
+ flags = rgn[i].flags;
- printf(" %s[%d]\t[0x%llx-0x%llx], 0x%08llx bytes flags: %x\n",
- name, i, base, end, size, flags);
+ printf(" %s[%d]\t[0x%llx-0x%llx], 0x%08llx bytes flags: ",
+ name, i, base, end, size);
+ lmb_print_region_flags(flags);
}
}
-void lmb_dump_all_force(struct lmb *lmb)
+void lmb_dump_all_force(void)
{
printf("lmb_dump_all:\n");
- lmb_dump_region(&lmb->memory, "memory");
- lmb_dump_region(&lmb->reserved, "reserved");
+ lmb_dump_region(&lmb.free_mem, "memory");
+ lmb_dump_region(&lmb.used_mem, "reserved");
}
-void lmb_dump_all(struct lmb *lmb)
+void lmb_dump_all(void)
{
#ifdef DEBUG
- lmb_dump_all_force(lmb);
+ lmb_dump_all_force();
#endif
}
@@ -73,111 +95,71 @@ static long lmb_addrs_adjacent(phys_addr_t base1, phys_size_t size1,
return 0;
}
-static long lmb_regions_overlap(struct lmb_region *rgn, unsigned long r1,
+static long lmb_regions_overlap(struct alist *lmb_rgn_lst, unsigned long r1,
unsigned long r2)
{
- phys_addr_t base1 = rgn->region[r1].base;
- phys_size_t size1 = rgn->region[r1].size;
- phys_addr_t base2 = rgn->region[r2].base;
- phys_size_t size2 = rgn->region[r2].size;
+ struct lmb_region *rgn = lmb_rgn_lst->data;
+
+ phys_addr_t base1 = rgn[r1].base;
+ phys_size_t size1 = rgn[r1].size;
+ phys_addr_t base2 = rgn[r2].base;
+ phys_size_t size2 = rgn[r2].size;
return lmb_addrs_overlap(base1, size1, base2, size2);
}
-static long lmb_regions_adjacent(struct lmb_region *rgn, unsigned long r1,
+
+static long lmb_regions_adjacent(struct alist *lmb_rgn_lst, unsigned long r1,
unsigned long r2)
{
- phys_addr_t base1 = rgn->region[r1].base;
- phys_size_t size1 = rgn->region[r1].size;
- phys_addr_t base2 = rgn->region[r2].base;
- phys_size_t size2 = rgn->region[r2].size;
+ struct lmb_region *rgn = lmb_rgn_lst->data;
+
+ phys_addr_t base1 = rgn[r1].base;
+ phys_size_t size1 = rgn[r1].size;
+ phys_addr_t base2 = rgn[r2].base;
+ phys_size_t size2 = rgn[r2].size;
return lmb_addrs_adjacent(base1, size1, base2, size2);
}
-static void lmb_remove_region(struct lmb_region *rgn, unsigned long r)
+static void lmb_remove_region(struct alist *lmb_rgn_lst, unsigned long r)
{
unsigned long i;
+ struct lmb_region *rgn = lmb_rgn_lst->data;
- for (i = r; i < rgn->cnt - 1; i++) {
- rgn->region[i].base = rgn->region[i + 1].base;
- rgn->region[i].size = rgn->region[i + 1].size;
- rgn->region[i].flags = rgn->region[i + 1].flags;
+ for (i = r; i < lmb_rgn_lst->count - 1; i++) {
+ rgn[i].base = rgn[i + 1].base;
+ rgn[i].size = rgn[i + 1].size;
+ rgn[i].flags = rgn[i + 1].flags;
}
- rgn->cnt--;
+ lmb_rgn_lst->count--;
}
/* Assumption: base addr of region 1 < base addr of region 2 */
-static void lmb_coalesce_regions(struct lmb_region *rgn, unsigned long r1,
+static void lmb_coalesce_regions(struct alist *lmb_rgn_lst, unsigned long r1,
unsigned long r2)
{
- rgn->region[r1].size += rgn->region[r2].size;
- lmb_remove_region(rgn, r2);
+ struct lmb_region *rgn = lmb_rgn_lst->data;
+
+ rgn[r1].size += rgn[r2].size;
+ lmb_remove_region(lmb_rgn_lst, r2);
}
/*Assumption : base addr of region 1 < base addr of region 2*/
-static void lmb_fix_over_lap_regions(struct lmb_region *rgn, unsigned long r1,
- unsigned long r2)
+static void lmb_fix_over_lap_regions(struct alist *lmb_rgn_lst,
+ unsigned long r1, unsigned long r2)
{
- phys_addr_t base1 = rgn->region[r1].base;
- phys_size_t size1 = rgn->region[r1].size;
- phys_addr_t base2 = rgn->region[r2].base;
- phys_size_t size2 = rgn->region[r2].size;
+ struct lmb_region *rgn = lmb_rgn_lst->data;
+
+ phys_addr_t base1 = rgn[r1].base;
+ phys_size_t size1 = rgn[r1].size;
+ phys_addr_t base2 = rgn[r2].base;
+ phys_size_t size2 = rgn[r2].size;
if (base1 + size1 > base2 + size2) {
printf("This will not be a case any time\n");
return;
}
- rgn->region[r1].size = base2 + size2 - base1;
- lmb_remove_region(rgn, r2);
-}
-
-void lmb_init(struct lmb *lmb)
-{
-#if IS_ENABLED(CONFIG_LMB_USE_MAX_REGIONS)
- lmb->memory.max = CONFIG_LMB_MAX_REGIONS;
- lmb->reserved.max = CONFIG_LMB_MAX_REGIONS;
-#else
- lmb->memory.max = CONFIG_LMB_MEMORY_REGIONS;
- lmb->reserved.max = CONFIG_LMB_RESERVED_REGIONS;
- lmb->memory.region = lmb->memory_regions;
- lmb->reserved.region = lmb->reserved_regions;
-#endif
- lmb->memory.cnt = 0;
- lmb->reserved.cnt = 0;
-}
-
-void arch_lmb_reserve_generic(struct lmb *lmb, ulong sp, ulong end, ulong align)
-{
- ulong bank_end;
- int bank;
-
- /*
- * Reserve memory from aligned address below the bottom of U-Boot stack
- * until end of U-Boot area using LMB to prevent U-Boot from overwriting
- * that memory.
- */
- debug("## Current stack ends at 0x%08lx ", sp);
-
- /* adjust sp by 4K to be safe */
- sp -= align;
- for (bank = 0; bank < CONFIG_NR_DRAM_BANKS; bank++) {
- if (!gd->bd->bi_dram[bank].size ||
- sp < gd->bd->bi_dram[bank].start)
- continue;
- /* Watch out for RAM at end of address space! */
- bank_end = gd->bd->bi_dram[bank].start +
- gd->bd->bi_dram[bank].size - 1;
- if (sp > bank_end)
- continue;
- if (bank_end > end)
- bank_end = end - 1;
-
- lmb_reserve(lmb, sp, bank_end - sp + 1);
-
- if (gd->flags & GD_FLG_SKIP_RELOC)
- lmb_reserve(lmb, (phys_addr_t)(uintptr_t)_start, gd->mon_len);
-
- break;
- }
+ rgn[r1].size = base2 + size2 - base1;
+ lmb_remove_region(lmb_rgn_lst, r2);
}
/**
@@ -186,10 +168,9 @@ void arch_lmb_reserve_generic(struct lmb *lmb, ulong sp, ulong end, ulong align)
* Add reservations for all EFI memory areas that are not
* EFI_CONVENTIONAL_MEMORY.
*
- * @lmb: lmb environment
* Return: 0 on success, 1 on failure
*/
-static __maybe_unused int efi_lmb_reserve(struct lmb *lmb)
+static __maybe_unused int efi_lmb_reserve(void)
{
struct efi_mem_desc *memmap = NULL, *map;
efi_uintn_t i, map_size = 0;
@@ -201,8 +182,7 @@ static __maybe_unused int efi_lmb_reserve(struct lmb *lmb)
for (i = 0, map = memmap; i < map_size / sizeof(*map); ++map, ++i) {
if (map->type != EFI_CONVENTIONAL_MEMORY) {
- lmb_reserve_flags(lmb,
- map_to_sysmem((void *)(uintptr_t)
+ lmb_reserve_flags(map_to_sysmem((void *)(uintptr_t)
map->physical_start),
map->num_pages * EFI_PAGE_SIZE,
map->type == EFI_RESERVED_MEMORY_TYPE
@@ -214,64 +194,199 @@ static __maybe_unused int efi_lmb_reserve(struct lmb *lmb)
return 0;
}
-static void lmb_reserve_common(struct lmb *lmb, void *fdt_blob)
+static void lmb_reserve_uboot_region(void)
+{
+ int bank;
+ ulong end, bank_end;
+ phys_addr_t rsv_start;
+
+ rsv_start = gd->start_addr_sp - CONFIG_STACK_SIZE;
+ end = gd->ram_top;
+
+ /*
+	 * Reserve memory from an aligned address below the bottom of the U-Boot
+	 * stack until the end of RAM to prevent LMB from overwriting that memory.
+ */
+ debug("## Current stack ends at 0x%08lx ", (ulong)rsv_start);
+
+ /* adjust sp by 16K to be safe */
+ rsv_start -= SZ_16K;
+ for (bank = 0; bank < CONFIG_NR_DRAM_BANKS; bank++) {
+ if (!gd->bd->bi_dram[bank].size ||
+ rsv_start < gd->bd->bi_dram[bank].start)
+ continue;
+ /* Watch out for RAM at end of address space! */
+ bank_end = gd->bd->bi_dram[bank].start +
+ gd->bd->bi_dram[bank].size - 1;
+ if (rsv_start > bank_end)
+ continue;
+ if (bank_end > end)
+ bank_end = end - 1;
+
+ lmb_reserve_flags(rsv_start, bank_end - rsv_start + 1,
+ LMB_NOOVERWRITE);
+
+ if (gd->flags & GD_FLG_SKIP_RELOC)
+ lmb_reserve_flags((phys_addr_t)(uintptr_t)_start,
+ gd->mon_len, LMB_NOOVERWRITE);
+
+ break;
+ }
+}
+
+static void lmb_reserve_common(void *fdt_blob)
{
- arch_lmb_reserve(lmb);
- board_lmb_reserve(lmb);
+ lmb_reserve_uboot_region();
if (CONFIG_IS_ENABLED(OF_LIBFDT) && fdt_blob)
- boot_fdt_add_mem_rsv_regions(lmb, fdt_blob);
+ boot_fdt_add_mem_rsv_regions(fdt_blob);
if (CONFIG_IS_ENABLED(EFI_LOADER))
- efi_lmb_reserve(lmb);
+ efi_lmb_reserve();
+}
+
+static __maybe_unused void lmb_reserve_common_spl(void)
+{
+ phys_addr_t rsv_start;
+ phys_size_t rsv_size;
+
+ /*
+	 * Assume an SPL stack of 16KB. This must be
+	 * more than enough for the SPL stage.
+ */
+ if (IS_ENABLED(CONFIG_SPL_STACK_R_ADDR)) {
+ rsv_start = gd->start_addr_sp - 16384;
+ rsv_size = 16384;
+ lmb_reserve_flags(rsv_start, rsv_size, LMB_NOOVERWRITE);
+ }
+
+ if (IS_ENABLED(CONFIG_SPL_SEPARATE_BSS)) {
+ /* Reserve the bss region */
+ rsv_start = (phys_addr_t)(uintptr_t)__bss_start;
+ rsv_size = (phys_addr_t)(uintptr_t)__bss_end -
+ (phys_addr_t)(uintptr_t)__bss_start;
+ lmb_reserve_flags(rsv_start, rsv_size, LMB_NOOVERWRITE);
+ }
}
-/* Initialize the struct, add memory and call arch/board reserve functions */
-void lmb_init_and_reserve(struct lmb *lmb, struct bd_info *bd, void *fdt_blob)
+/**
+ * lmb_add_memory() - Add memory range for LMB allocations
+ *
+ * Add the entire available memory range to the pool of memory that
+ * can be used by the LMB module for allocations.
+ *
+ * Return: None
+ */
+void lmb_add_memory(void)
{
int i;
+ phys_size_t size;
+ phys_addr_t rgn_top;
+ u64 ram_top = gd->ram_top;
+ struct bd_info *bd = gd->bd;
- lmb_init(lmb);
+ /* Assume a 4GB ram_top if not defined */
+ if (!ram_top)
+ ram_top = 0x100000000ULL;
for (i = 0; i < CONFIG_NR_DRAM_BANKS; i++) {
- if (bd->bi_dram[i].size) {
- lmb_add(lmb, bd->bi_dram[i].start,
- bd->bi_dram[i].size);
+ size = bd->bi_dram[i].size;
+ if (size) {
+ if (bd->bi_dram[i].start > ram_top)
+ continue;
+
+ rgn_top = bd->bi_dram[i].start +
+ bd->bi_dram[i].size;
+
+ if (rgn_top > ram_top)
+ size -= rgn_top - ram_top;
+
+ lmb_add(bd->bi_dram[i].start, size);
}
}
-
- lmb_reserve_common(lmb, fdt_blob);
}
-/* Initialize the struct, add memory and call arch/board reserve functions */
-void lmb_init_and_reserve_range(struct lmb *lmb, phys_addr_t base,
- phys_size_t size, void *fdt_blob)
+static long lmb_resize_regions(struct alist *lmb_rgn_lst,
+ unsigned long idx_start,
+ phys_addr_t base, phys_size_t size)
{
- lmb_init(lmb);
- lmb_add(lmb, base, size);
- lmb_reserve_common(lmb, fdt_blob);
+ phys_size_t rgnsize;
+ unsigned long rgn_cnt, idx, idx_end;
+ phys_addr_t rgnbase, rgnend;
+ phys_addr_t mergebase, mergeend;
+ struct lmb_region *rgn = lmb_rgn_lst->data;
+
+ rgn_cnt = 0;
+ idx = idx_start;
+ idx_end = idx_start;
+
+ /*
+ * First thing to do is to identify how many regions
+ * the requested region overlaps.
+ * If the flags match, combine all these overlapping
+ * regions into a single region, and remove the merged
+ * regions.
+ */
+ while (idx <= lmb_rgn_lst->count - 1) {
+ rgnbase = rgn[idx].base;
+ rgnsize = rgn[idx].size;
+
+ if (lmb_addrs_overlap(base, size, rgnbase,
+ rgnsize)) {
+ if (rgn[idx].flags != LMB_NONE)
+ return -1;
+ rgn_cnt++;
+ idx_end = idx;
+ }
+ idx++;
+ }
+
+ /* The merged region's base and size */
+ rgnbase = rgn[idx_start].base;
+ mergebase = min(base, rgnbase);
+ rgnend = rgn[idx_end].base + rgn[idx_end].size;
+ mergeend = max(rgnend, (base + size));
+
+ rgn[idx_start].base = mergebase;
+ rgn[idx_start].size = mergeend - mergebase;
+
+ /* Now remove the merged regions */
+ while (--rgn_cnt)
+ lmb_remove_region(lmb_rgn_lst, idx_start + 1);
+
+ return 0;
}
-/* This routine called with relocation disabled. */
-static long lmb_add_region_flags(struct lmb_region *rgn, phys_addr_t base,
+/**
+ * lmb_add_region_flags() - Add an lmb region to the given list
+ * @lmb_rgn_lst: LMB list to which the region is to be added (free/used)
+ * @base: Start address of the region
+ * @size: Size of the region to be added
+ * @flags: Attributes of the LMB region
+ *
+ * Add a region of memory to the list. If the region does not exist, add
+ * it to the list. Depending on the attributes of the region to be added,
+ * the function might resize an already existing region or coalesce two
+ * adjacent regions.
+ *
+ * Return: 0 if the region addition was successful, -1 on failure
+ */
+static long lmb_add_region_flags(struct alist *lmb_rgn_lst, phys_addr_t base,
phys_size_t size, enum lmb_flags flags)
{
unsigned long coalesced = 0;
- long adjacent, i;
+ long ret, i;
+ struct lmb_region *rgn = lmb_rgn_lst->data;
- if (rgn->cnt == 0) {
- rgn->region[0].base = base;
- rgn->region[0].size = size;
- rgn->region[0].flags = flags;
- rgn->cnt = 1;
- return 0;
- }
+ if (alist_err(lmb_rgn_lst))
+ return -1;
/* First try and coalesce this LMB with another. */
- for (i = 0; i < rgn->cnt; i++) {
- phys_addr_t rgnbase = rgn->region[i].base;
- phys_size_t rgnsize = rgn->region[i].size;
- phys_size_t rgnflags = rgn->region[i].flags;
+ for (i = 0; i < lmb_rgn_lst->count; i++) {
+ phys_addr_t rgnbase = rgn[i].base;
+ phys_size_t rgnsize = rgn[i].size;
+ phys_size_t rgnflags = rgn[i].flags;
phys_addr_t end = base + size - 1;
phys_addr_t rgnend = rgnbase + rgnsize - 1;
if (rgnbase <= base && end <= rgnend) {
@@ -282,119 +397,127 @@ static long lmb_add_region_flags(struct lmb_region *rgn, phys_addr_t base,
return -1; /* regions with new flags */
}
- adjacent = lmb_addrs_adjacent(base, size, rgnbase, rgnsize);
- if (adjacent > 0) {
+ ret = lmb_addrs_adjacent(base, size, rgnbase, rgnsize);
+ if (ret > 0) {
if (flags != rgnflags)
break;
- rgn->region[i].base -= size;
- rgn->region[i].size += size;
+ rgn[i].base -= size;
+ rgn[i].size += size;
coalesced++;
break;
- } else if (adjacent < 0) {
+ } else if (ret < 0) {
if (flags != rgnflags)
break;
- rgn->region[i].size += size;
+ rgn[i].size += size;
coalesced++;
break;
} else if (lmb_addrs_overlap(base, size, rgnbase, rgnsize)) {
- /* regions overlap */
- return -1;
+ if (flags == LMB_NONE) {
+ ret = lmb_resize_regions(lmb_rgn_lst, i, base,
+ size);
+ if (ret < 0)
+ return -1;
+
+ coalesced++;
+ break;
+ } else {
+ return -1;
+ }
}
}
- if (i < rgn->cnt - 1 && rgn->region[i].flags == rgn->region[i + 1].flags) {
- if (lmb_regions_adjacent(rgn, i, i + 1)) {
- lmb_coalesce_regions(rgn, i, i + 1);
- coalesced++;
- } else if (lmb_regions_overlap(rgn, i, i + 1)) {
- /* fix overlapping area */
- lmb_fix_over_lap_regions(rgn, i, i + 1);
- coalesced++;
+ if (lmb_rgn_lst->count && i < lmb_rgn_lst->count - 1) {
+ rgn = lmb_rgn_lst->data;
+ if (rgn[i].flags == rgn[i + 1].flags) {
+ if (lmb_regions_adjacent(lmb_rgn_lst, i, i + 1)) {
+ lmb_coalesce_regions(lmb_rgn_lst, i, i + 1);
+ coalesced++;
+ } else if (lmb_regions_overlap(lmb_rgn_lst, i, i + 1)) {
+ /* fix overlapping area */
+ lmb_fix_over_lap_regions(lmb_rgn_lst, i, i + 1);
+ coalesced++;
+ }
}
}
if (coalesced)
return coalesced;
- if (rgn->cnt >= rgn->max)
+
+ if (alist_full(lmb_rgn_lst) &&
+ !alist_expand_by(lmb_rgn_lst, lmb_rgn_lst->alloc))
return -1;
+ rgn = lmb_rgn_lst->data;
/* Couldn't coalesce the LMB, so add it to the sorted table. */
- for (i = rgn->cnt-1; i >= 0; i--) {
- if (base < rgn->region[i].base) {
- rgn->region[i + 1].base = rgn->region[i].base;
- rgn->region[i + 1].size = rgn->region[i].size;
- rgn->region[i + 1].flags = rgn->region[i].flags;
+ for (i = lmb_rgn_lst->count; i >= 0; i--) {
+ if (i && base < rgn[i - 1].base) {
+ rgn[i] = rgn[i - 1];
} else {
- rgn->region[i + 1].base = base;
- rgn->region[i + 1].size = size;
- rgn->region[i + 1].flags = flags;
+ rgn[i].base = base;
+ rgn[i].size = size;
+ rgn[i].flags = flags;
break;
}
}
- if (base < rgn->region[0].base) {
- rgn->region[0].base = base;
- rgn->region[0].size = size;
- rgn->region[0].flags = flags;
- }
-
- rgn->cnt++;
+ lmb_rgn_lst->count++;
return 0;
}
-static long lmb_add_region(struct lmb_region *rgn, phys_addr_t base,
+static long lmb_add_region(struct alist *lmb_rgn_lst, phys_addr_t base,
phys_size_t size)
{
- return lmb_add_region_flags(rgn, base, size, LMB_NONE);
+ return lmb_add_region_flags(lmb_rgn_lst, base, size, LMB_NONE);
}
/* This routine may be called with relocation disabled. */
-long lmb_add(struct lmb *lmb, phys_addr_t base, phys_size_t size)
+long lmb_add(phys_addr_t base, phys_size_t size)
{
- struct lmb_region *_rgn = &(lmb->memory);
+ struct alist *lmb_rgn_lst = &lmb.free_mem;
- return lmb_add_region(_rgn, base, size);
+ return lmb_add_region(lmb_rgn_lst, base, size);
}
-long lmb_free(struct lmb *lmb, phys_addr_t base, phys_size_t size)
+long lmb_free(phys_addr_t base, phys_size_t size)
{
- struct lmb_region *rgn = &(lmb->reserved);
+ struct lmb_region *rgn;
+ struct alist *lmb_rgn_lst = &lmb.used_mem;
phys_addr_t rgnbegin, rgnend;
phys_addr_t end = base + size - 1;
int i;
rgnbegin = rgnend = 0; /* supress gcc warnings */
-
+ rgn = lmb_rgn_lst->data;
/* Find the region where (base, size) belongs to */
- for (i = 0; i < rgn->cnt; i++) {
- rgnbegin = rgn->region[i].base;
- rgnend = rgnbegin + rgn->region[i].size - 1;
+ for (i = 0; i < lmb_rgn_lst->count; i++) {
+ rgnbegin = rgn[i].base;
+ rgnend = rgnbegin + rgn[i].size - 1;
if ((rgnbegin <= base) && (end <= rgnend))
break;
}
/* Didn't find the region */
- if (i == rgn->cnt)
+ if (i == lmb_rgn_lst->count)
return -1;
/* Check to see if we are removing entire region */
if ((rgnbegin == base) && (rgnend == end)) {
- lmb_remove_region(rgn, i);
+ lmb_remove_region(lmb_rgn_lst, i);
return 0;
}
/* Check to see if region is matching at the front */
if (rgnbegin == base) {
- rgn->region[i].base = end + 1;
- rgn->region[i].size -= size;
+ rgn[i].base = end + 1;
+ rgn[i].size -= size;
return 0;
}
/* Check to see if the region is matching at the end */
if (rgnend == end) {
- rgn->region[i].size -= size;
+ rgn[i].size -= size;
return 0;
}
@@ -402,55 +525,37 @@ long lmb_free(struct lmb *lmb, phys_addr_t base, phys_size_t size)
* We need to split the entry - adjust the current one to the
* beginging of the hole and add the region after hole.
*/
- rgn->region[i].size = base - rgn->region[i].base;
- return lmb_add_region_flags(rgn, end + 1, rgnend - end,
- rgn->region[i].flags);
+ rgn[i].size = base - rgn[i].base;
+ return lmb_add_region_flags(lmb_rgn_lst, end + 1, rgnend - end,
+ rgn[i].flags);
}
-long lmb_reserve_flags(struct lmb *lmb, phys_addr_t base, phys_size_t size,
- enum lmb_flags flags)
+long lmb_reserve_flags(phys_addr_t base, phys_size_t size, enum lmb_flags flags)
{
- struct lmb_region *_rgn = &(lmb->reserved);
+ struct alist *lmb_rgn_lst = &lmb.used_mem;
- return lmb_add_region_flags(_rgn, base, size, flags);
+ return lmb_add_region_flags(lmb_rgn_lst, base, size, flags);
}
-long lmb_reserve(struct lmb *lmb, phys_addr_t base, phys_size_t size)
+long lmb_reserve(phys_addr_t base, phys_size_t size)
{
- return lmb_reserve_flags(lmb, base, size, LMB_NONE);
+ return lmb_reserve_flags(base, size, LMB_NONE);
}
-static long lmb_overlaps_region(struct lmb_region *rgn, phys_addr_t base,
+static long lmb_overlaps_region(struct alist *lmb_rgn_lst, phys_addr_t base,
phys_size_t size)
{
unsigned long i;
+ struct lmb_region *rgn = lmb_rgn_lst->data;
- for (i = 0; i < rgn->cnt; i++) {
- phys_addr_t rgnbase = rgn->region[i].base;
- phys_size_t rgnsize = rgn->region[i].size;
+ for (i = 0; i < lmb_rgn_lst->count; i++) {
+ phys_addr_t rgnbase = rgn[i].base;
+ phys_size_t rgnsize = rgn[i].size;
if (lmb_addrs_overlap(base, size, rgnbase, rgnsize))
break;
}
- return (i < rgn->cnt) ? i : -1;
-}
-
-phys_addr_t lmb_alloc(struct lmb *lmb, phys_size_t size, ulong align)
-{
- return lmb_alloc_base(lmb, size, align, LMB_ALLOC_ANYWHERE);
-}
-
-phys_addr_t lmb_alloc_base(struct lmb *lmb, phys_size_t size, ulong align, phys_addr_t max_addr)
-{
- phys_addr_t alloc;
-
- alloc = __lmb_alloc_base(lmb, size, align, max_addr);
-
- if (alloc == 0)
- printf("ERROR: Failed to allocate 0x%lx bytes below 0x%lx.\n",
- (ulong)size, (ulong)max_addr);
-
- return alloc;
+ return (i < lmb_rgn_lst->count) ? i : -1;
}
static phys_addr_t lmb_align_down(phys_addr_t addr, phys_size_t size)
@@ -458,15 +563,18 @@ static phys_addr_t lmb_align_down(phys_addr_t addr, phys_size_t size)
return addr & ~(size - 1);
}
-phys_addr_t __lmb_alloc_base(struct lmb *lmb, phys_size_t size, ulong align, phys_addr_t max_addr)
+static phys_addr_t __lmb_alloc_base(phys_size_t size, ulong align,
+ phys_addr_t max_addr, enum lmb_flags flags)
{
long i, rgn;
phys_addr_t base = 0;
phys_addr_t res_base;
+ struct lmb_region *lmb_used = lmb.used_mem.data;
+ struct lmb_region *lmb_memory = lmb.free_mem.data;
- for (i = lmb->memory.cnt - 1; i >= 0; i--) {
- phys_addr_t lmbbase = lmb->memory.region[i].base;
- phys_size_t lmbsize = lmb->memory.region[i].size;
+ for (i = lmb.free_mem.count - 1; i >= 0; i--) {
+ phys_addr_t lmbbase = lmb_memory[i].base;
+ phys_size_t lmbsize = lmb_memory[i].size;
if (lmbsize < size)
continue;
@@ -482,15 +590,16 @@ phys_addr_t __lmb_alloc_base(struct lmb *lmb, phys_size_t size, ulong align, phy
continue;
while (base && lmbbase <= base) {
- rgn = lmb_overlaps_region(&lmb->reserved, base, size);
+ rgn = lmb_overlaps_region(&lmb.used_mem, base, size);
if (rgn < 0) {
/* This area isn't reserved, take it */
- if (lmb_add_region(&lmb->reserved, base,
- size) < 0)
+ if (lmb_add_region_flags(&lmb.used_mem, base,
+ size, flags) < 0)
return 0;
return base;
}
- res_base = lmb->reserved.region[rgn].base;
+
+ res_base = lmb_used[rgn].base;
if (res_base < size)
break;
base = lmb_align_down(res_base - size, align);
@@ -499,83 +608,177 @@ phys_addr_t __lmb_alloc_base(struct lmb *lmb, phys_size_t size, ulong align, phy
return 0;
}
-/*
- * Try to allocate a specific address range: must be in defined memory but not
- * reserved
- */
-phys_addr_t lmb_alloc_addr(struct lmb *lmb, phys_addr_t base, phys_size_t size)
+phys_addr_t lmb_alloc(phys_size_t size, ulong align)
+{
+ return lmb_alloc_base(size, align, LMB_ALLOC_ANYWHERE);
+}
+
+phys_addr_t lmb_alloc_base(phys_size_t size, ulong align, phys_addr_t max_addr)
+{
+ phys_addr_t alloc;
+
+ alloc = __lmb_alloc_base(size, align, max_addr, LMB_NONE);
+
+ if (alloc == 0)
+ printf("ERROR: Failed to allocate 0x%lx bytes below 0x%lx.\n",
+ (ulong)size, (ulong)max_addr);
+
+ return alloc;
+}
+
+static phys_addr_t __lmb_alloc_addr(phys_addr_t base, phys_size_t size,
+ enum lmb_flags flags)
{
long rgn;
+ struct lmb_region *lmb_memory = lmb.free_mem.data;
/* Check if the requested address is in one of the memory regions */
- rgn = lmb_overlaps_region(&lmb->memory, base, size);
+ rgn = lmb_overlaps_region(&lmb.free_mem, base, size);
if (rgn >= 0) {
/*
* Check if the requested end address is in the same memory
* region we found.
*/
- if (lmb_addrs_overlap(lmb->memory.region[rgn].base,
- lmb->memory.region[rgn].size,
+ if (lmb_addrs_overlap(lmb_memory[rgn].base,
+ lmb_memory[rgn].size,
base + size - 1, 1)) {
/* ok, reserve the memory */
- if (lmb_reserve(lmb, base, size) >= 0)
+ if (lmb_reserve_flags(base, size, flags) >= 0)
return base;
}
}
+
return 0;
}
+/*
+ * Try to allocate a specific address range: must be in defined memory but not
+ * reserved
+ */
+phys_addr_t lmb_alloc_addr(phys_addr_t base, phys_size_t size)
+{
+ return __lmb_alloc_addr(base, size, LMB_NONE);
+}
+
/* Return number of bytes from a given address that are free */
-phys_size_t lmb_get_free_size(struct lmb *lmb, phys_addr_t addr)
+phys_size_t lmb_get_free_size(phys_addr_t addr)
{
int i;
long rgn;
+ struct lmb_region *lmb_used = lmb.used_mem.data;
+ struct lmb_region *lmb_memory = lmb.free_mem.data;
/* check if the requested address is in the memory regions */
- rgn = lmb_overlaps_region(&lmb->memory, addr, 1);
+ rgn = lmb_overlaps_region(&lmb.free_mem, addr, 1);
if (rgn >= 0) {
- for (i = 0; i < lmb->reserved.cnt; i++) {
- if (addr < lmb->reserved.region[i].base) {
+ for (i = 0; i < lmb.used_mem.count; i++) {
+ if (addr < lmb_used[i].base) {
/* first reserved range > requested address */
- return lmb->reserved.region[i].base - addr;
+ return lmb_used[i].base - addr;
}
- if (lmb->reserved.region[i].base +
- lmb->reserved.region[i].size > addr) {
+ if (lmb_used[i].base +
+ lmb_used[i].size > addr) {
/* requested addr is in this reserved range */
return 0;
}
}
/* if we come here: no reserved ranges above requested addr */
- return lmb->memory.region[lmb->memory.cnt - 1].base +
- lmb->memory.region[lmb->memory.cnt - 1].size - addr;
+ return lmb_memory[lmb.free_mem.count - 1].base +
+ lmb_memory[lmb.free_mem.count - 1].size - addr;
}
return 0;
}
-int lmb_is_reserved_flags(struct lmb *lmb, phys_addr_t addr, int flags)
+int lmb_is_reserved_flags(phys_addr_t addr, int flags)
{
int i;
+ struct lmb_region *lmb_used = lmb.used_mem.data;
- for (i = 0; i < lmb->reserved.cnt; i++) {
- phys_addr_t upper = lmb->reserved.region[i].base +
- lmb->reserved.region[i].size - 1;
- if ((addr >= lmb->reserved.region[i].base) && (addr <= upper))
- return (lmb->reserved.region[i].flags & flags) == flags;
+ for (i = 0; i < lmb.used_mem.count; i++) {
+ phys_addr_t upper = lmb_used[i].base +
+ lmb_used[i].size - 1;
+ if (addr >= lmb_used[i].base && addr <= upper)
+ return (lmb_used[i].flags & flags) == flags;
}
return 0;
}
-int lmb_is_reserved(struct lmb *lmb, phys_addr_t addr)
+static int lmb_setup(void)
{
- return lmb_is_reserved_flags(lmb, addr, LMB_NONE);
+ bool ret;
+
+ ret = alist_init(&lmb.free_mem, sizeof(struct lmb_region),
+ (uint)LMB_ALIST_INITIAL_SIZE);
+ if (!ret) {
+ log_debug("Unable to initialise the list for LMB free memory\n");
+ return -ENOMEM;
+ }
+
+ ret = alist_init(&lmb.used_mem, sizeof(struct lmb_region),
+ (uint)LMB_ALIST_INITIAL_SIZE);
+ if (!ret) {
+ log_debug("Unable to initialise the list for LMB used memory\n");
+ return -ENOMEM;
+ }
+
+ return 0;
}
-__weak void board_lmb_reserve(struct lmb *lmb)
+/**
+ * lmb_init() - Initialise the LMB module
+ *
+ * Initialise the LMB lists needed for keeping the memory map. There
+ * are two lists, in the form of alloced list (alist) data structures:
+ * one for the available memory, and one for the used memory. Initialise
+ * the two lists as part of board init. Add memory to the available
+ * memory list and reserve common areas by adding them to the used
+ * memory list.
+ *
+ * Return: 0 on success, -ve on error
+ */
+int lmb_init(void)
{
- /* please define platform specific board_lmb_reserve() */
+ int ret;
+
+ ret = lmb_setup();
+ if (ret) {
+ log_info("Unable to init LMB\n");
+ return ret;
+ }
+
+ lmb_add_memory();
+
+ /* Reserve the U-Boot image region once U-Boot has relocated */
+ if (spl_phase() == PHASE_SPL)
+ lmb_reserve_common_spl();
+ else if (spl_phase() == PHASE_BOARD_R)
+ lmb_reserve_common((void *)gd->fdt_blob);
+
+ return 0;
+}
+
+#if CONFIG_IS_ENABLED(UNIT_TEST)
+struct lmb *lmb_get(void)
+{
+ return &lmb;
+}
+
+int lmb_push(struct lmb *store)
+{
+ int ret;
+
+ *store = lmb;
+ ret = lmb_setup();
+ if (ret)
+ return ret;
+
+ return 0;
}
-__weak void arch_lmb_reserve(struct lmb *lmb)
+void lmb_pop(struct lmb *store)
{
- /* please define platform specific arch_lmb_reserve() */
+ alist_uninit(&lmb.free_mem);
+ alist_uninit(&lmb.used_mem);
+ lmb = *store;
}
+#endif /* UNIT_TEST */
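
Because the memory map is now a single static instance, tests need a way to run against a private copy: lmb_push() snapshots the current lists and re-initialises empty ones, and lmb_pop() discards the test lists and restores the snapshot. A hedged sketch of the pattern the unit tests below follow (addresses are illustrative; the helpers are only built with UNIT_TEST enabled):

#include <lmb.h>
#include <test/test.h>
#include <test/ut.h>

/* Run checks against a private LMB map, then restore the global one */
static int lmb_scratch_test(struct unit_test_state *uts)
{
	struct lmb store;

	ut_assertok(lmb_push(&store));		/* save state, start empty */

	ut_asserteq(0, lmb_add(0x40000000, 0x10000000));
	ut_asserteq(0, lmb_reserve(0x40000000, 0x1000));

	lmb_pop(&store);			/* drop test lists, restore */

	return 0;
}
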
diff --git a/net/tftp.c b/net/tftp.c
index 2e073183d5a..b5d227d8bc2 100644
--- a/net/tftp.c
+++ b/net/tftp.c
@@ -82,9 +82,7 @@ static ulong tftp_block_wrap;
static ulong tftp_block_wrap_offset;
static int tftp_state;
static ulong tftp_load_addr;
-#ifdef CONFIG_LMB
static ulong tftp_load_size;
-#endif
#ifdef CONFIG_TFTP_TSIZE
/* The file size reported by the server */
static int tftp_tsize;
@@ -160,19 +158,20 @@ static inline int store_block(int block, uchar *src, unsigned int len)
ulong store_addr = tftp_load_addr + offset;
void *ptr;
-#ifdef CONFIG_LMB
- ulong end_addr = tftp_load_addr + tftp_load_size;
+ if (CONFIG_IS_ENABLED(LMB)) {
+ ulong end_addr = tftp_load_addr + tftp_load_size;
- if (!end_addr)
- end_addr = ULONG_MAX;
+ if (!end_addr)
+ end_addr = ULONG_MAX;
- if (store_addr < tftp_load_addr ||
- store_addr + len > end_addr) {
- puts("\nTFTP error: ");
- puts("trying to overwrite reserved memory...\n");
- return -1;
+ if (store_addr < tftp_load_addr ||
+ store_addr + len > end_addr) {
+ puts("\nTFTP error: ");
+ puts("trying to overwrite reserved memory...\n");
+ return -1;
+ }
}
-#endif
+
ptr = map_sysmem(store_addr, len);
memcpy(ptr, src, len);
unmap_sysmem(ptr);
@@ -716,18 +715,16 @@ static void tftp_timeout_handler(void)
/* Initialize tftp_load_addr and tftp_load_size from image_load_addr and lmb */
static int tftp_init_load_addr(void)
{
-#ifdef CONFIG_LMB
- struct lmb lmb;
- phys_size_t max_size;
+ if (CONFIG_IS_ENABLED(LMB)) {
+ phys_size_t max_size;
- lmb_init_and_reserve(&lmb, gd->bd, (void *)gd->fdt_blob);
+ max_size = lmb_get_free_size(image_load_addr);
+ if (!max_size)
+ return -1;
- max_size = lmb_get_free_size(&lmb, image_load_addr);
- if (!max_size)
- return -1;
+ tftp_load_size = max_size;
+ }
- tftp_load_size = max_size;
-#endif
tftp_load_addr = image_load_addr;
return 0;
}
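
tftp_init_load_addr() now asks the global LMB map how much room is free at the load address, and store_block() clamps every write against that size. The same guard in isolation, as a hedged sketch (check_store() is an illustrative name, not a function added by this patch):

#include <lmb.h>

/* Reject a write of len bytes at offset if it would leave the free window */
static int check_store(phys_addr_t load_addr, phys_size_t offset,
		       phys_size_t len)
{
	phys_size_t max_size = lmb_get_free_size(load_addr);

	if (!max_size)
		return -1;	/* the load address itself is reserved */

	if (offset + len > max_size)
		return -1;	/* would spill into reserved memory */

	return 0;
}
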
diff --git a/net/wget.c b/net/wget.c
index 945bfd26693..4a168641c65 100644
--- a/net/wget.c
+++ b/net/wget.c
@@ -73,12 +73,9 @@ static ulong wget_load_size;
*/
static int wget_init_load_size(void)
{
- struct lmb lmb;
phys_size_t max_size;
- lmb_init_and_reserve(&lmb, gd->bd, (void *)gd->fdt_blob);
-
- max_size = lmb_get_free_size(&lmb, image_load_addr);
+ max_size = lmb_get_free_size(image_load_addr);
if (!max_size)
return -1;
@@ -99,7 +96,7 @@ static inline int store_block(uchar *src, unsigned int offset, unsigned int len)
ulong newsize = offset + len;
uchar *ptr;
- if (IS_ENABLED(CONFIG_LMB)) {
+ if (CONFIG_IS_ENABLED(LMB)) {
ulong end_addr = image_load_addr + wget_load_size;
if (!end_addr)
@@ -496,7 +493,7 @@ void wget_start(void)
debug_cond(DEBUG_WGET,
"\nwget:Load address: 0x%lx\nLoading: *\b", image_load_addr);
- if (IS_ENABLED(CONFIG_LMB)) {
+ if (CONFIG_IS_ENABLED(LMB)) {
if (wget_init_load_size()) {
printf("\nwget error: ");
printf("trying to overwrite reserved memory...\n");
diff --git a/test/cmd/bdinfo.c b/test/cmd/bdinfo.c
index e624226cacf..770b3bfb560 100644
--- a/test/cmd/bdinfo.c
+++ b/test/cmd/bdinfo.c
@@ -5,6 +5,7 @@
* Copyright 2023 Marek Vasut <marek.vasut+renesas@mailbox.org>
*/
+#include <alist.h>
#include <console.h>
#include <mapmem.h>
#include <asm/global_data.h>
@@ -99,44 +100,39 @@ static int test_video_info(struct unit_test_state *uts)
}
static int lmb_test_dump_region(struct unit_test_state *uts,
- struct lmb_region *rgn, char *name)
+ struct alist *lmb_rgn_lst, char *name)
{
+ struct lmb_region *rgn = lmb_rgn_lst->data;
unsigned long long base, size, end;
enum lmb_flags flags;
int i;
- ut_assert_nextline(" %s.cnt = 0x%lx / max = 0x%lx", name, rgn->cnt, rgn->max);
+ ut_assert_nextline(" %s.count = 0x%hx", name, lmb_rgn_lst->count);
- for (i = 0; i < rgn->cnt; i++) {
- base = rgn->region[i].base;
- size = rgn->region[i].size;
+ for (i = 0; i < lmb_rgn_lst->count; i++) {
+ base = rgn[i].base;
+ size = rgn[i].size;
end = base + size - 1;
- flags = rgn->region[i].flags;
-
- /*
- * this entry includes the stack (get_sp()) on many platforms
- * so will different each time lmb_init_and_reserve() is called.
- * We could instead have the bdinfo command put its lmb region
- * in a known location, so we can check it directly, rather than
- * calling lmb_init_and_reserve() to create a new (and hopefully
- * identical one). But for now this seems good enough.
- */
+ flags = rgn[i].flags;
+
if (!IS_ENABLED(CONFIG_SANDBOX) && i == 3) {
ut_assert_nextlinen(" %s[%d]\t[", name, i);
continue;
}
- ut_assert_nextline(" %s[%d]\t[0x%llx-0x%llx], 0x%08llx bytes flags: %x",
- name, i, base, end, size, flags);
+ ut_assert_nextlinen(" %s[%d]\t[0x%llx-0x%llx], 0x%08llx bytes flags: ",
+ name, i, base, end, size);
}
return 0;
}
-static int lmb_test_dump_all(struct unit_test_state *uts, struct lmb *lmb)
+static int lmb_test_dump_all(struct unit_test_state *uts)
{
+ struct lmb *lmb = lmb_get();
+
ut_assert_nextline("lmb_dump_all:");
- ut_assertok(lmb_test_dump_region(uts, &lmb->memory, "memory"));
- ut_assertok(lmb_test_dump_region(uts, &lmb->reserved, "reserved"));
+ ut_assertok(lmb_test_dump_region(uts, &lmb->free_mem, "memory"));
+ ut_assertok(lmb_test_dump_region(uts, &lmb->used_mem, "reserved"));
return 0;
}
@@ -195,10 +191,7 @@ static int bdinfo_test_all(struct unit_test_state *uts)
#endif
if (IS_ENABLED(CONFIG_LMB) && gd->fdt_blob) {
- struct lmb lmb;
-
- lmb_init_and_reserve(&lmb, gd->bd, (void *)gd->fdt_blob);
- ut_assertok(lmb_test_dump_all(uts, &lmb));
+ ut_assertok(lmb_test_dump_all(uts));
if (IS_ENABLED(CONFIG_OF_REAL))
ut_assert_nextline("devicetree = %s", fdtdec_get_srcname());
}
diff --git a/test/lib/lmb.c b/test/lib/lmb.c
index 3c66138f732..b2c54fb4bcb 100644
--- a/test/lib/lmb.c
+++ b/test/lib/lmb.c
@@ -3,6 +3,7 @@
* (C) Copyright 2018 Simon Goldschmidt
*/
+#include <alist.h>
#include <dm.h>
#include <lmb.h>
#include <log.h>
@@ -12,50 +13,64 @@
#include <test/test.h>
#include <test/ut.h>
-static inline bool lmb_is_nomap(struct lmb_property *m)
+static inline bool lmb_is_nomap(struct lmb_region *m)
{
return m->flags & LMB_NOMAP;
}
-static int check_lmb(struct unit_test_state *uts, struct lmb *lmb,
- phys_addr_t ram_base, phys_size_t ram_size,
- unsigned long num_reserved,
+static int check_lmb(struct unit_test_state *uts, struct alist *mem_lst,
+ struct alist *used_lst, phys_addr_t ram_base,
+ phys_size_t ram_size, unsigned long num_reserved,
phys_addr_t base1, phys_size_t size1,
phys_addr_t base2, phys_size_t size2,
phys_addr_t base3, phys_size_t size3)
{
+ struct lmb_region *mem, *used;
+
+ mem = mem_lst->data;
+ used = used_lst->data;
+
if (ram_size) {
- ut_asserteq(lmb->memory.cnt, 1);
- ut_asserteq(lmb->memory.region[0].base, ram_base);
- ut_asserteq(lmb->memory.region[0].size, ram_size);
+ ut_asserteq(mem_lst->count, 1);
+ ut_asserteq(mem[0].base, ram_base);
+ ut_asserteq(mem[0].size, ram_size);
}
- ut_asserteq(lmb->reserved.cnt, num_reserved);
+ ut_asserteq(used_lst->count, num_reserved);
if (num_reserved > 0) {
- ut_asserteq(lmb->reserved.region[0].base, base1);
- ut_asserteq(lmb->reserved.region[0].size, size1);
+ ut_asserteq(used[0].base, base1);
+ ut_asserteq(used[0].size, size1);
}
if (num_reserved > 1) {
- ut_asserteq(lmb->reserved.region[1].base, base2);
- ut_asserteq(lmb->reserved.region[1].size, size2);
+ ut_asserteq(used[1].base, base2);
+ ut_asserteq(used[1].size, size2);
}
if (num_reserved > 2) {
- ut_asserteq(lmb->reserved.region[2].base, base3);
- ut_asserteq(lmb->reserved.region[2].size, size3);
+ ut_asserteq(used[2].base, base3);
+ ut_asserteq(used[2].size, size3);
}
return 0;
}
-#define ASSERT_LMB(lmb, ram_base, ram_size, num_reserved, base1, size1, \
+#define ASSERT_LMB(mem_lst, used_lst, ram_base, ram_size, num_reserved, base1, size1, \
base2, size2, base3, size3) \
- ut_assert(!check_lmb(uts, lmb, ram_base, ram_size, \
+ ut_assert(!check_lmb(uts, mem_lst, used_lst, ram_base, ram_size, \
num_reserved, base1, size1, base2, size2, base3, \
size3))
-/*
- * Test helper function that reserves 64 KiB somewhere in the simulated RAM and
- * then does some alloc + free tests.
- */
+static int setup_lmb_test(struct unit_test_state *uts, struct lmb *store,
+ struct alist **mem_lstp, struct alist **used_lstp)
+{
+ struct lmb *lmb;
+
+ ut_assertok(lmb_push(store));
+ lmb = lmb_get();
+ *mem_lstp = &lmb->free_mem;
+ *used_lstp = &lmb->used_mem;
+
+ return 0;
+}
+
static int test_multi_alloc(struct unit_test_state *uts, const phys_addr_t ram,
const phys_size_t ram_size, const phys_addr_t ram0,
const phys_size_t ram0_size,
@@ -64,9 +79,11 @@ static int test_multi_alloc(struct unit_test_state *uts, const phys_addr_t ram,
const phys_addr_t ram_end = ram + ram_size;
const phys_addr_t alloc_64k_end = alloc_64k_addr + 0x10000;
- struct lmb lmb;
long ret;
+ struct alist *mem_lst, *used_lst;
+ struct lmb_region *mem, *used;
phys_addr_t a, a2, b, b2, c, d;
+ struct lmb store;
/* check for overflow */
ut_assert(ram_end == 0 || ram_end > ram);
@@ -75,106 +92,110 @@ static int test_multi_alloc(struct unit_test_state *uts, const phys_addr_t ram,
ut_assert(alloc_64k_addr >= ram + 8);
ut_assert(alloc_64k_end <= ram_end - 8);
- lmb_init(&lmb);
+ ut_assertok(setup_lmb_test(uts, &store, &mem_lst, &used_lst));
+ mem = mem_lst->data;
+ used = used_lst->data;
if (ram0_size) {
- ret = lmb_add(&lmb, ram0, ram0_size);
+ ret = lmb_add(ram0, ram0_size);
ut_asserteq(ret, 0);
}
- ret = lmb_add(&lmb, ram, ram_size);
+ ret = lmb_add(ram, ram_size);
ut_asserteq(ret, 0);
if (ram0_size) {
- ut_asserteq(lmb.memory.cnt, 2);
- ut_asserteq(lmb.memory.region[0].base, ram0);
- ut_asserteq(lmb.memory.region[0].size, ram0_size);
- ut_asserteq(lmb.memory.region[1].base, ram);
- ut_asserteq(lmb.memory.region[1].size, ram_size);
+ ut_asserteq(mem_lst->count, 2);
+ ut_asserteq(mem[0].base, ram0);
+ ut_asserteq(mem[0].size, ram0_size);
+ ut_asserteq(mem[1].base, ram);
+ ut_asserteq(mem[1].size, ram_size);
} else {
- ut_asserteq(lmb.memory.cnt, 1);
- ut_asserteq(lmb.memory.region[0].base, ram);
- ut_asserteq(lmb.memory.region[0].size, ram_size);
+ ut_asserteq(mem_lst->count, 1);
+ ut_asserteq(mem[0].base, ram);
+ ut_asserteq(mem[0].size, ram_size);
}
/* reserve 64KiB somewhere */
- ret = lmb_reserve(&lmb, alloc_64k_addr, 0x10000);
+ ret = lmb_reserve(alloc_64k_addr, 0x10000);
ut_asserteq(ret, 0);
- ASSERT_LMB(&lmb, 0, 0, 1, alloc_64k_addr, 0x10000,
+ ASSERT_LMB(mem_lst, used_lst, 0, 0, 1, alloc_64k_addr, 0x10000,
0, 0, 0, 0);
/* allocate somewhere, should be at the end of RAM */
- a = lmb_alloc(&lmb, 4, 1);
+ a = lmb_alloc(4, 1);
ut_asserteq(a, ram_end - 4);
- ASSERT_LMB(&lmb, 0, 0, 2, alloc_64k_addr, 0x10000,
+ ASSERT_LMB(mem_lst, used_lst, 0, 0, 2, alloc_64k_addr, 0x10000,
ram_end - 4, 4, 0, 0);
/* alloc below end of reserved region -> below reserved region */
- b = lmb_alloc_base(&lmb, 4, 1, alloc_64k_end);
+ b = lmb_alloc_base(4, 1, alloc_64k_end);
ut_asserteq(b, alloc_64k_addr - 4);
- ASSERT_LMB(&lmb, 0, 0, 2,
+ ASSERT_LMB(mem_lst, used_lst, 0, 0, 2,
alloc_64k_addr - 4, 0x10000 + 4, ram_end - 4, 4, 0, 0);
/* 2nd time */
- c = lmb_alloc(&lmb, 4, 1);
+ c = lmb_alloc(4, 1);
ut_asserteq(c, ram_end - 8);
- ASSERT_LMB(&lmb, 0, 0, 2,
+ ASSERT_LMB(mem_lst, used_lst, 0, 0, 2,
alloc_64k_addr - 4, 0x10000 + 4, ram_end - 8, 8, 0, 0);
- d = lmb_alloc_base(&lmb, 4, 1, alloc_64k_end);
+ d = lmb_alloc_base(4, 1, alloc_64k_end);
ut_asserteq(d, alloc_64k_addr - 8);
- ASSERT_LMB(&lmb, 0, 0, 2,
+ ASSERT_LMB(mem_lst, used_lst, 0, 0, 2,
alloc_64k_addr - 8, 0x10000 + 8, ram_end - 8, 8, 0, 0);
- ret = lmb_free(&lmb, a, 4);
+ ret = lmb_free(a, 4);
ut_asserteq(ret, 0);
- ASSERT_LMB(&lmb, 0, 0, 2,
+ ASSERT_LMB(mem_lst, used_lst, 0, 0, 2,
alloc_64k_addr - 8, 0x10000 + 8, ram_end - 8, 4, 0, 0);
/* allocate again to ensure we get the same address */
- a2 = lmb_alloc(&lmb, 4, 1);
+ a2 = lmb_alloc(4, 1);
ut_asserteq(a, a2);
- ASSERT_LMB(&lmb, 0, 0, 2,
+ ASSERT_LMB(mem_lst, used_lst, 0, 0, 2,
alloc_64k_addr - 8, 0x10000 + 8, ram_end - 8, 8, 0, 0);
- ret = lmb_free(&lmb, a2, 4);
+ ret = lmb_free(a2, 4);
ut_asserteq(ret, 0);
- ASSERT_LMB(&lmb, 0, 0, 2,
+ ASSERT_LMB(mem_lst, used_lst, 0, 0, 2,
alloc_64k_addr - 8, 0x10000 + 8, ram_end - 8, 4, 0, 0);
- ret = lmb_free(&lmb, b, 4);
+ ret = lmb_free(b, 4);
ut_asserteq(ret, 0);
- ASSERT_LMB(&lmb, 0, 0, 3,
+ ASSERT_LMB(mem_lst, used_lst, 0, 0, 3,
alloc_64k_addr - 8, 4, alloc_64k_addr, 0x10000,
ram_end - 8, 4);
/* allocate again to ensure we get the same address */
- b2 = lmb_alloc_base(&lmb, 4, 1, alloc_64k_end);
+ b2 = lmb_alloc_base(4, 1, alloc_64k_end);
ut_asserteq(b, b2);
- ASSERT_LMB(&lmb, 0, 0, 2,
+ ASSERT_LMB(mem_lst, used_lst, 0, 0, 2,
alloc_64k_addr - 8, 0x10000 + 8, ram_end - 8, 4, 0, 0);
- ret = lmb_free(&lmb, b2, 4);
+ ret = lmb_free(b2, 4);
ut_asserteq(ret, 0);
- ASSERT_LMB(&lmb, 0, 0, 3,
+ ASSERT_LMB(mem_lst, used_lst, 0, 0, 3,
alloc_64k_addr - 8, 4, alloc_64k_addr, 0x10000,
ram_end - 8, 4);
- ret = lmb_free(&lmb, c, 4);
+ ret = lmb_free(c, 4);
ut_asserteq(ret, 0);
- ASSERT_LMB(&lmb, 0, 0, 2,
+ ASSERT_LMB(mem_lst, used_lst, 0, 0, 2,
alloc_64k_addr - 8, 4, alloc_64k_addr, 0x10000, 0, 0);
- ret = lmb_free(&lmb, d, 4);
+ ret = lmb_free(d, 4);
ut_asserteq(ret, 0);
- ASSERT_LMB(&lmb, 0, 0, 1, alloc_64k_addr, 0x10000,
+ ASSERT_LMB(mem_lst, used_lst, 0, 0, 1, alloc_64k_addr, 0x10000,
0, 0, 0, 0);
if (ram0_size) {
- ut_asserteq(lmb.memory.cnt, 2);
- ut_asserteq(lmb.memory.region[0].base, ram0);
- ut_asserteq(lmb.memory.region[0].size, ram0_size);
- ut_asserteq(lmb.memory.region[1].base, ram);
- ut_asserteq(lmb.memory.region[1].size, ram_size);
+ ut_asserteq(mem_lst->count, 2);
+ ut_asserteq(mem[0].base, ram0);
+ ut_asserteq(mem[0].size, ram0_size);
+ ut_asserteq(mem[1].base, ram);
+ ut_asserteq(mem[1].size, ram_size);
} else {
- ut_asserteq(lmb.memory.cnt, 1);
- ut_asserteq(lmb.memory.region[0].base, ram);
- ut_asserteq(lmb.memory.region[0].size, ram_size);
+ ut_asserteq(mem_lst->count, 1);
+ ut_asserteq(mem[0].base, ram);
+ ut_asserteq(mem[0].size, ram_size);
}
+ lmb_pop(&store);
+
return 0;
}
@@ -229,48 +250,51 @@ static int test_bigblock(struct unit_test_state *uts, const phys_addr_t ram)
const phys_size_t big_block_size = 0x10000000;
const phys_addr_t ram_end = ram + ram_size;
const phys_addr_t alloc_64k_addr = ram + 0x10000000;
- struct lmb lmb;
+ struct alist *mem_lst, *used_lst;
long ret;
phys_addr_t a, b;
+ struct lmb store;
/* check for overflow */
ut_assert(ram_end == 0 || ram_end > ram);
- lmb_init(&lmb);
+ ut_assertok(setup_lmb_test(uts, &store, &mem_lst, &used_lst));
- ret = lmb_add(&lmb, ram, ram_size);
+ ret = lmb_add(ram, ram_size);
ut_asserteq(ret, 0);
/* reserve 64KiB in the middle of RAM */
- ret = lmb_reserve(&lmb, alloc_64k_addr, 0x10000);
+ ret = lmb_reserve(alloc_64k_addr, 0x10000);
ut_asserteq(ret, 0);
- ASSERT_LMB(&lmb, ram, ram_size, 1, alloc_64k_addr, 0x10000,
+ ASSERT_LMB(mem_lst, used_lst, ram, ram_size, 1, alloc_64k_addr, 0x10000,
0, 0, 0, 0);
/* allocate a big block, should be below reserved */
- a = lmb_alloc(&lmb, big_block_size, 1);
+ a = lmb_alloc(big_block_size, 1);
ut_asserteq(a, ram);
- ASSERT_LMB(&lmb, ram, ram_size, 1, a,
+ ASSERT_LMB(mem_lst, used_lst, ram, ram_size, 1, a,
big_block_size + 0x10000, 0, 0, 0, 0);
/* allocate 2nd big block */
/* This should fail, printing an error */
- b = lmb_alloc(&lmb, big_block_size, 1);
+ b = lmb_alloc(big_block_size, 1);
ut_asserteq(b, 0);
- ASSERT_LMB(&lmb, ram, ram_size, 1, a,
+ ASSERT_LMB(mem_lst, used_lst, ram, ram_size, 1, a,
big_block_size + 0x10000, 0, 0, 0, 0);
- ret = lmb_free(&lmb, a, big_block_size);
+ ret = lmb_free(a, big_block_size);
ut_asserteq(ret, 0);
- ASSERT_LMB(&lmb, ram, ram_size, 1, alloc_64k_addr, 0x10000,
+ ASSERT_LMB(mem_lst, used_lst, ram, ram_size, 1, alloc_64k_addr, 0x10000,
0, 0, 0, 0);
/* allocate too big block */
/* This should fail, printing an error */
- a = lmb_alloc(&lmb, ram_size, 1);
+ a = lmb_alloc(ram_size, 1);
ut_asserteq(a, 0);
- ASSERT_LMB(&lmb, ram, ram_size, 1, alloc_64k_addr, 0x10000,
+ ASSERT_LMB(mem_lst, used_lst, ram, ram_size, 1, alloc_64k_addr, 0x10000,
0, 0, 0, 0);
+ lmb_pop(&store);
+
return 0;
}
@@ -294,56 +318,62 @@ static int test_noreserved(struct unit_test_state *uts, const phys_addr_t ram,
{
const phys_size_t ram_size = 0x20000000;
const phys_addr_t ram_end = ram + ram_size;
- struct lmb lmb;
long ret;
phys_addr_t a, b;
+ struct lmb store;
+ struct alist *mem_lst, *used_lst;
const phys_addr_t alloc_size_aligned = (alloc_size + align - 1) &
~(align - 1);
/* check for overflow */
ut_assert(ram_end == 0 || ram_end > ram);
- lmb_init(&lmb);
+ ut_assertok(setup_lmb_test(uts, &store, &mem_lst, &used_lst));
- ret = lmb_add(&lmb, ram, ram_size);
+ ret = lmb_add(ram, ram_size);
ut_asserteq(ret, 0);
- ASSERT_LMB(&lmb, ram, ram_size, 0, 0, 0, 0, 0, 0, 0);
+ ASSERT_LMB(mem_lst, used_lst, ram, ram_size, 0, 0, 0, 0, 0, 0, 0);
/* allocate a block */
- a = lmb_alloc(&lmb, alloc_size, align);
+ a = lmb_alloc(alloc_size, align);
ut_assert(a != 0);
- ASSERT_LMB(&lmb, ram, ram_size, 1, ram + ram_size - alloc_size_aligned,
- alloc_size, 0, 0, 0, 0);
+ ASSERT_LMB(mem_lst, used_lst, ram, ram_size, 1,
+ ram + ram_size - alloc_size_aligned, alloc_size, 0, 0, 0, 0);
+
/* allocate another block */
- b = lmb_alloc(&lmb, alloc_size, align);
+ b = lmb_alloc(alloc_size, align);
ut_assert(b != 0);
if (alloc_size == alloc_size_aligned) {
- ASSERT_LMB(&lmb, ram, ram_size, 1, ram + ram_size -
+ ASSERT_LMB(mem_lst, used_lst, ram, ram_size, 1, ram + ram_size -
(alloc_size_aligned * 2), alloc_size * 2, 0, 0, 0,
0);
} else {
- ASSERT_LMB(&lmb, ram, ram_size, 2, ram + ram_size -
+ ASSERT_LMB(mem_lst, used_lst, ram, ram_size, 2, ram + ram_size -
(alloc_size_aligned * 2), alloc_size, ram + ram_size
- alloc_size_aligned, alloc_size, 0, 0);
}
/* and free them */
- ret = lmb_free(&lmb, b, alloc_size);
+ ret = lmb_free(b, alloc_size);
ut_asserteq(ret, 0);
- ASSERT_LMB(&lmb, ram, ram_size, 1, ram + ram_size - alloc_size_aligned,
+ ASSERT_LMB(mem_lst, used_lst, ram, ram_size, 1,
+ ram + ram_size - alloc_size_aligned,
alloc_size, 0, 0, 0, 0);
- ret = lmb_free(&lmb, a, alloc_size);
+ ret = lmb_free(a, alloc_size);
ut_asserteq(ret, 0);
- ASSERT_LMB(&lmb, ram, ram_size, 0, 0, 0, 0, 0, 0, 0);
+ ASSERT_LMB(mem_lst, used_lst, ram, ram_size, 0, 0, 0, 0, 0, 0, 0);
/* allocate a block with base */
- b = lmb_alloc_base(&lmb, alloc_size, align, ram_end);
+ b = lmb_alloc_base(alloc_size, align, ram_end);
ut_assert(a == b);
- ASSERT_LMB(&lmb, ram, ram_size, 1, ram + ram_size - alloc_size_aligned,
+ ASSERT_LMB(mem_lst, used_lst, ram, ram_size, 1,
+ ram + ram_size - alloc_size_aligned,
alloc_size, 0, 0, 0, 0);
/* and free it */
- ret = lmb_free(&lmb, b, alloc_size);
+ ret = lmb_free(b, alloc_size);
ut_asserteq(ret, 0);
- ASSERT_LMB(&lmb, ram, ram_size, 0, 0, 0, 0, 0, 0, 0);
+ ASSERT_LMB(mem_lst, used_lst, ram, ram_size, 0, 0, 0, 0, 0, 0, 0);
+
+ lmb_pop(&store);
return 0;
}
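As a quick illustration of the reworked lmb_alloc_base() call used above, which now takes only a size, an alignment and a maximum acceptable end address, a hedged sketch with illustrative values:

static int example_alloc_base(struct unit_test_state *uts)
{
	struct lmb store;
	struct alist *mem_lst, *used_lst;
	const phys_addr_t ram = 0x40000000;
	const phys_size_t ram_size = 0x20000000;
	phys_addr_t addr;

	ut_assertok(setup_lmb_test(uts, &store, &mem_lst, &used_lst));
	ut_asserteq(lmb_add(ram, ram_size), 0);

	/* 4 KiB, 4 KiB aligned, ending at or below ram + ram_size */
	addr = lmb_alloc_base(0x1000, 0x1000, ram + ram_size);
	ut_assert(addr != 0);
	ut_asserteq(lmb_free(addr, 0x1000), 0);

	lmb_pop(&store);

	return 0;
}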
@@ -384,36 +414,39 @@ static int lib_test_lmb_at_0(struct unit_test_state *uts)
{
const phys_addr_t ram = 0;
const phys_size_t ram_size = 0x20000000;
- struct lmb lmb;
+ struct lmb store;
+ struct alist *mem_lst, *used_lst;
long ret;
phys_addr_t a, b;
- lmb_init(&lmb);
+ ut_assertok(setup_lmb_test(uts, &store, &mem_lst, &used_lst));
- ret = lmb_add(&lmb, ram, ram_size);
+ ret = lmb_add(ram, ram_size);
ut_asserteq(ret, 0);
/* allocate nearly everything */
- a = lmb_alloc(&lmb, ram_size - 4, 1);
+ a = lmb_alloc(ram_size - 4, 1);
ut_asserteq(a, ram + 4);
- ASSERT_LMB(&lmb, ram, ram_size, 1, a, ram_size - 4,
+ ASSERT_LMB(mem_lst, used_lst, ram, ram_size, 1, a, ram_size - 4,
0, 0, 0, 0);
/* allocate the rest */
/* This should fail as the allocated address would be 0 */
- b = lmb_alloc(&lmb, 4, 1);
+ b = lmb_alloc(4, 1);
ut_asserteq(b, 0);
/* check that this was an error by checking lmb */
- ASSERT_LMB(&lmb, ram, ram_size, 1, a, ram_size - 4,
+ ASSERT_LMB(mem_lst, used_lst, ram, ram_size, 1, a, ram_size - 4,
0, 0, 0, 0);
/* check that this was an error by freeing b */
- ret = lmb_free(&lmb, b, 4);
+ ret = lmb_free(b, 4);
ut_asserteq(ret, -1);
- ASSERT_LMB(&lmb, ram, ram_size, 1, a, ram_size - 4,
+ ASSERT_LMB(mem_lst, used_lst, ram, ram_size, 1, a, ram_size - 4,
0, 0, 0, 0);
- ret = lmb_free(&lmb, a, ram_size - 4);
+ ret = lmb_free(a, ram_size - 4);
ut_asserteq(ret, 0);
- ASSERT_LMB(&lmb, ram, ram_size, 0, 0, 0, 0, 0, 0, 0);
+ ASSERT_LMB(mem_lst, used_lst, ram, ram_size, 0, 0, 0, 0, 0, 0, 0);
+
+ lmb_pop(&store);
return 0;
}
@@ -424,45 +457,50 @@ static int lib_test_lmb_overlapping_reserve(struct unit_test_state *uts)
{
const phys_addr_t ram = 0x40000000;
const phys_size_t ram_size = 0x20000000;
- struct lmb lmb;
+ struct lmb store;
+ struct alist *mem_lst, *used_lst;
long ret;
- lmb_init(&lmb);
+ ut_assertok(setup_lmb_test(uts, &store, &mem_lst, &used_lst));
- ret = lmb_add(&lmb, ram, ram_size);
+ ret = lmb_add(ram, ram_size);
ut_asserteq(ret, 0);
- ret = lmb_reserve(&lmb, 0x40010000, 0x10000);
+ ret = lmb_reserve(0x40010000, 0x10000);
ut_asserteq(ret, 0);
- ASSERT_LMB(&lmb, ram, ram_size, 1, 0x40010000, 0x10000,
+ ASSERT_LMB(mem_lst, used_lst, ram, ram_size, 1, 0x40010000, 0x10000,
0, 0, 0, 0);
- /* allocate overlapping region should fail */
- ret = lmb_reserve(&lmb, 0x40011000, 0x10000);
- ut_asserteq(ret, -1);
- ASSERT_LMB(&lmb, ram, ram_size, 1, 0x40010000, 0x10000,
+
+ /* reserving an overlapping region should return the coalesced count */
+ ret = lmb_reserve(0x40011000, 0x10000);
+ ut_asserteq(ret, 1);
+ ASSERT_LMB(mem_lst, used_lst, ram, ram_size, 1, 0x40010000, 0x11000,
0, 0, 0, 0);
/* allocate 3rd region */
- ret = lmb_reserve(&lmb, 0x40030000, 0x10000);
+ ret = lmb_reserve(0x40030000, 0x10000);
ut_asserteq(ret, 0);
- ASSERT_LMB(&lmb, ram, ram_size, 2, 0x40010000, 0x10000,
+ ASSERT_LMB(mem_lst, used_lst, ram, ram_size, 2, 0x40010000, 0x11000,
0x40030000, 0x10000, 0, 0);
/* allocate 2nd region; this should coalesce all regions into one */
- ret = lmb_reserve(&lmb, 0x40020000, 0x10000);
+ ret = lmb_reserve(0x40020000, 0x10000);
ut_assert(ret >= 0);
- ASSERT_LMB(&lmb, ram, ram_size, 1, 0x40010000, 0x30000,
+ ASSERT_LMB(mem_lst, used_lst, ram, ram_size, 1, 0x40010000, 0x30000,
0, 0, 0, 0);
/* allocate 2nd region, which should be added as first region */
- ret = lmb_reserve(&lmb, 0x40000000, 0x8000);
+ ret = lmb_reserve(0x40000000, 0x8000);
ut_assert(ret >= 0);
- ASSERT_LMB(&lmb, ram, ram_size, 2, 0x40000000, 0x8000,
+ ASSERT_LMB(mem_lst, used_lst, ram, ram_size, 2, 0x40000000, 0x8000,
0x40010000, 0x30000, 0, 0);
/* allocate 3rd region, coalesce with first and overlap with second */
- ret = lmb_reserve(&lmb, 0x40008000, 0x10000);
+ ret = lmb_reserve(0x40008000, 0x10000);
ut_assert(ret >= 0);
- ASSERT_LMB(&lmb, ram, ram_size, 1, 0x40000000, 0x40000,
+ ASSERT_LMB(mem_lst, used_lst, ram, ram_size, 1, 0x40000000, 0x40000,
0, 0, 0, 0);
+
+ lmb_pop(&store);
+
return 0;
}
LIB_TEST(lib_test_lmb_overlapping_reserve, 0);
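A short sketch of the coalescing behaviour the test above now exercises: with the rework, lmb_reserve() on an overlapping or adjacent range merges into the existing reservation and returns the number of coalesced regions instead of -1. The addresses below are illustrative only.

static int example_reserve_coalesce(struct unit_test_state *uts)
{
	struct lmb store;
	struct alist *mem_lst, *used_lst;

	ut_assertok(setup_lmb_test(uts, &store, &mem_lst, &used_lst));
	ut_asserteq(lmb_add(0x40000000, 0x20000000), 0);

	/* first reservation creates a new region: return 0 */
	ut_asserteq(lmb_reserve(0x40010000, 0x10000), 0);
	/* overlapping request merges into that region: return 1 */
	ut_asserteq(lmb_reserve(0x40018000, 0x10000), 1);
	ut_asserteq(used_lst->count, 1);

	lmb_pop(&store);

	return 0;
}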
@@ -473,112 +511,116 @@ LIB_TEST(lib_test_lmb_overlapping_reserve, 0);
*/
static int test_alloc_addr(struct unit_test_state *uts, const phys_addr_t ram)
{
+ struct lmb store;
+ struct alist *mem_lst, *used_lst;
const phys_size_t ram_size = 0x20000000;
const phys_addr_t ram_end = ram + ram_size;
const phys_size_t alloc_addr_a = ram + 0x8000000;
const phys_size_t alloc_addr_b = ram + 0x8000000 * 2;
const phys_size_t alloc_addr_c = ram + 0x8000000 * 3;
- struct lmb lmb;
long ret;
phys_addr_t a, b, c, d, e;
/* check for overflow */
ut_assert(ram_end == 0 || ram_end > ram);
- lmb_init(&lmb);
+ ut_assertok(setup_lmb_test(uts, &store, &mem_lst, &used_lst));
- ret = lmb_add(&lmb, ram, ram_size);
+ ret = lmb_add(ram, ram_size);
ut_asserteq(ret, 0);
/* reserve 3 blocks */
- ret = lmb_reserve(&lmb, alloc_addr_a, 0x10000);
+ ret = lmb_reserve(alloc_addr_a, 0x10000);
ut_asserteq(ret, 0);
- ret = lmb_reserve(&lmb, alloc_addr_b, 0x10000);
+ ret = lmb_reserve(alloc_addr_b, 0x10000);
ut_asserteq(ret, 0);
- ret = lmb_reserve(&lmb, alloc_addr_c, 0x10000);
+ ret = lmb_reserve(alloc_addr_c, 0x10000);
ut_asserteq(ret, 0);
- ASSERT_LMB(&lmb, ram, ram_size, 3, alloc_addr_a, 0x10000,
+ ASSERT_LMB(mem_lst, used_lst, ram, ram_size, 3, alloc_addr_a, 0x10000,
alloc_addr_b, 0x10000, alloc_addr_c, 0x10000);
/* allocate blocks */
- a = lmb_alloc_addr(&lmb, ram, alloc_addr_a - ram);
+ a = lmb_alloc_addr(ram, alloc_addr_a - ram);
ut_asserteq(a, ram);
- ASSERT_LMB(&lmb, ram, ram_size, 3, ram, 0x8010000,
+ ASSERT_LMB(mem_lst, used_lst, ram, ram_size, 3, ram, 0x8010000,
alloc_addr_b, 0x10000, alloc_addr_c, 0x10000);
- b = lmb_alloc_addr(&lmb, alloc_addr_a + 0x10000,
+ b = lmb_alloc_addr(alloc_addr_a + 0x10000,
alloc_addr_b - alloc_addr_a - 0x10000);
ut_asserteq(b, alloc_addr_a + 0x10000);
- ASSERT_LMB(&lmb, ram, ram_size, 2, ram, 0x10010000,
+ ASSERT_LMB(mem_lst, used_lst, ram, ram_size, 2, ram, 0x10010000,
alloc_addr_c, 0x10000, 0, 0);
- c = lmb_alloc_addr(&lmb, alloc_addr_b + 0x10000,
+ c = lmb_alloc_addr(alloc_addr_b + 0x10000,
alloc_addr_c - alloc_addr_b - 0x10000);
ut_asserteq(c, alloc_addr_b + 0x10000);
- ASSERT_LMB(&lmb, ram, ram_size, 1, ram, 0x18010000,
+ ASSERT_LMB(mem_lst, used_lst, ram, ram_size, 1, ram, 0x18010000,
0, 0, 0, 0);
- d = lmb_alloc_addr(&lmb, alloc_addr_c + 0x10000,
+ d = lmb_alloc_addr(alloc_addr_c + 0x10000,
ram_end - alloc_addr_c - 0x10000);
ut_asserteq(d, alloc_addr_c + 0x10000);
- ASSERT_LMB(&lmb, ram, ram_size, 1, ram, ram_size,
+ ASSERT_LMB(mem_lst, used_lst, ram, ram_size, 1, ram, ram_size,
0, 0, 0, 0);
/* allocating anything else should fail */
- e = lmb_alloc(&lmb, 1, 1);
+ e = lmb_alloc(1, 1);
ut_asserteq(e, 0);
- ASSERT_LMB(&lmb, ram, ram_size, 1, ram, ram_size,
+ ASSERT_LMB(mem_lst, used_lst, ram, ram_size, 1, ram, ram_size,
0, 0, 0, 0);
- ret = lmb_free(&lmb, d, ram_end - alloc_addr_c - 0x10000);
+ ret = lmb_free(d, ram_end - alloc_addr_c - 0x10000);
ut_asserteq(ret, 0);
/* allocate at 3 points in free range */
- d = lmb_alloc_addr(&lmb, ram_end - 4, 4);
+ d = lmb_alloc_addr(ram_end - 4, 4);
ut_asserteq(d, ram_end - 4);
- ASSERT_LMB(&lmb, ram, ram_size, 2, ram, 0x18010000,
+ ASSERT_LMB(mem_lst, used_lst, ram, ram_size, 2, ram, 0x18010000,
d, 4, 0, 0);
- ret = lmb_free(&lmb, d, 4);
+ ret = lmb_free(d, 4);
ut_asserteq(ret, 0);
- ASSERT_LMB(&lmb, ram, ram_size, 1, ram, 0x18010000,
+ ASSERT_LMB(mem_lst, used_lst, ram, ram_size, 1, ram, 0x18010000,
0, 0, 0, 0);
- d = lmb_alloc_addr(&lmb, ram_end - 128, 4);
+ d = lmb_alloc_addr(ram_end - 128, 4);
ut_asserteq(d, ram_end - 128);
- ASSERT_LMB(&lmb, ram, ram_size, 2, ram, 0x18010000,
+ ASSERT_LMB(mem_lst, used_lst, ram, ram_size, 2, ram, 0x18010000,
d, 4, 0, 0);
- ret = lmb_free(&lmb, d, 4);
+ ret = lmb_free(d, 4);
ut_asserteq(ret, 0);
- ASSERT_LMB(&lmb, ram, ram_size, 1, ram, 0x18010000,
+ ASSERT_LMB(mem_lst, used_lst, ram, ram_size, 1, ram, 0x18010000,
0, 0, 0, 0);
- d = lmb_alloc_addr(&lmb, alloc_addr_c + 0x10000, 4);
+ d = lmb_alloc_addr(alloc_addr_c + 0x10000, 4);
ut_asserteq(d, alloc_addr_c + 0x10000);
- ASSERT_LMB(&lmb, ram, ram_size, 1, ram, 0x18010004,
+ ASSERT_LMB(mem_lst, used_lst, ram, ram_size, 1, ram, 0x18010004,
0, 0, 0, 0);
- ret = lmb_free(&lmb, d, 4);
+ ret = lmb_free(d, 4);
ut_asserteq(ret, 0);
- ASSERT_LMB(&lmb, ram, ram_size, 1, ram, 0x18010000,
+ ASSERT_LMB(mem_lst, used_lst, ram, ram_size, 1, ram, 0x18010000,
0, 0, 0, 0);
/* allocate at the bottom */
- ret = lmb_free(&lmb, a, alloc_addr_a - ram);
+ ret = lmb_free(a, alloc_addr_a - ram);
ut_asserteq(ret, 0);
- ASSERT_LMB(&lmb, ram, ram_size, 1, ram + 0x8000000, 0x10010000,
- 0, 0, 0, 0);
- d = lmb_alloc_addr(&lmb, ram, 4);
+ ASSERT_LMB(mem_lst, used_lst, ram, ram_size, 1, ram + 0x8000000,
+ 0x10010000, 0, 0, 0, 0);
+
+ d = lmb_alloc_addr(ram, 4);
ut_asserteq(d, ram);
- ASSERT_LMB(&lmb, ram, ram_size, 2, d, 4,
+ ASSERT_LMB(mem_lst, used_lst, ram, ram_size, 2, d, 4,
ram + 0x8000000, 0x10010000, 0, 0);
/* check that allocating outside memory fails */
if (ram_end != 0) {
- ret = lmb_alloc_addr(&lmb, ram_end, 1);
+ ret = lmb_alloc_addr(ram_end, 1);
ut_asserteq(ret, 0);
}
if (ram != 0) {
- ret = lmb_alloc_addr(&lmb, ram - 1, 1);
+ ret = lmb_alloc_addr(ram - 1, 1);
ut_asserteq(ret, 0);
}
+ lmb_pop(&store);
+
return 0;
}
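A compact sketch of lmb_alloc_addr() under the new global API: it takes only the requested base and size, returns that base on success, and 0 when the range is unavailable. The addresses are illustrative.

static int example_alloc_addr(struct unit_test_state *uts)
{
	struct lmb store;
	struct alist *mem_lst, *used_lst;
	const phys_addr_t ram = 0x40000000;
	phys_addr_t addr;

	ut_assertok(setup_lmb_test(uts, &store, &mem_lst, &used_lst));
	ut_asserteq(lmb_add(ram, 0x20000000), 0);

	/* request 4 bytes at an exact address; that address comes back */
	addr = lmb_alloc_addr(ram + 0x100000, 4);
	ut_asserteq(addr, ram + 0x100000);

	/* a request outside the registered memory returns 0 */
	ut_asserteq(lmb_alloc_addr(ram + 0x20000000, 1), 0);

	ut_asserteq(lmb_free(addr, 4), 0);
	lmb_pop(&store);

	return 0;
}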
@@ -600,55 +642,57 @@ LIB_TEST(lib_test_lmb_alloc_addr, 0);
static int test_get_unreserved_size(struct unit_test_state *uts,
const phys_addr_t ram)
{
+ struct lmb store;
+ struct alist *mem_lst, *used_lst;
const phys_size_t ram_size = 0x20000000;
const phys_addr_t ram_end = ram + ram_size;
const phys_size_t alloc_addr_a = ram + 0x8000000;
const phys_size_t alloc_addr_b = ram + 0x8000000 * 2;
const phys_size_t alloc_addr_c = ram + 0x8000000 * 3;
- struct lmb lmb;
long ret;
phys_size_t s;
/* check for overflow */
ut_assert(ram_end == 0 || ram_end > ram);
+ ut_assertok(setup_lmb_test(uts, &store, &mem_lst, &used_lst));
- lmb_init(&lmb);
-
- ret = lmb_add(&lmb, ram, ram_size);
+ ret = lmb_add(ram, ram_size);
ut_asserteq(ret, 0);
/* reserve 3 blocks */
- ret = lmb_reserve(&lmb, alloc_addr_a, 0x10000);
+ ret = lmb_reserve(alloc_addr_a, 0x10000);
ut_asserteq(ret, 0);
- ret = lmb_reserve(&lmb, alloc_addr_b, 0x10000);
+ ret = lmb_reserve(alloc_addr_b, 0x10000);
ut_asserteq(ret, 0);
- ret = lmb_reserve(&lmb, alloc_addr_c, 0x10000);
+ ret = lmb_reserve(alloc_addr_c, 0x10000);
ut_asserteq(ret, 0);
- ASSERT_LMB(&lmb, ram, ram_size, 3, alloc_addr_a, 0x10000,
+ ASSERT_LMB(mem_lst, used_lst, ram, ram_size, 3, alloc_addr_a, 0x10000,
alloc_addr_b, 0x10000, alloc_addr_c, 0x10000);
/* check addresses in between blocks */
- s = lmb_get_free_size(&lmb, ram);
+ s = lmb_get_free_size(ram);
ut_asserteq(s, alloc_addr_a - ram);
- s = lmb_get_free_size(&lmb, ram + 0x10000);
+ s = lmb_get_free_size(ram + 0x10000);
ut_asserteq(s, alloc_addr_a - ram - 0x10000);
- s = lmb_get_free_size(&lmb, alloc_addr_a - 4);
+ s = lmb_get_free_size(alloc_addr_a - 4);
ut_asserteq(s, 4);
- s = lmb_get_free_size(&lmb, alloc_addr_a + 0x10000);
+ s = lmb_get_free_size(alloc_addr_a + 0x10000);
ut_asserteq(s, alloc_addr_b - alloc_addr_a - 0x10000);
- s = lmb_get_free_size(&lmb, alloc_addr_a + 0x20000);
+ s = lmb_get_free_size(alloc_addr_a + 0x20000);
ut_asserteq(s, alloc_addr_b - alloc_addr_a - 0x20000);
- s = lmb_get_free_size(&lmb, alloc_addr_b - 4);
+ s = lmb_get_free_size(alloc_addr_b - 4);
ut_asserteq(s, 4);
- s = lmb_get_free_size(&lmb, alloc_addr_c + 0x10000);
+ s = lmb_get_free_size(alloc_addr_c + 0x10000);
ut_asserteq(s, ram_end - alloc_addr_c - 0x10000);
- s = lmb_get_free_size(&lmb, alloc_addr_c + 0x20000);
+ s = lmb_get_free_size(alloc_addr_c + 0x20000);
ut_asserteq(s, ram_end - alloc_addr_c - 0x20000);
- s = lmb_get_free_size(&lmb, ram_end - 4);
+ s = lmb_get_free_size(ram_end - 4);
ut_asserteq(s, 4);
+ lmb_pop(&store);
+
return 0;
}
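For reference, a small sketch of lmb_get_free_size(), which now takes just an address and reports the free span from that address up to the next reservation or the end of RAM. Values are illustrative.

static int example_free_size(struct unit_test_state *uts)
{
	struct lmb store;
	struct alist *mem_lst, *used_lst;
	const phys_addr_t ram = 0x40000000;
	const phys_size_t ram_size = 0x20000000;

	ut_assertok(setup_lmb_test(uts, &store, &mem_lst, &used_lst));
	ut_asserteq(lmb_add(ram, ram_size), 0);
	ut_asserteq(lmb_reserve(ram + 0x10000000, 0x10000), 0);

	/* free span from the given address up to the reservation */
	ut_asserteq(lmb_get_free_size(ram), 0x10000000);
	/* and from beyond the reservation up to the end of RAM */
	ut_asserteq(lmb_get_free_size(ram + 0x10010000), 0x0fff0000);

	lmb_pop(&store);

	return 0;
}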
@@ -666,158 +710,94 @@ static int lib_test_lmb_get_free_size(struct unit_test_state *uts)
}
LIB_TEST(lib_test_lmb_get_free_size, 0);
-#ifdef CONFIG_LMB_USE_MAX_REGIONS
-static int lib_test_lmb_max_regions(struct unit_test_state *uts)
-{
- const phys_addr_t ram = 0x00000000;
- /*
- * All of 32bit memory space will contain regions for this test, so
- * we need to scale ram_size (which in this case is the size of the lmb
- * region) to match.
- */
- const phys_size_t ram_size = ((0xFFFFFFFF >> CONFIG_LMB_MAX_REGIONS)
- + 1) * CONFIG_LMB_MAX_REGIONS;
- const phys_size_t blk_size = 0x10000;
- phys_addr_t offset;
- struct lmb lmb;
- int ret, i;
-
- lmb_init(&lmb);
-
- ut_asserteq(lmb.memory.cnt, 0);
- ut_asserteq(lmb.memory.max, CONFIG_LMB_MAX_REGIONS);
- ut_asserteq(lmb.reserved.cnt, 0);
- ut_asserteq(lmb.reserved.max, CONFIG_LMB_MAX_REGIONS);
-
- /* Add CONFIG_LMB_MAX_REGIONS memory regions */
- for (i = 0; i < CONFIG_LMB_MAX_REGIONS; i++) {
- offset = ram + 2 * i * ram_size;
- ret = lmb_add(&lmb, offset, ram_size);
- ut_asserteq(ret, 0);
- }
- ut_asserteq(lmb.memory.cnt, CONFIG_LMB_MAX_REGIONS);
- ut_asserteq(lmb.reserved.cnt, 0);
-
- /* error for the (CONFIG_LMB_MAX_REGIONS + 1) memory regions */
- offset = ram + 2 * (CONFIG_LMB_MAX_REGIONS + 1) * ram_size;
- ret = lmb_add(&lmb, offset, ram_size);
- ut_asserteq(ret, -1);
-
- ut_asserteq(lmb.memory.cnt, CONFIG_LMB_MAX_REGIONS);
- ut_asserteq(lmb.reserved.cnt, 0);
-
- /* reserve CONFIG_LMB_MAX_REGIONS regions */
- for (i = 0; i < CONFIG_LMB_MAX_REGIONS; i++) {
- offset = ram + 2 * i * blk_size;
- ret = lmb_reserve(&lmb, offset, blk_size);
- ut_asserteq(ret, 0);
- }
-
- ut_asserteq(lmb.memory.cnt, CONFIG_LMB_MAX_REGIONS);
- ut_asserteq(lmb.reserved.cnt, CONFIG_LMB_MAX_REGIONS);
-
- /* error for the 9th reserved blocks */
- offset = ram + 2 * (CONFIG_LMB_MAX_REGIONS + 1) * blk_size;
- ret = lmb_reserve(&lmb, offset, blk_size);
- ut_asserteq(ret, -1);
-
- ut_asserteq(lmb.memory.cnt, CONFIG_LMB_MAX_REGIONS);
- ut_asserteq(lmb.reserved.cnt, CONFIG_LMB_MAX_REGIONS);
-
- /* check each regions */
- for (i = 0; i < CONFIG_LMB_MAX_REGIONS; i++)
- ut_asserteq(lmb.memory.region[i].base, ram + 2 * i * ram_size);
-
- for (i = 0; i < CONFIG_LMB_MAX_REGIONS; i++)
- ut_asserteq(lmb.reserved.region[i].base, ram + 2 * i * blk_size);
-
- return 0;
-}
-LIB_TEST(lib_test_lmb_max_regions, 0);
-#endif
-
static int lib_test_lmb_flags(struct unit_test_state *uts)
{
+ struct lmb store;
+ struct lmb_region *mem, *used;
+ struct alist *mem_lst, *used_lst;
const phys_addr_t ram = 0x40000000;
const phys_size_t ram_size = 0x20000000;
- struct lmb lmb;
long ret;
- lmb_init(&lmb);
+ ut_assertok(setup_lmb_test(uts, &store, &mem_lst, &used_lst));
+ mem = mem_lst->data;
+ used = used_lst->data;
- ret = lmb_add(&lmb, ram, ram_size);
+ ret = lmb_add(ram, ram_size);
ut_asserteq(ret, 0);
/* reserve, same flag */
- ret = lmb_reserve_flags(&lmb, 0x40010000, 0x10000, LMB_NOMAP);
+ ret = lmb_reserve_flags(0x40010000, 0x10000, LMB_NOMAP);
ut_asserteq(ret, 0);
- ASSERT_LMB(&lmb, ram, ram_size, 1, 0x40010000, 0x10000,
+ ASSERT_LMB(mem_lst, used_lst, ram, ram_size, 1, 0x40010000, 0x10000,
0, 0, 0, 0);
/* reserve again, same flag */
- ret = lmb_reserve_flags(&lmb, 0x40010000, 0x10000, LMB_NOMAP);
+ ret = lmb_reserve_flags(0x40010000, 0x10000, LMB_NOMAP);
ut_asserteq(ret, 0);
- ASSERT_LMB(&lmb, ram, ram_size, 1, 0x40010000, 0x10000,
+ ASSERT_LMB(mem_lst, used_lst, ram, ram_size, 1, 0x40010000, 0x10000,
0, 0, 0, 0);
/* reserve again, new flag */
- ret = lmb_reserve_flags(&lmb, 0x40010000, 0x10000, LMB_NONE);
+ ret = lmb_reserve_flags(0x40010000, 0x10000, LMB_NONE);
ut_asserteq(ret, -1);
- ASSERT_LMB(&lmb, ram, ram_size, 1, 0x40010000, 0x10000,
+ ASSERT_LMB(mem_lst, used_lst, ram, ram_size, 1, 0x40010000, 0x10000,
0, 0, 0, 0);
- ut_asserteq(lmb_is_nomap(&lmb.reserved.region[0]), 1);
+ ut_asserteq(lmb_is_nomap(&used[0]), 1);
/* merge after */
- ret = lmb_reserve_flags(&lmb, 0x40020000, 0x10000, LMB_NOMAP);
+ ret = lmb_reserve_flags(0x40020000, 0x10000, LMB_NOMAP);
ut_asserteq(ret, 1);
- ASSERT_LMB(&lmb, ram, ram_size, 1, 0x40010000, 0x20000,
+ ASSERT_LMB(mem_lst, used_lst, ram, ram_size, 1, 0x40010000, 0x20000,
0, 0, 0, 0);
/* merge before */
- ret = lmb_reserve_flags(&lmb, 0x40000000, 0x10000, LMB_NOMAP);
+ ret = lmb_reserve_flags(0x40000000, 0x10000, LMB_NOMAP);
ut_asserteq(ret, 1);
- ASSERT_LMB(&lmb, ram, ram_size, 1, 0x40000000, 0x30000,
+ ASSERT_LMB(mem_lst, used_lst, ram, ram_size, 1, 0x40000000, 0x30000,
0, 0, 0, 0);
- ut_asserteq(lmb_is_nomap(&lmb.reserved.region[0]), 1);
+ ut_asserteq(lmb_is_nomap(&used[0]), 1);
- ret = lmb_reserve_flags(&lmb, 0x40030000, 0x10000, LMB_NONE);
+ ret = lmb_reserve_flags(0x40030000, 0x10000, LMB_NONE);
ut_asserteq(ret, 0);
- ASSERT_LMB(&lmb, ram, ram_size, 2, 0x40000000, 0x30000,
+ ASSERT_LMB(mem_lst, used_lst, ram, ram_size, 2, 0x40000000, 0x30000,
0x40030000, 0x10000, 0, 0);
- ut_asserteq(lmb_is_nomap(&lmb.reserved.region[0]), 1);
- ut_asserteq(lmb_is_nomap(&lmb.reserved.region[1]), 0);
+ ut_asserteq(lmb_is_nomap(&used[0]), 1);
+ ut_asserteq(lmb_is_nomap(&used[1]), 0);
/* test that the old API uses LMB_NONE */
- ret = lmb_reserve(&lmb, 0x40040000, 0x10000);
+ ret = lmb_reserve(0x40040000, 0x10000);
ut_asserteq(ret, 1);
- ASSERT_LMB(&lmb, ram, ram_size, 2, 0x40000000, 0x30000,
+ ASSERT_LMB(mem_lst, used_lst, ram, ram_size, 2, 0x40000000, 0x30000,
0x40030000, 0x20000, 0, 0);
- ut_asserteq(lmb_is_nomap(&lmb.reserved.region[0]), 1);
- ut_asserteq(lmb_is_nomap(&lmb.reserved.region[1]), 0);
+ ut_asserteq(lmb_is_nomap(&used[0]), 1);
+ ut_asserteq(lmb_is_nomap(&used[1]), 0);
- ret = lmb_reserve_flags(&lmb, 0x40070000, 0x10000, LMB_NOMAP);
+ ret = lmb_reserve_flags(0x40070000, 0x10000, LMB_NOMAP);
ut_asserteq(ret, 0);
- ASSERT_LMB(&lmb, ram, ram_size, 3, 0x40000000, 0x30000,
+ ASSERT_LMB(mem_lst, used_lst, ram, ram_size, 3, 0x40000000, 0x30000,
0x40030000, 0x20000, 0x40070000, 0x10000);
- ret = lmb_reserve_flags(&lmb, 0x40050000, 0x10000, LMB_NOMAP);
+ ret = lmb_reserve_flags(0x40050000, 0x10000, LMB_NOMAP);
ut_asserteq(ret, 0);
- ASSERT_LMB(&lmb, ram, ram_size, 4, 0x40000000, 0x30000,
+ ASSERT_LMB(mem_lst, used_lst, ram, ram_size, 4, 0x40000000, 0x30000,
0x40030000, 0x20000, 0x40050000, 0x10000);
/* merge with 2 adjacent regions */
- ret = lmb_reserve_flags(&lmb, 0x40060000, 0x10000, LMB_NOMAP);
+ ret = lmb_reserve_flags(0x40060000, 0x10000, LMB_NOMAP);
ut_asserteq(ret, 2);
- ASSERT_LMB(&lmb, ram, ram_size, 3, 0x40000000, 0x30000,
+ ASSERT_LMB(mem_lst, used_lst, ram, ram_size, 3, 0x40000000, 0x30000,
0x40030000, 0x20000, 0x40050000, 0x30000);
- ut_asserteq(lmb_is_nomap(&lmb.reserved.region[0]), 1);
- ut_asserteq(lmb_is_nomap(&lmb.reserved.region[1]), 0);
- ut_asserteq(lmb_is_nomap(&lmb.reserved.region[2]), 1);
+ ut_asserteq(lmb_is_nomap(&used[0]), 1);
+ ut_asserteq(lmb_is_nomap(&used[1]), 0);
+ ut_asserteq(lmb_is_nomap(&used[2]), 1);
+
+ lmb_pop(&store);
return 0;
}
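Finally, a hedged sketch of the flags API covered above: lmb_reserve_flags() merges only same-flag neighbours and returns the number of merged regions, while lmb_is_nomap() now inspects an lmb_region taken from the used-list alist. Function name and addresses are illustrative.

static int example_flags(struct unit_test_state *uts)
{
	struct lmb store;
	struct alist *mem_lst, *used_lst;
	struct lmb_region *used;

	ut_assertok(setup_lmb_test(uts, &store, &mem_lst, &used_lst));
	ut_asserteq(lmb_add(0x40000000, 0x20000000), 0);

	/* a no-map reservation; an adjacent same-flag request merges (return 1) */
	ut_asserteq(lmb_reserve_flags(0x40010000, 0x10000, LMB_NOMAP), 0);
	ut_asserteq(lmb_reserve_flags(0x40020000, 0x10000, LMB_NOMAP), 1);

	used = used_lst->data;
	ut_asserteq(lmb_is_nomap(&used[0]), 1);

	/* plain lmb_reserve() records LMB_NONE, so it stays a separate region */
	ut_asserteq(lmb_reserve(0x40040000, 0x10000), 0);
	ut_asserteq(lmb_is_nomap(&used[1]), 0);

	lmb_pop(&store);

	return 0;
}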