Diffstat (limited to 'lib')
-rw-r--r--   lib/efi_loader/efi_bootmgr.c |  25
-rw-r--r--   lib/efi_loader/efi_helper.c  |  85
-rw-r--r--   lib/efi_loader/efi_memory.c  |  37
-rw-r--r--   lib/efi_loader/elf_efi.ldsi  |   6
-rw-r--r--   lib/lmb.c                    | 146
-rw-r--r--   lib/rsa/rsa-verify.c         |   5
6 files changed, 201 insertions, 103 deletions
diff --git a/lib/efi_loader/efi_bootmgr.c b/lib/efi_loader/efi_bootmgr.c
index c6124c590d9..f9534ef85ed 100644
--- a/lib/efi_loader/efi_bootmgr.c
+++ b/lib/efi_loader/efi_bootmgr.c
@@ -18,6 +18,8 @@
 #include <efi_loader.h>
 #include <efi_variable.h>
 #include <asm/unaligned.h>
+#include <linux/kernel.h>
+#include <linux/sizes.h>
 
 static const struct efi_boot_services *bs;
 static const struct efi_runtime_services *rs;
@@ -348,6 +350,7 @@ static efi_status_t prepare_loaded_image(u16 *label, ulong addr, ulong size,
 					 struct efi_device_path **dp,
 					 struct udevice **blk)
 {
+	u64 pages;
 	efi_status_t ret;
 	struct udevice *ramdisk_blk;
 
@@ -362,13 +365,18 @@ static efi_status_t prepare_loaded_image(u16 *label, ulong addr, ulong size,
 	}
 
 	/*
-	 * TODO: expose the ramdisk to OS.
-	 * Need to pass the ramdisk information by the architecture-specific
-	 * methods such as 'pmem' device-tree node.
+	 * Linux supports 'pmem' which allows OS installers to find, reclaim
+	 * the mounted images and continue the installation since the contents
+	 * of the pmem region are treated as local media.
+	 *
+	 * The memory regions used for it needs to be carved out of the EFI
+	 * memory map.
 	 */
-	ret = efi_add_memory_map(addr, size, EFI_RESERVED_MEMORY_TYPE);
+	pages = efi_size_in_pages(size + (addr & EFI_PAGE_MASK));
+	ret = efi_update_memory_map(addr, pages, EFI_CONVENTIONAL_MEMORY,
+				    false, true);
 	if (ret != EFI_SUCCESS) {
-		log_err("Memory reservation failed\n");
+		log_err("Failed to reserve memory\n");
 		goto err;
 	}
 
@@ -490,6 +498,13 @@ static efi_status_t try_load_from_uri_path(struct efi_device_path_uri *uridp,
 		ret = EFI_INVALID_PARAMETER;
 		goto err;
 	}
+	/*
+	 * Depending on the kernel configuration, pmem memory areas must be
+	 * page aligned or 2MiB aligned. PowerPC is an exception here and
+	 * requires 16MiB alignment, but since we don't have EFI support for
+	 * it, limit the alignment to 2MiB.
+	 */
+	image_size = ALIGN(image_size, SZ_2M);
 
 	/*
 	 * If the file extension is ".iso" or ".img", mount it and try to load
diff --git a/lib/efi_loader/efi_helper.c b/lib/efi_loader/efi_helper.c
index 04b2efc4a3b..8c32059edda 100644
--- a/lib/efi_loader/efi_helper.c
+++ b/lib/efi_loader/efi_helper.c
@@ -5,6 +5,7 @@
 
 #define LOG_CATEGORY LOGC_EFI
 
+#include <blkmap.h>
 #include <bootm.h>
 #include <env.h>
 #include <image.h>
@@ -454,22 +455,29 @@ efi_status_t efi_env_set_load_options(efi_handle_t handle,
  */
 static efi_status_t copy_fdt(void **fdtp)
 {
-	unsigned long fdt_ram_start = -1L, fdt_pages;
 	efi_status_t ret = 0;
 	void *fdt, *new_fdt;
-	u64 new_fdt_addr;
-	uint fdt_size;
-	int i;
+	static u64 new_fdt_addr;
+	static efi_uintn_t fdt_pages;
+	ulong fdt_size;
 
-	for (i = 0; i < CONFIG_NR_DRAM_BANKS; i++) {
-		u64 ram_start = gd->bd->bi_dram[i].start;
-		u64 ram_size = gd->bd->bi_dram[i].size;
+	/*
+	 * Remove the configuration table that might already be
+	 * installed, ignoring EFI_NOT_FOUND if no device-tree
+	 * is installed
+	 */
+	efi_install_configuration_table(&efi_guid_fdt, NULL);
 
-		if (!ram_size)
-			continue;
+	if (new_fdt_addr) {
+		log_debug("%s: Found allocated memory at %#llx, with %#zx pages\n",
+			  __func__, new_fdt_addr, fdt_pages);
 
-		if (ram_start < fdt_ram_start)
-			fdt_ram_start = ram_start;
+		ret = efi_free_pages(new_fdt_addr, fdt_pages);
+		if (ret != EFI_SUCCESS)
+			log_err("Unable to free up existing FDT memory region\n");
+
+		new_fdt_addr = 0;
+		fdt_pages = 0;
 	}
 
 	/*
@@ -485,15 +493,18 @@ static efi_status_t copy_fdt(void **fdtp)
 			       &new_fdt_addr);
 	if (ret != EFI_SUCCESS) {
 		log_err("Failed to reserve space for FDT\n");
-		goto done;
+		return ret;
 	}
 
+	log_debug("%s: Allocated memory at %#llx, with %#zx pages\n",
+		  __func__, new_fdt_addr, fdt_pages);
+
 	new_fdt = (void *)(uintptr_t)new_fdt_addr;
 	memcpy(new_fdt, fdt, fdt_totalsize(fdt));
 	fdt_set_totalsize(new_fdt, fdt_size);
 
-	*fdtp = (void *)(uintptr_t)new_fdt_addr;
-done:
-	return ret;
+	*fdtp = new_fdt;
+
+	return EFI_SUCCESS;
 }
 
 /**
@@ -546,9 +557,6 @@ efi_status_t efi_install_fdt(void *fdt)
 	const char *fdt_opt;
 	uintptr_t fdt_addr;
 
-	/* Look for device tree that is already installed */
-	if (efi_get_configuration_table(&efi_guid_fdt))
-		return EFI_SUCCESS;
 	/* Check if there is a hardware device tree */
 	fdt_opt = env_get("fdt_addr");
 	/* Use our own device tree as fallback */
@@ -680,3 +688,44 @@ out:
 
 	return ret;
 }
+
+/**
+ * pmem_node_efi_memmap_setup() - Add pmem node and tweak EFI memmap
+ * @fdt: The devicetree to which pmem node is added
+ * @addr: start address of the pmem node
+ * @size: size of the memory of the pmem node
+ *
+ * The function adds the pmem node to the device-tree along with removing
+ * the corresponding region from the EFI memory map. Used primarily to
+ * pass the information of a RAM based ISO image to the OS.
+ *
+ * Return: 0 on success, -ve value on error
+ */
+static int pmem_node_efi_memmap_setup(void *fdt, u64 addr, u64 size)
+{
+	int ret;
+	u64 pages;
+	efi_status_t status;
+
+	ret = fdt_fixup_pmem_region(fdt, addr, size);
+	if (ret) {
+		log_err("Failed to setup pmem node for addr %#llx, size %#llx, err %d\n",
+			addr, size, ret);
+		return ret;
+	}
+
+	/* Remove the pmem region from the EFI memory map */
+	pages = efi_size_in_pages(size + (addr & EFI_PAGE_MASK));
+	status = efi_update_memory_map(addr, pages, EFI_CONVENTIONAL_MEMORY,
+				       false, true);
+	if (status != EFI_SUCCESS)
+		return -1;
+
+	return 0;
+}
+
+int fdt_efi_pmem_setup(void *fdt)
+{
+	return blkmap_get_preserved_pmem_slices(pmem_node_efi_memmap_setup,
+						fdt);
+}
diff --git a/lib/efi_loader/efi_memory.c b/lib/efi_loader/efi_memory.c
index 6d00b186250..0abb1f6159a 100644
--- a/lib/efi_loader/efi_memory.c
+++ b/lib/efi_loader/efi_memory.c
@@ -258,7 +258,7 @@ static s64 efi_mem_carve_out(struct efi_mem_list *map,
 }
 
 /**
- * efi_add_memory_map_pg() - add pages to the memory map
+ * efi_update_memory_map() - update the memory map by adding/removing pages
  *
  * @start:			start address, must be a multiple of
  *				EFI_PAGE_SIZE
@@ -266,12 +266,11 @@ static s64 efi_mem_carve_out(struct efi_mem_list *map,
  * @memory_type:		type of memory added
  * @overlap_conventional:	region may only overlap free(conventional)
  *				memory
+ * @remove:			remove memory map
 * Return:			status code
 */
-static
-efi_status_t efi_add_memory_map_pg(u64 start, u64 pages,
-				   int memory_type,
-				   bool overlap_conventional)
+efi_status_t efi_update_memory_map(u64 start, u64 pages, int memory_type,
+				   bool overlap_conventional, bool remove)
 {
 	struct efi_mem_list *lmem;
 	struct efi_mem_list *newlist;
@@ -279,9 +278,9 @@ efi_status_t efi_add_memory_map_pg(u64 start, u64 pages,
 	uint64_t carved_pages = 0;
 	struct efi_event *evt;
 
-	EFI_PRINT("%s: 0x%llx 0x%llx %d %s\n", __func__,
+	EFI_PRINT("%s: 0x%llx 0x%llx %d %s %s\n", __func__,
 		  start, pages, memory_type, overlap_conventional ?
-		  "yes" : "no");
+		  "yes" : "no", remove ? "remove" : "add");
 
 	if (memory_type >= EFI_MAX_MEMORY_TYPE)
 		return EFI_INVALID_PARAMETER;
@@ -364,7 +363,10 @@ efi_status_t efi_add_memory_map_pg(u64 start, u64 pages,
 	}
 
 	/* Add our new map */
-	list_add_tail(&newlist->link, &efi_mem);
+	if (!remove)
+		list_add_tail(&newlist->link, &efi_mem);
+	else
+		free(newlist);
 
 	/* And make sure memory is listed in descending order */
 	efi_mem_sort();
@@ -401,7 +403,7 @@ efi_status_t efi_add_memory_map(u64 start, u64 size, int memory_type)
 	pages = efi_size_in_pages(size + (start & EFI_PAGE_MASK));
 	start &= ~EFI_PAGE_MASK;
 
-	return efi_add_memory_map_pg(start, pages, memory_type, false);
+	return efi_update_memory_map(start, pages, memory_type, false, false);
 }
 
 /**
@@ -491,8 +493,7 @@ efi_status_t efi_allocate_pages(enum efi_allocate_type type,
 			return EFI_NOT_FOUND;
 
 		addr = map_to_sysmem((void *)(uintptr_t)*memory);
-		addr = (u64)lmb_alloc_addr(addr, len, flags);
-		if (!addr)
+		if (lmb_alloc_addr(addr, len, flags))
 			return EFI_NOT_FOUND;
 		break;
 	default:
@@ -502,7 +503,7 @@ efi_status_t efi_allocate_pages(enum efi_allocate_type type,
 	efi_addr = (u64)(uintptr_t)map_sysmem(addr, 0);
 
 	/* Reserve that map in our memory maps */
-	ret = efi_add_memory_map_pg(efi_addr, pages, memory_type, true);
+	ret = efi_update_memory_map(efi_addr, pages, memory_type, true, false);
 	if (ret != EFI_SUCCESS) {
 		/* Map would overlap, bail out */
 		lmb_free_flags(addr, (u64)pages << EFI_PAGE_SHIFT, flags);
@@ -823,8 +824,8 @@ static void add_u_boot_and_runtime(void)
 			uboot_stack_size) & ~EFI_PAGE_MASK;
 	uboot_pages = ((uintptr_t)map_sysmem(gd->ram_top - 1, 0) -
 		       uboot_start + EFI_PAGE_MASK) >> EFI_PAGE_SHIFT;
-	efi_add_memory_map_pg(uboot_start, uboot_pages, EFI_BOOT_SERVICES_CODE,
-			      false);
+	efi_update_memory_map(uboot_start, uboot_pages, EFI_BOOT_SERVICES_CODE,
+			      false, false);
 #if defined(__aarch64__)
 	/*
 	 * Runtime Services must be 64KiB aligned according to the
@@ -842,8 +843,8 @@ static void add_u_boot_and_runtime(void)
 	runtime_end = (uintptr_t)__efi_runtime_stop;
 	runtime_end = (runtime_end + runtime_mask) & ~runtime_mask;
 	runtime_pages = (runtime_end - runtime_start) >> EFI_PAGE_SHIFT;
-	efi_add_memory_map_pg(runtime_start, runtime_pages,
-			      EFI_RUNTIME_SERVICES_CODE, false);
+	efi_update_memory_map(runtime_start, runtime_pages,
+			      EFI_RUNTIME_SERVICES_CODE, false, false);
 }
 
 int efi_memory_init(void)
@@ -878,11 +879,11 @@ int efi_map_update_notify(phys_addr_t addr, phys_size_t size,
 	pages = efi_size_in_pages(size + (efi_addr & EFI_PAGE_MASK));
 	efi_addr &= ~EFI_PAGE_MASK;
 
-	status = efi_add_memory_map_pg(efi_addr, pages,
+	status = efi_update_memory_map(efi_addr, pages,
 				       op == LMB_MAP_OP_RESERVE ?
 				       EFI_BOOT_SERVICES_DATA :
 				       EFI_CONVENTIONAL_MEMORY,
-				       false);
+				       false, false);
 	if (status != EFI_SUCCESS) {
 		log_err("LMB Map notify failure %lu\n",
 			status & ~EFI_ERROR_MASK);
diff --git a/lib/efi_loader/elf_efi.ldsi b/lib/efi_loader/elf_efi.ldsi
index 190a88fb69e..4fa5ca43872 100644
--- a/lib/efi_loader/elf_efi.ldsi
+++ b/lib/efi_loader/elf_efi.ldsi
@@ -21,10 +21,10 @@ SECTIONS
 		*(.gnu.linkonce.t.*)
 		*(.srodata)
 		*(.rodata*)
-		. = ALIGN(16);
-		*(.dynamic);
-		. = ALIGN(512);
 	}
+	. = ALIGN(16);
+	.dynamic : { *(.dynamic) }
+	. = ALIGN(512);
 	.rela.dyn : { *(.rela.dyn) }
 	.rela.plt : { *(.rela.plt) }
 	.rela.got : { *(.rela.got) }
diff --git a/lib/lmb.c b/lib/lmb.c
index 93fc1bea07c..bb6f232f6bc 100644
--- a/lib/lmb.c
+++ b/lib/lmb.c
@@ -23,6 +23,9 @@
 
 DECLARE_GLOBAL_DATA_PTR;
 
+#define LMB_RGN_OVERLAP		1
+#define LMB_RGN_ADJACENT	2
+
 /*
  * The following low level LMB functions must not access the global LMB memory
  * map since they are also used to manage IOVA memory maps in iommu drivers like
@@ -49,8 +52,22 @@ static long lmb_addrs_adjacent(phys_addr_t base1, phys_size_t size1,
 	return 0;
 }
 
-static long lmb_regions_overlap(struct alist *lmb_rgn_lst, unsigned long r1,
-				unsigned long r2)
+/**
+ * lmb_regions_check() - Check if the regions overlap, or are adjacent
+ * @lmb_rgn_lst: List of LMB regions
+ * @r1: First region to check
+ * @r2: Second region to check
+ *
+ * Check if the two regions with matching flags, r1 and r2 are
+ * adjacent to each other, or if they overlap.
+ *
+ * Return:
+ * * %LMB_RGN_OVERLAP	- Regions overlap
+ * * %LMB_RGN_ADJACENT	- Regions adjacent to each other
+ * * 0			- Neither of the above, or flags mismatch
+ */
+static long lmb_regions_check(struct alist *lmb_rgn_lst, unsigned long r1,
+			      unsigned long r2)
 {
 	struct lmb_region *rgn = lmb_rgn_lst->data;
 	phys_addr_t base1 = rgn[r1].base;
@@ -58,19 +75,15 @@ static long lmb_regions_overlap(struct alist *lmb_rgn_lst, unsigned long r1,
 	phys_addr_t base2 = rgn[r2].base;
 	phys_size_t size2 = rgn[r2].size;
 
-	return lmb_addrs_overlap(base1, size1, base2, size2);
-}
+	if (rgn[r1].flags != rgn[r2].flags)
+		return 0;
 
-static long lmb_regions_adjacent(struct alist *lmb_rgn_lst, unsigned long r1,
-				 unsigned long r2)
-{
-	struct lmb_region *rgn = lmb_rgn_lst->data;
-	phys_addr_t base1 = rgn[r1].base;
-	phys_size_t size1 = rgn[r1].size;
-	phys_addr_t base2 = rgn[r2].base;
-	phys_size_t size2 = rgn[r2].size;
+	if (lmb_addrs_overlap(base1, size1, base2, size2))
+		return LMB_RGN_OVERLAP;
+	else if (lmb_addrs_adjacent(base1, size1, base2, size2))
+		return LMB_RGN_ADJACENT;
 
-	return lmb_addrs_adjacent(base1, size1, base2, size2);
+	return 0;
 }
 
 static void lmb_remove_region(struct alist *lmb_rgn_lst, unsigned long r)
@@ -96,25 +109,6 @@ static void lmb_coalesce_regions(struct alist *lmb_rgn_lst, unsigned long r1,
 	lmb_remove_region(lmb_rgn_lst, r2);
 }
 
-/*Assumption : base addr of region 1 < base addr of region 2*/
-static void lmb_fix_over_lap_regions(struct alist *lmb_rgn_lst,
-				     unsigned long r1, unsigned long r2)
-{
-	struct lmb_region *rgn = lmb_rgn_lst->data;
-
-	phys_addr_t base1 = rgn[r1].base;
-	phys_size_t size1 = rgn[r1].size;
-	phys_addr_t base2 = rgn[r2].base;
-	phys_size_t size2 = rgn[r2].size;
-
-	if (base1 + size1 > base2 + size2) {
-		printf("This will not be a case any time\n");
-		return;
-	}
-	rgn[r1].size = base2 + size2 - base1;
-	lmb_remove_region(lmb_rgn_lst, r2);
-}
-
 static long lmb_resize_regions(struct alist *lmb_rgn_lst,
 			       unsigned long idx_start,
 			       phys_addr_t base, phys_size_t size)
@@ -209,14 +203,11 @@ static long lmb_add_region_flags(struct alist *lmb_rgn_lst, phys_addr_t base,
 			break;
 		} else if (ret < 0) {
 			if (flags != rgnflags)
-				break;
+				continue;
 
 			rgn[i].size += size;
 			coalesced++;
 			break;
 		} else if (lmb_addrs_overlap(base, size, rgnbase, rgnsize)) {
-			if (flags != LMB_NONE)
-				return -EEXIST;
-
 			ret = lmb_resize_regions(lmb_rgn_lst, i, base, size);
 			if (ret < 0)
 				return -1;
@@ -229,16 +220,21 @@ static long lmb_add_region_flags(struct alist *lmb_rgn_lst, phys_addr_t base,
 	}
 
 	if (lmb_rgn_lst->count && i < lmb_rgn_lst->count - 1) {
-		rgn = lmb_rgn_lst->data;
-		if (rgn[i].flags == rgn[i + 1].flags) {
-			if (lmb_regions_adjacent(lmb_rgn_lst, i, i + 1)) {
-				lmb_coalesce_regions(lmb_rgn_lst, i, i + 1);
-				coalesced++;
-			} else if (lmb_regions_overlap(lmb_rgn_lst, i, i + 1)) {
-				/* fix overlapping area */
-				lmb_fix_over_lap_regions(lmb_rgn_lst, i, i + 1);
-				coalesced++;
-			}
+		ret = lmb_regions_check(lmb_rgn_lst, i, i + 1);
+		if (ret == LMB_RGN_ADJACENT) {
+			lmb_coalesce_regions(lmb_rgn_lst, i, i + 1);
+			coalesced++;
+		} else if (ret == LMB_RGN_OVERLAP) {
+			/* fix overlapping areas */
+			phys_addr_t rgnbase = rgn[i].base;
+			phys_size_t rgnsize = rgn[i].size;
+
+			ret = lmb_resize_regions(lmb_rgn_lst, i,
+						 rgnbase, rgnsize);
+			if (ret < 0)
+				return -1;
+
+			coalesced++;
 		}
 	}
 
@@ -561,6 +557,39 @@ static __maybe_unused void lmb_reserve_common_spl(void)
 	}
 }
 
+/**
+ * lmb_can_reserve_region() - check if the region can be reserved
+ * @base: base address of region to be reserved
+ * @size: size of region to be reserved
+ * @flags: flag of the region to be reserved
+ *
+ * Go through all the reserved regions and ensure that the requested
+ * region does not overlap with any existing regions. An overlap is
+ * allowed only when the flag of the request region and the existing
+ * region is LMB_NONE.
+ *
+ * Return: true if region can be reserved, false otherwise
+ */
+static bool lmb_can_reserve_region(phys_addr_t base, phys_size_t size,
+				   u32 flags)
+{
+	uint i;
+	struct lmb_region *lmb_reserved = lmb.used_mem.data;
+
+	for (i = 0; i < lmb.used_mem.count; i++) {
+		u32 rgnflags = lmb_reserved[i].flags;
+		phys_addr_t rgnbase = lmb_reserved[i].base;
+		phys_size_t rgnsize = lmb_reserved[i].size;
+
+		if (lmb_addrs_overlap(base, size, rgnbase, rgnsize)) {
+			if (flags != LMB_NONE || flags != rgnflags)
+				return false;
+		}
+	}
+
+	return true;
+}
+
 void lmb_add_memory(void)
 {
 	int i;
@@ -633,6 +662,9 @@ long lmb_reserve(phys_addr_t base, phys_size_t size, u32 flags)
 	long ret = 0;
 	struct alist *lmb_rgn_lst = &lmb.used_mem;
 
+	if (!lmb_can_reserve_region(base, size, flags))
+		return -EEXIST;
+
 	ret = lmb_add_region_flags(lmb_rgn_lst, base, size, flags);
 	if (ret)
 		return ret;
@@ -692,29 +724,25 @@ static phys_addr_t _lmb_alloc_base(phys_size_t size, ulong align,
 				base = ALIGN_DOWN(res_base - size, align);
 		}
 	}
+
+	log_debug("%s: Failed to allocate 0x%lx bytes below 0x%lx\n",
+		  __func__, (ulong)size, (ulong)max_addr);
+
 	return 0;
 }
 
 phys_addr_t lmb_alloc(phys_size_t size, ulong align)
 {
-	return lmb_alloc_base(size, align, LMB_ALLOC_ANYWHERE, LMB_NONE);
+	return _lmb_alloc_base(size, align, LMB_ALLOC_ANYWHERE, LMB_NONE);
 }
 
 phys_addr_t lmb_alloc_base(phys_size_t size, ulong align, phys_addr_t max_addr,
 			   uint flags)
 {
-	phys_addr_t alloc;
-
-	alloc = _lmb_alloc_base(size, align, max_addr, flags);
-
-	if (alloc == 0)
-		printf("ERROR: Failed to allocate 0x%lx bytes below 0x%lx.\n",
-		       (ulong)size, (ulong)max_addr);
-
-	return alloc;
+	return _lmb_alloc_base(size, align, max_addr, flags);
 }
 
-phys_addr_t lmb_alloc_addr(phys_addr_t base, phys_size_t size, u32 flags)
+int lmb_alloc_addr(phys_addr_t base, phys_size_t size, u32 flags)
 {
 	long rgn;
 	struct lmb_region *lmb_memory = lmb.available_mem.data;
@@ -731,11 +759,11 @@ phys_addr_t lmb_alloc_addr(phys_addr_t base, phys_size_t size, u32 flags)
 				      base + size - 1, 1)) {
 			/* ok, reserve the memory */
 			if (!lmb_reserve(base, size, flags))
-				return base;
+				return 0;
 		}
 	}
 
-	return 0;
+	return -1;
 }
 
 /* Return number of bytes from a given address that are free */
diff --git a/lib/rsa/rsa-verify.c b/lib/rsa/rsa-verify.c
index b74aaf86e6d..4a0418a75f1 100644
--- a/lib/rsa/rsa-verify.c
+++ b/lib/rsa/rsa-verify.c
@@ -449,6 +449,11 @@ static int rsa_verify_with_keynode(struct image_sign_info *info,
 	}
 
 	algo = fdt_getprop(blob, node, "algo", NULL);
+	if (!algo) {
+		debug("%s: Missing 'algo' property\n", __func__);
+		return -EFAULT;
+	}
+
 	if (strcmp(info->name, algo)) {
 		debug("%s: Wrong algo: have %s, expected %s\n",
 		      __func__, info->name, algo);