summaryrefslogtreecommitdiff
path: root/lib
diff options
context:
space:
mode:
Diffstat (limited to 'lib')
-rw-r--r--  lib/cpus/aarch64/cortex_a76.S        |  29
-rw-r--r--  lib/el3_runtime/aarch64/context.S    |   2
-rw-r--r--  lib/romlib/Makefile                  |  15
-rwxr-xr-x  lib/romlib/genwrappers.sh            |   6
-rw-r--r--  lib/xlat_tables_v2/xlat_tables_core.c  | 328
-rw-r--r--  lib/xlat_tables_v2/xlat_tables_utils.c | 166
6 files changed, 179 insertions(+), 367 deletions(-)
diff --git a/lib/cpus/aarch64/cortex_a76.S b/lib/cpus/aarch64/cortex_a76.S
index ac513432..e544018c 100644
--- a/lib/cpus/aarch64/cortex_a76.S
+++ b/lib/cpus/aarch64/cortex_a76.S
@@ -13,20 +13,17 @@
#include <plat_macros.S>
#include <services/arm_arch_svc.h>
-#if !DYNAMIC_WORKAROUND_CVE_2018_3639
-#error Cortex A76 requires DYNAMIC_WORKAROUND_CVE_2018_3639=1
-#endif
-
#define ESR_EL3_A64_SMC0 0x5e000000
#define ESR_EL3_A32_SMC0 0x4e000000
+#if DYNAMIC_WORKAROUND_CVE_2018_3639
/*
* This macro applies the mitigation for CVE-2018-3639.
- * It implements a fash path where `SMCCC_ARCH_WORKAROUND_2`
+ * It implements a fast path where `SMCCC_ARCH_WORKAROUND_2`
* SMC calls from a lower EL running in AArch32 or AArch64
* will go through the fast and return early.
*
- * The macro saves x2-x3 to the context. In the fast path
+ * The macro saves x2-x3 to the context. In the fast path
* x0-x3 registers do not need to be restored as the calling
* context will have saved them.
*/
@@ -63,7 +60,7 @@
* When the calling context wants mitigation disabled,
* we program the mitigation disable function in the
* CPU context, which gets invoked on subsequent exits from
- * EL3 via the `el3_exit` function. Otherwise NULL is
+ * EL3 via the `el3_exit` function. Otherwise NULL is
* programmed in the CPU context, which results in caller's
* inheriting the EL3 mitigation state (enabled) on subsequent
* `el3_exit`.
@@ -82,7 +79,7 @@
.endif
1:
/*
- * Always enable v4 mitigation during EL3 execution. This is not
+ * Always enable v4 mitigation during EL3 execution. This is not
* required for the fast path above because it does not perform any
* memory loads.
*/
@@ -188,6 +185,7 @@ vector_entry cortex_a76_serror_aarch32
apply_cve_2018_3639_wa _is_sync_exception=0 _esr_el3_val=ESR_EL3_A32_SMC0
b serror_aarch32
end_vector_entry cortex_a76_serror_aarch32
+#endif /* DYNAMIC_WORKAROUND_CVE_2018_3639 */
/* --------------------------------------------------
* Errata Workaround for Cortex A76 Errata #1073348.
@@ -319,9 +317,13 @@ func cortex_a76_reset_func
/* If the PE implements SSBS, we don't need the dynamic workaround */
mrs x0, id_aa64pfr1_el1
lsr x0, x0, #ID_AA64PFR1_EL1_SSBS_SHIFT
- and x0, x0, #ID_AA64PFR1_EL1_SSBS_MASK
+ and x0, x0, #ID_AA64PFR1_EL1_SSBS_MASK
+#if !DYNAMIC_WORKAROUND_CVE_2018_3639 && ENABLE_ASSERTIONS
+ cmp x0, 0
+ ASM_ASSERT(ne)
+#endif
+#if DYNAMIC_WORKAROUND_CVE_2018_3639
cbnz x0, 1f
-
mrs x0, CORTEX_A76_CPUACTLR2_EL1
orr x0, x0, #CORTEX_A76_CPUACTLR2_EL1_DISABLE_LOAD_PASS_STORE
msr CORTEX_A76_CPUACTLR2_EL1, x0
@@ -330,16 +332,17 @@ func cortex_a76_reset_func
#ifdef IMAGE_BL31
/*
* The Cortex-A76 generic vectors are overwritten to use the vectors
- * defined above. This is required in order to apply mitigation
+ * defined above. This is required in order to apply mitigation
* against CVE-2018-3639 on exception entry from lower ELs.
*/
adr x0, cortex_a76_wa_cve_2018_3639_a76_vbar
msr vbar_el3, x0
isb
-#endif
+#endif /* IMAGE_BL31 */
1:
-#endif
+#endif /* DYNAMIC_WORKAROUND_CVE_2018_3639 */
+#endif /* WORKAROUND_CVE_2018_3639 */
#if ERRATA_DSU_936184
bl errata_dsu_936184_wa
diff --git a/lib/el3_runtime/aarch64/context.S b/lib/el3_runtime/aarch64/context.S
index 4371cb2b..e6ab19bc 100644
--- a/lib/el3_runtime/aarch64/context.S
+++ b/lib/el3_runtime/aarch64/context.S
@@ -356,7 +356,7 @@ func pauth_context_restore
msr APIAKeyLo_EL1, x9
msr APIAKeyHi_EL1, x10
- ldp x9, x10, [x11, #CTX_PACIAKEY_LO]
+ ldp x9, x10, [x11, #CTX_PACIBKEY_LO]
msr APIBKeyLo_EL1, x9
msr APIBKeyHi_EL1, x10
diff --git a/lib/romlib/Makefile b/lib/romlib/Makefile
index 0e5d4475..12fdfa8e 100644
--- a/lib/romlib/Makefile
+++ b/lib/romlib/Makefile
@@ -1,5 +1,5 @@
#
-# Copyright (c) 2018, ARM Limited and Contributors. All rights reserved.
+# Copyright (c) 2018-2019, ARM Limited and Contributors. All rights reserved.
#
# SPDX-License-Identifier: BSD-3-Clause
#
@@ -58,18 +58,15 @@ $(WRAPPER_DIR)/jmpvar.s: $(BUILD_DIR)/romlib.elf
@echo " VAR $@"
$(Q)./genvar.sh -o $@ $(BUILD_DIR)/romlib.elf
-$(LIB_DIR)/libwrappers.a: jmptbl.i $(WRAPPER_DIR)/jmpvar.o
+$(LIB_DIR)/libwrappers.a: $(BUILD_DIR)/jmptbl.i $(WRAPPER_DIR)/jmpvar.o
@echo " AR $@"
$(Q)./genwrappers.sh -b $(WRAPPER_DIR) -o $@ $(BUILD_DIR)/jmptbl.i
-$(BUILD_DIR)/jmptbl.s: jmptbl.i
+$(BUILD_DIR)/jmptbl.i: $(BUILD_DIR)/jmptbl.s
+
+$(BUILD_DIR)/jmptbl.s: ../../$(PLAT_DIR)/jmptbl.i
@echo " TBL $@"
- if [ -e "../../$(PLAT_DIR)/jmptbl.i" ] ; \
- then \
- $(Q)./gentbl.sh -o $@ -b $(BUILD_DIR) ../../$(PLAT_DIR)/jmptbl.i; \
- else \
- @echo "USE_ROMLIB=1 requires jump table list file: jmptbl.i in platform directory"; \
- fi
+ $(Q)./gentbl.sh -o $@ -b $(BUILD_DIR) ../../$(PLAT_DIR)/jmptbl.i
clean:
@rm -f $(BUILD_DIR)/*
diff --git a/lib/romlib/genwrappers.sh b/lib/romlib/genwrappers.sh
index 746e4ba9..07d59ac4 100755
--- a/lib/romlib/genwrappers.sh
+++ b/lib/romlib/genwrappers.sh
@@ -1,5 +1,5 @@
#!/bin/sh
-# Copyright (c) 2018, ARM Limited and Contributors. All rights reserved.
+# Copyright (c) 2018-2019, ARM Limited and Contributors. All rights reserved.
#
# SPDX-License-Identifier: BSD-3-Clause
@@ -31,7 +31,7 @@ do
done
awk '{sub(/[:blank:]*#.*/,"")}
-!/^$/ && !/\\tpatch$/ !/\\treserved$/ {print $1*4, $2, $3}' "$@" |
+!/^$/ && $NF != "patch" && $NF != "reserved" {print $1*4, $2, $3}' "$@" |
while read idx lib sym
do
file=$build/${lib}_$sym
@@ -41,7 +41,7 @@ do
$sym:
ldr x17, =jmptbl
ldr x17, [x17]
- mov x16, $idx
+ mov x16, #$idx
add x16, x16, x17
br x16
EOF
diff --git a/lib/xlat_tables_v2/xlat_tables_core.c b/lib/xlat_tables_v2/xlat_tables_core.c
index 7957b61a..0e6a6fa8 100644
--- a/lib/xlat_tables_v2/xlat_tables_core.c
+++ b/lib/xlat_tables_v2/xlat_tables_core.c
@@ -325,9 +325,8 @@ static action_t xlat_tables_unmap_region_action(const mmap_region_t *mm,
return action;
}
-
/*
- * Function that writes to the translation tables and unmaps the
+ * Recursive function that writes to the translation tables and unmaps the
* specified region.
*/
static void xlat_tables_unmap_region(xlat_ctx_t *ctx, mmap_region_t *mm,
@@ -338,137 +337,70 @@ static void xlat_tables_unmap_region(xlat_ctx_t *ctx, mmap_region_t *mm,
{
assert((level >= ctx->base_level) && (level <= XLAT_TABLE_LEVEL_MAX));
- /*
- * data structure to track DESC_TABLE entry before iterate into subtable
- * of next translation level. it will be used to restore previous level
- * after finish subtable iteration.
- */
- struct desc_table_unmap {
- uint64_t *table_base;
- uintptr_t table_idx_va;
- unsigned int idx;
- } desc_tables[XLAT_TABLE_LEVEL_MAX + 1] = {
- {NULL, 0U, XLAT_TABLE_ENTRIES}, };
-
- unsigned int this_level = level;
- uint64_t *this_base = table_base;
- unsigned int max_entries = table_entries;
- size_t level_size = XLAT_BLOCK_SIZE(this_level);
- unsigned int table_idx;
+ uint64_t *subtable;
+ uint64_t desc;
+
uintptr_t table_idx_va;
+ uintptr_t table_idx_end_va; /* End VA of this entry */
uintptr_t region_end_va = mm->base_va + mm->size - 1U;
+ unsigned int table_idx;
+
table_idx_va = xlat_tables_find_start_va(mm, table_base_va, level);
table_idx = xlat_tables_va_to_index(table_base_va, table_idx_va, level);
- while (this_base != NULL) {
-
- uint64_t desc;
- uint64_t desc_type;
- uintptr_t table_idx_end_va; /* End VA of this entry */
- action_t action;
-
- /* finish current xlat level iteration. */
- if (table_idx >= max_entries) {
- if (this_level > ctx->base_level) {
- xlat_table_dec_regions_count(ctx, this_base);
- }
-
- if (this_level > level) {
- uint64_t *subtable;
-
- /* back from subtable iteration, restore
- * previous DESC_TABLE entry.
- */
- this_level--;
- this_base = desc_tables[this_level].table_base;
- table_idx = desc_tables[this_level].idx;
- table_idx_va =
- desc_tables[this_level].table_idx_va;
- level_size = XLAT_BLOCK_SIZE(this_level);
-
- if (this_level == level) {
- max_entries = table_entries;
- } else {
- max_entries = XLAT_TABLE_ENTRIES;
- }
-
- desc = this_base[table_idx];
- subtable = (uint64_t *)(uintptr_t)(desc & TABLE_ADDR_MASK);
- /*
- * If the subtable is now empty, remove its reference.
- */
- if (xlat_table_is_empty(ctx, subtable)) {
- this_base[table_idx] = INVALID_DESC;
- xlat_arch_tlbi_va(table_idx_va,
- ctx->xlat_regime);
- }
- table_idx++;
- table_idx_va += level_size;
-
- } else {
- /* reached end of top level, exit.*/
- this_base = NULL;
- break;
- }
-
- }
-
- /* If reached the end of the region, stop iterating entries in
- * current xlat level.
- */
- if (region_end_va <= table_idx_va) {
- table_idx = max_entries;
- continue;
- }
+ while (table_idx < table_entries) {
+ table_idx_end_va = table_idx_va + XLAT_BLOCK_SIZE(level) - 1U;
- table_idx_end_va = table_idx_va + XLAT_BLOCK_SIZE(this_level) - 1U;
+ desc = table_base[table_idx];
+ uint64_t desc_type = desc & DESC_MASK;
- desc = this_base[table_idx];
- desc_type = desc & DESC_MASK;
-
- action = xlat_tables_unmap_region_action(mm, table_idx_va,
- table_idx_end_va,
- this_level,
- desc_type);
+ action_t action = xlat_tables_unmap_region_action(mm,
+ table_idx_va, table_idx_end_va, level,
+ desc_type);
if (action == ACTION_WRITE_BLOCK_ENTRY) {
- this_base[table_idx] = INVALID_DESC;
+
+ table_base[table_idx] = INVALID_DESC;
xlat_arch_tlbi_va(table_idx_va, ctx->xlat_regime);
- table_idx++;
- table_idx_va += level_size;
} else if (action == ACTION_RECURSE_INTO_TABLE) {
- uint64_t *subtable;
- uintptr_t base_va;
-
subtable = (uint64_t *)(uintptr_t)(desc & TABLE_ADDR_MASK);
- desc_tables[this_level].table_base = this_base;
- desc_tables[this_level].table_idx_va = table_idx_va;
- base_va = table_idx_va;
- desc_tables[this_level].idx = table_idx;
-
- this_base = subtable;
- this_level++;
-
- max_entries = XLAT_TABLE_ENTRIES;
- level_size = XLAT_BLOCK_SIZE(this_level);
+ /* Recurse to write into subtable */
+ xlat_tables_unmap_region(ctx, mm, table_idx_va,
+ subtable, XLAT_TABLE_ENTRIES,
+ level + 1U);
+#if !(HW_ASSISTED_COHERENCY || WARMBOOT_ENABLE_DCACHE_EARLY)
+ xlat_clean_dcache_range((uintptr_t)subtable,
+ XLAT_TABLE_ENTRIES * sizeof(uint64_t));
+#endif
+ /*
+ * If the subtable is now empty, remove its reference.
+ */
+ if (xlat_table_is_empty(ctx, subtable)) {
+ table_base[table_idx] = INVALID_DESC;
+ xlat_arch_tlbi_va(table_idx_va,
+ ctx->xlat_regime);
+ }
- table_idx_va = xlat_tables_find_start_va(mm,
- base_va, this_level);
- table_idx = xlat_tables_va_to_index(base_va,
- table_idx_va, this_level);
} else {
assert(action == ACTION_NONE);
-
- table_idx++;
- table_idx_va += level_size;
}
+
+ table_idx++;
+ table_idx_va += XLAT_BLOCK_SIZE(level);
+
+ /* If reached the end of the region, exit */
+ if (region_end_va <= table_idx_va)
+ break;
}
+
+ if (level > ctx->base_level)
+ xlat_table_dec_regions_count(ctx, table_base);
}
#endif /* PLAT_XLAT_TABLES_DYNAMIC */
@@ -605,169 +537,105 @@ static action_t xlat_tables_map_region_action(const mmap_region_t *mm,
}
/*
- * Function that writes to the translation tables and maps the
+ * Recursive function that writes to the translation tables and maps the
* specified region. On success, it returns the VA of the last byte that was
* successfully mapped. On error, it returns the VA of the next entry that
* should have been mapped.
*/
static uintptr_t xlat_tables_map_region(xlat_ctx_t *ctx, mmap_region_t *mm,
- const uintptr_t table_base_va,
+ uintptr_t table_base_va,
uint64_t *const table_base,
unsigned int table_entries,
unsigned int level)
{
-
assert((level >= ctx->base_level) && (level <= XLAT_TABLE_LEVEL_MAX));
- /*
- * data structure to track DESC_TABLE entry before iterate into subtable
- * of next translation level. it will be used to restore previous level
- * after finish subtable iteration.
- */
- struct desc_table_map {
- uint64_t *table_base;
- uintptr_t table_idx_va;
- unsigned int idx;
- } desc_tables[XLAT_TABLE_LEVEL_MAX + 1] = {
- {NULL, 0U, XLAT_TABLE_ENTRIES}, };
-
- unsigned int this_level = level;
- uint64_t *this_base = table_base;
- unsigned int max_entries = table_entries;
- size_t level_size = XLAT_BLOCK_SIZE(this_level);
uintptr_t mm_end_va = mm->base_va + mm->size - 1U;
uintptr_t table_idx_va;
+ unsigned long long table_idx_pa;
+
+ uint64_t *subtable;
+ uint64_t desc;
+
unsigned int table_idx;
table_idx_va = xlat_tables_find_start_va(mm, table_base_va, level);
table_idx = xlat_tables_va_to_index(table_base_va, table_idx_va, level);
- while (this_base != NULL) {
-
- uint64_t desc;
- uint64_t desc_type;
- unsigned long long table_idx_pa;
- action_t action;
-
- /* finish current xlat level iteration. */
- if (table_idx >= max_entries) {
- if (this_level <= level) {
- this_base = NULL;
- break;
- } else {
-
- /* back from subtable iteration, restore
- * previous DESC_TABLE entry.
- */
- this_level--;
- level_size = XLAT_BLOCK_SIZE(this_level);
- this_base = desc_tables[this_level].table_base;
- table_idx = desc_tables[this_level].idx;
- if (this_level == level) {
- max_entries = table_entries;
- } else {
- max_entries = XLAT_TABLE_ENTRIES;
- }
-#if !(HW_ASSISTED_COHERENCY || WARMBOOT_ENABLE_DCACHE_EARLY)
- uintptr_t subtable;
- desc = this_base[table_idx];
- subtable = (uintptr_t)(desc & TABLE_ADDR_MASK);
- xlat_clean_dcache_range(subtable,
- XLAT_TABLE_ENTRIES * sizeof(uint64_t));
+#if PLAT_XLAT_TABLES_DYNAMIC
+ if (level > ctx->base_level)
+ xlat_table_inc_regions_count(ctx, table_base);
#endif
- table_idx++;
- table_idx_va =
- desc_tables[this_level].table_idx_va +
- level_size;
- }
- }
+ while (table_idx < table_entries) {
- desc = this_base[table_idx];
- desc_type = desc & DESC_MASK;
+ desc = table_base[table_idx];
table_idx_pa = mm->base_pa + table_idx_va - mm->base_va;
- /* If reached the end of the region, simply exit since we
- * already write all BLOCK entries and create all required
- * subtables.
- */
- if (mm_end_va <= table_idx_va) {
- this_base = NULL;
- break;
- }
-
- action = xlat_tables_map_region_action(mm, desc_type,
- table_idx_pa, table_idx_va, this_level);
+ action_t action = xlat_tables_map_region_action(mm,
+ (uint32_t)(desc & DESC_MASK), table_idx_pa,
+ table_idx_va, level);
if (action == ACTION_WRITE_BLOCK_ENTRY) {
- this_base[table_idx] = xlat_desc(ctx, mm->attr,
- table_idx_pa, this_level);
- table_idx++;
- table_idx_va += level_size;
- } else if (action == ACTION_CREATE_NEW_TABLE) {
- uintptr_t base_va;
+ table_base[table_idx] =
+ xlat_desc(ctx, (uint32_t)mm->attr, table_idx_pa,
+ level);
+
+ } else if (action == ACTION_CREATE_NEW_TABLE) {
+ uintptr_t end_va;
- uint64_t *subtable = xlat_table_get_empty(ctx);
+ subtable = xlat_table_get_empty(ctx);
if (subtable == NULL) {
- /* Not enough free tables to map this region. */
+ /* Not enough free tables to map this region */
return table_idx_va;
}
/* Point to new subtable from this one. */
- this_base[table_idx] = TABLE_DESC | (unsigned long)subtable;
-
- desc_tables[this_level].table_base = this_base;
- desc_tables[this_level].table_idx_va = table_idx_va;
- desc_tables[this_level].idx = table_idx;
- base_va = table_idx_va;
-
- this_level++;
- this_base = subtable;
- level_size = XLAT_BLOCK_SIZE(this_level);
- table_idx_va = xlat_tables_find_start_va(mm, base_va,
- this_level);
- table_idx = xlat_tables_va_to_index(base_va,
- table_idx_va, this_level);
- max_entries = XLAT_TABLE_ENTRIES;
+ table_base[table_idx] = TABLE_DESC | (unsigned long)subtable;
-#if PLAT_XLAT_TABLES_DYNAMIC
- if (this_level > ctx->base_level) {
- xlat_table_inc_regions_count(ctx, subtable);
- }
+ /* Recurse to write into subtable */
+ end_va = xlat_tables_map_region(ctx, mm, table_idx_va,
+ subtable, XLAT_TABLE_ENTRIES,
+ level + 1U);
+#if !(HW_ASSISTED_COHERENCY || WARMBOOT_ENABLE_DCACHE_EARLY)
+ xlat_clean_dcache_range((uintptr_t)subtable,
+ XLAT_TABLE_ENTRIES * sizeof(uint64_t));
#endif
+ if (end_va !=
+ (table_idx_va + XLAT_BLOCK_SIZE(level) - 1U))
+ return end_va;
} else if (action == ACTION_RECURSE_INTO_TABLE) {
+ uintptr_t end_va;
- uintptr_t base_va;
- uint64_t *subtable = (uint64_t *)(uintptr_t)(desc & TABLE_ADDR_MASK);
-
- desc_tables[this_level].table_base = this_base;
- desc_tables[this_level].table_idx_va = table_idx_va;
- desc_tables[this_level].idx = table_idx;
- base_va = table_idx_va;
-
- this_level++;
- level_size = XLAT_BLOCK_SIZE(this_level);
- table_idx_va = xlat_tables_find_start_va(mm, base_va,
- this_level);
- table_idx = xlat_tables_va_to_index(base_va,
- table_idx_va, this_level);
- this_base = subtable;
- max_entries = XLAT_TABLE_ENTRIES;
-
-#if PLAT_XLAT_TABLES_DYNAMIC
- if (this_level > ctx->base_level) {
- xlat_table_inc_regions_count(ctx, subtable);
- }
+ subtable = (uint64_t *)(uintptr_t)(desc & TABLE_ADDR_MASK);
+ /* Recurse to write into subtable */
+ end_va = xlat_tables_map_region(ctx, mm, table_idx_va,
+ subtable, XLAT_TABLE_ENTRIES,
+ level + 1U);
+#if !(HW_ASSISTED_COHERENCY || WARMBOOT_ENABLE_DCACHE_EARLY)
+ xlat_clean_dcache_range((uintptr_t)subtable,
+ XLAT_TABLE_ENTRIES * sizeof(uint64_t));
#endif
+ if (end_va !=
+ (table_idx_va + XLAT_BLOCK_SIZE(level) - 1U))
+ return end_va;
+
} else {
+
assert(action == ACTION_NONE);
- table_idx++;
- table_idx_va += level_size;
+
}
+
+ table_idx++;
+ table_idx_va += XLAT_BLOCK_SIZE(level);
+
+ /* If reached the end of the region, exit */
+ if (mm_end_va <= table_idx_va)
+ break;
}
return table_idx_va - 1U;
diff --git a/lib/xlat_tables_v2/xlat_tables_utils.c b/lib/xlat_tables_v2/xlat_tables_utils.c
index 7d0449af..f5848a25 100644
--- a/lib/xlat_tables_v2/xlat_tables_utils.c
+++ b/lib/xlat_tables_v2/xlat_tables_utils.c
@@ -109,7 +109,7 @@ static const char *invalid_descriptors_ommited =
"%s(%d invalid descriptors omitted)\n";
/*
- * Function that reads the translation tables passed as an argument
+ * Recursive function that reads the translation tables passed as an argument
* and prints their status.
*/
static void xlat_tables_print_internal(xlat_ctx_t *ctx, uintptr_t table_base_va,
@@ -118,23 +118,10 @@ static void xlat_tables_print_internal(xlat_ctx_t *ctx, uintptr_t table_base_va,
{
assert(level <= XLAT_TABLE_LEVEL_MAX);
- /*
- * data structure to track DESC_TABLE entry before iterate into subtable
- * of next translation level. it will be restored after return from
- * subtable iteration.
- */
- struct desc_table {
- const uint64_t *table_base;
- uintptr_t table_idx_va;
- unsigned int idx;
- } desc_tables[XLAT_TABLE_LEVEL_MAX + 1] = {
- {NULL, 0U, XLAT_TABLE_ENTRIES}, };
- unsigned int this_level = level;
- const uint64_t *this_base = table_base;
- unsigned int max_entries = table_entries;
- size_t level_size = XLAT_BLOCK_SIZE(this_level);
- unsigned int table_idx = 0U;
+ uint64_t desc;
uintptr_t table_idx_va = table_base_va;
+ unsigned int table_idx = 0U;
+ size_t level_size = XLAT_BLOCK_SIZE(level);
/*
* Keep track of how many invalid descriptors are counted in a row.
@@ -144,110 +131,67 @@ static void xlat_tables_print_internal(xlat_ctx_t *ctx, uintptr_t table_base_va,
*/
int invalid_row_count = 0;
- while (this_base != NULL) {
- /* finish current xlat level */
- if (table_idx >= max_entries) {
+ while (table_idx < table_entries) {
+
+ desc = table_base[table_idx];
+
+ if ((desc & DESC_MASK) == INVALID_DESC) {
+
+ if (invalid_row_count == 0) {
+ printf("%sVA:0x%lx size:0x%zx\n",
+ level_spacers[level],
+ table_idx_va, level_size);
+ }
+ invalid_row_count++;
+
+ } else {
+
if (invalid_row_count > 1) {
printf(invalid_descriptors_ommited,
- level_spacers[this_level],
- invalid_row_count - 1);
+ level_spacers[level],
+ invalid_row_count - 1);
}
invalid_row_count = 0;
- /* no parent level to iterate. */
- if (this_level <= level) {
- this_base = NULL;
- table_idx = max_entries + 1;
- } else {
- /* retore previous DESC_TABLE entry and start
- * to iterate.
- */
- this_level--;
- level_size = XLAT_BLOCK_SIZE(this_level);
- this_base = desc_tables[this_level].table_base;
- table_idx = desc_tables[this_level].idx;
- table_idx_va =
- desc_tables[this_level].table_idx_va;
- if (this_level == level) {
- max_entries = table_entries;
- } else {
- max_entries = XLAT_TABLE_ENTRIES;
- }
-
- assert(this_base != NULL);
- }
- } else {
- uint64_t desc = this_base[table_idx];
-
- if ((desc & DESC_MASK) == INVALID_DESC) {
- if (invalid_row_count == 0) {
- printf("%sVA:0x%lx size:0x%zx\n",
- level_spacers[this_level],
- table_idx_va, level_size);
- }
- invalid_row_count++;
- table_idx++;
- table_idx_va += level_size;
- } else {
- if (invalid_row_count > 1) {
- printf(invalid_descriptors_ommited,
- level_spacers[this_level],
- invalid_row_count - 1);
- }
- invalid_row_count = 0;
+ /*
+ * Check if this is a table or a block. Tables are only
+ * allowed in levels other than 3, but DESC_PAGE has the
+ * same value as DESC_TABLE, so we need to check.
+ */
+ if (((desc & DESC_MASK) == TABLE_DESC) &&
+ (level < XLAT_TABLE_LEVEL_MAX)) {
/*
- * Check if this is a table or a block. Tables
- * are only allowed in levels other than 3, but
- * DESC_PAGE has the same value as DESC_TABLE,
- * so we need to check.
+ * Do not print any PA for a table descriptor,
+ * as it doesn't directly map physical memory
+ * but instead points to the next translation
+ * table in the translation table walk.
*/
+ printf("%sVA:0x%lx size:0x%zx\n",
+ level_spacers[level],
+ table_idx_va, level_size);
+
+ uintptr_t addr_inner = desc & TABLE_ADDR_MASK;
- if (((desc & DESC_MASK) == TABLE_DESC) &&
- (this_level < XLAT_TABLE_LEVEL_MAX)) {
- uintptr_t addr_inner;
-
- /*
- * Do not print any PA for a table
- * descriptor, as it doesn't directly
- * map physical memory but instead
- * points to the next translation
- * table in the translation table walk.
- */
- printf("%sVA:0x%lx size:0x%zx\n",
- level_spacers[this_level],
- table_idx_va, level_size);
-
- addr_inner = desc & TABLE_ADDR_MASK;
- /* save current xlat level */
- desc_tables[this_level].table_base =
- this_base;
- desc_tables[this_level].idx =
- table_idx + 1;
- desc_tables[this_level].table_idx_va =
- table_idx_va + level_size;
-
- /* start iterating next level entries */
- this_base = (uint64_t *)addr_inner;
- max_entries = XLAT_TABLE_ENTRIES;
- this_level++;
- level_size =
- XLAT_BLOCK_SIZE(this_level);
- table_idx = 0U;
- } else {
- printf("%sVA:0x%lx PA:0x%llx size:0x%zx ",
- level_spacers[this_level],
- table_idx_va,
- (uint64_t)(desc & TABLE_ADDR_MASK),
- level_size);
- xlat_desc_print(ctx, desc);
- printf("\n");
-
- table_idx++;
- table_idx_va += level_size;
-
- }
+ xlat_tables_print_internal(ctx, table_idx_va,
+ (uint64_t *)addr_inner,
+ XLAT_TABLE_ENTRIES, level + 1U);
+ } else {
+ printf("%sVA:0x%lx PA:0x%llx size:0x%zx ",
+ level_spacers[level], table_idx_va,
+ (uint64_t)(desc & TABLE_ADDR_MASK),
+ level_size);
+ xlat_desc_print(ctx, desc);
+ printf("\n");
}
}
+
+ table_idx++;
+ table_idx_va += level_size;
+ }
+
+ if (invalid_row_count > 1) {
+ printf(invalid_descriptors_ommited,
+ level_spacers[level], invalid_row_count - 1);
}
}