author    Dimitris Papastamos <dimitris.papastamos@arm.com>    2018-08-09 15:16:19 +0100
committer GitHub <noreply@github.com>                          2018-08-09 15:16:19 +0100
commit    29be1b55f4e0fdce5632e9c169f2cb43e1955e3f (patch)
tree      6a4900368a09b53293203c7f48e46f6596a60d34 /lib
parent    781842ea8a0d144b5bf2920e0bc0a9701236bdfb (diff)
parent    3e318e40374471e337d33806bb87b24a6777d546 (diff)
Merge pull request #1513 from antonio-nino-diaz-arm/an/xlat-caches
xlat v2: Cleanup and dcache coherency bug fix
Diffstat (limited to 'lib')
-rw-r--r--  lib/psci/aarch32/psci_helpers.S               | 22
-rw-r--r--  lib/psci/aarch64/psci_helpers.S               | 22
-rw-r--r--  lib/xlat_tables_v2/aarch32/xlat_tables_arch.c |  5
-rw-r--r--  lib/xlat_tables_v2/aarch64/xlat_tables_arch.c | 11
-rw-r--r--  lib/xlat_tables_v2/xlat_tables_context.c      | 10
-rw-r--r--  lib/xlat_tables_v2/xlat_tables_core.c         | 39
-rw-r--r--  lib/xlat_tables_v2/xlat_tables_private.h      |  3
-rw-r--r--  lib/xlat_tables_v2/xlat_tables_utils.c        | 28
8 files changed, 79 insertions(+), 61 deletions(-)
diff --git a/lib/psci/aarch32/psci_helpers.S b/lib/psci/aarch32/psci_helpers.S
index a29a29c4..63d7e708 100644
--- a/lib/psci/aarch32/psci_helpers.S
+++ b/lib/psci/aarch32/psci_helpers.S
@@ -91,28 +91,6 @@ func psci_do_pwrup_cache_maintenance
stcopr r0, SCTLR
isb
-#if PLAT_XLAT_TABLES_DYNAMIC
- /* ---------------------------------------------
- * During warm boot the MMU is enabled with data
- * cache disabled, then the interconnect is set
- * up and finally the data cache is enabled.
- *
- * During this period, if another CPU modifies
- * the translation tables, the MMU table walker
- * may read the old entries. This is only a
- * problem for dynamic regions, the warm boot
- * code isn't affected because it is static.
- *
- * Invalidate all TLB entries loaded while the
- * CPU wasn't coherent with the rest of the
- * system.
- * ---------------------------------------------
- */
- stcopr r0, TLBIALL
- dsb ish
- isb
-#endif
-
pop {r12, pc}
endfunc psci_do_pwrup_cache_maintenance
diff --git a/lib/psci/aarch64/psci_helpers.S b/lib/psci/aarch64/psci_helpers.S
index d37ca764..06d6636e 100644
--- a/lib/psci/aarch64/psci_helpers.S
+++ b/lib/psci/aarch64/psci_helpers.S
@@ -115,28 +115,6 @@ func psci_do_pwrup_cache_maintenance
msr sctlr_el3, x0
isb
-#if PLAT_XLAT_TABLES_DYNAMIC
- /* ---------------------------------------------
- * During warm boot the MMU is enabled with data
- * cache disabled, then the interconnect is set
- * up and finally the data cache is enabled.
- *
- * During this period, if another CPU modifies
- * the translation tables, the MMU table walker
- * may read the old entries. This is only a
- * problem for dynamic regions, the warm boot
- * code isn't affected because it is static.
- *
- * Invalidate all TLB entries loaded while the
- * CPU wasn't coherent with the rest of the
- * system.
- * ---------------------------------------------
- */
- tlbi alle3
- dsb ish
- isb
-#endif
-
ldp x29, x30, [sp], #16
ret
endfunc psci_do_pwrup_cache_maintenance
diff --git a/lib/xlat_tables_v2/aarch32/xlat_tables_arch.c b/lib/xlat_tables_v2/aarch32/xlat_tables_arch.c
index c09fb596..24828409 100644
--- a/lib/xlat_tables_v2/aarch32/xlat_tables_arch.c
+++ b/lib/xlat_tables_v2/aarch32/xlat_tables_arch.c
@@ -48,6 +48,11 @@ bool is_mmu_enabled_ctx(const xlat_ctx_t *ctx __unused)
return (read_sctlr() & SCTLR_M_BIT) != 0;
}
+bool is_dcache_enabled(void)
+{
+ return (read_sctlr() & SCTLR_C_BIT) != 0;
+}
+
uint64_t xlat_arch_regime_get_xn_desc(int xlat_regime __unused)
{
return UPPER_ATTRS(XN);
diff --git a/lib/xlat_tables_v2/aarch64/xlat_tables_arch.c b/lib/xlat_tables_v2/aarch64/xlat_tables_arch.c
index 4e4292ff..cf8070ad 100644
--- a/lib/xlat_tables_v2/aarch64/xlat_tables_arch.c
+++ b/lib/xlat_tables_v2/aarch64/xlat_tables_arch.c
@@ -112,6 +112,17 @@ bool is_mmu_enabled_ctx(const xlat_ctx_t *ctx)
}
}
+bool is_dcache_enabled(void)
+{
+ unsigned int el = (unsigned int)GET_EL(read_CurrentEl());
+
+ if (el == 1U) {
+ return (read_sctlr_el1() & SCTLR_C_BIT) != 0U;
+ } else {
+ return (read_sctlr_el3() & SCTLR_C_BIT) != 0U;
+ }
+}
+
uint64_t xlat_arch_regime_get_xn_desc(int xlat_regime)
{
if (xlat_regime == EL1_EL0_REGIME) {
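
Both is_dcache_enabled() implementations added above reduce to an SCTLR.C check (with an EL1/EL3 split on AArch64). A minimal sketch of how the predicate is meant to be consumed, assuming the existing TF-A clean_dcache_range() helper; the wrapper name clean_descriptors() is illustrative and mirrors the xlat_clean_dcache_range() helper added to xlat_tables_core.c further down:

    #include <stddef.h>
    #include <stdint.h>
    #include <arch_helpers.h>          /* clean_dcache_range() */
    #include "xlat_tables_private.h"   /* is_dcache_enabled()  */

    /* Clean a block of translation table entries to the point of coherency,
     * but only when the data cache is on: with SCTLR.C clear the writes
     * already went straight to memory and there is nothing to clean. */
    static void clean_descriptors(uint64_t *table, size_t entries)
    {
            if (is_dcache_enabled())
                    clean_dcache_range((uintptr_t)table,
                                       entries * sizeof(uint64_t));
    }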
diff --git a/lib/xlat_tables_v2/xlat_tables_context.c b/lib/xlat_tables_v2/xlat_tables_context.c
index d7b2ebf8..143f08ab 100644
--- a/lib/xlat_tables_v2/xlat_tables_context.c
+++ b/lib/xlat_tables_v2/xlat_tables_context.c
@@ -90,6 +90,16 @@ void init_xlat_tables(void)
init_xlat_tables_ctx(&tf_xlat_ctx);
}
+int xlat_get_mem_attributes(uintptr_t base_va, uint32_t *attr)
+{
+ return xlat_get_mem_attributes_ctx(&tf_xlat_ctx, base_va, attr);
+}
+
+int xlat_change_mem_attributes(uintptr_t base_va, size_t size, uint32_t attr)
+{
+ return xlat_change_mem_attributes_ctx(&tf_xlat_ctx, base_va, size, attr);
+}
+
/*
* If dynamic allocation of new regions is disabled then by the time we call the
* function enabling the MMU, we'll have registered all the memory regions to
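
The two new wrappers exported above operate on the default tf_xlat_ctx context. A hedged usage sketch, assuming the public xlat_tables_v2.h header and its existing MT_* attribute flags; lock_down_page() and its caller-supplied address are made up for illustration:

    #include <errno.h>
    #include <xlat_tables_v2.h>

    /* Example: re-map one currently mapped page as secure read-only,
     * execute-never data.  page_va must be page-aligned and already
     * mapped, otherwise the calls return an error (e.g. -EINVAL). */
    static int lock_down_page(uintptr_t page_va)
    {
            uint32_t attr;
            int ret;

            /* Read back the current attributes first (optional check). */
            ret = xlat_get_mem_attributes(page_va, &attr);
            if (ret != 0)
                    return ret;

            return xlat_change_mem_attributes(page_va, PAGE_SIZE,
                                              MT_MEMORY | MT_RO |
                                              MT_SECURE | MT_EXECUTE_NEVER);
    }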
diff --git a/lib/xlat_tables_v2/xlat_tables_core.c b/lib/xlat_tables_v2/xlat_tables_core.c
index 54e58341..56b9514c 100644
--- a/lib/xlat_tables_v2/xlat_tables_core.c
+++ b/lib/xlat_tables_v2/xlat_tables_core.c
@@ -18,6 +18,13 @@
#include "xlat_tables_private.h"
+/* Helper function that cleans the data cache only if it is enabled. */
+static inline void xlat_clean_dcache_range(uintptr_t addr, size_t size)
+{
+ if (is_dcache_enabled())
+ clean_dcache_range(addr, size);
+}
+
#if PLAT_XLAT_TABLES_DYNAMIC
/*
@@ -329,7 +336,10 @@ static void xlat_tables_unmap_region(xlat_ctx_t *ctx, mmap_region_t *mm,
xlat_tables_unmap_region(ctx, mm, table_idx_va,
subtable, XLAT_TABLE_ENTRIES,
level + 1U);
-
+#if !(HW_ASSISTED_COHERENCY || WARMBOOT_ENABLE_DCACHE_EARLY)
+ xlat_clean_dcache_range((uintptr_t)subtable,
+ XLAT_TABLE_ENTRIES * sizeof(uint64_t));
+#endif
/*
* If the subtable is now empty, remove its reference.
*/
@@ -563,6 +573,10 @@ static uintptr_t xlat_tables_map_region(xlat_ctx_t *ctx, mmap_region_t *mm,
end_va = xlat_tables_map_region(ctx, mm, table_idx_va,
subtable, XLAT_TABLE_ENTRIES,
level + 1U);
+#if !(HW_ASSISTED_COHERENCY || WARMBOOT_ENABLE_DCACHE_EARLY)
+ xlat_clean_dcache_range((uintptr_t)subtable,
+ XLAT_TABLE_ENTRIES * sizeof(uint64_t));
+#endif
if (end_va !=
(table_idx_va + XLAT_BLOCK_SIZE(level) - 1U))
return end_va;
@@ -575,6 +589,10 @@ static uintptr_t xlat_tables_map_region(xlat_ctx_t *ctx, mmap_region_t *mm,
end_va = xlat_tables_map_region(ctx, mm, table_idx_va,
subtable, XLAT_TABLE_ENTRIES,
level + 1U);
+#if !(HW_ASSISTED_COHERENCY || WARMBOOT_ENABLE_DCACHE_EARLY)
+ xlat_clean_dcache_range((uintptr_t)subtable,
+ XLAT_TABLE_ENTRIES * sizeof(uint64_t));
+#endif
if (end_va !=
(table_idx_va + XLAT_BLOCK_SIZE(level) - 1U))
return end_va;
@@ -859,7 +877,10 @@ int mmap_add_dynamic_region_ctx(xlat_ctx_t *ctx, mmap_region_t *mm)
end_va = xlat_tables_map_region(ctx, mm_cursor,
0U, ctx->base_table, ctx->base_table_entries,
ctx->base_level);
-
+#if !(HW_ASSISTED_COHERENCY || WARMBOOT_ENABLE_DCACHE_EARLY)
+ xlat_clean_dcache_range((uintptr_t)ctx->base_table,
+ ctx->base_table_entries * sizeof(uint64_t));
+#endif
/* Failed to map, remove mmap entry, unmap and return error. */
if (end_va != (mm_cursor->base_va + mm_cursor->size - 1U)) {
(void)memmove(mm_cursor, mm_cursor + 1U,
@@ -885,7 +906,10 @@ int mmap_add_dynamic_region_ctx(xlat_ctx_t *ctx, mmap_region_t *mm)
xlat_tables_unmap_region(ctx, &unmap_mm, 0U,
ctx->base_table, ctx->base_table_entries,
ctx->base_level);
-
+#if !(HW_ASSISTED_COHERENCY || WARMBOOT_ENABLE_DCACHE_EARLY)
+ xlat_clean_dcache_range((uintptr_t)ctx->base_table,
+ ctx->base_table_entries * sizeof(uint64_t));
+#endif
return -ENOMEM;
}
@@ -951,6 +975,10 @@ int mmap_remove_dynamic_region_ctx(xlat_ctx_t *ctx, uintptr_t base_va,
xlat_tables_unmap_region(ctx, mm, 0U, ctx->base_table,
ctx->base_table_entries,
ctx->base_level);
+#if !(HW_ASSISTED_COHERENCY || WARMBOOT_ENABLE_DCACHE_EARLY)
+ xlat_clean_dcache_range((uintptr_t)ctx->base_table,
+ ctx->base_table_entries * sizeof(uint64_t));
+#endif
xlat_arch_tlbi_va_sync();
}
@@ -1012,7 +1040,10 @@ void init_xlat_tables_ctx(xlat_ctx_t *ctx)
uintptr_t end_va = xlat_tables_map_region(ctx, mm, 0U,
ctx->base_table, ctx->base_table_entries,
ctx->base_level);
-
+#if !(HW_ASSISTED_COHERENCY || WARMBOOT_ENABLE_DCACHE_EARLY)
+ xlat_clean_dcache_range((uintptr_t)ctx->base_table,
+ ctx->base_table_entries * sizeof(uint64_t));
+#endif
if (end_va != (mm->base_va + mm->size - 1U)) {
ERROR("Not enough memory to map region:\n"
" VA:0x%lx PA:0x%llx size:0x%zx attr:0x%x\n",
diff --git a/lib/xlat_tables_v2/xlat_tables_private.h b/lib/xlat_tables_v2/xlat_tables_private.h
index 313bd8d6..528996a2 100644
--- a/lib/xlat_tables_v2/xlat_tables_private.h
+++ b/lib/xlat_tables_v2/xlat_tables_private.h
@@ -97,4 +97,7 @@ unsigned long long xlat_arch_get_max_supported_pa(void);
*/
bool is_mmu_enabled_ctx(const xlat_ctx_t *ctx);
+/* Returns true if the data cache is enabled at the current EL. */
+bool is_dcache_enabled(void);
+
#endif /* XLAT_TABLES_PRIVATE_H */
diff --git a/lib/xlat_tables_v2/xlat_tables_utils.c b/lib/xlat_tables_v2/xlat_tables_utils.c
index 0cbd45e4..8cad3483 100644
--- a/lib/xlat_tables_v2/xlat_tables_utils.c
+++ b/lib/xlat_tables_v2/xlat_tables_utils.c
@@ -314,8 +314,8 @@ static uint64_t *find_xlat_table_entry(uintptr_t virtual_addr,
}
-static int get_mem_attributes_internal(const xlat_ctx_t *ctx, uintptr_t base_va,
- uint32_t *attributes, uint64_t **table_entry,
+static int xlat_get_mem_attributes_internal(const xlat_ctx_t *ctx,
+ uintptr_t base_va, uint32_t *attributes, uint64_t **table_entry,
unsigned long long *addr_pa, unsigned int *table_level)
{
uint64_t *entry;
@@ -407,18 +407,16 @@ static int get_mem_attributes_internal(const xlat_ctx_t *ctx, uintptr_t base_va,
}
-int get_mem_attributes(const xlat_ctx_t *ctx, uintptr_t base_va,
- uint32_t *attributes)
+int xlat_get_mem_attributes_ctx(const xlat_ctx_t *ctx, uintptr_t base_va,
+ uint32_t *attr)
{
- return get_mem_attributes_internal(ctx, base_va, attributes,
- NULL, NULL, NULL);
+ return xlat_get_mem_attributes_internal(ctx, base_va, attr,
+ NULL, NULL, NULL);
}
-int change_mem_attributes(const xlat_ctx_t *ctx,
- uintptr_t base_va,
- size_t size,
- uint32_t attr)
+int xlat_change_mem_attributes_ctx(const xlat_ctx_t *ctx, uintptr_t base_va,
+ size_t size, uint32_t attr)
{
/* Note: This implementation isn't optimized. */
@@ -517,7 +515,7 @@ int change_mem_attributes(const xlat_ctx_t *ctx,
unsigned int level = 0U;
unsigned long long addr_pa = 0ULL;
- (void) get_mem_attributes_internal(ctx, base_va, &old_attr,
+ (void) xlat_get_mem_attributes_internal(ctx, base_va, &old_attr,
&entry, &addr_pa, &level);
/*
@@ -541,7 +539,9 @@ int change_mem_attributes(const xlat_ctx_t *ctx,
* before writing the new descriptor.
*/
*entry = INVALID_DESC;
-
+#if !(HW_ASSISTED_COHERENCY || WARMBOOT_ENABLE_DCACHE_EARLY)
+ dccvac((uintptr_t)entry);
+#endif
/* Invalidate any cached copy of this mapping in the TLBs. */
xlat_arch_tlbi_va(base_va, ctx->xlat_regime);
@@ -550,7 +550,9 @@ int change_mem_attributes(const xlat_ctx_t *ctx,
/* Write new descriptor */
*entry = xlat_desc(ctx, new_attr, addr_pa, level);
-
+#if !(HW_ASSISTED_COHERENCY || WARMBOOT_ENABLE_DCACHE_EARLY)
+ dccvac((uintptr_t)entry);
+#endif
base_va += PAGE_SIZE;
}
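
Taken together, the hunks in xlat_change_mem_attributes_ctx() perform a break-before-make update for each page, with cache maintenance added so the sequence also works while other observers are not yet cache-coherent. A condensed sketch using the names from the hunks above (dccvac(), xlat_arch_tlbi_va(), xlat_arch_tlbi_va_sync(), xlat_desc()); the remap_page() wrapper itself is illustrative:

    static void remap_page(const xlat_ctx_t *ctx, uint64_t *entry,
                           uintptr_t base_va, unsigned long long addr_pa,
                           uint32_t new_attr, unsigned int level)
    {
            /* 1. Break: remove the old mapping... */
            *entry = INVALID_DESC;
    #if !(HW_ASSISTED_COHERENCY || WARMBOOT_ENABLE_DCACHE_EARLY)
            /* ...and push the invalid descriptor out to memory. */
            dccvac((uintptr_t)entry);
    #endif
            /* 2. Drop any cached copy of the old mapping from the TLBs
             *    and wait for the invalidation to complete. */
            xlat_arch_tlbi_va(base_va, ctx->xlat_regime);
            xlat_arch_tlbi_va_sync();

            /* 3. Make: write and publish the new descriptor. */
            *entry = xlat_desc(ctx, new_attr, addr_pa, level);
    #if !(HW_ASSISTED_COHERENCY || WARMBOOT_ENABLE_DCACHE_EARLY)
            dccvac((uintptr_t)entry);
    #endif
    }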