author    davidcunado-arm <david.cunado@arm.com>    2017-10-20 17:17:09 +0100
committer GitHub <noreply@github.com>               2017-10-20 17:17:09 +0100
commit    8b9f419e3e175bc4243118b5c9fc2586a365fe24 (patch)
tree      bb1d9e47deeb7a76133e52003dc1b625490d8f30 /lib
parent    37b5614ffef15a09fd602e4842493548777b8b55 (diff)
parent    ec0c8fdacf410b8d33891d569f0c4df6bcb41823 (diff)

Merge pull request #1136 from antonio-nino-diaz-arm/an/xlat-get-set-attr

Add APIs to get and modify attributes of memory regions
Diffstat (limited to 'lib')

-rw-r--r--  lib/aarch64/misc_helpers.S                     30
-rw-r--r--  lib/xlat_tables_v2/aarch32/xlat_tables_arch.c   4
-rw-r--r--  lib/xlat_tables_v2/xlat_tables_internal.c     347

3 files changed, 373 insertions(+), 8 deletions(-)
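Before the diff itself, a minimal usage sketch of the two APIs this merge adds. The signatures of get_mem_attributes() and change_mem_attributes() are taken from the patch below; the helper name, the xlat_tables_v2.h include and the surrounding setup are illustrative assumptions, not part of the commit:

#include <errno.h>
#include <xlat_tables_v2.h>

/* Query the attributes of the page that maps base_va, then remap it
 * read-only and non-executable while keeping its memory type. */
static int make_page_ro_xn(xlat_ctx_t *ctx, uintptr_t base_va)
{
	mmap_attr_t attr;
	int ret = get_mem_attributes(ctx, base_va, &attr);

	if (ret != 0)
		return ret;

	/* change_mem_attributes() only honours MT_RW, MT_EXECUTE_NEVER
	 * and MT_USER from this value; other bits are ignored. */
	return change_mem_attributes(ctx, base_va, PAGE_SIZE,
				     (attr & ~MT_RW) | MT_EXECUTE_NEVER);
}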
diff --git a/lib/aarch64/misc_helpers.S b/lib/aarch64/misc_helpers.S
index 78153bfb..9dfe46a2 100644
--- a/lib/aarch64/misc_helpers.S
+++ b/lib/aarch64/misc_helpers.S
@@ -18,7 +18,9 @@
.globl zeromem16
.globl memcpy16
+ .globl disable_mmu_el1
.globl disable_mmu_el3
+ .globl disable_mmu_icache_el1
.globl disable_mmu_icache_el3
#if SUPPORT_VFP
@@ -451,11 +453,11 @@ endfunc memcpy16
func disable_mmu_el3
mov x1, #(SCTLR_M_BIT | SCTLR_C_BIT)
-do_disable_mmu:
+do_disable_mmu_el3:
mrs x0, sctlr_el3
bic x0, x0, x1
msr sctlr_el3, x0
- isb // ensure MMU is off
+ isb /* ensure MMU is off */
dsb sy
ret
endfunc disable_mmu_el3
@@ -463,10 +465,32 @@ endfunc disable_mmu_el3
func disable_mmu_icache_el3
mov x1, #(SCTLR_M_BIT | SCTLR_C_BIT | SCTLR_I_BIT)
- b do_disable_mmu
+ b do_disable_mmu_el3
endfunc disable_mmu_icache_el3
/* ---------------------------------------------------------------------------
+ * Disable the MMU at EL1
+ * ---------------------------------------------------------------------------
+ */
+
+func disable_mmu_el1
+ mov x1, #(SCTLR_M_BIT | SCTLR_C_BIT)
+do_disable_mmu_el1:
+ mrs x0, sctlr_el1
+ bic x0, x0, x1
+ msr sctlr_el1, x0
+ isb /* ensure MMU is off */
+ dsb sy
+ ret
+endfunc disable_mmu_el1
+
+
+func disable_mmu_icache_el1
+ mov x1, #(SCTLR_M_BIT | SCTLR_C_BIT | SCTLR_I_BIT)
+ b do_disable_mmu_el1
+endfunc disable_mmu_icache_el1
+
+/* ---------------------------------------------------------------------------
* Enable the use of VFP at EL3
* ---------------------------------------------------------------------------
*/
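The new EL1 pair mirrors the existing disable_mmu_el3()/disable_mmu_icache_el3() helpers. Seen from C, the declarations would look as follows (a sketch; the declaring header is not part of this diff):

void disable_mmu_el1(void);		/* clears SCTLR_EL1.M and .C */
void disable_mmu_icache_el1(void);	/* additionally clears SCTLR_EL1.I */

/* e.g. before handing control to a payload that sets up its own
 * translation tables: */
disable_mmu_icache_el1();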
diff --git a/lib/xlat_tables_v2/aarch32/xlat_tables_arch.c b/lib/xlat_tables_v2/aarch32/xlat_tables_arch.c
index cbc86850..642f799a 100644
--- a/lib/xlat_tables_v2/aarch32/xlat_tables_arch.c
+++ b/lib/xlat_tables_v2/aarch32/xlat_tables_arch.c
@@ -27,8 +27,6 @@ int is_mmu_enabled_ctx(const xlat_ctx_t *ctx __unused)
return (read_sctlr() & SCTLR_M_BIT) != 0;
}
-#if PLAT_XLAT_TABLES_DYNAMIC
-
void xlat_arch_tlbi_va(uintptr_t va)
{
/*
@@ -77,8 +75,6 @@ void xlat_arch_tlbi_va_sync(void)
isb();
}
-#endif /* PLAT_XLAT_TABLES_DYNAMIC */
-
int xlat_arch_current_el(void)
{
/*
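Removing the PLAT_XLAT_TABLES_DYNAMIC guard makes xlat_arch_tlbi_va() and xlat_arch_tlbi_va_sync() available unconditionally; the new change_mem_attributes() (see the last file in this diff) relies on them for its break-before-make sequence. A condensed sketch of that pattern, with placeholder descriptor values:

*entry = INVALID_DESC;		/* break: invalidate the old mapping    */
xlat_arch_tlbi_va(base_va);	/* discard cached translations          */
xlat_arch_tlbi_va_sync();	/* wait for the invalidation to finish  */
*entry = new_desc;		/* make: install the new descriptor     */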
diff --git a/lib/xlat_tables_v2/xlat_tables_internal.c b/lib/xlat_tables_v2/xlat_tables_internal.c
index 9faeb7ef..0acfacbf 100644
--- a/lib/xlat_tables_v2/xlat_tables_internal.c
+++ b/lib/xlat_tables_v2/xlat_tables_internal.c
@@ -1022,7 +1022,7 @@ int mmap_remove_dynamic_region(uintptr_t base_va, size_t size)
#if LOG_LEVEL >= LOG_LEVEL_VERBOSE
/* Print the attributes of the specified block descriptor. */
-static void xlat_desc_print(xlat_ctx_t *ctx, uint64_t desc)
+static void xlat_desc_print(const xlat_ctx_t *ctx, uint64_t desc)
{
int mem_type_index = ATTR_INDEX_GET(desc);
xlat_regime_t xlat_regime = ctx->xlat_regime;
@@ -1315,3 +1315,348 @@ void enable_mmu_el3(unsigned int flags)
}
#endif /* AARCH32 */
+
+/*
+ * Do a translation table walk to find the block or page descriptor that maps
+ * virtual_addr.
+ *
+ * On success, return the address of the descriptor within the translation
+ * table. Its lookup level is stored in '*out_level'.
+ * On error, return NULL.
+ *
+ * xlat_table_base
+ * Base address for the initial lookup level.
+ * xlat_table_base_entries
+ * Number of entries in the translation table for the initial lookup level.
+ * virt_addr_space_size
+ * Size in bytes of the virtual address space.
+ */
+static uint64_t *find_xlat_table_entry(uintptr_t virtual_addr,
+ void *xlat_table_base,
+ int xlat_table_base_entries,
+ unsigned long long virt_addr_space_size,
+ int *out_level)
+{
+ unsigned int start_level;
+ uint64_t *table;
+ int entries;
+
+ VERBOSE("%s(%p)\n", __func__, (void *)virtual_addr);
+
+ start_level = GET_XLAT_TABLE_LEVEL_BASE(virt_addr_space_size);
+ VERBOSE("Starting translation table walk from level %i\n", start_level);
+
+ table = xlat_table_base;
+ entries = xlat_table_base_entries;
+
+ for (unsigned int level = start_level;
+ level <= XLAT_TABLE_LEVEL_MAX;
+ ++level) {
+ int idx;
+ uint64_t desc;
+ uint64_t desc_type;
+
+ VERBOSE("Table address: %p\n", (void *)table);
+
+ idx = XLAT_TABLE_IDX(virtual_addr, level);
+ VERBOSE("Index into level %i table: %i\n", level, idx);
+ if (idx >= entries) {
+ VERBOSE("Invalid address\n");
+ return NULL;
+ }
+
+ desc = table[idx];
+ desc_type = desc & DESC_MASK;
+ VERBOSE("Descriptor at level %i: 0x%llx\n", level,
+ (unsigned long long)desc);
+
+ if (desc_type == INVALID_DESC) {
+ VERBOSE("Invalid entry (memory not mapped)\n");
+ return NULL;
+ }
+
+ if (level == XLAT_TABLE_LEVEL_MAX) {
+ /*
+ * There can't be table entries at the final lookup
+ * level.
+ */
+ assert(desc_type == PAGE_DESC);
+ VERBOSE("Descriptor mapping a memory page (size: 0x%llx)\n",
+ (unsigned long long)XLAT_BLOCK_SIZE(XLAT_TABLE_LEVEL_MAX));
+ *out_level = level;
+ return &table[idx];
+ }
+
+ if (desc_type == BLOCK_DESC) {
+ VERBOSE("Descriptor mapping a memory block (size: 0x%llx)\n",
+ (unsigned long long)XLAT_BLOCK_SIZE(level));
+ *out_level = level;
+ return &table[idx];
+ }
+
+ assert(desc_type == TABLE_DESC);
+ VERBOSE("Table descriptor, continuing xlat table walk...\n");
+ table = (uint64_t *)(uintptr_t)(desc & TABLE_ADDR_MASK);
+ entries = XLAT_TABLE_ENTRIES;
+ }
+
+ /*
+ * This shouldn't be reached, the translation table walk should end at
+ * most at level XLAT_TABLE_LEVEL_MAX and return from inside the loop.
+ */
+ assert(0);
+
+ return NULL;
+}
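The index taken from the virtual address at each level resolves 9 address bits per step. A self-contained sketch of the arithmetic behind XLAT_TABLE_IDX(), assuming the usual 4KB granule with XLAT_TABLE_LEVEL_MAX == 3 (the names below are illustrative, not the library's macros):

#include <stdint.h>

#define BITS_PER_LEVEL	9U
#define PAGE_SHIFT_4K	12U

static unsigned int walk_index(uintptr_t va, unsigned int level)
{
	unsigned int shift = PAGE_SHIFT_4K + (3U - level) * BITS_PER_LEVEL;

	return (unsigned int)((va >> shift) & ((1U << BITS_PER_LEVEL) - 1U));
}

/* For va = 0x40201000 this yields index 1 at levels 1, 2 and 3. */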
+
+
+static int get_mem_attributes_internal(const xlat_ctx_t *ctx, uintptr_t base_va,
+ mmap_attr_t *attributes, uint64_t **table_entry,
+ unsigned long long *addr_pa, int *table_level)
+{
+ uint64_t *entry;
+ uint64_t desc;
+ int level;
+ unsigned long long virt_addr_space_size;
+
+ /*
+ * Sanity-check arguments.
+ */
+ assert(ctx != NULL);
+ assert(ctx->initialized);
+ assert(ctx->xlat_regime == EL1_EL0_REGIME || ctx->xlat_regime == EL3_REGIME);
+
+ virt_addr_space_size = (unsigned long long)ctx->va_max_address + 1;
+ assert(virt_addr_space_size > 0);
+
+ entry = find_xlat_table_entry(base_va,
+ ctx->base_table,
+ ctx->base_table_entries,
+ virt_addr_space_size,
+ &level);
+ if (entry == NULL) {
+ WARN("Address %p is not mapped.\n", (void *)base_va);
+ return -EINVAL;
+ }
+
+ if (addr_pa != NULL) {
+ *addr_pa = *entry & TABLE_ADDR_MASK;
+ }
+
+ if (table_entry != NULL) {
+ *table_entry = entry;
+ }
+
+ if (table_level != NULL) {
+ *table_level = level;
+ }
+
+ desc = *entry;
+
+#if LOG_LEVEL >= LOG_LEVEL_VERBOSE
+ VERBOSE("Attributes: ");
+ xlat_desc_print(ctx, desc);
+ tf_printf("\n");
+#endif /* LOG_LEVEL >= LOG_LEVEL_VERBOSE */
+
+ assert(attributes != NULL);
+ *attributes = 0;
+
+ int attr_index = (desc >> ATTR_INDEX_SHIFT) & ATTR_INDEX_MASK;
+
+ if (attr_index == ATTR_IWBWA_OWBWA_NTR_INDEX) {
+ *attributes |= MT_MEMORY;
+ } else if (attr_index == ATTR_NON_CACHEABLE_INDEX) {
+ *attributes |= MT_NON_CACHEABLE;
+ } else {
+ assert(attr_index == ATTR_DEVICE_INDEX);
+ *attributes |= MT_DEVICE;
+ }
+
+ int ap2_bit = (desc >> AP2_SHIFT) & 1;
+
+ if (ap2_bit == AP2_RW)
+ *attributes |= MT_RW;
+
+ if (ctx->xlat_regime == EL1_EL0_REGIME) {
+ int ap1_bit = (desc >> AP1_SHIFT) & 1;
+ if (ap1_bit == AP1_ACCESS_UNPRIVILEGED)
+ *attributes |= MT_USER;
+ }
+
+ int ns_bit = (desc >> NS_SHIFT) & 1;
+
+ if (ns_bit == 1)
+ *attributes |= MT_NS;
+
+ uint64_t xn_mask = xlat_arch_regime_get_xn_desc(ctx->xlat_regime);
+
+ if ((desc & xn_mask) == xn_mask) {
+ *attributes |= MT_EXECUTE_NEVER;
+ } else {
+ assert((desc & xn_mask) == 0);
+ }
+
+ return 0;
+}
+
+
+int get_mem_attributes(const xlat_ctx_t *ctx, uintptr_t base_va,
+ mmap_attr_t *attributes)
+{
+ return get_mem_attributes_internal(ctx, base_va, attributes,
+ NULL, NULL, NULL);
+}
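A caller-side sketch of decoding the bits returned by get_mem_attributes(); "ctx" and "base_va" are placeholders and INFO() is the usual TF-A logging macro:

mmap_attr_t attr;

if (get_mem_attributes(ctx, base_va, &attr) == 0) {
	INFO("VA %p: %s, %s, %s\n", (void *)base_va,
	     ((attr & MT_RW) != 0) ? "RW" : "RO",
	     ((attr & MT_EXECUTE_NEVER) != 0) ? "XN" : "executable",
	     ((attr & MT_NS) != 0) ? "non-secure" : "secure");
}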
+
+
+int change_mem_attributes(xlat_ctx_t *ctx,
+ uintptr_t base_va,
+ size_t size,
+ mmap_attr_t attr)
+{
+ /* Note: This implementation isn't optimized. */
+
+ assert(ctx != NULL);
+ assert(ctx->initialized);
+
+ unsigned long long virt_addr_space_size =
+ (unsigned long long)ctx->va_max_address + 1;
+ assert(virt_addr_space_size > 0);
+
+ if (!IS_PAGE_ALIGNED(base_va)) {
+ WARN("%s: Address %p is not aligned on a page boundary.\n",
+ __func__, (void *)base_va);
+ return -EINVAL;
+ }
+
+ if (size == 0) {
+ WARN("%s: Size is 0.\n", __func__);
+ return -EINVAL;
+ }
+
+ if ((size % PAGE_SIZE) != 0) {
+		WARN("%s: Size 0x%zx is not a multiple of the page size.\n",
+ __func__, size);
+ return -EINVAL;
+ }
+
+ if (((attr & MT_EXECUTE_NEVER) == 0) && ((attr & MT_RW) != 0)) {
+		WARN("%s() does not allow remapping memory as both read-write and executable.\n",
+ __func__);
+ return -EINVAL;
+ }
+
+ int pages_count = size / PAGE_SIZE;
+
+ VERBOSE("Changing memory attributes of %i pages starting from address %p...\n",
+ pages_count, (void *)base_va);
+
+ uintptr_t base_va_original = base_va;
+
+ /*
+ * Sanity checks.
+ */
+ for (int i = 0; i < pages_count; ++i) {
+ uint64_t *entry;
+ uint64_t desc;
+ int level;
+
+ entry = find_xlat_table_entry(base_va,
+ ctx->base_table,
+ ctx->base_table_entries,
+ virt_addr_space_size,
+ &level);
+ if (entry == NULL) {
+ WARN("Address %p is not mapped.\n", (void *)base_va);
+ return -EINVAL;
+ }
+
+ desc = *entry;
+
+ /*
+ * Check that all the required pages are mapped at page
+ * granularity.
+ */
+ if (((desc & DESC_MASK) != PAGE_DESC) ||
+ (level != XLAT_TABLE_LEVEL_MAX)) {
+ WARN("Address %p is not mapped at the right granularity.\n",
+ (void *)base_va);
+ WARN("Granularity is 0x%llx, should be 0x%x.\n",
+ (unsigned long long)XLAT_BLOCK_SIZE(level), PAGE_SIZE);
+ return -EINVAL;
+ }
+
+ /*
+ * If the region type is device, it shouldn't be executable.
+ */
+ int attr_index = (desc >> ATTR_INDEX_SHIFT) & ATTR_INDEX_MASK;
+ if (attr_index == ATTR_DEVICE_INDEX) {
+ if ((attr & MT_EXECUTE_NEVER) == 0) {
+				WARN("Setting device memory as executable at address %p is not allowed.\n",
+ (void *)base_va);
+ return -EINVAL;
+ }
+ }
+
+ base_va += PAGE_SIZE;
+ }
+
+ /* Restore original value. */
+ base_va = base_va_original;
+
+ VERBOSE("%s: All pages are mapped, now changing their attributes...\n",
+ __func__);
+
+ for (int i = 0; i < pages_count; ++i) {
+
+ mmap_attr_t old_attr, new_attr;
+ uint64_t *entry;
+ int level;
+ unsigned long long addr_pa;
+
+ get_mem_attributes_internal(ctx, base_va, &old_attr,
+ &entry, &addr_pa, &level);
+
+ VERBOSE("Old attributes: 0x%x\n", old_attr);
+
+ /*
+ * From attr, only MT_RO/MT_RW, MT_EXECUTE/MT_EXECUTE_NEVER and
+ * MT_USER/MT_PRIVILEGED are taken into account. Any other
+ * information is ignored.
+ */
+
+ /* Clean the old attributes so that they can be rebuilt. */
+ new_attr = old_attr & ~(MT_RW|MT_EXECUTE_NEVER|MT_USER);
+
+ /*
+ * Update attributes, but filter out the ones this function
+ * isn't allowed to change.
+ */
+ new_attr |= attr & (MT_RW|MT_EXECUTE_NEVER|MT_USER);
+
+ VERBOSE("New attributes: 0x%x\n", new_attr);
+
+ /*
+ * The break-before-make sequence requires writing an invalid
+ * descriptor and making sure that the system sees the change
+ * before writing the new descriptor.
+ */
+ *entry = INVALID_DESC;
+
+ /* Invalidate any cached copy of this mapping in the TLBs. */
+ xlat_arch_tlbi_va_regime(base_va, ctx->xlat_regime);
+
+ /* Ensure completion of the invalidation. */
+ xlat_arch_tlbi_va_sync();
+
+ /* Write new descriptor */
+ *entry = xlat_desc(ctx, new_attr, addr_pa, level);
+
+ base_va += PAGE_SIZE;
+ }
+
+	/* Ensure that the last descriptor written is seen by the system. */
+ dsbish();
+
+ return 0;
+}
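A typical use of change_mem_attributes() is a W^X transition: keep a buffer read-write while loading code into it, then flip it to read-only and executable. MT_RO, MT_EXECUTE and MT_PRIVILEGED are the zero-valued counterparts of MT_RW, MT_EXECUTE_NEVER and MT_USER; "code_va" and "code_size" below are placeholders:

int ret = change_mem_attributes(ctx, code_va, code_size,
				MT_RO | MT_EXECUTE | MT_PRIVILEGED);

if (ret != 0)
	ERROR("change_mem_attributes() failed (%d)\n", ret);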