Diffstat (limited to 'arch/arm64/mm')
 arch/arm64/mm/context.c     |  9
 arch/arm64/mm/dma-mapping.c | 66
 arch/arm64/mm/fault.c       |  5
 arch/arm64/mm/flush.c       |  4
 arch/arm64/mm/init.c        |  2
 arch/arm64/mm/numa.c        | 28
 6 files changed, 60 insertions(+), 54 deletions(-)
diff --git a/arch/arm64/mm/context.c b/arch/arm64/mm/context.c
index b7b397802088..efcf1f7ef1e4 100644
--- a/arch/arm64/mm/context.c
+++ b/arch/arm64/mm/context.c
@@ -179,7 +179,7 @@ static u64 new_context(struct mm_struct *mm, unsigned int cpu)
&asid_generation);
flush_context(cpu);
- /* We have at least 1 ASID per CPU, so this will always succeed */
+ /* We have more ASIDs than CPUs, so this will always succeed */
asid = find_next_zero_bit(asid_map, NUM_USER_ASIDS, 1);
set_asid:
@@ -227,8 +227,11 @@ switch_mm_fastpath:
static int asids_init(void)
{
asid_bits = get_cpu_asid_bits();
- /* If we end up with more CPUs than ASIDs, expect things to crash */
- WARN_ON(NUM_USER_ASIDS < num_possible_cpus());
+ /*
+ * Expect allocation after rollover to fail if we don't have at least
+ * one more ASID than CPUs. ASID #0 is reserved for init_mm.
+ */
+ WARN_ON(NUM_USER_ASIDS - 1 <= num_possible_cpus());
atomic64_set(&asid_generation, ASID_FIRST_VERSION);
asid_map = kzalloc(BITS_TO_LONGS(NUM_USER_ASIDS) * sizeof(*asid_map),
GFP_KERNEL);
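
The reworked WARN_ON() above encodes the rollover invariant: each CPU may keep one reserved ASID alive across a generation rollover and ASID #0 is held back for init_mm, so a fresh allocation is only guaranteed when NUM_USER_ASIDS - 1 exceeds the possible CPU count. A minimal standalone sketch of that arithmetic (illustrative numbers, not kernel code):

    /*
     * Hedged, standalone model of the rollover invariant (not kernel code):
     * ASID #0 is reserved for init_mm and each CPU may pin one reserved
     * ASID across a generation rollover.
     */
    #include <stdbool.h>
    #include <stdio.h>

    static bool alloc_after_rollover_always_succeeds(unsigned long num_user_asids,
                                                     unsigned int possible_cpus)
    {
        /* Matches the new WARN_ON(): warn unless NUM_USER_ASIDS - 1 > CPUs. */
        return (num_user_asids - 1) > possible_cpus;
    }

    int main(void)
    {
        /* Illustrative: 8-bit ASIDs give 256 values, 255 usable. */
        printf("256 ASIDs, 254 CPUs: %s\n",
               alloc_after_rollover_always_succeeds(256, 254) ? "ok" : "can fail");
        printf("256 ASIDs, 255 CPUs: %s\n",
               alloc_after_rollover_always_succeeds(256, 255) ? "ok" : "can fail");
        return 0;
    }
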
diff --git a/arch/arm64/mm/dma-mapping.c b/arch/arm64/mm/dma-mapping.c
index f6c55afab3e2..c4284c432ae8 100644
--- a/arch/arm64/mm/dma-mapping.c
+++ b/arch/arm64/mm/dma-mapping.c
@@ -32,10 +32,10 @@
static int swiotlb __read_mostly;
-static pgprot_t __get_dma_pgprot(struct dma_attrs *attrs, pgprot_t prot,
+static pgprot_t __get_dma_pgprot(unsigned long attrs, pgprot_t prot,
bool coherent)
{
- if (!coherent || dma_get_attr(DMA_ATTR_WRITE_COMBINE, attrs))
+ if (!coherent || (attrs & DMA_ATTR_WRITE_COMBINE))
return pgprot_writecombine(prot);
return prot;
}
@@ -91,7 +91,7 @@ static int __free_from_pool(void *start, size_t size)
static void *__dma_alloc_coherent(struct device *dev, size_t size,
dma_addr_t *dma_handle, gfp_t flags,
- struct dma_attrs *attrs)
+ unsigned long attrs)
{
if (dev == NULL) {
WARN_ONCE(1, "Use an actual device structure for DMA allocation\n");
@@ -121,7 +121,7 @@ static void *__dma_alloc_coherent(struct device *dev, size_t size,
static void __dma_free_coherent(struct device *dev, size_t size,
void *vaddr, dma_addr_t dma_handle,
- struct dma_attrs *attrs)
+ unsigned long attrs)
{
bool freed;
phys_addr_t paddr = dma_to_phys(dev, dma_handle);
@@ -140,7 +140,7 @@ static void __dma_free_coherent(struct device *dev, size_t size,
static void *__dma_alloc(struct device *dev, size_t size,
dma_addr_t *dma_handle, gfp_t flags,
- struct dma_attrs *attrs)
+ unsigned long attrs)
{
struct page *page;
void *ptr, *coherent_ptr;
@@ -188,7 +188,7 @@ no_mem:
static void __dma_free(struct device *dev, size_t size,
void *vaddr, dma_addr_t dma_handle,
- struct dma_attrs *attrs)
+ unsigned long attrs)
{
void *swiotlb_addr = phys_to_virt(dma_to_phys(dev, dma_handle));
@@ -205,7 +205,7 @@ static void __dma_free(struct device *dev, size_t size,
static dma_addr_t __swiotlb_map_page(struct device *dev, struct page *page,
unsigned long offset, size_t size,
enum dma_data_direction dir,
- struct dma_attrs *attrs)
+ unsigned long attrs)
{
dma_addr_t dev_addr;
@@ -219,7 +219,7 @@ static dma_addr_t __swiotlb_map_page(struct device *dev, struct page *page,
static void __swiotlb_unmap_page(struct device *dev, dma_addr_t dev_addr,
size_t size, enum dma_data_direction dir,
- struct dma_attrs *attrs)
+ unsigned long attrs)
{
if (!is_device_dma_coherent(dev))
__dma_unmap_area(phys_to_virt(dma_to_phys(dev, dev_addr)), size, dir);
@@ -228,7 +228,7 @@ static void __swiotlb_unmap_page(struct device *dev, dma_addr_t dev_addr,
static int __swiotlb_map_sg_attrs(struct device *dev, struct scatterlist *sgl,
int nelems, enum dma_data_direction dir,
- struct dma_attrs *attrs)
+ unsigned long attrs)
{
struct scatterlist *sg;
int i, ret;
@@ -245,7 +245,7 @@ static int __swiotlb_map_sg_attrs(struct device *dev, struct scatterlist *sgl,
static void __swiotlb_unmap_sg_attrs(struct device *dev,
struct scatterlist *sgl, int nelems,
enum dma_data_direction dir,
- struct dma_attrs *attrs)
+ unsigned long attrs)
{
struct scatterlist *sg;
int i;
@@ -306,7 +306,7 @@ static void __swiotlb_sync_sg_for_device(struct device *dev,
static int __swiotlb_mmap(struct device *dev,
struct vm_area_struct *vma,
void *cpu_addr, dma_addr_t dma_addr, size_t size,
- struct dma_attrs *attrs)
+ unsigned long attrs)
{
int ret = -ENXIO;
unsigned long nr_vma_pages = (vma->vm_end - vma->vm_start) >>
@@ -333,7 +333,7 @@ static int __swiotlb_mmap(struct device *dev,
static int __swiotlb_get_sgtable(struct device *dev, struct sg_table *sgt,
void *cpu_addr, dma_addr_t handle, size_t size,
- struct dma_attrs *attrs)
+ unsigned long attrs)
{
int ret = sg_alloc_table(sgt, 1, GFP_KERNEL);
@@ -435,21 +435,21 @@ out:
static void *__dummy_alloc(struct device *dev, size_t size,
dma_addr_t *dma_handle, gfp_t flags,
- struct dma_attrs *attrs)
+ unsigned long attrs)
{
return NULL;
}
static void __dummy_free(struct device *dev, size_t size,
void *vaddr, dma_addr_t dma_handle,
- struct dma_attrs *attrs)
+ unsigned long attrs)
{
}
static int __dummy_mmap(struct device *dev,
struct vm_area_struct *vma,
void *cpu_addr, dma_addr_t dma_addr, size_t size,
- struct dma_attrs *attrs)
+ unsigned long attrs)
{
return -ENXIO;
}
@@ -457,20 +457,20 @@ static int __dummy_mmap(struct device *dev,
static dma_addr_t __dummy_map_page(struct device *dev, struct page *page,
unsigned long offset, size_t size,
enum dma_data_direction dir,
- struct dma_attrs *attrs)
+ unsigned long attrs)
{
return DMA_ERROR_CODE;
}
static void __dummy_unmap_page(struct device *dev, dma_addr_t dev_addr,
size_t size, enum dma_data_direction dir,
- struct dma_attrs *attrs)
+ unsigned long attrs)
{
}
static int __dummy_map_sg(struct device *dev, struct scatterlist *sgl,
int nelems, enum dma_data_direction dir,
- struct dma_attrs *attrs)
+ unsigned long attrs)
{
return 0;
}
@@ -478,7 +478,7 @@ static int __dummy_map_sg(struct device *dev, struct scatterlist *sgl,
static void __dummy_unmap_sg(struct device *dev,
struct scatterlist *sgl, int nelems,
enum dma_data_direction dir,
- struct dma_attrs *attrs)
+ unsigned long attrs)
{
}
@@ -553,7 +553,7 @@ static void flush_page(struct device *dev, const void *virt, phys_addr_t phys)
static void *__iommu_alloc_attrs(struct device *dev, size_t size,
dma_addr_t *handle, gfp_t gfp,
- struct dma_attrs *attrs)
+ unsigned long attrs)
{
bool coherent = is_device_dma_coherent(dev);
int ioprot = dma_direction_to_prot(DMA_BIDIRECTIONAL, coherent);
@@ -613,7 +613,7 @@ static void *__iommu_alloc_attrs(struct device *dev, size_t size,
}
static void __iommu_free_attrs(struct device *dev, size_t size, void *cpu_addr,
- dma_addr_t handle, struct dma_attrs *attrs)
+ dma_addr_t handle, unsigned long attrs)
{
size_t iosize = size;
@@ -629,7 +629,7 @@ static void __iommu_free_attrs(struct device *dev, size_t size, void *cpu_addr,
* Hence how dodgy the below logic looks...
*/
if (__in_atomic_pool(cpu_addr, size)) {
- iommu_dma_unmap_page(dev, handle, iosize, 0, NULL);
+ iommu_dma_unmap_page(dev, handle, iosize, 0, 0);
__free_from_pool(cpu_addr, size);
} else if (is_vmalloc_addr(cpu_addr)){
struct vm_struct *area = find_vm_area(cpu_addr);
@@ -639,14 +639,14 @@ static void __iommu_free_attrs(struct device *dev, size_t size, void *cpu_addr,
iommu_dma_free(dev, area->pages, iosize, &handle);
dma_common_free_remap(cpu_addr, size, VM_USERMAP);
} else {
- iommu_dma_unmap_page(dev, handle, iosize, 0, NULL);
+ iommu_dma_unmap_page(dev, handle, iosize, 0, 0);
__free_pages(virt_to_page(cpu_addr), get_order(size));
}
}
static int __iommu_mmap_attrs(struct device *dev, struct vm_area_struct *vma,
void *cpu_addr, dma_addr_t dma_addr, size_t size,
- struct dma_attrs *attrs)
+ unsigned long attrs)
{
struct vm_struct *area;
int ret;
@@ -666,7 +666,7 @@ static int __iommu_mmap_attrs(struct device *dev, struct vm_area_struct *vma,
static int __iommu_get_sgtable(struct device *dev, struct sg_table *sgt,
void *cpu_addr, dma_addr_t dma_addr,
- size_t size, struct dma_attrs *attrs)
+ size_t size, unsigned long attrs)
{
unsigned int count = PAGE_ALIGN(size) >> PAGE_SHIFT;
struct vm_struct *area = find_vm_area(cpu_addr);
@@ -707,14 +707,14 @@ static void __iommu_sync_single_for_device(struct device *dev,
static dma_addr_t __iommu_map_page(struct device *dev, struct page *page,
unsigned long offset, size_t size,
enum dma_data_direction dir,
- struct dma_attrs *attrs)
+ unsigned long attrs)
{
bool coherent = is_device_dma_coherent(dev);
int prot = dma_direction_to_prot(dir, coherent);
dma_addr_t dev_addr = iommu_dma_map_page(dev, page, offset, size, prot);
if (!iommu_dma_mapping_error(dev, dev_addr) &&
- !dma_get_attr(DMA_ATTR_SKIP_CPU_SYNC, attrs))
+ (attrs & DMA_ATTR_SKIP_CPU_SYNC) == 0)
__iommu_sync_single_for_device(dev, dev_addr, size, dir);
return dev_addr;
@@ -722,9 +722,9 @@ static dma_addr_t __iommu_map_page(struct device *dev, struct page *page,
static void __iommu_unmap_page(struct device *dev, dma_addr_t dev_addr,
size_t size, enum dma_data_direction dir,
- struct dma_attrs *attrs)
+ unsigned long attrs)
{
- if (!dma_get_attr(DMA_ATTR_SKIP_CPU_SYNC, attrs))
+ if ((attrs & DMA_ATTR_SKIP_CPU_SYNC) == 0)
__iommu_sync_single_for_cpu(dev, dev_addr, size, dir);
iommu_dma_unmap_page(dev, dev_addr, size, dir, attrs);
@@ -760,11 +760,11 @@ static void __iommu_sync_sg_for_device(struct device *dev,
static int __iommu_map_sg_attrs(struct device *dev, struct scatterlist *sgl,
int nelems, enum dma_data_direction dir,
- struct dma_attrs *attrs)
+ unsigned long attrs)
{
bool coherent = is_device_dma_coherent(dev);
- if (!dma_get_attr(DMA_ATTR_SKIP_CPU_SYNC, attrs))
+ if ((attrs & DMA_ATTR_SKIP_CPU_SYNC) == 0)
__iommu_sync_sg_for_device(dev, sgl, nelems, dir);
return iommu_dma_map_sg(dev, sgl, nelems,
@@ -774,9 +774,9 @@ static int __iommu_map_sg_attrs(struct device *dev, struct scatterlist *sgl,
static void __iommu_unmap_sg_attrs(struct device *dev,
struct scatterlist *sgl, int nelems,
enum dma_data_direction dir,
- struct dma_attrs *attrs)
+ unsigned long attrs)
{
- if (!dma_get_attr(DMA_ATTR_SKIP_CPU_SYNC, attrs))
+ if ((attrs & DMA_ATTR_SKIP_CPU_SYNC) == 0)
__iommu_sync_sg_for_cpu(dev, sgl, nelems, dir);
iommu_dma_unmap_sg(dev, sgl, nelems, dir, attrs);
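
All of the changes in this file follow the tree-wide conversion of DMA attributes from a struct dma_attrs pointer to a plain unsigned long bitmask: dma_get_attr(ATTR, attrs) becomes attrs & ATTR, and a NULL attrs argument becomes 0. A standalone sketch of the new test pattern (the bit positions are assumptions for illustration, not copied from <linux/dma-mapping.h>):

    /*
     * Hedged sketch of the new attrs convention (standalone, not kernel code).
     * The bit positions below are assumptions for illustration only.
     */
    #include <stdio.h>

    #define DMA_ATTR_WRITE_COMBINE  (1UL << 2)   /* assumed value */
    #define DMA_ATTR_SKIP_CPU_SYNC  (1UL << 5)   /* assumed value */

    /* Mirrors the __get_dma_pgprot() decision: write-combine unless coherent. */
    static const char *pick_pgprot(int coherent, unsigned long attrs)
    {
        if (!coherent || (attrs & DMA_ATTR_WRITE_COMBINE))
            return "pgprot_writecombine";
        return "prot unchanged";
    }

    int main(void)
    {
        unsigned long attrs = DMA_ATTR_SKIP_CPU_SYNC;

        printf("%s\n", pick_pgprot(1, attrs));
        /* Old style: if (!dma_get_attr(DMA_ATTR_SKIP_CPU_SYNC, attrs)) sync();
         * New style: */
        if ((attrs & DMA_ATTR_SKIP_CPU_SYNC) == 0)
            printf("would sync for device\n");
        else
            printf("skipping CPU sync\n");
        return 0;
    }
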
diff --git a/arch/arm64/mm/fault.c b/arch/arm64/mm/fault.c
index 4ebda515a016..c8beaa0da7df 100644
--- a/arch/arm64/mm/fault.c
+++ b/arch/arm64/mm/fault.c
@@ -253,7 +253,7 @@ good_area:
goto out;
}
- return handle_mm_fault(mm, vma, addr & PAGE_MASK, mm_flags);
+ return handle_mm_fault(vma, addr & PAGE_MASK, mm_flags);
check_stack:
if (vma->vm_flags & VM_GROWSDOWN && !expand_stack(vma, addr))
@@ -308,7 +308,8 @@ static int __kprobes do_page_fault(unsigned long addr, unsigned int esr,
}
if (is_permission_fault(esr) && (addr < USER_DS)) {
- if (get_fs() == KERNEL_DS)
+ /* regs->orig_addr_limit may be 0 if we entered from EL0 */
+ if (regs->orig_addr_limit == KERNEL_DS)
die("Accessing user space memory with fs=KERNEL_DS", regs, esr);
if (!search_exception_tables(regs->pc))
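
The fs=KERNEL_DS check above now reads the address limit saved at exception entry (regs->orig_addr_limit) because the entry code resets the live limit to USER_DS before the fault handler runs, so get_fs() can no longer observe the faulting context's value. A userspace model of that ordering (illustrative constants, not the arm64 definitions):

    /*
     * Hedged userspace model (not kernel code): why the check reads the
     * address limit saved at exception entry instead of the live one.
     */
    #include <stdio.h>

    #define USER_DS   0x0000ffffffffffffUL   /* illustrative values */
    #define KERNEL_DS 0xffffffffffffffffUL

    struct fake_pt_regs { unsigned long orig_addr_limit; };

    static unsigned long live_addr_limit = KERNEL_DS;  /* task had fs=KERNEL_DS */

    static void exception_entry(struct fake_pt_regs *regs)
    {
        regs->orig_addr_limit = live_addr_limit;  /* snapshot for the handler */
        live_addr_limit = USER_DS;                /* entry code resets the limit */
    }

    int main(void)
    {
        struct fake_pt_regs regs;

        exception_entry(&regs);
        /* Old check: can no longer be true once entry has run. */
        printf("get_fs() == KERNEL_DS:        %d\n", live_addr_limit == KERNEL_DS);
        /* New check: sees what the faulting context actually had. */
        printf("orig_addr_limit == KERNEL_DS: %d\n",
               regs.orig_addr_limit == KERNEL_DS);
        return 0;
    }
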
diff --git a/arch/arm64/mm/flush.c b/arch/arm64/mm/flush.c
index dbd12ea8ce68..43a76b07eb32 100644
--- a/arch/arm64/mm/flush.c
+++ b/arch/arm64/mm/flush.c
@@ -71,10 +71,6 @@ void __sync_icache_dcache(pte_t pte, unsigned long addr)
{
struct page *page = pte_page(pte);
- /* no flushing needed for anonymous pages */
- if (!page_mapping(page))
- return;
-
if (!test_and_set_bit(PG_dcache_clean, &page->flags))
sync_icache_aliases(page_address(page),
PAGE_SIZE << compound_order(page));
diff --git a/arch/arm64/mm/init.c b/arch/arm64/mm/init.c
index 2ade7a6a10a7..bbb7ee76e319 100644
--- a/arch/arm64/mm/init.c
+++ b/arch/arm64/mm/init.c
@@ -224,7 +224,7 @@ void __init arm64_memblock_init(void)
* via the linear mapping.
*/
if (memory_limit != (phys_addr_t)ULLONG_MAX) {
- memblock_enforce_memory_limit(memory_limit);
+ memblock_mem_limit_remove_map(memory_limit);
memblock_add(__pa(_text), (u64)(_end - _text));
}
diff --git a/arch/arm64/mm/numa.c b/arch/arm64/mm/numa.c
index 98dc1047f2a2..c7fe3ec70774 100644
--- a/arch/arm64/mm/numa.c
+++ b/arch/arm64/mm/numa.c
@@ -17,6 +17,7 @@
* along with this program. If not, see <http://www.gnu.org/licenses/>.
*/
+#include <linux/acpi.h>
#include <linux/bootmem.h>
#include <linux/memblock.h>
#include <linux/module.h>
@@ -29,7 +30,7 @@ static int cpu_to_node_map[NR_CPUS] = { [0 ... NR_CPUS-1] = NUMA_NO_NODE };
static int numa_distance_cnt;
static u8 *numa_distance;
-static int numa_off;
+static bool numa_off;
static __init int numa_parse_early_param(char *opt)
{
@@ -37,7 +38,7 @@ static __init int numa_parse_early_param(char *opt)
return -EINVAL;
if (!strncmp(opt, "off", 3)) {
pr_info("%s\n", "NUMA turned off");
- numa_off = 1;
+ numa_off = true;
}
return 0;
}
@@ -131,25 +132,25 @@ void __init early_map_cpu_to_node(unsigned int cpu, int nid)
* numa_add_memblk - Set node id to memblk
* @nid: NUMA node ID of the new memblk
* @start: Start address of the new memblk
- * @size: Size of the new memblk
+ * @end: End address of the new memblk
*
* RETURNS:
* 0 on success, -errno on failure.
*/
-int __init numa_add_memblk(int nid, u64 start, u64 size)
+int __init numa_add_memblk(int nid, u64 start, u64 end)
{
int ret;
- ret = memblock_set_node(start, size, &memblock.memory, nid);
+ ret = memblock_set_node(start, (end - start), &memblock.memory, nid);
if (ret < 0) {
pr_err("NUMA: memblock [0x%llx - 0x%llx] failed to add on node %d\n",
- start, (start + size - 1), nid);
+ start, (end - 1), nid);
return ret;
}
node_set(nid, numa_nodes_parsed);
pr_info("NUMA: Adding memblock [0x%llx - 0x%llx] on node %d\n",
- start, (start + size - 1), nid);
+ start, (end - 1), nid);
return ret;
}
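
numa_add_memblk() now takes an exclusive end address instead of a size, so callers pass base + size and the implementation converts back with end - start. A small standalone illustration of the call-site change (hypothetical addresses, not kernel code):

    /*
     * Hedged standalone illustration of the new calling convention.
     */
    #include <stdint.h>
    #include <stdio.h>

    /* New-style: exclusive end address instead of a size. */
    static int add_memblk(int nid, uint64_t start, uint64_t end)
    {
        uint64_t size = end - start;   /* what memblock_set_node() receives */

        printf("node %d: [0x%llx - 0x%llx], %llu bytes\n", nid,
               (unsigned long long)start, (unsigned long long)(end - 1),
               (unsigned long long)size);
        return 0;
    }

    int main(void)
    {
        uint64_t base = 0x80000000ULL, size = 0x40000000ULL;

        /* Old call site: add_memblk(0, base, size);
         * New call site: */
        return add_memblk(0, base, base + size);
    }
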
@@ -362,12 +363,15 @@ static int __init dummy_numa_init(void)
int ret;
struct memblock_region *mblk;
- pr_info("%s\n", "No NUMA configuration found");
+ if (numa_off)
+ pr_info("NUMA disabled\n"); /* Forced off on command line. */
+ else
+ pr_info("No NUMA configuration found\n");
pr_info("NUMA: Faking a node at [mem %#018Lx-%#018Lx]\n",
0LLU, PFN_PHYS(max_pfn) - 1);
for_each_memblock(memory, mblk) {
- ret = numa_add_memblk(0, mblk->base, mblk->size);
+ ret = numa_add_memblk(0, mblk->base, mblk->base + mblk->size);
if (!ret)
continue;
@@ -375,7 +379,7 @@ static int __init dummy_numa_init(void)
return ret;
}
- numa_off = 1;
+ numa_off = true;
return 0;
}
@@ -388,7 +392,9 @@ static int __init dummy_numa_init(void)
void __init arm64_numa_init(void)
{
if (!numa_off) {
- if (!numa_init(of_numa_init))
+ if (!acpi_disabled && !numa_init(arm64_acpi_numa_init))
+ return;
+ if (acpi_disabled && !numa_init(of_numa_init))
return;
}
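
With this change the NUMA setup probes ACPI first when ACPI is enabled and falls back to the device tree otherwise; if neither path succeeds, or numa=off was given, the dummy single-node init runs. A minimal sketch of that selection order (stand-in functions, not the kernel APIs):

    /*
     * Hedged sketch of the probe order (standalone model; the probe
     * functions are stand-ins, not the kernel APIs).
     */
    #include <stdbool.h>
    #include <stdio.h>

    static bool numa_off      = false;  /* "numa=off" not on the command line */
    static bool acpi_disabled = true;   /* assumption: booted with a device tree */

    static int acpi_numa_probe(void)  { return -1; }  /* no SRAT available */
    static int of_numa_probe(void)    { return 0;  }  /* DT provides NUMA info */
    static int dummy_numa_probe(void) { puts("faking a single node"); return 0; }

    int main(void)
    {
        if (!numa_off) {
            if (!acpi_disabled && !acpi_numa_probe())
                return 0;                    /* ACPI path when ACPI is enabled */
            if (acpi_disabled && !of_numa_probe())
                return 0;                    /* device-tree path otherwise */
        }
        return dummy_numa_probe();           /* last resort: one fake node */
    }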