author     Kevin Hilman <khilman@linaro.org>    2013-12-20 08:30:50 -0800
committer  Kevin Hilman <khilman@linaro.org>    2013-12-20 08:30:50 -0800
commit     cd15c51d6c2f577f896471a058f33a95f164dba2 (patch)
tree       9d42b8ea581ca35bdf75467b8bbbab21e7056ff9 /arch/arm
parent     5b8314a98888b12159d0b1b14982b8dd2d94e3f5 (diff)
parent     130f769e81fc472beb2211320777e26050e3fa15 (diff)
Merge tag 'omap-for-v3.13/display-fix' of git://git.kernel.org/pub/scm/linux/kernel/git/tmlind/linux-omap into fixes

I accidentally removed some mux code for omap4 that I thought was dead
code as omap4 has been booting with device tree only since v3.10.
Turns out I also removed some display related mux code, so let's
revert that except for the dead code parts.

* tag 'omap-for-v3.13/display-fix' of git://git.kernel.org/pub/scm/linux/kernel/git/tmlind/linux-omap: (439 commits)
  Revert "ARM: OMAP2+: Remove legacy mux code for display.c"
  Linux 3.13-rc4
Diffstat (limited to 'arch/arm')
-rw-r--r--  arch/arm/include/asm/memory.h  |  31
-rw-r--r--  arch/arm/kernel/head-nommu.S   |   4
-rw-r--r--  arch/arm/kernel/head.S         |   2
-rw-r--r--  arch/arm/kernel/process.c      |   7
-rw-r--r--  arch/arm/kernel/setup.c        |   3
-rw-r--r--  arch/arm/kernel/stacktrace.c   |   2
-rw-r--r--  arch/arm/kernel/traps.c        |   3
-rw-r--r--  arch/arm/mach-omap2/display.c  |  38
-rw-r--r--  arch/arm/mm/dma-mapping.c      |  91
-rw-r--r--  arch/arm/mm/init.c             |   2
10 files changed, 105 insertions(+), 78 deletions(-)
diff --git a/arch/arm/include/asm/memory.h b/arch/arm/include/asm/memory.h
index 9ecccc865046..6976b03e5213 100644
--- a/arch/arm/include/asm/memory.h
+++ b/arch/arm/include/asm/memory.h
@@ -100,23 +100,19 @@
#define TASK_UNMAPPED_BASE UL(0x00000000)
#endif
-#ifndef PHYS_OFFSET
-#define PHYS_OFFSET UL(CONFIG_DRAM_BASE)
-#endif
-
#ifndef END_MEM
#define END_MEM (UL(CONFIG_DRAM_BASE) + CONFIG_DRAM_SIZE)
#endif
#ifndef PAGE_OFFSET
-#define PAGE_OFFSET (PHYS_OFFSET)
+#define PAGE_OFFSET PLAT_PHYS_OFFSET
#endif
/*
* The module can be at any place in ram in nommu mode.
*/
#define MODULES_END (END_MEM)
-#define MODULES_VADDR (PHYS_OFFSET)
+#define MODULES_VADDR PAGE_OFFSET
#define XIP_VIRT_ADDR(physaddr) (physaddr)
@@ -157,6 +153,16 @@
#endif
#define ARCH_PGD_MASK ((1 << ARCH_PGD_SHIFT) - 1)
+/*
+ * PLAT_PHYS_OFFSET is the offset (from zero) of the start of physical
+ * memory. This is used for XIP and NoMMU kernels, or by kernels which
+ * have their own mach/memory.h. Assembly code must always use
+ * PLAT_PHYS_OFFSET and not PHYS_OFFSET.
+ */
+#ifndef PLAT_PHYS_OFFSET
+#define PLAT_PHYS_OFFSET UL(CONFIG_PHYS_OFFSET)
+#endif
+
#ifndef __ASSEMBLY__
/*
@@ -239,6 +245,8 @@ static inline unsigned long __phys_to_virt(phys_addr_t x)
#else
+#define PHYS_OFFSET PLAT_PHYS_OFFSET
+
static inline phys_addr_t __virt_to_phys(unsigned long x)
{
return (phys_addr_t)x - PAGE_OFFSET + PHYS_OFFSET;
@@ -251,17 +259,6 @@ static inline unsigned long __phys_to_virt(phys_addr_t x)
#endif
#endif
-#endif /* __ASSEMBLY__ */
-
-#ifndef PHYS_OFFSET
-#ifdef PLAT_PHYS_OFFSET
-#define PHYS_OFFSET PLAT_PHYS_OFFSET
-#else
-#define PHYS_OFFSET UL(CONFIG_PHYS_OFFSET)
-#endif
-#endif
-
-#ifndef __ASSEMBLY__
/*
* PFNs are used to describe any physical page; this means
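
[Editor's note] For readers following the PHYS_OFFSET/PLAT_PHYS_OFFSET split above, here is a minimal standalone sketch of the constant-offset __virt_to_phys()/__phys_to_virt() arithmetic. The PAGE_OFFSET and PLAT_PHYS_OFFSET values are assumed example values (a 3G/1G split with RAM at 0x80000000), not taken from this patch, and the helpers are local models rather than the kernel macros:

/* Standalone model of the constant-offset translation path above.
 * PAGE_OFFSET and PLAT_PHYS_OFFSET are assumptions for illustration;
 * real kernels take them from the platform / Kconfig.
 */
#include <stdint.h>
#include <stdio.h>

#define PLAT_PHYS_OFFSET 0x80000000UL   /* assumed start of RAM */
#define PHYS_OFFSET      PLAT_PHYS_OFFSET
#define PAGE_OFFSET      0xC0000000UL   /* assumed kernel virtual base */

typedef uint64_t phys_addr_t;

static phys_addr_t model_virt_to_phys(unsigned long x)
{
	return (phys_addr_t)x - PAGE_OFFSET + PHYS_OFFSET;
}

static unsigned long model_phys_to_virt(phys_addr_t x)
{
	return (unsigned long)(x - PHYS_OFFSET + PAGE_OFFSET);
}

int main(void)
{
	unsigned long va = 0xC0100000UL;    /* e.g. a kernel text address */
	phys_addr_t pa = model_virt_to_phys(va);

	printf("virt 0x%lx -> phys 0x%llx -> virt 0x%lx\n",
	       va, (unsigned long long)pa, model_phys_to_virt(pa));
	return 0;
}

With those assumed values, kernel virtual address 0xC0100000 translates to physical 0x80100000 and back again.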
diff --git a/arch/arm/kernel/head-nommu.S b/arch/arm/kernel/head-nommu.S
index 14235ba64a90..716249cc2ee1 100644
--- a/arch/arm/kernel/head-nommu.S
+++ b/arch/arm/kernel/head-nommu.S
@@ -68,7 +68,7 @@ ENTRY(stext)
#ifdef CONFIG_ARM_MPU
/* Calculate the size of a region covering just the kernel */
- ldr r5, =PHYS_OFFSET @ Region start: PHYS_OFFSET
+ ldr r5, =PLAT_PHYS_OFFSET @ Region start: PHYS_OFFSET
ldr r6, =(_end) @ Cover whole kernel
sub r6, r6, r5 @ Minimum size of region to map
clz r6, r6 @ Region size must be 2^N...
@@ -213,7 +213,7 @@ ENTRY(__setup_mpu)
set_region_nr r0, #MPU_RAM_REGION
isb
/* Full access from PL0, PL1, shared for CONFIG_SMP, cacheable */
- ldr r0, =PHYS_OFFSET @ RAM starts at PHYS_OFFSET
+ ldr r0, =PLAT_PHYS_OFFSET @ RAM starts at PHYS_OFFSET
ldr r5,=(MPU_AP_PL1RW_PL0RW | MPU_RGN_NORMAL)
setup_region r0, r5, r6, MPU_DATA_SIDE @ PHYS_OFFSET, shared, enabled
diff --git a/arch/arm/kernel/head.S b/arch/arm/kernel/head.S
index 11d59b32fb8d..32f317e5828a 100644
--- a/arch/arm/kernel/head.S
+++ b/arch/arm/kernel/head.S
@@ -110,7 +110,7 @@ ENTRY(stext)
sub r4, r3, r4 @ (PHYS_OFFSET - PAGE_OFFSET)
add r8, r8, r4 @ PHYS_OFFSET
#else
- ldr r8, =PHYS_OFFSET @ always constant in this case
+ ldr r8, =PLAT_PHYS_OFFSET @ always constant in this case
#endif
/*
diff --git a/arch/arm/kernel/process.c b/arch/arm/kernel/process.c
index 94f6b05f9e24..92f7b15dd221 100644
--- a/arch/arm/kernel/process.c
+++ b/arch/arm/kernel/process.c
@@ -404,6 +404,7 @@ EXPORT_SYMBOL(dump_fpu);
unsigned long get_wchan(struct task_struct *p)
{
struct stackframe frame;
+ unsigned long stack_page;
int count = 0;
if (!p || p == current || p->state == TASK_RUNNING)
return 0;
@@ -412,9 +413,11 @@ unsigned long get_wchan(struct task_struct *p)
frame.sp = thread_saved_sp(p);
frame.lr = 0; /* recovered from the stack */
frame.pc = thread_saved_pc(p);
+ stack_page = (unsigned long)task_stack_page(p);
do {
- int ret = unwind_frame(&frame);
- if (ret < 0)
+ if (frame.sp < stack_page ||
+ frame.sp >= stack_page + THREAD_SIZE ||
+ unwind_frame(&frame) < 0)
return 0;
if (!in_sched_functions(frame.pc))
return frame.pc;
diff --git a/arch/arm/kernel/setup.c b/arch/arm/kernel/setup.c
index 6a1b8a81b1ae..987a7f5bce5f 100644
--- a/arch/arm/kernel/setup.c
+++ b/arch/arm/kernel/setup.c
@@ -873,8 +873,6 @@ void __init setup_arch(char **cmdline_p)
machine_desc = mdesc;
machine_name = mdesc->name;
- setup_dma_zone(mdesc);
-
if (mdesc->reboot_mode != REBOOT_HARD)
reboot_mode = mdesc->reboot_mode;
@@ -892,6 +890,7 @@ void __init setup_arch(char **cmdline_p)
sort(&meminfo.bank, meminfo.nr_banks, sizeof(meminfo.bank[0]), meminfo_cmp, NULL);
early_paging_init(mdesc, lookup_processor_type(read_cpuid_id()));
+ setup_dma_zone(mdesc);
sanity_check_meminfo();
arm_memblock_init(&meminfo, mdesc);
diff --git a/arch/arm/kernel/stacktrace.c b/arch/arm/kernel/stacktrace.c
index 00f79e59985b..af4e8c8a5422 100644
--- a/arch/arm/kernel/stacktrace.c
+++ b/arch/arm/kernel/stacktrace.c
@@ -31,7 +31,7 @@ int notrace unwind_frame(struct stackframe *frame)
high = ALIGN(low, THREAD_SIZE);
/* check current frame pointer is within bounds */
- if (fp < (low + 12) || fp + 4 >= high)
+ if (fp < low + 12 || fp > high - 4)
return -EINVAL;
/* restore the registers from the stack frame */
diff --git a/arch/arm/kernel/traps.c b/arch/arm/kernel/traps.c
index dbf0923e8d76..7940241f0576 100644
--- a/arch/arm/kernel/traps.c
+++ b/arch/arm/kernel/traps.c
@@ -509,9 +509,10 @@ static inline int
__do_cache_op(unsigned long start, unsigned long end)
{
int ret;
- unsigned long chunk = PAGE_SIZE;
do {
+ unsigned long chunk = min(PAGE_SIZE, end - start);
+
if (signal_pending(current)) {
struct thread_info *ti = current_thread_info();
diff --git a/arch/arm/mach-omap2/display.c b/arch/arm/mach-omap2/display.c
index 58347bb874a0..4cf165502b35 100644
--- a/arch/arm/mach-omap2/display.c
+++ b/arch/arm/mach-omap2/display.c
@@ -101,13 +101,51 @@ static const struct omap_dss_hwmod_data omap4_dss_hwmod_data[] __initconst = {
{ "dss_hdmi", "omapdss_hdmi", -1 },
};
+static int omap4_dsi_mux_pads(int dsi_id, unsigned lanes)
+{
+ u32 enable_mask, enable_shift;
+ u32 pipd_mask, pipd_shift;
+ u32 reg;
+
+ if (dsi_id == 0) {
+ enable_mask = OMAP4_DSI1_LANEENABLE_MASK;
+ enable_shift = OMAP4_DSI1_LANEENABLE_SHIFT;
+ pipd_mask = OMAP4_DSI1_PIPD_MASK;
+ pipd_shift = OMAP4_DSI1_PIPD_SHIFT;
+ } else if (dsi_id == 1) {
+ enable_mask = OMAP4_DSI2_LANEENABLE_MASK;
+ enable_shift = OMAP4_DSI2_LANEENABLE_SHIFT;
+ pipd_mask = OMAP4_DSI2_PIPD_MASK;
+ pipd_shift = OMAP4_DSI2_PIPD_SHIFT;
+ } else {
+ return -ENODEV;
+ }
+
+ reg = omap4_ctrl_pad_readl(OMAP4_CTRL_MODULE_PAD_CORE_CONTROL_DSIPHY);
+
+ reg &= ~enable_mask;
+ reg &= ~pipd_mask;
+
+ reg |= (lanes << enable_shift) & enable_mask;
+ reg |= (lanes << pipd_shift) & pipd_mask;
+
+ omap4_ctrl_pad_writel(reg, OMAP4_CTRL_MODULE_PAD_CORE_CONTROL_DSIPHY);
+
+ return 0;
+}
+
static int omap_dsi_enable_pads(int dsi_id, unsigned lane_mask)
{
+ if (cpu_is_omap44xx())
+ return omap4_dsi_mux_pads(dsi_id, lane_mask);
+
return 0;
}
static void omap_dsi_disable_pads(int dsi_id, unsigned lane_mask)
{
+ if (cpu_is_omap44xx())
+ omap4_dsi_mux_pads(dsi_id, 0);
}
static int omap_dss_set_min_bus_tput(struct device *dev, unsigned long tput)
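
[Editor's note] For illustration only, a standalone sketch of the read-modify-write pattern that the restored omap4_dsi_mux_pads() applies to the DSIPHY pad control register. The register here is an ordinary variable and the SHIFT/MASK values are placeholders, not the real OMAP4_DSI1_LANEENABLE/PIPD definitions from the control-module headers:

/* Standalone model of the lane-enable/PIPD read-modify-write above.
 * The register is a local variable and the mask/shift values are
 * placeholders; the real values live in the OMAP4 control-module headers.
 */
#include <stdint.h>
#include <stdio.h>

#define DSI1_LANEENABLE_SHIFT 24                              /* assumed */
#define DSI1_LANEENABLE_MASK  (0x1fU << DSI1_LANEENABLE_SHIFT)
#define DSI1_PIPD_SHIFT       19                              /* assumed */
#define DSI1_PIPD_MASK        (0x1fU << DSI1_PIPD_SHIFT)

static uint32_t dsiphy_reg;               /* stands in for the pad register */

static void mux_dsi1_pads(unsigned lanes)
{
	uint32_t reg = dsiphy_reg;            /* read */

	reg &= ~DSI1_LANEENABLE_MASK;         /* clear both fields */
	reg &= ~DSI1_PIPD_MASK;

	reg |= (lanes << DSI1_LANEENABLE_SHIFT) & DSI1_LANEENABLE_MASK;
	reg |= (lanes << DSI1_PIPD_SHIFT) & DSI1_PIPD_MASK;

	dsiphy_reg = reg;                     /* write back */
}

int main(void)
{
	mux_dsi1_pads(0x7);   /* e.g. clock lane plus two data lanes */
	printf("DSIPHY pad register: 0x%08x\n", (unsigned int)dsiphy_reg);
	mux_dsi1_pads(0);     /* the disable path passes a zero lane mask */
	printf("DSIPHY pad register: 0x%08x\n", (unsigned int)dsiphy_reg);
	return 0;
}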
diff --git a/arch/arm/mm/dma-mapping.c b/arch/arm/mm/dma-mapping.c
index f6b6bfa88ecf..f61a5707823a 100644
--- a/arch/arm/mm/dma-mapping.c
+++ b/arch/arm/mm/dma-mapping.c
@@ -158,13 +158,49 @@ struct dma_map_ops arm_coherent_dma_ops = {
};
EXPORT_SYMBOL(arm_coherent_dma_ops);
+static int __dma_supported(struct device *dev, u64 mask, bool warn)
+{
+ unsigned long max_dma_pfn;
+
+ /*
+ * If the mask allows for more memory than we can address,
+ * and we actually have that much memory, then we must
+ * indicate that DMA to this device is not supported.
+ */
+ if (sizeof(mask) != sizeof(dma_addr_t) &&
+ mask > (dma_addr_t)~0 &&
+ dma_to_pfn(dev, ~0) < max_pfn) {
+ if (warn) {
+ dev_warn(dev, "Coherent DMA mask %#llx is larger than dma_addr_t allows\n",
+ mask);
+ dev_warn(dev, "Driver did not use or check the return value from dma_set_coherent_mask()?\n");
+ }
+ return 0;
+ }
+
+ max_dma_pfn = min(max_pfn, arm_dma_pfn_limit);
+
+ /*
+ * Translate the device's DMA mask to a PFN limit. This
+ * PFN number includes the page which we can DMA to.
+ */
+ if (dma_to_pfn(dev, mask) < max_dma_pfn) {
+ if (warn)
+ dev_warn(dev, "Coherent DMA mask %#llx (pfn %#lx-%#lx) covers a smaller range of system memory than the DMA zone pfn 0x0-%#lx\n",
+ mask,
+ dma_to_pfn(dev, 0), dma_to_pfn(dev, mask) + 1,
+ max_dma_pfn + 1);
+ return 0;
+ }
+
+ return 1;
+}
+
static u64 get_coherent_dma_mask(struct device *dev)
{
u64 mask = (u64)DMA_BIT_MASK(32);
if (dev) {
- unsigned long max_dma_pfn;
-
mask = dev->coherent_dma_mask;
/*
@@ -176,34 +212,8 @@ static u64 get_coherent_dma_mask(struct device *dev)
return 0;
}
- max_dma_pfn = min(max_pfn, arm_dma_pfn_limit);
-
- /*
- * If the mask allows for more memory than we can address,
- * and we actually have that much memory, then fail the
- * allocation.
- */
- if (sizeof(mask) != sizeof(dma_addr_t) &&
- mask > (dma_addr_t)~0 &&
- dma_to_pfn(dev, ~0) > max_dma_pfn) {
- dev_warn(dev, "Coherent DMA mask %#llx is larger than dma_addr_t allows\n",
- mask);
- dev_warn(dev, "Driver did not use or check the return value from dma_set_coherent_mask()?\n");
- return 0;
- }
-
- /*
- * Now check that the mask, when translated to a PFN,
- * fits within the allowable addresses which we can
- * allocate.
- */
- if (dma_to_pfn(dev, mask) < max_dma_pfn) {
- dev_warn(dev, "Coherent DMA mask %#llx (pfn %#lx-%#lx) covers a smaller range of system memory than the DMA zone pfn 0x0-%#lx\n",
- mask,
- dma_to_pfn(dev, 0), dma_to_pfn(dev, mask) + 1,
- arm_dma_pfn_limit + 1);
+ if (!__dma_supported(dev, mask, true))
return 0;
- }
}
return mask;
@@ -1032,28 +1042,7 @@ void arm_dma_sync_sg_for_device(struct device *dev, struct scatterlist *sg,
*/
int dma_supported(struct device *dev, u64 mask)
{
- unsigned long limit;
-
- /*
- * If the mask allows for more memory than we can address,
- * and we actually have that much memory, then we must
- * indicate that DMA to this device is not supported.
- */
- if (sizeof(mask) != sizeof(dma_addr_t) &&
- mask > (dma_addr_t)~0 &&
- dma_to_pfn(dev, ~0) > arm_dma_pfn_limit)
- return 0;
-
- /*
- * Translate the device's DMA mask to a PFN limit. This
- * PFN number includes the page which we can DMA to.
- */
- limit = dma_to_pfn(dev, mask);
-
- if (limit < arm_dma_pfn_limit)
- return 0;
-
- return 1;
+ return __dma_supported(dev, mask, false);
}
EXPORT_SYMBOL(dma_supported);
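
[Editor's note] To make the consolidated check concrete, here is a standalone model of the second half of __dma_supported(), the mask-to-PFN comparison. The dma_to_pfn() stand-in ignores any per-device offset, the max_pfn and arm_dma_pfn_limit values are assumed for the example, and the dma_addr_t overflow check is omitted:

/* Standalone model of the mask -> PFN comparison in __dma_supported().
 * The limits below are assumptions for the example; no kernel APIs are used.
 */
#include <stdbool.h>
#include <stdint.h>
#include <stdio.h>

#define MODEL_PAGE_SHIFT    12
#define MODEL_MAX_PFN       0xC0000UL   /* assumed: RAM ends at 3 GiB physical */
#define MODEL_DMA_PFN_LIMIT 0xFFFFFUL   /* assumed: 32-bit arm_dma_limit of 0xffffffff */

/* Simplified dma_to_pfn(): bus address -> page frame number. */
static unsigned long model_dma_to_pfn(uint64_t addr)
{
	return (unsigned long)(addr >> MODEL_PAGE_SHIFT);
}

static bool model_dma_supported(uint64_t mask)
{
	unsigned long max_dma_pfn =
		MODEL_MAX_PFN < MODEL_DMA_PFN_LIMIT ? MODEL_MAX_PFN : MODEL_DMA_PFN_LIMIT;

	/* The mask, translated to a PFN, must reach the DMA-able region. */
	return model_dma_to_pfn(mask) >= max_dma_pfn;
}

int main(void)
{
	printf("32-bit mask supported: %d\n", model_dma_supported(0xffffffffULL));
	printf("28-bit mask supported: %d\n", model_dma_supported(0x0fffffffULL));
	return 0;
}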
diff --git a/arch/arm/mm/init.c b/arch/arm/mm/init.c
index 3e8f106ee5fe..1f7b19a47060 100644
--- a/arch/arm/mm/init.c
+++ b/arch/arm/mm/init.c
@@ -229,7 +229,7 @@ void __init setup_dma_zone(const struct machine_desc *mdesc)
#ifdef CONFIG_ZONE_DMA
if (mdesc->dma_zone_size) {
arm_dma_zone_size = mdesc->dma_zone_size;
- arm_dma_limit = PHYS_OFFSET + arm_dma_zone_size - 1;
+ arm_dma_limit = __pv_phys_offset + arm_dma_zone_size - 1;
} else
arm_dma_limit = 0xffffffff;
arm_dma_pfn_limit = arm_dma_limit >> PAGE_SHIFT;