summaryrefslogtreecommitdiff
path: root/arch/arm
diff options
context:
space:
mode:
Diffstat (limited to 'arch/arm')
-rw-r--r--arch/arm/common/dmabounce.c37
-rw-r--r--arch/arm/include/asm/dma-mapping.h38
-rw-r--r--arch/arm/include/asm/memory.h13
-rw-r--r--arch/arm/mach-iop13xx/include/mach/memory.h2
-rw-r--r--arch/arm/mach-ks8695/include/mach/memory.h7
-rw-r--r--arch/arm/mach-tegra/Makefile3
-rw-r--r--arch/arm/mach-tegra/board_nvodm.c6
-rw-r--r--arch/arm/mach-tegra/dma.c4
-rw-r--r--arch/arm/mach-tegra/include/mach/memory.h11
-rw-r--r--arch/arm/mach-tegra/include/nvrm_power_private.h18
-rw-r--r--arch/arm/mach-tegra/init_common.c9
-rw-r--r--arch/arm/mach-tegra/nvrm/core/ap20/ap20rm_clock_config.c11
-rw-r--r--arch/arm/mach-tegra/nvrm/core/common/nvrm_power_dfs.c65
-rw-r--r--arch/arm/mach-tegra/nvrm/io/ap15/ap15rm_pwm.c6
-rw-r--r--arch/arm/mach-tegra/nvrm_user.c145
-rw-r--r--arch/arm/mach-tegra/pci-enum.c17
-rw-r--r--arch/arm/mach-tegra/pci.c14
-rw-r--r--arch/arm/mach-tegra/suspend_ops.c81
-rw-r--r--arch/arm/mm/cache-v6.S13
-rw-r--r--arch/arm/mm/cache-v7.S6
-rw-r--r--arch/arm/mm/dma-mapping.c68
-rw-r--r--arch/arm/plat-omap/include/mach/memory.h7
22 files changed, 396 insertions, 185 deletions
diff --git a/arch/arm/common/dmabounce.c b/arch/arm/common/dmabounce.c
index f030f0775be7..8f3beaa332a1 100644
--- a/arch/arm/common/dmabounce.c
+++ b/arch/arm/common/dmabounce.c
@@ -276,7 +276,7 @@ static inline dma_addr_t map_single(struct device *dev, void *ptr, size_t size,
* We don't need to sync the DMA buffer since
* it was allocated via the coherent allocators.
*/
- dma_cache_maint(ptr, size, dir);
+ __dma_cache_maint(ptr, size, dir);
}
return dma_addr;
@@ -307,17 +307,15 @@ static inline void unmap_single(struct device *dev, dma_addr_t dma_addr,
memcpy(ptr, buf->safe, size);
/*
- * DMA buffers must have the same cache properties
- * as if they were really used for DMA - which means
- * data must be written back to RAM. Note that
- * we don't use dmac_flush_range() here for the
- * bidirectional case because we know the cache
- * lines will be coherent with the data written.
+ * Since we may have written to a page cache page,
+ * we need to ensure that the data will be coherent
+ * with user mappings.
*/
- dmac_clean_range(ptr, ptr + size);
- outer_clean_range(__pa(ptr), __pa(ptr) + size);
+ __cpuc_flush_dcache_area(ptr, size);
}
free_safe_buffer(dev->archdata.dmabounce, buf);
+ } else if (dir != DMA_TO_DEVICE) {
+ __dma_cache_maint(dma_to_virt(dev, dma_addr), size, 0);
}
}
@@ -341,6 +339,22 @@ dma_addr_t dma_map_single(struct device *dev, void *ptr, size_t size,
}
EXPORT_SYMBOL(dma_map_single);
+/*
+ * see if a mapped address was really a "safe" buffer and if so, copy
+ * the data from the safe buffer back to the unsafe buffer and free up
+ * the safe buffer. (basically return things back to the way they
+ * should be)
+ */
+void dma_unmap_single(struct device *dev, dma_addr_t dma_addr, size_t size,
+ enum dma_data_direction dir)
+{
+ dev_dbg(dev, "%s(ptr=%p,size=%d,dir=%x)\n",
+ __func__, (void *) dma_addr, size, dir);
+
+ unmap_single(dev, dma_addr, size, dir);
+}
+EXPORT_SYMBOL(dma_unmap_single);
+
dma_addr_t dma_map_page(struct device *dev, struct page *page,
unsigned long offset, size_t size, enum dma_data_direction dir)
{
@@ -359,8 +373,7 @@ EXPORT_SYMBOL(dma_map_page);
* the safe buffer. (basically return things back to the way they
* should be)
*/
-
-void dma_unmap_single(struct device *dev, dma_addr_t dma_addr, size_t size,
+void dma_unmap_page(struct device *dev, dma_addr_t dma_addr, size_t size,
enum dma_data_direction dir)
{
dev_dbg(dev, "%s(ptr=%p,size=%d,dir=%x)\n",
@@ -368,7 +381,7 @@ void dma_unmap_single(struct device *dev, dma_addr_t dma_addr, size_t size,
unmap_single(dev, dma_addr, size, dir);
}
-EXPORT_SYMBOL(dma_unmap_single);
+EXPORT_SYMBOL(dma_unmap_page);
int dmabounce_sync_for_cpu(struct device *dev, dma_addr_t addr,
unsigned long off, size_t sz, enum dma_data_direction dir)
diff --git a/arch/arm/include/asm/dma-mapping.h b/arch/arm/include/asm/dma-mapping.h
index 59fa762e9c66..5fd23a810a26 100644
--- a/arch/arm/include/asm/dma-mapping.h
+++ b/arch/arm/include/asm/dma-mapping.h
@@ -17,7 +17,7 @@
#ifndef __arch_page_to_dma
static inline dma_addr_t page_to_dma(struct device *dev, struct page *page)
{
- return (dma_addr_t)__virt_to_bus((unsigned long)page_address(page));
+ return (dma_addr_t)__pfn_to_bus(page_to_pfn(page));
}
static inline void *dma_to_virt(struct device *dev, dma_addr_t addr)
@@ -35,6 +35,11 @@ static inline dma_addr_t page_to_dma(struct device *dev, struct page *page)
return __arch_page_to_dma(dev, page);
}
+static inline struct page *dma_to_page(struct device *dev, dma_addr_t addr)
+{
+ return __arch_dma_to_page(dev, addr);
+}
+
static inline void *dma_to_virt(struct device *dev, dma_addr_t addr)
{
return __arch_dma_to_virt(dev, addr);
@@ -56,9 +61,9 @@ static inline dma_addr_t virt_to_dma(struct device *dev, void *addr)
* platforms with CONFIG_DMABOUNCE.
* Use the driver DMA support - see dma-mapping.h (dma_sync_*)
*/
-extern void dma_cache_maint(const void *kaddr, size_t size, int rw);
-extern void dma_cache_maint_page(struct page *page, unsigned long offset,
- size_t size, int rw);
+extern void __dma_cache_maint(const void *kaddr, size_t size, int map);
+extern void __dma_cache_maint_page(struct page *page, unsigned long offset,
+ size_t size, int map);
/*
* Return whether the given device DMA address mask can be supported
@@ -247,9 +252,11 @@ extern int dma_needs_bounce(struct device*, dma_addr_t, size_t);
*/
extern dma_addr_t dma_map_single(struct device *, void *, size_t,
enum dma_data_direction);
+extern void dma_unmap_single(struct device *, dma_addr_t, size_t,
+ enum dma_data_direction);
extern dma_addr_t dma_map_page(struct device *, struct page *,
unsigned long, size_t, enum dma_data_direction);
-extern void dma_unmap_single(struct device *, dma_addr_t, size_t,
+extern void dma_unmap_page(struct device *, dma_addr_t, size_t,
enum dma_data_direction);
/*
@@ -293,7 +300,7 @@ static inline dma_addr_t dma_map_single(struct device *dev, void *cpu_addr,
BUG_ON(!valid_dma_direction(dir));
if (!arch_is_coherent())
- dma_cache_maint(cpu_addr, size, dir);
+ __dma_cache_maint(cpu_addr, size, 1);
return virt_to_dma(dev, cpu_addr);
}
@@ -318,7 +325,7 @@ static inline dma_addr_t dma_map_page(struct device *dev, struct page *page,
BUG_ON(!valid_dma_direction(dir));
if (!arch_is_coherent())
- dma_cache_maint_page(page, offset, size, dir);
+ __dma_cache_maint_page(page, offset, size, 1);
return page_to_dma(dev, page) + offset;
}
@@ -340,9 +347,9 @@ static inline dma_addr_t dma_map_page(struct device *dev, struct page *page,
static inline void dma_unmap_single(struct device *dev, dma_addr_t handle,
size_t size, enum dma_data_direction dir)
{
- /* nothing to do */
+ if (dir != DMA_TO_DEVICE)
+ __dma_cache_maint(dma_to_virt(dev, handle), size, 0);
}
-#endif /* CONFIG_DMABOUNCE */
/**
* dma_unmap_page - unmap a buffer previously mapped through dma_map_page()
@@ -361,8 +368,11 @@ static inline void dma_unmap_single(struct device *dev, dma_addr_t handle,
static inline void dma_unmap_page(struct device *dev, dma_addr_t handle,
size_t size, enum dma_data_direction dir)
{
- dma_unmap_single(dev, handle, size, dir);
+ if (dir != DMA_TO_DEVICE)
+ __dma_cache_maint_page(dma_to_page(dev, handle),
+ handle & ~PAGE_MASK, size, 0);
}
+#endif /* CONFIG_DMABOUNCE */
/**
* dma_sync_single_range_for_cpu
@@ -388,7 +398,11 @@ static inline void dma_sync_single_range_for_cpu(struct device *dev,
{
BUG_ON(!valid_dma_direction(dir));
- dmabounce_sync_for_cpu(dev, handle, offset, size, dir);
+ if (!dmabounce_sync_for_cpu(dev, handle, offset, size, dir))
+ return;
+
+ if (dir != DMA_TO_DEVICE)
+ __dma_cache_maint(dma_to_virt(dev, handle) + offset, size, 0);
}
static inline void dma_sync_single_range_for_device(struct device *dev,
@@ -401,7 +415,7 @@ static inline void dma_sync_single_range_for_device(struct device *dev,
return;
if (!arch_is_coherent())
- dma_cache_maint(dma_to_virt(dev, handle) + offset, size, dir);
+ __dma_cache_maint(dma_to_virt(dev, handle) + offset, size, 1);
}
static inline void dma_sync_single_for_cpu(struct device *dev,
diff --git a/arch/arm/include/asm/memory.h b/arch/arm/include/asm/memory.h
index 43c99f60cbe4..415433c91343 100644
--- a/arch/arm/include/asm/memory.h
+++ b/arch/arm/include/asm/memory.h
@@ -135,6 +135,12 @@
#define __phys_to_pfn(paddr) ((paddr) >> PAGE_SHIFT)
#define __pfn_to_phys(pfn) ((pfn) << PAGE_SHIFT)
+/*
+ * Convert a page to/from a physical address
+ */
+#define page_to_phys(page) (__pfn_to_phys(page_to_pfn(page)))
+#define phys_to_page(phys) (pfn_to_page(__phys_to_pfn(phys)))
+
#ifndef __ASSEMBLY__
/*
@@ -195,6 +201,8 @@ static inline void *phys_to_virt(unsigned long x)
#ifndef __virt_to_bus
#define __virt_to_bus __virt_to_phys
#define __bus_to_virt __phys_to_virt
+#define __pfn_to_bus(x) __pfn_to_phys(x)
+#define __bus_to_pfn(x) __phys_to_pfn(x)
#endif
static inline __deprecated unsigned long virt_to_bus(void *x)
@@ -310,11 +318,6 @@ static inline __deprecated void *bus_to_virt(unsigned long x)
#endif /* !CONFIG_DISCONTIGMEM */
/*
- * For BIO. "will die". Kill me when bio_to_phys() and bvec_to_phys() die.
- */
-#define page_to_phys(page) (page_to_pfn(page) << PAGE_SHIFT)
-
-/*
* Optional coherency support. Currently used only by selected
* Intel XSC3-based systems.
*/
diff --git a/arch/arm/mach-iop13xx/include/mach/memory.h b/arch/arm/mach-iop13xx/include/mach/memory.h
index e012bf13c955..ff0f2ee9d3f7 100644
--- a/arch/arm/mach-iop13xx/include/mach/memory.h
+++ b/arch/arm/mach-iop13xx/include/mach/memory.h
@@ -61,6 +61,8 @@ static inline unsigned long __lbus_to_virt(dma_addr_t x)
#define __arch_page_to_dma(dev, page) \
__arch_virt_to_dma(dev, page_address(page))
+#define __arch_dma_to_page(dev, addr) phys_to_page(addr)
+
#endif /* CONFIG_ARCH_IOP13XX */
#endif /* !ASSEMBLY */
diff --git a/arch/arm/mach-ks8695/include/mach/memory.h b/arch/arm/mach-ks8695/include/mach/memory.h
index 6d5887cf5742..50ccc00fdd04 100644
--- a/arch/arm/mach-ks8695/include/mach/memory.h
+++ b/arch/arm/mach-ks8695/include/mach/memory.h
@@ -37,6 +37,13 @@ extern struct bus_type platform_bus_type;
(dma_addr_t)__virt_to_phys(x) : (dma_addr_t)__virt_to_bus(x); })
#define __arch_page_to_dma(dev, x) __arch_virt_to_dma(dev, page_address(x))
+#define __arch_dma_to_page(dev, x) \
+ ({ dma_addr_t __dma = x; \
+ if (!is_lbus_device(dev)) \
+ __dma += PHYS_OFFSET - KS8695_PCIMEM_PA; \
+ phys_to_page(__dma); \
+ })
+
#endif
#endif
diff --git a/arch/arm/mach-tegra/Makefile b/arch/arm/mach-tegra/Makefile
index 075d18a45193..88296db9a1f6 100644
--- a/arch/arm/mach-tegra/Makefile
+++ b/arch/arm/mach-tegra/Makefile
@@ -16,6 +16,9 @@ obj-y += irq_gpio.o
obj-y += timer.o
obj-y += tegra_sysmap.o
+# Tegra suspend operation
+obj-$(CONFIG_PM) += suspend_ops.o
+
# Export symbols used by loadable modules
obj-y += tegra_exports.o
diff --git a/arch/arm/mach-tegra/board_nvodm.c b/arch/arm/mach-tegra/board_nvodm.c
index 61a54f02e99d..5fa01becb3e1 100644
--- a/arch/arm/mach-tegra/board_nvodm.c
+++ b/arch/arm/mach-tegra/board_nvodm.c
@@ -68,6 +68,10 @@ extern const char* tegra_boot_device;
extern void __init tegra_init_irq(void);
extern void __init tegra_map_common_io(void);
+static struct platform_device nvrm_device =
+{
+ .name = "nvrm"
+};
#ifdef CONFIG_DEVNVMAP
static struct platform_device nvmap_device = {
@@ -362,6 +366,8 @@ static void __init tegra_machine_init(void)
NvU32 serial_number[2] = {0};
#endif
+ (void) platform_device_register(&nvrm_device);
+
tegra_common_init();
tegra_clk_init();
NvConfigDebugConsole(s_hRmGlobal);
diff --git a/arch/arm/mach-tegra/dma.c b/arch/arm/mach-tegra/dma.c
index 064ac861a3c7..fa4186513ce5 100644
--- a/arch/arm/mach-tegra/dma.c
+++ b/arch/arm/mach-tegra/dma.c
@@ -193,9 +193,11 @@ int tegra_dma_dequeue_req(int channel, struct tegra_dma_req *_req)
tegra_dma_update_hw(ch, next_req);
}
req->status = -TEGRA_DMA_REQ_ERROR_ABOTRED;
- spin_unlock_irqrestore(&ch->lock, irq_flags);
+ spin_unlock_irqrestore(&ch->lock, irq_flags);
req->complete(req, req->status);
+ spin_lock_irqsave(&ch->lock, irq_flags);
+
}
spin_unlock_irqrestore(&ch->lock, irq_flags);
diff --git a/arch/arm/mach-tegra/include/mach/memory.h b/arch/arm/mach-tegra/include/mach/memory.h
index 20043d3e9d8d..1e1d44cb4d2d 100644
--- a/arch/arm/mach-tegra/include/mach/memory.h
+++ b/arch/arm/mach-tegra/include/mach/memory.h
@@ -36,9 +36,14 @@
#define NET_IP_ALIGN 0
#define NET_SKB_PAD L1_CACHE_BYTES
-/* bus address and physical addresses are identical */
-#define __virt_to_bus(x) __virt_to_phys(x)
-#define __bus_to_virt(x) __phys_to_virt(x)
+
+#define __arch_page_to_dma(dev, page) ((dma_addr_t)__virt_to_phys(page_address(page)))
+
+#define __arch_dma_to_virt(dev, addr) ((void *) __phys_to_virt(addr))
+
+#define __arch_virt_to_dma(dev, addr) ((dma_addr_t) __virt_to_phys((unsigned long)(addr)))
+
+#define __arch_dma_to_page(dev, addr) (phys_to_page(addr))
#endif
diff --git a/arch/arm/mach-tegra/include/nvrm_power_private.h b/arch/arm/mach-tegra/include/nvrm_power_private.h
index 692599040a93..5afd6bd8affa 100644
--- a/arch/arm/mach-tegra/include/nvrm_power_private.h
+++ b/arch/arm/mach-tegra/include/nvrm_power_private.h
@@ -513,6 +513,24 @@ void NvRmPrivVoltageScale(
void NvRmPrivDvsRequest(NvRmMilliVolts TargetMv);
/**
+ * Gets low threshold and present voltage on the given rail.
+ *
+ * @param RailId The targeted voltage rail ID.
+ * @param pLowMv Output storage pointer for low voltage threshold (in
+ * millivolt).
+ * @param pPresentMv Output storage pointer for present rail voltage (in
+ * millivolt). This parameter is optional, set to NULL if only low
+ * threshold is to be retrieved.
+ *
+ * NvRmVoltsUnspecified is returned if targeted rail does not exist on SoC.
+ */
+void
+NvRmPrivGetLowVoltageThreshold(
+ NvRmDfsVoltageRailId RailId,
+ NvRmMilliVolts* pLowMv,
+ NvRmMilliVolts* pPresentMv);
+
+/**
* Outputs debug messages for starvation hints sent by the specified client.
*
* @param ClientId The client ID assigned by the RM power manager.
diff --git a/arch/arm/mach-tegra/init_common.c b/arch/arm/mach-tegra/init_common.c
index fa87882fc45e..32746e5e4d50 100644
--- a/arch/arm/mach-tegra/init_common.c
+++ b/arch/arm/mach-tegra/init_common.c
@@ -46,6 +46,11 @@ const char *tegra_partition_list = NULL;
char *tegra_boot_device = NULL;
NvRmGpioHandle s_hGpioGlobal = NULL;
+#ifdef CONFIG_PM
+/* FIXME : Uncomment this for actual suspend/resume
+extern void tegra_set_suspend_ops(void); */
+#endif
+
/*
* The format for the partition list command line parameter is
* tagrapart=<linux_name>:<start_sector>:<length_in_sectors>:<sector_size>,...
@@ -801,5 +806,9 @@ void __init tegra_common_init(void)
tegra_register_uart();
tegra_register_sdio();
tegra_register_usb();
+#ifdef CONFIG_PM
+ /* FIXME : Uncomment this for actual suspend/resume
+ tegra_set_suspend_ops(); */
+#endif
}
diff --git a/arch/arm/mach-tegra/nvrm/core/ap20/ap20rm_clock_config.c b/arch/arm/mach-tegra/nvrm/core/ap20/ap20rm_clock_config.c
index 882d0c184c68..4add3ff74fa9 100644
--- a/arch/arm/mach-tegra/nvrm/core/ap20/ap20rm_clock_config.c
+++ b/arch/arm/mach-tegra/nvrm/core/ap20/ap20rm_clock_config.c
@@ -1215,12 +1215,13 @@ Ap20CpuClockSourceFind(
NvRmMilliVolts* pSystemMv)
{
NvU32 i;
- NvRmMilliVolts DivMv;
+ NvRmMilliVolts DivMv = 0;
+ NvRmMilliVolts CpuMv = 0;
NvRmFreqKHz SourceKHz;
NV_ASSERT(DomainKHz <= MaxKHz);
NV_ASSERT(s_Ap20CpuConfig.pPllXStepsKHz);
- DivMv = pDfsSource->DividerSetting = 0; // no 2ndary divider by default
+ pDfsSource->DividerSetting = 0; // no 2ndary divider by default
// 1st try oscillator
SourceKHz = NvRmPrivGetClockSourceFreq(NvRmClockSource_ClkM);
@@ -1279,7 +1280,11 @@ get_mv:
// Finally get operational voltage for found source
pDfsSource->MinMv = NvRmPrivModuleVscaleGetMV(
hRmDevice, NvRmModuleID_Cpu, pDfsSource->SourceKHz);
- *pSystemMv = ((pDfsSource->MinMv * s_Ap20CpuConfig.CoreOverCpuSlope) >>
+#if !NV_OAL
+ NvRmPrivGetLowVoltageThreshold(NvRmDfsVoltageRailId_Cpu, &CpuMv, NULL);
+#endif
+ CpuMv = NV_MAX(CpuMv, pDfsSource->MinMv);
+ *pSystemMv = ((CpuMv * s_Ap20CpuConfig.CoreOverCpuSlope) >>
FIXED_POINT_BITS) + s_Ap20CpuConfig.CoreOverCpuOffset;
*pSystemMv = NV_MAX(DivMv, (*pSystemMv));
}
diff --git a/arch/arm/mach-tegra/nvrm/core/common/nvrm_power_dfs.c b/arch/arm/mach-tegra/nvrm/core/common/nvrm_power_dfs.c
index 6bba8ab0f129..bc4ebf099024 100644
--- a/arch/arm/mach-tegra/nvrm/core/common/nvrm_power_dfs.c
+++ b/arch/arm/mach-tegra/nvrm/core/common/nvrm_power_dfs.c
@@ -1737,9 +1737,7 @@ DttClockUpdate(
if (NVRM_DTT_DISABLED || (!pDtt->hOdmTcore))
return NV_FALSE;
- // Update temperature
- if (pDtt->TcorePolicy.UpdateFlag &&
- NvOdmTmonTemperatureGet(pDtt->hOdmTcore, &TemperatureC))
+ if (pDtt->TcorePolicy.UpdateFlag)
{
// Register TMON interrupt, if it is supported by device, and chip
// policy, but has not been registered yet. Set initial temperature
@@ -1750,6 +1748,7 @@ DttClockUpdate(
DttPolicyUpdate(pDfs->hRm, TemperatureC, pDtt);
LowLimit = pDtt->TcorePolicy.LowLimit;
HighLimit = pDtt->TcorePolicy.HighLimit;
+
if ((LowLimit != ODM_TMON_PARAMETER_UNSPECIFIED) &&
(HighLimit != ODM_TMON_PARAMETER_UNSPECIFIED))
{
@@ -2451,6 +2450,42 @@ void NvRmPrivDvsRequest(NvRmMilliVolts TargetMv)
}
}
+void
+NvRmPrivGetLowVoltageThreshold(
+ NvRmDfsVoltageRailId RailId,
+ NvRmMilliVolts* pLowMv,
+ NvRmMilliVolts* pPresentMv)
+{
+ NvRmDfs* pDfs = &s_Dfs;
+ NvRmDvs* pDvs = &s_Dfs.VoltageScaler;
+ NV_ASSERT(pLowMv);
+
+ switch (RailId)
+ {
+ case NvRmDfsVoltageRailId_Core:
+ *pLowMv = pDvs->LowCornerCoreMv;
+ if(pPresentMv)
+ *pPresentMv = pDvs->CurrentCoreMv;
+ break;
+
+ case NvRmDfsVoltageRailId_Cpu:
+ if (NvRmPrivIsCpuRailDedicated(pDfs->hRm))
+ {
+ *pLowMv = pDvs->LowCornerCpuMv;
+ if(pPresentMv)
+ *pPresentMv = pDvs->CurrentCpuMv;
+ break;
+ }
+ // fall through
+
+ default:
+ *pLowMv = NvRmVoltsUnspecified;
+ if(pPresentMv)
+ *pPresentMv = NvRmVoltsUnspecified;
+ break;
+ }
+}
+
static void NvRmPrivDvsStopAtNominal(void)
{
NvRmDfs* pDfs = &s_Dfs;
@@ -3446,32 +3481,10 @@ NvRmDfsGetLowVoltageThreshold(
NvRmMilliVolts* pLowMv,
NvRmMilliVolts* pPresentMv)
{
- NvRmDvs* pDvs = &s_Dfs.VoltageScaler;
-
NV_ASSERT(hRmDeviceHandle);
NvRmPrivLockSharedPll();
- switch (RailId)
- {
- case NvRmDfsVoltageRailId_Core:
- *pLowMv = pDvs->LowCornerCoreMv;
- *pPresentMv = pDvs->CurrentCoreMv;
- break;
-
- case NvRmDfsVoltageRailId_Cpu:
- if (NvRmPrivIsCpuRailDedicated(hRmDeviceHandle))
- {
- *pLowMv = pDvs->LowCornerCpuMv;
- *pPresentMv = pDvs->CurrentCpuMv;
- break;
- }
- // fall through
-
- default:
- *pLowMv = NvRmVoltsUnspecified;
- *pPresentMv = NvRmVoltsUnspecified;
- break;
- }
+ NvRmPrivGetLowVoltageThreshold(RailId, pLowMv, pPresentMv);
NvRmPrivUnlockSharedPll();
}
diff --git a/arch/arm/mach-tegra/nvrm/io/ap15/ap15rm_pwm.c b/arch/arm/mach-tegra/nvrm/io/ap15/ap15rm_pwm.c
index dbaf41563bd4..14c58e4e28f3 100644
--- a/arch/arm/mach-tegra/nvrm/io/ap15/ap15rm_pwm.c
+++ b/arch/arm/mach-tegra/nvrm/io/ap15/ap15rm_pwm.c
@@ -316,7 +316,7 @@ void NvRmPwmClose(NvRmPwmHandle hPwm)
NvOsMutexUnlock(s_hPwmMutex);
}
-#define MAX_DUTY_CYCLE 256
+#define MAX_DUTY_CYCLE 255
NvError NvRmPwmConfig(
NvRmPwmHandle hPwm,
@@ -416,7 +416,9 @@ NvError NvRmPwmConfig(
* Convert from percentage unsigned 15.16 fixed point
* format to actual register value
*/
- DCycle = (NvU8)((DutyCycle * MAX_DUTY_CYCLE/100)>>16);
+ DCycle = (DutyCycle * MAX_DUTY_CYCLE/100)>>16;
+ if (DCycle > MAX_DUTY_CYCLE)
+ DCycle = MAX_DUTY_CYCLE;
RegValue = PWM_SETNUM(CSR_0, ENB, PwmMode) |
PWM_SETNUM(CSR_0, PWM_0, DCycle);
diff --git a/arch/arm/mach-tegra/nvrm_user.c b/arch/arm/mach-tegra/nvrm_user.c
index c4c2ed214930..543c1fb27f96 100644
--- a/arch/arm/mach-tegra/nvrm_user.c
+++ b/arch/arm/mach-tegra/nvrm_user.c
@@ -27,6 +27,7 @@
#include <linux/cpumask.h>
#include <linux/sched.h>
#include <linux/cpu.h>
+#include <linux/platform_device.h>
#include "nvcommon.h"
#include "nvassert.h"
#include "nvos.h"
@@ -164,18 +165,19 @@ static void NvRmDfsThread(void *args)
}
if (Request & NvRmPmRequest_CpuOnFlag)
{
-#ifdef CONFIG_HOTPLUG_CPU
+#ifdef CONFIG_HOTPLUG_CPU
printk("DFS requested CPU ON\n");
cpu_up(1);
-#endif
+#endif
}
+
if (Request & NvRmPmRequest_CpuOffFlag)
{
-#ifdef CONFIG_HOTPLUG_CPU
+#ifdef CONFIG_HOTPLUG_CPU
printk("DFS requested CPU OFF\n");
cpu_down(1);
-#endif
- }
+#endif
+ }
}
}
}
@@ -204,46 +206,6 @@ static void client_detach(NvRtClientHandle client)
}
}
-static int __init nvrm_init( void )
-{
- int e = 0;
- NvU32 NumTypes = NvRtObjType_NvRm_Num;
-
- printk("nvrm init\n");
-
- NV_ASSERT(s_RtHandle == NULL);
-
- if (NvRtCreate(1, &NumTypes, &s_RtHandle) != NvSuccess)
- {
- e = -ENOMEM;
- }
-
- if (e == 0)
- {
- e = misc_register( &nvrm_dev );
- }
-
- if( e < 0 )
- {
- if (s_RtHandle)
- {
- NvRtDestroy(s_RtHandle);
- s_RtHandle = NULL;
- }
-
- printk("nvrm failed to open\n");
- }
-
- return e;
-}
-
-static void __exit nvrm_deinit( void )
-{
- misc_deregister( &nvrm_dev );
- NvRtDestroy(s_RtHandle);
- s_RtHandle = NULL;
-}
-
int nvrm_open(struct inode *inode, struct file *file)
{
NvRtClientHandle Client;
@@ -522,7 +484,7 @@ long nvrm_unlocked_ioctl(struct file *file,
goto fail;
}
}
- break;
+ break;
case NvRmIoctls_NvRmGetClientId:
err = NvOsCopyIn(&p, (void*)arg, sizeof(p));
if (err != NvSuccess)
@@ -534,7 +496,7 @@ long nvrm_unlocked_ioctl(struct file *file,
NV_ASSERT(p.InBufferSize == 0);
NV_ASSERT(p.OutBufferSize == sizeof(NvRtClientHandle));
NV_ASSERT(p.InOutBufferSize == 0);
-
+
if (NvOsCopyOut(p.pBuffer,
&file->private_data,
sizeof(NvRtClientHandle)) != NvSuccess)
@@ -553,7 +515,7 @@ long nvrm_unlocked_ioctl(struct file *file,
NvOsDebugPrintf("NvRmIoctls_NvRmClientAttach: copy in failed\n");
goto fail;
}
-
+
NV_ASSERT(p.InBufferSize == sizeof(NvRtClientHandle));
NV_ASSERT(p.OutBufferSize == 0);
NV_ASSERT(p.InOutBufferSize == 0);
@@ -590,11 +552,11 @@ long nvrm_unlocked_ioctl(struct file *file,
NvOsDebugPrintf("NvRmIoctls_NvRmClientAttach: copy in failed\n");
goto fail;
}
-
+
NV_ASSERT(p.InBufferSize == sizeof(NvRtClientHandle));
NV_ASSERT(p.OutBufferSize == 0);
NV_ASSERT(p.InOutBufferSize == 0);
-
+
if (NvOsCopyIn((void*)&Client,
p.pBuffer,
sizeof(NvRtClientHandle)) != NvSuccess)
@@ -610,10 +572,10 @@ long nvrm_unlocked_ioctl(struct file *file,
// The daemon is detaching from itself, no need to dec refcount
break;
}
-
- client_detach(Client);
+
+ client_detach(Client);
break;
- }
+ }
// FIXME: power ioctls?
default:
printk( "unknown ioctl code\n" );
@@ -640,5 +602,82 @@ int nvrm_mmap(struct file *file, struct vm_area_struct *vma)
return 0;
}
+static int nvrm_probe(struct platform_device *pdev)
+{
+ int e = 0;
+ NvU32 NumTypes = NvRtObjType_NvRm_Num;
+
+ printk("nvrm probe\n");
+
+ NV_ASSERT(s_RtHandle == NULL);
+
+ if (NvRtCreate(1, &NumTypes, &s_RtHandle) != NvSuccess)
+ {
+ e = -ENOMEM;
+ }
+
+ if (e == 0)
+ {
+ e = misc_register( &nvrm_dev );
+ }
+
+ if( e < 0 )
+ {
+ if (s_RtHandle)
+ {
+ NvRtDestroy(s_RtHandle);
+ s_RtHandle = NULL;
+ }
+
+ printk("nvrm probe failed to open\n");
+ }
+ return e;
+}
+
+static int nvrm_remove(struct platform_device *pdev)
+{
+ misc_deregister( &nvrm_dev );
+ NvRtDestroy(s_RtHandle);
+ s_RtHandle = NULL;
+ return 0;
+}
+
+static int nvrm_suspend(struct platform_device *pdev)
+{
+ NvError Err = NvSuccess;
+ printk(KERN_INFO "%s called\n", __func__);
+ return Err;
+}
+
+static int nvrm_resume(struct platform_device *pdev)
+{
+ NvError Err = NvSuccess;
+ printk(KERN_INFO "%s called\n", __func__);
+ return Err;
+}
+
+static struct platform_driver nvrm_driver =
+{
+ .probe = nvrm_probe,
+ .remove = nvrm_remove,
+ .suspend = nvrm_suspend,
+ .resume = nvrm_resume,
+ .driver = { .name = "nvrm" }
+};
+
+static int __init nvrm_init(void)
+{
+ int ret = 0;
+ printk(KERN_INFO "%s called\n", __func__);
+ ret= platform_driver_register(&nvrm_driver);
+ return ret;
+}
+
+static void __exit nvrm_deinit(void)
+{
+ printk(KERN_INFO "%s called\n", __func__);
+ platform_driver_unregister(&nvrm_driver);
+}
+
module_init(nvrm_init);
module_exit(nvrm_deinit);
diff --git a/arch/arm/mach-tegra/pci-enum.c b/arch/arm/mach-tegra/pci-enum.c
index 1dab1de0a86d..4f8c614ff8fc 100644
--- a/arch/arm/mach-tegra/pci-enum.c
+++ b/arch/arm/mach-tegra/pci-enum.c
@@ -434,8 +434,8 @@ static void pci_tegra_setup_pci_bridge(struct pci_tegra_device *dev)
reg |= PCI_COMMAND_SERR;
pci_conf_write16(dev->bus, dev->devfn, PCI_COMMAND, reg);
- /* FIXME how to handle interrutps */
- pci_conf_write8(dev->bus, dev->devfn, PCI_INTERRUPT_LINE, 0x82);
+ pci_conf_write8(dev->bus, dev->devfn, PCI_INTERRUPT_LINE,
+ tegra_get_module_inst_irq("pcie", 0, 0));
pci_conf_write8(dev->bus, dev->devfn, PCI_INTERRUPT_PIN, 0xa);
}
@@ -535,8 +535,9 @@ static void pci_tegra_setup_pci_device(struct pci_tegra_device *dev)
reg |= PCI_COMMAND_SERR;
pci_conf_write16(dev->bus, dev->devfn, PCI_COMMAND, reg);
- /* FIXME how to handle interrutps */
-
+ pci_conf_write8(dev->bus, dev->devfn, PCI_INTERRUPT_LINE,
+ tegra_get_module_inst_irq("pcie", 0, 0));
+ pci_conf_write8(dev->bus, dev->devfn, PCI_INTERRUPT_PIN, 0xa);
}
static void pci_tegra_print_device_tree(struct pci_tegra_device *dev)
@@ -612,8 +613,12 @@ void pci_tegra_enumerate(void)
/* Disable all exceptions */
pci_tegra_afi_writel(0, AFI_FPCI_ERROR_MASKS_0);
- /* Set the base and limits of the resources */
- pci_tegra_io_base = TEGRA_PCIE_BASE + PCIE_DOWNSTREAM_IO_OFFSET;
+ /* Set the base and limits for the resources */
+
+ /* Start the IO offset at a non-zero value, since Linux treats a
+ * resource value of 0 as unallocated and bails out.
+ */
+ pci_tegra_io_base = TEGRA_PCIE_BASE + PCIE_DOWNSTREAM_IO_OFFSET + 16;
pci_tegra_io_limt = pci_tegra_io_base + PCIE_DOWNSTREAM_IO_SIZE;
pci_tegra_mem_base = FPCI_NON_PREFETCH_MEMORY_OFFSET;
diff --git a/arch/arm/mach-tegra/pci.c b/arch/arm/mach-tegra/pci.c
index 2ecba6015229..40a46e13c443 100644
--- a/arch/arm/mach-tegra/pci.c
+++ b/arch/arm/mach-tegra/pci.c
@@ -20,8 +20,8 @@
* 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301, USA.
*/
-#define DEBUG
-#define VERBOSE_DEBUG
+//#define DEBUG
+//#define VERBOSE_DEBUG
#include <linux/kernel.h>
@@ -116,8 +116,11 @@ static int pci_tegra_read_conf(struct pci_bus *bus, u32 devfn,
if (where & 2) v >>= 16;
v &= 0xffff;
break;
- default:
+ case 4:
break;
+ default:
+ /* If the PCI stack is sane, we should not get here */
+ BUG();
}
*val = v;
@@ -176,9 +179,12 @@ static int pci_tegra_write_conf(struct pci_bus *bus, u32 devfn,
temp |= val << ((where & 0x3) * 8);
writel(temp, (u32)addr & ~0x3);
break;
- default:
+ case 4:
writel(val, addr);
break;
+ default:
+ /* If the PCI stack is sane, we should not get here */
+ BUG();
}
fail:
return PCIBIOS_SUCCESSFUL;
diff --git a/arch/arm/mach-tegra/suspend_ops.c b/arch/arm/mach-tegra/suspend_ops.c
new file mode 100644
index 000000000000..0aef7afe151e
--- /dev/null
+++ b/arch/arm/mach-tegra/suspend_ops.c
@@ -0,0 +1,81 @@
+/*
+ * arch/arm/mach-tegra/suspend_ops.c
+ *
+ * Suspend Operation API implementation
+ *
+ * Copyright (c) 2010, NVIDIA Corporation.
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License as published by
+ * the Free Software Foundation; either version 2 of the License, or
+ * (at your option) any later version.
+ *
+ * This program is distributed in the hope that it will be useful, but WITHOUT
+ * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
+ * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for
+ * more details.
+ *
+ * You should have received a copy of the GNU General Public License along
+ * with this program; if not, write to the Free Software Foundation, Inc.,
+ * 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301, USA.
+ */
+
+#include <linux/suspend.h>
+#include "nvcommon.h"
+
+int tegra_state_valid(suspend_state_t state)
+{
+ printk("%s CALLED\n", __func__);
+ if (state > PM_SUSPEND_ON && state <= PM_SUSPEND_MAX)
+ return 1;
+ return 0;
+}
+
+int tegra_state_begin(suspend_state_t state)
+{
+ printk("%s CALLED with state = %d\n", __func__, state);
+ return 0;
+}
+
+int tegra_state_prepare(void)
+{
+ printk("%s CALLED \n", __func__);
+ return 0;
+}
+
+int tegra_state_enter(suspend_state_t state)
+{
+ printk("%s CALLED with state = %d\n", __func__, state);
+ return 0;
+}
+
+void tegra_state_finish(void)
+{
+ printk("%s CALLED \n", __func__);
+}
+
+void tegra_state_end(void)
+{
+ printk("%s CALLED \n", __func__);
+}
+
+void tegra_state_recover(void)
+{
+ printk("%s CALLED \n", __func__);
+}
+
+static struct platform_suspend_ops tegra_suspend_ops =
+{
+ .valid = tegra_state_valid,
+ .begin = tegra_state_begin,
+ .prepare = tegra_state_prepare,
+ .enter = tegra_state_enter,
+ .finish = tegra_state_finish,
+ .end = tegra_state_end,
+ .recover = tegra_state_recover
+};
+
+void tegra_set_suspend_ops()
+{
+ suspend_set_ops(&tegra_suspend_ops);
+}
diff --git a/arch/arm/mm/cache-v6.S b/arch/arm/mm/cache-v6.S
index b804c9d3e420..55f7ecd1264e 100644
--- a/arch/arm/mm/cache-v6.S
+++ b/arch/arm/mm/cache-v6.S
@@ -203,20 +203,7 @@ ENTRY(v6_flush_kern_dcache_page)
* - end - virtual end address of region
*/
ENTRY(v6_dma_inv_range)
- tst r0, #D_CACHE_LINE_SIZE - 1
bic r0, r0, #D_CACHE_LINE_SIZE - 1
-#ifdef HARVARD_CACHE
- mcrne p15, 0, r0, c7, c10, 1 @ clean D line
-#else
- mcrne p15, 0, r0, c7, c11, 1 @ clean unified line
-#endif
- tst r1, #D_CACHE_LINE_SIZE - 1
- bic r1, r1, #D_CACHE_LINE_SIZE - 1
-#ifdef HARVARD_CACHE
- mcrne p15, 0, r1, c7, c14, 1 @ clean & invalidate D line
-#else
- mcrne p15, 0, r1, c7, c15, 1 @ clean & invalidate unified line
-#endif
1:
#ifdef HARVARD_CACHE
mcr p15, 0, r0, c7, c6, 1 @ invalidate D line
diff --git a/arch/arm/mm/cache-v7.S b/arch/arm/mm/cache-v7.S
index 4b733d14076a..5327bd1b9bcf 100644
--- a/arch/arm/mm/cache-v7.S
+++ b/arch/arm/mm/cache-v7.S
@@ -218,15 +218,9 @@ ENDPROC(v7_flush_kern_dcache_page)
ENTRY(v7_dma_inv_range)
dcache_line_size r2, r3
sub r3, r2, #1
- tst r0, r3
bic r0, r0, r3
it ne
- mcrne p15, 0, r0, c7, c14, 1 @ clean & invalidate D / U line
- tst r1, r3
- bic r1, r1, r3
- it ne
- mcrne p15, 0, r1, c7, c14, 1 @ clean & invalidate D / U line
1:
mcr p15, 0, r0, c7, c6, 1 @ invalidate D / U line
add r0, r0, r2
diff --git a/arch/arm/mm/dma-mapping.c b/arch/arm/mm/dma-mapping.c
index 9198f94fac89..1f37fae59d01 100644
--- a/arch/arm/mm/dma-mapping.c
+++ b/arch/arm/mm/dma-mapping.c
@@ -509,58 +509,40 @@ core_initcall(consistent_init);
* platforms with CONFIG_DMABOUNCE.
* Use the driver DMA support - see dma-mapping.h (dma_sync_*)
*/
-void dma_cache_maint(const void *start, size_t size, int direction)
+void __dma_cache_maint(const void *start, size_t size, int map)
{
void (*inner_op)(const void *, const void *);
void (*outer_op)(unsigned long, unsigned long);
BUG_ON(!virt_addr_valid(start) || !virt_addr_valid(start + size - 1));
- switch (direction) {
- case DMA_FROM_DEVICE: /* invalidate only */
- inner_op = smp_dma_inv_range;
- outer_op = outer_inv_range;
- break;
- case DMA_TO_DEVICE: /* writeback only */
- inner_op = smp_dma_clean_range;
- outer_op = outer_clean_range;
- break;
- case DMA_BIDIRECTIONAL: /* writeback and invalidate */
- inner_op = smp_dma_flush_range;
- outer_op = outer_flush_range;
- break;
- default:
- BUG();
+ if (map) { /* writeback only */
+ inner_op = dmac_clean_range;
+ outer_op = outer_clean_range;
+ } else { /* Invalidate only */
+ inner_op = dmac_inv_range;
+ outer_op = outer_inv_range;
}
inner_op(start, start + size);
outer_op(__pa(start), __pa(start) + size);
}
-EXPORT_SYMBOL(dma_cache_maint);
+EXPORT_SYMBOL(__dma_cache_maint);
static void dma_cache_maint_contiguous(struct page *page, unsigned long offset,
- size_t size, int direction)
+ size_t size, int map)
{
void *vaddr;
unsigned long paddr;
void (*inner_op)(const void *, const void *);
void (*outer_op)(unsigned long, unsigned long);
- switch (direction) {
- case DMA_FROM_DEVICE: /* invalidate only */
- inner_op = smp_dma_inv_range;
- outer_op = outer_inv_range;
- break;
- case DMA_TO_DEVICE: /* writeback only */
- inner_op = smp_dma_clean_range;
- outer_op = outer_clean_range;
- break;
- case DMA_BIDIRECTIONAL: /* writeback and invalidate */
- inner_op = smp_dma_flush_range;
- outer_op = outer_flush_range;
- break;
- default:
- BUG();
+ if (map) { /* writeback only */
+ inner_op = dmac_clean_range;
+ outer_op = outer_clean_range;
+ } else { /* Invalidate only */
+ inner_op = dmac_inv_range;
+ outer_op = outer_inv_range;
}
if (!PageHighMem(page)) {
@@ -579,8 +561,8 @@ static void dma_cache_maint_contiguous(struct page *page, unsigned long offset,
outer_op(paddr, paddr + size);
}
-void dma_cache_maint_page(struct page *page, unsigned long offset,
- size_t size, int dir)
+void __dma_cache_maint_page(struct page *page, unsigned long offset,
+ size_t size, int map)
{
/*
* A single sg entry may refer to multiple physically contiguous
@@ -598,13 +580,13 @@ void dma_cache_maint_page(struct page *page, unsigned long offset,
}
len = PAGE_SIZE - offset;
}
- dma_cache_maint_contiguous(page, offset, len, dir);
+ dma_cache_maint_contiguous(page, offset, len, map);
offset = 0;
page++;
left -= len;
} while (left);
}
-EXPORT_SYMBOL(dma_cache_maint_page);
+EXPORT_SYMBOL(__dma_cache_maint_page);
/**
* dma_map_sg - map a set of SG buffers for streaming mode DMA
@@ -680,6 +662,14 @@ void dma_sync_sg_for_cpu(struct device *dev, struct scatterlist *sg,
for_each_sg(sg, s, nents, i) {
dmabounce_sync_for_cpu(dev, sg_dma_address(s), 0,
sg_dma_len(s), dir);
+
+ if (!dmabounce_sync_for_cpu(dev, sg_dma_address(s), 0,
+ sg_dma_len(s), dir))
+ continue;
+
+ if (!arch_is_coherent() && dir != DMA_TO_DEVICE)
+ __dma_cache_maint_page(sg_page(s), s->offset,
+ s->length, 0);
}
}
EXPORT_SYMBOL(dma_sync_sg_for_cpu);
@@ -703,8 +693,8 @@ void dma_sync_sg_for_device(struct device *dev, struct scatterlist *sg,
continue;
if (!arch_is_coherent())
- dma_cache_maint_page(sg_page(s), s->offset,
- s->length, dir);
+ __dma_cache_maint_page(sg_page(s), s->offset,
+ s->length, 1);
}
}
EXPORT_SYMBOL(dma_sync_sg_for_device);
diff --git a/arch/arm/plat-omap/include/mach/memory.h b/arch/arm/plat-omap/include/mach/memory.h
index d6b5ca6c7da2..1383ac5d1f07 100644
--- a/arch/arm/plat-omap/include/mach/memory.h
+++ b/arch/arm/plat-omap/include/mach/memory.h
@@ -65,6 +65,13 @@
(dma_addr_t)virt_to_lbus(page_address(page)) : \
(dma_addr_t)__virt_to_phys(page_address(page));})
+#define __arch_dma_to_page(dev, addr) \
+ ({ dma_addr_t __dma = addr; \
+ if (is_lbus_device(dev)) \
+ __dma += PHYS_OFFSET - OMAP1510_LB_OFFSET; \
+ phys_to_page(__dma); \
+ })
+
#define __arch_dma_to_virt(dev, addr) ({ (void *) (is_lbus_device(dev) ? \
lbus_to_virt(addr) : \
__phys_to_virt(addr)); })