summaryrefslogtreecommitdiff
diff options
context:
space:
mode:
-rw-r--r--arch/arm/common/dmabounce.c37
-rw-r--r--arch/arm/include/asm/dma-mapping.h38
-rw-r--r--arch/arm/include/asm/memory.h13
-rw-r--r--arch/arm/mach-iop13xx/include/mach/memory.h2
-rw-r--r--arch/arm/mach-ks8695/include/mach/memory.h7
-rw-r--r--arch/arm/mach-tegra/Makefile3
-rw-r--r--arch/arm/mach-tegra/board_nvodm.c6
-rw-r--r--arch/arm/mach-tegra/dma.c4
-rw-r--r--arch/arm/mach-tegra/include/mach/memory.h11
-rw-r--r--arch/arm/mach-tegra/include/nvrm_power_private.h18
-rw-r--r--arch/arm/mach-tegra/init_common.c9
-rw-r--r--arch/arm/mach-tegra/nvrm/core/ap20/ap20rm_clock_config.c11
-rw-r--r--arch/arm/mach-tegra/nvrm/core/common/nvrm_power_dfs.c65
-rw-r--r--arch/arm/mach-tegra/nvrm/io/ap15/ap15rm_pwm.c6
-rw-r--r--arch/arm/mach-tegra/nvrm_user.c145
-rw-r--r--arch/arm/mach-tegra/pci-enum.c17
-rw-r--r--arch/arm/mach-tegra/pci.c14
-rw-r--r--arch/arm/mach-tegra/suspend_ops.c81
-rw-r--r--arch/arm/mm/cache-v6.S13
-rw-r--r--arch/arm/mm/cache-v7.S6
-rw-r--r--arch/arm/mm/dma-mapping.c68
-rw-r--r--arch/arm/plat-omap/include/mach/memory.h7
-rw-r--r--drivers/serial/tegra_hsuart.c190
-rw-r--r--drivers/usb/host/ehci-tegra.c36
24 files changed, 512 insertions, 295 deletions
diff --git a/arch/arm/common/dmabounce.c b/arch/arm/common/dmabounce.c
index f030f0775be7..8f3beaa332a1 100644
--- a/arch/arm/common/dmabounce.c
+++ b/arch/arm/common/dmabounce.c
@@ -276,7 +276,7 @@ static inline dma_addr_t map_single(struct device *dev, void *ptr, size_t size,
* We don't need to sync the DMA buffer since
* it was allocated via the coherent allocators.
*/
- dma_cache_maint(ptr, size, dir);
+ __dma_cache_maint(ptr, size, dir);
}
return dma_addr;
@@ -307,17 +307,15 @@ static inline void unmap_single(struct device *dev, dma_addr_t dma_addr,
memcpy(ptr, buf->safe, size);
/*
- * DMA buffers must have the same cache properties
- * as if they were really used for DMA - which means
- * data must be written back to RAM. Note that
- * we don't use dmac_flush_range() here for the
- * bidirectional case because we know the cache
- * lines will be coherent with the data written.
+ * Since we may have written to a page cache page,
+ * we need to ensure that the data will be coherent
+ * with user mappings.
*/
- dmac_clean_range(ptr, ptr + size);
- outer_clean_range(__pa(ptr), __pa(ptr) + size);
+ __cpuc_flush_dcache_area(ptr, size);
}
free_safe_buffer(dev->archdata.dmabounce, buf);
+ } else if (dir != DMA_TO_DEVICE) {
+ __dma_cache_maint(dma_to_virt(dev, dma_addr), size, 0);
}
}
@@ -341,6 +339,22 @@ dma_addr_t dma_map_single(struct device *dev, void *ptr, size_t size,
}
EXPORT_SYMBOL(dma_map_single);
+/*
+ * see if a mapped address was really a "safe" buffer and if so, copy
+ * the data from the safe buffer back to the unsafe buffer and free up
+ * the safe buffer. (basically return things back to the way they
+ * should be)
+ */
+void dma_unmap_single(struct device *dev, dma_addr_t dma_addr, size_t size,
+ enum dma_data_direction dir)
+{
+ dev_dbg(dev, "%s(ptr=%p,size=%d,dir=%x)\n",
+ __func__, (void *) dma_addr, size, dir);
+
+ unmap_single(dev, dma_addr, size, dir);
+}
+EXPORT_SYMBOL(dma_unmap_single);
+
dma_addr_t dma_map_page(struct device *dev, struct page *page,
unsigned long offset, size_t size, enum dma_data_direction dir)
{
@@ -359,8 +373,7 @@ EXPORT_SYMBOL(dma_map_page);
* the safe buffer. (basically return things back to the way they
* should be)
*/
-
-void dma_unmap_single(struct device *dev, dma_addr_t dma_addr, size_t size,
+void dma_unmap_page(struct device *dev, dma_addr_t dma_addr, size_t size,
enum dma_data_direction dir)
{
dev_dbg(dev, "%s(ptr=%p,size=%d,dir=%x)\n",
@@ -368,7 +381,7 @@ void dma_unmap_single(struct device *dev, dma_addr_t dma_addr, size_t size,
unmap_single(dev, dma_addr, size, dir);
}
-EXPORT_SYMBOL(dma_unmap_single);
+EXPORT_SYMBOL(dma_unmap_page);
int dmabounce_sync_for_cpu(struct device *dev, dma_addr_t addr,
unsigned long off, size_t sz, enum dma_data_direction dir)
diff --git a/arch/arm/include/asm/dma-mapping.h b/arch/arm/include/asm/dma-mapping.h
index 59fa762e9c66..5fd23a810a26 100644
--- a/arch/arm/include/asm/dma-mapping.h
+++ b/arch/arm/include/asm/dma-mapping.h
@@ -17,7 +17,7 @@
#ifndef __arch_page_to_dma
static inline dma_addr_t page_to_dma(struct device *dev, struct page *page)
{
- return (dma_addr_t)__virt_to_bus((unsigned long)page_address(page));
+ return (dma_addr_t)__pfn_to_bus(page_to_pfn(page));
}
static inline void *dma_to_virt(struct device *dev, dma_addr_t addr)
@@ -35,6 +35,11 @@ static inline dma_addr_t page_to_dma(struct device *dev, struct page *page)
return __arch_page_to_dma(dev, page);
}
+static inline struct page *dma_to_page(struct device *dev, dma_addr_t addr)
+{
+ return __arch_dma_to_page(dev, addr);
+}
+
static inline void *dma_to_virt(struct device *dev, dma_addr_t addr)
{
return __arch_dma_to_virt(dev, addr);
@@ -56,9 +61,9 @@ static inline dma_addr_t virt_to_dma(struct device *dev, void *addr)
* platforms with CONFIG_DMABOUNCE.
* Use the driver DMA support - see dma-mapping.h (dma_sync_*)
*/
-extern void dma_cache_maint(const void *kaddr, size_t size, int rw);
-extern void dma_cache_maint_page(struct page *page, unsigned long offset,
- size_t size, int rw);
+extern void __dma_cache_maint(const void *kaddr, size_t size, int map);
+extern void __dma_cache_maint_page(struct page *page, unsigned long offset,
+ size_t size, int map);
/*
* Return whether the given device DMA address mask can be supported
@@ -247,9 +252,11 @@ extern int dma_needs_bounce(struct device*, dma_addr_t, size_t);
*/
extern dma_addr_t dma_map_single(struct device *, void *, size_t,
enum dma_data_direction);
+extern void dma_unmap_single(struct device *, dma_addr_t, size_t,
+ enum dma_data_direction);
extern dma_addr_t dma_map_page(struct device *, struct page *,
unsigned long, size_t, enum dma_data_direction);
-extern void dma_unmap_single(struct device *, dma_addr_t, size_t,
+extern void dma_unmap_page(struct device *, dma_addr_t, size_t,
enum dma_data_direction);
/*
@@ -293,7 +300,7 @@ static inline dma_addr_t dma_map_single(struct device *dev, void *cpu_addr,
BUG_ON(!valid_dma_direction(dir));
if (!arch_is_coherent())
- dma_cache_maint(cpu_addr, size, dir);
+ __dma_cache_maint(cpu_addr, size, 1);
return virt_to_dma(dev, cpu_addr);
}
@@ -318,7 +325,7 @@ static inline dma_addr_t dma_map_page(struct device *dev, struct page *page,
BUG_ON(!valid_dma_direction(dir));
if (!arch_is_coherent())
- dma_cache_maint_page(page, offset, size, dir);
+ __dma_cache_maint_page(page, offset, size, 1);
return page_to_dma(dev, page) + offset;
}
@@ -340,9 +347,9 @@ static inline dma_addr_t dma_map_page(struct device *dev, struct page *page,
static inline void dma_unmap_single(struct device *dev, dma_addr_t handle,
size_t size, enum dma_data_direction dir)
{
- /* nothing to do */
+ if (dir != DMA_TO_DEVICE)
+ __dma_cache_maint(dma_to_virt(dev, handle), size, 0);
}
-#endif /* CONFIG_DMABOUNCE */
/**
* dma_unmap_page - unmap a buffer previously mapped through dma_map_page()
@@ -361,8 +368,11 @@ static inline void dma_unmap_single(struct device *dev, dma_addr_t handle,
static inline void dma_unmap_page(struct device *dev, dma_addr_t handle,
size_t size, enum dma_data_direction dir)
{
- dma_unmap_single(dev, handle, size, dir);
+ if (dir != DMA_TO_DEVICE)
+ __dma_cache_maint_page(dma_to_page(dev, handle),
+ handle & ~PAGE_MASK, size, 0);
}
+#endif /* CONFIG_DMABOUNCE */
/**
* dma_sync_single_range_for_cpu
@@ -388,7 +398,11 @@ static inline void dma_sync_single_range_for_cpu(struct device *dev,
{
BUG_ON(!valid_dma_direction(dir));
- dmabounce_sync_for_cpu(dev, handle, offset, size, dir);
+ if (!dmabounce_sync_for_cpu(dev, handle, offset, size, dir))
+ return;
+
+ if (dir != DMA_TO_DEVICE)
+ __dma_cache_maint(dma_to_virt(dev, handle) + offset, size, 0);
}
static inline void dma_sync_single_range_for_device(struct device *dev,
@@ -401,7 +415,7 @@ static inline void dma_sync_single_range_for_device(struct device *dev,
return;
if (!arch_is_coherent())
- dma_cache_maint(dma_to_virt(dev, handle) + offset, size, dir);
+ __dma_cache_maint(dma_to_virt(dev, handle) + offset, size, 1);
}
static inline void dma_sync_single_for_cpu(struct device *dev,
diff --git a/arch/arm/include/asm/memory.h b/arch/arm/include/asm/memory.h
index 43c99f60cbe4..415433c91343 100644
--- a/arch/arm/include/asm/memory.h
+++ b/arch/arm/include/asm/memory.h
@@ -135,6 +135,12 @@
#define __phys_to_pfn(paddr) ((paddr) >> PAGE_SHIFT)
#define __pfn_to_phys(pfn) ((pfn) << PAGE_SHIFT)
+/*
+ * Convert a page to/from a physical address
+ */
+#define page_to_phys(page) (__pfn_to_phys(page_to_pfn(page)))
+#define phys_to_page(phys) (pfn_to_page(__phys_to_pfn(phys)))
+
#ifndef __ASSEMBLY__
/*
@@ -195,6 +201,8 @@ static inline void *phys_to_virt(unsigned long x)
#ifndef __virt_to_bus
#define __virt_to_bus __virt_to_phys
#define __bus_to_virt __phys_to_virt
+#define __pfn_to_bus(x) __pfn_to_phys(x)
+#define __bus_to_pfn(x) __phys_to_pfn(x)
#endif
static inline __deprecated unsigned long virt_to_bus(void *x)
@@ -310,11 +318,6 @@ static inline __deprecated void *bus_to_virt(unsigned long x)
#endif /* !CONFIG_DISCONTIGMEM */
/*
- * For BIO. "will die". Kill me when bio_to_phys() and bvec_to_phys() die.
- */
-#define page_to_phys(page) (page_to_pfn(page) << PAGE_SHIFT)
-
-/*
* Optional coherency support. Currently used only by selected
* Intel XSC3-based systems.
*/
diff --git a/arch/arm/mach-iop13xx/include/mach/memory.h b/arch/arm/mach-iop13xx/include/mach/memory.h
index e012bf13c955..ff0f2ee9d3f7 100644
--- a/arch/arm/mach-iop13xx/include/mach/memory.h
+++ b/arch/arm/mach-iop13xx/include/mach/memory.h
@@ -61,6 +61,8 @@ static inline unsigned long __lbus_to_virt(dma_addr_t x)
#define __arch_page_to_dma(dev, page) \
__arch_virt_to_dma(dev, page_address(page))
+#define __arch_dma_to_page(dev, addr) phys_to_page(addr)
+
#endif /* CONFIG_ARCH_IOP13XX */
#endif /* !ASSEMBLY */
diff --git a/arch/arm/mach-ks8695/include/mach/memory.h b/arch/arm/mach-ks8695/include/mach/memory.h
index 6d5887cf5742..50ccc00fdd04 100644
--- a/arch/arm/mach-ks8695/include/mach/memory.h
+++ b/arch/arm/mach-ks8695/include/mach/memory.h
@@ -37,6 +37,13 @@ extern struct bus_type platform_bus_type;
(dma_addr_t)__virt_to_phys(x) : (dma_addr_t)__virt_to_bus(x); })
#define __arch_page_to_dma(dev, x) __arch_virt_to_dma(dev, page_address(x))
+#define __arch_dma_to_page(dev, x) \
+ ({ dma_addr_t __dma = x; \
+ if (!is_lbus_device(dev)) \
+ __dma += PHYS_OFFSET - KS8695_PCIMEM_PA; \
+ phys_to_page(__dma); \
+ })
+
#endif
#endif
diff --git a/arch/arm/mach-tegra/Makefile b/arch/arm/mach-tegra/Makefile
index 075d18a45193..88296db9a1f6 100644
--- a/arch/arm/mach-tegra/Makefile
+++ b/arch/arm/mach-tegra/Makefile
@@ -16,6 +16,9 @@ obj-y += irq_gpio.o
obj-y += timer.o
obj-y += tegra_sysmap.o
+# Tegra suspend operation
+obj-$(CONFIG_PM) += suspend_ops.o
+
# Export symbols used by loadable modules
obj-y += tegra_exports.o
diff --git a/arch/arm/mach-tegra/board_nvodm.c b/arch/arm/mach-tegra/board_nvodm.c
index 61a54f02e99d..5fa01becb3e1 100644
--- a/arch/arm/mach-tegra/board_nvodm.c
+++ b/arch/arm/mach-tegra/board_nvodm.c
@@ -68,6 +68,10 @@ extern const char* tegra_boot_device;
extern void __init tegra_init_irq(void);
extern void __init tegra_map_common_io(void);
+static struct platform_device nvrm_device =
+{
+ .name = "nvrm"
+};
#ifdef CONFIG_DEVNVMAP
static struct platform_device nvmap_device = {
@@ -362,6 +366,8 @@ static void __init tegra_machine_init(void)
NvU32 serial_number[2] = {0};
#endif
+ (void) platform_device_register(&nvrm_device);
+
tegra_common_init();
tegra_clk_init();
NvConfigDebugConsole(s_hRmGlobal);
diff --git a/arch/arm/mach-tegra/dma.c b/arch/arm/mach-tegra/dma.c
index 064ac861a3c7..fa4186513ce5 100644
--- a/arch/arm/mach-tegra/dma.c
+++ b/arch/arm/mach-tegra/dma.c
@@ -193,9 +193,11 @@ int tegra_dma_dequeue_req(int channel, struct tegra_dma_req *_req)
tegra_dma_update_hw(ch, next_req);
}
req->status = -TEGRA_DMA_REQ_ERROR_ABOTRED;
- spin_unlock_irqrestore(&ch->lock, irq_flags);
+ spin_unlock_irqrestore(&ch->lock, irq_flags);
req->complete(req, req->status);
+ spin_lock_irqsave(&ch->lock, irq_flags);
+
}
spin_unlock_irqrestore(&ch->lock, irq_flags);
diff --git a/arch/arm/mach-tegra/include/mach/memory.h b/arch/arm/mach-tegra/include/mach/memory.h
index 20043d3e9d8d..1e1d44cb4d2d 100644
--- a/arch/arm/mach-tegra/include/mach/memory.h
+++ b/arch/arm/mach-tegra/include/mach/memory.h
@@ -36,9 +36,14 @@
#define NET_IP_ALIGN 0
#define NET_SKB_PAD L1_CACHE_BYTES
-/* bus address and physical addresses are identical */
-#define __virt_to_bus(x) __virt_to_phys(x)
-#define __bus_to_virt(x) __phys_to_virt(x)
+
+#define __arch_page_to_dma(dev, page) ((dma_addr_t)__virt_to_phys(page_address(page)))
+
+#define __arch_dma_to_virt(dev, addr) ((void *) __phys_to_virt(addr))
+
+#define __arch_virt_to_dma(dev, addr) ((dma_addr_t) __virt_to_phys((unsigned long)(addr)))
+
+#define __arch_dma_to_page(dev, addr) (phys_to_page(addr))
#endif
diff --git a/arch/arm/mach-tegra/include/nvrm_power_private.h b/arch/arm/mach-tegra/include/nvrm_power_private.h
index 692599040a93..5afd6bd8affa 100644
--- a/arch/arm/mach-tegra/include/nvrm_power_private.h
+++ b/arch/arm/mach-tegra/include/nvrm_power_private.h
@@ -513,6 +513,24 @@ void NvRmPrivVoltageScale(
void NvRmPrivDvsRequest(NvRmMilliVolts TargetMv);
/**
+ * Gets low threshold and present voltage on the given rail.
+ *
+ * @param RailId The targeted voltage rail ID.
+ * @param pLowMv Output storage pointer for low voltage threshold (in
+ * millivolt).
+ * @param pPresentMv Output storage pointer for present rail voltage (in
+ * millivolt). This parameter is optional, set to NULL if only low
+ * threshold is to be retrieved.
+ *
+ * NvRmVoltsUnspecified is returned if targeted rail does not exist on SoC.
+ */
+void
+NvRmPrivGetLowVoltageThreshold(
+ NvRmDfsVoltageRailId RailId,
+ NvRmMilliVolts* pLowMv,
+ NvRmMilliVolts* pPresentMv);
+
+/**
* Outputs debug messages for starvation hints sent by the specified client.
*
* @param ClientId The client ID assigned by the RM power manager.
diff --git a/arch/arm/mach-tegra/init_common.c b/arch/arm/mach-tegra/init_common.c
index fa87882fc45e..32746e5e4d50 100644
--- a/arch/arm/mach-tegra/init_common.c
+++ b/arch/arm/mach-tegra/init_common.c
@@ -46,6 +46,11 @@ const char *tegra_partition_list = NULL;
char *tegra_boot_device = NULL;
NvRmGpioHandle s_hGpioGlobal = NULL;
+#ifdef CONFIG_PM
+/* FIXME : Uncomment this for actual suspend/resume
+extern void tegra_set_suspend_ops(void); */
+#endif
+
/*
* The format for the partition list command line parameter is
* tagrapart=<linux_name>:<start_sector>:<length_in_sectors>:<sector_size>,...
@@ -801,5 +806,9 @@ void __init tegra_common_init(void)
tegra_register_uart();
tegra_register_sdio();
tegra_register_usb();
+#ifdef CONFIG_PM
+ /* FIXME : Uncomment this for actual suspend/resume
+ tegra_set_suspend_ops(); */
+#endif
}
diff --git a/arch/arm/mach-tegra/nvrm/core/ap20/ap20rm_clock_config.c b/arch/arm/mach-tegra/nvrm/core/ap20/ap20rm_clock_config.c
index 882d0c184c68..4add3ff74fa9 100644
--- a/arch/arm/mach-tegra/nvrm/core/ap20/ap20rm_clock_config.c
+++ b/arch/arm/mach-tegra/nvrm/core/ap20/ap20rm_clock_config.c
@@ -1215,12 +1215,13 @@ Ap20CpuClockSourceFind(
NvRmMilliVolts* pSystemMv)
{
NvU32 i;
- NvRmMilliVolts DivMv;
+ NvRmMilliVolts DivMv = 0;
+ NvRmMilliVolts CpuMv = 0;
NvRmFreqKHz SourceKHz;
NV_ASSERT(DomainKHz <= MaxKHz);
NV_ASSERT(s_Ap20CpuConfig.pPllXStepsKHz);
- DivMv = pDfsSource->DividerSetting = 0; // no 2ndary divider by default
+ pDfsSource->DividerSetting = 0; // no 2ndary divider by default
// 1st try oscillator
SourceKHz = NvRmPrivGetClockSourceFreq(NvRmClockSource_ClkM);
@@ -1279,7 +1280,11 @@ get_mv:
// Finally get operational voltage for found source
pDfsSource->MinMv = NvRmPrivModuleVscaleGetMV(
hRmDevice, NvRmModuleID_Cpu, pDfsSource->SourceKHz);
- *pSystemMv = ((pDfsSource->MinMv * s_Ap20CpuConfig.CoreOverCpuSlope) >>
+#if !NV_OAL
+ NvRmPrivGetLowVoltageThreshold(NvRmDfsVoltageRailId_Cpu, &CpuMv, NULL);
+#endif
+ CpuMv = NV_MAX(CpuMv, pDfsSource->MinMv);
+ *pSystemMv = ((CpuMv * s_Ap20CpuConfig.CoreOverCpuSlope) >>
FIXED_POINT_BITS) + s_Ap20CpuConfig.CoreOverCpuOffset;
*pSystemMv = NV_MAX(DivMv, (*pSystemMv));
}
diff --git a/arch/arm/mach-tegra/nvrm/core/common/nvrm_power_dfs.c b/arch/arm/mach-tegra/nvrm/core/common/nvrm_power_dfs.c
index 6bba8ab0f129..bc4ebf099024 100644
--- a/arch/arm/mach-tegra/nvrm/core/common/nvrm_power_dfs.c
+++ b/arch/arm/mach-tegra/nvrm/core/common/nvrm_power_dfs.c
@@ -1737,9 +1737,7 @@ DttClockUpdate(
if (NVRM_DTT_DISABLED || (!pDtt->hOdmTcore))
return NV_FALSE;
- // Update temperature
- if (pDtt->TcorePolicy.UpdateFlag &&
- NvOdmTmonTemperatureGet(pDtt->hOdmTcore, &TemperatureC))
+ if (pDtt->TcorePolicy.UpdateFlag)
{
// Register TMON interrupt, if it is supported by device, and chip
// policy, but has not been registered yet. Set initial temperature
@@ -1750,6 +1748,7 @@ DttClockUpdate(
DttPolicyUpdate(pDfs->hRm, TemperatureC, pDtt);
LowLimit = pDtt->TcorePolicy.LowLimit;
HighLimit = pDtt->TcorePolicy.HighLimit;
+
if ((LowLimit != ODM_TMON_PARAMETER_UNSPECIFIED) &&
(HighLimit != ODM_TMON_PARAMETER_UNSPECIFIED))
{
@@ -2451,6 +2450,42 @@ void NvRmPrivDvsRequest(NvRmMilliVolts TargetMv)
}
}
+void
+NvRmPrivGetLowVoltageThreshold(
+ NvRmDfsVoltageRailId RailId,
+ NvRmMilliVolts* pLowMv,
+ NvRmMilliVolts* pPresentMv)
+{
+ NvRmDfs* pDfs = &s_Dfs;
+ NvRmDvs* pDvs = &s_Dfs.VoltageScaler;
+ NV_ASSERT(pLowMv);
+
+ switch (RailId)
+ {
+ case NvRmDfsVoltageRailId_Core:
+ *pLowMv = pDvs->LowCornerCoreMv;
+ if(pPresentMv)
+ *pPresentMv = pDvs->CurrentCoreMv;
+ break;
+
+ case NvRmDfsVoltageRailId_Cpu:
+ if (NvRmPrivIsCpuRailDedicated(pDfs->hRm))
+ {
+ *pLowMv = pDvs->LowCornerCpuMv;
+ if(pPresentMv)
+ *pPresentMv = pDvs->CurrentCpuMv;
+ break;
+ }
+ // fall through
+
+ default:
+ *pLowMv = NvRmVoltsUnspecified;
+ if(pPresentMv)
+ *pPresentMv = NvRmVoltsUnspecified;
+ break;
+ }
+}
+
static void NvRmPrivDvsStopAtNominal(void)
{
NvRmDfs* pDfs = &s_Dfs;
@@ -3446,32 +3481,10 @@ NvRmDfsGetLowVoltageThreshold(
NvRmMilliVolts* pLowMv,
NvRmMilliVolts* pPresentMv)
{
- NvRmDvs* pDvs = &s_Dfs.VoltageScaler;
-
NV_ASSERT(hRmDeviceHandle);
NvRmPrivLockSharedPll();
- switch (RailId)
- {
- case NvRmDfsVoltageRailId_Core:
- *pLowMv = pDvs->LowCornerCoreMv;
- *pPresentMv = pDvs->CurrentCoreMv;
- break;
-
- case NvRmDfsVoltageRailId_Cpu:
- if (NvRmPrivIsCpuRailDedicated(hRmDeviceHandle))
- {
- *pLowMv = pDvs->LowCornerCpuMv;
- *pPresentMv = pDvs->CurrentCpuMv;
- break;
- }
- // fall through
-
- default:
- *pLowMv = NvRmVoltsUnspecified;
- *pPresentMv = NvRmVoltsUnspecified;
- break;
- }
+ NvRmPrivGetLowVoltageThreshold(RailId, pLowMv, pPresentMv);
NvRmPrivUnlockSharedPll();
}
diff --git a/arch/arm/mach-tegra/nvrm/io/ap15/ap15rm_pwm.c b/arch/arm/mach-tegra/nvrm/io/ap15/ap15rm_pwm.c
index dbaf41563bd4..14c58e4e28f3 100644
--- a/arch/arm/mach-tegra/nvrm/io/ap15/ap15rm_pwm.c
+++ b/arch/arm/mach-tegra/nvrm/io/ap15/ap15rm_pwm.c
@@ -316,7 +316,7 @@ void NvRmPwmClose(NvRmPwmHandle hPwm)
NvOsMutexUnlock(s_hPwmMutex);
}
-#define MAX_DUTY_CYCLE 256
+#define MAX_DUTY_CYCLE 255
NvError NvRmPwmConfig(
NvRmPwmHandle hPwm,
@@ -416,7 +416,9 @@ NvError NvRmPwmConfig(
* Convert from percentage unsigned 15.16 fixed point
* format to actual register value
*/
- DCycle = (NvU8)((DutyCycle * MAX_DUTY_CYCLE/100)>>16);
+ DCycle = (DutyCycle * MAX_DUTY_CYCLE/100)>>16;
+ if (DCycle > MAX_DUTY_CYCLE)
+ DCycle = MAX_DUTY_CYCLE;
RegValue = PWM_SETNUM(CSR_0, ENB, PwmMode) |
PWM_SETNUM(CSR_0, PWM_0, DCycle);
diff --git a/arch/arm/mach-tegra/nvrm_user.c b/arch/arm/mach-tegra/nvrm_user.c
index c4c2ed214930..543c1fb27f96 100644
--- a/arch/arm/mach-tegra/nvrm_user.c
+++ b/arch/arm/mach-tegra/nvrm_user.c
@@ -27,6 +27,7 @@
#include <linux/cpumask.h>
#include <linux/sched.h>
#include <linux/cpu.h>
+#include <linux/platform_device.h>
#include "nvcommon.h"
#include "nvassert.h"
#include "nvos.h"
@@ -164,18 +165,19 @@ static void NvRmDfsThread(void *args)
}
if (Request & NvRmPmRequest_CpuOnFlag)
{
-#ifdef CONFIG_HOTPLUG_CPU
+#ifdef CONFIG_HOTPLUG_CPU
printk("DFS requested CPU ON\n");
cpu_up(1);
-#endif
+#endif
}
+
if (Request & NvRmPmRequest_CpuOffFlag)
{
-#ifdef CONFIG_HOTPLUG_CPU
+#ifdef CONFIG_HOTPLUG_CPU
printk("DFS requested CPU OFF\n");
cpu_down(1);
-#endif
- }
+#endif
+ }
}
}
}
@@ -204,46 +206,6 @@ static void client_detach(NvRtClientHandle client)
}
}
-static int __init nvrm_init( void )
-{
- int e = 0;
- NvU32 NumTypes = NvRtObjType_NvRm_Num;
-
- printk("nvrm init\n");
-
- NV_ASSERT(s_RtHandle == NULL);
-
- if (NvRtCreate(1, &NumTypes, &s_RtHandle) != NvSuccess)
- {
- e = -ENOMEM;
- }
-
- if (e == 0)
- {
- e = misc_register( &nvrm_dev );
- }
-
- if( e < 0 )
- {
- if (s_RtHandle)
- {
- NvRtDestroy(s_RtHandle);
- s_RtHandle = NULL;
- }
-
- printk("nvrm failed to open\n");
- }
-
- return e;
-}
-
-static void __exit nvrm_deinit( void )
-{
- misc_deregister( &nvrm_dev );
- NvRtDestroy(s_RtHandle);
- s_RtHandle = NULL;
-}
-
int nvrm_open(struct inode *inode, struct file *file)
{
NvRtClientHandle Client;
@@ -522,7 +484,7 @@ long nvrm_unlocked_ioctl(struct file *file,
goto fail;
}
}
- break;
+ break;
case NvRmIoctls_NvRmGetClientId:
err = NvOsCopyIn(&p, (void*)arg, sizeof(p));
if (err != NvSuccess)
@@ -534,7 +496,7 @@ long nvrm_unlocked_ioctl(struct file *file,
NV_ASSERT(p.InBufferSize == 0);
NV_ASSERT(p.OutBufferSize == sizeof(NvRtClientHandle));
NV_ASSERT(p.InOutBufferSize == 0);
-
+
if (NvOsCopyOut(p.pBuffer,
&file->private_data,
sizeof(NvRtClientHandle)) != NvSuccess)
@@ -553,7 +515,7 @@ long nvrm_unlocked_ioctl(struct file *file,
NvOsDebugPrintf("NvRmIoctls_NvRmClientAttach: copy in failed\n");
goto fail;
}
-
+
NV_ASSERT(p.InBufferSize == sizeof(NvRtClientHandle));
NV_ASSERT(p.OutBufferSize == 0);
NV_ASSERT(p.InOutBufferSize == 0);
@@ -590,11 +552,11 @@ long nvrm_unlocked_ioctl(struct file *file,
NvOsDebugPrintf("NvRmIoctls_NvRmClientAttach: copy in failed\n");
goto fail;
}
-
+
NV_ASSERT(p.InBufferSize == sizeof(NvRtClientHandle));
NV_ASSERT(p.OutBufferSize == 0);
NV_ASSERT(p.InOutBufferSize == 0);
-
+
if (NvOsCopyIn((void*)&Client,
p.pBuffer,
sizeof(NvRtClientHandle)) != NvSuccess)
@@ -610,10 +572,10 @@ long nvrm_unlocked_ioctl(struct file *file,
// The daemon is detaching from itself, no need to dec refcount
break;
}
-
- client_detach(Client);
+
+ client_detach(Client);
break;
- }
+ }
// FIXME: power ioctls?
default:
printk( "unknown ioctl code\n" );
@@ -640,5 +602,82 @@ int nvrm_mmap(struct file *file, struct vm_area_struct *vma)
return 0;
}
+static int nvrm_probe(struct platform_device *pdev)
+{
+ int e = 0;
+ NvU32 NumTypes = NvRtObjType_NvRm_Num;
+
+ printk("nvrm probe\n");
+
+ NV_ASSERT(s_RtHandle == NULL);
+
+ if (NvRtCreate(1, &NumTypes, &s_RtHandle) != NvSuccess)
+ {
+ e = -ENOMEM;
+ }
+
+ if (e == 0)
+ {
+ e = misc_register( &nvrm_dev );
+ }
+
+ if( e < 0 )
+ {
+ if (s_RtHandle)
+ {
+ NvRtDestroy(s_RtHandle);
+ s_RtHandle = NULL;
+ }
+
+ printk("nvrm probe failed to open\n");
+ }
+ return e;
+}
+
+static int nvrm_remove(struct platform_device *pdev)
+{
+ misc_deregister( &nvrm_dev );
+ NvRtDestroy(s_RtHandle);
+ s_RtHandle = NULL;
+ return 0;
+}
+
+static int nvrm_suspend(struct platform_device *pdev)
+{
+ NvError Err = NvSuccess;
+ printk(KERN_INFO "%s called\n", __func__);
+ return Err;
+}
+
+static int nvrm_resume(struct platform_device *pdev)
+{
+ NvError Err = NvSuccess;
+ printk(KERN_INFO "%s called\n", __func__);
+ return Err;
+}
+
+static struct platform_driver nvrm_driver =
+{
+ .probe = nvrm_probe,
+ .remove = nvrm_remove,
+ .suspend = nvrm_suspend,
+ .resume = nvrm_resume,
+ .driver = { .name = "nvrm" }
+};
+
+static int __init nvrm_init(void)
+{
+ int ret = 0;
+ printk(KERN_INFO "%s called\n", __func__);
+ ret= platform_driver_register(&nvrm_driver);
+ return ret;
+}
+
+static void __exit nvrm_deinit(void)
+{
+ printk(KERN_INFO "%s called\n", __func__);
+ platform_driver_unregister(&nvrm_driver);
+}
+
module_init(nvrm_init);
module_exit(nvrm_deinit);
diff --git a/arch/arm/mach-tegra/pci-enum.c b/arch/arm/mach-tegra/pci-enum.c
index 1dab1de0a86d..4f8c614ff8fc 100644
--- a/arch/arm/mach-tegra/pci-enum.c
+++ b/arch/arm/mach-tegra/pci-enum.c
@@ -434,8 +434,8 @@ static void pci_tegra_setup_pci_bridge(struct pci_tegra_device *dev)
reg |= PCI_COMMAND_SERR;
pci_conf_write16(dev->bus, dev->devfn, PCI_COMMAND, reg);
- /* FIXME how to handle interrutps */
- pci_conf_write8(dev->bus, dev->devfn, PCI_INTERRUPT_LINE, 0x82);
+ pci_conf_write8(dev->bus, dev->devfn, PCI_INTERRUPT_LINE,
+ tegra_get_module_inst_irq("pcie", 0, 0));
pci_conf_write8(dev->bus, dev->devfn, PCI_INTERRUPT_PIN, 0xa);
}
@@ -535,8 +535,9 @@ static void pci_tegra_setup_pci_device(struct pci_tegra_device *dev)
reg |= PCI_COMMAND_SERR;
pci_conf_write16(dev->bus, dev->devfn, PCI_COMMAND, reg);
- /* FIXME how to handle interrutps */
-
+ pci_conf_write8(dev->bus, dev->devfn, PCI_INTERRUPT_LINE,
+ tegra_get_module_inst_irq("pcie", 0, 0));
+ pci_conf_write8(dev->bus, dev->devfn, PCI_INTERRUPT_PIN, 0xa);
}
static void pci_tegra_print_device_tree(struct pci_tegra_device *dev)
@@ -612,8 +613,12 @@ void pci_tegra_enumerate(void)
/* Disable all execptions */
pci_tegra_afi_writel(0, AFI_FPCI_ERROR_MASKS_0);
- /* Set the base and limits of the resources */
- pci_tegra_io_base = TEGRA_PCIE_BASE + PCIE_DOWNSTREAM_IO_OFFSET;
+ /* Set the base and limits for the resources */
+
+ /* Starting the IO offset from non-zero value as linux equating a value
+ * of 0 as unallocated resoruce and bailing out!
+ */
+ pci_tegra_io_base = TEGRA_PCIE_BASE + PCIE_DOWNSTREAM_IO_OFFSET + 16;
pci_tegra_io_limt = pci_tegra_io_base + PCIE_DOWNSTREAM_IO_SIZE;
pci_tegra_mem_base = FPCI_NON_PREFETCH_MEMORY_OFFSET;
diff --git a/arch/arm/mach-tegra/pci.c b/arch/arm/mach-tegra/pci.c
index 2ecba6015229..40a46e13c443 100644
--- a/arch/arm/mach-tegra/pci.c
+++ b/arch/arm/mach-tegra/pci.c
@@ -20,8 +20,8 @@
* 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301, USA.
*/
-#define DEBUG
-#define VERBOSE_DEBUG
+//#define DEBUG
+//#define VERBOSE_DEBUG
#include <linux/kernel.h>
@@ -116,8 +116,11 @@ static int pci_tegra_read_conf(struct pci_bus *bus, u32 devfn,
if (where & 2) v >>= 16;
v &= 0xffff;
break;
- default:
+ case 4:
break;
+ default:
+ /* If the PCI stack is sane, we should not get here */
+ BUG();
}
*val = v;
@@ -176,9 +179,12 @@ static int pci_tegra_write_conf(struct pci_bus *bus, u32 devfn,
temp |= val << ((where & 0x3) * 8);
writel(temp, (u32)addr & ~0x3);
break;
- default:
+ case 4:
writel(val, addr);
break;
+ default:
+ /* If the PCI stack is sane, we should not get here */
+ BUG();
}
fail:
return PCIBIOS_SUCCESSFUL;
diff --git a/arch/arm/mach-tegra/suspend_ops.c b/arch/arm/mach-tegra/suspend_ops.c
new file mode 100644
index 000000000000..0aef7afe151e
--- /dev/null
+++ b/arch/arm/mach-tegra/suspend_ops.c
@@ -0,0 +1,81 @@
+/*
+ * arch/arm/mach-tegra/suspend_ops.c
+ *
+ * Suspend Operation API implementation
+ *
+ * Copyright (c) 2010, NVIDIA Corporation.
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License as published by
+ * the Free Software Foundation; either version 2 of the License, or
+ * (at your option) any later version.
+ *
+ * This program is distributed in the hope that it will be useful, but WITHOUT
+ * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
+ * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for
+ * more details.
+ *
+ * You should have received a copy of the GNU General Public License along
+ * with this program; if not, write to the Free Software Foundation, Inc.,
+ * 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301, USA.
+ */
+
+#include <linux/suspend.h>
+#include "nvcommon.h"
+
+int tegra_state_valid(suspend_state_t state)
+{
+ printk("%s CALLED\n", __func__);
+ if (state > PM_SUSPEND_ON && state <= PM_SUSPEND_MAX)
+ return 1;
+ return 0;
+}
+
+int tegra_state_begin(suspend_state_t state)
+{
+ printk("%s CALLED with state = %d\n", __func__, state);
+ return 0;
+}
+
+int tegra_state_prepare(void)
+{
+ printk("%s CALLED \n", __func__);
+ return 0;
+}
+
+int tegra_state_enter(suspend_state_t state)
+{
+ printk("%s CALLED with state = %d\n", __func__, state);
+ return 0;
+}
+
+void tegra_state_finish(void)
+{
+ printk("%s CALLED \n", __func__);
+}
+
+void tegra_state_end(void)
+{
+ printk("%s CALLED \n", __func__);
+}
+
+void tegra_state_recover(void)
+{
+ printk("%s CALLED \n", __func__);
+}
+
+static struct platform_suspend_ops tegra_suspend_ops =
+{
+ .valid = tegra_state_valid,
+ .begin = tegra_state_begin,
+ .prepare = tegra_state_prepare,
+ .enter = tegra_state_enter,
+ .finish = tegra_state_finish,
+ .end = tegra_state_end,
+ .recover = tegra_state_recover
+};
+
+void tegra_set_suspend_ops()
+{
+ suspend_set_ops(&tegra_suspend_ops);
+}
diff --git a/arch/arm/mm/cache-v6.S b/arch/arm/mm/cache-v6.S
index b804c9d3e420..55f7ecd1264e 100644
--- a/arch/arm/mm/cache-v6.S
+++ b/arch/arm/mm/cache-v6.S
@@ -203,20 +203,7 @@ ENTRY(v6_flush_kern_dcache_page)
* - end - virtual end address of region
*/
ENTRY(v6_dma_inv_range)
- tst r0, #D_CACHE_LINE_SIZE - 1
bic r0, r0, #D_CACHE_LINE_SIZE - 1
-#ifdef HARVARD_CACHE
- mcrne p15, 0, r0, c7, c10, 1 @ clean D line
-#else
- mcrne p15, 0, r0, c7, c11, 1 @ clean unified line
-#endif
- tst r1, #D_CACHE_LINE_SIZE - 1
- bic r1, r1, #D_CACHE_LINE_SIZE - 1
-#ifdef HARVARD_CACHE
- mcrne p15, 0, r1, c7, c14, 1 @ clean & invalidate D line
-#else
- mcrne p15, 0, r1, c7, c15, 1 @ clean & invalidate unified line
-#endif
1:
#ifdef HARVARD_CACHE
mcr p15, 0, r0, c7, c6, 1 @ invalidate D line
diff --git a/arch/arm/mm/cache-v7.S b/arch/arm/mm/cache-v7.S
index 4b733d14076a..5327bd1b9bcf 100644
--- a/arch/arm/mm/cache-v7.S
+++ b/arch/arm/mm/cache-v7.S
@@ -218,15 +218,9 @@ ENDPROC(v7_flush_kern_dcache_page)
ENTRY(v7_dma_inv_range)
dcache_line_size r2, r3
sub r3, r2, #1
- tst r0, r3
bic r0, r0, r3
it ne
- mcrne p15, 0, r0, c7, c14, 1 @ clean & invalidate D / U line
- tst r1, r3
- bic r1, r1, r3
- it ne
- mcrne p15, 0, r1, c7, c14, 1 @ clean & invalidate D / U line
1:
mcr p15, 0, r0, c7, c6, 1 @ invalidate D / U line
add r0, r0, r2
diff --git a/arch/arm/mm/dma-mapping.c b/arch/arm/mm/dma-mapping.c
index 9198f94fac89..1f37fae59d01 100644
--- a/arch/arm/mm/dma-mapping.c
+++ b/arch/arm/mm/dma-mapping.c
@@ -509,58 +509,40 @@ core_initcall(consistent_init);
* platforms with CONFIG_DMABOUNCE.
* Use the driver DMA support - see dma-mapping.h (dma_sync_*)
*/
-void dma_cache_maint(const void *start, size_t size, int direction)
+void __dma_cache_maint(const void *start, size_t size, int map)
{
void (*inner_op)(const void *, const void *);
void (*outer_op)(unsigned long, unsigned long);
BUG_ON(!virt_addr_valid(start) || !virt_addr_valid(start + size - 1));
- switch (direction) {
- case DMA_FROM_DEVICE: /* invalidate only */
- inner_op = smp_dma_inv_range;
- outer_op = outer_inv_range;
- break;
- case DMA_TO_DEVICE: /* writeback only */
- inner_op = smp_dma_clean_range;
- outer_op = outer_clean_range;
- break;
- case DMA_BIDIRECTIONAL: /* writeback and invalidate */
- inner_op = smp_dma_flush_range;
- outer_op = outer_flush_range;
- break;
- default:
- BUG();
+ if (map) { /* writeback only */
+ inner_op = dmac_clean_range;
+ outer_op = outer_clean_range;
+ } else { /* Invalidate only */
+ inner_op = dmac_inv_range;
+ outer_op = outer_inv_range;
}
inner_op(start, start + size);
outer_op(__pa(start), __pa(start) + size);
}
-EXPORT_SYMBOL(dma_cache_maint);
+EXPORT_SYMBOL(__dma_cache_maint);
static void dma_cache_maint_contiguous(struct page *page, unsigned long offset,
- size_t size, int direction)
+ size_t size, int map)
{
void *vaddr;
unsigned long paddr;
void (*inner_op)(const void *, const void *);
void (*outer_op)(unsigned long, unsigned long);
- switch (direction) {
- case DMA_FROM_DEVICE: /* invalidate only */
- inner_op = smp_dma_inv_range;
- outer_op = outer_inv_range;
- break;
- case DMA_TO_DEVICE: /* writeback only */
- inner_op = smp_dma_clean_range;
- outer_op = outer_clean_range;
- break;
- case DMA_BIDIRECTIONAL: /* writeback and invalidate */
- inner_op = smp_dma_flush_range;
- outer_op = outer_flush_range;
- break;
- default:
- BUG();
+ if (map) { /* writeback only */
+ inner_op = dmac_clean_range;
+ outer_op = outer_clean_range;
+ } else { /* Invalidate only */
+ inner_op = dmac_inv_range;
+ outer_op = outer_inv_range;
}
if (!PageHighMem(page)) {
@@ -579,8 +561,8 @@ static void dma_cache_maint_contiguous(struct page *page, unsigned long offset,
outer_op(paddr, paddr + size);
}
-void dma_cache_maint_page(struct page *page, unsigned long offset,
- size_t size, int dir)
+void __dma_cache_maint_page(struct page *page, unsigned long offset,
+ size_t size, int map)
{
/*
* A single sg entry may refer to multiple physically contiguous
@@ -598,13 +580,13 @@ void dma_cache_maint_page(struct page *page, unsigned long offset,
}
len = PAGE_SIZE - offset;
}
- dma_cache_maint_contiguous(page, offset, len, dir);
+ dma_cache_maint_contiguous(page, offset, len, map);
offset = 0;
page++;
left -= len;
} while (left);
}
-EXPORT_SYMBOL(dma_cache_maint_page);
+EXPORT_SYMBOL(__dma_cache_maint_page);
/**
* dma_map_sg - map a set of SG buffers for streaming mode DMA
@@ -680,6 +662,14 @@ void dma_sync_sg_for_cpu(struct device *dev, struct scatterlist *sg,
for_each_sg(sg, s, nents, i) {
dmabounce_sync_for_cpu(dev, sg_dma_address(s), 0,
sg_dma_len(s), dir);
+
+ if (!dmabounce_sync_for_cpu(dev, sg_dma_address(s), 0,
+ sg_dma_len(s), dir))
+ continue;
+
+ if (!arch_is_coherent() && dir != DMA_TO_DEVICE)
+ __dma_cache_maint_page(sg_page(s), s->offset,
+ s->length, 0);
}
}
EXPORT_SYMBOL(dma_sync_sg_for_cpu);
@@ -703,8 +693,8 @@ void dma_sync_sg_for_device(struct device *dev, struct scatterlist *sg,
continue;
if (!arch_is_coherent())
- dma_cache_maint_page(sg_page(s), s->offset,
- s->length, dir);
+ __dma_cache_maint_page(sg_page(s), s->offset,
+ s->length, 1);
}
}
EXPORT_SYMBOL(dma_sync_sg_for_device);
diff --git a/arch/arm/plat-omap/include/mach/memory.h b/arch/arm/plat-omap/include/mach/memory.h
index d6b5ca6c7da2..1383ac5d1f07 100644
--- a/arch/arm/plat-omap/include/mach/memory.h
+++ b/arch/arm/plat-omap/include/mach/memory.h
@@ -65,6 +65,13 @@
(dma_addr_t)virt_to_lbus(page_address(page)) : \
(dma_addr_t)__virt_to_phys(page_address(page));})
+#define __arch_dma_to_page(dev, addr) \
+ ({ dma_addr_t __dma = addr; \
+ if (is_lbus_device(dev)) \
+ __dma += PHYS_OFFSET - OMAP1510_LB_OFFSET; \
+ phys_to_page(__dma); \
+ })
+
#define __arch_dma_to_virt(dev, addr) ({ (void *) (is_lbus_device(dev) ? \
lbus_to_virt(addr) : \
__phys_to_virt(addr)); })
diff --git a/drivers/serial/tegra_hsuart.c b/drivers/serial/tegra_hsuart.c
index ba582f420a58..fac5e5eff24f 100644
--- a/drivers/serial/tegra_hsuart.c
+++ b/drivers/serial/tegra_hsuart.c
@@ -20,6 +20,9 @@
* 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301, USA.
*/
+// #define DEBUG 1
+// #define VERBOSE_DEBUG 1
+
#include <linux/module.h>
#include <linux/serial.h>
#include <linux/serial_core.h>
@@ -46,14 +49,7 @@
#define UART_RX_DMA_PING_BUFFER_SIZE 0x800
-static int use_rx_dma = 1;
-static int use_tx_dma = 1;
-
-struct nv_uart_buffer {
- void *rx_dma_virt;
- dma_addr_t rx_dma_phys;
- NvU32 rx_dma_size;
-};
+static int use_dma = 1;
struct tegra_uart_port {
struct uart_port uport;
@@ -65,7 +61,7 @@ struct tegra_uart_port {
void __iomem *regs;
NvOsPhysAddr phys;
NvU32 size;
- struct clk *clk;
+ struct clk *clk;
/* Register shadow */
unsigned char fcr_shadow;
@@ -90,7 +86,6 @@ struct tegra_uart_port {
/* DMA requests */
struct tegra_dma_req rx_dma_req[2];
- struct nv_uart_buffer rx_buf[2];
int rx_dma;
struct tegra_dma_req tx_dma_req;
@@ -99,6 +94,9 @@ struct tegra_uart_port {
unsigned char *rx_pio_buffer;
int rx_pio_buffer_size;
+ bool use_rx_dma;
+ bool dma_for_tx;
+
struct tasklet_struct tasklet;
};
@@ -349,7 +347,7 @@ void tegra_rx_dma_complete_callback(struct tegra_dma_req *req, int err)
t = container_of(u, struct tegra_uart_port, uport);
if (req->bytes_transferred) {
tty_insert_flip_string(tty,
- ((unsigned char *)(req->virt_addr)),
+ ((unsigned char *)(req->virt_addr)),
req->bytes_transferred);
}
@@ -389,7 +387,7 @@ static irqreturn_t tegra_uart_isr(int irq, void *data)
case 2: /* Receive */
case 3: /* Receive error */
case 6: /* Rx timeout */
- if (likely(use_rx_dma))
+ if (likely(t->use_rx_dma))
do_handle_rx_dma(u, 0);
else
do_handle_rx_pio(u);
@@ -418,6 +416,7 @@ static int tegra_uart_hwinit(struct tegra_uart_port *t)
{
unsigned char fcr;
unsigned char mcr;
+ unsigned char ier;
NvError err;
dev_vdbg(t->uport.dev, "+tegra_uart_hwinit\n");
@@ -472,7 +471,7 @@ static int tegra_uart_hwinit(struct tegra_uart_port *t)
t->fcr_shadow = NV_FLD_SET_DRF_DEF(UART, IIR_FCR, RX_TRIG,
FIFO_COUNT_GREATER_4, t->fcr_shadow);
- if (use_tx_dma) {
+ if (t->dma_for_tx) {
t->fcr_shadow = NV_FLD_SET_DRF_DEF(UART, IIR_FCR, TX_TRIG,
FIFO_COUNT_GREATER_4, t->fcr_shadow);
} else {
@@ -484,7 +483,7 @@ static int tegra_uart_hwinit(struct tegra_uart_port *t)
t->tx_low_watermark = 8;
t->rx_high_watermark = 4;
- if (use_rx_dma)
+ if (t->use_rx_dma)
t->fcr_shadow = NV_FLD_SET_DRF_DEF(UART, IIR_FCR, DMA, CHANGE,
t->fcr_shadow);
else
@@ -500,6 +499,36 @@ static int tegra_uart_hwinit(struct tegra_uart_port *t)
t->mcr_shadow = mcr;
writeb(mcr, t->regs + UART_MCR_0);
+ /*
+	 * Enable IE_RXS for the receive status interrupts like line errors.
+ * Enable IE_RX_TIMEOUT to get the bytes which cannot be DMA'd.
+ *
+ * If using DMA mode, enable EORD instead of receive interrupt which
+ * will interrupt after the UART is done with the receive instead of
+ * the interrupt when the FIFO "threshold" is reached.
+ *
+ * EORD is different interrupt than RX_TIMEOUT - RX_TIMEOUT occurs when
+ * the DATA is sitting in the FIFO and couldn't be transferred to the
+ * DMA as the DMA size alignment(4 bytes) is not met. EORD will be
+	 * triggered when there is a pause of the incoming data stream for 4
+ * characters long.
+ *
+ * For pauses in the data which is not aligned to 4 bytes, we get
+ * both the EORD as well as RX_TIMEOUT - SW sees RX_TIMEOUT first
+ * then the EORD.
+ *
+ * Don't get confused, believe in the magic of nvidia hw...:-)
+ */
+ ier = 0;
+ ier = NV_FLD_SET_DRF_DEF(UART, IER_DLAB_0, IE_RXS, ENABLE, ier);
+ ier = NV_FLD_SET_DRF_DEF(UART, IER_DLAB_0, IE_RX_TIMEOUT, ENABLE, ier);
+ if (t->use_rx_dma)
+ ier = NV_FLD_SET_DRF_DEF(UART, IER_DLAB_0, IE_EORD, ENABLE,ier);
+ else
+ ier = NV_FLD_SET_DRF_DEF(UART, IER_DLAB_0, IE_RHR, ENABLE, ier);
+ t->ier_shadow = ier;
+ writeb(ier, t->regs + UART_IER_DLAB_0_0);
+
dev_vdbg(t->uport.dev, "-tegra_uart_hwinit\n");
return 0;
@@ -517,6 +546,7 @@ static int tegra_uart_init_rx_dma(struct tegra_uart_port *t)
if (t->rx_dma < 0)
return -ENODEV;
+ memset(t->rx_dma_req, 0, sizeof(t->rx_dma_req));
for (i=0; i<2; i++) {
dma_addr_t rx_dma_phys;
void *rx_dma_virt;
@@ -528,13 +558,15 @@ static int tegra_uart_init_rx_dma(struct tegra_uart_port *t)
rx_dma_virt = dma_alloc_coherent(t->uport.dev,
t->rx_dma_req[i].size, &rx_dma_phys, GFP_KERNEL);
if (!rx_dma_virt) {
- dev_err(t->uport.dev, "Could not allocate dma buffers\n");
- return -ENODEV;
+ dev_err(t->uport.dev, "DMA buffers allocate failed \n");
+ goto fail;
}
+ t->rx_dma_req[i].dest_addr = rx_dma_phys;
+ t->rx_dma_req[i].virt_addr = rx_dma_virt;
+ }
- /* Polulate Rx DMA buffer */
+ for (i=0; i<2; i++) {
t->rx_dma_req[i].source_addr = t->phys;
- t->rx_dma_req[i].dest_addr = rx_dma_phys;
t->rx_dma_req[i].source_wrap = 4;
t->rx_dma_req[i].dest_wrap = 0;
t->rx_dma_req[i].to_memory = 1;
@@ -543,21 +575,31 @@ static int tegra_uart_init_rx_dma(struct tegra_uart_port *t)
t->rx_dma_req[i].complete = tegra_rx_dma_complete_callback;
t->rx_dma_req[i].size = t->rx_dma_req[i].size;
t->rx_dma_req[i].data = &t->uport;
- t->rx_dma_req[i].virt_addr = rx_dma_virt;
INIT_LIST_HEAD(&(t->rx_dma_req[i].list));
if (tegra_dma_enqueue_req(t->rx_dma, &t->rx_dma_req[i])) {
dev_err(t->uport.dev, "Could not enqueue Rx DMA req\n");
- return -ENODEV;
+ goto fail;
}
}
+
return 0;
+fail:
+ tegra_dma_free_channel(t->rx_dma);
+ if (t->rx_dma_req[0].dest_addr)
+ dma_free_coherent(t->uport.dev, t->rx_dma_req[0].size,
+ t->rx_dma_req[0].virt_addr, t->rx_dma_req[0].dest_addr);
+ if (t->rx_dma_req[1].dest_addr)
+ dma_free_coherent(t->uport.dev, t->rx_dma_req[1].size,
+ t->rx_dma_req[1].virt_addr, t->rx_dma_req[1].dest_addr);
+ return -ENODEV;
}
static int tegra_startup(struct uart_port *u)
{
- struct tegra_uart_port *t;
+ struct tegra_uart_port *t = container_of(u,
+ struct tegra_uart_port, uport);
int ret = 0;
- unsigned char ier;
+ struct circ_buf *xmit = &u->info->xmit;
t = container_of(u, struct tegra_uart_port, uport);
sprintf(t->port_name, "tegra_uart_%d", u->line);
@@ -568,30 +610,16 @@ static int tegra_startup(struct uart_port *u)
dev_err(u->dev, "Cannot map UART registers\n");
return -ENODEV;
}
-
- ret = tegra_uart_hwinit(t);
- if (ret)
- return ret;
-
t->irq = NvRmGetIrqForLogicalInterrupt(s_hRmGlobal, t->modid, 0);
BUG_ON(t->irq == (NvU32)(-1));
- ret = request_irq(t->irq, tegra_uart_isr, IRQF_SHARED, t->port_name, u);
- if (ret) {
- dev_err(u->dev, "Failed to register ISR for IRQ %d\n", t->irq);
- }
- /* Set the irq flags to irq valid, which is the default linux behaviour.
- * For irqs used by Nv* APIs, IRQF_NOAUTOEN is also set */
- set_irq_flags(t->irq, IRQF_VALID);
-
- if (use_tx_dma) {
- struct circ_buf *xmit = &u->info->xmit;
-
- /* Allocate DMA, set the DMA buffer */
+ t->dma_for_tx = false;
+ if (use_dma) {
t->tx_dma = tegra_dma_allocate_channel(TEGRA_DMA_MODE_ONESHOT);
- if (t->tx_dma < 0) {
- goto fail;
- }
+ if (t->tx_dma >= 0)
+ t->dma_for_tx = true;
+ }
+ if (t->dma_for_tx) {
t->tx_dma_virt = xmit->buf;
t->tx_dma_phys = dma_map_single(u->dev, xmit->buf,
UART_XMIT_SIZE, DMA_BIDIRECTIONAL);
@@ -610,51 +638,30 @@ static int tegra_startup(struct uart_port *u)
t->tx_dma_req.source_wrap = 0;
t->tx_dma_req.data = &t->tasklet;
}
- if (use_rx_dma && tegra_uart_init_rx_dma(t))
+
+ t->use_rx_dma = false;
+ if (use_dma) {
+ if (!tegra_uart_init_rx_dma(t))
+ t->use_rx_dma = true;
+ }
+ ret = tegra_uart_hwinit(t);
+ if (ret)
goto fail;
- /*
- * Enable IE_RXS for the receive status interrupts like line errros.
- * Enable IE_RX_TIMEOUT to get the bytes which cannot be DMA'd.
- *
- * If using DMA mode, enable EORD instead of receive interrupt which
- * will interrupt after the UART is done with the receive instead of
- * the interrupt when the FIFO "threshold" is reached.
- *
- * EORD is different interrupt than RX_TIMEOUT - RX_TIMEOUT occurs when
- * the DATA is sitting in the FIFO and couldn't be transferred to the
- * DMA as the DMA size alignment(4 bytes) is not met. EORD will be
- * triggered when there is a pause of the incomming data stream for 4
- * characters long.
- *
- * For pauses in the data which is not aligned to 4 bytes, we get
- * both the EORD as well as RX_TIMEOUT - SW sees RX_TIMEOUT first
- * then the EORD.
- *
- * Don't get confused, believe in the magic of nvidia hw...:-)
- */
- ier = 0;
- ier = NV_FLD_SET_DRF_DEF(UART, IER_DLAB_0, IE_RXS, ENABLE, ier);
- ier = NV_FLD_SET_DRF_DEF(UART, IER_DLAB_0, IE_RX_TIMEOUT, ENABLE, ier);
- if (use_rx_dma)
- ier = NV_FLD_SET_DRF_DEF(UART, IER_DLAB_0, IE_EORD, ENABLE,ier);
- else
- ier = NV_FLD_SET_DRF_DEF(UART, IER_DLAB_0, IE_RHR, ENABLE, ier);
- t->ier_shadow = ier;
- writeb(ier, t->regs + UART_IER_DLAB_0_0);
+ ret = request_irq(t->irq, tegra_uart_isr, IRQF_SHARED, t->port_name, u);
+ if (ret) {
+ dev_err(u->dev, "Failed to register ISR for IRQ %d\n", t->irq);
+ goto fail;
+ }
+ /* Set the irq flags to irq valid, which is the default linux behaviour.
+ * For irqs used by Nv* APIs, IRQF_NOAUTOEN is also set */
+ set_irq_flags(t->irq, IRQF_VALID);
dev_info(u->dev,"Started UART port %d\n", u->line);
- return 0;
+ return 0;
fail:
- /* FIXME: Do proper clean-up */
- dev_err(u->dev, " %s failed\n", __func__);
- if (use_rx_dma) {
- tegra_dma_free_channel(t->rx_dma);
- }
- if (use_tx_dma) {
- tegra_dma_free_channel(t->tx_dma);
- }
- return -ENODEV;
+ dev_err(u->dev, "Tegra UART startup failed\n");
+ return ret;
}
#define TX_EMPTY_STATUS (NV_DRF_DEF(UART, LSR, TMTY, EMPTY) | \
@@ -669,7 +676,7 @@ static void tegra_shutdown(struct uart_port *u)
t = container_of(u, struct tegra_uart_port, uport);
dev_vdbg(u->dev, "+tegra_shutdown\n");
- if (!use_tx_dma) {
+ if (!t->dma_for_tx) {
/* wait for 10 msec to drain the Tx buffer, if not empty */
unsigned char lsr;
do {
@@ -691,7 +698,7 @@ static void tegra_shutdown(struct uart_port *u)
dev_info(u->dev, "DMA wait timedout\n");
}
- if (use_rx_dma) {
+ if (t->use_rx_dma) {
tegra_dma_flush(t->rx_dma);
tegra_dma_free_channel(t->rx_dma);
dma_free_coherent(u->dev, t->rx_dma_req[0].size,
@@ -699,7 +706,7 @@ static void tegra_shutdown(struct uart_port *u)
dma_free_coherent(u->dev, t->rx_dma_req[1].size,
t->rx_dma_req[1].virt_addr, t->rx_dma_req[1].dest_addr);
}
- if (use_tx_dma) {
+ if (t->dma_for_tx) {
tegra_dma_free_channel(t->tx_dma);
}
@@ -788,7 +795,7 @@ static void tegra_start_tx_locked(struct uart_port *u)
// dev_vdbg(t->uport.dev, "+tegra_start_tx_locked\n");
- if (!use_tx_dma) {
+ if (!t->dma_for_tx) {
/* Enable interrupt on transmit FIFO empty, if it is disabled */
if (!(t->ier_shadow & NV_DRF_DEF(UART, IER_DLAB_0, IE_THR,
ENABLE))) {
@@ -889,7 +896,8 @@ void tegra_set_termios(struct uart_port *u, struct ktermios *termios,
strlcat(debug_string, "even parity ", 50);
lcr = NV_FLD_SET_DRF_DEF(UART, LCR, PAR, PARITY, lcr);
lcr = NV_FLD_SET_DRF_DEF(UART, LCR, EVEN, DISABLE, lcr);
- lcr = NV_FLD_SET_DRF_DEF(UART, LCR, SET_P, NO_PARITY, lcr);
+ lcr = NV_FLD_SET_DRF_DEF(UART, LCR, SET_P, NO_PARITY,
+ lcr);
} else if (CMSPAR == (c_cflag & CMSPAR)) {
strlcat(debug_string, "space parity ", 50);
/* FIXME What is space parity? */
@@ -898,7 +906,8 @@ void tegra_set_termios(struct uart_port *u, struct ktermios *termios,
strlcat(debug_string, "odd parity ", 50);
lcr = NV_FLD_SET_DRF_DEF(UART, LCR, PAR, PARITY, lcr);
lcr = NV_FLD_SET_DRF_DEF(UART, LCR, EVEN, ENABLE, lcr);
- lcr = NV_FLD_SET_DRF_DEF(UART, LCR, SET_P, NO_PARITY, lcr);
+ lcr = NV_FLD_SET_DRF_DEF(UART, LCR, SET_P, NO_PARITY,
+ lcr);
}
}
@@ -982,7 +991,7 @@ static int __devexit tegra_uart_remove(struct platform_device *pdev);
static struct platform_driver tegra_uart_platform_driver = {
.remove = tegra_uart_remove,
.probe = tegra_uart_probe,
- .driver = {
+ .driver = {
.name = "tegra_uart"
}
};
@@ -1024,9 +1033,6 @@ static int __init tegra_uart_probe(struct platform_device *pdev)
int ret;
char clk_name[MAX_CLK_NAME_CHARS];
- if (pdev->id != 1)
- return -ENODEV;
-
if (pdev->id < 0 || pdev->id > tegra_uart_driver.nr) {
printk(KERN_ERR "Invalid Uart instance (%d) \n", pdev->id);
return -ENODEV;
diff --git a/drivers/usb/host/ehci-tegra.c b/drivers/usb/host/ehci-tegra.c
index ba167c776652..698e5f7f365e 100644
--- a/drivers/usb/host/ehci-tegra.c
+++ b/drivers/usb/host/ehci-tegra.c
@@ -37,16 +37,12 @@
#include "nvrm_hardware_access.h"
#include "nvddk_usbphy.h"
-/* FIXME: Power Management is un-ported so temporarily disable it */
-#undef CONFIG_PM
-
#define TEGRA_USB_ID_INT_ENABLE (1 << 0)
#define TEGRA_USB_ID_INT_STATUS (1 << 1)
#define TEGRA_USB_ID_PIN_STATUS (1 << 2)
#define TEGRA_USB_ID_PIN_WAKEUP_ENABLE (1 << 6)
#define TEGRA_USB_PHY_WAKEUP_REG_OFFSET (0x408)
-
static void tegra_ehci_shutdown (struct usb_hcd *hcd)
{
struct tegra_hcd_platform_data *pdata;
@@ -62,7 +58,6 @@ static void tegra_ehci_shutdown (struct usb_hcd *hcd)
NV_ASSERT_SUCCESS(NvDdkUsbPhyPowerDown(pdata->hUsbPhy, 0));
}
-
static irqreturn_t tegra_ehci_irq (struct usb_hcd *hcd)
{
struct ehci_hcd *ehci = hcd_to_ehci (hcd);
@@ -95,7 +90,6 @@ static irqreturn_t tegra_ehci_irq (struct usb_hcd *hcd)
return ehci_irq(hcd);
}
-
static int tegra_ehci_reinit(struct usb_hcd *hcd)
{
struct tegra_hcd_platform_data *pdata;
@@ -154,6 +148,18 @@ static int tegra_ehci_setup(struct usb_hcd *hcd)
return retval;
}
+static int tegra_ehci_bus_suspend(struct usb_hcd *hcd)
+{
+ printk("%s called\n", __func__);
+ return 0;
+}
+
+static int tegra_ehci_bus_resume(struct usb_hcd *hcd)
+{
+ printk("%s called\n", __func__);
+ return 0;
+}
+
static const struct hc_driver tegra_ehci_hc_driver = {
.description = hcd_name,
.product_desc = "Tegra Ehci host controller",
@@ -173,13 +179,12 @@ static const struct hc_driver tegra_ehci_hc_driver = {
.get_frame_number = ehci_get_frame,
.hub_status_data = ehci_hub_status_data,
.hub_control = ehci_hub_control,
- .bus_suspend = ehci_bus_suspend,
- .bus_resume = ehci_bus_resume,
+ .bus_suspend = tegra_ehci_bus_suspend,
+ .bus_resume = tegra_ehci_bus_resume,
.relinquish_port = ehci_relinquish_port,
.port_handed_over = ehci_port_handed_over,
};
-
static int tegra_ehci_probe(struct platform_device *pdev)
{
int instance = pdev->id;
@@ -317,20 +322,17 @@ static int tegra_ehci_remove(struct platform_device *pdev)
return 0;
}
-#ifdef CONFIG_PM
static int tegra_ehci_suspend(struct platform_device *pdev,
pm_message_t message)
{
- return -ENXIO;
+ printk("%s called\n", __func__);
+ return 0;
}
static int tegra_ehci_resume(struct platform_device *pdev)
{
- return -ENXIO;
+ printk("%s called\n", __func__);
+ return 0;
}
-#else
-#define tegra_ehci_resume NULL
-#define tegra_ehci_suspend NULL
-#endif
static struct platform_driver tegra_ehci_driver =
{
@@ -343,5 +345,3 @@ static struct platform_driver tegra_ehci_driver =
.name = "tegra-ehci",
}
};
-
-